1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
4 *
5 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
6 *
7 * Thanks to the following companies for their support:
8 *
9 * - JMicron (hardware and technical support)
10 */
11
12 #include <linux/bitfield.h>
13 #include <linux/delay.h>
14 #include <linux/dmaengine.h>
15 #include <linux/ktime.h>
16 #include <linux/highmem.h>
17 #include <linux/io.h>
18 #include <linux/module.h>
19 #include <linux/dma-mapping.h>
20 #include <linux/slab.h>
21 #include <linux/scatterlist.h>
22 #include <linux/sizes.h>
23 #include <linux/regulator/consumer.h>
24 #include <linux/pm_runtime.h>
25 #include <linux/of.h>
26
27 #include <linux/leds.h>
28
29 #include <linux/mmc/mmc.h>
30 #include <linux/mmc/host.h>
31 #include <linux/mmc/card.h>
32 #include <linux/mmc/sdio.h>
33 #include <linux/mmc/slot-gpio.h>
34
35 #include <trace/hooks/mmc.h>
36
37 #include "sdhci.h"
38
39 #define DRIVER_NAME "sdhci"
40
41 #define DBG(f, x...) \
42 pr_debug("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)
43
44 #define SDHCI_DUMP(f, x...) \
45 pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)
46
47 #define MAX_TUNING_LOOP 40
48
49 static unsigned int debug_quirks = 0;
50 static unsigned int debug_quirks2;
51
52 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
53
54 static bool sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd);
55
56 void sdhci_dumpregs(struct sdhci_host *host)
57 {
58 SDHCI_DUMP("============ SDHCI REGISTER DUMP ===========\n");
59
60 SDHCI_DUMP("Sys addr: 0x%08x | Version: 0x%08x\n",
61 sdhci_readl(host, SDHCI_DMA_ADDRESS),
62 sdhci_readw(host, SDHCI_HOST_VERSION));
63 SDHCI_DUMP("Blk size: 0x%08x | Blk cnt: 0x%08x\n",
64 sdhci_readw(host, SDHCI_BLOCK_SIZE),
65 sdhci_readw(host, SDHCI_BLOCK_COUNT));
66 SDHCI_DUMP("Argument: 0x%08x | Trn mode: 0x%08x\n",
67 sdhci_readl(host, SDHCI_ARGUMENT),
68 sdhci_readw(host, SDHCI_TRANSFER_MODE));
69 SDHCI_DUMP("Present: 0x%08x | Host ctl: 0x%08x\n",
70 sdhci_readl(host, SDHCI_PRESENT_STATE),
71 sdhci_readb(host, SDHCI_HOST_CONTROL));
72 SDHCI_DUMP("Power: 0x%08x | Blk gap: 0x%08x\n",
73 sdhci_readb(host, SDHCI_POWER_CONTROL),
74 sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
75 SDHCI_DUMP("Wake-up: 0x%08x | Clock: 0x%08x\n",
76 sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
77 sdhci_readw(host, SDHCI_CLOCK_CONTROL));
78 SDHCI_DUMP("Timeout: 0x%08x | Int stat: 0x%08x\n",
79 sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
80 sdhci_readl(host, SDHCI_INT_STATUS));
81 SDHCI_DUMP("Int enab: 0x%08x | Sig enab: 0x%08x\n",
82 sdhci_readl(host, SDHCI_INT_ENABLE),
83 sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
84 SDHCI_DUMP("ACmd stat: 0x%08x | Slot int: 0x%08x\n",
85 sdhci_readw(host, SDHCI_AUTO_CMD_STATUS),
86 sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
87 SDHCI_DUMP("Caps: 0x%08x | Caps_1: 0x%08x\n",
88 sdhci_readl(host, SDHCI_CAPABILITIES),
89 sdhci_readl(host, SDHCI_CAPABILITIES_1));
90 SDHCI_DUMP("Cmd: 0x%08x | Max curr: 0x%08x\n",
91 sdhci_readw(host, SDHCI_COMMAND),
92 sdhci_readl(host, SDHCI_MAX_CURRENT));
93 SDHCI_DUMP("Resp[0]: 0x%08x | Resp[1]: 0x%08x\n",
94 sdhci_readl(host, SDHCI_RESPONSE),
95 sdhci_readl(host, SDHCI_RESPONSE + 4));
96 SDHCI_DUMP("Resp[2]: 0x%08x | Resp[3]: 0x%08x\n",
97 sdhci_readl(host, SDHCI_RESPONSE + 8),
98 sdhci_readl(host, SDHCI_RESPONSE + 12));
99 SDHCI_DUMP("Host ctl2: 0x%08x\n",
100 sdhci_readw(host, SDHCI_HOST_CONTROL2));
101
102 if (host->flags & SDHCI_USE_ADMA) {
103 if (host->flags & SDHCI_USE_64_BIT_DMA) {
104 SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
105 sdhci_readl(host, SDHCI_ADMA_ERROR),
106 sdhci_readl(host, SDHCI_ADMA_ADDRESS_HI),
107 sdhci_readl(host, SDHCI_ADMA_ADDRESS));
108 } else {
109 SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
110 sdhci_readl(host, SDHCI_ADMA_ERROR),
111 sdhci_readl(host, SDHCI_ADMA_ADDRESS));
112 }
113 }
114
115 if (host->ops->dump_vendor_regs)
116 host->ops->dump_vendor_regs(host);
117
118 SDHCI_DUMP("============================================\n");
119 }
120 EXPORT_SYMBOL_GPL(sdhci_dumpregs);
121
122 /*****************************************************************************\
123 * *
124 * Low level functions *
125 * *
126 \*****************************************************************************/
127
128 static void sdhci_do_enable_v4_mode(struct sdhci_host *host)
129 {
130 u16 ctrl2;
131
132 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
133 if (ctrl2 & SDHCI_CTRL_V4_MODE)
134 return;
135
136 ctrl2 |= SDHCI_CTRL_V4_MODE;
137 sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
138 }
139
140 /*
141 * This can be called before sdhci_add_host() by Vendor's host controller
142 * driver to enable v4 mode if supported.
143 */
144 void sdhci_enable_v4_mode(struct sdhci_host *host)
145 {
146 host->v4_mode = true;
147 sdhci_do_enable_v4_mode(host);
148 }
149 EXPORT_SYMBOL_GPL(sdhci_enable_v4_mode);
150
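/* True if the command occupies the DAT lines: it either transfers data or uses busy signalling on DAT0 */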
151 static inline bool sdhci_data_line_cmd(struct mmc_command *cmd)
152 {
153 return cmd->data || cmd->flags & MMC_RSP_BUSY;
154 }
155
156 static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
157 {
158 u32 present;
159
160 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
161 !mmc_card_is_removable(host->mmc) || mmc_can_gpio_cd(host->mmc))
162 return;
163
164 if (enable) {
165 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
166 SDHCI_CARD_PRESENT;
167
168 host->ier |= present ? SDHCI_INT_CARD_REMOVE :
169 SDHCI_INT_CARD_INSERT;
170 } else {
171 host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
172 }
173
174 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
175 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
176 }
177
178 static void sdhci_enable_card_detection(struct sdhci_host *host)
179 {
180 sdhci_set_card_detection(host, true);
181 }
182
183 static void sdhci_disable_card_detection(struct sdhci_host *host)
184 {
185 sdhci_set_card_detection(host, false);
186 }
187
188 static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
189 {
190 if (host->bus_on)
191 return;
192 host->bus_on = true;
193 pm_runtime_get_noresume(mmc_dev(host->mmc));
194 }
195
196 static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
197 {
198 if (!host->bus_on)
199 return;
200 host->bus_on = false;
201 pm_runtime_put_noidle(mmc_dev(host->mmc));
202 }
203
204 void sdhci_reset(struct sdhci_host *host, u8 mask)
205 {
206 ktime_t timeout;
207
208 sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
209
210 if (mask & SDHCI_RESET_ALL) {
211 host->clock = 0;
212 /* Reset-all turns off SD Bus Power */
213 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
214 sdhci_runtime_pm_bus_off(host);
215 }
216
217 /* Wait max 100 ms */
218 timeout = ktime_add_ms(ktime_get(), 100);
219
220 /* hw clears the bit when it's done */
221 while (1) {
222 bool timedout = ktime_after(ktime_get(), timeout);
223
224 if (!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask))
225 break;
226 if (timedout) {
227 pr_err("%s: Reset 0x%x never completed.\n",
228 mmc_hostname(host->mmc), (int)mask);
229 sdhci_err_stats_inc(host, CTRL_TIMEOUT);
230 sdhci_dumpregs(host);
231 return;
232 }
233 udelay(10);
234 }
235 }
236 EXPORT_SYMBOL_GPL(sdhci_reset);
237
238 static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
239 {
240 if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
241 struct mmc_host *mmc = host->mmc;
242
243 if (!mmc->ops->get_cd(mmc))
244 return;
245 }
246
247 host->ops->reset(host, mask);
248
249 if (mask & SDHCI_RESET_ALL) {
250 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
251 if (host->ops->enable_dma)
252 host->ops->enable_dma(host);
253 }
254
255 /* Resetting the controller clears many */
256 host->preset_enabled = false;
257 }
258 }
259
260 static void sdhci_set_default_irqs(struct sdhci_host *host)
261 {
262 host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
263 SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
264 SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
265 SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
266 SDHCI_INT_RESPONSE;
267
268 if (host->tuning_mode == SDHCI_TUNING_MODE_2 ||
269 host->tuning_mode == SDHCI_TUNING_MODE_3)
270 host->ier |= SDHCI_INT_RETUNE;
271
272 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
273 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
274 }
275
276 static void sdhci_config_dma(struct sdhci_host *host)
277 {
278 u8 ctrl;
279 u16 ctrl2;
280
281 if (host->version < SDHCI_SPEC_200)
282 return;
283
284 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
285
286 /*
287 * Always adjust the DMA selection as some controllers
288 * (e.g. JMicron) can't do PIO properly when the selection
289 * is ADMA.
290 */
291 ctrl &= ~SDHCI_CTRL_DMA_MASK;
292 if (!(host->flags & SDHCI_REQ_USE_DMA))
293 goto out;
294
295 /* Note if DMA Select is zero then SDMA is selected */
296 if (host->flags & SDHCI_USE_ADMA)
297 ctrl |= SDHCI_CTRL_ADMA32;
298
299 if (host->flags & SDHCI_USE_64_BIT_DMA) {
300 /*
301 * If v4 mode, all supported DMA can be 64-bit addressing if
302 * controller supports 64-bit system address, otherwise only
303 * ADMA can support 64-bit addressing.
304 */
305 if (host->v4_mode) {
306 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
307 ctrl2 |= SDHCI_CTRL_64BIT_ADDR;
308 sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
309 } else if (host->flags & SDHCI_USE_ADMA) {
310 /*
311 * Don't need to undo SDHCI_CTRL_ADMA32 in order to
312 * set SDHCI_CTRL_ADMA64.
313 */
314 ctrl |= SDHCI_CTRL_ADMA64;
315 }
316 }
317
318 out:
319 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
320 }
321
322 static void sdhci_init(struct sdhci_host *host, int soft)
323 {
324 struct mmc_host *mmc = host->mmc;
325 unsigned long flags;
326
327 if (soft)
328 sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
329 else
330 sdhci_do_reset(host, SDHCI_RESET_ALL);
331
332 if (host->v4_mode)
333 sdhci_do_enable_v4_mode(host);
334
335 spin_lock_irqsave(&host->lock, flags);
336 sdhci_set_default_irqs(host);
337 spin_unlock_irqrestore(&host->lock, flags);
338
339 host->cqe_on = false;
340
341 if (soft) {
342 /* force clock reconfiguration */
343 host->clock = 0;
344 host->reinit_uhs = true;
345 mmc->ops->set_ios(mmc, &mmc->ios);
346 }
347 }
348
349 static void sdhci_reinit(struct sdhci_host *host)
350 {
351 u32 cd = host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
352
353 sdhci_init(host, 0);
354 sdhci_enable_card_detection(host);
355
356 /*
357 * A change to the card detect bits indicates a change in present state,
358 * refer sdhci_set_card_detection(). A card detect interrupt might have
359 * been missed while the host controller was being reset, so trigger a
360 * rescan to check.
361 */
362 if (cd != (host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT)))
363 mmc_detect_change(host->mmc, msecs_to_jiffies(200));
364 }
365
366 static void __sdhci_led_activate(struct sdhci_host *host)
367 {
368 u8 ctrl;
369
370 if (host->quirks & SDHCI_QUIRK_NO_LED)
371 return;
372
373 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
374 ctrl |= SDHCI_CTRL_LED;
375 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
376 }
377
378 static void __sdhci_led_deactivate(struct sdhci_host *host)
379 {
380 u8 ctrl;
381
382 if (host->quirks & SDHCI_QUIRK_NO_LED)
383 return;
384
385 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
386 ctrl &= ~SDHCI_CTRL_LED;
387 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
388 }
389
390 #if IS_REACHABLE(CONFIG_LEDS_CLASS)
391 static void sdhci_led_control(struct led_classdev *led,
392 enum led_brightness brightness)
393 {
394 struct sdhci_host *host = container_of(led, struct sdhci_host, led);
395 unsigned long flags;
396
397 spin_lock_irqsave(&host->lock, flags);
398
399 if (host->runtime_suspended)
400 goto out;
401
402 if (brightness == LED_OFF)
403 __sdhci_led_deactivate(host);
404 else
405 __sdhci_led_activate(host);
406 out:
407 spin_unlock_irqrestore(&host->lock, flags);
408 }
409
410 static int sdhci_led_register(struct sdhci_host *host)
411 {
412 struct mmc_host *mmc = host->mmc;
413
414 if (host->quirks & SDHCI_QUIRK_NO_LED)
415 return 0;
416
417 snprintf(host->led_name, sizeof(host->led_name),
418 "%s::", mmc_hostname(mmc));
419
420 host->led.name = host->led_name;
421 host->led.brightness = LED_OFF;
422 host->led.default_trigger = mmc_hostname(mmc);
423 host->led.brightness_set = sdhci_led_control;
424
425 return led_classdev_register(mmc_dev(mmc), &host->led);
426 }
427
428 static void sdhci_led_unregister(struct sdhci_host *host)
429 {
430 if (host->quirks & SDHCI_QUIRK_NO_LED)
431 return;
432
433 led_classdev_unregister(&host->led);
434 }
435
436 static inline void sdhci_led_activate(struct sdhci_host *host)
437 {
438 }
439
440 static inline void sdhci_led_deactivate(struct sdhci_host *host)
441 {
442 }
443
444 #else
445
446 static inline int sdhci_led_register(struct sdhci_host *host)
447 {
448 return 0;
449 }
450
451 static inline void sdhci_led_unregister(struct sdhci_host *host)
452 {
453 }
454
455 static inline void sdhci_led_activate(struct sdhci_host *host)
456 {
457 __sdhci_led_activate(host);
458 }
459
460 static inline void sdhci_led_deactivate(struct sdhci_host *host)
461 {
462 __sdhci_led_deactivate(host);
463 }
464
465 #endif
466
467 static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
468 unsigned long timeout)
469 {
470 if (sdhci_data_line_cmd(mrq->cmd))
471 mod_timer(&host->data_timer, timeout);
472 else
473 mod_timer(&host->timer, timeout);
474 }
475
476 static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq)
477 {
478 if (sdhci_data_line_cmd(mrq->cmd))
479 del_timer(&host->data_timer);
480 else
481 del_timer(&host->timer);
482 }
483
484 static inline bool sdhci_has_requests(struct sdhci_host *host)
485 {
486 return host->cmd || host->data_cmd;
487 }
488
489 /*****************************************************************************\
490 * *
491 * Core functions *
492 * *
493 \*****************************************************************************/
494
495 static void sdhci_read_block_pio(struct sdhci_host *host)
496 {
497 unsigned long flags;
498 size_t blksize, len, chunk;
499 u32 scratch;
500 u8 *buf;
501
502 DBG("PIO reading\n");
503
504 blksize = host->data->blksz;
505 chunk = 0;
506
507 local_irq_save(flags);
508
509 while (blksize) {
510 BUG_ON(!sg_miter_next(&host->sg_miter));
511
512 len = min(host->sg_miter.length, blksize);
513
514 blksize -= len;
515 host->sg_miter.consumed = len;
516
517 buf = host->sg_miter.addr;
518
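/* Drain the 32-bit buffer data port one word at a time and unpack it byte by byte */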
519 while (len) {
520 if (chunk == 0) {
521 scratch = sdhci_readl(host, SDHCI_BUFFER);
522 chunk = 4;
523 }
524
525 *buf = scratch & 0xFF;
526
527 buf++;
528 scratch >>= 8;
529 chunk--;
530 len--;
531 }
532 }
533
534 sg_miter_stop(&host->sg_miter);
535
536 local_irq_restore(flags);
537 }
538
539 static void sdhci_write_block_pio(struct sdhci_host *host)
540 {
541 unsigned long flags;
542 size_t blksize, len, chunk;
543 u32 scratch;
544 u8 *buf;
545
546 DBG("PIO writing\n");
547
548 blksize = host->data->blksz;
549 chunk = 0;
550 scratch = 0;
551
552 local_irq_save(flags);
553
554 while (blksize) {
555 BUG_ON(!sg_miter_next(&host->sg_miter));
556
557 len = min(host->sg_miter.length, blksize);
558
559 blksize -= len;
560 host->sg_miter.consumed = len;
561
562 buf = host->sg_miter.addr;
563
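/* Pack bytes into 32-bit words and push each full (or final partial) word to the buffer data port */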
564 while (len) {
565 scratch |= (u32)*buf << (chunk * 8);
566
567 buf++;
568 chunk++;
569 len--;
570
571 if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
572 sdhci_writel(host, scratch, SDHCI_BUFFER);
573 chunk = 0;
574 scratch = 0;
575 }
576 }
577 }
578
579 sg_miter_stop(&host->sg_miter);
580
581 local_irq_restore(flags);
582 }
583
584 static void sdhci_transfer_pio(struct sdhci_host *host)
585 {
586 u32 mask;
587
588 if (host->blocks == 0)
589 return;
590
591 if (host->data->flags & MMC_DATA_READ)
592 mask = SDHCI_DATA_AVAILABLE;
593 else
594 mask = SDHCI_SPACE_AVAILABLE;
595
596 /*
597 * Some controllers (JMicron JMB38x) mess up the buffer bits
598 * for transfers < 4 bytes. As long as it is just one block,
599 * we can ignore the bits.
600 */
601 if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
602 (host->data->blocks == 1))
603 mask = ~0;
604
605 while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
606 if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
607 udelay(100);
608
609 if (host->data->flags & MMC_DATA_READ)
610 sdhci_read_block_pio(host);
611 else
612 sdhci_write_block_pio(host);
613
614 host->blocks--;
615 if (host->blocks == 0)
616 break;
617 }
618
619 DBG("PIO transfer complete.\n");
620 }
621
622 static int sdhci_pre_dma_transfer(struct sdhci_host *host,
623 struct mmc_data *data, int cookie)
624 {
625 int sg_count;
626
627 /*
628 * If the data buffers are already mapped, return the previous
629 * dma_map_sg() result.
630 */
631 if (data->host_cookie == COOKIE_PRE_MAPPED)
632 return data->sg_count;
633
634 /* Bounce write requests to the bounce buffer */
635 if (host->bounce_buffer) {
636 unsigned int length = data->blksz * data->blocks;
637
638 if (length > host->bounce_buffer_size) {
639 pr_err("%s: asked for transfer of %u bytes exceeds bounce buffer %u bytes\n",
640 mmc_hostname(host->mmc), length,
641 host->bounce_buffer_size);
642 return -EIO;
643 }
644 if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) {
645 /* Copy the data to the bounce buffer */
646 if (host->ops->copy_to_bounce_buffer) {
647 host->ops->copy_to_bounce_buffer(host,
648 data, length);
649 } else {
650 sg_copy_to_buffer(data->sg, data->sg_len,
651 host->bounce_buffer, length);
652 }
653 }
654 /* Switch ownership to the DMA */
655 dma_sync_single_for_device(mmc_dev(host->mmc),
656 host->bounce_addr,
657 host->bounce_buffer_size,
658 mmc_get_dma_dir(data));
659 /* Just a dummy value */
660 sg_count = 1;
661 } else {
662 /* Just access the data directly from memory */
663 sg_count = dma_map_sg(mmc_dev(host->mmc),
664 data->sg, data->sg_len,
665 mmc_get_dma_dir(data));
666 }
667
668 if (sg_count == 0)
669 return -ENOSPC;
670
671 data->sg_count = sg_count;
672 data->host_cookie = cookie;
673
674 return sg_count;
675 }
676
677 static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
678 {
679 local_irq_save(*flags);
680 return kmap_atomic(sg_page(sg)) + sg->offset;
681 }
682
683 static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
684 {
685 kunmap_atomic(buffer);
686 local_irq_restore(*flags);
687 }
688
689 void sdhci_adma_write_desc(struct sdhci_host *host, void **desc,
690 dma_addr_t addr, int len, unsigned int cmd)
691 {
692 struct sdhci_adma2_64_desc *dma_desc = *desc;
693
694 /* 32-bit and 64-bit descriptors have these members in same position */
695 dma_desc->cmd = cpu_to_le16(cmd);
696 dma_desc->len = cpu_to_le16(len);
697 dma_desc->addr_lo = cpu_to_le32(lower_32_bits(addr));
698
699 if (host->flags & SDHCI_USE_64_BIT_DMA)
700 dma_desc->addr_hi = cpu_to_le32(upper_32_bits(addr));
701
702 *desc += host->desc_sz;
703 }
704 EXPORT_SYMBOL_GPL(sdhci_adma_write_desc);
705
706 static inline void __sdhci_adma_write_desc(struct sdhci_host *host,
707 void **desc, dma_addr_t addr,
708 int len, unsigned int cmd)
709 {
710 if (host->ops->adma_write_desc)
711 host->ops->adma_write_desc(host, desc, addr, len, cmd);
712 else
713 sdhci_adma_write_desc(host, desc, addr, len, cmd);
714 }
715
716 static void sdhci_adma_mark_end(void *desc)
717 {
718 struct sdhci_adma2_64_desc *dma_desc = desc;
719
720 /* 32-bit and 64-bit descriptors have 'cmd' in same position */
721 dma_desc->cmd |= cpu_to_le16(ADMA2_END);
722 }
723
724 static void sdhci_adma_table_pre(struct sdhci_host *host,
725 struct mmc_data *data, int sg_count)
726 {
727 struct scatterlist *sg;
728 unsigned long flags;
729 dma_addr_t addr, align_addr;
730 void *desc, *align;
731 char *buffer;
732 int len, offset, i;
733
734 /*
735 * The spec does not specify endianness of descriptor table.
736 * We currently guess that it is LE.
737 */
738
739 host->sg_count = sg_count;
740
741 desc = host->adma_table;
742 align = host->align_buffer;
743
744 align_addr = host->align_addr;
745
746 for_each_sg(data->sg, sg, host->sg_count, i) {
747 addr = sg_dma_address(sg);
748 len = sg_dma_len(sg);
749
750 /*
751 * The SDHCI specification states that ADMA addresses must
752 * be 32-bit aligned. If they aren't, then we use a bounce
753 * buffer for the (up to three) bytes that screw up the
754 * alignment.
755 */
756 offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
757 SDHCI_ADMA2_MASK;
758 if (offset) {
759 if (data->flags & MMC_DATA_WRITE) {
760 buffer = sdhci_kmap_atomic(sg, &flags);
761 memcpy(align, buffer, offset);
762 sdhci_kunmap_atomic(buffer, &flags);
763 }
764
765 /* tran, valid */
766 __sdhci_adma_write_desc(host, &desc, align_addr,
767 offset, ADMA2_TRAN_VALID);
768
769 BUG_ON(offset > 65536);
770
771 align += SDHCI_ADMA2_ALIGN;
772 align_addr += SDHCI_ADMA2_ALIGN;
773
774 addr += offset;
775 len -= offset;
776 }
777
778 /*
779 * The block layer forces a minimum segment size of PAGE_SIZE,
780 * so 'len' can be too big here if PAGE_SIZE >= 64KiB. Write
781 * multiple descriptors, noting that the ADMA table is sized
782 * for 4KiB chunks anyway, so it will be big enough.
783 */
784 while (len > host->max_adma) {
785 int n = 32 * 1024; /* 32 KiB */
786
787 __sdhci_adma_write_desc(host, &desc, addr, n, ADMA2_TRAN_VALID);
788 addr += n;
789 len -= n;
790 }
791
792 /* tran, valid */
793 if (len)
794 __sdhci_adma_write_desc(host, &desc, addr, len,
795 ADMA2_TRAN_VALID);
796
797 /*
798 * If this triggers then we have a calculation bug
799 * somewhere. :/
800 */
801 WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
802 }
803
804 if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
805 /* Mark the last descriptor as the terminating descriptor */
806 if (desc != host->adma_table) {
807 desc -= host->desc_sz;
808 sdhci_adma_mark_end(desc);
809 }
810 } else {
811 /* Add a terminating entry - nop, end, valid */
812 __sdhci_adma_write_desc(host, &desc, 0, 0, ADMA2_NOP_END_VALID);
813 }
814 }
815
816 static void sdhci_adma_table_post(struct sdhci_host *host,
817 struct mmc_data *data)
818 {
819 struct scatterlist *sg;
820 int i, size;
821 void *align;
822 char *buffer;
823 unsigned long flags;
824
825 if (data->flags & MMC_DATA_READ) {
826 bool has_unaligned = false;
827
828 /* Do a quick scan of the SG list for any unaligned mappings */
829 for_each_sg(data->sg, sg, host->sg_count, i)
830 if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
831 has_unaligned = true;
832 break;
833 }
834
835 if (has_unaligned) {
836 dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
837 data->sg_len, DMA_FROM_DEVICE);
838
839 align = host->align_buffer;
840
841 for_each_sg(data->sg, sg, host->sg_count, i) {
842 if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
843 size = SDHCI_ADMA2_ALIGN -
844 (sg_dma_address(sg) & SDHCI_ADMA2_MASK);
845
846 buffer = sdhci_kmap_atomic(sg, &flags);
847 memcpy(buffer, align, size);
848 sdhci_kunmap_atomic(buffer, &flags);
849
850 align += SDHCI_ADMA2_ALIGN;
851 }
852 }
853 }
854 }
855 }
856
857 static void sdhci_set_adma_addr(struct sdhci_host *host, dma_addr_t addr)
858 {
859 sdhci_writel(host, lower_32_bits(addr), SDHCI_ADMA_ADDRESS);
860 if (host->flags & SDHCI_USE_64_BIT_DMA)
861 sdhci_writel(host, upper_32_bits(addr), SDHCI_ADMA_ADDRESS_HI);
862 }
863
864 static dma_addr_t sdhci_sdma_address(struct sdhci_host *host)
865 {
866 if (host->bounce_buffer)
867 return host->bounce_addr;
868 else
869 return sg_dma_address(host->data->sg);
870 }
871
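/* In v4 mode the SDMA address is programmed through the ADMA system address registers */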
872 static void sdhci_set_sdma_addr(struct sdhci_host *host, dma_addr_t addr)
873 {
874 if (host->v4_mode)
875 sdhci_set_adma_addr(host, addr);
876 else
877 sdhci_writel(host, addr, SDHCI_DMA_ADDRESS);
878 }
879
880 static unsigned int sdhci_target_timeout(struct sdhci_host *host,
881 struct mmc_command *cmd,
882 struct mmc_data *data)
883 {
884 unsigned int target_timeout;
885
886 /* timeout in us */
887 if (!data) {
888 target_timeout = cmd->busy_timeout * 1000;
889 } else {
890 target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
891 if (host->clock && data->timeout_clks) {
892 unsigned long long val;
893
894 /*
895 * data->timeout_clks is in units of clock cycles.
896 * host->clock is in Hz. target_timeout is in us.
897 * Hence, us = 1000000 * cycles / Hz. Round up.
898 */
899 val = 1000000ULL * data->timeout_clks;
900 if (do_div(val, host->clock))
901 target_timeout++;
902 target_timeout += val;
903 }
904 }
905
906 return target_timeout;
907 }
908
909 static void sdhci_calc_sw_timeout(struct sdhci_host *host,
910 struct mmc_command *cmd)
911 {
912 struct mmc_data *data = cmd->data;
913 struct mmc_host *mmc = host->mmc;
914 struct mmc_ios *ios = &mmc->ios;
915 unsigned char bus_width = 1 << ios->bus_width;
916 unsigned int blksz;
917 unsigned int freq;
918 u64 target_timeout;
919 u64 transfer_time;
920
921 target_timeout = sdhci_target_timeout(host, cmd, data);
922 target_timeout *= NSEC_PER_USEC;
923
924 if (data) {
925 blksz = data->blksz;
926 freq = mmc->actual_clock ? : host->clock;
927 transfer_time = (u64)blksz * NSEC_PER_SEC * (8 / bus_width);
928 do_div(transfer_time, freq);
929 /* multiply by '2' to account for any unknowns */
930 transfer_time = transfer_time * 2;
931 /* calculate timeout for the entire data */
932 host->data_timeout = data->blocks * target_timeout +
933 transfer_time;
934 } else {
935 host->data_timeout = target_timeout;
936 }
937
938 if (host->data_timeout)
939 host->data_timeout += MMC_CMD_TRANSFER_TIME;
940 }
941
942 static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd,
943 bool *too_big)
944 {
945 u8 count;
946 struct mmc_data *data;
947 unsigned target_timeout, current_timeout;
948
949 *too_big = true;
950
951 /*
952 * If the host controller provides us with an incorrect timeout
953 * value, just skip the check and use the maximum. The hardware may take
954 * longer to time out, but that's much better than having a too-short
955 * timeout value.
956 */
957 if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
958 return host->max_timeout_count;
959
960 /* Unspecified command, assume max */
961 if (cmd == NULL)
962 return host->max_timeout_count;
963
964 data = cmd->data;
965 /* Unspecified timeout, assume max */
966 if (!data && !cmd->busy_timeout)
967 return host->max_timeout_count;
968
969 /* timeout in us */
970 target_timeout = sdhci_target_timeout(host, cmd, data);
971
972 /*
973 * Figure out needed cycles.
974 * We do this in steps in order to fit inside a 32 bit int.
975 * The first step is the minimum timeout, which will have a
976 * minimum resolution of 6 bits:
977 * (1) 2^13*1000 > 2^22,
978 * (2) host->timeout_clk < 2^16
979 * =>
980 * (1) / (2) > 2^6
981 */
982 count = 0;
983 current_timeout = (1 << 13) * 1000 / host->timeout_clk;
984 while (current_timeout < target_timeout) {
985 count++;
986 current_timeout <<= 1;
987 if (count > host->max_timeout_count)
988 break;
989 }
990
991 if (count > host->max_timeout_count) {
992 if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT))
993 DBG("Too large timeout 0x%x requested for CMD%d!\n",
994 count, cmd->opcode);
995 count = host->max_timeout_count;
996 } else {
997 *too_big = false;
998 }
999
1000 return count;
1001 }
1002
1003 static void sdhci_set_transfer_irqs(struct sdhci_host *host)
1004 {
1005 u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
1006 u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;
1007
1008 if (host->flags & SDHCI_REQ_USE_DMA)
1009 host->ier = (host->ier & ~pio_irqs) | dma_irqs;
1010 else
1011 host->ier = (host->ier & ~dma_irqs) | pio_irqs;
1012
1013 if (host->flags & (SDHCI_AUTO_CMD23 | SDHCI_AUTO_CMD12))
1014 host->ier |= SDHCI_INT_AUTO_CMD_ERR;
1015 else
1016 host->ier &= ~SDHCI_INT_AUTO_CMD_ERR;
1017
1018 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
1019 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
1020 }
1021
1022 void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
1023 {
1024 if (enable)
1025 host->ier |= SDHCI_INT_DATA_TIMEOUT;
1026 else
1027 host->ier &= ~SDHCI_INT_DATA_TIMEOUT;
1028 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
1029 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
1030 }
1031 EXPORT_SYMBOL_GPL(sdhci_set_data_timeout_irq);
1032
1033 void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
1034 {
1035 bool too_big = false;
1036 u8 count = sdhci_calc_timeout(host, cmd, &too_big);
1037
1038 if (too_big &&
1039 host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) {
1040 sdhci_calc_sw_timeout(host, cmd);
1041 sdhci_set_data_timeout_irq(host, false);
1042 } else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) {
1043 sdhci_set_data_timeout_irq(host, true);
1044 }
1045
1046 sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
1047 }
1048 EXPORT_SYMBOL_GPL(__sdhci_set_timeout);
1049
1050 static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
1051 {
1052 if (host->ops->set_timeout)
1053 host->ops->set_timeout(host, cmd);
1054 else
1055 __sdhci_set_timeout(host, cmd);
1056 }
1057
1058 static void sdhci_initialize_data(struct sdhci_host *host,
1059 struct mmc_data *data)
1060 {
1061 WARN_ON(host->data);
1062
1063 /* Sanity checks */
1064 BUG_ON(data->blksz * data->blocks > 524288);
1065 BUG_ON(data->blksz > host->mmc->max_blk_size);
1066 BUG_ON(data->blocks > 65535);
1067
1068 host->data = data;
1069 host->data_early = 0;
1070 host->data->bytes_xfered = 0;
1071 }
1072
1073 static inline void sdhci_set_block_info(struct sdhci_host *host,
1074 struct mmc_data *data)
1075 {
1076 /* Set the DMA boundary value and block size */
1077 sdhci_writew(host,
1078 SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz),
1079 SDHCI_BLOCK_SIZE);
1080 /*
1081 * For Version 4.10 onwards, if v4 mode is enabled, 32-bit Block Count
1082 * can be supported, in which case the 16-bit block count register must be 0.
1083 */
1084 if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
1085 (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) {
1086 if (sdhci_readw(host, SDHCI_BLOCK_COUNT))
1087 sdhci_writew(host, 0, SDHCI_BLOCK_COUNT);
1088 sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT);
1089 } else {
1090 sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
1091 }
1092 }
1093
1094 static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
1095 {
1096 struct mmc_data *data = cmd->data;
1097
1098 sdhci_initialize_data(host, data);
1099
1100 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
1101 struct scatterlist *sg;
1102 unsigned int length_mask, offset_mask;
1103 int i;
1104
1105 host->flags |= SDHCI_REQ_USE_DMA;
1106
1107 /*
1108 * FIXME: This doesn't account for merging when mapping the
1109 * scatterlist.
1110 *
1111 * The assumption here being that alignment and lengths are
1112 * the same after DMA mapping to device address space.
1113 */
1114 length_mask = 0;
1115 offset_mask = 0;
1116 if (host->flags & SDHCI_USE_ADMA) {
1117 if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
1118 length_mask = 3;
1119 /*
1120 * As we use up to 3 byte chunks to work
1121 * around alignment problems, we need to
1122 * check the offset as well.
1123 */
1124 offset_mask = 3;
1125 }
1126 } else {
1127 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
1128 length_mask = 3;
1129 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
1130 offset_mask = 3;
1131 }
1132
1133 if (unlikely(length_mask | offset_mask)) {
1134 for_each_sg(data->sg, sg, data->sg_len, i) {
1135 if (sg->length & length_mask) {
1136 DBG("Reverting to PIO because of transfer size (%d)\n",
1137 sg->length);
1138 host->flags &= ~SDHCI_REQ_USE_DMA;
1139 break;
1140 }
1141 if (sg->offset & offset_mask) {
1142 DBG("Reverting to PIO because of bad alignment\n");
1143 host->flags &= ~SDHCI_REQ_USE_DMA;
1144 break;
1145 }
1146 }
1147 }
1148 }
1149
1150 sdhci_config_dma(host);
1151
1152 if (host->flags & SDHCI_REQ_USE_DMA) {
1153 int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);
1154
1155 if (sg_cnt <= 0) {
1156 /*
1157 * This only happens when someone fed
1158 * us an invalid request.
1159 */
1160 WARN_ON(1);
1161 host->flags &= ~SDHCI_REQ_USE_DMA;
1162 } else if (host->flags & SDHCI_USE_ADMA) {
1163 sdhci_adma_table_pre(host, data, sg_cnt);
1164 sdhci_set_adma_addr(host, host->adma_addr);
1165 } else {
1166 WARN_ON(sg_cnt != 1);
1167 sdhci_set_sdma_addr(host, sdhci_sdma_address(host));
1168 }
1169 }
1170
1171 if (!(host->flags & SDHCI_REQ_USE_DMA)) {
1172 int flags;
1173
1174 flags = SG_MITER_ATOMIC;
1175 if (host->data->flags & MMC_DATA_READ)
1176 flags |= SG_MITER_TO_SG;
1177 else
1178 flags |= SG_MITER_FROM_SG;
1179 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
1180 host->blocks = data->blocks;
1181 }
1182
1183 sdhci_set_transfer_irqs(host);
1184
1185 sdhci_set_block_info(host, data);
1186 }
1187
1188 #if IS_ENABLED(CONFIG_MMC_SDHCI_EXTERNAL_DMA)
1189
1190 static int sdhci_external_dma_init(struct sdhci_host *host)
1191 {
1192 int ret = 0;
1193 struct mmc_host *mmc = host->mmc;
1194
1195 host->tx_chan = dma_request_chan(mmc_dev(mmc), "tx");
1196 if (IS_ERR(host->tx_chan)) {
1197 ret = PTR_ERR(host->tx_chan);
1198 if (ret != -EPROBE_DEFER)
1199 pr_warn("Failed to request TX DMA channel.\n");
1200 host->tx_chan = NULL;
1201 return ret;
1202 }
1203
1204 host->rx_chan = dma_request_chan(mmc_dev(mmc), "rx");
1205 if (IS_ERR(host->rx_chan)) {
1206 if (host->tx_chan) {
1207 dma_release_channel(host->tx_chan);
1208 host->tx_chan = NULL;
1209 }
1210
1211 ret = PTR_ERR(host->rx_chan);
1212 if (ret != -EPROBE_DEFER)
1213 pr_warn("Failed to request RX DMA channel.\n");
1214 host->rx_chan = NULL;
1215 }
1216
1217 return ret;
1218 }
1219
1220 static struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host,
1221 struct mmc_data *data)
1222 {
1223 return data->flags & MMC_DATA_WRITE ? host->tx_chan : host->rx_chan;
1224 }
1225
1226 static int sdhci_external_dma_setup(struct sdhci_host *host,
1227 struct mmc_command *cmd)
1228 {
1229 int ret, i;
1230 enum dma_transfer_direction dir;
1231 struct dma_async_tx_descriptor *desc;
1232 struct mmc_data *data = cmd->data;
1233 struct dma_chan *chan;
1234 struct dma_slave_config cfg;
1235 dma_cookie_t cookie;
1236 int sg_cnt;
1237
1238 if (!host->mapbase)
1239 return -EINVAL;
1240
1241 memset(&cfg, 0, sizeof(cfg));
1242 cfg.src_addr = host->mapbase + SDHCI_BUFFER;
1243 cfg.dst_addr = host->mapbase + SDHCI_BUFFER;
1244 cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1245 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1246 cfg.src_maxburst = data->blksz / 4;
1247 cfg.dst_maxburst = data->blksz / 4;
1248
1249 /* Sanity check: all the SG entries must be aligned by block size. */
1250 for (i = 0; i < data->sg_len; i++) {
1251 if ((data->sg + i)->length % data->blksz)
1252 return -EINVAL;
1253 }
1254
1255 chan = sdhci_external_dma_channel(host, data);
1256
1257 ret = dmaengine_slave_config(chan, &cfg);
1258 if (ret)
1259 return ret;
1260
1261 sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);
1262 if (sg_cnt <= 0)
1263 return -EINVAL;
1264
1265 dir = data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
1266 desc = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len, dir,
1267 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1268 if (!desc)
1269 return -EINVAL;
1270
1271 desc->callback = NULL;
1272 desc->callback_param = NULL;
1273
1274 cookie = dmaengine_submit(desc);
1275 if (dma_submit_error(cookie))
1276 ret = cookie;
1277
1278 return ret;
1279 }
1280
1281 static void sdhci_external_dma_release(struct sdhci_host *host)
1282 {
1283 if (host->tx_chan) {
1284 dma_release_channel(host->tx_chan);
1285 host->tx_chan = NULL;
1286 }
1287
1288 if (host->rx_chan) {
1289 dma_release_channel(host->rx_chan);
1290 host->rx_chan = NULL;
1291 }
1292
1293 sdhci_switch_external_dma(host, false);
1294 }
1295
1296 static void __sdhci_external_dma_prepare_data(struct sdhci_host *host,
1297 struct mmc_command *cmd)
1298 {
1299 struct mmc_data *data = cmd->data;
1300
1301 sdhci_initialize_data(host, data);
1302
1303 host->flags |= SDHCI_REQ_USE_DMA;
1304 sdhci_set_transfer_irqs(host);
1305
1306 sdhci_set_block_info(host, data);
1307 }
1308
1309 static void sdhci_external_dma_prepare_data(struct sdhci_host *host,
1310 struct mmc_command *cmd)
1311 {
1312 if (!sdhci_external_dma_setup(host, cmd)) {
1313 __sdhci_external_dma_prepare_data(host, cmd);
1314 } else {
1315 sdhci_external_dma_release(host);
1316 pr_err("%s: Cannot use external DMA, switch to the DMA/PIO which standard SDHCI provides.\n",
1317 mmc_hostname(host->mmc));
1318 sdhci_prepare_data(host, cmd);
1319 }
1320 }
1321
1322 static void sdhci_external_dma_pre_transfer(struct sdhci_host *host,
1323 struct mmc_command *cmd)
1324 {
1325 struct dma_chan *chan;
1326
1327 if (!cmd->data)
1328 return;
1329
1330 chan = sdhci_external_dma_channel(host, cmd->data);
1331 if (chan)
1332 dma_async_issue_pending(chan);
1333 }
1334
1335 #else
1336
1337 static inline int sdhci_external_dma_init(struct sdhci_host *host)
1338 {
1339 return -EOPNOTSUPP;
1340 }
1341
1342 static inline void sdhci_external_dma_release(struct sdhci_host *host)
1343 {
1344 }
1345
1346 static inline void sdhci_external_dma_prepare_data(struct sdhci_host *host,
1347 struct mmc_command *cmd)
1348 {
1349 /* This should never happen */
1350 WARN_ON_ONCE(1);
1351 }
1352
1353 static inline void sdhci_external_dma_pre_transfer(struct sdhci_host *host,
1354 struct mmc_command *cmd)
1355 {
1356 }
1357
1358 static inline struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host,
1359 struct mmc_data *data)
1360 {
1361 return NULL;
1362 }
1363
1364 #endif
1365
1366 void sdhci_switch_external_dma(struct sdhci_host *host, bool en)
1367 {
1368 host->use_external_dma = en;
1369 }
1370 EXPORT_SYMBOL_GPL(sdhci_switch_external_dma);
1371
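/* Helpers deciding whether Auto-CMD12, Auto-CMD23 or a manual CMD23 applies to a request */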
1372 static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
1373 struct mmc_request *mrq)
1374 {
1375 return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
1376 !mrq->cap_cmd_during_tfr;
1377 }
1378
1379 static inline bool sdhci_auto_cmd23(struct sdhci_host *host,
1380 struct mmc_request *mrq)
1381 {
1382 return mrq->sbc && (host->flags & SDHCI_AUTO_CMD23);
1383 }
1384
1385 static inline bool sdhci_manual_cmd23(struct sdhci_host *host,
1386 struct mmc_request *mrq)
1387 {
1388 return mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23);
1389 }
1390
1391 static inline void sdhci_auto_cmd_select(struct sdhci_host *host,
1392 struct mmc_command *cmd,
1393 u16 *mode)
1394 {
1395 bool use_cmd12 = sdhci_auto_cmd12(host, cmd->mrq) &&
1396 (cmd->opcode != SD_IO_RW_EXTENDED);
1397 bool use_cmd23 = sdhci_auto_cmd23(host, cmd->mrq);
1398 u16 ctrl2;
1399
1400 /*
1401 * In case of Version 4.10 or later, use of 'Auto CMD Auto
1402 * Select' is recommended rather than use of 'Auto CMD12
1403 * Enable' or 'Auto CMD23 Enable'. We require Version 4 Mode
1404 * here because some controllers (e.g. sdhci-of-dwmshc) expect it.
1405 */
1406 if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
1407 (use_cmd12 || use_cmd23)) {
1408 *mode |= SDHCI_TRNS_AUTO_SEL;
1409
1410 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1411 if (use_cmd23)
1412 ctrl2 |= SDHCI_CMD23_ENABLE;
1413 else
1414 ctrl2 &= ~SDHCI_CMD23_ENABLE;
1415 sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
1416
1417 return;
1418 }
1419
1420 /*
1421 * If we are sending CMD23, CMD12 never gets sent
1422 * on successful completion (so no Auto-CMD12).
1423 */
1424 if (use_cmd12)
1425 *mode |= SDHCI_TRNS_AUTO_CMD12;
1426 else if (use_cmd23)
1427 *mode |= SDHCI_TRNS_AUTO_CMD23;
1428 }
1429
1430 static void sdhci_set_transfer_mode(struct sdhci_host *host,
1431 struct mmc_command *cmd)
1432 {
1433 u16 mode = 0;
1434 struct mmc_data *data = cmd->data;
1435
1436 if (data == NULL) {
1437 if (host->quirks2 &
1438 SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
1439 /* must not clear SDHCI_TRANSFER_MODE when tuning */
1440 if (cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200)
1441 sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
1442 } else {
1443 /* clear Auto CMD settings for no data CMDs */
1444 mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
1445 sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
1446 SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
1447 }
1448 return;
1449 }
1450
1451 WARN_ON(!host->data);
1452
1453 if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
1454 mode = SDHCI_TRNS_BLK_CNT_EN;
1455
1456 if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
1457 mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
1458 sdhci_auto_cmd_select(host, cmd, &mode);
1459 if (sdhci_auto_cmd23(host, cmd->mrq))
1460 sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2);
1461 }
1462
1463 if (data->flags & MMC_DATA_READ)
1464 mode |= SDHCI_TRNS_READ;
1465 if (host->flags & SDHCI_REQ_USE_DMA)
1466 mode |= SDHCI_TRNS_DMA;
1467
1468 sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
1469 }
1470
1471 static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
1472 {
1473 return (!(host->flags & SDHCI_DEVICE_DEAD) &&
1474 ((mrq->cmd && mrq->cmd->error) ||
1475 (mrq->sbc && mrq->sbc->error) ||
1476 (mrq->data && mrq->data->stop && mrq->data->stop->error) ||
1477 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
1478 }
1479
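/* Record a completed request in a free mrqs_done[] slot; it must not already be present */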
1480 static void sdhci_set_mrq_done(struct sdhci_host *host, struct mmc_request *mrq)
1481 {
1482 int i;
1483
1484 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
1485 if (host->mrqs_done[i] == mrq) {
1486 WARN_ON(1);
1487 return;
1488 }
1489 }
1490
1491 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
1492 if (!host->mrqs_done[i]) {
1493 host->mrqs_done[i] = mrq;
1494 break;
1495 }
1496 }
1497
1498 WARN_ON(i >= SDHCI_MAX_MRQS);
1499 }
1500
1501 static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
1502 {
1503 if (host->cmd && host->cmd->mrq == mrq)
1504 host->cmd = NULL;
1505
1506 if (host->data_cmd && host->data_cmd->mrq == mrq)
1507 host->data_cmd = NULL;
1508
1509 if (host->deferred_cmd && host->deferred_cmd->mrq == mrq)
1510 host->deferred_cmd = NULL;
1511
1512 if (host->data && host->data->mrq == mrq)
1513 host->data = NULL;
1514
1515 if (sdhci_needs_reset(host, mrq))
1516 host->pending_reset = true;
1517
1518 sdhci_set_mrq_done(host, mrq);
1519
1520 sdhci_del_timer(host, mrq);
1521
1522 if (!sdhci_has_requests(host))
1523 sdhci_led_deactivate(host);
1524 }
1525
1526 static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
1527 {
1528 __sdhci_finish_mrq(host, mrq);
1529
1530 queue_work(host->complete_wq, &host->complete_work);
1531 }
1532
1533 static void __sdhci_finish_data(struct sdhci_host *host, bool sw_data_timeout)
1534 {
1535 struct mmc_command *data_cmd = host->data_cmd;
1536 struct mmc_data *data = host->data;
1537
1538 host->data = NULL;
1539 host->data_cmd = NULL;
1540
1541 /*
1542 * The controller needs a reset of internal state machines upon error
1543 * conditions.
1544 */
1545 if (data->error) {
1546 if (!host->cmd || host->cmd == data_cmd)
1547 sdhci_do_reset(host, SDHCI_RESET_CMD);
1548 sdhci_do_reset(host, SDHCI_RESET_DATA);
1549 }
1550
1551 if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
1552 (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
1553 sdhci_adma_table_post(host, data);
1554
1555 /*
1556 * The specification states that the block count register must
1557 * be updated, but it does not specify at what point in the
1558 * data flow. That makes the register entirely useless to read
1559 * back so we have to assume that nothing made it to the card
1560 * in the event of an error.
1561 */
1562 if (data->error)
1563 data->bytes_xfered = 0;
1564 else
1565 data->bytes_xfered = data->blksz * data->blocks;
1566
1567 /*
1568 * Need to send CMD12 if -
1569 * a) open-ended multiblock transfer not using auto CMD12 (no CMD23)
1570 * b) error in multiblock transfer
1571 */
1572 if (data->stop &&
1573 ((!data->mrq->sbc && !sdhci_auto_cmd12(host, data->mrq)) ||
1574 data->error)) {
1575 /*
1576 * 'cap_cmd_during_tfr' request must not use the command line
1577 * after mmc_command_done() has been called. It is upper layer's
1578 * responsibility to send the stop command if required.
1579 */
1580 if (data->mrq->cap_cmd_during_tfr) {
1581 __sdhci_finish_mrq(host, data->mrq);
1582 } else {
1583 /* Avoid triggering warning in sdhci_send_command() */
1584 host->cmd = NULL;
1585 if (!sdhci_send_command(host, data->stop)) {
1586 if (sw_data_timeout) {
1587 /*
1588 * This is anyway a sw data timeout, so
1589 * give up now.
1590 */
1591 data->stop->error = -EIO;
1592 __sdhci_finish_mrq(host, data->mrq);
1593 } else {
1594 WARN_ON(host->deferred_cmd);
1595 host->deferred_cmd = data->stop;
1596 }
1597 }
1598 }
1599 } else {
1600 __sdhci_finish_mrq(host, data->mrq);
1601 }
1602 }
1603
1604 static void sdhci_finish_data(struct sdhci_host *host)
1605 {
1606 __sdhci_finish_data(host, false);
1607 }
1608
1609 static bool sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
1610 {
1611 int flags;
1612 u32 mask;
1613 unsigned long timeout;
1614
1615 WARN_ON(host->cmd);
1616
1617 /* Initially, a command has no error */
1618 cmd->error = 0;
1619
1620 if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
1621 cmd->opcode == MMC_STOP_TRANSMISSION)
1622 cmd->flags |= MMC_RSP_BUSY;
1623
1624 mask = SDHCI_CMD_INHIBIT;
1625 if (sdhci_data_line_cmd(cmd))
1626 mask |= SDHCI_DATA_INHIBIT;
1627
1628 /* We shouldn't wait for data inhibit for stop commands, even
1629 though they might use busy signaling */
1630 if (cmd->mrq->data && (cmd == cmd->mrq->data->stop))
1631 mask &= ~SDHCI_DATA_INHIBIT;
1632
1633 if (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask)
1634 return false;
1635
1636 host->cmd = cmd;
1637 host->data_timeout = 0;
1638 if (sdhci_data_line_cmd(cmd)) {
1639 WARN_ON(host->data_cmd);
1640 host->data_cmd = cmd;
1641 sdhci_set_timeout(host, cmd);
1642 }
1643
1644 if (cmd->data) {
1645 if (host->use_external_dma)
1646 sdhci_external_dma_prepare_data(host, cmd);
1647 else
1648 sdhci_prepare_data(host, cmd);
1649 }
1650
1651 sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);
1652
1653 sdhci_set_transfer_mode(host, cmd);
1654
1655 if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
1656 WARN_ONCE(1, "Unsupported response type!\n");
1657 /*
1658 * This does not happen in practice because 136-bit response
1659 * commands never have busy waiting, so rather than complicate
1660 * the error path, just remove busy waiting and continue.
1661 */
1662 cmd->flags &= ~MMC_RSP_BUSY;
1663 }
1664
1665 if (!(cmd->flags & MMC_RSP_PRESENT))
1666 flags = SDHCI_CMD_RESP_NONE;
1667 else if (cmd->flags & MMC_RSP_136)
1668 flags = SDHCI_CMD_RESP_LONG;
1669 else if (cmd->flags & MMC_RSP_BUSY)
1670 flags = SDHCI_CMD_RESP_SHORT_BUSY;
1671 else
1672 flags = SDHCI_CMD_RESP_SHORT;
1673
1674 if (cmd->flags & MMC_RSP_CRC)
1675 flags |= SDHCI_CMD_CRC;
1676 if (cmd->flags & MMC_RSP_OPCODE)
1677 flags |= SDHCI_CMD_INDEX;
1678
1679 /* CMD19 is special in that the Data Present Select should be set */
1680 if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
1681 cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
1682 flags |= SDHCI_CMD_DATA;
1683
1684 timeout = jiffies;
1685 if (host->data_timeout)
1686 timeout += nsecs_to_jiffies(host->data_timeout);
1687 else if (!cmd->data && cmd->busy_timeout > 9000)
1688 timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
1689 else
1690 timeout += 10 * HZ;
1691 sdhci_mod_timer(host, cmd->mrq, timeout);
1692
1693 if (host->use_external_dma)
1694 sdhci_external_dma_pre_transfer(host, cmd);
1695
1696 sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
1697
1698 return true;
1699 }
1700
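/* Fail the command with -ENOMEDIUM if the card is absent or the host is marked dead */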
1701 static bool sdhci_present_error(struct sdhci_host *host,
1702 struct mmc_command *cmd, bool present)
1703 {
1704 if (!present || host->flags & SDHCI_DEVICE_DEAD) {
1705 cmd->error = -ENOMEDIUM;
1706 return true;
1707 }
1708
1709 return false;
1710 }
1711
1712 static bool sdhci_send_command_retry(struct sdhci_host *host,
1713 struct mmc_command *cmd,
1714 unsigned long flags)
1715 __releases(host->lock)
1716 __acquires(host->lock)
1717 {
1718 struct mmc_command *deferred_cmd = host->deferred_cmd;
1719 int timeout = 10; /* Approx. 10 ms */
1720 bool present;
1721
1722 while (!sdhci_send_command(host, cmd)) {
1723 if (!timeout--) {
1724 pr_err("%s: Controller never released inhibit bit(s).\n",
1725 mmc_hostname(host->mmc));
1726 sdhci_err_stats_inc(host, CTRL_TIMEOUT);
1727 sdhci_dumpregs(host);
1728 cmd->error = -EIO;
1729 return false;
1730 }
1731
1732 spin_unlock_irqrestore(&host->lock, flags);
1733
1734 usleep_range(1000, 1250);
1735
1736 present = host->mmc->ops->get_cd(host->mmc);
1737
1738 spin_lock_irqsave(&host->lock, flags);
1739
1740 /* A deferred command might disappear, handle that */
1741 if (cmd == deferred_cmd && cmd != host->deferred_cmd)
1742 return true;
1743
1744 if (sdhci_present_error(host, cmd, present))
1745 return false;
1746 }
1747
1748 if (cmd == host->deferred_cmd)
1749 host->deferred_cmd = NULL;
1750
1751 return true;
1752 }
1753
1754 static void sdhci_read_rsp_136(struct sdhci_host *host, struct mmc_command *cmd)
1755 {
1756 int i, reg;
1757
1758 for (i = 0; i < 4; i++) {
1759 reg = SDHCI_RESPONSE + (3 - i) * 4;
1760 cmd->resp[i] = sdhci_readl(host, reg);
1761 }
1762
1763 if (host->quirks2 & SDHCI_QUIRK2_RSP_136_HAS_CRC)
1764 return;
1765
1766 /* CRC is stripped so we need to do some shifting */
1767 for (i = 0; i < 4; i++) {
1768 cmd->resp[i] <<= 8;
1769 if (i != 3)
1770 cmd->resp[i] |= cmd->resp[i + 1] >> 24;
1771 }
1772 }
1773
1774 static void sdhci_finish_command(struct sdhci_host *host)
1775 {
1776 struct mmc_command *cmd = host->cmd;
1777
1778 host->cmd = NULL;
1779
1780 if (cmd->flags & MMC_RSP_PRESENT) {
1781 if (cmd->flags & MMC_RSP_136) {
1782 sdhci_read_rsp_136(host, cmd);
1783 } else {
1784 cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
1785 }
1786 }
1787
1788 if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd)
1789 mmc_command_done(host->mmc, cmd->mrq);
1790
1791 /*
1792 * The host can send an interrupt when the busy state has
1793 * ended, allowing us to wait without wasting CPU cycles.
1794 * The busy signal uses DAT0 so this is similar to waiting
1795 * for data to complete.
1796 *
1797 * Note: The 1.0 specification is a bit ambiguous about this
1798 * feature so there might be some problems with older
1799 * controllers.
1800 */
1801 if (cmd->flags & MMC_RSP_BUSY) {
1802 if (cmd->data) {
1803 DBG("Cannot wait for busy signal when also doing a data transfer");
1804 } else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
1805 cmd == host->data_cmd) {
1806 /* Command complete before busy is ended */
1807 return;
1808 }
1809 }
1810
1811 /* Finished CMD23, now send actual command. */
1812 if (cmd == cmd->mrq->sbc) {
1813 if (!sdhci_send_command(host, cmd->mrq->cmd)) {
1814 WARN_ON(host->deferred_cmd);
1815 host->deferred_cmd = cmd->mrq->cmd;
1816 }
1817 } else {
1818
1819 /* Processed actual command. */
1820 if (host->data && host->data_early)
1821 sdhci_finish_data(host);
1822
1823 if (!cmd->data)
1824 __sdhci_finish_mrq(host, cmd->mrq);
1825 }
1826 }
1827
1828 static u16 sdhci_get_preset_value(struct sdhci_host *host)
1829 {
1830 u16 preset = 0;
1831
1832 switch (host->timing) {
1833 case MMC_TIMING_MMC_HS:
1834 case MMC_TIMING_SD_HS:
1835 preset = sdhci_readw(host, SDHCI_PRESET_FOR_HIGH_SPEED);
1836 break;
1837 case MMC_TIMING_UHS_SDR12:
1838 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
1839 break;
1840 case MMC_TIMING_UHS_SDR25:
1841 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
1842 break;
1843 case MMC_TIMING_UHS_SDR50:
1844 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
1845 break;
1846 case MMC_TIMING_UHS_SDR104:
1847 case MMC_TIMING_MMC_HS200:
1848 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
1849 break;
1850 case MMC_TIMING_UHS_DDR50:
1851 case MMC_TIMING_MMC_DDR52:
1852 preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
1853 break;
1854 case MMC_TIMING_MMC_HS400:
1855 preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
1856 break;
1857 default:
1858 pr_warn("%s: Invalid UHS-I mode selected\n",
1859 mmc_hostname(host->mmc));
1860 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
1861 break;
1862 }
1863 return preset;
1864 }
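/*
 * Illustrative note (assuming the usual sdhci.h field definitions): a
 * preset value register packs the SDCLK frequency select in
 * SDHCI_PRESET_SDCLK_FREQ_MASK, the clock generator select bit
 * SDHCI_PRESET_CLKGEN_SEL and the driver strength in
 * SDHCI_PRESET_DRV_MASK.  For example, a preset of 0x0004 with the clock
 * generator select bit clear gives a frequency select of 4, so
 * sdhci_calc_clk() below uses a real divisor of 2 * 4 = 8, while
 * sdhci_set_ios() extracts driver type 0 (type B).
 */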
1865
1866 u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
1867 unsigned int *actual_clock)
1868 {
1869 int div = 0; /* Initialized for compiler warning */
1870 int real_div = div, clk_mul = 1;
1871 u16 clk = 0;
1872 bool switch_base_clk = false;
1873
1874 if (host->version >= SDHCI_SPEC_300) {
1875 if (host->preset_enabled) {
1876 u16 pre_val;
1877
1878 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1879 pre_val = sdhci_get_preset_value(host);
1880 div = FIELD_GET(SDHCI_PRESET_SDCLK_FREQ_MASK, pre_val);
1881 if (host->clk_mul &&
1882 (pre_val & SDHCI_PRESET_CLKGEN_SEL)) {
1883 clk = SDHCI_PROG_CLOCK_MODE;
1884 real_div = div + 1;
1885 clk_mul = host->clk_mul;
1886 } else {
1887 real_div = max_t(int, 1, div << 1);
1888 }
1889 goto clock_set;
1890 }
1891
1892 /*
1893 * Check if the Host Controller supports Programmable Clock
1894 * Mode.
1895 */
1896 if (host->clk_mul) {
1897 for (div = 1; div <= 1024; div++) {
1898 if ((host->max_clk * host->clk_mul / div)
1899 <= clock)
1900 break;
1901 }
1902 if ((host->max_clk * host->clk_mul / div) <= clock) {
1903 /*
1904 * Set Programmable Clock Mode in the Clock
1905 * Control register.
1906 */
1907 clk = SDHCI_PROG_CLOCK_MODE;
1908 real_div = div;
1909 clk_mul = host->clk_mul;
1910 div--;
1911 } else {
1912 /*
1913 * The divisor can be too small to reach the requested
1914 * clock rate. In that case, use the base clock.
1915 */
1916 switch_base_clk = true;
1917 }
1918 }
1919
1920 if (!host->clk_mul || switch_base_clk) {
1921 /* Version 3.00 divisors must be a multiple of 2. */
1922 if (host->max_clk <= clock)
1923 div = 1;
1924 else {
1925 for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
1926 div += 2) {
1927 if ((host->max_clk / div) <= clock)
1928 break;
1929 }
1930 }
1931 real_div = div;
1932 div >>= 1;
1933 if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
1934 && !div && host->max_clk <= 25000000)
1935 div = 1;
1936 }
1937 } else {
1938 /* Version 2.00 divisors must be a power of 2. */
1939 for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
1940 if ((host->max_clk / div) <= clock)
1941 break;
1942 }
1943 real_div = div;
1944 div >>= 1;
1945 }
1946
1947 clock_set:
1948 if (real_div)
1949 *actual_clock = (host->max_clk * clk_mul) / real_div;
1950 clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
1951 clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
1952 << SDHCI_DIVIDER_HI_SHIFT;
1953
1954 return clk;
1955 }
1956 EXPORT_SYMBOL_GPL(sdhci_calc_clk);
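/*
 * Worked example (illustrative only): on a v3.00 host with
 * host->max_clk = 200 MHz, no clock multiplier and a requested clock of
 * 50 MHz, the divided-clock loop stops at div = 4 (200 MHz / 4 = 50 MHz),
 * so *actual_clock becomes 50 MHz and the divider field written to the
 * register is div >> 1 = 2.  A request for 400 kHz stops the loop at
 * div = 500, giving an actual clock of 400 kHz and a divider field of 250.
 */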
1957
1958 void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
1959 {
1960 ktime_t timeout;
1961
1962 clk |= SDHCI_CLOCK_INT_EN;
1963 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1964
1965 /* Wait max 150 ms */
1966 timeout = ktime_add_ms(ktime_get(), 150);
1967 while (1) {
1968 bool timedout = ktime_after(ktime_get(), timeout);
1969
1970 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1971 if (clk & SDHCI_CLOCK_INT_STABLE)
1972 break;
1973 if (timedout) {
1974 pr_err("%s: Internal clock never stabilised.\n",
1975 mmc_hostname(host->mmc));
1976 sdhci_err_stats_inc(host, CTRL_TIMEOUT);
1977 sdhci_dumpregs(host);
1978 return;
1979 }
1980 udelay(10);
1981 }
1982
1983 if (host->version >= SDHCI_SPEC_410 && host->v4_mode) {
1984 clk |= SDHCI_CLOCK_PLL_EN;
1985 clk &= ~SDHCI_CLOCK_INT_STABLE;
1986 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1987
1988 /* Wait max 150 ms */
1989 timeout = ktime_add_ms(ktime_get(), 150);
1990 while (1) {
1991 bool timedout = ktime_after(ktime_get(), timeout);
1992
1993 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1994 if (clk & SDHCI_CLOCK_INT_STABLE)
1995 break;
1996 if (timedout) {
1997 pr_err("%s: PLL clock never stabilised.\n",
1998 mmc_hostname(host->mmc));
1999 sdhci_err_stats_inc(host, CTRL_TIMEOUT);
2000 sdhci_dumpregs(host);
2001 return;
2002 }
2003 udelay(10);
2004 }
2005 }
2006
2007 clk |= SDHCI_CLOCK_CARD_EN;
2008 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
2009 }
2010 EXPORT_SYMBOL_GPL(sdhci_enable_clk);
2011
2012 void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
2013 {
2014 u16 clk;
2015
2016 host->mmc->actual_clock = 0;
2017
2018 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
2019
2020 if (clock == 0)
2021 return;
2022
2023 clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
2024 sdhci_enable_clk(host, clk);
2025 }
2026 EXPORT_SYMBOL_GPL(sdhci_set_clock);
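/*
 * Usage sketch (illustrative, mirroring what many sdhci platform glue
 * drivers do): a vendor driver with no special clock handling can simply
 * point its ops at the helpers above, e.g.
 *
 *	static const struct sdhci_ops example_sdhci_ops = {
 *		.set_clock		= sdhci_set_clock,
 *		.set_bus_width		= sdhci_set_bus_width,
 *		.set_uhs_signaling	= sdhci_set_uhs_signaling,
 *	};
 *
 * (example_sdhci_ops is a hypothetical name.)  A driver with a
 * non-standard clock tree would implement its own set_clock instead and
 * may still reuse sdhci_calc_clk()/sdhci_enable_clk().
 */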
2027
2028 static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
2029 unsigned short vdd)
2030 {
2031 struct mmc_host *mmc = host->mmc;
2032
2033 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
2034
2035 if (mode != MMC_POWER_OFF)
2036 sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
2037 else
2038 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
2039 }
2040
2041 void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
2042 unsigned short vdd)
2043 {
2044 u8 pwr = 0;
2045
2046 if (mode != MMC_POWER_OFF) {
2047 switch (1 << vdd) {
2048 case MMC_VDD_165_195:
2049 /*
2050 * Without a regulator, SDHCI does not support 2.0v
2051 * so we only get here if the driver deliberately
2052 * added the 2.0v range to ocr_avail. Map it to 1.8v
2053 * for the purpose of turning on the power.
2054 */
2055 case MMC_VDD_20_21:
2056 pwr = SDHCI_POWER_180;
2057 break;
2058 case MMC_VDD_29_30:
2059 case MMC_VDD_30_31:
2060 pwr = SDHCI_POWER_300;
2061 break;
2062 case MMC_VDD_32_33:
2063 case MMC_VDD_33_34:
2064 /*
2065 * 3.4 ~ 3.6V are valid only for those platforms where it's
2066 * known that the voltage range is supported by hardware.
2067 */
2068 case MMC_VDD_34_35:
2069 case MMC_VDD_35_36:
2070 pwr = SDHCI_POWER_330;
2071 break;
2072 default:
2073 WARN(1, "%s: Invalid vdd %#x\n",
2074 mmc_hostname(host->mmc), vdd);
2075 break;
2076 }
2077 }
2078
2079 if (host->pwr == pwr)
2080 return;
2081
2082 host->pwr = pwr;
2083
2084 if (pwr == 0) {
2085 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
2086 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
2087 sdhci_runtime_pm_bus_off(host);
2088 } else {
2089 /*
2090 * Spec says that we should clear the power reg before setting
2091 * a new value. Some controllers don't seem to like this though.
2092 */
2093 if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
2094 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
2095
2096 /*
2097 * At least the Marvell CaFe chip gets confused if we set the
2098 * voltage and turn on the power at the same time, so set the
2099 * voltage first.
2100 */
2101 if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
2102 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
2103
2104 pwr |= SDHCI_POWER_ON;
2105
2106 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
2107
2108 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
2109 sdhci_runtime_pm_bus_on(host);
2110
2111 /*
2112 * Some controllers need an extra 10ms delay before they can
2113 * apply the clock after applying power
2114 */
2115 if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
2116 mdelay(10);
2117 }
2118 }
2119 EXPORT_SYMBOL_GPL(sdhci_set_power_noreg);
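/*
 * Example (illustrative): vdd is the bit number of the selected OCR
 * voltage window, so a caller passing the bit for MMC_VDD_32_33 lands in
 * the 3.3V case above and, barring quirks, the value finally written to
 * SDHCI_POWER_CONTROL is SDHCI_POWER_330 | SDHCI_POWER_ON.
 */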
2120
2121 void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
2122 unsigned short vdd)
2123 {
2124 if (IS_ERR(host->mmc->supply.vmmc))
2125 sdhci_set_power_noreg(host, mode, vdd);
2126 else
2127 sdhci_set_power_reg(host, mode, vdd);
2128 }
2129 EXPORT_SYMBOL_GPL(sdhci_set_power);
2130
2131 /*
2132 * Some controllers need to configure a valid bus voltage on their power
2133 * register regardless of whether an external regulator is taking care of power
2134 * supply. This helper function takes care of it if set as the controller's
2135 * sdhci_ops.set_power callback.
2136 */
2137 void sdhci_set_power_and_bus_voltage(struct sdhci_host *host,
2138 unsigned char mode,
2139 unsigned short vdd)
2140 {
2141 if (!IS_ERR(host->mmc->supply.vmmc)) {
2142 struct mmc_host *mmc = host->mmc;
2143
2144 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
2145 }
2146 sdhci_set_power_noreg(host, mode, vdd);
2147 }
2148 EXPORT_SYMBOL_GPL(sdhci_set_power_and_bus_voltage);
2149
2150 /*****************************************************************************\
2151 * *
2152 * MMC callbacks *
2153 * *
2154 \*****************************************************************************/
2155
2156 void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
2157 {
2158 struct sdhci_host *host = mmc_priv(mmc);
2159 struct mmc_command *cmd;
2160 unsigned long flags;
2161 bool present;
2162
2163 /* Firstly check card presence */
2164 present = mmc->ops->get_cd(mmc);
2165
2166 spin_lock_irqsave(&host->lock, flags);
2167
2168 sdhci_led_activate(host);
2169
2170 if (sdhci_present_error(host, mrq->cmd, present))
2171 goto out_finish;
2172
2173 cmd = sdhci_manual_cmd23(host, mrq) ? mrq->sbc : mrq->cmd;
2174
2175 if (!sdhci_send_command_retry(host, cmd, flags))
2176 goto out_finish;
2177
2178 spin_unlock_irqrestore(&host->lock, flags);
2179
2180 return;
2181
2182 out_finish:
2183 sdhci_finish_mrq(host, mrq);
2184 spin_unlock_irqrestore(&host->lock, flags);
2185 }
2186 EXPORT_SYMBOL_GPL(sdhci_request);
2187
2188 int sdhci_request_atomic(struct mmc_host *mmc, struct mmc_request *mrq)
2189 {
2190 struct sdhci_host *host = mmc_priv(mmc);
2191 struct mmc_command *cmd;
2192 unsigned long flags;
2193 int ret = 0;
2194
2195 spin_lock_irqsave(&host->lock, flags);
2196
2197 if (sdhci_present_error(host, mrq->cmd, true)) {
2198 sdhci_finish_mrq(host, mrq);
2199 goto out_finish;
2200 }
2201
2202 cmd = sdhci_manual_cmd23(host, mrq) ? mrq->sbc : mrq->cmd;
2203
2204 /*
2205 * The HSQ may send a command in interrupt context without polling
2206 * the busy signaling, which means we should return BUSY if the
2207 * controller has not released the inhibit bits, so that the HSQ can
2208 * try to send the request again in non-atomic context. So we should
2209 * not finish this request here.
2210 */
2211 if (!sdhci_send_command(host, cmd))
2212 ret = -EBUSY;
2213 else
2214 sdhci_led_activate(host);
2215
2216 out_finish:
2217 spin_unlock_irqrestore(&host->lock, flags);
2218 return ret;
2219 }
2220 EXPORT_SYMBOL_GPL(sdhci_request_atomic);
2221
2222 void sdhci_set_bus_width(struct sdhci_host *host, int width)
2223 {
2224 u8 ctrl;
2225
2226 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
2227 if (width == MMC_BUS_WIDTH_8) {
2228 ctrl &= ~SDHCI_CTRL_4BITBUS;
2229 ctrl |= SDHCI_CTRL_8BITBUS;
2230 } else {
2231 if (host->mmc->caps & MMC_CAP_8_BIT_DATA)
2232 ctrl &= ~SDHCI_CTRL_8BITBUS;
2233 if (width == MMC_BUS_WIDTH_4)
2234 ctrl |= SDHCI_CTRL_4BITBUS;
2235 else
2236 ctrl &= ~SDHCI_CTRL_4BITBUS;
2237 }
2238 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
2239 }
2240 EXPORT_SYMBOL_GPL(sdhci_set_bus_width);
2241
2242 void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
2243 {
2244 u16 ctrl_2;
2245
2246 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2247 /* Select Bus Speed Mode for host */
2248 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
2249 if ((timing == MMC_TIMING_MMC_HS200) ||
2250 (timing == MMC_TIMING_UHS_SDR104))
2251 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
2252 else if (timing == MMC_TIMING_UHS_SDR12)
2253 ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
2254 else if (timing == MMC_TIMING_UHS_SDR25)
2255 ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
2256 else if (timing == MMC_TIMING_UHS_SDR50)
2257 ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
2258 else if ((timing == MMC_TIMING_UHS_DDR50) ||
2259 (timing == MMC_TIMING_MMC_DDR52))
2260 ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
2261 else if (timing == MMC_TIMING_MMC_HS400)
2262 ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
2263 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
2264 }
2265 EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);
2266
2267 static bool sdhci_timing_has_preset(unsigned char timing)
2268 {
2269 switch (timing) {
2270 case MMC_TIMING_UHS_SDR12:
2271 case MMC_TIMING_UHS_SDR25:
2272 case MMC_TIMING_UHS_SDR50:
2273 case MMC_TIMING_UHS_SDR104:
2274 case MMC_TIMING_UHS_DDR50:
2275 case MMC_TIMING_MMC_DDR52:
2276 return true;
2277 }
2278 return false;
2279 }
2280
2281 static bool sdhci_preset_needed(struct sdhci_host *host, unsigned char timing)
2282 {
2283 return !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
2284 sdhci_timing_has_preset(timing);
2285 }
2286
2287 static bool sdhci_presetable_values_change(struct sdhci_host *host, struct mmc_ios *ios)
2288 {
2289 /*
2290 * Preset Values are: Driver Strength, Clock Generator and SDCLK/RCLK
2291 * Frequency. Check if preset values need to be enabled, or the Driver
2292 * Strength needs updating. Note, clock changes are handled separately.
2293 */
2294 return !host->preset_enabled &&
2295 (sdhci_preset_needed(host, ios->timing) || host->drv_type != ios->drv_type);
2296 }
2297
2298 void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
2299 {
2300 struct sdhci_host *host = mmc_priv(mmc);
2301 bool reinit_uhs = host->reinit_uhs;
2302 bool turning_on_clk = false;
2303 u8 ctrl;
2304
2305 host->reinit_uhs = false;
2306
2307 if (ios->power_mode == MMC_POWER_UNDEFINED)
2308 return;
2309
2310 if (host->flags & SDHCI_DEVICE_DEAD) {
2311 if (!IS_ERR(mmc->supply.vmmc) &&
2312 ios->power_mode == MMC_POWER_OFF)
2313 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
2314 return;
2315 }
2316
2317 /*
2318 * Reset the chip on each power off.
2319 * Should clear out any weird states.
2320 */
2321 if (ios->power_mode == MMC_POWER_OFF) {
2322 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
2323 sdhci_reinit(host);
2324 }
2325
2326 if (host->version >= SDHCI_SPEC_300 &&
2327 (ios->power_mode == MMC_POWER_UP) &&
2328 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
2329 sdhci_enable_preset_value(host, false);
2330
2331 if (!ios->clock || ios->clock != host->clock) {
2332 turning_on_clk = ios->clock && !host->clock;
2333
2334 host->ops->set_clock(host, ios->clock);
2335 host->clock = ios->clock;
2336
2337 if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
2338 host->clock) {
2339 host->timeout_clk = mmc->actual_clock ?
2340 mmc->actual_clock / 1000 :
2341 host->clock / 1000;
2342 mmc->max_busy_timeout =
2343 host->ops->get_max_timeout_count ?
2344 host->ops->get_max_timeout_count(host) :
2345 1 << 27;
2346 mmc->max_busy_timeout /= host->timeout_clk;
2347 }
2348 }
2349
2350 if (host->ops->set_power)
2351 host->ops->set_power(host, ios->power_mode, ios->vdd);
2352 else
2353 sdhci_set_power(host, ios->power_mode, ios->vdd);
2354
2355 if (host->ops->platform_send_init_74_clocks)
2356 host->ops->platform_send_init_74_clocks(host, ios->power_mode);
2357
2358 host->ops->set_bus_width(host, ios->bus_width);
2359
2360 /*
2361 * Special case to avoid multiple clock changes during voltage
2362 * switching.
2363 */
2364 if (!reinit_uhs &&
2365 turning_on_clk &&
2366 host->timing == ios->timing &&
2367 host->version >= SDHCI_SPEC_300 &&
2368 !sdhci_presetable_values_change(host, ios))
2369 return;
2370
2371 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
2372
2373 if (!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) {
2374 if (ios->timing == MMC_TIMING_SD_HS ||
2375 ios->timing == MMC_TIMING_MMC_HS ||
2376 ios->timing == MMC_TIMING_MMC_HS400 ||
2377 ios->timing == MMC_TIMING_MMC_HS200 ||
2378 ios->timing == MMC_TIMING_MMC_DDR52 ||
2379 ios->timing == MMC_TIMING_UHS_SDR50 ||
2380 ios->timing == MMC_TIMING_UHS_SDR104 ||
2381 ios->timing == MMC_TIMING_UHS_DDR50 ||
2382 ios->timing == MMC_TIMING_UHS_SDR25)
2383 ctrl |= SDHCI_CTRL_HISPD;
2384 else
2385 ctrl &= ~SDHCI_CTRL_HISPD;
2386 }
2387
2388 if (host->version >= SDHCI_SPEC_300) {
2389 u16 clk, ctrl_2;
2390
2391 if (!host->preset_enabled) {
2392 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
2393 /*
2394 * We only need to set Driver Strength if the
2395 * preset value enable is not set.
2396 */
2397 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2398 ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
2399 if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
2400 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
2401 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
2402 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
2403 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
2404 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
2405 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
2406 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
2407 else {
2408 pr_warn("%s: invalid driver type, default to driver type B\n",
2409 mmc_hostname(mmc));
2410 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
2411 }
2412
2413 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
2414 host->drv_type = ios->drv_type;
2415 } else {
2416 /*
2417 * According to SDHC Spec v3.00, if the Preset Value
2418 * Enable in the Host Control 2 register is set, we
2419 * need to reset SD Clock Enable before changing High
2420 * Speed Enable to avoid generating clock glitches.
2421 */
2422
2423 /* Reset SD Clock Enable */
2424 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
2425 clk &= ~SDHCI_CLOCK_CARD_EN;
2426 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
2427
2428 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
2429
2430 /* Re-enable SD Clock */
2431 host->ops->set_clock(host, host->clock);
2432 }
2433
2434 /* Reset SD Clock Enable */
2435 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
2436 clk &= ~SDHCI_CLOCK_CARD_EN;
2437 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
2438
2439 host->ops->set_uhs_signaling(host, ios->timing);
2440 host->timing = ios->timing;
2441
2442 if (sdhci_preset_needed(host, ios->timing)) {
2443 u16 preset;
2444
2445 sdhci_enable_preset_value(host, true);
2446 preset = sdhci_get_preset_value(host);
2447 ios->drv_type = FIELD_GET(SDHCI_PRESET_DRV_MASK,
2448 preset);
2449 host->drv_type = ios->drv_type;
2450 }
2451
2452 /* Re-enable SD Clock */
2453 host->ops->set_clock(host, host->clock);
2454 } else
2455 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
2456
2457 /*
2458 * Some (ENE) controllers go apeshit on some ios operation,
2459 * signalling timeout and CRC errors even on CMD0. Resetting
2460 * it on each ios seems to solve the problem.
2461 */
2462 if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
2463 sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
2464 }
2465 EXPORT_SYMBOL_GPL(sdhci_set_ios);
2466
2467 static int sdhci_get_cd(struct mmc_host *mmc)
2468 {
2469 struct sdhci_host *host = mmc_priv(mmc);
2470 int gpio_cd = mmc_gpio_get_cd(mmc);
2471 bool allow = true;
2472
2473 if (host->flags & SDHCI_DEVICE_DEAD)
2474 return 0;
2475
2476 /* If nonremovable, assume that the card is always present. */
2477 if (!mmc_card_is_removable(mmc))
2478 return 1;
2479
2480 trace_android_vh_sdhci_get_cd(host, &allow);
2481 if (!allow)
2482 return 0;
2483
2484 /*
2485 * Try slot gpio detect; if defined, it takes precedence
2486 * over built-in controller functionality
2487 */
2488 if (gpio_cd >= 0)
2489 return !!gpio_cd;
2490
2491 /* If polling, assume that the card is always present. */
2492 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
2493 return 1;
2494
2495 /* Host native card detect */
2496 return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
2497 }
2498
2499 static int sdhci_check_ro(struct sdhci_host *host)
2500 {
2501 unsigned long flags;
2502 int is_readonly;
2503
2504 spin_lock_irqsave(&host->lock, flags);
2505
2506 if (host->flags & SDHCI_DEVICE_DEAD)
2507 is_readonly = 0;
2508 else if (host->ops->get_ro)
2509 is_readonly = host->ops->get_ro(host);
2510 else if (mmc_can_gpio_ro(host->mmc))
2511 is_readonly = mmc_gpio_get_ro(host->mmc);
2512 else
2513 is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
2514 & SDHCI_WRITE_PROTECT);
2515
2516 spin_unlock_irqrestore(&host->lock, flags);
2517
2518 /* This quirk needs to be replaced by a callback-function later */
2519 return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
2520 !is_readonly : is_readonly;
2521 }
2522
2523 #define SAMPLE_COUNT 5
2524
2525 static int sdhci_get_ro(struct mmc_host *mmc)
2526 {
2527 struct sdhci_host *host = mmc_priv(mmc);
2528 int i, ro_count;
2529
2530 if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
2531 return sdhci_check_ro(host);
2532
2533 ro_count = 0;
2534 for (i = 0; i < SAMPLE_COUNT; i++) {
2535 if (sdhci_check_ro(host)) {
2536 if (++ro_count > SAMPLE_COUNT / 2)
2537 return 1;
2538 }
2539 msleep(30);
2540 }
2541 return 0;
2542 }
2543
2544 static void sdhci_hw_reset(struct mmc_host *mmc)
2545 {
2546 struct sdhci_host *host = mmc_priv(mmc);
2547
2548 if (host->ops && host->ops->hw_reset)
2549 host->ops->hw_reset(host);
2550 }
2551
2552 static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
2553 {
2554 if (!(host->flags & SDHCI_DEVICE_DEAD)) {
2555 if (enable)
2556 host->ier |= SDHCI_INT_CARD_INT;
2557 else
2558 host->ier &= ~SDHCI_INT_CARD_INT;
2559
2560 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2561 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2562 }
2563 }
2564
2565 void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
2566 {
2567 struct sdhci_host *host = mmc_priv(mmc);
2568 unsigned long flags;
2569
2570 if (enable)
2571 pm_runtime_get_noresume(mmc_dev(mmc));
2572
2573 spin_lock_irqsave(&host->lock, flags);
2574 sdhci_enable_sdio_irq_nolock(host, enable);
2575 spin_unlock_irqrestore(&host->lock, flags);
2576
2577 if (!enable)
2578 pm_runtime_put_noidle(mmc_dev(mmc));
2579 }
2580 EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq);
2581
2582 static void sdhci_ack_sdio_irq(struct mmc_host *mmc)
2583 {
2584 struct sdhci_host *host = mmc_priv(mmc);
2585 unsigned long flags;
2586
2587 spin_lock_irqsave(&host->lock, flags);
2588 sdhci_enable_sdio_irq_nolock(host, true);
2589 spin_unlock_irqrestore(&host->lock, flags);
2590 }
2591
2592 int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
2593 struct mmc_ios *ios)
2594 {
2595 struct sdhci_host *host = mmc_priv(mmc);
2596 u16 ctrl;
2597 int ret;
2598
2599 /*
2600 * Signal Voltage Switching is only applicable for Host Controllers
2601 * v3.00 and above.
2602 */
2603 if (host->version < SDHCI_SPEC_300)
2604 return 0;
2605
2606 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2607
2608 switch (ios->signal_voltage) {
2609 case MMC_SIGNAL_VOLTAGE_330:
2610 if (!(host->flags & SDHCI_SIGNALING_330))
2611 return -EINVAL;
2612 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
2613 ctrl &= ~SDHCI_CTRL_VDD_180;
2614 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2615
2616 if (!IS_ERR(mmc->supply.vqmmc)) {
2617 ret = mmc_regulator_set_vqmmc(mmc, ios);
2618 if (ret < 0) {
2619 pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
2620 mmc_hostname(mmc));
2621 return -EIO;
2622 }
2623 }
2624 /* Wait for 5ms */
2625 usleep_range(5000, 5500);
2626
2627 /* 3.3V regulator output should be stable within 5 ms */
2628 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2629 if (!(ctrl & SDHCI_CTRL_VDD_180))
2630 return 0;
2631
2632 pr_warn("%s: 3.3V regulator output did not become stable\n",
2633 mmc_hostname(mmc));
2634
2635 return -EAGAIN;
2636 case MMC_SIGNAL_VOLTAGE_180:
2637 if (!(host->flags & SDHCI_SIGNALING_180))
2638 return -EINVAL;
2639 if (!IS_ERR(mmc->supply.vqmmc)) {
2640 ret = mmc_regulator_set_vqmmc(mmc, ios);
2641 if (ret < 0) {
2642 pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
2643 mmc_hostname(mmc));
2644 return -EIO;
2645 }
2646 }
2647
2648 /*
2649 * Enable 1.8V Signal Enable in the Host Control2
2650 * register
2651 */
2652 ctrl |= SDHCI_CTRL_VDD_180;
2653 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2654
2655 /* Some controllers need to do more when switching */
2656 if (host->ops->voltage_switch)
2657 host->ops->voltage_switch(host);
2658
2659 /* 1.8V regulator output should be stable within 5 ms */
2660 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2661 if (ctrl & SDHCI_CTRL_VDD_180)
2662 return 0;
2663
2664 pr_warn("%s: 1.8V regulator output did not become stable\n",
2665 mmc_hostname(mmc));
2666
2667 return -EAGAIN;
2668 case MMC_SIGNAL_VOLTAGE_120:
2669 if (!(host->flags & SDHCI_SIGNALING_120))
2670 return -EINVAL;
2671 if (!IS_ERR(mmc->supply.vqmmc)) {
2672 ret = mmc_regulator_set_vqmmc(mmc, ios);
2673 if (ret < 0) {
2674 pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
2675 mmc_hostname(mmc));
2676 return -EIO;
2677 }
2678 }
2679 return 0;
2680 default:
2681 /* No signal voltage switch required */
2682 return 0;
2683 }
2684 }
2685 EXPORT_SYMBOL_GPL(sdhci_start_signal_voltage_switch);
2686
2687 static int sdhci_card_busy(struct mmc_host *mmc)
2688 {
2689 struct sdhci_host *host = mmc_priv(mmc);
2690 u32 present_state;
2691
2692 /* Check whether DAT[0] is 0 */
2693 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
2694
2695 return !(present_state & SDHCI_DATA_0_LVL_MASK);
2696 }
2697
2698 static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
2699 {
2700 struct sdhci_host *host = mmc_priv(mmc);
2701 unsigned long flags;
2702
2703 spin_lock_irqsave(&host->lock, flags);
2704 host->flags |= SDHCI_HS400_TUNING;
2705 spin_unlock_irqrestore(&host->lock, flags);
2706
2707 return 0;
2708 }
2709
2710 void sdhci_start_tuning(struct sdhci_host *host)
2711 {
2712 u16 ctrl;
2713
2714 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2715 ctrl |= SDHCI_CTRL_EXEC_TUNING;
2716 if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
2717 ctrl |= SDHCI_CTRL_TUNED_CLK;
2718 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2719
2720 /*
2721 * As per the Host Controller spec v3.00, tuning command
2722 * generates Buffer Read Ready interrupt, so enable that.
2723 *
2724 * Note: The spec clearly says that when tuning sequence
2725 * is being performed, the controller does not generate
2726 * interrupts other than Buffer Read Ready interrupt. But
2727 * to make sure we don't hit a controller bug, we _only_
2728 * enable Buffer Read Ready interrupt here.
2729 */
2730 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
2731 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
2732 }
2733 EXPORT_SYMBOL_GPL(sdhci_start_tuning);
2734
2735 void sdhci_end_tuning(struct sdhci_host *host)
2736 {
2737 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2738 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2739 }
2740 EXPORT_SYMBOL_GPL(sdhci_end_tuning);
2741
2742 void sdhci_reset_tuning(struct sdhci_host *host)
2743 {
2744 u16 ctrl;
2745
2746 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2747 ctrl &= ~SDHCI_CTRL_TUNED_CLK;
2748 ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
2749 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2750 }
2751 EXPORT_SYMBOL_GPL(sdhci_reset_tuning);
2752
2753 void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode)
2754 {
2755 sdhci_reset_tuning(host);
2756
2757 sdhci_do_reset(host, SDHCI_RESET_CMD);
2758 sdhci_do_reset(host, SDHCI_RESET_DATA);
2759
2760 sdhci_end_tuning(host);
2761
2762 mmc_send_abort_tuning(host->mmc, opcode);
2763 }
2764 EXPORT_SYMBOL_GPL(sdhci_abort_tuning);
2765
2766 /*
2767 * We use sdhci_send_tuning() because mmc_send_tuning() is not a good fit. SDHCI
2768 * tuning command does not have a data payload (or rather the hardware does it
2769 * automatically) so mmc_send_tuning() will return -EIO. Also the tuning command
2770 * interrupt setup is different to other commands and there is no timeout
2771 * interrupt so special handling is needed.
2772 */
2773 void sdhci_send_tuning(struct sdhci_host *host, u32 opcode)
2774 {
2775 struct mmc_host *mmc = host->mmc;
2776 struct mmc_command cmd = {};
2777 struct mmc_request mrq = {};
2778 unsigned long flags;
2779 u32 b = host->sdma_boundary;
2780
2781 spin_lock_irqsave(&host->lock, flags);
2782
2783 cmd.opcode = opcode;
2784 cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
2785 cmd.mrq = &mrq;
2786
2787 mrq.cmd = &cmd;
2788 /*
2789 * In response to CMD19, the card sends a 64-byte tuning block to
2790 * the Host Controller, so the block size is set to 64 here; for
2791 * CMD21 (HS200) on an 8-bit bus the tuning block is 128 bytes instead.
2792 */
2793 if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 &&
2794 mmc->ios.bus_width == MMC_BUS_WIDTH_8)
2795 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 128), SDHCI_BLOCK_SIZE);
2796 else
2797 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 64), SDHCI_BLOCK_SIZE);
2798
2799 /*
2800 * The tuning block is sent by the card to the host controller.
2801 * So we set the TRNS_READ bit in the Transfer Mode register.
2802 * This also takes care of setting DMA Enable and Multi Block
2803 * Select in the same register to 0.
2804 */
2805 sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
2806
2807 if (!sdhci_send_command_retry(host, &cmd, flags)) {
2808 spin_unlock_irqrestore(&host->lock, flags);
2809 host->tuning_done = 0;
2810 return;
2811 }
2812
2813 host->cmd = NULL;
2814
2815 sdhci_del_timer(host, &mrq);
2816
2817 host->tuning_done = 0;
2818
2819 spin_unlock_irqrestore(&host->lock, flags);
2820
2821 /* Wait for Buffer Read Ready interrupt */
2822 wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1),
2823 msecs_to_jiffies(50));
2824
2825 }
2826 EXPORT_SYMBOL_GPL(sdhci_send_tuning);
2827
2828 static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
2829 {
2830 int i;
2831
2832 /*
2833 * Issue the tuning command repeatedly until the Execute Tuning bit is
2834 * cleared or the number of loops reaches the tuning loop count.
2835 */
2836 for (i = 0; i < host->tuning_loop_count; i++) {
2837 u16 ctrl;
2838
2839 sdhci_send_tuning(host, opcode);
2840
2841 if (!host->tuning_done) {
2842 pr_debug("%s: Tuning timeout, falling back to fixed sampling clock\n",
2843 mmc_hostname(host->mmc));
2844 sdhci_abort_tuning(host, opcode);
2845 return -ETIMEDOUT;
2846 }
2847
2848 /* Spec does not require a delay between tuning cycles */
2849 if (host->tuning_delay > 0)
2850 mdelay(host->tuning_delay);
2851
2852 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2853 if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) {
2854 if (ctrl & SDHCI_CTRL_TUNED_CLK)
2855 return 0; /* Success! */
2856 break;
2857 }
2858
2859 }
2860
2861 pr_info("%s: Tuning failed, falling back to fixed sampling clock\n",
2862 mmc_hostname(host->mmc));
2863 sdhci_reset_tuning(host);
2864 return -EAGAIN;
2865 }
2866
2867 int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
2868 {
2869 struct sdhci_host *host = mmc_priv(mmc);
2870 int err = 0;
2871 unsigned int tuning_count = 0;
2872 bool hs400_tuning;
2873
2874 hs400_tuning = host->flags & SDHCI_HS400_TUNING;
2875
2876 if (host->tuning_mode == SDHCI_TUNING_MODE_1)
2877 tuning_count = host->tuning_count;
2878
2879 /*
2880 * The Host Controller needs tuning in case of SDR104 and DDR50
2881 * mode, and for SDR50 mode when Use Tuning for SDR50 is set in
2882 * the Capabilities register.
2883 * If the Host Controller supports the HS200 mode then the
2884 * tuning function has to be executed.
2885 */
2886 switch (host->timing) {
2887 /* HS400 tuning is done in HS200 mode */
2888 case MMC_TIMING_MMC_HS400:
2889 err = -EINVAL;
2890 goto out;
2891
2892 case MMC_TIMING_MMC_HS200:
2893 /*
2894 * Periodic re-tuning for HS400 is not expected to be needed, so
2895 * disable it here.
2896 */
2897 if (hs400_tuning)
2898 tuning_count = 0;
2899 break;
2900
2901 case MMC_TIMING_UHS_SDR104:
2902 case MMC_TIMING_UHS_DDR50:
2903 break;
2904
2905 case MMC_TIMING_UHS_SDR50:
2906 if (host->flags & SDHCI_SDR50_NEEDS_TUNING)
2907 break;
2908 fallthrough;
2909
2910 default:
2911 goto out;
2912 }
2913
2914 if (host->ops->platform_execute_tuning) {
2915 err = host->ops->platform_execute_tuning(host, opcode);
2916 goto out;
2917 }
2918
2919 mmc->retune_period = tuning_count;
2920
2921 if (host->tuning_delay < 0)
2922 host->tuning_delay = opcode == MMC_SEND_TUNING_BLOCK;
2923
2924 sdhci_start_tuning(host);
2925
2926 host->tuning_err = __sdhci_execute_tuning(host, opcode);
2927
2928 sdhci_end_tuning(host);
2929 out:
2930 host->flags &= ~SDHCI_HS400_TUNING;
2931
2932 return err;
2933 }
2934 EXPORT_SYMBOL_GPL(sdhci_execute_tuning);
2935
2936 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
2937 {
2938 /* Host Controller v3.00 defines preset value registers */
2939 if (host->version < SDHCI_SPEC_300)
2940 return;
2941
2942 /*
2943 * Only enable or disable Preset Value if it is not already in the
2944 * requested state. Otherwise, we bail out.
2945 */
2946 if (host->preset_enabled != enable) {
2947 u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2948
2949 if (enable)
2950 ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
2951 else
2952 ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
2953
2954 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2955
2956 if (enable)
2957 host->flags |= SDHCI_PV_ENABLED;
2958 else
2959 host->flags &= ~SDHCI_PV_ENABLED;
2960
2961 host->preset_enabled = enable;
2962 }
2963 }
2964
2965 static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
2966 int err)
2967 {
2968 struct mmc_data *data = mrq->data;
2969
2970 if (data->host_cookie != COOKIE_UNMAPPED)
2971 dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len,
2972 mmc_get_dma_dir(data));
2973
2974 data->host_cookie = COOKIE_UNMAPPED;
2975 }
2976
2977 static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
2978 {
2979 struct sdhci_host *host = mmc_priv(mmc);
2980
2981 mrq->data->host_cookie = COOKIE_UNMAPPED;
2982
2983 /*
2984 * No pre-mapping in the pre hook if we're using the bounce buffer,
2985 * for that we would need two bounce buffers since one buffer is
2986 * in flight when this is getting called.
2987 */
2988 if (host->flags & SDHCI_REQ_USE_DMA && !host->bounce_buffer)
2989 sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
2990 }
2991
2992 static void sdhci_error_out_mrqs(struct sdhci_host *host, int err)
2993 {
2994 if (host->data_cmd) {
2995 host->data_cmd->error = err;
2996 sdhci_finish_mrq(host, host->data_cmd->mrq);
2997 }
2998
2999 if (host->cmd) {
3000 host->cmd->error = err;
3001 sdhci_finish_mrq(host, host->cmd->mrq);
3002 }
3003 }
3004
3005 static void sdhci_card_event(struct mmc_host *mmc)
3006 {
3007 struct sdhci_host *host = mmc_priv(mmc);
3008 unsigned long flags;
3009 int present;
3010
3011 /* First check if client has provided their own card event */
3012 if (host->ops->card_event)
3013 host->ops->card_event(host);
3014
3015 present = mmc->ops->get_cd(mmc);
3016
3017 spin_lock_irqsave(&host->lock, flags);
3018
3019 /* Check sdhci_has_requests() first in case we are runtime suspended */
3020 if (sdhci_has_requests(host) && !present) {
3021 pr_err("%s: Card removed during transfer!\n",
3022 mmc_hostname(mmc));
3023 pr_err("%s: Resetting controller.\n",
3024 mmc_hostname(mmc));
3025
3026 sdhci_do_reset(host, SDHCI_RESET_CMD);
3027 sdhci_do_reset(host, SDHCI_RESET_DATA);
3028
3029 sdhci_error_out_mrqs(host, -ENOMEDIUM);
3030 }
3031
3032 spin_unlock_irqrestore(&host->lock, flags);
3033 }
3034
3035 static const struct mmc_host_ops sdhci_ops = {
3036 .request = sdhci_request,
3037 .post_req = sdhci_post_req,
3038 .pre_req = sdhci_pre_req,
3039 .set_ios = sdhci_set_ios,
3040 .get_cd = sdhci_get_cd,
3041 .get_ro = sdhci_get_ro,
3042 .hw_reset = sdhci_hw_reset,
3043 .enable_sdio_irq = sdhci_enable_sdio_irq,
3044 .ack_sdio_irq = sdhci_ack_sdio_irq,
3045 .start_signal_voltage_switch = sdhci_start_signal_voltage_switch,
3046 .prepare_hs400_tuning = sdhci_prepare_hs400_tuning,
3047 .execute_tuning = sdhci_execute_tuning,
3048 .card_event = sdhci_card_event,
3049 .card_busy = sdhci_card_busy,
3050 };
3051
3052 /*****************************************************************************\
3053 * *
3054 * Request done *
3055 * *
3056 \*****************************************************************************/
3057
3058 static bool sdhci_request_done(struct sdhci_host *host)
3059 {
3060 unsigned long flags;
3061 struct mmc_request *mrq;
3062 int i;
3063
3064 spin_lock_irqsave(&host->lock, flags);
3065
3066 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
3067 mrq = host->mrqs_done[i];
3068 if (mrq)
3069 break;
3070 }
3071
3072 if (!mrq) {
3073 spin_unlock_irqrestore(&host->lock, flags);
3074 return true;
3075 }
3076
3077 /*
3078 * The controller needs a reset of internal state machines
3079 * upon error conditions.
3080 */
3081 if (sdhci_needs_reset(host, mrq)) {
3082 /*
3083 * Do not finish until command and data lines are available for
3084 * reset. Note there can only be one other mrq, so it cannot
3085 * also be in mrqs_done, otherwise host->cmd and host->data_cmd
3086 * would both be null.
3087 */
3088 if (host->cmd || host->data_cmd) {
3089 spin_unlock_irqrestore(&host->lock, flags);
3090 return true;
3091 }
3092
3093 /* Some controllers need this kick or reset won't work here */
3094 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
3095 /* This is to force an update */
3096 host->ops->set_clock(host, host->clock);
3097
3098 /*
3099 * Spec says we should do both at the same time, but Ricoh
3100 * controllers do not like that.
3101 */
3102 sdhci_do_reset(host, SDHCI_RESET_CMD);
3103 sdhci_do_reset(host, SDHCI_RESET_DATA);
3104
3105 host->pending_reset = false;
3106 }
3107
3108 /*
3109 * Always unmap the data buffers if they were mapped by
3110 * sdhci_prepare_data() whenever we finish with a request.
3111 * This avoids leaking DMA mappings on error.
3112 */
3113 if (host->flags & SDHCI_REQ_USE_DMA) {
3114 struct mmc_data *data = mrq->data;
3115
3116 if (host->use_external_dma && data &&
3117 (mrq->cmd->error || data->error)) {
3118 struct dma_chan *chan = sdhci_external_dma_channel(host, data);
3119
3120 host->mrqs_done[i] = NULL;
3121 spin_unlock_irqrestore(&host->lock, flags);
3122 dmaengine_terminate_sync(chan);
3123 spin_lock_irqsave(&host->lock, flags);
3124 sdhci_set_mrq_done(host, mrq);
3125 }
3126
3127 if (data && data->host_cookie == COOKIE_MAPPED) {
3128 if (host->bounce_buffer) {
3129 /*
3130 * On reads, copy the bounced data into the
3131 * sglist
3132 */
3133 if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) {
3134 unsigned int length = data->bytes_xfered;
3135
3136 if (length > host->bounce_buffer_size) {
3137 pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n",
3138 mmc_hostname(host->mmc),
3139 host->bounce_buffer_size,
3140 data->bytes_xfered);
3141 /* Cap it down and continue */
3142 length = host->bounce_buffer_size;
3143 }
3144 dma_sync_single_for_cpu(
3145 mmc_dev(host->mmc),
3146 host->bounce_addr,
3147 host->bounce_buffer_size,
3148 DMA_FROM_DEVICE);
3149 sg_copy_from_buffer(data->sg,
3150 data->sg_len,
3151 host->bounce_buffer,
3152 length);
3153 } else {
3154 /* No copying, just switch ownership */
3155 dma_sync_single_for_cpu(
3156 mmc_dev(host->mmc),
3157 host->bounce_addr,
3158 host->bounce_buffer_size,
3159 mmc_get_dma_dir(data));
3160 }
3161 } else {
3162 /* Unmap the raw data */
3163 dma_unmap_sg(mmc_dev(host->mmc), data->sg,
3164 data->sg_len,
3165 mmc_get_dma_dir(data));
3166 }
3167 data->host_cookie = COOKIE_UNMAPPED;
3168 }
3169 }
3170
3171 host->mrqs_done[i] = NULL;
3172
3173 spin_unlock_irqrestore(&host->lock, flags);
3174
3175 if (host->ops->request_done)
3176 host->ops->request_done(host, mrq);
3177 else
3178 mmc_request_done(host->mmc, mrq);
3179
3180 return false;
3181 }
3182
3183 static void sdhci_complete_work(struct work_struct *work)
3184 {
3185 struct sdhci_host *host = container_of(work, struct sdhci_host,
3186 complete_work);
3187
3188 while (!sdhci_request_done(host))
3189 ;
3190 }
3191
3192 static void sdhci_timeout_timer(struct timer_list *t)
3193 {
3194 struct sdhci_host *host;
3195 unsigned long flags;
3196
3197 host = from_timer(host, t, timer);
3198
3199 spin_lock_irqsave(&host->lock, flags);
3200
3201 if (host->cmd && !sdhci_data_line_cmd(host->cmd)) {
3202 pr_err("%s: Timeout waiting for hardware cmd interrupt.\n",
3203 mmc_hostname(host->mmc));
3204 sdhci_err_stats_inc(host, REQ_TIMEOUT);
3205 sdhci_dumpregs(host);
3206
3207 host->cmd->error = -ETIMEDOUT;
3208 sdhci_finish_mrq(host, host->cmd->mrq);
3209 }
3210
3211 spin_unlock_irqrestore(&host->lock, flags);
3212 }
3213
3214 static void sdhci_timeout_data_timer(struct timer_list *t)
3215 {
3216 struct sdhci_host *host;
3217 unsigned long flags;
3218
3219 host = from_timer(host, t, data_timer);
3220
3221 spin_lock_irqsave(&host->lock, flags);
3222
3223 if (host->data || host->data_cmd ||
3224 (host->cmd && sdhci_data_line_cmd(host->cmd))) {
3225 pr_err("%s: Timeout waiting for hardware interrupt.\n",
3226 mmc_hostname(host->mmc));
3227 sdhci_err_stats_inc(host, REQ_TIMEOUT);
3228 sdhci_dumpregs(host);
3229
3230 if (host->data) {
3231 host->data->error = -ETIMEDOUT;
3232 __sdhci_finish_data(host, true);
3233 queue_work(host->complete_wq, &host->complete_work);
3234 } else if (host->data_cmd) {
3235 host->data_cmd->error = -ETIMEDOUT;
3236 sdhci_finish_mrq(host, host->data_cmd->mrq);
3237 } else {
3238 host->cmd->error = -ETIMEDOUT;
3239 sdhci_finish_mrq(host, host->cmd->mrq);
3240 }
3241 }
3242
3243 spin_unlock_irqrestore(&host->lock, flags);
3244 }
3245
3246 /*****************************************************************************\
3247 * *
3248 * Interrupt handling *
3249 * *
3250 \*****************************************************************************/
3251
3252 static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p)
3253 {
3254 /* Handle auto-CMD12 error */
3255 if (intmask & SDHCI_INT_AUTO_CMD_ERR && host->data_cmd) {
3256 struct mmc_request *mrq = host->data_cmd->mrq;
3257 u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
3258 int data_err_bit = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
3259 SDHCI_INT_DATA_TIMEOUT :
3260 SDHCI_INT_DATA_CRC;
3261
3262 /* Treat auto-CMD12 error the same as data error */
3263 if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
3264 *intmask_p |= data_err_bit;
3265 return;
3266 }
3267 }
3268
3269 if (!host->cmd) {
3270 /*
3271 * SDHCI recovers from errors by resetting the cmd and data
3272 * circuits. Until that is done, there very well might be more
3273 * interrupts, so ignore them in that case.
3274 */
3275 if (host->pending_reset)
3276 return;
3277 pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
3278 mmc_hostname(host->mmc), (unsigned)intmask);
3279 sdhci_err_stats_inc(host, UNEXPECTED_IRQ);
3280 sdhci_dumpregs(host);
3281 return;
3282 }
3283
3284 if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
3285 SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
3286 if (intmask & SDHCI_INT_TIMEOUT) {
3287 host->cmd->error = -ETIMEDOUT;
3288 sdhci_err_stats_inc(host, CMD_TIMEOUT);
3289 } else {
3290 host->cmd->error = -EILSEQ;
3291 if (!mmc_op_tuning(host->cmd->opcode))
3292 sdhci_err_stats_inc(host, CMD_CRC);
3293 }
3294 /* Treat data command CRC error the same as data CRC error */
3295 if (host->cmd->data &&
3296 (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
3297 SDHCI_INT_CRC) {
3298 host->cmd = NULL;
3299 *intmask_p |= SDHCI_INT_DATA_CRC;
3300 return;
3301 }
3302
3303 __sdhci_finish_mrq(host, host->cmd->mrq);
3304 return;
3305 }
3306
3307 /* Handle auto-CMD23 error */
3308 if (intmask & SDHCI_INT_AUTO_CMD_ERR) {
3309 struct mmc_request *mrq = host->cmd->mrq;
3310 u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
3311 int err = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
3312 -ETIMEDOUT :
3313 -EILSEQ;
3314 sdhci_err_stats_inc(host, AUTO_CMD);
3315
3317 if (mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
3318 mrq->sbc->error = err;
3319 __sdhci_finish_mrq(host, mrq);
3320 return;
3321 }
3322 }
3323
3324 if (intmask & SDHCI_INT_RESPONSE)
3325 sdhci_finish_command(host);
3326 }
3327
3328 static void sdhci_adma_show_error(struct sdhci_host *host)
3329 {
3330 void *desc = host->adma_table;
3331 dma_addr_t dma = host->adma_addr;
3332
3333 sdhci_dumpregs(host);
3334
3335 while (true) {
3336 struct sdhci_adma2_64_desc *dma_desc = desc;
3337
3338 if (host->flags & SDHCI_USE_64_BIT_DMA)
3339 SDHCI_DUMP("%08llx: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
3340 (unsigned long long)dma,
3341 le32_to_cpu(dma_desc->addr_hi),
3342 le32_to_cpu(dma_desc->addr_lo),
3343 le16_to_cpu(dma_desc->len),
3344 le16_to_cpu(dma_desc->cmd));
3345 else
3346 SDHCI_DUMP("%08llx: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
3347 (unsigned long long)dma,
3348 le32_to_cpu(dma_desc->addr_lo),
3349 le16_to_cpu(dma_desc->len),
3350 le16_to_cpu(dma_desc->cmd));
3351
3352 desc += host->desc_sz;
3353 dma += host->desc_sz;
3354
3355 if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
3356 break;
3357 }
3358 }
3359
3360 static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
3361 {
3362 u32 command;
3363
3364 /*
3365 * CMD19 generates _only_ a Buffer Read Ready interrupt when
3366 * sdhci_send_tuning() is used.
3367 * We need to exclude the case of PIO mode with mmc_send_tuning():
3368 * otherwise sdhci_transfer_pio() is never called, SDHCI_INT_DATA_AVAIL
3369 * stays asserted and we end up stuck in an interrupt storm.
3370 */
3371 if (intmask & SDHCI_INT_DATA_AVAIL && !host->data) {
3372 command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
3373 if (command == MMC_SEND_TUNING_BLOCK ||
3374 command == MMC_SEND_TUNING_BLOCK_HS200) {
3375 host->tuning_done = 1;
3376 wake_up(&host->buf_ready_int);
3377 return;
3378 }
3379 }
3380
3381 if (!host->data) {
3382 struct mmc_command *data_cmd = host->data_cmd;
3383
3384 /*
3385 * The "data complete" interrupt is also used to
3386 * indicate that a busy state has ended. See comment
3387 * above in sdhci_cmd_irq().
3388 */
3389 if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) {
3390 if (intmask & SDHCI_INT_DATA_TIMEOUT) {
3391 host->data_cmd = NULL;
3392 data_cmd->error = -ETIMEDOUT;
3393 sdhci_err_stats_inc(host, CMD_TIMEOUT);
3394 __sdhci_finish_mrq(host, data_cmd->mrq);
3395 return;
3396 }
3397 if (intmask & SDHCI_INT_DATA_END) {
3398 host->data_cmd = NULL;
3399 /*
3400 * Some cards handle busy-end interrupt
3401 * before the command has completed, so make
3402 * sure we do things in the proper order.
3403 */
3404 if (host->cmd == data_cmd)
3405 return;
3406
3407 __sdhci_finish_mrq(host, data_cmd->mrq);
3408 return;
3409 }
3410 }
3411
3412 /*
3413 * SDHCI recovers from errors by resetting the cmd and data
3414 * circuits. Until that is done, there very well might be more
3415 * interrupts, so ignore them in that case.
3416 */
3417 if (host->pending_reset)
3418 return;
3419
3420 pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
3421 mmc_hostname(host->mmc), (unsigned)intmask);
3422 sdhci_err_stats_inc(host, UNEXPECTED_IRQ);
3423 sdhci_dumpregs(host);
3424
3425 return;
3426 }
3427
3428 if (intmask & SDHCI_INT_DATA_TIMEOUT) {
3429 host->data->error = -ETIMEDOUT;
3430 sdhci_err_stats_inc(host, DAT_TIMEOUT);
3431 } else if (intmask & SDHCI_INT_DATA_END_BIT) {
3432 host->data->error = -EILSEQ;
3433 if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))))
3434 sdhci_err_stats_inc(host, DAT_CRC);
3435 } else if ((intmask & SDHCI_INT_DATA_CRC) &&
3436 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
3437 != MMC_BUS_TEST_R) {
3438 host->data->error = -EILSEQ;
3439 if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))))
3440 sdhci_err_stats_inc(host, DAT_CRC);
3441 } else if (intmask & SDHCI_INT_ADMA_ERROR) {
3442 pr_err("%s: ADMA error: 0x%08x\n", mmc_hostname(host->mmc),
3443 intmask);
3444 sdhci_adma_show_error(host);
3445 sdhci_err_stats_inc(host, ADMA);
3446 host->data->error = -EIO;
3447 if (host->ops->adma_workaround)
3448 host->ops->adma_workaround(host, intmask);
3449 }
3450
3451 if (host->data->error)
3452 sdhci_finish_data(host);
3453 else {
3454 if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
3455 sdhci_transfer_pio(host);
3456
3457 /*
3458 * We currently don't do anything fancy with DMA
3459 * boundaries, but as we can't disable the feature
3460 * we need to at least restart the transfer.
3461 *
3462 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
3463 * should return a valid address to continue from, but as
3464 * some controllers are faulty, don't trust them.
3465 */
3466 if (intmask & SDHCI_INT_DMA_END) {
3467 dma_addr_t dmastart, dmanow;
3468
3469 dmastart = sdhci_sdma_address(host);
3470 dmanow = dmastart + host->data->bytes_xfered;
3471 /*
3472 * Force update to the next DMA block boundary.
3473 */
3474 dmanow = (dmanow &
3475 ~((dma_addr_t)SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
3476 SDHCI_DEFAULT_BOUNDARY_SIZE;
3477 host->data->bytes_xfered = dmanow - dmastart;
3478 DBG("DMA base %pad, transferred 0x%06x bytes, next %pad\n",
3479 &dmastart, host->data->bytes_xfered, &dmanow);
3480 sdhci_set_sdma_addr(host, dmanow);
3481 }
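/*
 * Worked example (illustrative): with the default 512 KiB boundary,
 * if the SDMA buffer starts at dmastart = 0x10070000 and 0x8000 bytes
 * were accounted so far, the current position 0x10078000 is rounded up
 * to the next boundary, 0x10080000; that value is written back as the
 * new SDMA address and bytes_xfered becomes 0x10000.
 */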
3482
3483 if (intmask & SDHCI_INT_DATA_END) {
3484 if (host->cmd == host->data_cmd) {
3485 /*
3486 * Data managed to finish before the
3487 * command completed. Make sure we do
3488 * things in the proper order.
3489 */
3490 host->data_early = 1;
3491 } else {
3492 sdhci_finish_data(host);
3493 }
3494 }
3495 }
3496 }
3497
3498 static inline bool sdhci_defer_done(struct sdhci_host *host,
3499 struct mmc_request *mrq)
3500 {
3501 struct mmc_data *data = mrq->data;
3502
3503 return host->pending_reset || host->always_defer_done ||
3504 ((host->flags & SDHCI_REQ_USE_DMA) && data &&
3505 data->host_cookie == COOKIE_MAPPED);
3506 }
3507
3508 static irqreturn_t sdhci_irq(int irq, void *dev_id)
3509 {
3510 struct mmc_request *mrqs_done[SDHCI_MAX_MRQS] = {0};
3511 irqreturn_t result = IRQ_NONE;
3512 struct sdhci_host *host = dev_id;
3513 u32 intmask, mask, unexpected = 0;
3514 int max_loops = 16;
3515 int i;
3516
3517 spin_lock(&host->lock);
3518
3519 if (host->runtime_suspended) {
3520 spin_unlock(&host->lock);
3521 return IRQ_NONE;
3522 }
3523
3524 intmask = sdhci_readl(host, SDHCI_INT_STATUS);
3525 if (!intmask || intmask == 0xffffffff) {
3526 result = IRQ_NONE;
3527 goto out;
3528 }
3529
3530 do {
3531 DBG("IRQ status 0x%08x\n", intmask);
3532
3533 if (host->ops->irq) {
3534 intmask = host->ops->irq(host, intmask);
3535 if (!intmask)
3536 goto cont;
3537 }
3538
3539 /* Clear selected interrupts. */
3540 mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
3541 SDHCI_INT_BUS_POWER);
3542 sdhci_writel(host, mask, SDHCI_INT_STATUS);
3543
3544 if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
3545 u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
3546 SDHCI_CARD_PRESENT;
3547
3548 /*
3549 * There is an observation on i.MX eSDHC: the INSERT
3550 * bit is immediately set again when it gets
3551 * cleared, if a card is inserted. We have to mask
3552 * the irq to prevent an interrupt storm which would
3553 * freeze the system. The REMOVE bit behaves the
3554 * same way.
3555 *
3556 * More testing is needed here to ensure it works
3557 * for other platforms though.
3558 */
3559 host->ier &= ~(SDHCI_INT_CARD_INSERT |
3560 SDHCI_INT_CARD_REMOVE);
3561 host->ier |= present ? SDHCI_INT_CARD_REMOVE :
3562 SDHCI_INT_CARD_INSERT;
3563 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3564 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3565
3566 sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
3567 SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
3568
3569 host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
3570 SDHCI_INT_CARD_REMOVE);
3571 result = IRQ_WAKE_THREAD;
3572 }
3573
3574 if (intmask & SDHCI_INT_CMD_MASK)
3575 sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK, &intmask);
3576
3577 if (intmask & SDHCI_INT_DATA_MASK)
3578 sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
3579
3580 if (intmask & SDHCI_INT_BUS_POWER)
3581 pr_err("%s: Card is consuming too much power!\n",
3582 mmc_hostname(host->mmc));
3583
3584 if (intmask & SDHCI_INT_RETUNE)
3585 mmc_retune_needed(host->mmc);
3586
3587 if ((intmask & SDHCI_INT_CARD_INT) &&
3588 (host->ier & SDHCI_INT_CARD_INT)) {
3589 sdhci_enable_sdio_irq_nolock(host, false);
3590 sdio_signal_irq(host->mmc);
3591 }
3592
3593 intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
3594 SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
3595 SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
3596 SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT);
3597
3598 if (intmask) {
3599 unexpected |= intmask;
3600 sdhci_writel(host, intmask, SDHCI_INT_STATUS);
3601 }
3602 cont:
3603 if (result == IRQ_NONE)
3604 result = IRQ_HANDLED;
3605
3606 intmask = sdhci_readl(host, SDHCI_INT_STATUS);
3607 } while (intmask && --max_loops);
3608
3609 /* Determine if mrqs can be completed immediately */
3610 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
3611 struct mmc_request *mrq = host->mrqs_done[i];
3612
3613 if (!mrq)
3614 continue;
3615
3616 if (sdhci_defer_done(host, mrq)) {
3617 result = IRQ_WAKE_THREAD;
3618 } else {
3619 mrqs_done[i] = mrq;
3620 host->mrqs_done[i] = NULL;
3621 }
3622 }
3623 out:
3624 if (host->deferred_cmd)
3625 result = IRQ_WAKE_THREAD;
3626
3627 spin_unlock(&host->lock);
3628
3629 /* Process mrqs ready for immediate completion */
3630 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
3631 if (!mrqs_done[i])
3632 continue;
3633
3634 if (host->ops->request_done)
3635 host->ops->request_done(host, mrqs_done[i]);
3636 else
3637 mmc_request_done(host->mmc, mrqs_done[i]);
3638 }
3639
3640 if (unexpected) {
3641 pr_err("%s: Unexpected interrupt 0x%08x.\n",
3642 mmc_hostname(host->mmc), unexpected);
3643 sdhci_err_stats_inc(host, UNEXPECTED_IRQ);
3644 sdhci_dumpregs(host);
3645 }
3646
3647 return result;
3648 }
3649
3650 static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
3651 {
3652 struct sdhci_host *host = dev_id;
3653 struct mmc_command *cmd;
3654 unsigned long flags;
3655 u32 isr;
3656
3657 while (!sdhci_request_done(host))
3658 ;
3659
3660 spin_lock_irqsave(&host->lock, flags);
3661
3662 isr = host->thread_isr;
3663 host->thread_isr = 0;
3664
3665 cmd = host->deferred_cmd;
3666 if (cmd && !sdhci_send_command_retry(host, cmd, flags))
3667 sdhci_finish_mrq(host, cmd->mrq);
3668
3669 spin_unlock_irqrestore(&host->lock, flags);
3670
3671 if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
3672 struct mmc_host *mmc = host->mmc;
3673
3674 mmc->ops->card_event(mmc);
3675 mmc_detect_change(mmc, msecs_to_jiffies(200));
3676 }
3677
3678 return IRQ_HANDLED;
3679 }
3680
3681 /*****************************************************************************\
3682 * *
3683 * Suspend/resume *
3684 * *
3685 \*****************************************************************************/
3686
3687 #ifdef CONFIG_PM
3688
3689 static bool sdhci_cd_irq_can_wakeup(struct sdhci_host *host)
3690 {
3691 return mmc_card_is_removable(host->mmc) &&
3692 !(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
3693 !mmc_can_gpio_cd(host->mmc);
3694 }
3695
3696 /*
3697 * To enable wakeup events, the corresponding events have to be enabled in
3698 * the Interrupt Status Enable register too. See 'Table 1-6: Wakeup Signal
3699 * Table' in the SD Host Controller Standard Specification.
3700 * There is no need to restore the SDHCI_INT_ENABLE state in
3701 * sdhci_disable_irq_wakeups() since it will be set again by
3702 * sdhci_enable_card_detection() or sdhci_init().
3703 */
3704 static bool sdhci_enable_irq_wakeups(struct sdhci_host *host)
3705 {
3706 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE |
3707 SDHCI_WAKE_ON_INT;
3708 u32 irq_val = 0;
3709 u8 wake_val = 0;
3710 u8 val;
3711
3712 if (sdhci_cd_irq_can_wakeup(host)) {
3713 wake_val |= SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE;
3714 irq_val |= SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE;
3715 }
3716
3717 if (mmc_card_wake_sdio_irq(host->mmc)) {
3718 wake_val |= SDHCI_WAKE_ON_INT;
3719 irq_val |= SDHCI_INT_CARD_INT;
3720 }
3721
3722 if (!irq_val)
3723 return false;
3724
3725 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
3726 val &= ~mask;
3727 val |= wake_val;
3728 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
3729
3730 sdhci_writel(host, irq_val, SDHCI_INT_ENABLE);
3731
3732 host->irq_wake_enabled = !enable_irq_wake(host->irq);
3733
3734 return host->irq_wake_enabled;
3735 }
3736
3737 static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
3738 {
3739 u8 val;
3740 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
3741 | SDHCI_WAKE_ON_INT;
3742
3743 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
3744 val &= ~mask;
3745 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
3746
3747 disable_irq_wake(host->irq);
3748
3749 host->irq_wake_enabled = false;
3750 }
3751
3752 int sdhci_suspend_host(struct sdhci_host *host)
3753 {
3754 sdhci_disable_card_detection(host);
3755
3756 mmc_retune_timer_stop(host->mmc);
3757
3758 if (!device_may_wakeup(mmc_dev(host->mmc)) ||
3759 !sdhci_enable_irq_wakeups(host)) {
3760 host->ier = 0;
3761 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
3762 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
3763 free_irq(host->irq, host);
3764 }
3765
3766 return 0;
3767 }
3768
3769 EXPORT_SYMBOL_GPL(sdhci_suspend_host);
3770
3771 int sdhci_resume_host(struct sdhci_host *host)
3772 {
3773 struct mmc_host *mmc = host->mmc;
3774 int ret = 0;
3775
3776 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3777 if (host->ops->enable_dma)
3778 host->ops->enable_dma(host);
3779 }
3780
3781 if ((mmc->pm_flags & MMC_PM_KEEP_POWER) &&
3782 (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
3783 /* Card keeps power but host controller does not */
3784 sdhci_init(host, 0);
3785 host->pwr = 0;
3786 host->clock = 0;
3787 host->reinit_uhs = true;
3788 mmc->ops->set_ios(mmc, &mmc->ios);
3789 } else {
3790 sdhci_init(host, (mmc->pm_flags & MMC_PM_KEEP_POWER));
3791 }
3792
3793 if (host->irq_wake_enabled) {
3794 sdhci_disable_irq_wakeups(host);
3795 } else {
3796 ret = request_threaded_irq(host->irq, sdhci_irq,
3797 sdhci_thread_irq, IRQF_SHARED,
3798 mmc_hostname(mmc), host);
3799 if (ret)
3800 return ret;
3801 }
3802
3803 sdhci_enable_card_detection(host);
3804
3805 return ret;
3806 }
3807
3808 EXPORT_SYMBOL_GPL(sdhci_resume_host);
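
/*
 * Illustrative sketch (not part of this driver): a glue driver would
 * typically wire sdhci_suspend_host()/sdhci_resume_host() into its
 * system-sleep dev_pm_ops. The my_sdhci_* names and the use of
 * dev_get_drvdata() to recover the host pointer are assumptions made
 * only for this example.
 *
 *	static int my_sdhci_suspend(struct device *dev)
 *	{
 *		struct sdhci_host *host = dev_get_drvdata(dev);
 *
 *		return sdhci_suspend_host(host);
 *	}
 *
 *	static int my_sdhci_resume(struct device *dev)
 *	{
 *		struct sdhci_host *host = dev_get_drvdata(dev);
 *
 *		return sdhci_resume_host(host);
 *	}
 *
 *	static SIMPLE_DEV_PM_OPS(my_sdhci_pm_ops, my_sdhci_suspend, my_sdhci_resume);
 */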
3809
3810 int sdhci_runtime_suspend_host(struct sdhci_host *host)
3811 {
3812 unsigned long flags;
3813
3814 mmc_retune_timer_stop(host->mmc);
3815
3816 spin_lock_irqsave(&host->lock, flags);
3817 host->ier &= SDHCI_INT_CARD_INT;
3818 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3819 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3820 spin_unlock_irqrestore(&host->lock, flags);
3821
3822 synchronize_hardirq(host->irq);
3823
3824 spin_lock_irqsave(&host->lock, flags);
3825 host->runtime_suspended = true;
3826 spin_unlock_irqrestore(&host->lock, flags);
3827
3828 return 0;
3829 }
3830 EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
3831
3832 int sdhci_runtime_resume_host(struct sdhci_host *host, int soft_reset)
3833 {
3834 struct mmc_host *mmc = host->mmc;
3835 unsigned long flags;
3836 int host_flags = host->flags;
3837
3838 if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3839 if (host->ops->enable_dma)
3840 host->ops->enable_dma(host);
3841 }
3842
3843 sdhci_init(host, soft_reset);
3844
3845 if (mmc->ios.power_mode != MMC_POWER_UNDEFINED &&
3846 mmc->ios.power_mode != MMC_POWER_OFF) {
3847 /* Force clock and power re-program */
3848 host->pwr = 0;
3849 host->clock = 0;
3850 host->reinit_uhs = true;
3851 mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
3852 mmc->ops->set_ios(mmc, &mmc->ios);
3853
3854 if ((host_flags & SDHCI_PV_ENABLED) &&
3855 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
3856 spin_lock_irqsave(&host->lock, flags);
3857 sdhci_enable_preset_value(host, true);
3858 spin_unlock_irqrestore(&host->lock, flags);
3859 }
3860
3861 if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
3862 mmc->ops->hs400_enhanced_strobe)
3863 mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);
3864 }
3865
3866 spin_lock_irqsave(&host->lock, flags);
3867
3868 host->runtime_suspended = false;
3869
3870 /* Enable SDIO IRQ */
3871 if (sdio_irq_claimed(mmc))
3872 sdhci_enable_sdio_irq_nolock(host, true);
3873
3874 /* Enable Card Detection */
3875 sdhci_enable_card_detection(host);
3876
3877 spin_unlock_irqrestore(&host->lock, flags);
3878
3879 return 0;
3880 }
3881 EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);
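
/*
 * Illustrative sketch (not part of this driver): runtime PM callbacks in a
 * glue driver usually just forward to the helpers above; passing 0 for
 * soft_reset requests a full (non-soft) re-initialisation in sdhci_init().
 * The my_sdhci_* names are assumptions made only for this example.
 *
 *	static int my_sdhci_runtime_suspend(struct device *dev)
 *	{
 *		struct sdhci_host *host = dev_get_drvdata(dev);
 *
 *		return sdhci_runtime_suspend_host(host);
 *	}
 *
 *	static int my_sdhci_runtime_resume(struct device *dev)
 *	{
 *		struct sdhci_host *host = dev_get_drvdata(dev);
 *
 *		return sdhci_runtime_resume_host(host, 0);
 *	}
 */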
3882
3883 #endif /* CONFIG_PM */
3884
3885 /*****************************************************************************\
3886 * *
3887 * Command Queue Engine (CQE) helpers *
3888 * *
3889 \*****************************************************************************/
3890
3891 void sdhci_cqe_enable(struct mmc_host *mmc)
3892 {
3893 struct sdhci_host *host = mmc_priv(mmc);
3894 unsigned long flags;
3895 u8 ctrl;
3896
3897 spin_lock_irqsave(&host->lock, flags);
3898
3899 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
3900 ctrl &= ~SDHCI_CTRL_DMA_MASK;
3901 /*
3902 * Hosts from v4.10 support the ADMA3 DMA type.
3903 * ADMA3 uses integrated descriptors that fetch both the command and
3904 * transfer descriptors, which makes it better suited to command queuing.
3905 */
3906 if (host->v4_mode && (host->caps1 & SDHCI_CAN_DO_ADMA3))
3907 ctrl |= SDHCI_CTRL_ADMA3;
3908 else if (host->flags & SDHCI_USE_64_BIT_DMA)
3909 ctrl |= SDHCI_CTRL_ADMA64;
3910 else
3911 ctrl |= SDHCI_CTRL_ADMA32;
3912 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
3913
3914 sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, 512),
3915 SDHCI_BLOCK_SIZE);
3916
3917 /* Set maximum timeout */
3918 sdhci_set_timeout(host, NULL);
3919
3920 host->ier = host->cqe_ier;
3921
3922 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3923 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3924
3925 host->cqe_on = true;
3926
3927 pr_debug("%s: sdhci: CQE on, IRQ mask %#x, IRQ status %#x\n",
3928 mmc_hostname(mmc), host->ier,
3929 sdhci_readl(host, SDHCI_INT_STATUS));
3930
3931 spin_unlock_irqrestore(&host->lock, flags);
3932 }
3933 EXPORT_SYMBOL_GPL(sdhci_cqe_enable);
3934
3935 void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery)
3936 {
3937 struct sdhci_host *host = mmc_priv(mmc);
3938 unsigned long flags;
3939
3940 spin_lock_irqsave(&host->lock, flags);
3941
3942 sdhci_set_default_irqs(host);
3943
3944 host->cqe_on = false;
3945
3946 if (recovery) {
3947 sdhci_do_reset(host, SDHCI_RESET_CMD);
3948 sdhci_do_reset(host, SDHCI_RESET_DATA);
3949 }
3950
3951 pr_debug("%s: sdhci: CQE off, IRQ mask %#x, IRQ status %#x\n",
3952 mmc_hostname(mmc), host->ier,
3953 sdhci_readl(host, SDHCI_INT_STATUS));
3954
3955 spin_unlock_irqrestore(&host->lock, flags);
3956 }
3957 EXPORT_SYMBOL_GPL(sdhci_cqe_disable);
3958
3959 bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
3960 int *data_error)
3961 {
3962 u32 mask;
3963
3964 if (!host->cqe_on)
3965 return false;
3966
3967 if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC)) {
3968 *cmd_error = -EILSEQ;
3969 if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))))
3970 sdhci_err_stats_inc(host, CMD_CRC);
3971 } else if (intmask & SDHCI_INT_TIMEOUT) {
3972 *cmd_error = -ETIMEDOUT;
3973 sdhci_err_stats_inc(host, CMD_TIMEOUT);
3974 } else
3975 *cmd_error = 0;
3976
3977 if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC)) {
3978 *data_error = -EILSEQ;
3979 if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))))
3980 sdhci_err_stats_inc(host, DAT_CRC);
3981 } else if (intmask & SDHCI_INT_DATA_TIMEOUT) {
3982 *data_error = -ETIMEDOUT;
3983 sdhci_err_stats_inc(host, DAT_TIMEOUT);
3984 } else if (intmask & SDHCI_INT_ADMA_ERROR) {
3985 *data_error = -EIO;
3986 sdhci_err_stats_inc(host, ADMA);
3987 } else
3988 *data_error = 0;
3989
3990 /* Clear selected interrupts. */
3991 mask = intmask & host->cqe_ier;
3992 sdhci_writel(host, mask, SDHCI_INT_STATUS);
3993
3994 if (intmask & SDHCI_INT_BUS_POWER)
3995 pr_err("%s: Card is consuming too much power!\n",
3996 mmc_hostname(host->mmc));
3997
3998 intmask &= ~(host->cqe_ier | SDHCI_INT_ERROR);
3999 if (intmask) {
4000 sdhci_writel(host, intmask, SDHCI_INT_STATUS);
4001 pr_err("%s: CQE: Unexpected interrupt 0x%08x.\n",
4002 mmc_hostname(host->mmc), intmask);
4003 sdhci_err_stats_inc(host, UNEXPECTED_IRQ);
4004 sdhci_dumpregs(host);
4005 }
4006
4007 return true;
4008 }
4009 EXPORT_SYMBOL_GPL(sdhci_cqe_irq);
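
/*
 * Illustrative sketch (not part of this driver): a CQE-capable glue driver
 * typically routes interrupts through sdhci_cqe_irq() and, when it reports
 * a CQE interrupt, hands the result to the CQHCI library. The my_cqhci_irq
 * name is an assumption for this example; cqhci_irq() is the helper
 * declared in cqhci.h.
 *
 *	static u32 my_cqhci_irq(struct sdhci_host *host, u32 intmask)
 *	{
 *		int cmd_error = 0;
 *		int data_error = 0;
 *
 *		if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
 *			return intmask;		// not a CQE interrupt
 *
 *		cqhci_irq(host->mmc, intmask, cmd_error, data_error);
 *
 *		return 0;
 *	}
 */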
4010
4011 /*****************************************************************************\
4012 * *
4013 * Device allocation/registration *
4014 * *
4015 \*****************************************************************************/
4016
4017 struct sdhci_host *sdhci_alloc_host(struct device *dev,
4018 size_t priv_size)
4019 {
4020 struct mmc_host *mmc;
4021 struct sdhci_host *host;
4022
4023 WARN_ON(dev == NULL);
4024
4025 mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
4026 if (!mmc)
4027 return ERR_PTR(-ENOMEM);
4028
4029 host = mmc_priv(mmc);
4030 host->mmc = mmc;
4031 host->mmc_host_ops = sdhci_ops;
4032 mmc->ops = &host->mmc_host_ops;
4033
4034 host->flags = SDHCI_SIGNALING_330;
4035
4036 host->cqe_ier = SDHCI_CQE_INT_MASK;
4037 host->cqe_err_ier = SDHCI_CQE_INT_ERR_MASK;
4038
4039 host->tuning_delay = -1;
4040 host->tuning_loop_count = MAX_TUNING_LOOP;
4041
4042 host->sdma_boundary = SDHCI_DEFAULT_BOUNDARY_ARG;
4043
4044 /*
4045 * The DMA table descriptor count is calculated as the maximum
4046 * number of segments times 2, to allow for an alignment
4047 * descriptor for each segment, plus 1 for a nop end descriptor.
4048 */
4049 host->adma_table_cnt = SDHCI_MAX_SEGS * 2 + 1;
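/*
 * Worked example: with SDHCI_MAX_SEGS segments (128 at the time of
 * writing) this comes to 2 * 128 + 1 = 257 descriptors.
 */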
4050 host->max_adma = 65536;
4051
4052 host->max_timeout_count = 0xE;
4053
4054 return host;
4055 }
4056
4057 EXPORT_SYMBOL_GPL(sdhci_alloc_host);
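
/*
 * Illustrative sketch (not part of this driver): the priv_size passed to
 * sdhci_alloc_host() reserves space behind struct sdhci_host for glue
 * driver data, normally retrieved with sdhci_priv() from sdhci.h. The
 * struct my_sdhci_priv name is an assumption made only for this example.
 *
 *	struct sdhci_host *host;
 *	struct my_sdhci_priv *priv;
 *
 *	host = sdhci_alloc_host(&pdev->dev, sizeof(*priv));
 *	if (IS_ERR(host))
 *		return PTR_ERR(host);
 *	priv = sdhci_priv(host);
 */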
4058
4059 static int sdhci_set_dma_mask(struct sdhci_host *host)
4060 {
4061 struct mmc_host *mmc = host->mmc;
4062 struct device *dev = mmc_dev(mmc);
4063 int ret = -EINVAL;
4064
4065 if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA)
4066 host->flags &= ~SDHCI_USE_64_BIT_DMA;
4067
4068 /* Try 64-bit mask if hardware is capable of it */
4069 if (host->flags & SDHCI_USE_64_BIT_DMA) {
4070 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
4071 if (ret) {
4072 pr_warn("%s: Failed to set 64-bit DMA mask.\n",
4073 mmc_hostname(mmc));
4074 host->flags &= ~SDHCI_USE_64_BIT_DMA;
4075 }
4076 }
4077
4078 /* 32-bit mask as default & fallback */
4079 if (ret) {
4080 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
4081 if (ret)
4082 pr_warn("%s: Failed to set 32-bit DMA mask.\n",
4083 mmc_hostname(mmc));
4084 }
4085
4086 return ret;
4087 }
4088
4089 void __sdhci_read_caps(struct sdhci_host *host, const u16 *ver,
4090 const u32 *caps, const u32 *caps1)
4091 {
4092 u16 v;
4093 u64 dt_caps_mask = 0;
4094 u64 dt_caps = 0;
4095
4096 if (host->read_caps)
4097 return;
4098
4099 host->read_caps = true;
4100
4101 if (debug_quirks)
4102 host->quirks = debug_quirks;
4103
4104 if (debug_quirks2)
4105 host->quirks2 = debug_quirks2;
4106
4107 sdhci_do_reset(host, SDHCI_RESET_ALL);
4108
4109 if (host->v4_mode)
4110 sdhci_do_enable_v4_mode(host);
4111
4112 device_property_read_u64(mmc_dev(host->mmc),
4113 "sdhci-caps-mask", &dt_caps_mask);
4114 device_property_read_u64(mmc_dev(host->mmc),
4115 "sdhci-caps", &dt_caps);
4116
4117 v = ver ? *ver : sdhci_readw(host, SDHCI_HOST_VERSION);
4118 host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;
4119
4120 if (host->quirks & SDHCI_QUIRK_MISSING_CAPS)
4121 return;
4122
4123 if (caps) {
4124 host->caps = *caps;
4125 } else {
4126 host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
4127 host->caps &= ~lower_32_bits(dt_caps_mask);
4128 host->caps |= lower_32_bits(dt_caps);
4129 }
4130
4131 if (host->version < SDHCI_SPEC_300)
4132 return;
4133
4134 if (caps1) {
4135 host->caps1 = *caps1;
4136 } else {
4137 host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
4138 host->caps1 &= ~upper_32_bits(dt_caps_mask);
4139 host->caps1 |= upper_32_bits(dt_caps);
4140 }
4141 }
4142 EXPORT_SYMBOL_GPL(__sdhci_read_caps);
4143
4144 static void sdhci_allocate_bounce_buffer(struct sdhci_host *host)
4145 {
4146 struct mmc_host *mmc = host->mmc;
4147 unsigned int max_blocks;
4148 unsigned int bounce_size;
4149 int ret;
4150
4151 /*
4152 * Cap the bounce buffer at 64KB. Using a bigger bounce buffer
4153 * has diminishing returns, probably because SD/MMC cards are
4154 * usually optimized to handle requests of this size.
4155 */
4156 bounce_size = SZ_64K;
4157 /*
4158 * Shrink the bounce buffer to the maximum request size if that is
4159 * smaller; conversely, the maximum request size is clamped down to
4160 * the bounce buffer size further below.
4161 */
4162 if (mmc->max_req_size < bounce_size)
4163 bounce_size = mmc->max_req_size;
4164 max_blocks = bounce_size / 512;
4165
4166 /*
4167 * When only one segment is supported, a bounce buffer that groups
4168 * scattered reads/writes together can give significant speedups.
4170 */
4171 host->bounce_buffer = devm_kmalloc(mmc_dev(mmc),
4172 bounce_size,
4173 GFP_KERNEL);
4174 if (!host->bounce_buffer) {
4175 pr_err("%s: failed to allocate %u bytes for bounce buffer, falling back to single segments\n",
4176 mmc_hostname(mmc),
4177 bounce_size);
4178 /*
4179 * Exiting with zero here makes sure we proceed with
4180 * mmc->max_segs == 1.
4181 */
4182 return;
4183 }
4184
4185 host->bounce_addr = dma_map_single(mmc_dev(mmc),
4186 host->bounce_buffer,
4187 bounce_size,
4188 DMA_BIDIRECTIONAL);
4189 ret = dma_mapping_error(mmc_dev(mmc), host->bounce_addr);
4190 if (ret) {
4191 devm_kfree(mmc_dev(mmc), host->bounce_buffer);
4192 host->bounce_buffer = NULL;
4193 /* Again fall back to max_segs == 1 */
4194 return;
4195 }
4196
4197 host->bounce_buffer_size = bounce_size;
4198
4199 /* Lie about this since we're bouncing */
4200 mmc->max_segs = max_blocks;
4201 mmc->max_seg_size = bounce_size;
4202 mmc->max_req_size = bounce_size;
4203
4204 pr_info("%s bounce up to %u segments into one, max segment size %u bytes\n",
4205 mmc_hostname(mmc), max_blocks, bounce_size);
4206 }
4207
4208 static inline bool sdhci_can_64bit_dma(struct sdhci_host *host)
4209 {
4210 /*
4211 * According to the SD Host Controller spec v4.10, bit[27] of the
4212 * Capabilities register (added in version 4.10) indicates 64-bit
4213 * System Address support for V4 mode.
4214 */
4215 if (host->version >= SDHCI_SPEC_410 && host->v4_mode)
4216 return host->caps & SDHCI_CAN_64BIT_V4;
4217
4218 return host->caps & SDHCI_CAN_64BIT;
4219 }
4220
4221 int sdhci_setup_host(struct sdhci_host *host)
4222 {
4223 struct mmc_host *mmc;
4224 u32 max_current_caps;
4225 unsigned int ocr_avail;
4226 unsigned int override_timeout_clk;
4227 u32 max_clk;
4228 int ret = 0;
4229 bool enable_vqmmc = false;
4230
4231 WARN_ON(host == NULL);
4232 if (host == NULL)
4233 return -EINVAL;
4234
4235 mmc = host->mmc;
4236
4237 /*
4238 * If there are external regulators, get them. Note this must be done
4239 * early before resetting the host and reading the capabilities so that
4240 * the host can take the appropriate action if regulators are not
4241 * available.
4242 */
4243 if (!mmc->supply.vqmmc) {
4244 ret = mmc_regulator_get_supply(mmc);
4245 if (ret)
4246 return ret;
4247 enable_vqmmc = true;
4248 }
4249
4250 DBG("Version: 0x%08x | Present: 0x%08x\n",
4251 sdhci_readw(host, SDHCI_HOST_VERSION),
4252 sdhci_readl(host, SDHCI_PRESENT_STATE));
4253 DBG("Caps: 0x%08x | Caps_1: 0x%08x\n",
4254 sdhci_readl(host, SDHCI_CAPABILITIES),
4255 sdhci_readl(host, SDHCI_CAPABILITIES_1));
4256
4257 sdhci_read_caps(host);
4258
4259 override_timeout_clk = host->timeout_clk;
4260
4261 if (host->version > SDHCI_SPEC_420) {
4262 pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
4263 mmc_hostname(mmc), host->version);
4264 }
4265
4266 if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
4267 host->flags |= SDHCI_USE_SDMA;
4268 else if (!(host->caps & SDHCI_CAN_DO_SDMA))
4269 DBG("Controller doesn't have SDMA capability\n");
4270 else
4271 host->flags |= SDHCI_USE_SDMA;
4272
4273 if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
4274 (host->flags & SDHCI_USE_SDMA)) {
4275 DBG("Disabling DMA as it is marked broken\n");
4276 host->flags &= ~SDHCI_USE_SDMA;
4277 }
4278
4279 if ((host->version >= SDHCI_SPEC_200) &&
4280 (host->caps & SDHCI_CAN_DO_ADMA2))
4281 host->flags |= SDHCI_USE_ADMA;
4282
4283 if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
4284 (host->flags & SDHCI_USE_ADMA)) {
4285 DBG("Disabling ADMA as it is marked broken\n");
4286 host->flags &= ~SDHCI_USE_ADMA;
4287 }
4288
4289 if (sdhci_can_64bit_dma(host))
4290 host->flags |= SDHCI_USE_64_BIT_DMA;
4291
4292 if (host->use_external_dma) {
4293 ret = sdhci_external_dma_init(host);
4294 if (ret == -EPROBE_DEFER)
4295 goto unreg;
4296 /*
4297 * Fall back to use the DMA/PIO integrated in standard SDHCI
4298 * instead of external DMA devices.
4299 */
4300 else if (ret)
4301 sdhci_switch_external_dma(host, false);
4302 /* Disable internal DMA sources */
4303 else
4304 host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
4305 }
4306
4307 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
4308 if (host->ops->set_dma_mask)
4309 ret = host->ops->set_dma_mask(host);
4310 else
4311 ret = sdhci_set_dma_mask(host);
4312
4313 if (!ret && host->ops->enable_dma)
4314 ret = host->ops->enable_dma(host);
4315
4316 if (ret) {
4317 pr_warn("%s: No suitable DMA available - falling back to PIO\n",
4318 mmc_hostname(mmc));
4319 host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
4320
4321 ret = 0;
4322 }
4323 }
4324
4325 /* SDMA does not support 64-bit DMA if v4 mode not set */
4326 if ((host->flags & SDHCI_USE_64_BIT_DMA) && !host->v4_mode)
4327 host->flags &= ~SDHCI_USE_SDMA;
4328
4329 if (host->flags & SDHCI_USE_ADMA) {
4330 dma_addr_t dma;
4331 void *buf;
4332
4333 if (!(host->flags & SDHCI_USE_64_BIT_DMA))
4334 host->alloc_desc_sz = SDHCI_ADMA2_32_DESC_SZ;
4335 else if (!host->alloc_desc_sz)
4336 host->alloc_desc_sz = SDHCI_ADMA2_64_DESC_SZ(host);
4337
4338 host->desc_sz = host->alloc_desc_sz;
4339 host->adma_table_sz = host->adma_table_cnt * host->desc_sz;
4340
4341 host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
4342 /*
4343 * The allocation is zeroed so that the reserved high 32 bits of
4344 * 128-bit descriptors never need to be written.
4345 */
4346 buf = dma_alloc_coherent(mmc_dev(mmc),
4347 host->align_buffer_sz + host->adma_table_sz,
4348 &dma, GFP_KERNEL);
4349 if (!buf) {
4350 pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
4351 mmc_hostname(mmc));
4352 host->flags &= ~SDHCI_USE_ADMA;
4353 } else if ((dma + host->align_buffer_sz) &
4354 (SDHCI_ADMA2_DESC_ALIGN - 1)) {
4355 pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
4356 mmc_hostname(mmc));
4357 host->flags &= ~SDHCI_USE_ADMA;
4358 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4359 host->adma_table_sz, buf, dma);
4360 } else {
4361 host->align_buffer = buf;
4362 host->align_addr = dma;
4363
4364 host->adma_table = buf + host->align_buffer_sz;
4365 host->adma_addr = dma + host->align_buffer_sz;
4366 }
4367 }
4368
4369 /*
4370 * If we use DMA, then it's up to the caller to set the DMA
4371 * mask, but PIO does not need the hw shim so we set a new
4372 * mask here in that case.
4373 */
4374 if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
4375 host->dma_mask = DMA_BIT_MASK(64);
4376 mmc_dev(mmc)->dma_mask = &host->dma_mask;
4377 }
4378
4379 if (host->version >= SDHCI_SPEC_300)
4380 host->max_clk = FIELD_GET(SDHCI_CLOCK_V3_BASE_MASK, host->caps);
4381 else
4382 host->max_clk = FIELD_GET(SDHCI_CLOCK_BASE_MASK, host->caps);
4383
4384 host->max_clk *= 1000000;
4385 if (host->max_clk == 0 || host->quirks &
4386 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
4387 if (!host->ops->get_max_clock) {
4388 pr_err("%s: Hardware doesn't specify base clock frequency.\n",
4389 mmc_hostname(mmc));
4390 ret = -ENODEV;
4391 goto undma;
4392 }
4393 host->max_clk = host->ops->get_max_clock(host);
4394 }
4395
4396 /*
4397 * In case of Host Controller v3.00, find out whether clock
4398 * multiplier is supported.
4399 */
4400 host->clk_mul = FIELD_GET(SDHCI_CLOCK_MUL_MASK, host->caps1);
4401
4402 /*
4403 * If the Clock Multiplier field is 0, programmable clock mode is not
4404 * supported; otherwise the actual clock multiplier is one more than
4405 * the value of Clock Multiplier in the Capabilities register.
4407 */
4408 if (host->clk_mul)
4409 host->clk_mul += 1;
4410
4411 /*
4412 * Set host parameters.
4413 */
4414 max_clk = host->max_clk;
4415
4416 if (host->ops->get_min_clock)
4417 mmc->f_min = host->ops->get_min_clock(host);
4418 else if (host->version >= SDHCI_SPEC_300) {
4419 if (host->clk_mul)
4420 max_clk = host->max_clk * host->clk_mul;
4421 /*
4422 * Divided Clock Mode minimum clock rate is always less than
4423 * Programmable Clock Mode minimum clock rate.
4424 */
4425 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
4426 } else
4427 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
4428
4429 if (!mmc->f_max || mmc->f_max > max_clk)
4430 mmc->f_max = max_clk;
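/*
 * Worked example (illustrative numbers) for the SDHCI v3.00+ path above:
 * with a 200 MHz base clock and a Clock Multiplier field of 3,
 * host->clk_mul becomes 4, so max_clk in programmable clock mode is
 * 800 MHz, while f_min is 200 MHz / SDHCI_MAX_DIV_SPEC_300 (2046),
 * roughly 98 kHz.
 */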
4431
4432 if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
4433 host->timeout_clk = FIELD_GET(SDHCI_TIMEOUT_CLK_MASK, host->caps);
4434
4435 if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
4436 host->timeout_clk *= 1000;
4437
4438 if (host->timeout_clk == 0) {
4439 if (!host->ops->get_timeout_clock) {
4440 pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
4441 mmc_hostname(mmc));
4442 ret = -ENODEV;
4443 goto undma;
4444 }
4445
4446 host->timeout_clk =
4447 DIV_ROUND_UP(host->ops->get_timeout_clock(host),
4448 1000);
4449 }
4450
4451 if (override_timeout_clk)
4452 host->timeout_clk = override_timeout_clk;
4453
4454 mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
4455 host->ops->get_max_timeout_count(host) : 1 << 27;
4456 mmc->max_busy_timeout /= host->timeout_clk;
4457 }
4458
4459 if (host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT &&
4460 !host->ops->get_max_timeout_count)
4461 mmc->max_busy_timeout = 0;
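/*
 * Worked example (illustrative numbers): a 48 MHz timeout clock with the
 * SDHCI_TIMEOUT_CLK_UNIT bit set yields timeout_clk = 48000 (kHz), so the
 * default maximum busy timeout above is (1 << 27) / 48000, roughly 2796 ms.
 */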
4462
4463 mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_CMD23;
4464 mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
4465
4466 if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
4467 host->flags |= SDHCI_AUTO_CMD12;
4468
4469 /*
4470 * For v3 mode, Auto-CMD23 stuff only works in ADMA or PIO.
4471 * For v4 mode, SDMA may use Auto-CMD23 as well.
4472 */
4473 if ((host->version >= SDHCI_SPEC_300) &&
4474 ((host->flags & SDHCI_USE_ADMA) ||
4475 !(host->flags & SDHCI_USE_SDMA) || host->v4_mode) &&
4476 !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
4477 host->flags |= SDHCI_AUTO_CMD23;
4478 DBG("Auto-CMD23 available\n");
4479 } else {
4480 DBG("Auto-CMD23 unavailable\n");
4481 }
4482
4483 /*
4484 * A controller may support 8-bit width, but the board itself
4485 * might not have the pins brought out. Boards that support
4486 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
4487 * their platform code before calling sdhci_add_host(), and we
4488 * won't assume 8-bit width for hosts without that CAP.
4489 */
4490 if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
4491 mmc->caps |= MMC_CAP_4_BIT_DATA;
4492
4493 if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
4494 mmc->caps &= ~MMC_CAP_CMD23;
4495
4496 if (host->caps & SDHCI_CAN_DO_HISPD)
4497 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
4498
4499 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
4500 mmc_card_is_removable(mmc) &&
4501 mmc_gpio_get_cd(mmc) < 0)
4502 mmc->caps |= MMC_CAP_NEEDS_POLL;
4503
4504 if (!IS_ERR(mmc->supply.vqmmc)) {
4505 if (enable_vqmmc) {
4506 ret = regulator_enable(mmc->supply.vqmmc);
4507 host->sdhci_core_to_disable_vqmmc = !ret;
4508 }
4509
4510 /* If vqmmc provides no 1.8V signalling, then there's no UHS */
4511 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
4512 1950000))
4513 host->caps1 &= ~(SDHCI_SUPPORT_SDR104 |
4514 SDHCI_SUPPORT_SDR50 |
4515 SDHCI_SUPPORT_DDR50);
4516
4517 /* In eMMC case vqmmc might be a fixed 1.8V regulator */
4518 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 2700000,
4519 3600000))
4520 host->flags &= ~SDHCI_SIGNALING_330;
4521
4522 if (ret) {
4523 pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
4524 mmc_hostname(mmc), ret);
4525 mmc->supply.vqmmc = ERR_PTR(-EINVAL);
4526 }
4527
4528 }
4529
4530 if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) {
4531 host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
4532 SDHCI_SUPPORT_DDR50);
4533 /*
4534 * The SDHCI controller in a SoC might support HS200/HS400
4535 * (indicated using mmc-hs200-1_8v/mmc-hs400-1_8v dt property),
4536 * but if the board is modeled such that the IO lines are not
4537 * connected to 1.8v then HS200/HS400 cannot be supported.
4538 * Disable HS200/HS400 if the board does not have 1.8v connected
4539 * to the IO lines. (Applicable for other modes in 1.8v)
4540 */
4541 mmc->caps2 &= ~(MMC_CAP2_HSX00_1_8V | MMC_CAP2_HS400_ES);
4542 mmc->caps &= ~(MMC_CAP_1_8V_DDR | MMC_CAP_UHS);
4543 }
4544
4545 /* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
4546 if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
4547 SDHCI_SUPPORT_DDR50))
4548 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
4549
4550 /* SDR104 support also implies SDR50 support */
4551 if (host->caps1 & SDHCI_SUPPORT_SDR104) {
4552 mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
4553 /* SD3.0: SDR104 is supported so (for eMMC) the caps2
4554 * field can be promoted to support HS200.
4555 */
4556 if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
4557 mmc->caps2 |= MMC_CAP2_HS200;
4558 } else if (host->caps1 & SDHCI_SUPPORT_SDR50) {
4559 mmc->caps |= MMC_CAP_UHS_SDR50;
4560 }
4561
4562 if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
4563 (host->caps1 & SDHCI_SUPPORT_HS400))
4564 mmc->caps2 |= MMC_CAP2_HS400;
4565
4566 if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
4567 (IS_ERR(mmc->supply.vqmmc) ||
4568 !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
4569 1300000)))
4570 mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;
4571
4572 if ((host->caps1 & SDHCI_SUPPORT_DDR50) &&
4573 !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
4574 mmc->caps |= MMC_CAP_UHS_DDR50;
4575
4576 /* Does the host need tuning for SDR50? */
4577 if (host->caps1 & SDHCI_USE_SDR50_TUNING)
4578 host->flags |= SDHCI_SDR50_NEEDS_TUNING;
4579
4580 /* Driver Type(s) (A, C, D) supported by the host */
4581 if (host->caps1 & SDHCI_DRIVER_TYPE_A)
4582 mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
4583 if (host->caps1 & SDHCI_DRIVER_TYPE_C)
4584 mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
4585 if (host->caps1 & SDHCI_DRIVER_TYPE_D)
4586 mmc->caps |= MMC_CAP_DRIVER_TYPE_D;
4587
4588 /* Initial value for re-tuning timer count */
4589 host->tuning_count = FIELD_GET(SDHCI_RETUNING_TIMER_COUNT_MASK,
4590 host->caps1);
4591
4592 /*
4593 * In case Re-tuning Timer is not disabled, the actual value of
4594 * re-tuning timer will be 2 ^ (n - 1).
4595 */
4596 if (host->tuning_count)
4597 host->tuning_count = 1 << (host->tuning_count - 1);
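/*
 * Worked example: a Re-tuning Timer Count field of 4 gives
 * host->tuning_count = 2 ^ (4 - 1) = 8, i.e. a re-tuning interval of
 * 8 seconds per the specification.
 */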
4598
4599 /* Re-tuning mode supported by the Host Controller */
4600 host->tuning_mode = FIELD_GET(SDHCI_RETUNING_MODE_MASK, host->caps1);
4601
4602 ocr_avail = 0;
4603
4604 /*
4605 * According to SD Host Controller spec v3.00, if the Host System
4606 * can afford more than 150mA, Host Driver should set XPC to 1. Also
4607 * the value is meaningful only if Voltage Support in the Capabilities
4608 * register is set. The actual current value is 4 times the register
4609 * value.
4610 */
4611 max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
4612 if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
4613 int curr = regulator_get_current_limit(mmc->supply.vmmc);
4614 if (curr > 0) {
4615
4616 /* convert to SDHCI_MAX_CURRENT format */
4617 curr = curr/1000; /* convert to mA */
4618 curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER;
4619
4620 curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
4621 max_current_caps =
4622 FIELD_PREP(SDHCI_MAX_CURRENT_330_MASK, curr) |
4623 FIELD_PREP(SDHCI_MAX_CURRENT_300_MASK, curr) |
4624 FIELD_PREP(SDHCI_MAX_CURRENT_180_MASK, curr);
4625 }
4626 }
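/*
 * Worked example (illustrative numbers): a vmmc regulator limited to
 * 1000000 uA becomes 1000 mA, which divided by
 * SDHCI_MAX_CURRENT_MULTIPLIER (4) gives 250 register units, well under
 * SDHCI_MAX_CURRENT_LIMIT, and is replicated into the 3.3 V, 3.0 V and
 * 1.8 V fields above.
 */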
4627
4628 if (host->caps & SDHCI_CAN_VDD_330) {
4629 ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
4630
4631 mmc->max_current_330 = FIELD_GET(SDHCI_MAX_CURRENT_330_MASK,
4632 max_current_caps) *
4633 SDHCI_MAX_CURRENT_MULTIPLIER;
4634 }
4635 if (host->caps & SDHCI_CAN_VDD_300) {
4636 ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
4637
4638 mmc->max_current_300 = FIELD_GET(SDHCI_MAX_CURRENT_300_MASK,
4639 max_current_caps) *
4640 SDHCI_MAX_CURRENT_MULTIPLIER;
4641 }
4642 if (host->caps & SDHCI_CAN_VDD_180) {
4643 ocr_avail |= MMC_VDD_165_195;
4644
4645 mmc->max_current_180 = FIELD_GET(SDHCI_MAX_CURRENT_180_MASK,
4646 max_current_caps) *
4647 SDHCI_MAX_CURRENT_MULTIPLIER;
4648 }
4649
4650 /* If OCR set by host, use it instead. */
4651 if (host->ocr_mask)
4652 ocr_avail = host->ocr_mask;
4653
4654 /* If OCR set by external regulators, give it highest prio. */
4655 if (mmc->ocr_avail)
4656 ocr_avail = mmc->ocr_avail;
4657
4658 mmc->ocr_avail = ocr_avail;
4659 mmc->ocr_avail_sdio = ocr_avail;
4660 if (host->ocr_avail_sdio)
4661 mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
4662 mmc->ocr_avail_sd = ocr_avail;
4663 if (host->ocr_avail_sd)
4664 mmc->ocr_avail_sd &= host->ocr_avail_sd;
4665 else /* normal SD controllers don't support 1.8V */
4666 mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
4667 mmc->ocr_avail_mmc = ocr_avail;
4668 if (host->ocr_avail_mmc)
4669 mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
4670
4671 if (mmc->ocr_avail == 0) {
4672 pr_err("%s: Hardware doesn't report any support voltages.\n",
4673 mmc_hostname(mmc));
4674 ret = -ENODEV;
4675 goto unreg;
4676 }
4677
4678 if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
4679 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
4680 MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) ||
4681 (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)))
4682 host->flags |= SDHCI_SIGNALING_180;
4683
4684 if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
4685 host->flags |= SDHCI_SIGNALING_120;
4686
4687 spin_lock_init(&host->lock);
4688
4689 /*
4690 * Maximum number of sectors in one transfer. Limited by SDMA boundary
4691 * size (512KiB). Note some tuning modes impose a 4MiB limit, but
4692 * 512KiB is smaller anyway.
4693 */
4694 mmc->max_req_size = 524288;
4695
4696 /*
4697 * Maximum number of segments. Depends on whether the hardware
4698 * can do scatter/gather or not.
4699 */
4700 if (host->flags & SDHCI_USE_ADMA) {
4701 mmc->max_segs = SDHCI_MAX_SEGS;
4702 } else if (host->flags & SDHCI_USE_SDMA) {
4703 mmc->max_segs = 1;
4704 mmc->max_req_size = min_t(size_t, mmc->max_req_size,
4705 dma_max_mapping_size(mmc_dev(mmc)));
4706 } else { /* PIO */
4707 mmc->max_segs = SDHCI_MAX_SEGS;
4708 }
4709
4710 /*
4711 * Maximum segment size. Could be one segment with the maximum number
4712 * of bytes. When doing hardware scatter/gather, each entry cannot
4713 * be larger than 64 KiB though.
4714 */
4715 if (host->flags & SDHCI_USE_ADMA) {
4716 if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC) {
4717 host->max_adma = 65532; /* 32-bit alignment */
4718 mmc->max_seg_size = 65535;
4719 } else {
4720 mmc->max_seg_size = 65536;
4721 }
4722 } else {
4723 mmc->max_seg_size = mmc->max_req_size;
4724 }
4725
4726 /*
4727 * Maximum block size. This varies from controller to controller and
4728 * is specified in the capabilities register.
4729 */
4730 if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
4731 mmc->max_blk_size = 2;
4732 } else {
4733 mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >>
4734 SDHCI_MAX_BLOCK_SHIFT;
4735 if (mmc->max_blk_size >= 3) {
4736 pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
4737 mmc_hostname(mmc));
4738 mmc->max_blk_size = 0;
4739 }
4740 }
4741
4742 mmc->max_blk_size = 512 << mmc->max_blk_size;
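/*
 * Worked example: a capabilities field value of 0 gives 512-byte blocks,
 * 1 gives 1024 and 2 gives 2048; anything larger is treated as invalid
 * above and falls back to 512 bytes.
 */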
4743
4744 /*
4745 * Maximum block count.
4746 */
4747 mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
4748
4749 if (mmc->max_segs == 1)
4750 /* This may alter mmc->*_blk_* parameters */
4751 sdhci_allocate_bounce_buffer(host);
4752
4753 return 0;
4754
4755 unreg:
4756 if (host->sdhci_core_to_disable_vqmmc)
4757 regulator_disable(mmc->supply.vqmmc);
4758 undma:
4759 if (host->align_buffer)
4760 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4761 host->adma_table_sz, host->align_buffer,
4762 host->align_addr);
4763 host->adma_table = NULL;
4764 host->align_buffer = NULL;
4765
4766 return ret;
4767 }
4768 EXPORT_SYMBOL_GPL(sdhci_setup_host);
4769
4770 void sdhci_cleanup_host(struct sdhci_host *host)
4771 {
4772 struct mmc_host *mmc = host->mmc;
4773
4774 if (host->sdhci_core_to_disable_vqmmc)
4775 regulator_disable(mmc->supply.vqmmc);
4776
4777 if (host->align_buffer)
4778 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4779 host->adma_table_sz, host->align_buffer,
4780 host->align_addr);
4781
4782 if (host->use_external_dma)
4783 sdhci_external_dma_release(host);
4784
4785 host->adma_table = NULL;
4786 host->align_buffer = NULL;
4787 }
4788 EXPORT_SYMBOL_GPL(sdhci_cleanup_host);
4789
4790 int __sdhci_add_host(struct sdhci_host *host)
4791 {
4792 unsigned int flags = WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI;
4793 struct mmc_host *mmc = host->mmc;
4794 int ret;
4795
4796 if ((mmc->caps2 & MMC_CAP2_CQE) &&
4797 (host->quirks & SDHCI_QUIRK_BROKEN_CQE)) {
4798 mmc->caps2 &= ~MMC_CAP2_CQE;
4799 mmc->cqe_ops = NULL;
4800 }
4801
4802 host->complete_wq = alloc_workqueue("sdhci", flags, 0);
4803 if (!host->complete_wq)
4804 return -ENOMEM;
4805
4806 INIT_WORK(&host->complete_work, sdhci_complete_work);
4807
4808 timer_setup(&host->timer, sdhci_timeout_timer, 0);
4809 timer_setup(&host->data_timer, sdhci_timeout_data_timer, 0);
4810
4811 init_waitqueue_head(&host->buf_ready_int);
4812
4813 sdhci_init(host, 0);
4814
4815 ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
4816 IRQF_SHARED, mmc_hostname(mmc), host);
4817 if (ret) {
4818 pr_err("%s: Failed to request IRQ %d: %d\n",
4819 mmc_hostname(mmc), host->irq, ret);
4820 goto unwq;
4821 }
4822
4823 ret = sdhci_led_register(host);
4824 if (ret) {
4825 pr_err("%s: Failed to register LED device: %d\n",
4826 mmc_hostname(mmc), ret);
4827 goto unirq;
4828 }
4829
4830 ret = mmc_add_host(mmc);
4831 if (ret)
4832 goto unled;
4833
4834 pr_info("%s: SDHCI controller on %s [%s] using %s\n",
4835 mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
4836 host->use_external_dma ? "External DMA" :
4837 (host->flags & SDHCI_USE_ADMA) ?
4838 (host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
4839 (host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
4840
4841 sdhci_enable_card_detection(host);
4842
4843 return 0;
4844
4845 unled:
4846 sdhci_led_unregister(host);
4847 unirq:
4848 sdhci_do_reset(host, SDHCI_RESET_ALL);
4849 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
4850 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
4851 free_irq(host->irq, host);
4852 unwq:
4853 destroy_workqueue(host->complete_wq);
4854
4855 return ret;
4856 }
4857 EXPORT_SYMBOL_GPL(__sdhci_add_host);
4858
4859 int sdhci_add_host(struct sdhci_host *host)
4860 {
4861 int ret;
4862
4863 ret = sdhci_setup_host(host);
4864 if (ret)
4865 return ret;
4866
4867 ret = __sdhci_add_host(host);
4868 if (ret)
4869 goto cleanup;
4870
4871 return 0;
4872
4873 cleanup:
4874 sdhci_cleanup_host(host);
4875
4876 return ret;
4877 }
4878 EXPORT_SYMBOL_GPL(sdhci_add_host);
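
/*
 * Illustrative sketch (not part of this driver): a minimal probe path for a
 * glue driver using the one-step sdhci_add_host(). Drivers that need to
 * adjust capabilities after reading the hardware use the
 * sdhci_setup_host() / __sdhci_add_host() pair instead, with
 * sdhci_cleanup_host() on the error path. The my_* names are assumptions
 * made only for this example.
 *
 *	static int my_sdhci_probe(struct platform_device *pdev)
 *	{
 *		struct sdhci_host *host;
 *		int ret;
 *
 *		host = sdhci_alloc_host(&pdev->dev, 0);
 *		if (IS_ERR(host))
 *			return PTR_ERR(host);
 *
 *		// ... obtain resources, set host->ioaddr, host->irq, host->ops ...
 *
 *		ret = sdhci_add_host(host);
 *		if (ret)
 *			sdhci_free_host(host);
 *
 *		return ret;
 *	}
 */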
4879
4880 void sdhci_remove_host(struct sdhci_host *host, int dead)
4881 {
4882 struct mmc_host *mmc = host->mmc;
4883 unsigned long flags;
4884
4885 if (dead) {
4886 spin_lock_irqsave(&host->lock, flags);
4887
4888 host->flags |= SDHCI_DEVICE_DEAD;
4889
4890 if (sdhci_has_requests(host)) {
4891 pr_err("%s: Controller removed during "
4892 " transfer!\n", mmc_hostname(mmc));
4893 sdhci_error_out_mrqs(host, -ENOMEDIUM);
4894 }
4895
4896 spin_unlock_irqrestore(&host->lock, flags);
4897 }
4898
4899 sdhci_disable_card_detection(host);
4900
4901 mmc_remove_host(mmc);
4902
4903 sdhci_led_unregister(host);
4904
4905 if (!dead)
4906 sdhci_do_reset(host, SDHCI_RESET_ALL);
4907
4908 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
4909 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
4910 free_irq(host->irq, host);
4911
4912 del_timer_sync(&host->timer);
4913 del_timer_sync(&host->data_timer);
4914
4915 destroy_workqueue(host->complete_wq);
4916
4917 if (host->sdhci_core_to_disable_vqmmc)
4918 regulator_disable(mmc->supply.vqmmc);
4919
4920 if (host->align_buffer)
4921 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4922 host->adma_table_sz, host->align_buffer,
4923 host->align_addr);
4924
4925 if (host->use_external_dma)
4926 sdhci_external_dma_release(host);
4927
4928 host->adma_table = NULL;
4929 host->align_buffer = NULL;
4930 }
4931
4932 EXPORT_SYMBOL_GPL(sdhci_remove_host);
4933
4934 void sdhci_free_host(struct sdhci_host *host)
4935 {
4936 mmc_free_host(host->mmc);
4937 }
4938
4939 EXPORT_SYMBOL_GPL(sdhci_free_host);
4940
4941 /*****************************************************************************\
4942 * *
4943 * Driver init/exit *
4944 * *
4945 \*****************************************************************************/
4946
4947 static int __init sdhci_drv_init(void)
4948 {
4949 pr_info(DRIVER_NAME
4950 ": Secure Digital Host Controller Interface driver\n");
4951 pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
4952
4953 return 0;
4954 }
4955
4956 static void __exit sdhci_drv_exit(void)
4957 {
4958 }
4959
4960 module_init(sdhci_drv_init);
4961 module_exit(sdhci_drv_exit);
4962
4963 module_param(debug_quirks, uint, 0444);
4964 module_param(debug_quirks2, uint, 0444);
4965
4966 MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
4967 MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
4968 MODULE_LICENSE("GPL");
4969
4970 MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
4971 MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");
4972