1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
4 *
5 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
6 *
7 * Thanks to the following companies for their support:
8 *
9 * - JMicron (hardware and technical support)
10 */
11
12 #include <linux/delay.h>
13 #include <linux/ktime.h>
14 #include <linux/highmem.h>
15 #include <linux/io.h>
16 #include <linux/module.h>
17 #include <linux/dma-mapping.h>
18 #include <linux/slab.h>
19 #include <linux/scatterlist.h>
20 #include <linux/sizes.h>
21 #include <linux/swiotlb.h>
22 #include <linux/regulator/consumer.h>
23 #include <linux/pm_runtime.h>
24 #include <linux/of.h>
25
26 #include <linux/leds.h>
27
28 #include <linux/mmc/mmc.h>
29 #include <linux/mmc/host.h>
30 #include <linux/mmc/card.h>
31 #include <linux/mmc/sdio.h>
32 #include <linux/mmc/slot-gpio.h>
33
34 #include "sdhci.h"
35
36 #define DRIVER_NAME "sdhci"
37
38 #define DBG(f, x...) \
39 pr_debug("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)
40
41 #define SDHCI_DUMP(f, x...) \
42 pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)
43
44 #define MAX_TUNING_LOOP 40
45
46 static unsigned int debug_quirks = 0;
47 static unsigned int debug_quirks2;
48
49 static void sdhci_finish_data(struct sdhci_host *);
50
51 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
52
53 void sdhci_dumpregs(struct sdhci_host *host)
54 {
55 SDHCI_DUMP("============ SDHCI REGISTER DUMP ===========\n");
56
57 SDHCI_DUMP("Sys addr: 0x%08x | Version: 0x%08x\n",
58 sdhci_readl(host, SDHCI_DMA_ADDRESS),
59 sdhci_readw(host, SDHCI_HOST_VERSION));
60 SDHCI_DUMP("Blk size: 0x%08x | Blk cnt: 0x%08x\n",
61 sdhci_readw(host, SDHCI_BLOCK_SIZE),
62 sdhci_readw(host, SDHCI_BLOCK_COUNT));
63 SDHCI_DUMP("Argument: 0x%08x | Trn mode: 0x%08x\n",
64 sdhci_readl(host, SDHCI_ARGUMENT),
65 sdhci_readw(host, SDHCI_TRANSFER_MODE));
66 SDHCI_DUMP("Present: 0x%08x | Host ctl: 0x%08x\n",
67 sdhci_readl(host, SDHCI_PRESENT_STATE),
68 sdhci_readb(host, SDHCI_HOST_CONTROL));
69 SDHCI_DUMP("Power: 0x%08x | Blk gap: 0x%08x\n",
70 sdhci_readb(host, SDHCI_POWER_CONTROL),
71 sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
72 SDHCI_DUMP("Wake-up: 0x%08x | Clock: 0x%08x\n",
73 sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
74 sdhci_readw(host, SDHCI_CLOCK_CONTROL));
75 SDHCI_DUMP("Timeout: 0x%08x | Int stat: 0x%08x\n",
76 sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
77 sdhci_readl(host, SDHCI_INT_STATUS));
78 SDHCI_DUMP("Int enab: 0x%08x | Sig enab: 0x%08x\n",
79 sdhci_readl(host, SDHCI_INT_ENABLE),
80 sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
81 SDHCI_DUMP("ACmd stat: 0x%08x | Slot int: 0x%08x\n",
82 sdhci_readw(host, SDHCI_AUTO_CMD_STATUS),
83 sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
84 SDHCI_DUMP("Caps: 0x%08x | Caps_1: 0x%08x\n",
85 sdhci_readl(host, SDHCI_CAPABILITIES),
86 sdhci_readl(host, SDHCI_CAPABILITIES_1));
87 SDHCI_DUMP("Cmd: 0x%08x | Max curr: 0x%08x\n",
88 sdhci_readw(host, SDHCI_COMMAND),
89 sdhci_readl(host, SDHCI_MAX_CURRENT));
90 SDHCI_DUMP("Resp[0]: 0x%08x | Resp[1]: 0x%08x\n",
91 sdhci_readl(host, SDHCI_RESPONSE),
92 sdhci_readl(host, SDHCI_RESPONSE + 4));
93 SDHCI_DUMP("Resp[2]: 0x%08x | Resp[3]: 0x%08x\n",
94 sdhci_readl(host, SDHCI_RESPONSE + 8),
95 sdhci_readl(host, SDHCI_RESPONSE + 12));
96 SDHCI_DUMP("Host ctl2: 0x%08x\n",
97 sdhci_readw(host, SDHCI_HOST_CONTROL2));
98
99 if (host->flags & SDHCI_USE_ADMA) {
100 if (host->flags & SDHCI_USE_64_BIT_DMA) {
101 SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
102 sdhci_readl(host, SDHCI_ADMA_ERROR),
103 sdhci_readl(host, SDHCI_ADMA_ADDRESS_HI),
104 sdhci_readl(host, SDHCI_ADMA_ADDRESS));
105 } else {
106 SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
107 sdhci_readl(host, SDHCI_ADMA_ERROR),
108 sdhci_readl(host, SDHCI_ADMA_ADDRESS));
109 }
110 }
111
112 SDHCI_DUMP("============================================\n");
113 }
114 EXPORT_SYMBOL_GPL(sdhci_dumpregs);
115
116 /*****************************************************************************\
117 * *
118 * Low level functions *
119 * *
120 \*****************************************************************************/
121
122 static void sdhci_do_enable_v4_mode(struct sdhci_host *host)
123 {
124 u16 ctrl2;
125
126 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
127 if (ctrl2 & SDHCI_CTRL_V4_MODE)
128 return;
129
130 ctrl2 |= SDHCI_CTRL_V4_MODE;
131 sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
132 }
133
134 /*
135 * This can be called before sdhci_add_host() by a vendor's host controller
136 * driver to enable v4 mode if supported.
137 */
138 void sdhci_enable_v4_mode(struct sdhci_host *host)
139 {
140 host->v4_mode = true;
141 sdhci_do_enable_v4_mode(host);
142 }
143 EXPORT_SYMBOL_GPL(sdhci_enable_v4_mode);
144
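/* A command occupies the DAT lines if it carries data or uses busy signalling (R1b). */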
145 static inline bool sdhci_data_line_cmd(struct mmc_command *cmd)
146 {
147 return cmd->data || cmd->flags & MMC_RSP_BUSY;
148 }
149
150 static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
151 {
152 u32 present;
153
154 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
155 !mmc_card_is_removable(host->mmc))
156 return;
157
158 if (enable) {
159 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
160 SDHCI_CARD_PRESENT;
161
162 host->ier |= present ? SDHCI_INT_CARD_REMOVE :
163 SDHCI_INT_CARD_INSERT;
164 } else {
165 host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
166 }
167
168 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
169 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
170 }
171
172 static void sdhci_enable_card_detection(struct sdhci_host *host)
173 {
174 sdhci_set_card_detection(host, true);
175 }
176
177 static void sdhci_disable_card_detection(struct sdhci_host *host)
178 {
179 sdhci_set_card_detection(host, false);
180 }
181
182 static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
183 {
184 if (host->bus_on)
185 return;
186 host->bus_on = true;
187 pm_runtime_get_noresume(host->mmc->parent);
188 }
189
190 static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
191 {
192 if (!host->bus_on)
193 return;
194 host->bus_on = false;
195 pm_runtime_put_noidle(host->mmc->parent);
196 }
197
198 void sdhci_reset(struct sdhci_host *host, u8 mask)
199 {
200 ktime_t timeout;
201
202 sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
203
204 if (mask & SDHCI_RESET_ALL) {
205 host->clock = 0;
206 /* Reset-all turns off SD Bus Power */
207 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
208 sdhci_runtime_pm_bus_off(host);
209 }
210
211 /* Wait max 100 ms */
212 timeout = ktime_add_ms(ktime_get(), 100);
213
214 /* hw clears the bit when it's done */
215 while (1) {
216 bool timedout = ktime_after(ktime_get(), timeout);
217
218 if (!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask))
219 break;
220 if (timedout) {
221 pr_err("%s: Reset 0x%x never completed.\n",
222 mmc_hostname(host->mmc), (int)mask);
223 sdhci_dumpregs(host);
224 return;
225 }
226 udelay(10);
227 }
228 }
229 EXPORT_SYMBOL_GPL(sdhci_reset);
230
231 static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
232 {
233 if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
234 struct mmc_host *mmc = host->mmc;
235
236 if (!mmc->ops->get_cd(mmc))
237 return;
238 }
239
240 host->ops->reset(host, mask);
241
242 if (mask & SDHCI_RESET_ALL) {
243 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
244 if (host->ops->enable_dma)
245 host->ops->enable_dma(host);
246 }
247
248 /* Resetting the controller clears many settings */
249 host->preset_enabled = false;
250 }
251 }
252
253 static void sdhci_set_default_irqs(struct sdhci_host *host)
254 {
255 host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
256 SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
257 SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
258 SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
259 SDHCI_INT_RESPONSE;
260
261 if (host->tuning_mode == SDHCI_TUNING_MODE_2 ||
262 host->tuning_mode == SDHCI_TUNING_MODE_3)
263 host->ier |= SDHCI_INT_RETUNE;
264
265 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
266 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
267 }
268
269 static void sdhci_config_dma(struct sdhci_host *host)
270 {
271 u8 ctrl;
272 u16 ctrl2;
273
274 if (host->version < SDHCI_SPEC_200)
275 return;
276
277 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
278
279 /*
280 * Always adjust the DMA selection as some controllers
281 * (e.g. JMicron) can't do PIO properly when the selection
282 * is ADMA.
283 */
284 ctrl &= ~SDHCI_CTRL_DMA_MASK;
285 if (!(host->flags & SDHCI_REQ_USE_DMA))
286 goto out;
287
288 /* Note if DMA Select is zero then SDMA is selected */
289 if (host->flags & SDHCI_USE_ADMA)
290 ctrl |= SDHCI_CTRL_ADMA32;
291
292 if (host->flags & SDHCI_USE_64_BIT_DMA) {
293 /*
294 * If v4 mode, all supported DMA can be 64-bit addressing if
295 * controller supports 64-bit system address, otherwise only
296 * ADMA can support 64-bit addressing.
297 */
298 if (host->v4_mode) {
299 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
300 ctrl2 |= SDHCI_CTRL_64BIT_ADDR;
301 sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
302 } else if (host->flags & SDHCI_USE_ADMA) {
303 /*
304 * Don't need to undo SDHCI_CTRL_ADMA32 in order to
305 * set SDHCI_CTRL_ADMA64.
306 */
307 ctrl |= SDHCI_CTRL_ADMA64;
308 }
309 }
310
311 out:
312 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
313 }
314
315 static void sdhci_init(struct sdhci_host *host, int soft)
316 {
317 struct mmc_host *mmc = host->mmc;
318
319 if (soft)
320 sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
321 else
322 sdhci_do_reset(host, SDHCI_RESET_ALL);
323
324 if (host->v4_mode)
325 sdhci_do_enable_v4_mode(host);
326
327 sdhci_set_default_irqs(host);
328
329 host->cqe_on = false;
330
331 if (soft) {
332 /* force clock reconfiguration */
333 host->clock = 0;
334 mmc->ops->set_ios(mmc, &mmc->ios);
335 }
336 }
337
338 static void sdhci_reinit(struct sdhci_host *host)
339 {
340 sdhci_init(host, 0);
341 sdhci_enable_card_detection(host);
342 }
343
344 static void __sdhci_led_activate(struct sdhci_host *host)
345 {
346 u8 ctrl;
347
348 if (host->quirks & SDHCI_QUIRK_NO_LED)
349 return;
350
351 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
352 ctrl |= SDHCI_CTRL_LED;
353 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
354 }
355
356 static void __sdhci_led_deactivate(struct sdhci_host *host)
357 {
358 u8 ctrl;
359
360 if (host->quirks & SDHCI_QUIRK_NO_LED)
361 return;
362
363 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
364 ctrl &= ~SDHCI_CTRL_LED;
365 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
366 }
367
368 #if IS_REACHABLE(CONFIG_LEDS_CLASS)
369 static void sdhci_led_control(struct led_classdev *led,
370 enum led_brightness brightness)
371 {
372 struct sdhci_host *host = container_of(led, struct sdhci_host, led);
373 unsigned long flags;
374
375 spin_lock_irqsave(&host->lock, flags);
376
377 if (host->runtime_suspended)
378 goto out;
379
380 if (brightness == LED_OFF)
381 __sdhci_led_deactivate(host);
382 else
383 __sdhci_led_activate(host);
384 out:
385 spin_unlock_irqrestore(&host->lock, flags);
386 }
387
388 static int sdhci_led_register(struct sdhci_host *host)
389 {
390 struct mmc_host *mmc = host->mmc;
391
392 if (host->quirks & SDHCI_QUIRK_NO_LED)
393 return 0;
394
395 snprintf(host->led_name, sizeof(host->led_name),
396 "%s::", mmc_hostname(mmc));
397
398 host->led.name = host->led_name;
399 host->led.brightness = LED_OFF;
400 host->led.default_trigger = mmc_hostname(mmc);
401 host->led.brightness_set = sdhci_led_control;
402
403 return led_classdev_register(mmc_dev(mmc), &host->led);
404 }
405
406 static void sdhci_led_unregister(struct sdhci_host *host)
407 {
408 if (host->quirks & SDHCI_QUIRK_NO_LED)
409 return;
410
411 led_classdev_unregister(&host->led);
412 }
413
414 static inline void sdhci_led_activate(struct sdhci_host *host)
415 {
416 }
417
418 static inline void sdhci_led_deactivate(struct sdhci_host *host)
419 {
420 }
421
422 #else
423
424 static inline int sdhci_led_register(struct sdhci_host *host)
425 {
426 return 0;
427 }
428
429 static inline void sdhci_led_unregister(struct sdhci_host *host)
430 {
431 }
432
433 static inline void sdhci_led_activate(struct sdhci_host *host)
434 {
435 __sdhci_led_activate(host);
436 }
437
438 static inline void sdhci_led_deactivate(struct sdhci_host *host)
439 {
440 __sdhci_led_deactivate(host);
441 }
442
443 #endif
444
445 static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
446 unsigned long timeout)
447 {
448 if (sdhci_data_line_cmd(mrq->cmd))
449 mod_timer(&host->data_timer, timeout);
450 else
451 mod_timer(&host->timer, timeout);
452 }
453
454 static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq)
455 {
456 if (sdhci_data_line_cmd(mrq->cmd))
457 del_timer(&host->data_timer);
458 else
459 del_timer(&host->timer);
460 }
461
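/* True while a command or a data/busy command is still outstanding. */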
462 static inline bool sdhci_has_requests(struct sdhci_host *host)
463 {
464 return host->cmd || host->data_cmd;
465 }
466
467 /*****************************************************************************\
468 * *
469 * Core functions *
470 * *
471 \*****************************************************************************/
472
473 static void sdhci_read_block_pio(struct sdhci_host *host)
474 {
475 unsigned long flags;
476 size_t blksize, len, chunk;
477 u32 uninitialized_var(scratch);
478 u8 *buf;
479
480 DBG("PIO reading\n");
481
482 blksize = host->data->blksz;
483 chunk = 0;
484
485 local_irq_save(flags);
486
487 while (blksize) {
488 BUG_ON(!sg_miter_next(&host->sg_miter));
489
490 len = min(host->sg_miter.length, blksize);
491
492 blksize -= len;
493 host->sg_miter.consumed = len;
494
495 buf = host->sg_miter.addr;
496
497 while (len) {
498 if (chunk == 0) {
499 scratch = sdhci_readl(host, SDHCI_BUFFER);
500 chunk = 4;
501 }
502
503 *buf = scratch & 0xFF;
504
505 buf++;
506 scratch >>= 8;
507 chunk--;
508 len--;
509 }
510 }
511
512 sg_miter_stop(&host->sg_miter);
513
514 local_irq_restore(flags);
515 }
516
517 static void sdhci_write_block_pio(struct sdhci_host *host)
518 {
519 unsigned long flags;
520 size_t blksize, len, chunk;
521 u32 scratch;
522 u8 *buf;
523
524 DBG("PIO writing\n");
525
526 blksize = host->data->blksz;
527 chunk = 0;
528 scratch = 0;
529
530 local_irq_save(flags);
531
532 while (blksize) {
533 BUG_ON(!sg_miter_next(&host->sg_miter));
534
535 len = min(host->sg_miter.length, blksize);
536
537 blksize -= len;
538 host->sg_miter.consumed = len;
539
540 buf = host->sg_miter.addr;
541
542 while (len) {
543 scratch |= (u32)*buf << (chunk * 8);
544
545 buf++;
546 chunk++;
547 len--;
548
549 if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
550 sdhci_writel(host, scratch, SDHCI_BUFFER);
551 chunk = 0;
552 scratch = 0;
553 }
554 }
555 }
556
557 sg_miter_stop(&host->sg_miter);
558
559 local_irq_restore(flags);
560 }
561
562 static void sdhci_transfer_pio(struct sdhci_host *host)
563 {
564 u32 mask;
565
566 if (host->blocks == 0)
567 return;
568
569 if (host->data->flags & MMC_DATA_READ)
570 mask = SDHCI_DATA_AVAILABLE;
571 else
572 mask = SDHCI_SPACE_AVAILABLE;
573
574 /*
575 * Some controllers (JMicron JMB38x) mess up the buffer bits
576 * for transfers < 4 bytes. As long as it is just one block,
577 * we can ignore the bits.
578 */
579 if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
580 (host->data->blocks == 1))
581 mask = ~0;
582
583 while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
584 if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
585 udelay(100);
586
587 if (host->data->flags & MMC_DATA_READ)
588 sdhci_read_block_pio(host);
589 else
590 sdhci_write_block_pio(host);
591
592 host->blocks--;
593 if (host->blocks == 0)
594 break;
595 }
596
597 DBG("PIO transfer complete.\n");
598 }
599
600 static int sdhci_pre_dma_transfer(struct sdhci_host *host,
601 struct mmc_data *data, int cookie)
602 {
603 int sg_count;
604
605 /*
606 * If the data buffers are already mapped, return the previous
607 * dma_map_sg() result.
608 */
609 if (data->host_cookie == COOKIE_PRE_MAPPED)
610 return data->sg_count;
611
612 /* Bounce write requests to the bounce buffer */
613 if (host->bounce_buffer) {
614 unsigned int length = data->blksz * data->blocks;
615
616 if (length > host->bounce_buffer_size) {
617 pr_err("%s: asked for transfer of %u bytes exceeds bounce buffer %u bytes\n",
618 mmc_hostname(host->mmc), length,
619 host->bounce_buffer_size);
620 return -EIO;
621 }
622 if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) {
623 /* Copy the data to the bounce buffer */
624 sg_copy_to_buffer(data->sg, data->sg_len,
625 host->bounce_buffer,
626 length);
627 }
628 /* Switch ownership to the DMA */
629 dma_sync_single_for_device(host->mmc->parent,
630 host->bounce_addr,
631 host->bounce_buffer_size,
632 mmc_get_dma_dir(data));
633 /* Just a dummy value */
634 sg_count = 1;
635 } else {
636 /* Just access the data directly from memory */
637 sg_count = dma_map_sg(mmc_dev(host->mmc),
638 data->sg, data->sg_len,
639 mmc_get_dma_dir(data));
640 }
641
642 if (sg_count == 0)
643 return -ENOSPC;
644
645 data->sg_count = sg_count;
646 data->host_cookie = cookie;
647
648 return sg_count;
649 }
650
651 static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
652 {
653 local_irq_save(*flags);
654 return kmap_atomic(sg_page(sg)) + sg->offset;
655 }
656
657 static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
658 {
659 kunmap_atomic(buffer);
660 local_irq_restore(*flags);
661 }
662
663 void sdhci_adma_write_desc(struct sdhci_host *host, void **desc,
664 dma_addr_t addr, int len, unsigned int cmd)
665 {
666 struct sdhci_adma2_64_desc *dma_desc = *desc;
667
668 /* 32-bit and 64-bit descriptors have these members in same position */
669 dma_desc->cmd = cpu_to_le16(cmd);
670 dma_desc->len = cpu_to_le16(len);
671 dma_desc->addr_lo = cpu_to_le32(lower_32_bits(addr));
672
673 if (host->flags & SDHCI_USE_64_BIT_DMA)
674 dma_desc->addr_hi = cpu_to_le32(upper_32_bits(addr));
675
676 *desc += host->desc_sz;
677 }
678 EXPORT_SYMBOL_GPL(sdhci_adma_write_desc);
679
680 static inline void __sdhci_adma_write_desc(struct sdhci_host *host,
681 void **desc, dma_addr_t addr,
682 int len, unsigned int cmd)
683 {
684 if (host->ops->adma_write_desc)
685 host->ops->adma_write_desc(host, desc, addr, len, cmd);
686 else
687 sdhci_adma_write_desc(host, desc, addr, len, cmd);
688 }
689
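/* Mark a descriptor as the last entry in the ADMA2 descriptor table. */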
690 static void sdhci_adma_mark_end(void *desc)
691 {
692 struct sdhci_adma2_64_desc *dma_desc = desc;
693
694 /* 32-bit and 64-bit descriptors have 'cmd' in same position */
695 dma_desc->cmd |= cpu_to_le16(ADMA2_END);
696 }
697
698 static void sdhci_adma_table_pre(struct sdhci_host *host,
699 struct mmc_data *data, int sg_count)
700 {
701 struct scatterlist *sg;
702 unsigned long flags;
703 dma_addr_t addr, align_addr;
704 void *desc, *align;
705 char *buffer;
706 int len, offset, i;
707
708 /*
709 * The spec does not specify endianness of descriptor table.
710 * We currently guess that it is LE.
711 */
712
713 host->sg_count = sg_count;
714
715 desc = host->adma_table;
716 align = host->align_buffer;
717
718 align_addr = host->align_addr;
719
720 for_each_sg(data->sg, sg, host->sg_count, i) {
721 addr = sg_dma_address(sg);
722 len = sg_dma_len(sg);
723
724 /*
725 * The SDHCI specification states that ADMA addresses must
726 * be 32-bit aligned. If they aren't, then we use a bounce
727 * buffer for the (up to three) bytes that screw up the
728 * alignment.
729 */
730 offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
731 SDHCI_ADMA2_MASK;
732 if (offset) {
733 if (data->flags & MMC_DATA_WRITE) {
734 buffer = sdhci_kmap_atomic(sg, &flags);
735 memcpy(align, buffer, offset);
736 sdhci_kunmap_atomic(buffer, &flags);
737 }
738
739 /* tran, valid */
740 __sdhci_adma_write_desc(host, &desc, align_addr,
741 offset, ADMA2_TRAN_VALID);
742
743 BUG_ON(offset > 65536);
744
745 align += SDHCI_ADMA2_ALIGN;
746 align_addr += SDHCI_ADMA2_ALIGN;
747
748 addr += offset;
749 len -= offset;
750 }
751
752 BUG_ON(len > 65536);
753
754 /* tran, valid */
755 if (len)
756 __sdhci_adma_write_desc(host, &desc, addr, len,
757 ADMA2_TRAN_VALID);
758
759 /*
760 * If this triggers then we have a calculation bug
761 * somewhere. :/
762 */
763 WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
764 }
765
766 if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
767 /* Mark the last descriptor as the terminating descriptor */
768 if (desc != host->adma_table) {
769 desc -= host->desc_sz;
770 sdhci_adma_mark_end(desc);
771 }
772 } else {
773 /* Add a terminating entry - nop, end, valid */
774 __sdhci_adma_write_desc(host, &desc, 0, 0, ADMA2_NOP_END_VALID);
775 }
776 }
777
778 static void sdhci_adma_table_post(struct sdhci_host *host,
779 struct mmc_data *data)
780 {
781 struct scatterlist *sg;
782 int i, size;
783 void *align;
784 char *buffer;
785 unsigned long flags;
786
787 if (data->flags & MMC_DATA_READ) {
788 bool has_unaligned = false;
789
790 /* Do a quick scan of the SG list for any unaligned mappings */
791 for_each_sg(data->sg, sg, host->sg_count, i)
792 if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
793 has_unaligned = true;
794 break;
795 }
796
797 if (has_unaligned) {
798 dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
799 data->sg_len, DMA_FROM_DEVICE);
800
801 align = host->align_buffer;
802
803 for_each_sg(data->sg, sg, host->sg_count, i) {
804 if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
805 size = SDHCI_ADMA2_ALIGN -
806 (sg_dma_address(sg) & SDHCI_ADMA2_MASK);
807
808 buffer = sdhci_kmap_atomic(sg, &flags);
809 memcpy(buffer, align, size);
810 sdhci_kunmap_atomic(buffer, &flags);
811
812 align += SDHCI_ADMA2_ALIGN;
813 }
814 }
815 }
816 }
817 }
818
819 static void sdhci_set_adma_addr(struct sdhci_host *host, dma_addr_t addr)
820 {
821 sdhci_writel(host, lower_32_bits(addr), SDHCI_ADMA_ADDRESS);
822 if (host->flags & SDHCI_USE_64_BIT_DMA)
823 sdhci_writel(host, upper_32_bits(addr), SDHCI_ADMA_ADDRESS_HI);
824 }
825
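/* SDMA transfers use the bounce buffer address when one is allocated,
 * otherwise they DMA straight to the request's single scatterlist entry. */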
826 static dma_addr_t sdhci_sdma_address(struct sdhci_host *host)
827 {
828 if (host->bounce_buffer)
829 return host->bounce_addr;
830 else
831 return sg_dma_address(host->data->sg);
832 }
833
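/* In v4 mode the SDMA system address is programmed through the ADMA address registers. */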
834 static void sdhci_set_sdma_addr(struct sdhci_host *host, dma_addr_t addr)
835 {
836 if (host->v4_mode)
837 sdhci_set_adma_addr(host, addr);
838 else
839 sdhci_writel(host, addr, SDHCI_DMA_ADDRESS);
840 }
841
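/* Return the timeout required for the command/data, in microseconds. */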
842 static unsigned int sdhci_target_timeout(struct sdhci_host *host,
843 struct mmc_command *cmd,
844 struct mmc_data *data)
845 {
846 unsigned int target_timeout;
847
848 /* timeout in us */
849 if (!data) {
850 target_timeout = cmd->busy_timeout * 1000;
851 } else {
852 target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
853 if (host->clock && data->timeout_clks) {
854 unsigned long long val;
855
856 /*
857 * data->timeout_clks is in units of clock cycles.
858 * host->clock is in Hz. target_timeout is in us.
859 * Hence, us = 1000000 * cycles / Hz. Round up.
860 */
861 val = 1000000ULL * data->timeout_clks;
862 if (do_div(val, host->clock))
863 target_timeout++;
864 target_timeout += val;
865 }
866 }
867
868 return target_timeout;
869 }
870
871 static void sdhci_calc_sw_timeout(struct sdhci_host *host,
872 struct mmc_command *cmd)
873 {
874 struct mmc_data *data = cmd->data;
875 struct mmc_host *mmc = host->mmc;
876 struct mmc_ios *ios = &mmc->ios;
877 unsigned char bus_width = 1 << ios->bus_width;
878 unsigned int blksz;
879 unsigned int freq;
880 u64 target_timeout;
881 u64 transfer_time;
882
883 target_timeout = sdhci_target_timeout(host, cmd, data);
884 target_timeout *= NSEC_PER_USEC;
885
886 if (data) {
887 blksz = data->blksz;
888 freq = host->mmc->actual_clock ? : host->clock;
889 transfer_time = (u64)blksz * NSEC_PER_SEC * (8 / bus_width);
890 do_div(transfer_time, freq);
891 /* multiply by '2' to account for any unknowns */
892 transfer_time = transfer_time * 2;
893 /* calculate timeout for the entire data */
894 host->data_timeout = data->blocks * target_timeout +
895 transfer_time;
896 } else {
897 host->data_timeout = target_timeout;
898 }
899
900 if (host->data_timeout)
901 host->data_timeout += MMC_CMD_TRANSFER_TIME;
902 }
903
904 static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd,
905 bool *too_big)
906 {
907 u8 count;
908 struct mmc_data *data;
909 unsigned target_timeout, current_timeout;
910
911 *too_big = true;
912
913 /*
914 * If the host controller provides us with an incorrect timeout
915 * value, just skip the check and use 0xE. The hardware may take
916 * longer to time out, but that's much better than having a too-short
917 * timeout value.
918 */
919 if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
920 return 0xE;
921
922 /* Unspecified command, assume max */
923 if (cmd == NULL)
924 return 0xE;
925
926 data = cmd->data;
927 /* Unspecified timeout, assume max */
928 if (!data && !cmd->busy_timeout)
929 return 0xE;
930
931 /* timeout in us */
932 target_timeout = sdhci_target_timeout(host, cmd, data);
933
934 /*
935 * Figure out needed cycles.
936 * We do this in steps in order to fit inside a 32 bit int.
937 * The first step is the minimum timeout, which will have a
938 * minimum resolution of 6 bits:
939 * (1) 2^13*1000 > 2^22,
940 * (2) host->timeout_clk < 2^16
941 * =>
942 * (1) / (2) > 2^6
943 */
944 count = 0;
945 current_timeout = (1 << 13) * 1000 / host->timeout_clk;
946 while (current_timeout < target_timeout) {
947 count++;
948 current_timeout <<= 1;
949 if (count >= 0xF)
950 break;
951 }
952
953 if (count >= 0xF) {
954 if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT))
955 DBG("Too large timeout 0x%x requested for CMD%d!\n",
956 count, cmd->opcode);
957 count = 0xE;
958 } else {
959 *too_big = false;
960 }
961
962 return count;
963 }
964
965 static void sdhci_set_transfer_irqs(struct sdhci_host *host)
966 {
967 u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
968 u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;
969
970 if (host->flags & SDHCI_REQ_USE_DMA)
971 host->ier = (host->ier & ~pio_irqs) | dma_irqs;
972 else
973 host->ier = (host->ier & ~dma_irqs) | pio_irqs;
974
975 if (host->flags & (SDHCI_AUTO_CMD23 | SDHCI_AUTO_CMD12))
976 host->ier |= SDHCI_INT_AUTO_CMD_ERR;
977 else
978 host->ier &= ~SDHCI_INT_AUTO_CMD_ERR;
979
980 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
981 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
982 }
983
984 static void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
985 {
986 if (enable)
987 host->ier |= SDHCI_INT_DATA_TIMEOUT;
988 else
989 host->ier &= ~SDHCI_INT_DATA_TIMEOUT;
990 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
991 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
992 }
993
994 static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
995 {
996 u8 count;
997
998 if (host->ops->set_timeout) {
999 host->ops->set_timeout(host, cmd);
1000 } else {
1001 bool too_big = false;
1002
1003 count = sdhci_calc_timeout(host, cmd, &too_big);
1004
1005 if (too_big &&
1006 host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) {
1007 sdhci_calc_sw_timeout(host, cmd);
1008 sdhci_set_data_timeout_irq(host, false);
1009 } else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) {
1010 sdhci_set_data_timeout_irq(host, true);
1011 }
1012
1013 sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
1014 }
1015 }
1016
1017 static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
1018 {
1019 struct mmc_data *data = cmd->data;
1020
1021 host->data_timeout = 0;
1022
1023 if (sdhci_data_line_cmd(cmd))
1024 sdhci_set_timeout(host, cmd);
1025
1026 if (!data)
1027 return;
1028
1029 WARN_ON(host->data);
1030
1031 /* Sanity checks */
1032 BUG_ON(data->blksz * data->blocks > 524288);
1033 BUG_ON(data->blksz > host->mmc->max_blk_size);
1034 BUG_ON(data->blocks > 65535);
1035
1036 host->data = data;
1037 host->data_early = 0;
1038 host->data->bytes_xfered = 0;
1039
1040 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
1041 struct scatterlist *sg;
1042 unsigned int length_mask, offset_mask;
1043 int i;
1044
1045 host->flags |= SDHCI_REQ_USE_DMA;
1046
1047 /*
1048 * FIXME: This doesn't account for merging when mapping the
1049 * scatterlist.
1050 *
1051 * The assumption here being that alignment and lengths are
1052 * the same after DMA mapping to device address space.
1053 */
1054 length_mask = 0;
1055 offset_mask = 0;
1056 if (host->flags & SDHCI_USE_ADMA) {
1057 if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
1058 length_mask = 3;
1059 /*
1060 * As we use up to 3 byte chunks to work
1061 * around alignment problems, we need to
1062 * check the offset as well.
1063 */
1064 offset_mask = 3;
1065 }
1066 } else {
1067 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
1068 length_mask = 3;
1069 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
1070 offset_mask = 3;
1071 }
1072
1073 if (unlikely(length_mask | offset_mask)) {
1074 for_each_sg(data->sg, sg, data->sg_len, i) {
1075 if (sg->length & length_mask) {
1076 DBG("Reverting to PIO because of transfer size (%d)\n",
1077 sg->length);
1078 host->flags &= ~SDHCI_REQ_USE_DMA;
1079 break;
1080 }
1081 if (sg->offset & offset_mask) {
1082 DBG("Reverting to PIO because of bad alignment\n");
1083 host->flags &= ~SDHCI_REQ_USE_DMA;
1084 break;
1085 }
1086 }
1087 }
1088 }
1089
1090 if (host->flags & SDHCI_REQ_USE_DMA) {
1091 int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);
1092
1093 if (sg_cnt <= 0) {
1094 /*
1095 * This only happens when someone fed
1096 * us an invalid request.
1097 */
1098 WARN_ON(1);
1099 host->flags &= ~SDHCI_REQ_USE_DMA;
1100 } else if (host->flags & SDHCI_USE_ADMA) {
1101 sdhci_adma_table_pre(host, data, sg_cnt);
1102 sdhci_set_adma_addr(host, host->adma_addr);
1103 } else {
1104 WARN_ON(sg_cnt != 1);
1105 sdhci_set_sdma_addr(host, sdhci_sdma_address(host));
1106 }
1107 }
1108
1109 sdhci_config_dma(host);
1110
1111 if (!(host->flags & SDHCI_REQ_USE_DMA)) {
1112 int flags;
1113
1114 flags = SG_MITER_ATOMIC;
1115 if (host->data->flags & MMC_DATA_READ)
1116 flags |= SG_MITER_TO_SG;
1117 else
1118 flags |= SG_MITER_FROM_SG;
1119 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
1120 host->blocks = data->blocks;
1121 }
1122
1123 sdhci_set_transfer_irqs(host);
1124
1125 /* Set the DMA boundary value and block size */
1126 sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz),
1127 SDHCI_BLOCK_SIZE);
1128
1129 /*
1130 * From Version 4.10 onwards, if v4 mode is enabled, the 32-bit Block Count
1131 * register can be used; in that case the 16-bit Block Count register must be 0.
1132 */
1133 if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
1134 (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) {
1135 if (sdhci_readw(host, SDHCI_BLOCK_COUNT))
1136 sdhci_writew(host, 0, SDHCI_BLOCK_COUNT);
1137 sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT);
1138 } else {
1139 sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
1140 }
1141 }
1142
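/* Auto-CMD12 applies only to open-ended transfers (no CMD23) that do not
 * need the command line during the transfer. */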
1143 static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
1144 struct mmc_request *mrq)
1145 {
1146 return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
1147 !mrq->cap_cmd_during_tfr;
1148 }
1149
1150 static inline void sdhci_auto_cmd_select(struct sdhci_host *host,
1151 struct mmc_command *cmd,
1152 u16 *mode)
1153 {
1154 bool use_cmd12 = sdhci_auto_cmd12(host, cmd->mrq) &&
1155 (cmd->opcode != SD_IO_RW_EXTENDED);
1156 bool use_cmd23 = cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23);
1157 u16 ctrl2;
1158
1159 /*
1160 * In case of Version 4.10 or later, use of 'Auto CMD Auto
1161 * Select' is recommended rather than use of 'Auto CMD12
1162 * Enable' or 'Auto CMD23 Enable'.
1163 */
1164 if (host->version >= SDHCI_SPEC_410 && (use_cmd12 || use_cmd23)) {
1165 *mode |= SDHCI_TRNS_AUTO_SEL;
1166
1167 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1168 if (use_cmd23)
1169 ctrl2 |= SDHCI_CMD23_ENABLE;
1170 else
1171 ctrl2 &= ~SDHCI_CMD23_ENABLE;
1172 sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
1173
1174 return;
1175 }
1176
1177 /*
1178 * If we are sending CMD23, CMD12 never gets sent
1179 * on successful completion (so no Auto-CMD12).
1180 */
1181 if (use_cmd12)
1182 *mode |= SDHCI_TRNS_AUTO_CMD12;
1183 else if (use_cmd23)
1184 *mode |= SDHCI_TRNS_AUTO_CMD23;
1185 }
1186
1187 static void sdhci_set_transfer_mode(struct sdhci_host *host,
1188 struct mmc_command *cmd)
1189 {
1190 u16 mode = 0;
1191 struct mmc_data *data = cmd->data;
1192
1193 if (data == NULL) {
1194 if (host->quirks2 &
1195 SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
1196 /* must not clear SDHCI_TRANSFER_MODE when tuning */
1197 if (cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200)
1198 sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
1199 } else {
1200 /* clear Auto CMD settings for no data CMDs */
1201 mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
1202 sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
1203 SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
1204 }
1205 return;
1206 }
1207
1208 WARN_ON(!host->data);
1209
1210 if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
1211 mode = SDHCI_TRNS_BLK_CNT_EN;
1212
1213 if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
1214 mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
1215 sdhci_auto_cmd_select(host, cmd, &mode);
1216 if (cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23))
1217 sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2);
1218 }
1219
1220 if (data->flags & MMC_DATA_READ)
1221 mode |= SDHCI_TRNS_READ;
1222 if (host->flags & SDHCI_REQ_USE_DMA)
1223 mode |= SDHCI_TRNS_DMA;
1224
1225 sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
1226 }
1227
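/* A CMD/DATA reset is needed if the command, sbc or stop command failed, or
 * if the controller requires a reset after every request. */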
1228 static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
1229 {
1230 return (!(host->flags & SDHCI_DEVICE_DEAD) &&
1231 ((mrq->cmd && mrq->cmd->error) ||
1232 (mrq->sbc && mrq->sbc->error) ||
1233 (mrq->data && mrq->data->stop && mrq->data->stop->error) ||
1234 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
1235 }
1236
1237 static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
1238 {
1239 int i;
1240
1241 if (host->cmd && host->cmd->mrq == mrq)
1242 host->cmd = NULL;
1243
1244 if (host->data_cmd && host->data_cmd->mrq == mrq)
1245 host->data_cmd = NULL;
1246
1247 if (host->data && host->data->mrq == mrq)
1248 host->data = NULL;
1249
1250 if (sdhci_needs_reset(host, mrq))
1251 host->pending_reset = true;
1252
1253 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
1254 if (host->mrqs_done[i] == mrq) {
1255 WARN_ON(1);
1256 return;
1257 }
1258 }
1259
1260 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
1261 if (!host->mrqs_done[i]) {
1262 host->mrqs_done[i] = mrq;
1263 break;
1264 }
1265 }
1266
1267 WARN_ON(i >= SDHCI_MAX_MRQS);
1268
1269 sdhci_del_timer(host, mrq);
1270
1271 if (!sdhci_has_requests(host))
1272 sdhci_led_deactivate(host);
1273 }
1274
1275 static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
1276 {
1277 __sdhci_finish_mrq(host, mrq);
1278
1279 queue_work(host->complete_wq, &host->complete_work);
1280 }
1281
1282 static void sdhci_finish_data(struct sdhci_host *host)
1283 {
1284 struct mmc_command *data_cmd = host->data_cmd;
1285 struct mmc_data *data = host->data;
1286
1287 host->data = NULL;
1288 host->data_cmd = NULL;
1289
1290 /*
1291 * The controller needs a reset of internal state machines upon error
1292 * conditions.
1293 */
1294 if (data->error) {
1295 if (!host->cmd || host->cmd == data_cmd)
1296 sdhci_do_reset(host, SDHCI_RESET_CMD);
1297 sdhci_do_reset(host, SDHCI_RESET_DATA);
1298 }
1299
1300 if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
1301 (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
1302 sdhci_adma_table_post(host, data);
1303
1304 /*
1305 * The specification states that the block count register must
1306 * be updated, but it does not specify at what point in the
1307 * data flow. That makes the register entirely useless to read
1308 * back so we have to assume that nothing made it to the card
1309 * in the event of an error.
1310 */
1311 if (data->error)
1312 data->bytes_xfered = 0;
1313 else
1314 data->bytes_xfered = data->blksz * data->blocks;
1315
1316 /*
1317 * Need to send CMD12 if -
1318 * a) open-ended multiblock transfer (no CMD23)
1319 * b) error in multiblock transfer
1320 */
1321 if (data->stop &&
1322 (data->error ||
1323 !data->mrq->sbc)) {
1324 /*
1325 * 'cap_cmd_during_tfr' request must not use the command line
1326 * after mmc_command_done() has been called. It is upper layer's
1327 * responsibility to send the stop command if required.
1328 */
1329 if (data->mrq->cap_cmd_during_tfr) {
1330 __sdhci_finish_mrq(host, data->mrq);
1331 } else {
1332 /* Avoid triggering warning in sdhci_send_command() */
1333 host->cmd = NULL;
1334 sdhci_send_command(host, data->stop);
1335 }
1336 } else {
1337 __sdhci_finish_mrq(host, data->mrq);
1338 }
1339 }
1340
1341 void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
1342 {
1343 int flags;
1344 u32 mask;
1345 unsigned long timeout;
1346
1347 WARN_ON(host->cmd);
1348
1349 /* Initially, a command has no error */
1350 cmd->error = 0;
1351
1352 if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
1353 cmd->opcode == MMC_STOP_TRANSMISSION)
1354 cmd->flags |= MMC_RSP_BUSY;
1355
1356 /* Wait max 10 ms */
1357 timeout = 10;
1358
1359 mask = SDHCI_CMD_INHIBIT;
1360 if (sdhci_data_line_cmd(cmd))
1361 mask |= SDHCI_DATA_INHIBIT;
1362
1363 /* We shouldn't wait for data inhibit for stop commands, even
1364 though they might use busy signaling */
1365 if (cmd->mrq->data && (cmd == cmd->mrq->data->stop))
1366 mask &= ~SDHCI_DATA_INHIBIT;
1367
1368 while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
1369 if (timeout == 0) {
1370 pr_err("%s: Controller never released inhibit bit(s).\n",
1371 mmc_hostname(host->mmc));
1372 sdhci_dumpregs(host);
1373 cmd->error = -EIO;
1374 sdhci_finish_mrq(host, cmd->mrq);
1375 return;
1376 }
1377 timeout--;
1378 mdelay(1);
1379 }
1380
1381 host->cmd = cmd;
1382 if (sdhci_data_line_cmd(cmd)) {
1383 WARN_ON(host->data_cmd);
1384 host->data_cmd = cmd;
1385 }
1386
1387 sdhci_prepare_data(host, cmd);
1388
1389 sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);
1390
1391 sdhci_set_transfer_mode(host, cmd);
1392
1393 if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
1394 pr_err("%s: Unsupported response type!\n",
1395 mmc_hostname(host->mmc));
1396 cmd->error = -EINVAL;
1397 sdhci_finish_mrq(host, cmd->mrq);
1398 return;
1399 }
1400
1401 if (!(cmd->flags & MMC_RSP_PRESENT))
1402 flags = SDHCI_CMD_RESP_NONE;
1403 else if (cmd->flags & MMC_RSP_136)
1404 flags = SDHCI_CMD_RESP_LONG;
1405 else if (cmd->flags & MMC_RSP_BUSY)
1406 flags = SDHCI_CMD_RESP_SHORT_BUSY;
1407 else
1408 flags = SDHCI_CMD_RESP_SHORT;
1409
1410 if (cmd->flags & MMC_RSP_CRC)
1411 flags |= SDHCI_CMD_CRC;
1412 if (cmd->flags & MMC_RSP_OPCODE)
1413 flags |= SDHCI_CMD_INDEX;
1414
1415 /* CMD19 is special in that the Data Present Select should be set */
1416 if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
1417 cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
1418 flags |= SDHCI_CMD_DATA;
1419
1420 timeout = jiffies;
1421 if (host->data_timeout)
1422 timeout += nsecs_to_jiffies(host->data_timeout);
1423 else if (!cmd->data && cmd->busy_timeout > 9000)
1424 timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
1425 else
1426 timeout += 10 * HZ;
1427 sdhci_mod_timer(host, cmd->mrq, timeout);
1428
1429 sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
1430 }
1431 EXPORT_SYMBOL_GPL(sdhci_send_command);
1432
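/* Read a 136-bit (R2) response from the four response registers, most
 * significant word first. */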
1433 static void sdhci_read_rsp_136(struct sdhci_host *host, struct mmc_command *cmd)
1434 {
1435 int i, reg;
1436
1437 for (i = 0; i < 4; i++) {
1438 reg = SDHCI_RESPONSE + (3 - i) * 4;
1439 cmd->resp[i] = sdhci_readl(host, reg);
1440 }
1441
1442 if (host->quirks2 & SDHCI_QUIRK2_RSP_136_HAS_CRC)
1443 return;
1444
1445 /* CRC is stripped so we need to do some shifting */
1446 for (i = 0; i < 4; i++) {
1447 cmd->resp[i] <<= 8;
1448 if (i != 3)
1449 cmd->resp[i] |= cmd->resp[i + 1] >> 24;
1450 }
1451 }
1452
1453 static void sdhci_finish_command(struct sdhci_host *host)
1454 {
1455 struct mmc_command *cmd = host->cmd;
1456
1457 host->cmd = NULL;
1458
1459 if (cmd->flags & MMC_RSP_PRESENT) {
1460 if (cmd->flags & MMC_RSP_136) {
1461 sdhci_read_rsp_136(host, cmd);
1462 } else {
1463 cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
1464 }
1465 }
1466
1467 if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd)
1468 mmc_command_done(host->mmc, cmd->mrq);
1469
1470 /*
1471 * The host can send an interrupt when the busy state has
1472 * ended, allowing us to wait without wasting CPU cycles.
1473 * The busy signal uses DAT0 so this is similar to waiting
1474 * for data to complete.
1475 *
1476 * Note: The 1.0 specification is a bit ambiguous about this
1477 * feature so there might be some problems with older
1478 * controllers.
1479 */
1480 if (cmd->flags & MMC_RSP_BUSY) {
1481 if (cmd->data) {
1482 DBG("Cannot wait for busy signal when also doing a data transfer");
1483 } else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
1484 cmd == host->data_cmd) {
1485 /* Command complete before busy is ended */
1486 return;
1487 }
1488 }
1489
1490 /* Finished CMD23, now send actual command. */
1491 if (cmd == cmd->mrq->sbc) {
1492 sdhci_send_command(host, cmd->mrq->cmd);
1493 } else {
1494
1495 /* Processed actual command. */
1496 if (host->data && host->data_early)
1497 sdhci_finish_data(host);
1498
1499 if (!cmd->data)
1500 __sdhci_finish_mrq(host, cmd->mrq);
1501 }
1502 }
1503
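/* Return the preset value register contents for the current bus timing. */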
1504 static u16 sdhci_get_preset_value(struct sdhci_host *host)
1505 {
1506 u16 preset = 0;
1507
1508 switch (host->timing) {
1509 case MMC_TIMING_UHS_SDR12:
1510 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
1511 break;
1512 case MMC_TIMING_UHS_SDR25:
1513 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
1514 break;
1515 case MMC_TIMING_UHS_SDR50:
1516 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
1517 break;
1518 case MMC_TIMING_UHS_SDR104:
1519 case MMC_TIMING_MMC_HS200:
1520 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
1521 break;
1522 case MMC_TIMING_UHS_DDR50:
1523 case MMC_TIMING_MMC_DDR52:
1524 preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
1525 break;
1526 case MMC_TIMING_MMC_HS400:
1527 preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
1528 break;
1529 default:
1530 pr_warn("%s: Invalid UHS-I mode selected\n",
1531 mmc_hostname(host->mmc));
1532 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
1533 break;
1534 }
1535 return preset;
1536 }
1537
1538 u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
1539 unsigned int *actual_clock)
1540 {
1541 int div = 0; /* Initialized for compiler warning */
1542 int real_div = div, clk_mul = 1;
1543 u16 clk = 0;
1544 bool switch_base_clk = false;
1545
1546 if (host->version >= SDHCI_SPEC_300) {
1547 if (host->preset_enabled) {
1548 u16 pre_val;
1549
1550 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1551 pre_val = sdhci_get_preset_value(host);
1552 div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
1553 >> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
1554 if (host->clk_mul &&
1555 (pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
1556 clk = SDHCI_PROG_CLOCK_MODE;
1557 real_div = div + 1;
1558 clk_mul = host->clk_mul;
1559 } else {
1560 real_div = max_t(int, 1, div << 1);
1561 }
1562 goto clock_set;
1563 }
1564
1565 /*
1566 * Check if the Host Controller supports Programmable Clock
1567 * Mode.
1568 */
1569 if (host->clk_mul) {
1570 for (div = 1; div <= 1024; div++) {
1571 if ((host->max_clk * host->clk_mul / div)
1572 <= clock)
1573 break;
1574 }
1575 if ((host->max_clk * host->clk_mul / div) <= clock) {
1576 /*
1577 * Set Programmable Clock Mode in the Clock
1578 * Control register.
1579 */
1580 clk = SDHCI_PROG_CLOCK_MODE;
1581 real_div = div;
1582 clk_mul = host->clk_mul;
1583 div--;
1584 } else {
1585 /*
1586 * Divisor can be too small to reach clock
1587 * speed requirement. Then use the base clock.
1588 */
1589 switch_base_clk = true;
1590 }
1591 }
1592
1593 if (!host->clk_mul || switch_base_clk) {
1594 /* Version 3.00 divisors must be a multiple of 2. */
1595 if (host->max_clk <= clock)
1596 div = 1;
1597 else {
1598 for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
1599 div += 2) {
1600 if ((host->max_clk / div) <= clock)
1601 break;
1602 }
1603 }
1604 real_div = div;
1605 div >>= 1;
1606 if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
1607 && !div && host->max_clk <= 25000000)
1608 div = 1;
1609 }
1610 } else {
1611 /* Version 2.00 divisors must be a power of 2. */
1612 for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
1613 if ((host->max_clk / div) <= clock)
1614 break;
1615 }
1616 real_div = div;
1617 div >>= 1;
1618 }
1619
1620 clock_set:
1621 if (real_div)
1622 *actual_clock = (host->max_clk * clk_mul) / real_div;
1623 clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
1624 clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
1625 << SDHCI_DIVIDER_HI_SHIFT;
1626
1627 return clk;
1628 }
1629 EXPORT_SYMBOL_GPL(sdhci_calc_clk);
1630
1631 void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
1632 {
1633 ktime_t timeout;
1634
1635 clk |= SDHCI_CLOCK_INT_EN;
1636 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1637
1638 /* Wait max 150 ms */
1639 timeout = ktime_add_ms(ktime_get(), 150);
1640 while (1) {
1641 bool timedout = ktime_after(ktime_get(), timeout);
1642
1643 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1644 if (clk & SDHCI_CLOCK_INT_STABLE)
1645 break;
1646 if (timedout) {
1647 pr_err("%s: Internal clock never stabilised.\n",
1648 mmc_hostname(host->mmc));
1649 sdhci_dumpregs(host);
1650 return;
1651 }
1652 udelay(10);
1653 }
1654
1655 if (host->version >= SDHCI_SPEC_410 && host->v4_mode) {
1656 clk |= SDHCI_CLOCK_PLL_EN;
1657 clk &= ~SDHCI_CLOCK_INT_STABLE;
1658 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1659
1660 /* Wait max 150 ms */
1661 timeout = ktime_add_ms(ktime_get(), 150);
1662 while (1) {
1663 bool timedout = ktime_after(ktime_get(), timeout);
1664
1665 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1666 if (clk & SDHCI_CLOCK_INT_STABLE)
1667 break;
1668 if (timedout) {
1669 pr_err("%s: PLL clock never stabilised.\n",
1670 mmc_hostname(host->mmc));
1671 sdhci_dumpregs(host);
1672 return;
1673 }
1674 udelay(10);
1675 }
1676 }
1677
1678 clk |= SDHCI_CLOCK_CARD_EN;
1679 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1680 }
1681 EXPORT_SYMBOL_GPL(sdhci_enable_clk);
1682
1683 void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
1684 {
1685 u16 clk;
1686
1687 host->mmc->actual_clock = 0;
1688
1689 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
1690
1691 if (clock == 0)
1692 return;
1693
1694 clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
1695 sdhci_enable_clk(host, clk);
1696 }
1697 EXPORT_SYMBOL_GPL(sdhci_set_clock);
1698
1699 static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
1700 unsigned short vdd)
1701 {
1702 struct mmc_host *mmc = host->mmc;
1703
1704 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
1705
1706 if (mode != MMC_POWER_OFF)
1707 sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
1708 else
1709 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1710 }
1711
1712 void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
1713 unsigned short vdd)
1714 {
1715 u8 pwr = 0;
1716
1717 if (mode != MMC_POWER_OFF) {
1718 switch (1 << vdd) {
1719 case MMC_VDD_165_195:
1720 /*
1721 * Without a regulator, SDHCI does not support 2.0v
1722 * so we only get here if the driver deliberately
1723 * added the 2.0v range to ocr_avail. Map it to 1.8v
1724 * for the purpose of turning on the power.
1725 */
1726 case MMC_VDD_20_21:
1727 pwr = SDHCI_POWER_180;
1728 break;
1729 case MMC_VDD_29_30:
1730 case MMC_VDD_30_31:
1731 pwr = SDHCI_POWER_300;
1732 break;
1733 case MMC_VDD_32_33:
1734 case MMC_VDD_33_34:
1735 pwr = SDHCI_POWER_330;
1736 break;
1737 default:
1738 WARN(1, "%s: Invalid vdd %#x\n",
1739 mmc_hostname(host->mmc), vdd);
1740 break;
1741 }
1742 }
1743
1744 if (host->pwr == pwr)
1745 return;
1746
1747 host->pwr = pwr;
1748
1749 if (pwr == 0) {
1750 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1751 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
1752 sdhci_runtime_pm_bus_off(host);
1753 } else {
1754 /*
1755 * Spec says that we should clear the power reg before setting
1756 * a new value. Some controllers don't seem to like this though.
1757 */
1758 if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
1759 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1760
1761 /*
1762 * At least the Marvell CaFe chip gets confused if we set the
1763 * voltage and turn on the power at the same time, so set the
1764 * voltage first.
1765 */
1766 if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
1767 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1768
1769 pwr |= SDHCI_POWER_ON;
1770
1771 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1772
1773 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
1774 sdhci_runtime_pm_bus_on(host);
1775
1776 /*
1777 * Some controllers need an extra 10 ms delay before they can
1778 * apply the clock after applying power
1779 */
1780 if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
1781 mdelay(10);
1782 }
1783 }
1784 EXPORT_SYMBOL_GPL(sdhci_set_power_noreg);
1785
1786 void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
1787 unsigned short vdd)
1788 {
1789 if (IS_ERR(host->mmc->supply.vmmc))
1790 sdhci_set_power_noreg(host, mode, vdd);
1791 else
1792 sdhci_set_power_reg(host, mode, vdd);
1793 }
1794 EXPORT_SYMBOL_GPL(sdhci_set_power);
1795
1796 /*****************************************************************************\
1797 * *
1798 * MMC callbacks *
1799 * *
1800 \*****************************************************************************/
1801
1802 void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1803 {
1804 struct sdhci_host *host;
1805 int present;
1806 unsigned long flags;
1807
1808 host = mmc_priv(mmc);
1809
1810 /* Firstly check card presence */
1811 present = mmc->ops->get_cd(mmc);
1812
1813 spin_lock_irqsave(&host->lock, flags);
1814
1815 sdhci_led_activate(host);
1816
1817 /*
1818 * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
1819 * requests if Auto-CMD12 is enabled.
1820 */
1821 if (sdhci_auto_cmd12(host, mrq)) {
1822 if (mrq->stop) {
1823 mrq->data->stop = NULL;
1824 mrq->stop = NULL;
1825 }
1826 }
1827
1828 if (!present || host->flags & SDHCI_DEVICE_DEAD) {
1829 mrq->cmd->error = -ENOMEDIUM;
1830 sdhci_finish_mrq(host, mrq);
1831 } else {
1832 if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
1833 sdhci_send_command(host, mrq->sbc);
1834 else
1835 sdhci_send_command(host, mrq->cmd);
1836 }
1837
1838 spin_unlock_irqrestore(&host->lock, flags);
1839 }
1840 EXPORT_SYMBOL_GPL(sdhci_request);
1841
1842 void sdhci_set_bus_width(struct sdhci_host *host, int width)
1843 {
1844 u8 ctrl;
1845
1846 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
1847 if (width == MMC_BUS_WIDTH_8) {
1848 ctrl &= ~SDHCI_CTRL_4BITBUS;
1849 ctrl |= SDHCI_CTRL_8BITBUS;
1850 } else {
1851 if (host->mmc->caps & MMC_CAP_8_BIT_DATA)
1852 ctrl &= ~SDHCI_CTRL_8BITBUS;
1853 if (width == MMC_BUS_WIDTH_4)
1854 ctrl |= SDHCI_CTRL_4BITBUS;
1855 else
1856 ctrl &= ~SDHCI_CTRL_4BITBUS;
1857 }
1858 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1859 }
1860 EXPORT_SYMBOL_GPL(sdhci_set_bus_width);
1861
1862 void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
1863 {
1864 u16 ctrl_2;
1865
1866 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1867 /* Select Bus Speed Mode for host */
1868 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
1869 if ((timing == MMC_TIMING_MMC_HS200) ||
1870 (timing == MMC_TIMING_UHS_SDR104))
1871 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
1872 else if (timing == MMC_TIMING_UHS_SDR12)
1873 ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
1874 else if (timing == MMC_TIMING_UHS_SDR25)
1875 ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
1876 else if (timing == MMC_TIMING_UHS_SDR50)
1877 ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
1878 else if ((timing == MMC_TIMING_UHS_DDR50) ||
1879 (timing == MMC_TIMING_MMC_DDR52))
1880 ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
1881 else if (timing == MMC_TIMING_MMC_HS400)
1882 ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
1883 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
1884 }
1885 EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);
1886
1887 void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1888 {
1889 struct sdhci_host *host = mmc_priv(mmc);
1890 u8 ctrl;
1891
1892 if (ios->power_mode == MMC_POWER_UNDEFINED)
1893 return;
1894
1895 if (host->flags & SDHCI_DEVICE_DEAD) {
1896 if (!IS_ERR(mmc->supply.vmmc) &&
1897 ios->power_mode == MMC_POWER_OFF)
1898 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
1899 return;
1900 }
1901
1902 /*
1903 * Reset the chip on each power off.
1904 * Should clear out any weird states.
1905 */
1906 if (ios->power_mode == MMC_POWER_OFF) {
1907 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
1908 sdhci_reinit(host);
1909 }
1910
1911 if (host->version >= SDHCI_SPEC_300 &&
1912 (ios->power_mode == MMC_POWER_UP) &&
1913 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
1914 sdhci_enable_preset_value(host, false);
1915
1916 if (!ios->clock || ios->clock != host->clock) {
1917 host->ops->set_clock(host, ios->clock);
1918 host->clock = ios->clock;
1919
1920 if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
1921 host->clock) {
1922 host->timeout_clk = host->mmc->actual_clock ?
1923 host->mmc->actual_clock / 1000 :
1924 host->clock / 1000;
1925 host->mmc->max_busy_timeout =
1926 host->ops->get_max_timeout_count ?
1927 host->ops->get_max_timeout_count(host) :
1928 1 << 27;
1929 host->mmc->max_busy_timeout /= host->timeout_clk;
1930 }
1931 }
1932
1933 if (host->ops->set_power)
1934 host->ops->set_power(host, ios->power_mode, ios->vdd);
1935 else
1936 sdhci_set_power(host, ios->power_mode, ios->vdd);
1937
1938 if (host->ops->platform_send_init_74_clocks)
1939 host->ops->platform_send_init_74_clocks(host, ios->power_mode);
1940
1941 host->ops->set_bus_width(host, ios->bus_width);
1942
1943 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
1944
1945 if (!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) {
1946 if (ios->timing == MMC_TIMING_SD_HS ||
1947 ios->timing == MMC_TIMING_MMC_HS ||
1948 ios->timing == MMC_TIMING_MMC_HS400 ||
1949 ios->timing == MMC_TIMING_MMC_HS200 ||
1950 ios->timing == MMC_TIMING_MMC_DDR52 ||
1951 ios->timing == MMC_TIMING_UHS_SDR50 ||
1952 ios->timing == MMC_TIMING_UHS_SDR104 ||
1953 ios->timing == MMC_TIMING_UHS_DDR50 ||
1954 ios->timing == MMC_TIMING_UHS_SDR25)
1955 ctrl |= SDHCI_CTRL_HISPD;
1956 else
1957 ctrl &= ~SDHCI_CTRL_HISPD;
1958 }
1959
1960 if (host->version >= SDHCI_SPEC_300) {
1961 u16 clk, ctrl_2;
1962
1963 if (!host->preset_enabled) {
1964 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1965 /*
1966 * We only need to set Driver Strength if the
1967 * preset value enable is not set.
1968 */
1969 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1970 ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
1971 if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
1972 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
1973 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
1974 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
1975 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
1976 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
1977 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
1978 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
1979 else {
1980 pr_warn("%s: invalid driver type, default to driver type B\n",
1981 mmc_hostname(mmc));
1982 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
1983 }
1984
1985 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
1986 } else {
1987 /*
1988 * According to SDHC Spec v3.00, if the Preset Value
1989 * Enable in the Host Control 2 register is set, we
1990 * need to reset SD Clock Enable before changing High
1991 * Speed Enable to avoid generating clock glitches.
1992 */
1993
1994 /* Reset SD Clock Enable */
1995 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1996 clk &= ~SDHCI_CLOCK_CARD_EN;
1997 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1998
1999 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
2000
2001 /* Re-enable SD Clock */
2002 host->ops->set_clock(host, host->clock);
2003 }
2004
2005 /* Reset SD Clock Enable */
2006 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
2007 clk &= ~SDHCI_CLOCK_CARD_EN;
2008 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
2009
2010 host->ops->set_uhs_signaling(host, ios->timing);
2011 host->timing = ios->timing;
2012
2013 if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
2014 ((ios->timing == MMC_TIMING_UHS_SDR12) ||
2015 (ios->timing == MMC_TIMING_UHS_SDR25) ||
2016 (ios->timing == MMC_TIMING_UHS_SDR50) ||
2017 (ios->timing == MMC_TIMING_UHS_SDR104) ||
2018 (ios->timing == MMC_TIMING_UHS_DDR50) ||
2019 (ios->timing == MMC_TIMING_MMC_DDR52))) {
2020 u16 preset;
2021
2022 sdhci_enable_preset_value(host, true);
2023 preset = sdhci_get_preset_value(host);
2024 ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
2025 >> SDHCI_PRESET_DRV_SHIFT;
2026 }
2027
2028 /* Re-enable SD Clock */
2029 host->ops->set_clock(host, host->clock);
2030 } else
2031 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
2032
2033 /*
2034 * Some (ENE) controllers misbehave badly on some ios operations,
2035 * signalling timeout and CRC errors even on CMD0. Resetting
2036 * the controller on each ios seems to solve the problem.
2037 */
2038 if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
2039 sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
2040 }
2041 EXPORT_SYMBOL_GPL(sdhci_set_ios);
2042
2043 static int sdhci_get_cd(struct mmc_host *mmc)
2044 {
2045 struct sdhci_host *host = mmc_priv(mmc);
2046 int gpio_cd = mmc_gpio_get_cd(mmc);
2047
2048 if (host->flags & SDHCI_DEVICE_DEAD)
2049 return 0;
2050
2051 /* If nonremovable, assume that the card is always present. */
2052 if (!mmc_card_is_removable(host->mmc))
2053 return 1;
2054
2055 /*
2056 * Try slot GPIO detect; if defined, it takes precedence
2057 * over the built-in controller functionality.
2058 */
2059 if (gpio_cd >= 0)
2060 return !!gpio_cd;
2061
2062 /* If polling, assume that the card is always present. */
2063 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
2064 return 1;
2065
2066 /* Host native card detect */
2067 return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
2068 }
2069
2070 static int sdhci_check_ro(struct sdhci_host *host)
2071 {
2072 unsigned long flags;
2073 int is_readonly;
2074
2075 spin_lock_irqsave(&host->lock, flags);
2076
2077 if (host->flags & SDHCI_DEVICE_DEAD)
2078 is_readonly = 0;
2079 else if (host->ops->get_ro)
2080 is_readonly = host->ops->get_ro(host);
2081 else if (mmc_can_gpio_ro(host->mmc))
2082 is_readonly = mmc_gpio_get_ro(host->mmc);
2083 else
2084 is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
2085 & SDHCI_WRITE_PROTECT);
2086
2087 spin_unlock_irqrestore(&host->lock, flags);
2088
2089 /* This quirk needs to be replaced by a callback-function later */
2090 return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
2091 !is_readonly : is_readonly;
2092 }
2093
2094 #define SAMPLE_COUNT 5
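/*
 * With SAMPLE_COUNT == 5, sdhci_get_ro() below reports the card as
 * read-only as soon as more than SAMPLE_COUNT / 2 of the samples
 * (i.e. at least 3), taken 30 ms apart, read back as write-protected;
 * otherwise it reports read/write.
 */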
2095
2096 static int sdhci_get_ro(struct mmc_host *mmc)
2097 {
2098 struct sdhci_host *host = mmc_priv(mmc);
2099 int i, ro_count;
2100
2101 if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
2102 return sdhci_check_ro(host);
2103
2104 ro_count = 0;
2105 for (i = 0; i < SAMPLE_COUNT; i++) {
2106 if (sdhci_check_ro(host)) {
2107 if (++ro_count > SAMPLE_COUNT / 2)
2108 return 1;
2109 }
2110 msleep(30);
2111 }
2112 return 0;
2113 }
2114
2115 static void sdhci_hw_reset(struct mmc_host *mmc)
2116 {
2117 struct sdhci_host *host = mmc_priv(mmc);
2118
2119 if (host->ops && host->ops->hw_reset)
2120 host->ops->hw_reset(host);
2121 }
2122
2123 static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
2124 {
2125 if (!(host->flags & SDHCI_DEVICE_DEAD)) {
2126 if (enable)
2127 host->ier |= SDHCI_INT_CARD_INT;
2128 else
2129 host->ier &= ~SDHCI_INT_CARD_INT;
2130
2131 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2132 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2133 }
2134 }
2135
2136 void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
2137 {
2138 struct sdhci_host *host = mmc_priv(mmc);
2139 unsigned long flags;
2140
2141 if (enable)
2142 pm_runtime_get_noresume(host->mmc->parent);
2143
2144 spin_lock_irqsave(&host->lock, flags);
2145 sdhci_enable_sdio_irq_nolock(host, enable);
2146 spin_unlock_irqrestore(&host->lock, flags);
2147
2148 if (!enable)
2149 pm_runtime_put_noidle(host->mmc->parent);
2150 }
2151 EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq);
2152
2153 static void sdhci_ack_sdio_irq(struct mmc_host *mmc)
2154 {
2155 struct sdhci_host *host = mmc_priv(mmc);
2156 unsigned long flags;
2157
2158 spin_lock_irqsave(&host->lock, flags);
2159 sdhci_enable_sdio_irq_nolock(host, true);
2160 spin_unlock_irqrestore(&host->lock, flags);
2161 }
2162
2163 int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
2164 struct mmc_ios *ios)
2165 {
2166 struct sdhci_host *host = mmc_priv(mmc);
2167 u16 ctrl;
2168 int ret;
2169
2170 /*
2171 * Signal Voltage Switching is only applicable for Host Controllers
2172 * v3.00 and above.
2173 */
2174 if (host->version < SDHCI_SPEC_300)
2175 return 0;
2176
2177 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2178
2179 switch (ios->signal_voltage) {
2180 case MMC_SIGNAL_VOLTAGE_330:
2181 if (!(host->flags & SDHCI_SIGNALING_330))
2182 return -EINVAL;
2183 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
2184 ctrl &= ~SDHCI_CTRL_VDD_180;
2185 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2186
2187 if (!IS_ERR(mmc->supply.vqmmc)) {
2188 ret = mmc_regulator_set_vqmmc(mmc, ios);
2189 if (ret) {
2190 pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
2191 mmc_hostname(mmc));
2192 return -EIO;
2193 }
2194 }
2195 /* Wait for 5ms */
2196 usleep_range(5000, 5500);
2197
2198 /* 3.3V regulator output should be stable within 5 ms */
2199 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2200 if (!(ctrl & SDHCI_CTRL_VDD_180))
2201 return 0;
2202
2203 pr_warn("%s: 3.3V regulator output did not became stable\n",
2204 mmc_hostname(mmc));
2205
2206 return -EAGAIN;
2207 case MMC_SIGNAL_VOLTAGE_180:
2208 if (!(host->flags & SDHCI_SIGNALING_180))
2209 return -EINVAL;
2210 if (!IS_ERR(mmc->supply.vqmmc)) {
2211 ret = mmc_regulator_set_vqmmc(mmc, ios);
2212 if (ret) {
2213 pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
2214 mmc_hostname(mmc));
2215 return -EIO;
2216 }
2217 }
2218
2219 /*
2220 * Enable 1.8V Signal Enable in the Host Control2
2221 * register
2222 */
2223 ctrl |= SDHCI_CTRL_VDD_180;
2224 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2225
2226 /* Some controllers need to do more when switching */
2227 if (host->ops->voltage_switch)
2228 host->ops->voltage_switch(host);
2229
2230 /* 1.8V regulator output should be stable within 5 ms */
2231 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2232 if (ctrl & SDHCI_CTRL_VDD_180)
2233 return 0;
2234
2235 pr_warn("%s: 1.8V regulator output did not became stable\n",
2236 mmc_hostname(mmc));
2237
2238 return -EAGAIN;
2239 case MMC_SIGNAL_VOLTAGE_120:
2240 if (!(host->flags & SDHCI_SIGNALING_120))
2241 return -EINVAL;
2242 if (!IS_ERR(mmc->supply.vqmmc)) {
2243 ret = mmc_regulator_set_vqmmc(mmc, ios);
2244 if (ret) {
2245 pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
2246 mmc_hostname(mmc));
2247 return -EIO;
2248 }
2249 }
2250 return 0;
2251 default:
2252 /* No signal voltage switch required */
2253 return 0;
2254 }
2255 }
2256 EXPORT_SYMBOL_GPL(sdhci_start_signal_voltage_switch);
2257
2258 static int sdhci_card_busy(struct mmc_host *mmc)
2259 {
2260 struct sdhci_host *host = mmc_priv(mmc);
2261 u32 present_state;
2262
2263 /* Check whether DAT[0] is 0 */
2264 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
2265
2266 return !(present_state & SDHCI_DATA_0_LVL_MASK);
2267 }
2268
2269 static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
2270 {
2271 struct sdhci_host *host = mmc_priv(mmc);
2272 unsigned long flags;
2273
2274 spin_lock_irqsave(&host->lock, flags);
2275 host->flags |= SDHCI_HS400_TUNING;
2276 spin_unlock_irqrestore(&host->lock, flags);
2277
2278 return 0;
2279 }
2280
2281 void sdhci_start_tuning(struct sdhci_host *host)
2282 {
2283 u16 ctrl;
2284
2285 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2286 ctrl |= SDHCI_CTRL_EXEC_TUNING;
2287 if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
2288 ctrl |= SDHCI_CTRL_TUNED_CLK;
2289 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2290
2291 /*
2292 * As per the Host Controller spec v3.00, the tuning command
2293 * generates a Buffer Read Ready interrupt, so enable that.
2294 *
2295 * Note: The spec clearly says that when the tuning sequence
2296 * is being performed, the controller does not generate
2297 * interrupts other than Buffer Read Ready interrupt. But
2298 * to make sure we don't hit a controller bug, we _only_
2299 * enable Buffer Read Ready interrupt here.
2300 */
2301 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
2302 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
2303 }
2304 EXPORT_SYMBOL_GPL(sdhci_start_tuning);
2305
2306 void sdhci_end_tuning(struct sdhci_host *host)
2307 {
2308 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2309 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2310 }
2311 EXPORT_SYMBOL_GPL(sdhci_end_tuning);
2312
2313 void sdhci_reset_tuning(struct sdhci_host *host)
2314 {
2315 u16 ctrl;
2316
2317 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2318 ctrl &= ~SDHCI_CTRL_TUNED_CLK;
2319 ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
2320 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2321 }
2322 EXPORT_SYMBOL_GPL(sdhci_reset_tuning);
2323
2324 void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode)
2325 {
2326 sdhci_reset_tuning(host);
2327
2328 sdhci_do_reset(host, SDHCI_RESET_CMD);
2329 sdhci_do_reset(host, SDHCI_RESET_DATA);
2330
2331 sdhci_end_tuning(host);
2332
2333 mmc_abort_tuning(host->mmc, opcode);
2334 }
2335 EXPORT_SYMBOL_GPL(sdhci_abort_tuning);
2336
2337 /*
2338 * We use sdhci_send_tuning() because mmc_send_tuning() is not a good fit. The
2339 * SDHCI tuning command does not have a data payload (or rather the hardware
2340 * handles it automatically) so mmc_send_tuning() will return -EIO. Also the
2341 * tuning command interrupt setup is different from other commands and there is
2342 * no timeout interrupt, so special handling is needed.
2343 */
2344 void sdhci_send_tuning(struct sdhci_host *host, u32 opcode)
2345 {
2346 struct mmc_host *mmc = host->mmc;
2347 struct mmc_command cmd = {};
2348 struct mmc_request mrq = {};
2349 unsigned long flags;
2350 u32 b = host->sdma_boundary;
2351
2352 spin_lock_irqsave(&host->lock, flags);
2353
2354 cmd.opcode = opcode;
2355 cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
2356 cmd.mrq = &mrq;
2357
2358 mrq.cmd = &cmd;
2359 /*
2360 * In response to CMD19, the card sends a 64-byte tuning block
2361 * to the Host Controller, so we set the block size to 64 here.
2362 * For CMD21 on an 8-bit bus the tuning block is 128 bytes.
2363 */
2364 if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 &&
2365 mmc->ios.bus_width == MMC_BUS_WIDTH_8)
2366 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 128), SDHCI_BLOCK_SIZE);
2367 else
2368 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 64), SDHCI_BLOCK_SIZE);
2369
2370 /*
2371 * The tuning block is sent by the card to the host controller.
2372 * So we set the TRNS_READ bit in the Transfer Mode register.
2373 * This also takes care of setting DMA Enable and Multi Block
2374 * Select in the same register to 0.
2375 */
2376 sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
2377
2378 sdhci_send_command(host, &cmd);
2379
2380 host->cmd = NULL;
2381
2382 sdhci_del_timer(host, &mrq);
2383
2384 host->tuning_done = 0;
2385
2386 spin_unlock_irqrestore(&host->lock, flags);
2387
2388 /* Wait for Buffer Read Ready interrupt */
2389 wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1),
2390 msecs_to_jiffies(50));
2391
2392 }
2393 EXPORT_SYMBOL_GPL(sdhci_send_tuning);
2394
2395 static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
2396 {
2397 int i;
2398
2399 /*
2400 * Issue the opcode repeatedly until Execute Tuning is cleared to 0 or
2401 * the number of loops reaches the tuning loop count.
2402 */
2403 for (i = 0; i < host->tuning_loop_count; i++) {
2404 u16 ctrl;
2405
2406 sdhci_send_tuning(host, opcode);
2407
2408 if (!host->tuning_done) {
2409 pr_debug("%s: Tuning timeout, falling back to fixed sampling clock\n",
2410 mmc_hostname(host->mmc));
2411 sdhci_abort_tuning(host, opcode);
2412 return -ETIMEDOUT;
2413 }
2414
2415 /* Spec does not require a delay between tuning cycles */
2416 if (host->tuning_delay > 0)
2417 mdelay(host->tuning_delay);
2418
2419 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2420 if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) {
2421 if (ctrl & SDHCI_CTRL_TUNED_CLK)
2422 return 0; /* Success! */
2423 break;
2424 }
2425
2426 }
2427
2428 pr_info("%s: Tuning failed, falling back to fixed sampling clock\n",
2429 mmc_hostname(host->mmc));
2430 sdhci_reset_tuning(host);
2431 return -EAGAIN;
2432 }
2433
2434 int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
2435 {
2436 struct sdhci_host *host = mmc_priv(mmc);
2437 int err = 0;
2438 unsigned int tuning_count = 0;
2439 bool hs400_tuning;
2440
2441 hs400_tuning = host->flags & SDHCI_HS400_TUNING;
2442
2443 if (host->tuning_mode == SDHCI_TUNING_MODE_1)
2444 tuning_count = host->tuning_count;
2445
2446 /*
2447 * The Host Controller needs tuning for the SDR104 and DDR50
2448 * modes, and for SDR50 mode when Use Tuning for SDR50 is set in
2449 * the Capabilities register.
2450 * If the Host Controller supports the HS200 mode then the
2451 * tuning function has to be executed.
2452 */
2453 switch (host->timing) {
2454 /* HS400 tuning is done in HS200 mode */
2455 case MMC_TIMING_MMC_HS400:
2456 err = -EINVAL;
2457 goto out;
2458
2459 case MMC_TIMING_MMC_HS200:
2460 /*
2461 * Periodic re-tuning for HS400 is not expected to be needed, so
2462 * disable it here.
2463 */
2464 if (hs400_tuning)
2465 tuning_count = 0;
2466 break;
2467
2468 case MMC_TIMING_UHS_SDR104:
2469 case MMC_TIMING_UHS_DDR50:
2470 break;
2471
2472 case MMC_TIMING_UHS_SDR50:
2473 if (host->flags & SDHCI_SDR50_NEEDS_TUNING)
2474 break;
2475 /* FALLTHROUGH */
2476
2477 default:
2478 goto out;
2479 }
2480
2481 if (host->ops->platform_execute_tuning) {
2482 err = host->ops->platform_execute_tuning(host, opcode);
2483 goto out;
2484 }
2485
2486 host->mmc->retune_period = tuning_count;
2487
2488 if (host->tuning_delay < 0)
2489 host->tuning_delay = opcode == MMC_SEND_TUNING_BLOCK;
2490
2491 sdhci_start_tuning(host);
2492
2493 host->tuning_err = __sdhci_execute_tuning(host, opcode);
2494
2495 sdhci_end_tuning(host);
2496 out:
2497 host->flags &= ~SDHCI_HS400_TUNING;
2498
2499 return err;
2500 }
2501 EXPORT_SYMBOL_GPL(sdhci_execute_tuning);
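/*
 * Illustrative sketch (not part of this file): a vendor driver that needs
 * extra steps around tuning can either provide ->platform_execute_tuning()
 * in its sdhci_ops, or wrap the exported helper from its own mmc_host_ops,
 * roughly as below. The "foo" names are hypothetical placeholders.
 *
 *	static int sdhci_foo_execute_tuning(struct mmc_host *mmc, u32 opcode)
 *	{
 *		struct sdhci_host *host = mmc_priv(mmc);
 *
 *		sdhci_foo_prepare_sampling(host);	// hypothetical vendor hook
 *		return sdhci_execute_tuning(mmc, opcode);
 *	}
 */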
2502
2503 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
2504 {
2505 /* Host Controller v3.00 defines preset value registers */
2506 if (host->version < SDHCI_SPEC_300)
2507 return;
2508
2509 /*
2510 * Only enable or disable Preset Value if it is not already in the
2511 * requested state; otherwise bail out.
2512 */
2513 if (host->preset_enabled != enable) {
2514 u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2515
2516 if (enable)
2517 ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
2518 else
2519 ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
2520
2521 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2522
2523 if (enable)
2524 host->flags |= SDHCI_PV_ENABLED;
2525 else
2526 host->flags &= ~SDHCI_PV_ENABLED;
2527
2528 host->preset_enabled = enable;
2529 }
2530 }
2531
2532 static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
2533 int err)
2534 {
2535 struct sdhci_host *host = mmc_priv(mmc);
2536 struct mmc_data *data = mrq->data;
2537
2538 if (data->host_cookie != COOKIE_UNMAPPED)
2539 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
2540 mmc_get_dma_dir(data));
2541
2542 data->host_cookie = COOKIE_UNMAPPED;
2543 }
2544
2545 static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
2546 {
2547 struct sdhci_host *host = mmc_priv(mmc);
2548
2549 mrq->data->host_cookie = COOKIE_UNMAPPED;
2550
2551 /*
2552 * No pre-mapping in the pre hook if we're using the bounce buffer;
2553 * for that we would need two bounce buffers since one buffer is
2554 * in flight when this is getting called.
2555 */
2556 if (host->flags & SDHCI_REQ_USE_DMA && !host->bounce_buffer)
2557 sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
2558 }
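/*
 * Cookie lifecycle for the hooks above: data->host_cookie starts as
 * COOKIE_UNMAPPED, becomes COOKIE_PRE_MAPPED when sdhci_pre_req() maps the
 * buffers ahead of time, or COOKIE_MAPPED when the mapping happens at
 * request time, and is returned to COOKIE_UNMAPPED once the buffers are
 * unmapped in sdhci_post_req() or when the request completes.
 */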
2559
2560 static void sdhci_error_out_mrqs(struct sdhci_host *host, int err)
2561 {
2562 if (host->data_cmd) {
2563 host->data_cmd->error = err;
2564 sdhci_finish_mrq(host, host->data_cmd->mrq);
2565 }
2566
2567 if (host->cmd) {
2568 host->cmd->error = err;
2569 sdhci_finish_mrq(host, host->cmd->mrq);
2570 }
2571 }
2572
2573 static void sdhci_card_event(struct mmc_host *mmc)
2574 {
2575 struct sdhci_host *host = mmc_priv(mmc);
2576 unsigned long flags;
2577 int present;
2578
2579 /* First check if client has provided their own card event */
2580 if (host->ops->card_event)
2581 host->ops->card_event(host);
2582
2583 present = mmc->ops->get_cd(mmc);
2584
2585 spin_lock_irqsave(&host->lock, flags);
2586
2587 /* Check sdhci_has_requests() first in case we are runtime suspended */
2588 if (sdhci_has_requests(host) && !present) {
2589 pr_err("%s: Card removed during transfer!\n",
2590 mmc_hostname(host->mmc));
2591 pr_err("%s: Resetting controller.\n",
2592 mmc_hostname(host->mmc));
2593
2594 sdhci_do_reset(host, SDHCI_RESET_CMD);
2595 sdhci_do_reset(host, SDHCI_RESET_DATA);
2596
2597 sdhci_error_out_mrqs(host, -ENOMEDIUM);
2598 }
2599
2600 spin_unlock_irqrestore(&host->lock, flags);
2601 }
2602
2603 static const struct mmc_host_ops sdhci_ops = {
2604 .request = sdhci_request,
2605 .post_req = sdhci_post_req,
2606 .pre_req = sdhci_pre_req,
2607 .set_ios = sdhci_set_ios,
2608 .get_cd = sdhci_get_cd,
2609 .get_ro = sdhci_get_ro,
2610 .hw_reset = sdhci_hw_reset,
2611 .enable_sdio_irq = sdhci_enable_sdio_irq,
2612 .ack_sdio_irq = sdhci_ack_sdio_irq,
2613 .start_signal_voltage_switch = sdhci_start_signal_voltage_switch,
2614 .prepare_hs400_tuning = sdhci_prepare_hs400_tuning,
2615 .execute_tuning = sdhci_execute_tuning,
2616 .card_event = sdhci_card_event,
2617 .card_busy = sdhci_card_busy,
2618 };
2619
2620 /*****************************************************************************\
2621 * *
2622 * Request done *
2623 * *
2624 \*****************************************************************************/
2625
2626 static bool sdhci_request_done(struct sdhci_host *host)
2627 {
2628 unsigned long flags;
2629 struct mmc_request *mrq;
2630 int i;
2631
2632 spin_lock_irqsave(&host->lock, flags);
2633
2634 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
2635 mrq = host->mrqs_done[i];
2636 if (mrq)
2637 break;
2638 }
2639
2640 if (!mrq) {
2641 spin_unlock_irqrestore(&host->lock, flags);
2642 return true;
2643 }
2644
2645 /*
2646 * Always unmap the data buffers if they were mapped by
2647 * sdhci_prepare_data() whenever we finish with a request.
2648 * This avoids leaking DMA mappings on error.
2649 */
2650 if (host->flags & SDHCI_REQ_USE_DMA) {
2651 struct mmc_data *data = mrq->data;
2652
2653 if (data && data->host_cookie == COOKIE_MAPPED) {
2654 if (host->bounce_buffer) {
2655 /*
2656 * On reads, copy the bounced data into the
2657 * sglist
2658 */
2659 if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) {
2660 unsigned int length = data->bytes_xfered;
2661
2662 if (length > host->bounce_buffer_size) {
2663 pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n",
2664 mmc_hostname(host->mmc),
2665 host->bounce_buffer_size,
2666 data->bytes_xfered);
2667 /* Cap it down and continue */
2668 length = host->bounce_buffer_size;
2669 }
2670 dma_sync_single_for_cpu(
2671 host->mmc->parent,
2672 host->bounce_addr,
2673 host->bounce_buffer_size,
2674 DMA_FROM_DEVICE);
2675 sg_copy_from_buffer(data->sg,
2676 data->sg_len,
2677 host->bounce_buffer,
2678 length);
2679 } else {
2680 /* No copying, just switch ownership */
2681 dma_sync_single_for_cpu(
2682 host->mmc->parent,
2683 host->bounce_addr,
2684 host->bounce_buffer_size,
2685 mmc_get_dma_dir(data));
2686 }
2687 } else {
2688 /* Unmap the raw data */
2689 dma_unmap_sg(mmc_dev(host->mmc), data->sg,
2690 data->sg_len,
2691 mmc_get_dma_dir(data));
2692 }
2693 data->host_cookie = COOKIE_UNMAPPED;
2694 }
2695 }
2696
2697 /*
2698 * The controller needs a reset of internal state machines
2699 * upon error conditions.
2700 */
2701 if (sdhci_needs_reset(host, mrq)) {
2702 /*
2703 * Do not finish until command and data lines are available for
2704 * reset. Note there can only be one other mrq, so it cannot
2705 * also be in mrqs_done, otherwise host->cmd and host->data_cmd
2706 * would both be null.
2707 */
2708 if (host->cmd || host->data_cmd) {
2709 spin_unlock_irqrestore(&host->lock, flags);
2710 return true;
2711 }
2712
2713 /* Some controllers need this kick or reset won't work here */
2714 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
2715 /* This is to force an update */
2716 host->ops->set_clock(host, host->clock);
2717
2718 /* Spec says we should do both at the same time, but Ricoh
2719 controllers do not like that. */
2720 sdhci_do_reset(host, SDHCI_RESET_CMD);
2721 sdhci_do_reset(host, SDHCI_RESET_DATA);
2722
2723 host->pending_reset = false;
2724 }
2725
2726 host->mrqs_done[i] = NULL;
2727
2728 spin_unlock_irqrestore(&host->lock, flags);
2729
2730 mmc_request_done(host->mmc, mrq);
2731
2732 return false;
2733 }
2734
2735 static void sdhci_complete_work(struct work_struct *work)
2736 {
2737 struct sdhci_host *host = container_of(work, struct sdhci_host,
2738 complete_work);
2739
2740 while (!sdhci_request_done(host))
2741 ;
2742 }
2743
2744 static void sdhci_timeout_timer(struct timer_list *t)
2745 {
2746 struct sdhci_host *host;
2747 unsigned long flags;
2748
2749 host = from_timer(host, t, timer);
2750
2751 spin_lock_irqsave(&host->lock, flags);
2752
2753 if (host->cmd && !sdhci_data_line_cmd(host->cmd)) {
2754 pr_err("%s: Timeout waiting for hardware cmd interrupt.\n",
2755 mmc_hostname(host->mmc));
2756 sdhci_dumpregs(host);
2757
2758 host->cmd->error = -ETIMEDOUT;
2759 sdhci_finish_mrq(host, host->cmd->mrq);
2760 }
2761
2762 spin_unlock_irqrestore(&host->lock, flags);
2763 }
2764
2765 static void sdhci_timeout_data_timer(struct timer_list *t)
2766 {
2767 struct sdhci_host *host;
2768 unsigned long flags;
2769
2770 host = from_timer(host, t, data_timer);
2771
2772 spin_lock_irqsave(&host->lock, flags);
2773
2774 if (host->data || host->data_cmd ||
2775 (host->cmd && sdhci_data_line_cmd(host->cmd))) {
2776 pr_err("%s: Timeout waiting for hardware interrupt.\n",
2777 mmc_hostname(host->mmc));
2778 sdhci_dumpregs(host);
2779
2780 if (host->data) {
2781 host->data->error = -ETIMEDOUT;
2782 sdhci_finish_data(host);
2783 queue_work(host->complete_wq, &host->complete_work);
2784 } else if (host->data_cmd) {
2785 host->data_cmd->error = -ETIMEDOUT;
2786 sdhci_finish_mrq(host, host->data_cmd->mrq);
2787 } else {
2788 host->cmd->error = -ETIMEDOUT;
2789 sdhci_finish_mrq(host, host->cmd->mrq);
2790 }
2791 }
2792
2793 spin_unlock_irqrestore(&host->lock, flags);
2794 }
2795
2796 /*****************************************************************************\
2797 * *
2798 * Interrupt handling *
2799 * *
2800 \*****************************************************************************/
2801
2802 static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p)
2803 {
2804 /* Handle auto-CMD12 error */
2805 if (intmask & SDHCI_INT_AUTO_CMD_ERR && host->data_cmd) {
2806 struct mmc_request *mrq = host->data_cmd->mrq;
2807 u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
2808 int data_err_bit = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
2809 SDHCI_INT_DATA_TIMEOUT :
2810 SDHCI_INT_DATA_CRC;
2811
2812 /* Treat auto-CMD12 error the same as data error */
2813 if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
2814 *intmask_p |= data_err_bit;
2815 return;
2816 }
2817 }
2818
2819 if (!host->cmd) {
2820 /*
2821 * SDHCI recovers from errors by resetting the cmd and data
2822 * circuits. Until that is done, there very well might be more
2823 * interrupts, so ignore them in that case.
2824 */
2825 if (host->pending_reset)
2826 return;
2827 pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
2828 mmc_hostname(host->mmc), (unsigned)intmask);
2829 sdhci_dumpregs(host);
2830 return;
2831 }
2832
2833 if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
2834 SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
2835 if (intmask & SDHCI_INT_TIMEOUT)
2836 host->cmd->error = -ETIMEDOUT;
2837 else
2838 host->cmd->error = -EILSEQ;
2839
2840 /* Treat data command CRC error the same as data CRC error */
2841 if (host->cmd->data &&
2842 (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
2843 SDHCI_INT_CRC) {
2844 host->cmd = NULL;
2845 *intmask_p |= SDHCI_INT_DATA_CRC;
2846 return;
2847 }
2848
2849 __sdhci_finish_mrq(host, host->cmd->mrq);
2850 return;
2851 }
2852
2853 /* Handle auto-CMD23 error */
2854 if (intmask & SDHCI_INT_AUTO_CMD_ERR) {
2855 struct mmc_request *mrq = host->cmd->mrq;
2856 u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
2857 int err = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
2858 -ETIMEDOUT :
2859 -EILSEQ;
2860
2861 if (mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
2862 mrq->sbc->error = err;
2863 __sdhci_finish_mrq(host, mrq);
2864 return;
2865 }
2866 }
2867
2868 if (intmask & SDHCI_INT_RESPONSE)
2869 sdhci_finish_command(host);
2870 }
2871
2872 static void sdhci_adma_show_error(struct sdhci_host *host)
2873 {
2874 void *desc = host->adma_table;
2875 dma_addr_t dma = host->adma_addr;
2876
2877 sdhci_dumpregs(host);
2878
2879 while (true) {
2880 struct sdhci_adma2_64_desc *dma_desc = desc;
2881
2882 if (host->flags & SDHCI_USE_64_BIT_DMA)
2883 SDHCI_DUMP("%08llx: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
2884 (unsigned long long)dma,
2885 le32_to_cpu(dma_desc->addr_hi),
2886 le32_to_cpu(dma_desc->addr_lo),
2887 le16_to_cpu(dma_desc->len),
2888 le16_to_cpu(dma_desc->cmd));
2889 else
2890 SDHCI_DUMP("%08llx: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
2891 (unsigned long long)dma,
2892 le32_to_cpu(dma_desc->addr_lo),
2893 le16_to_cpu(dma_desc->len),
2894 le16_to_cpu(dma_desc->cmd));
2895
2896 desc += host->desc_sz;
2897 dma += host->desc_sz;
2898
2899 if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
2900 break;
2901 }
2902 }
2903
2904 static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
2905 {
2906 u32 command;
2907
2908 /* CMD19 generates _only_ Buffer Read Ready interrupt */
2909 if (intmask & SDHCI_INT_DATA_AVAIL) {
2910 command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
2911 if (command == MMC_SEND_TUNING_BLOCK ||
2912 command == MMC_SEND_TUNING_BLOCK_HS200) {
2913 host->tuning_done = 1;
2914 wake_up(&host->buf_ready_int);
2915 return;
2916 }
2917 }
2918
2919 if (!host->data) {
2920 struct mmc_command *data_cmd = host->data_cmd;
2921
2922 /*
2923 * The "data complete" interrupt is also used to
2924 * indicate that a busy state has ended. See comment
2925 * above in sdhci_cmd_irq().
2926 */
2927 if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) {
2928 if (intmask & SDHCI_INT_DATA_TIMEOUT) {
2929 host->data_cmd = NULL;
2930 data_cmd->error = -ETIMEDOUT;
2931 __sdhci_finish_mrq(host, data_cmd->mrq);
2932 return;
2933 }
2934 if (intmask & SDHCI_INT_DATA_END) {
2935 host->data_cmd = NULL;
2936 /*
2937 * Some cards handle busy-end interrupt
2938 * before the command has completed, so make
2939 * sure we do things in the proper order.
2940 */
2941 if (host->cmd == data_cmd)
2942 return;
2943
2944 __sdhci_finish_mrq(host, data_cmd->mrq);
2945 return;
2946 }
2947 }
2948
2949 /*
2950 * SDHCI recovers from errors by resetting the cmd and data
2951 * circuits. Until that is done, there very well might be more
2952 * interrupts, so ignore them in that case.
2953 */
2954 if (host->pending_reset)
2955 return;
2956
2957 pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
2958 mmc_hostname(host->mmc), (unsigned)intmask);
2959 sdhci_dumpregs(host);
2960
2961 return;
2962 }
2963
2964 if (intmask & SDHCI_INT_DATA_TIMEOUT)
2965 host->data->error = -ETIMEDOUT;
2966 else if (intmask & SDHCI_INT_DATA_END_BIT)
2967 host->data->error = -EILSEQ;
2968 else if ((intmask & SDHCI_INT_DATA_CRC) &&
2969 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
2970 != MMC_BUS_TEST_R)
2971 host->data->error = -EILSEQ;
2972 else if (intmask & SDHCI_INT_ADMA_ERROR) {
2973 pr_err("%s: ADMA error: 0x%08x\n", mmc_hostname(host->mmc),
2974 intmask);
2975 sdhci_adma_show_error(host);
2976 host->data->error = -EIO;
2977 if (host->ops->adma_workaround)
2978 host->ops->adma_workaround(host, intmask);
2979 }
2980
2981 if (host->data->error)
2982 sdhci_finish_data(host);
2983 else {
2984 if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
2985 sdhci_transfer_pio(host);
2986
2987 /*
2988 * We currently don't do anything fancy with DMA
2989 * boundaries, but as we can't disable the feature
2990 * we need to at least restart the transfer.
2991 *
2992 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
2993 * should return a valid address to continue from, but as
2994 * some controllers are faulty, don't trust them.
2995 */
2996 if (intmask & SDHCI_INT_DMA_END) {
2997 dma_addr_t dmastart, dmanow;
2998
2999 dmastart = sdhci_sdma_address(host);
3000 dmanow = dmastart + host->data->bytes_xfered;
3001 /*
3002 * Force update to the next DMA block boundary.
3003 */
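/*
 * Worked example with illustrative values: with the default 512 KiB
 * boundary, if dmastart is 0x10040000 and no bytes have been accounted
 * yet, dmanow starts as 0x10040000, is rounded up to the next 512 KiB
 * boundary 0x10080000, and bytes_xfered is updated to 0x40000.
 */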
3004 dmanow = (dmanow &
3005 ~((dma_addr_t)SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
3006 SDHCI_DEFAULT_BOUNDARY_SIZE;
3007 host->data->bytes_xfered = dmanow - dmastart;
3008 DBG("DMA base %pad, transferred 0x%06x bytes, next %pad\n",
3009 &dmastart, host->data->bytes_xfered, &dmanow);
3010 sdhci_set_sdma_addr(host, dmanow);
3011 }
3012
3013 if (intmask & SDHCI_INT_DATA_END) {
3014 if (host->cmd == host->data_cmd) {
3015 /*
3016 * Data managed to finish before the
3017 * command completed. Make sure we do
3018 * things in the proper order.
3019 */
3020 host->data_early = 1;
3021 } else {
3022 sdhci_finish_data(host);
3023 }
3024 }
3025 }
3026 }
3027
3028 static inline bool sdhci_defer_done(struct sdhci_host *host,
3029 struct mmc_request *mrq)
3030 {
3031 struct mmc_data *data = mrq->data;
3032
3033 return host->pending_reset ||
3034 ((host->flags & SDHCI_REQ_USE_DMA) && data &&
3035 data->host_cookie == COOKIE_MAPPED);
3036 }
3037
3038 static irqreturn_t sdhci_irq(int irq, void *dev_id)
3039 {
3040 struct mmc_request *mrqs_done[SDHCI_MAX_MRQS] = {0};
3041 irqreturn_t result = IRQ_NONE;
3042 struct sdhci_host *host = dev_id;
3043 u32 intmask, mask, unexpected = 0;
3044 int max_loops = 16;
3045 int i;
3046
3047 spin_lock(&host->lock);
3048
3049 if (host->runtime_suspended) {
3050 spin_unlock(&host->lock);
3051 return IRQ_NONE;
3052 }
3053
3054 intmask = sdhci_readl(host, SDHCI_INT_STATUS);
3055 if (!intmask || intmask == 0xffffffff) {
3056 result = IRQ_NONE;
3057 goto out;
3058 }
3059
3060 do {
3061 DBG("IRQ status 0x%08x\n", intmask);
3062
3063 if (host->ops->irq) {
3064 intmask = host->ops->irq(host, intmask);
3065 if (!intmask)
3066 goto cont;
3067 }
3068
3069 /* Clear selected interrupts. */
3070 mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
3071 SDHCI_INT_BUS_POWER);
3072 sdhci_writel(host, mask, SDHCI_INT_STATUS);
3073
3074 if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
3075 u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
3076 SDHCI_CARD_PRESENT;
3077
3078 /*
3079 * There is an observation on i.MX eSDHC: the INSERT
3080 * bit is immediately set again when it gets
3081 * cleared, if a card is inserted. We have to mask
3082 * the irq to prevent an interrupt storm which would
3083 * freeze the system. The REMOVE bit behaves the
3084 * same way.
3085 *
3086 * More testing is needed here to ensure it works
3087 * for other platforms though.
3088 */
3089 host->ier &= ~(SDHCI_INT_CARD_INSERT |
3090 SDHCI_INT_CARD_REMOVE);
3091 host->ier |= present ? SDHCI_INT_CARD_REMOVE :
3092 SDHCI_INT_CARD_INSERT;
3093 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3094 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3095
3096 sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
3097 SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
3098
3099 host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
3100 SDHCI_INT_CARD_REMOVE);
3101 result = IRQ_WAKE_THREAD;
3102 }
3103
3104 if (intmask & SDHCI_INT_CMD_MASK)
3105 sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK, &intmask);
3106
3107 if (intmask & SDHCI_INT_DATA_MASK)
3108 sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
3109
3110 if (intmask & SDHCI_INT_BUS_POWER)
3111 pr_err("%s: Card is consuming too much power!\n",
3112 mmc_hostname(host->mmc));
3113
3114 if (intmask & SDHCI_INT_RETUNE)
3115 mmc_retune_needed(host->mmc);
3116
3117 if ((intmask & SDHCI_INT_CARD_INT) &&
3118 (host->ier & SDHCI_INT_CARD_INT)) {
3119 sdhci_enable_sdio_irq_nolock(host, false);
3120 sdio_signal_irq(host->mmc);
3121 }
3122
3123 intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
3124 SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
3125 SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
3126 SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT);
3127
3128 if (intmask) {
3129 unexpected |= intmask;
3130 sdhci_writel(host, intmask, SDHCI_INT_STATUS);
3131 }
3132 cont:
3133 if (result == IRQ_NONE)
3134 result = IRQ_HANDLED;
3135
3136 intmask = sdhci_readl(host, SDHCI_INT_STATUS);
3137 } while (intmask && --max_loops);
3138
3139 /* Determine if mrqs can be completed immediately */
3140 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
3141 struct mmc_request *mrq = host->mrqs_done[i];
3142
3143 if (!mrq)
3144 continue;
3145
3146 if (sdhci_defer_done(host, mrq)) {
3147 result = IRQ_WAKE_THREAD;
3148 } else {
3149 mrqs_done[i] = mrq;
3150 host->mrqs_done[i] = NULL;
3151 }
3152 }
3153 out:
3154 spin_unlock(&host->lock);
3155
3156 /* Process mrqs ready for immediate completion */
3157 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
3158 if (mrqs_done[i])
3159 mmc_request_done(host->mmc, mrqs_done[i]);
3160 }
3161
3162 if (unexpected) {
3163 pr_err("%s: Unexpected interrupt 0x%08x.\n",
3164 mmc_hostname(host->mmc), unexpected);
3165 sdhci_dumpregs(host);
3166 }
3167
3168 return result;
3169 }
3170
3171 static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
3172 {
3173 struct sdhci_host *host = dev_id;
3174 unsigned long flags;
3175 u32 isr;
3176
3177 while (!sdhci_request_done(host))
3178 ;
3179
3180 spin_lock_irqsave(&host->lock, flags);
3181 isr = host->thread_isr;
3182 host->thread_isr = 0;
3183 spin_unlock_irqrestore(&host->lock, flags);
3184
3185 if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
3186 struct mmc_host *mmc = host->mmc;
3187
3188 mmc->ops->card_event(mmc);
3189 mmc_detect_change(mmc, msecs_to_jiffies(200));
3190 }
3191
3192 return IRQ_HANDLED;
3193 }
3194
3195 /*****************************************************************************\
3196 * *
3197 * Suspend/resume *
3198 * *
3199 \*****************************************************************************/
3200
3201 #ifdef CONFIG_PM
3202
3203 static bool sdhci_cd_irq_can_wakeup(struct sdhci_host *host)
3204 {
3205 return mmc_card_is_removable(host->mmc) &&
3206 !(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
3207 !mmc_can_gpio_cd(host->mmc);
3208 }
3209
3210 /*
3211 * To enable wakeup events, the corresponding events have to be enabled in
3212 * the Interrupt Status Enable register too. See 'Table 1-6: Wakeup Signal
3213 * Table' in the SD Host Controller Standard Specification.
3214 * It is useless to restore SDHCI_INT_ENABLE state in
3215 * sdhci_disable_irq_wakeups() since it will be set by
3216 * sdhci_enable_card_detection() or sdhci_init().
3217 */
3218 static bool sdhci_enable_irq_wakeups(struct sdhci_host *host)
3219 {
3220 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE |
3221 SDHCI_WAKE_ON_INT;
3222 u32 irq_val = 0;
3223 u8 wake_val = 0;
3224 u8 val;
3225
3226 if (sdhci_cd_irq_can_wakeup(host)) {
3227 wake_val |= SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE;
3228 irq_val |= SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE;
3229 }
3230
3231 if (mmc_card_wake_sdio_irq(host->mmc)) {
3232 wake_val |= SDHCI_WAKE_ON_INT;
3233 irq_val |= SDHCI_INT_CARD_INT;
3234 }
3235
3236 if (!irq_val)
3237 return false;
3238
3239 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
3240 val &= ~mask;
3241 val |= wake_val;
3242 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
3243
3244 sdhci_writel(host, irq_val, SDHCI_INT_ENABLE);
3245
3246 host->irq_wake_enabled = !enable_irq_wake(host->irq);
3247
3248 return host->irq_wake_enabled;
3249 }
3250
3251 static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
3252 {
3253 u8 val;
3254 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
3255 | SDHCI_WAKE_ON_INT;
3256
3257 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
3258 val &= ~mask;
3259 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
3260
3261 disable_irq_wake(host->irq);
3262
3263 host->irq_wake_enabled = false;
3264 }
3265
3266 int sdhci_suspend_host(struct sdhci_host *host)
3267 {
3268 sdhci_disable_card_detection(host);
3269
3270 mmc_retune_timer_stop(host->mmc);
3271
3272 if (!device_may_wakeup(mmc_dev(host->mmc)) ||
3273 !sdhci_enable_irq_wakeups(host)) {
3274 host->ier = 0;
3275 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
3276 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
3277 free_irq(host->irq, host);
3278 }
3279
3280 return 0;
3281 }
3282
3283 EXPORT_SYMBOL_GPL(sdhci_suspend_host);
3284
3285 int sdhci_resume_host(struct sdhci_host *host)
3286 {
3287 struct mmc_host *mmc = host->mmc;
3288 int ret = 0;
3289
3290 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3291 if (host->ops->enable_dma)
3292 host->ops->enable_dma(host);
3293 }
3294
3295 if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
3296 (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
3297 /* Card keeps power but host controller does not */
3298 sdhci_init(host, 0);
3299 host->pwr = 0;
3300 host->clock = 0;
3301 mmc->ops->set_ios(mmc, &mmc->ios);
3302 } else {
3303 sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
3304 }
3305
3306 if (host->irq_wake_enabled) {
3307 sdhci_disable_irq_wakeups(host);
3308 } else {
3309 ret = request_threaded_irq(host->irq, sdhci_irq,
3310 sdhci_thread_irq, IRQF_SHARED,
3311 mmc_hostname(host->mmc), host);
3312 if (ret)
3313 return ret;
3314 }
3315
3316 sdhci_enable_card_detection(host);
3317
3318 return ret;
3319 }
3320
3321 EXPORT_SYMBOL_GPL(sdhci_resume_host);
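/*
 * Illustrative sketch (not part of this file): a typical platform glue
 * driver wires the two helpers above into its system PM ops roughly as
 * below; the "foo" names are hypothetical placeholders.
 *
 *	static int sdhci_foo_suspend(struct device *dev)
 *	{
 *		struct sdhci_host *host = dev_get_drvdata(dev);
 *
 *		return sdhci_suspend_host(host);
 *	}
 *
 *	static int sdhci_foo_resume(struct device *dev)
 *	{
 *		struct sdhci_host *host = dev_get_drvdata(dev);
 *
 *		return sdhci_resume_host(host);
 *	}
 *
 *	static SIMPLE_DEV_PM_OPS(sdhci_foo_pm_ops, sdhci_foo_suspend,
 *				 sdhci_foo_resume);
 */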
3322
3323 int sdhci_runtime_suspend_host(struct sdhci_host *host)
3324 {
3325 unsigned long flags;
3326
3327 mmc_retune_timer_stop(host->mmc);
3328
3329 spin_lock_irqsave(&host->lock, flags);
3330 host->ier &= SDHCI_INT_CARD_INT;
3331 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3332 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3333 spin_unlock_irqrestore(&host->lock, flags);
3334
3335 synchronize_hardirq(host->irq);
3336
3337 spin_lock_irqsave(&host->lock, flags);
3338 host->runtime_suspended = true;
3339 spin_unlock_irqrestore(&host->lock, flags);
3340
3341 return 0;
3342 }
3343 EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
3344
3345 int sdhci_runtime_resume_host(struct sdhci_host *host, int soft_reset)
3346 {
3347 struct mmc_host *mmc = host->mmc;
3348 unsigned long flags;
3349 int host_flags = host->flags;
3350
3351 if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3352 if (host->ops->enable_dma)
3353 host->ops->enable_dma(host);
3354 }
3355
3356 sdhci_init(host, soft_reset);
3357
3358 if (mmc->ios.power_mode != MMC_POWER_UNDEFINED &&
3359 mmc->ios.power_mode != MMC_POWER_OFF) {
3360 /* Force clock and power re-program */
3361 host->pwr = 0;
3362 host->clock = 0;
3363 mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
3364 mmc->ops->set_ios(mmc, &mmc->ios);
3365
3366 if ((host_flags & SDHCI_PV_ENABLED) &&
3367 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
3368 spin_lock_irqsave(&host->lock, flags);
3369 sdhci_enable_preset_value(host, true);
3370 spin_unlock_irqrestore(&host->lock, flags);
3371 }
3372
3373 if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
3374 mmc->ops->hs400_enhanced_strobe)
3375 mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);
3376 }
3377
3378 spin_lock_irqsave(&host->lock, flags);
3379
3380 host->runtime_suspended = false;
3381
3382 /* Enable SDIO IRQ */
3383 if (sdio_irq_claimed(mmc))
3384 sdhci_enable_sdio_irq_nolock(host, true);
3385
3386 /* Enable Card Detection */
3387 sdhci_enable_card_detection(host);
3388
3389 spin_unlock_irqrestore(&host->lock, flags);
3390
3391 return 0;
3392 }
3393 EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);
3394
3395 #endif /* CONFIG_PM */
3396
3397 /*****************************************************************************\
3398 * *
3399 * Command Queue Engine (CQE) helpers *
3400 * *
3401 \*****************************************************************************/
3402
3403 void sdhci_cqe_enable(struct mmc_host *mmc)
3404 {
3405 struct sdhci_host *host = mmc_priv(mmc);
3406 unsigned long flags;
3407 u8 ctrl;
3408
3409 spin_lock_irqsave(&host->lock, flags);
3410
3411 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
3412 ctrl &= ~SDHCI_CTRL_DMA_MASK;
3413 /*
3414 * Hosts from v4.10 support the ADMA3 DMA type.
3415 * ADMA3 uses integrated descriptors, which are better suited to
3416 * command queuing since command and transfer descriptors are fetched together.
3417 */
3418 if (host->v4_mode && (host->caps1 & SDHCI_CAN_DO_ADMA3))
3419 ctrl |= SDHCI_CTRL_ADMA3;
3420 else if (host->flags & SDHCI_USE_64_BIT_DMA)
3421 ctrl |= SDHCI_CTRL_ADMA64;
3422 else
3423 ctrl |= SDHCI_CTRL_ADMA32;
3424 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
3425
3426 sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, 512),
3427 SDHCI_BLOCK_SIZE);
3428
3429 /* Set maximum timeout */
3430 sdhci_set_timeout(host, NULL);
3431
3432 host->ier = host->cqe_ier;
3433
3434 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3435 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3436
3437 host->cqe_on = true;
3438
3439 pr_debug("%s: sdhci: CQE on, IRQ mask %#x, IRQ status %#x\n",
3440 mmc_hostname(mmc), host->ier,
3441 sdhci_readl(host, SDHCI_INT_STATUS));
3442
3443 spin_unlock_irqrestore(&host->lock, flags);
3444 }
3445 EXPORT_SYMBOL_GPL(sdhci_cqe_enable);
3446
3447 void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery)
3448 {
3449 struct sdhci_host *host = mmc_priv(mmc);
3450 unsigned long flags;
3451
3452 spin_lock_irqsave(&host->lock, flags);
3453
3454 sdhci_set_default_irqs(host);
3455
3456 host->cqe_on = false;
3457
3458 if (recovery) {
3459 sdhci_do_reset(host, SDHCI_RESET_CMD);
3460 sdhci_do_reset(host, SDHCI_RESET_DATA);
3461 }
3462
3463 pr_debug("%s: sdhci: CQE off, IRQ mask %#x, IRQ status %#x\n",
3464 mmc_hostname(mmc), host->ier,
3465 sdhci_readl(host, SDHCI_INT_STATUS));
3466
3467 spin_unlock_irqrestore(&host->lock, flags);
3468 }
3469 EXPORT_SYMBOL_GPL(sdhci_cqe_disable);
3470
3471 bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
3472 int *data_error)
3473 {
3474 u32 mask;
3475
3476 if (!host->cqe_on)
3477 return false;
3478
3479 if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC))
3480 *cmd_error = -EILSEQ;
3481 else if (intmask & SDHCI_INT_TIMEOUT)
3482 *cmd_error = -ETIMEDOUT;
3483 else
3484 *cmd_error = 0;
3485
3486 if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC))
3487 *data_error = -EILSEQ;
3488 else if (intmask & SDHCI_INT_DATA_TIMEOUT)
3489 *data_error = -ETIMEDOUT;
3490 else if (intmask & SDHCI_INT_ADMA_ERROR)
3491 *data_error = -EIO;
3492 else
3493 *data_error = 0;
3494
3495 /* Clear selected interrupts. */
3496 mask = intmask & host->cqe_ier;
3497 sdhci_writel(host, mask, SDHCI_INT_STATUS);
3498
3499 if (intmask & SDHCI_INT_BUS_POWER)
3500 pr_err("%s: Card is consuming too much power!\n",
3501 mmc_hostname(host->mmc));
3502
3503 intmask &= ~(host->cqe_ier | SDHCI_INT_ERROR);
3504 if (intmask) {
3505 sdhci_writel(host, intmask, SDHCI_INT_STATUS);
3506 pr_err("%s: CQE: Unexpected interrupt 0x%08x.\n",
3507 mmc_hostname(host->mmc), intmask);
3508 sdhci_dumpregs(host);
3509 }
3510
3511 return true;
3512 }
3513 EXPORT_SYMBOL_GPL(sdhci_cqe_irq);
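/*
 * Illustrative sketch (not part of this file): a CQHCI glue driver
 * typically calls sdhci_cqe_irq() from its ->irq() hook and forwards the
 * decoded errors to the CQE layer; the "foo" name is a hypothetical
 * placeholder.
 *
 *	static u32 sdhci_foo_cqhci_irq(struct sdhci_host *host, u32 intmask)
 *	{
 *		int cmd_error = 0;
 *		int data_error = 0;
 *
 *		if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
 *			return intmask;
 *
 *		cqhci_irq(host->mmc, intmask, cmd_error, data_error);
 *
 *		return 0;
 *	}
 */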
3514
3515 /*****************************************************************************\
3516 * *
3517 * Device allocation/registration *
3518 * *
3519 \*****************************************************************************/
3520
3521 struct sdhci_host *sdhci_alloc_host(struct device *dev,
3522 size_t priv_size)
3523 {
3524 struct mmc_host *mmc;
3525 struct sdhci_host *host;
3526
3527 WARN_ON(dev == NULL);
3528
3529 mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
3530 if (!mmc)
3531 return ERR_PTR(-ENOMEM);
3532
3533 host = mmc_priv(mmc);
3534 host->mmc = mmc;
3535 host->mmc_host_ops = sdhci_ops;
3536 mmc->ops = &host->mmc_host_ops;
3537
3538 host->flags = SDHCI_SIGNALING_330;
3539
3540 host->cqe_ier = SDHCI_CQE_INT_MASK;
3541 host->cqe_err_ier = SDHCI_CQE_INT_ERR_MASK;
3542
3543 host->tuning_delay = -1;
3544 host->tuning_loop_count = MAX_TUNING_LOOP;
3545
3546 host->sdma_boundary = SDHCI_DEFAULT_BOUNDARY_ARG;
3547
3548 /*
3549 * The DMA table descriptor count is calculated as the maximum
3550 * number of segments times 2, to allow for an alignment
3551 * descriptor for each segment, plus 1 for a nop end descriptor.
3552 */
3553 host->adma_table_cnt = SDHCI_MAX_SEGS * 2 + 1;
3554
3555 return host;
3556 }
3557
3558 EXPORT_SYMBOL_GPL(sdhci_alloc_host);
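/*
 * Illustrative sketch (not part of this file): a minimal probe path for a
 * glue driver built on this allocator, with error handling elided and the
 * "foo" names as hypothetical placeholders.
 *
 *	host = sdhci_alloc_host(&pdev->dev, sizeof(struct sdhci_foo_priv));
 *	if (IS_ERR(host))
 *		return PTR_ERR(host);
 *
 *	host->hw_name = "foo-sdhci";
 *	host->ops = &sdhci_foo_ops;
 *	host->ioaddr = devm_platform_ioremap_resource(pdev, 0);
 *	host->irq = platform_get_irq(pdev, 0);
 *
 *	ret = sdhci_add_host(host);
 *	if (ret)
 *		sdhci_free_host(host);
 */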
3559
3560 static int sdhci_set_dma_mask(struct sdhci_host *host)
3561 {
3562 struct mmc_host *mmc = host->mmc;
3563 struct device *dev = mmc_dev(mmc);
3564 int ret = -EINVAL;
3565
3566 if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA)
3567 host->flags &= ~SDHCI_USE_64_BIT_DMA;
3568
3569 /* Try 64-bit mask if hardware is capable of it */
3570 if (host->flags & SDHCI_USE_64_BIT_DMA) {
3571 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
3572 if (ret) {
3573 pr_warn("%s: Failed to set 64-bit DMA mask.\n",
3574 mmc_hostname(mmc));
3575 host->flags &= ~SDHCI_USE_64_BIT_DMA;
3576 }
3577 }
3578
3579 /* 32-bit mask as default & fallback */
3580 if (ret) {
3581 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
3582 if (ret)
3583 pr_warn("%s: Failed to set 32-bit DMA mask.\n",
3584 mmc_hostname(mmc));
3585 }
3586
3587 return ret;
3588 }
3589
3590 void __sdhci_read_caps(struct sdhci_host *host, const u16 *ver,
3591 const u32 *caps, const u32 *caps1)
3592 {
3593 u16 v;
3594 u64 dt_caps_mask = 0;
3595 u64 dt_caps = 0;
3596
3597 if (host->read_caps)
3598 return;
3599
3600 host->read_caps = true;
3601
3602 if (debug_quirks)
3603 host->quirks = debug_quirks;
3604
3605 if (debug_quirks2)
3606 host->quirks2 = debug_quirks2;
3607
3608 sdhci_do_reset(host, SDHCI_RESET_ALL);
3609
3610 if (host->v4_mode)
3611 sdhci_do_enable_v4_mode(host);
3612
3613 of_property_read_u64(mmc_dev(host->mmc)->of_node,
3614 "sdhci-caps-mask", &dt_caps_mask);
3615 of_property_read_u64(mmc_dev(host->mmc)->of_node,
3616 "sdhci-caps", &dt_caps);
3617
3618 v = ver ? *ver : sdhci_readw(host, SDHCI_HOST_VERSION);
3619 host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;
3620
3621 if (host->quirks & SDHCI_QUIRK_MISSING_CAPS)
3622 return;
3623
3624 if (caps) {
3625 host->caps = *caps;
3626 } else {
3627 host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
3628 host->caps &= ~lower_32_bits(dt_caps_mask);
3629 host->caps |= lower_32_bits(dt_caps);
3630 }
3631
3632 if (host->version < SDHCI_SPEC_300)
3633 return;
3634
3635 if (caps1) {
3636 host->caps1 = *caps1;
3637 } else {
3638 host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
3639 host->caps1 &= ~upper_32_bits(dt_caps_mask);
3640 host->caps1 |= upper_32_bits(dt_caps);
3641 }
3642 }
3643 EXPORT_SYMBOL_GPL(__sdhci_read_caps);
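/*
 * Worked example with illustrative values: a device tree that sets
 * sdhci-caps-mask = <0x00000005 0x00000000> and
 * sdhci-caps = <0x00000001 0x00000000> clears bits 34 and 32 of the
 * 64-bit capability value read from the hardware and then sets bit 32
 * again, i.e. caps1 = (caps1 & ~0x00000005) | 0x00000001 while caps is
 * left untouched.
 */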
3644
3645 static void sdhci_allocate_bounce_buffer(struct sdhci_host *host)
3646 {
3647 struct mmc_host *mmc = host->mmc;
3648 unsigned int max_blocks;
3649 unsigned int bounce_size;
3650 int ret;
3651
3652 /*
3653 * Cap the bounce buffer at 64KB. Using a bigger bounce buffer
3654 * has diminishing returns; this is probably because SD/MMC
3655 * cards are usually optimized to handle this size of request.
3656 */
3657 bounce_size = SZ_64K;
3658 /*
3659 * Adjust the bounce size downwards to the maximum request size if
3660 * that is smaller; otherwise the maximum request size is later
3661 * clamped down to the bounce buffer size.
3662 */
3663 if (mmc->max_req_size < bounce_size)
3664 bounce_size = mmc->max_req_size;
3665 max_blocks = bounce_size / 512;
3666
3667 /*
3668 * When we support just one segment, we can get significant
3669 * speedups with the help of a bounce buffer that groups
3670 * scattered reads/writes together.
3671 */
3672 host->bounce_buffer = devm_kmalloc(mmc->parent,
3673 bounce_size,
3674 GFP_KERNEL);
3675 if (!host->bounce_buffer) {
3676 pr_err("%s: failed to allocate %u bytes for bounce buffer, falling back to single segments\n",
3677 mmc_hostname(mmc),
3678 bounce_size);
3679 /*
3680 * Exiting with zero here makes sure we proceed with
3681 * mmc->max_segs == 1.
3682 */
3683 return;
3684 }
3685
3686 host->bounce_addr = dma_map_single(mmc->parent,
3687 host->bounce_buffer,
3688 bounce_size,
3689 DMA_BIDIRECTIONAL);
3690 ret = dma_mapping_error(mmc->parent, host->bounce_addr);
3691 if (ret)
3692 /* Again fall back to max_segs == 1 */
3693 return;
3694 host->bounce_buffer_size = bounce_size;
3695
3696 /* Lie about this since we're bouncing */
3697 mmc->max_segs = max_blocks;
3698 mmc->max_seg_size = bounce_size;
3699 mmc->max_req_size = bounce_size;
3700
3701 pr_info("%s bounce up to %u segments into one, max segment size %u bytes\n",
3702 mmc_hostname(mmc), max_blocks, bounce_size);
3703 }
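/*
 * Worked example: with the default 64 KiB bounce buffer, max_blocks is
 * 65536 / 512 = 128, so the core sees up to 128 "segments" that are in
 * fact bounced through one contiguous buffer, and both the maximum
 * segment size and the maximum request size are reported as 64 KiB.
 */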
3704
3705 static inline bool sdhci_can_64bit_dma(struct sdhci_host *host)
3706 {
3707 /*
3708 * According to the SD Host Controller spec v4.10, bit[27] of the
3709 * Capabilities register (added in version 4.10) indicates 64-bit
3710 * System Address support for V4 mode.
3711 */
3712 if (host->version >= SDHCI_SPEC_410 && host->v4_mode)
3713 return host->caps & SDHCI_CAN_64BIT_V4;
3714
3715 return host->caps & SDHCI_CAN_64BIT;
3716 }
3717
3718 int sdhci_setup_host(struct sdhci_host *host)
3719 {
3720 struct mmc_host *mmc;
3721 u32 max_current_caps;
3722 unsigned int ocr_avail;
3723 unsigned int override_timeout_clk;
3724 u32 max_clk;
3725 int ret;
3726
3727 WARN_ON(host == NULL);
3728 if (host == NULL)
3729 return -EINVAL;
3730
3731 mmc = host->mmc;
3732
3733 /*
3734 * If there are external regulators, get them. Note this must be done
3735 * early before resetting the host and reading the capabilities so that
3736 * the host can take the appropriate action if regulators are not
3737 * available.
3738 */
3739 ret = mmc_regulator_get_supply(mmc);
3740 if (ret)
3741 return ret;
3742
3743 DBG("Version: 0x%08x | Present: 0x%08x\n",
3744 sdhci_readw(host, SDHCI_HOST_VERSION),
3745 sdhci_readl(host, SDHCI_PRESENT_STATE));
3746 DBG("Caps: 0x%08x | Caps_1: 0x%08x\n",
3747 sdhci_readl(host, SDHCI_CAPABILITIES),
3748 sdhci_readl(host, SDHCI_CAPABILITIES_1));
3749
3750 sdhci_read_caps(host);
3751
3752 override_timeout_clk = host->timeout_clk;
3753
3754 if (host->version > SDHCI_SPEC_420) {
3755 pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
3756 mmc_hostname(mmc), host->version);
3757 }
3758
3759 if (host->quirks & SDHCI_QUIRK_BROKEN_CQE)
3760 mmc->caps2 &= ~MMC_CAP2_CQE;
3761
3762 if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
3763 host->flags |= SDHCI_USE_SDMA;
3764 else if (!(host->caps & SDHCI_CAN_DO_SDMA))
3765 DBG("Controller doesn't have SDMA capability\n");
3766 else
3767 host->flags |= SDHCI_USE_SDMA;
3768
3769 if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
3770 (host->flags & SDHCI_USE_SDMA)) {
3771 DBG("Disabling DMA as it is marked broken\n");
3772 host->flags &= ~SDHCI_USE_SDMA;
3773 }
3774
3775 if ((host->version >= SDHCI_SPEC_200) &&
3776 (host->caps & SDHCI_CAN_DO_ADMA2))
3777 host->flags |= SDHCI_USE_ADMA;
3778
3779 if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
3780 (host->flags & SDHCI_USE_ADMA)) {
3781 DBG("Disabling ADMA as it is marked broken\n");
3782 host->flags &= ~SDHCI_USE_ADMA;
3783 }
3784
3785 if (sdhci_can_64bit_dma(host))
3786 host->flags |= SDHCI_USE_64_BIT_DMA;
3787
3788 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3789 if (host->ops->set_dma_mask)
3790 ret = host->ops->set_dma_mask(host);
3791 else
3792 ret = sdhci_set_dma_mask(host);
3793
3794 if (!ret && host->ops->enable_dma)
3795 ret = host->ops->enable_dma(host);
3796
3797 if (ret) {
3798 pr_warn("%s: No suitable DMA available - falling back to PIO\n",
3799 mmc_hostname(mmc));
3800 host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
3801
3802 ret = 0;
3803 }
3804 }
3805
3806 /* SDMA does not support 64-bit DMA if v4 mode not set */
3807 if ((host->flags & SDHCI_USE_64_BIT_DMA) && !host->v4_mode)
3808 host->flags &= ~SDHCI_USE_SDMA;
3809
3810 if (host->flags & SDHCI_USE_ADMA) {
3811 dma_addr_t dma;
3812 void *buf;
3813
3814 if (host->flags & SDHCI_USE_64_BIT_DMA) {
3815 host->adma_table_sz = host->adma_table_cnt *
3816 SDHCI_ADMA2_64_DESC_SZ(host);
3817 host->desc_sz = SDHCI_ADMA2_64_DESC_SZ(host);
3818 } else {
3819 host->adma_table_sz = host->adma_table_cnt *
3820 SDHCI_ADMA2_32_DESC_SZ;
3821 host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
3822 }
3823
3824 host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
3825 /*
3826 * Use zalloc to zero the reserved high 32-bits of 128-bit
3827 * descriptors so that they never need to be written.
3828 */
3829 buf = dma_alloc_coherent(mmc_dev(mmc),
3830 host->align_buffer_sz + host->adma_table_sz,
3831 &dma, GFP_KERNEL);
3832 if (!buf) {
3833 pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
3834 mmc_hostname(mmc));
3835 host->flags &= ~SDHCI_USE_ADMA;
3836 } else if ((dma + host->align_buffer_sz) &
3837 (SDHCI_ADMA2_DESC_ALIGN - 1)) {
3838 pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
3839 mmc_hostname(mmc));
3840 host->flags &= ~SDHCI_USE_ADMA;
3841 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
3842 host->adma_table_sz, buf, dma);
3843 } else {
3844 host->align_buffer = buf;
3845 host->align_addr = dma;
3846
3847 host->adma_table = buf + host->align_buffer_sz;
3848 host->adma_addr = dma + host->align_buffer_sz;
3849 }
3850 }
3851
3852 /*
3853 * If we use DMA, then it's up to the caller to set the DMA
3854 * mask, but PIO does not need the hw shim so we set a new
3855 * mask here in that case.
3856 */
3857 if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
3858 host->dma_mask = DMA_BIT_MASK(64);
3859 mmc_dev(mmc)->dma_mask = &host->dma_mask;
3860 }
3861
3862 if (host->version >= SDHCI_SPEC_300)
3863 host->max_clk = (host->caps & SDHCI_CLOCK_V3_BASE_MASK)
3864 >> SDHCI_CLOCK_BASE_SHIFT;
3865 else
3866 host->max_clk = (host->caps & SDHCI_CLOCK_BASE_MASK)
3867 >> SDHCI_CLOCK_BASE_SHIFT;
3868
3869 host->max_clk *= 1000000;
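/*
 * The base clock field in the capabilities register is encoded in
 * MHz, so e.g. a field value of 200 becomes 200000000 Hz here.
 */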
3870 if (host->max_clk == 0 || host->quirks &
3871 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
3872 if (!host->ops->get_max_clock) {
3873 pr_err("%s: Hardware doesn't specify base clock frequency.\n",
3874 mmc_hostname(mmc));
3875 ret = -ENODEV;
3876 goto undma;
3877 }
3878 host->max_clk = host->ops->get_max_clock(host);
3879 }
3880
3881 /*
3882 * For Host Controller v3.00, find out whether the clock
3883 * multiplier is supported.
3884 */
3885 host->clk_mul = (host->caps1 & SDHCI_CLOCK_MUL_MASK) >>
3886 SDHCI_CLOCK_MUL_SHIFT;
3887
3888 /*
3889 * If the Clock Multiplier field is 0, programmable clock mode
3890 * is not supported; otherwise the actual clock multiplier is
3891 * one more than the value of Clock Multiplier in the
3892 * Capabilities Register.
3893 */
3894 if (host->clk_mul)
3895 host->clk_mul += 1;
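/*
 * e.g. a Clock Multiplier field of 9 yields clk_mul == 10, i.e. the
 * programmable clock can run at up to 10 x the base clock.
 */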
3896
3897 /*
3898 * Set host parameters.
3899 */
3900 max_clk = host->max_clk;
3901
3902 if (host->ops->get_min_clock)
3903 mmc->f_min = host->ops->get_min_clock(host);
3904 else if (host->version >= SDHCI_SPEC_300) {
3905 if (host->clk_mul)
3906 max_clk = host->max_clk * host->clk_mul;
3907 /*
3908 * Divided Clock Mode minimum clock rate is always less than
3909 * Programmable Clock Mode minimum clock rate.
3910 */
3911 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
3912 } else
3913 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
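/*
 * SDHCI_MAX_DIV_SPEC_200 is 256 and SDHCI_MAX_DIV_SPEC_300 is 2046,
 * so a 200 MHz base clock gives an f_min of roughly 781 kHz or
 * 98 kHz respectively.
 */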
3914
3915 if (!mmc->f_max || mmc->f_max > max_clk)
3916 mmc->f_max = max_clk;
3917
3918 if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
3919 host->timeout_clk = (host->caps & SDHCI_TIMEOUT_CLK_MASK) >>
3920 SDHCI_TIMEOUT_CLK_SHIFT;
3921
3922 if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
3923 host->timeout_clk *= 1000;
3924
3925 if (host->timeout_clk == 0) {
3926 if (!host->ops->get_timeout_clock) {
3927 pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
3928 mmc_hostname(mmc));
3929 ret = -ENODEV;
3930 goto undma;
3931 }
3932
3933 host->timeout_clk =
3934 DIV_ROUND_UP(host->ops->get_timeout_clock(host),
3935 1000);
3936 }
3937
3938 if (override_timeout_clk)
3939 host->timeout_clk = override_timeout_clk;
3940
3941 mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
3942 host->ops->get_max_timeout_count(host) : 1 << 27;
3943 mmc->max_busy_timeout /= host->timeout_clk;
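/*
 * timeout_clk is kept in kHz here, so the division yields the busy
 * timeout in milliseconds, e.g. roughly 134 s for the default
 * 2^27-cycle count and a 1 MHz timeout clock.
 */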
3944 }
3945
3946 if (host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT &&
3947 !host->ops->get_max_timeout_count)
3948 mmc->max_busy_timeout = 0;
3949
3950 mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
3951 mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
3952
3953 if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
3954 host->flags |= SDHCI_AUTO_CMD12;
3955
3956 /*
3957 * For v3 mode, Auto-CMD23 only works in ADMA or PIO mode.
3958 * For v4 mode, SDMA may use Auto-CMD23 as well.
3959 */
3960 if ((host->version >= SDHCI_SPEC_300) &&
3961 ((host->flags & SDHCI_USE_ADMA) ||
3962 !(host->flags & SDHCI_USE_SDMA) || host->v4_mode) &&
3963 !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
3964 host->flags |= SDHCI_AUTO_CMD23;
3965 DBG("Auto-CMD23 available\n");
3966 } else {
3967 DBG("Auto-CMD23 unavailable\n");
3968 }
3969
3970 /*
3971 * A controller may support 8-bit width, but the board itself
3972 * might not have the pins brought out. Boards that support
3973 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
3974 * their platform code before calling sdhci_add_host(), and we
3975 * won't assume 8-bit width for hosts without that CAP.
3976 */
3977 if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
3978 mmc->caps |= MMC_CAP_4_BIT_DATA;
3979
3980 if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
3981 mmc->caps &= ~MMC_CAP_CMD23;
3982
3983 if (host->caps & SDHCI_CAN_DO_HISPD)
3984 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
3985
3986 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
3987 mmc_card_is_removable(mmc) &&
3988 mmc_gpio_get_cd(host->mmc) < 0)
3989 mmc->caps |= MMC_CAP_NEEDS_POLL;
3990
3991 if (!IS_ERR(mmc->supply.vqmmc)) {
3992 ret = regulator_enable(mmc->supply.vqmmc);
3993
3994 /* If vqmmc provides no 1.8V signalling, then there's no UHS */
3995 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
3996 1950000))
3997 host->caps1 &= ~(SDHCI_SUPPORT_SDR104 |
3998 SDHCI_SUPPORT_SDR50 |
3999 SDHCI_SUPPORT_DDR50);
4000
4001 /* In eMMC case vqmmc might be a fixed 1.8V regulator */
4002 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 2700000,
4003 3600000))
4004 host->flags &= ~SDHCI_SIGNALING_330;
4005
4006 if (ret) {
4007 pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
4008 mmc_hostname(mmc), ret);
4009 mmc->supply.vqmmc = ERR_PTR(-EINVAL);
4010 }
4011 }
4012
4013 if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) {
4014 host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
4015 SDHCI_SUPPORT_DDR50);
4016 /*
4017 * The SDHCI controller in a SoC might support HS200/HS400
4018 * (indicated using mmc-hs200-1_8v/mmc-hs400-1_8v dt property),
4019 * but if the board is wired such that the IO lines are not
4020 * connected to 1.8V then HS200/HS400 cannot be supported.
4021 * Disable HS200/HS400 if the board does not have 1.8V connected
4022 * to the IO lines. (The same applies to other 1.8V modes.)
4023 */
4024 mmc->caps2 &= ~(MMC_CAP2_HSX00_1_8V | MMC_CAP2_HS400_ES);
4025 mmc->caps &= ~(MMC_CAP_1_8V_DDR | MMC_CAP_UHS);
4026 }
4027
4028 /* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
4029 if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
4030 SDHCI_SUPPORT_DDR50))
4031 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
4032
4033 /* SDR104 support also implies SDR50 support */
4034 if (host->caps1 & SDHCI_SUPPORT_SDR104) {
4035 mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
4036 /* SD3.0: SDR104 is supported so (for eMMC) the caps2
4037 * field can be promoted to support HS200.
4038 */
4039 if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
4040 mmc->caps2 |= MMC_CAP2_HS200;
4041 } else if (host->caps1 & SDHCI_SUPPORT_SDR50) {
4042 mmc->caps |= MMC_CAP_UHS_SDR50;
4043 }
4044
4045 if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
4046 (host->caps1 & SDHCI_SUPPORT_HS400))
4047 mmc->caps2 |= MMC_CAP2_HS400;
4048
4049 if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
4050 (IS_ERR(mmc->supply.vqmmc) ||
4051 !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
4052 1300000)))
4053 mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;
4054
4055 if ((host->caps1 & SDHCI_SUPPORT_DDR50) &&
4056 !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
4057 mmc->caps |= MMC_CAP_UHS_DDR50;
4058
4059 /* Does the host need tuning for SDR50? */
4060 if (host->caps1 & SDHCI_USE_SDR50_TUNING)
4061 host->flags |= SDHCI_SDR50_NEEDS_TUNING;
4062
4063 /* Driver Type(s) (A, C, D) supported by the host */
4064 if (host->caps1 & SDHCI_DRIVER_TYPE_A)
4065 mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
4066 if (host->caps1 & SDHCI_DRIVER_TYPE_C)
4067 mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
4068 if (host->caps1 & SDHCI_DRIVER_TYPE_D)
4069 mmc->caps |= MMC_CAP_DRIVER_TYPE_D;
4070
4071 /* Initial value for re-tuning timer count */
4072 host->tuning_count = (host->caps1 & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
4073 SDHCI_RETUNING_TIMER_COUNT_SHIFT;
4074
4075 /*
4076 * In case Re-tuning Timer is not disabled, the actual value of
4077 * re-tuning timer will be 2 ^ (n - 1).
4078 */
4079 if (host->tuning_count)
4080 host->tuning_count = 1 << (host->tuning_count - 1);
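/*
 * e.g. a register value of 4 gives a tuning_count of 2^(4-1) = 8
 * (seconds, per the SDHCI spec).
 */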
4081
4082 /* Re-tuning mode supported by the Host Controller */
4083 host->tuning_mode = (host->caps1 & SDHCI_RETUNING_MODE_MASK) >>
4084 SDHCI_RETUNING_MODE_SHIFT;
4085
4086 ocr_avail = 0;
4087
4088 /*
4089 * According to SD Host Controller spec v3.00, if the Host System
4090 * can supply more than 150 mA, the Host Driver should set XPC to 1. Also
4091 * the value is meaningful only if Voltage Support in the Capabilities
4092 * register is set. The actual current value is 4 times the register
4093 * value.
4094 */
4095 max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
4096 if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
4097 int curr = regulator_get_current_limit(mmc->supply.vmmc);
4098 if (curr > 0) {
4099
4100 /* convert to SDHCI_MAX_CURRENT format */
4101 curr = curr/1000; /* convert to mA */
4102 curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER;
4103
4104 curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
4105 max_current_caps =
4106 (curr << SDHCI_MAX_CURRENT_330_SHIFT) |
4107 (curr << SDHCI_MAX_CURRENT_300_SHIFT) |
4108 (curr << SDHCI_MAX_CURRENT_180_SHIFT);
4109 }
4110 }
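/*
 * Example of the fallback above: a regulator limit of 800000 uA is
 * 800 mA, i.e. a value of 200 in each 4 mA-per-step current field.
 */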
4111
4112 if (host->caps & SDHCI_CAN_VDD_330) {
4113 ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
4114
4115 mmc->max_current_330 = ((max_current_caps &
4116 SDHCI_MAX_CURRENT_330_MASK) >>
4117 SDHCI_MAX_CURRENT_330_SHIFT) *
4118 SDHCI_MAX_CURRENT_MULTIPLIER;
4119 }
4120 if (host->caps & SDHCI_CAN_VDD_300) {
4121 ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
4122
4123 mmc->max_current_300 = ((max_current_caps &
4124 SDHCI_MAX_CURRENT_300_MASK) >>
4125 SDHCI_MAX_CURRENT_300_SHIFT) *
4126 SDHCI_MAX_CURRENT_MULTIPLIER;
4127 }
4128 if (host->caps & SDHCI_CAN_VDD_180) {
4129 ocr_avail |= MMC_VDD_165_195;
4130
4131 mmc->max_current_180 = ((max_current_caps &
4132 SDHCI_MAX_CURRENT_180_MASK) >>
4133 SDHCI_MAX_CURRENT_180_SHIFT) *
4134 SDHCI_MAX_CURRENT_MULTIPLIER;
4135 }
4136
4137 /* If OCR set by host, use it instead. */
4138 if (host->ocr_mask)
4139 ocr_avail = host->ocr_mask;
4140
4141 /* If OCR set by external regulators, give it highest prio. */
4142 if (mmc->ocr_avail)
4143 ocr_avail = mmc->ocr_avail;
4144
4145 mmc->ocr_avail = ocr_avail;
4146 mmc->ocr_avail_sdio = ocr_avail;
4147 if (host->ocr_avail_sdio)
4148 mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
4149 mmc->ocr_avail_sd = ocr_avail;
4150 if (host->ocr_avail_sd)
4151 mmc->ocr_avail_sd &= host->ocr_avail_sd;
4152 else /* normal SD controllers don't support 1.8V */
4153 mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
4154 mmc->ocr_avail_mmc = ocr_avail;
4155 if (host->ocr_avail_mmc)
4156 mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
4157
4158 if (mmc->ocr_avail == 0) {
4159 pr_err("%s: Hardware doesn't report any support voltages.\n",
4160 mmc_hostname(mmc));
4161 ret = -ENODEV;
4162 goto unreg;
4163 }
4164
4165 if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
4166 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
4167 MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) ||
4168 (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)))
4169 host->flags |= SDHCI_SIGNALING_180;
4170
4171 if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
4172 host->flags |= SDHCI_SIGNALING_120;
4173
4174 spin_lock_init(&host->lock);
4175
4176 /*
4177 * Maximum number of sectors in one transfer. Limited by SDMA boundary
4178 * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
4179 * is less anyway.
4180 */
4181 mmc->max_req_size = 524288;
4182
4183 /*
4184 * Maximum number of segments. Depends on if the hardware
4185 * can do scatter/gather or not.
4186 */
4187 if (host->flags & SDHCI_USE_ADMA) {
4188 mmc->max_segs = SDHCI_MAX_SEGS;
4189 } else if (host->flags & SDHCI_USE_SDMA) {
4190 mmc->max_segs = 1;
4191 if (swiotlb_max_segment()) {
4192 unsigned int max_req_size = (1 << IO_TLB_SHIFT) *
4193 IO_TLB_SEGSIZE;
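/*
 * With the default swiotlb parameters (2 KiB slots,
 * 128 slots per segment) this works out to 256 KiB.
 */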
4194 mmc->max_req_size = min(mmc->max_req_size,
4195 max_req_size);
4196 }
4197 } else { /* PIO */
4198 mmc->max_segs = SDHCI_MAX_SEGS;
4199 }
4200
4201 /*
4202 * Maximum segment size. Could be one segment with the maximum number
4203 * of bytes. When doing hardware scatter/gather, each entry cannot
4204 * be larger than 64 KiB though.
4205 */
4206 if (host->flags & SDHCI_USE_ADMA) {
4207 if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
4208 mmc->max_seg_size = 65535;
4209 else
4210 mmc->max_seg_size = 65536;
4211 } else {
4212 mmc->max_seg_size = mmc->max_req_size;
4213 }
4214
4215 /*
4216 * Maximum block size. This varies from controller to controller and
4217 * is specified in the capabilities register.
4218 */
4219 if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
4220 mmc->max_blk_size = 2;
4221 } else {
4222 mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >>
4223 SDHCI_MAX_BLOCK_SHIFT;
4224 if (mmc->max_blk_size >= 3) {
4225 pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
4226 mmc_hostname(mmc));
4227 mmc->max_blk_size = 0;
4228 }
4229 }
4230
4231 mmc->max_blk_size = 512 << mmc->max_blk_size;
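/*
 * e.g. a capabilities field of 2 (or the FORCE_BLK_SZ_2048 quirk)
 * results in 512 << 2 = 2048 byte blocks; an invalid field falls
 * back to 512 << 0 = 512 bytes.
 */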
4232
4233 /*
4234 * Maximum block count.
4235 */
4236 mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
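/* The Block Count register is 16 bits wide, hence the 65535 cap. */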
4237
4238 if (mmc->max_segs == 1)
4239 /* This may alter mmc->*_blk_* parameters */
4240 sdhci_allocate_bounce_buffer(host);
4241
4242 return 0;
4243
4244 unreg:
4245 if (!IS_ERR(mmc->supply.vqmmc))
4246 regulator_disable(mmc->supply.vqmmc);
4247 undma:
4248 if (host->align_buffer)
4249 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4250 host->adma_table_sz, host->align_buffer,
4251 host->align_addr);
4252 host->adma_table = NULL;
4253 host->align_buffer = NULL;
4254
4255 return ret;
4256 }
4257 EXPORT_SYMBOL_GPL(sdhci_setup_host);
4258
4259 void sdhci_cleanup_host(struct sdhci_host *host)
4260 {
4261 struct mmc_host *mmc = host->mmc;
4262
4263 if (!IS_ERR(mmc->supply.vqmmc))
4264 regulator_disable(mmc->supply.vqmmc);
4265
4266 if (host->align_buffer)
4267 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4268 host->adma_table_sz, host->align_buffer,
4269 host->align_addr);
4270 host->adma_table = NULL;
4271 host->align_buffer = NULL;
4272 }
4273 EXPORT_SYMBOL_GPL(sdhci_cleanup_host);
4274
4275 int __sdhci_add_host(struct sdhci_host *host)
4276 {
4277 unsigned int flags = WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI;
4278 struct mmc_host *mmc = host->mmc;
4279 int ret;
4280
4281 host->complete_wq = alloc_workqueue("sdhci", flags, 0);
4282 if (!host->complete_wq)
4283 return -ENOMEM;
4284
4285 INIT_WORK(&host->complete_work, sdhci_complete_work);
4286
4287 timer_setup(&host->timer, sdhci_timeout_timer, 0);
4288 timer_setup(&host->data_timer, sdhci_timeout_data_timer, 0);
4289
4290 init_waitqueue_head(&host->buf_ready_int);
4291
4292 sdhci_init(host, 0);
4293
4294 ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
4295 IRQF_SHARED, mmc_hostname(mmc), host);
4296 if (ret) {
4297 pr_err("%s: Failed to request IRQ %d: %d\n",
4298 mmc_hostname(mmc), host->irq, ret);
4299 goto unwq;
4300 }
4301
4302 ret = sdhci_led_register(host);
4303 if (ret) {
4304 pr_err("%s: Failed to register LED device: %d\n",
4305 mmc_hostname(mmc), ret);
4306 goto unirq;
4307 }
4308
4309 ret = mmc_add_host(mmc);
4310 if (ret)
4311 goto unled;
4312
4313 pr_info("%s: SDHCI controller on %s [%s] using %s\n",
4314 mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
4315 (host->flags & SDHCI_USE_ADMA) ?
4316 (host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
4317 (host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
4318
4319 sdhci_enable_card_detection(host);
4320
4321 return 0;
4322
4323 unled:
4324 sdhci_led_unregister(host);
4325 unirq:
4326 sdhci_do_reset(host, SDHCI_RESET_ALL);
4327 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
4328 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
4329 free_irq(host->irq, host);
4330 unwq:
4331 destroy_workqueue(host->complete_wq);
4332
4333 return ret;
4334 }
4335 EXPORT_SYMBOL_GPL(__sdhci_add_host);
4336
4337 int sdhci_add_host(struct sdhci_host *host)
4338 {
4339 int ret;
4340
4341 ret = sdhci_setup_host(host);
4342 if (ret)
4343 return ret;
4344
4345 ret = __sdhci_add_host(host);
4346 if (ret)
4347 goto cleanup;
4348
4349 return 0;
4350
4351 cleanup:
4352 sdhci_cleanup_host(host);
4353
4354 return ret;
4355 }
4356 EXPORT_SYMBOL_GPL(sdhci_add_host);
4357
4358 void sdhci_remove_host(struct sdhci_host *host, int dead)
4359 {
4360 struct mmc_host *mmc = host->mmc;
4361 unsigned long flags;
4362
4363 if (dead) {
4364 spin_lock_irqsave(&host->lock, flags);
4365
4366 host->flags |= SDHCI_DEVICE_DEAD;
4367
4368 if (sdhci_has_requests(host)) {
4369 pr_err("%s: Controller removed during "
4370 " transfer!\n", mmc_hostname(mmc));
4371 sdhci_error_out_mrqs(host, -ENOMEDIUM);
4372 }
4373
4374 spin_unlock_irqrestore(&host->lock, flags);
4375 }
4376
4377 sdhci_disable_card_detection(host);
4378
4379 mmc_remove_host(mmc);
4380
4381 sdhci_led_unregister(host);
4382
4383 if (!dead)
4384 sdhci_do_reset(host, SDHCI_RESET_ALL);
4385
4386 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
4387 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
4388 free_irq(host->irq, host);
4389
4390 del_timer_sync(&host->timer);
4391 del_timer_sync(&host->data_timer);
4392
4393 destroy_workqueue(host->complete_wq);
4394
4395 if (!IS_ERR(mmc->supply.vqmmc))
4396 regulator_disable(mmc->supply.vqmmc);
4397
4398 if (host->align_buffer)
4399 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4400 host->adma_table_sz, host->align_buffer,
4401 host->align_addr);
4402
4403 host->adma_table = NULL;
4404 host->align_buffer = NULL;
4405 }
4406
4407 EXPORT_SYMBOL_GPL(sdhci_remove_host);
4408
4409 void sdhci_free_host(struct sdhci_host *host)
4410 {
4411 mmc_free_host(host->mmc);
4412 }
4413
4414 EXPORT_SYMBOL_GPL(sdhci_free_host);
4415
4416 /*****************************************************************************\
4417 * *
4418 * Driver init/exit *
4419 * *
4420 \*****************************************************************************/
4421
4422 static int __init sdhci_drv_init(void)
4423 {
4424 pr_info(DRIVER_NAME
4425 ": Secure Digital Host Controller Interface driver\n");
4426 pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
4427
4428 return 0;
4429 }
4430
4431 static void __exit sdhci_drv_exit(void)
4432 {
4433 }
4434
4435 module_init(sdhci_drv_init);
4436 module_exit(sdhci_drv_exit);
4437
4438 module_param(debug_quirks, uint, 0444);
4439 module_param(debug_quirks2, uint, 0444);
4440
4441 MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
4442 MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
4443 MODULE_LICENSE("GPL");
4444
4445 MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
4446 MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");
4447