/*
 *  linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
 *
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * Thanks to the following companies for their support:
 *
 *     - JMicron (hardware and technical support)
 */

#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/sizes.h>
#include <linux/swiotlb.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>

#include <linux/leds.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>

#include "sdhci.h"

#define DRIVER_NAME "sdhci"

#define DBG(f, x...) \
	pr_debug("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)

#define SDHCI_DUMP(f, x...) \
	pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)

#define MAX_TUNING_LOOP 40

static unsigned int debug_quirks = 0;
static unsigned int debug_quirks2;

static void sdhci_finish_data(struct sdhci_host *);

static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);

void sdhci_dumpregs(struct sdhci_host *host)
{
	SDHCI_DUMP("============ SDHCI REGISTER DUMP ===========\n");

	SDHCI_DUMP("Sys addr:  0x%08x | Version:  0x%08x\n",
		   sdhci_readl(host, SDHCI_DMA_ADDRESS),
		   sdhci_readw(host, SDHCI_HOST_VERSION));
	SDHCI_DUMP("Blk size:  0x%08x | Blk cnt:  0x%08x\n",
		   sdhci_readw(host, SDHCI_BLOCK_SIZE),
		   sdhci_readw(host, SDHCI_BLOCK_COUNT));
	SDHCI_DUMP("Argument:  0x%08x | Trn mode: 0x%08x\n",
		   sdhci_readl(host, SDHCI_ARGUMENT),
		   sdhci_readw(host, SDHCI_TRANSFER_MODE));
	SDHCI_DUMP("Present:   0x%08x | Host ctl: 0x%08x\n",
		   sdhci_readl(host, SDHCI_PRESENT_STATE),
		   sdhci_readb(host, SDHCI_HOST_CONTROL));
	SDHCI_DUMP("Power:     0x%08x | Blk gap:  0x%08x\n",
		   sdhci_readb(host, SDHCI_POWER_CONTROL),
		   sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
	SDHCI_DUMP("Wake-up:   0x%08x | Clock:    0x%08x\n",
		   sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
		   sdhci_readw(host, SDHCI_CLOCK_CONTROL));
	SDHCI_DUMP("Timeout:   0x%08x | Int stat: 0x%08x\n",
		   sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
		   sdhci_readl(host, SDHCI_INT_STATUS));
	SDHCI_DUMP("Int enab:  0x%08x | Sig enab: 0x%08x\n",
		   sdhci_readl(host, SDHCI_INT_ENABLE),
		   sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
	SDHCI_DUMP("ACmd stat: 0x%08x | Slot int: 0x%08x\n",
		   sdhci_readw(host, SDHCI_AUTO_CMD_STATUS),
		   sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
	SDHCI_DUMP("Caps:      0x%08x | Caps_1:   0x%08x\n",
		   sdhci_readl(host, SDHCI_CAPABILITIES),
		   sdhci_readl(host, SDHCI_CAPABILITIES_1));
	SDHCI_DUMP("Cmd:       0x%08x | Max curr: 0x%08x\n",
		   sdhci_readw(host, SDHCI_COMMAND),
		   sdhci_readl(host, SDHCI_MAX_CURRENT));
	SDHCI_DUMP("Resp[0]:   0x%08x | Resp[1]:  0x%08x\n",
		   sdhci_readl(host, SDHCI_RESPONSE),
		   sdhci_readl(host, SDHCI_RESPONSE + 4));
	SDHCI_DUMP("Resp[2]:   0x%08x | Resp[3]:  0x%08x\n",
		   sdhci_readl(host, SDHCI_RESPONSE + 8),
		   sdhci_readl(host, SDHCI_RESPONSE + 12));
	SDHCI_DUMP("Host ctl2: 0x%08x\n",
		   sdhci_readw(host, SDHCI_HOST_CONTROL2));

	if (host->flags & SDHCI_USE_ADMA) {
		if (host->flags & SDHCI_USE_64_BIT_DMA) {
			SDHCI_DUMP("ADMA Err:  0x%08x | ADMA Ptr: 0x%08x%08x\n",
				   sdhci_readl(host, SDHCI_ADMA_ERROR),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS_HI),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
		} else {
			SDHCI_DUMP("ADMA Err:  0x%08x | ADMA Ptr: 0x%08x\n",
				   sdhci_readl(host, SDHCI_ADMA_ERROR),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
		}
	}

	SDHCI_DUMP("============================================\n");
}
EXPORT_SYMBOL_GPL(sdhci_dumpregs);

/*****************************************************************************\
 *                                                                           *
 * Low level functions                                                       *
 *                                                                           *
\*****************************************************************************/

static inline bool sdhci_data_line_cmd(struct mmc_command *cmd)
{
	return cmd->data || cmd->flags & MMC_RSP_BUSY;
}

static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
{
	u32 present;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
	    !mmc_card_is_removable(host->mmc))
		return;

	if (enable) {
		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				      SDHCI_CARD_PRESENT;

		host->ier |= present ? SDHCI_INT_CARD_REMOVE :
				       SDHCI_INT_CARD_INSERT;
	} else {
		host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
	}

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_enable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, true);
}

static void sdhci_disable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, false);
}

static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
{
	if (host->bus_on)
		return;
	host->bus_on = true;
	pm_runtime_get_noresume(host->mmc->parent);
}

static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
{
	if (!host->bus_on)
		return;
	host->bus_on = false;
	pm_runtime_put_noidle(host->mmc->parent);
}

void sdhci_reset(struct sdhci_host *host, u8 mask)
{
	ktime_t timeout;

	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);

	if (mask & SDHCI_RESET_ALL) {
		host->clock = 0;
		/* Reset-all turns off SD Bus Power */
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	}

	/* Wait max 100 ms */
	timeout = ktime_add_ms(ktime_get(), 100);

	/* hw clears the bit when it's done */
	while (1) {
		bool timedout = ktime_after(ktime_get(), timeout);

		if (!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask))
			break;
		if (timedout) {
			pr_err("%s: Reset 0x%x never completed.\n",
				mmc_hostname(host->mmc), (int)mask);
			sdhci_dumpregs(host);
			return;
		}
		udelay(10);
	}
}
EXPORT_SYMBOL_GPL(sdhci_reset);

static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
{
	if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
		struct mmc_host *mmc = host->mmc;

		if (!mmc->ops->get_cd(mmc))
			return;
	}

	host->ops->reset(host, mask);

	if (mask & SDHCI_RESET_ALL) {
		if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
			if (host->ops->enable_dma)
				host->ops->enable_dma(host);
		}
		/* Resetting the controller clears many settings */
		host->preset_enabled = false;
	}
}

static void sdhci_set_default_irqs(struct sdhci_host *host)
{
	host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
		    SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
		    SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
		    SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
		    SDHCI_INT_RESPONSE;

	if (host->tuning_mode == SDHCI_TUNING_MODE_2 ||
	    host->tuning_mode == SDHCI_TUNING_MODE_3)
		host->ier |= SDHCI_INT_RETUNE;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_init(struct sdhci_host *host, int soft)
{
	struct mmc_host *mmc = host->mmc;

	if (soft)
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
	else
		sdhci_do_reset(host, SDHCI_RESET_ALL);

	sdhci_set_default_irqs(host);

	host->cqe_on = false;

	if (soft) {
		/* force clock reconfiguration */
		host->clock = 0;
		mmc->ops->set_ios(mmc, &mmc->ios);
	}
}

static void sdhci_reinit(struct sdhci_host *host)
{
	sdhci_init(host, 0);
	sdhci_enable_card_detection(host);
}

static void __sdhci_led_activate(struct sdhci_host *host)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl |= SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

static void __sdhci_led_deactivate(struct sdhci_host *host)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl &= ~SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

#if IS_REACHABLE(CONFIG_LEDS_CLASS)
static void sdhci_led_control(struct led_classdev *led,
			      enum led_brightness brightness)
{
	struct sdhci_host *host = container_of(led, struct sdhci_host, led);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->runtime_suspended)
		goto out;

	if (brightness == LED_OFF)
		__sdhci_led_deactivate(host);
	else
		__sdhci_led_activate(host);
out:
	spin_unlock_irqrestore(&host->lock, flags);
}

static int sdhci_led_register(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;

	snprintf(host->led_name, sizeof(host->led_name),
		 "%s::", mmc_hostname(mmc));

	host->led.name = host->led_name;
	host->led.brightness = LED_OFF;
	host->led.default_trigger = mmc_hostname(mmc);
	host->led.brightness_set = sdhci_led_control;

	return led_classdev_register(mmc_dev(mmc), &host->led);
}

static void sdhci_led_unregister(struct sdhci_host *host)
{
	led_classdev_unregister(&host->led);
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
}

#else

static inline int sdhci_led_register(struct sdhci_host *host)
{
	return 0;
}

static inline void sdhci_led_unregister(struct sdhci_host *host)
{
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
	__sdhci_led_activate(host);
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
	__sdhci_led_deactivate(host);
}

#endif

/*****************************************************************************\
 *                                                                           *
 * Core functions                                                            *
 *                                                                           *
\*****************************************************************************/

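/*
 * PIO transfers go through the 32-bit SDHCI_BUFFER register: each read
 * (or write) of that register moves four bytes, so the helpers below
 * shuffle data between the scatterlist and the register one u32 chunk
 * at a time, with the sg_miter iterator handling scatterlist traversal.
 */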
static void sdhci_read_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 uninitialized_var(scratch);
	u8 *buf;

	DBG("PIO reading\n");

	blksize = host->data->blksz;
	chunk = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			if (chunk == 0) {
				scratch = sdhci_readl(host, SDHCI_BUFFER);
				chunk = 4;
			}

			*buf = scratch & 0xFF;

			buf++;
			scratch >>= 8;
			chunk--;
			len--;
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}

static void sdhci_write_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 scratch;
	u8 *buf;

	DBG("PIO writing\n");

	blksize = host->data->blksz;
	chunk = 0;
	scratch = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			scratch |= (u32)*buf << (chunk * 8);

			buf++;
			chunk++;
			len--;

			if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
				sdhci_writel(host, scratch, SDHCI_BUFFER);
				chunk = 0;
				scratch = 0;
			}
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}

static void sdhci_transfer_pio(struct sdhci_host *host)
{
	u32 mask;

	if (host->blocks == 0)
		return;

	if (host->data->flags & MMC_DATA_READ)
		mask = SDHCI_DATA_AVAILABLE;
	else
		mask = SDHCI_SPACE_AVAILABLE;

	/*
	 * Some controllers (JMicron JMB38x) mess up the buffer bits
	 * for transfers < 4 bytes. As long as it is just one block,
	 * we can ignore the bits.
	 */
	if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
		(host->data->blocks == 1))
		mask = ~0;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
			udelay(100);

		if (host->data->flags & MMC_DATA_READ)
			sdhci_read_block_pio(host);
		else
			sdhci_write_block_pio(host);

		host->blocks--;
		if (host->blocks == 0)
			break;
	}

	DBG("PIO transfer complete.\n");
}

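/*
 * DMA mapping is tracked with a cookie in mmc_data->host_cookie
 * (COOKIE_PRE_MAPPED when the buffers were mapped ahead of time by the
 * ->pre_req() hook, COOKIE_MAPPED when mapped here), so a request that
 * is already mapped is not mapped twice and teardown knows who owns
 * the mapping.
 */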
static int sdhci_pre_dma_transfer(struct sdhci_host *host,
				  struct mmc_data *data, int cookie)
{
	int sg_count;

	/*
	 * If the data buffers are already mapped, return the previous
	 * dma_map_sg() result.
	 */
	if (data->host_cookie == COOKIE_PRE_MAPPED)
		return data->sg_count;

	/* Bounce write requests to the bounce buffer */
	if (host->bounce_buffer) {
		unsigned int length = data->blksz * data->blocks;

		if (length > host->bounce_buffer_size) {
			pr_err("%s: asked for transfer of %u bytes exceeds bounce buffer %u bytes\n",
			       mmc_hostname(host->mmc), length,
			       host->bounce_buffer_size);
			return -EIO;
		}
		if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) {
			/* Copy the data to the bounce buffer */
			sg_copy_to_buffer(data->sg, data->sg_len,
					  host->bounce_buffer,
					  length);
		}
		/* Switch ownership to the DMA */
		dma_sync_single_for_device(host->mmc->parent,
					   host->bounce_addr,
					   host->bounce_buffer_size,
					   mmc_get_dma_dir(data));
		/* Just a dummy value */
		sg_count = 1;
	} else {
		/* Just access the data directly from memory */
		sg_count = dma_map_sg(mmc_dev(host->mmc),
				      data->sg, data->sg_len,
				      mmc_get_dma_dir(data));
	}

	if (sg_count == 0)
		return -ENOSPC;

	data->sg_count = sg_count;
	data->host_cookie = cookie;

	return sg_count;
}

static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
	local_irq_save(*flags);
	return kmap_atomic(sg_page(sg)) + sg->offset;
}

static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
{
	kunmap_atomic(buffer);
	local_irq_restore(*flags);
}

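/*
 * ADMA2 descriptors are little-endian { attribute, length, address }
 * records. Illustrative example (not from the original source): an
 * aligned 512-byte buffer becomes a single "tran, valid" descriptor
 * with len = 512, and the table is then terminated either by setting
 * the end bit on that last descriptor or by appending a "nop, end,
 * valid" entry, depending on the controller quirk handling below.
 * The 32-bit and 64-bit layouts share the leading fields, which is
 * why one writer serves both.
 */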
static void sdhci_adma_write_desc(struct sdhci_host *host, void *desc,
				  dma_addr_t addr, int len, unsigned cmd)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have these members in same position */
	dma_desc->cmd = cpu_to_le16(cmd);
	dma_desc->len = cpu_to_le16(len);
	dma_desc->addr_lo = cpu_to_le32((u32)addr);

	if (host->flags & SDHCI_USE_64_BIT_DMA)
		dma_desc->addr_hi = cpu_to_le32((u64)addr >> 32);
}

static void sdhci_adma_mark_end(void *desc)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have 'cmd' in same position */
	dma_desc->cmd |= cpu_to_le16(ADMA2_END);
}

static void sdhci_adma_table_pre(struct sdhci_host *host,
	struct mmc_data *data, int sg_count)
{
	struct scatterlist *sg;
	unsigned long flags;
	dma_addr_t addr, align_addr;
	void *desc, *align;
	char *buffer;
	int len, offset, i;

	/*
	 * The spec does not specify endianness of descriptor table.
	 * We currently guess that it is LE.
	 */

	host->sg_count = sg_count;

	desc = host->adma_table;
	align = host->align_buffer;

	align_addr = host->align_addr;

	for_each_sg(data->sg, sg, host->sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		/*
		 * The SDHCI specification states that ADMA addresses must
		 * be 32-bit aligned. If they aren't, then we use a bounce
		 * buffer for the (up to three) bytes that screw up the
		 * alignment.
		 */
		offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
			 SDHCI_ADMA2_MASK;
		if (offset) {
			if (data->flags & MMC_DATA_WRITE) {
				buffer = sdhci_kmap_atomic(sg, &flags);
				memcpy(align, buffer, offset);
				sdhci_kunmap_atomic(buffer, &flags);
			}

			/* tran, valid */
			sdhci_adma_write_desc(host, desc, align_addr, offset,
					      ADMA2_TRAN_VALID);

			BUG_ON(offset > 65536);

			align += SDHCI_ADMA2_ALIGN;
			align_addr += SDHCI_ADMA2_ALIGN;

			desc += host->desc_sz;

			addr += offset;
			len -= offset;
		}

		BUG_ON(len > 65536);

		if (len) {
			/* tran, valid */
			sdhci_adma_write_desc(host, desc, addr, len,
					      ADMA2_TRAN_VALID);
			desc += host->desc_sz;
		}

		/*
		 * If this triggers then we have a calculation bug
		 * somewhere. :/
		 */
		WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
	}

	if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
		/* Mark the last descriptor as the terminating descriptor */
		if (desc != host->adma_table) {
			desc -= host->desc_sz;
			sdhci_adma_mark_end(desc);
		}
	} else {
		/* Add a terminating entry - nop, end, valid */
		sdhci_adma_write_desc(host, desc, 0, 0, ADMA2_NOP_END_VALID);
	}
}

static void sdhci_adma_table_post(struct sdhci_host *host,
	struct mmc_data *data)
{
	struct scatterlist *sg;
	int i, size;
	void *align;
	char *buffer;
	unsigned long flags;

	if (data->flags & MMC_DATA_READ) {
		bool has_unaligned = false;

		/* Do a quick scan of the SG list for any unaligned mappings */
		for_each_sg(data->sg, sg, host->sg_count, i)
			if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
				has_unaligned = true;
				break;
			}

		if (has_unaligned) {
			dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
					    data->sg_len, DMA_FROM_DEVICE);

			align = host->align_buffer;

			for_each_sg(data->sg, sg, host->sg_count, i) {
				if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
					size = SDHCI_ADMA2_ALIGN -
					       (sg_dma_address(sg) & SDHCI_ADMA2_MASK);

					buffer = sdhci_kmap_atomic(sg, &flags);
					memcpy(buffer, align, size);
					sdhci_kunmap_atomic(buffer, &flags);

					align += SDHCI_ADMA2_ALIGN;
				}
			}
		}
	}
}

static u32 sdhci_sdma_address(struct sdhci_host *host)
{
	if (host->bounce_buffer)
		return host->bounce_addr;
	else
		return sg_dma_address(host->data->sg);
}

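/*
 * Illustrative example (not from the original source): the hardware data
 * timeout is TMCLK * 2^(count + 13), so with timeout_clk = 50000 kHz the
 * minimum timeout is 2^13 * 1000 / 50000 ~= 164 us. To cover a 500 ms
 * target, count must satisfy 164 us * 2^count >= 500000 us, i.e.
 * count = 12 (164 us * 4096 ~= 671 ms).
 */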
static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 count;
	struct mmc_data *data = cmd->data;
	unsigned target_timeout, current_timeout;

	/*
	 * If the host controller provides us with an incorrect timeout
	 * value, just skip the check and use 0xE.  The hardware may take
	 * longer to time out, but that's much better than having a too-short
	 * timeout value.
	 */
	if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
		return 0xE;

	/* Unspecified timeout, assume max */
	if (!data && !cmd->busy_timeout)
		return 0xE;

	/* timeout in us */
	if (!data)
		target_timeout = cmd->busy_timeout * 1000;
	else {
		target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
		if (host->clock && data->timeout_clks) {
			unsigned long long val;

			/*
			 * data->timeout_clks is in units of clock cycles.
			 * host->clock is in Hz.  target_timeout is in us.
			 * Hence, us = 1000000 * cycles / Hz.  Round up.
			 */
			val = 1000000ULL * data->timeout_clks;
			if (do_div(val, host->clock))
				target_timeout++;
			target_timeout += val;
		}
	}

	/*
	 * Figure out needed cycles.
	 * We do this in steps in order to fit inside a 32 bit int.
	 * The first step is the minimum timeout, which will have a
	 * minimum resolution of 6 bits:
	 * (1) 2^13*1000 > 2^22,
	 * (2) host->timeout_clk < 2^16
	 *     =>
	 *     (1) / (2) > 2^6
	 */
	count = 0;
	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
	while (current_timeout < target_timeout) {
		count++;
		current_timeout <<= 1;
		if (count >= 0xF)
			break;
	}

	if (count >= 0xF) {
		DBG("Too large timeout 0x%x requested for CMD%d!\n",
		    count, cmd->opcode);
		count = 0xE;
	}

	return count;
}

static void sdhci_set_transfer_irqs(struct sdhci_host *host)
{
	u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
	u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;

	if (host->flags & SDHCI_REQ_USE_DMA)
		host->ier = (host->ier & ~pio_irqs) | dma_irqs;
	else
		host->ier = (host->ier & ~dma_irqs) | pio_irqs;

	if (host->flags & (SDHCI_AUTO_CMD23 | SDHCI_AUTO_CMD12))
		host->ier |= SDHCI_INT_AUTO_CMD_ERR;
	else
		host->ier &= ~SDHCI_INT_AUTO_CMD_ERR;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 count;

	if (host->ops->set_timeout) {
		host->ops->set_timeout(host, cmd);
	} else {
		count = sdhci_calc_timeout(host, cmd);
		sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
	}
}

static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 ctrl;
	struct mmc_data *data = cmd->data;

	if (sdhci_data_line_cmd(cmd))
		sdhci_set_timeout(host, cmd);

	if (!data)
		return;

	WARN_ON(host->data);

	/* Sanity checks */
	BUG_ON(data->blksz * data->blocks > 524288);
	BUG_ON(data->blksz > host->mmc->max_blk_size);
	BUG_ON(data->blocks > 65535);

	host->data = data;
	host->data_early = 0;
	host->data->bytes_xfered = 0;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		struct scatterlist *sg;
		unsigned int length_mask, offset_mask;
		int i;

		host->flags |= SDHCI_REQ_USE_DMA;

		/*
		 * FIXME: This doesn't account for merging when mapping the
		 * scatterlist.
		 *
		 * The assumption here being that alignment and lengths are
		 * the same after DMA mapping to device address space.
		 */
		length_mask = 0;
		offset_mask = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
				length_mask = 3;
				/*
				 * As we use up to 3 byte chunks to work
				 * around alignment problems, we need to
				 * check the offset as well.
				 */
				offset_mask = 3;
			}
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
				length_mask = 3;
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
				offset_mask = 3;
		}

		if (unlikely(length_mask | offset_mask)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->length & length_mask) {
					DBG("Reverting to PIO because of transfer size (%d)\n",
					    sg->length);
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
				if (sg->offset & offset_mask) {
					DBG("Reverting to PIO because of bad alignment\n");
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	if (host->flags & SDHCI_REQ_USE_DMA) {
		int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);

		if (sg_cnt <= 0) {
			/*
			 * This only happens when someone fed
			 * us an invalid request.
			 */
			WARN_ON(1);
			host->flags &= ~SDHCI_REQ_USE_DMA;
		} else if (host->flags & SDHCI_USE_ADMA) {
			sdhci_adma_table_pre(host, data, sg_cnt);

			sdhci_writel(host, host->adma_addr, SDHCI_ADMA_ADDRESS);
			if (host->flags & SDHCI_USE_64_BIT_DMA)
				sdhci_writel(host,
					     (u64)host->adma_addr >> 32,
					     SDHCI_ADMA_ADDRESS_HI);
		} else {
			WARN_ON(sg_cnt != 1);
			sdhci_writel(host, sdhci_sdma_address(host),
				     SDHCI_DMA_ADDRESS);
		}
	}

	/*
	 * Always adjust the DMA selection as some controllers
	 * (e.g. JMicron) can't do PIO properly when the selection
	 * is ADMA.
	 */
	if (host->version >= SDHCI_SPEC_200) {
		ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
		ctrl &= ~SDHCI_CTRL_DMA_MASK;
		if ((host->flags & SDHCI_REQ_USE_DMA) &&
			(host->flags & SDHCI_USE_ADMA)) {
			if (host->flags & SDHCI_USE_64_BIT_DMA)
				ctrl |= SDHCI_CTRL_ADMA64;
			else
				ctrl |= SDHCI_CTRL_ADMA32;
		} else {
			ctrl |= SDHCI_CTRL_SDMA;
		}
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
	}

	if (!(host->flags & SDHCI_REQ_USE_DMA)) {
		int flags;

		flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;
		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->blocks = data->blocks;
	}

	sdhci_set_transfer_irqs(host);

	/* Set the DMA boundary value and block size */
	sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz),
		     SDHCI_BLOCK_SIZE);
	sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
}

static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
				    struct mmc_request *mrq)
{
	return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
	       !mrq->cap_cmd_during_tfr;
}

static void sdhci_set_transfer_mode(struct sdhci_host *host,
	struct mmc_command *cmd)
{
	u16 mode = 0;
	struct mmc_data *data = cmd->data;

	if (data == NULL) {
		if (host->quirks2 &
			SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
			sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
		} else {
			/* clear Auto CMD settings for no data CMDs */
			mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
			sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
				SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
		}
		return;
	}

	WARN_ON(!host->data);

	if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
		mode = SDHCI_TRNS_BLK_CNT_EN;

	if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
		mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
		/*
		 * If we are sending CMD23, CMD12 never gets sent
		 * on successful completion (so no Auto-CMD12).
		 */
		if (sdhci_auto_cmd12(host, cmd->mrq) &&
		    (cmd->opcode != SD_IO_RW_EXTENDED))
			mode |= SDHCI_TRNS_AUTO_CMD12;
		else if (cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
			mode |= SDHCI_TRNS_AUTO_CMD23;
			sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2);
		}
	}

	if (data->flags & MMC_DATA_READ)
		mode |= SDHCI_TRNS_READ;
	if (host->flags & SDHCI_REQ_USE_DMA)
		mode |= SDHCI_TRNS_DMA;

	sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
}

static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
{
	return (!(host->flags & SDHCI_DEVICE_DEAD) &&
		((mrq->cmd && mrq->cmd->error) ||
		 (mrq->sbc && mrq->sbc->error) ||
		 (mrq->data && mrq->data->stop && mrq->data->stop->error) ||
		 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
}

static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
	int i;

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		if (host->mrqs_done[i] == mrq) {
			WARN_ON(1);
			return;
		}
	}

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		if (!host->mrqs_done[i]) {
			host->mrqs_done[i] = mrq;
			break;
		}
	}

	WARN_ON(i >= SDHCI_MAX_MRQS);

	tasklet_schedule(&host->finish_tasklet);
}

static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
	if (host->cmd && host->cmd->mrq == mrq)
		host->cmd = NULL;

	if (host->data_cmd && host->data_cmd->mrq == mrq)
		host->data_cmd = NULL;

	if (host->data && host->data->mrq == mrq)
		host->data = NULL;

	if (sdhci_needs_reset(host, mrq))
		host->pending_reset = true;

	__sdhci_finish_mrq(host, mrq);
}

static void sdhci_finish_data(struct sdhci_host *host)
{
	struct mmc_command *data_cmd = host->data_cmd;
	struct mmc_data *data = host->data;

	host->data = NULL;
	host->data_cmd = NULL;

	/*
	 * The controller needs a reset of internal state machines upon error
	 * conditions.
	 */
	if (data->error) {
		if (!host->cmd || host->cmd == data_cmd)
			sdhci_do_reset(host, SDHCI_RESET_CMD);
		sdhci_do_reset(host, SDHCI_RESET_DATA);
	}

	if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
	    (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
		sdhci_adma_table_post(host, data);

	/*
	 * The specification states that the block count register must
	 * be updated, but it does not specify at what point in the
	 * data flow. That makes the register entirely useless to read
	 * back so we have to assume that nothing made it to the card
	 * in the event of an error.
	 */
	if (data->error)
		data->bytes_xfered = 0;
	else
		data->bytes_xfered = data->blksz * data->blocks;

	/*
	 * Need to send CMD12 if -
	 * a) open-ended multiblock transfer (no CMD23)
	 * b) error in multiblock transfer
	 */
	if (data->stop &&
	    (data->error ||
	     !data->mrq->sbc)) {
		/*
		 * 'cap_cmd_during_tfr' request must not use the command line
		 * after mmc_command_done() has been called. It is upper layer's
		 * responsibility to send the stop command if required.
		 */
		if (data->mrq->cap_cmd_during_tfr) {
			sdhci_finish_mrq(host, data->mrq);
		} else {
			/* Avoid triggering warning in sdhci_send_command() */
			host->cmd = NULL;
			sdhci_send_command(host, data->stop);
		}
	} else {
		sdhci_finish_mrq(host, data->mrq);
	}
}

static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
			    unsigned long timeout)
{
	if (sdhci_data_line_cmd(mrq->cmd))
		mod_timer(&host->data_timer, timeout);
	else
		mod_timer(&host->timer, timeout);
}

static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq)
{
	if (sdhci_data_line_cmd(mrq->cmd))
		del_timer(&host->data_timer);
	else
		del_timer(&host->timer);
}

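/*
 * Command issue sequence: wait for the CMD (and, for commands that use
 * the data lines or busy signaling, DAT) inhibit bits to clear, arm a
 * software timeout, program block size/count and DMA via
 * sdhci_prepare_data(), write the argument and transfer mode, and
 * finally write SDHCI_COMMAND, which starts execution in hardware.
 */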
void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	int flags;
	u32 mask;
	unsigned long timeout;

	WARN_ON(host->cmd);

	/* Initially, a command has no error */
	cmd->error = 0;

	if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
	    cmd->opcode == MMC_STOP_TRANSMISSION)
		cmd->flags |= MMC_RSP_BUSY;

	/* Wait max 10 ms */
	timeout = 10;

	mask = SDHCI_CMD_INHIBIT;
	if (sdhci_data_line_cmd(cmd))
		mask |= SDHCI_DATA_INHIBIT;

	/*
	 * We shouldn't wait for data inhibit for stop commands, even
	 * though they might use busy signaling.
	 */
	if (cmd->mrq->data && (cmd == cmd->mrq->data->stop))
		mask &= ~SDHCI_DATA_INHIBIT;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (timeout == 0) {
			pr_err("%s: Controller never released inhibit bit(s).\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			cmd->error = -EIO;
			sdhci_finish_mrq(host, cmd->mrq);
			return;
		}
		timeout--;
		mdelay(1);
	}

	timeout = jiffies;
	if (!cmd->data && cmd->busy_timeout > 9000)
		timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
	else
		timeout += 10 * HZ;
	sdhci_mod_timer(host, cmd->mrq, timeout);

	host->cmd = cmd;
	if (sdhci_data_line_cmd(cmd)) {
		WARN_ON(host->data_cmd);
		host->data_cmd = cmd;
	}

	sdhci_prepare_data(host, cmd);

	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);

	sdhci_set_transfer_mode(host, cmd);

	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		pr_err("%s: Unsupported response type!\n",
			mmc_hostname(host->mmc));
		cmd->error = -EINVAL;
		sdhci_finish_mrq(host, cmd->mrq);
		return;
	}

	if (!(cmd->flags & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->flags & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->flags & MMC_RSP_BUSY)
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
	else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->flags & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->flags & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;

	/* CMD19 is special in that the Data Present Select should be set */
	if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
	    cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
		flags |= SDHCI_CMD_DATA;

	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
}
EXPORT_SYMBOL_GPL(sdhci_send_command);

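/*
 * A 136-bit (R2) response is read from the four 32-bit RESPONSE registers
 * in reverse order. The controller strips the CRC byte, so each word is
 * shifted left by 8 and topped up with the high byte of the next word to
 * reconstruct the layout the MMC core expects.
 */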
static void sdhci_read_rsp_136(struct sdhci_host *host, struct mmc_command *cmd)
{
	int i, reg;

	for (i = 0; i < 4; i++) {
		reg = SDHCI_RESPONSE + (3 - i) * 4;
		cmd->resp[i] = sdhci_readl(host, reg);
	}

	if (host->quirks2 & SDHCI_QUIRK2_RSP_136_HAS_CRC)
		return;

	/* CRC is stripped so we need to do some shifting */
	for (i = 0; i < 4; i++) {
		cmd->resp[i] <<= 8;
		if (i != 3)
			cmd->resp[i] |= cmd->resp[i + 1] >> 24;
	}
}

static void sdhci_finish_command(struct sdhci_host *host)
{
	struct mmc_command *cmd = host->cmd;

	host->cmd = NULL;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			sdhci_read_rsp_136(host, cmd);
		} else {
			cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
		}
	}

	if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd)
		mmc_command_done(host->mmc, cmd->mrq);

	/*
	 * The host can send an interrupt when the busy state has
	 * ended, allowing us to wait without wasting CPU cycles.
	 * The busy signal uses DAT0 so this is similar to waiting
	 * for data to complete.
	 *
	 * Note: The 1.0 specification is a bit ambiguous about this
	 *       feature so there might be some problems with older
	 *       controllers.
	 */
	if (cmd->flags & MMC_RSP_BUSY) {
		if (cmd->data) {
			DBG("Cannot wait for busy signal when also doing a data transfer");
		} else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
			   cmd == host->data_cmd) {
			/* Command complete before busy is ended */
			return;
		}
	}

	/* Finished CMD23, now send actual command. */
	if (cmd == cmd->mrq->sbc) {
		sdhci_send_command(host, cmd->mrq->cmd);
	} else {

		/* Processed actual command. */
		if (host->data && host->data_early)
			sdhci_finish_data(host);

		if (!cmd->data)
			sdhci_finish_mrq(host, cmd->mrq);
	}
}

static u16 sdhci_get_preset_value(struct sdhci_host *host)
{
	u16 preset = 0;

	switch (host->timing) {
	case MMC_TIMING_UHS_SDR12:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	case MMC_TIMING_UHS_SDR25:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
		break;
	case MMC_TIMING_UHS_SDR50:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
		break;
	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_MMC_HS200:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
		break;
	case MMC_TIMING_UHS_DDR50:
	case MMC_TIMING_MMC_DDR52:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
		break;
	case MMC_TIMING_MMC_HS400:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
		break;
	default:
		pr_warn("%s: Invalid UHS-I mode selected\n",
			mmc_hostname(host->mmc));
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	}
	return preset;
}

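/*
 * Illustrative example (not from the original source): on a v3.00 host
 * with max_clk = 200 MHz and no programmable clock multiplier, asking
 * for 25 MHz scans even divisors and picks div = 8 (200 / 8 = 25 MHz);
 * the value programmed into the register is div >> 1 = 4, since the
 * hardware divides the base clock by 2 * N.
 */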
u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
		   unsigned int *actual_clock)
{
	int div = 0; /* Initialized for compiler warning */
	int real_div = div, clk_mul = 1;
	u16 clk = 0;
	bool switch_base_clk = false;

	if (host->version >= SDHCI_SPEC_300) {
		if (host->preset_enabled) {
			u16 pre_val;

			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			pre_val = sdhci_get_preset_value(host);
			div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
				>> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
			if (host->clk_mul &&
				(pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div + 1;
				clk_mul = host->clk_mul;
			} else {
				real_div = max_t(int, 1, div << 1);
			}
			goto clock_set;
		}

		/*
		 * Check if the Host Controller supports Programmable Clock
		 * Mode.
		 */
		if (host->clk_mul) {
			for (div = 1; div <= 1024; div++) {
				if ((host->max_clk * host->clk_mul / div)
					<= clock)
					break;
			}
			if ((host->max_clk * host->clk_mul / div) <= clock) {
				/*
				 * Set Programmable Clock Mode in the Clock
				 * Control register.
				 */
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div;
				clk_mul = host->clk_mul;
				div--;
			} else {
				/*
				 * Divisor can be too small to reach clock
				 * speed requirement. Then use the base clock.
				 */
				switch_base_clk = true;
			}
		}

		if (!host->clk_mul || switch_base_clk) {
			/* Version 3.00 divisors must be a multiple of 2. */
			if (host->max_clk <= clock)
				div = 1;
			else {
				for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
				     div += 2) {
					if ((host->max_clk / div) <= clock)
						break;
				}
			}
			real_div = div;
			div >>= 1;
			if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
				&& !div && host->max_clk <= 25000000)
				div = 1;
		}
	} else {
		/* Version 2.00 divisors must be a power of 2. */
		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
			if ((host->max_clk / div) <= clock)
				break;
		}
		real_div = div;
		div >>= 1;
	}

clock_set:
	if (real_div)
		*actual_clock = (host->max_clk * clk_mul) / real_div;
	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
		<< SDHCI_DIVIDER_HI_SHIFT;

	return clk;
}
EXPORT_SYMBOL_GPL(sdhci_calc_clk);

void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
{
	ktime_t timeout;

	clk |= SDHCI_CLOCK_INT_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

	/* Wait max 20 ms */
	timeout = ktime_add_ms(ktime_get(), 20);
	while (1) {
		bool timedout = ktime_after(ktime_get(), timeout);

		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
		if (clk & SDHCI_CLOCK_INT_STABLE)
			break;
		if (timedout) {
			pr_err("%s: Internal clock never stabilised.\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			return;
		}
		udelay(10);
	}

	clk |= SDHCI_CLOCK_CARD_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_enable_clk);

void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
	u16 clk;

	host->mmc->actual_clock = 0;

	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		return;

	clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
	sdhci_enable_clk(host, clk);
}
EXPORT_SYMBOL_GPL(sdhci_set_clock);

static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
				unsigned short vdd)
{
	struct mmc_host *mmc = host->mmc;

	mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);

	if (mode != MMC_POWER_OFF)
		sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
	else
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
}

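/*
 * Note: 'vdd' here is an OCR bit index, not a voltage; '1 << vdd' turns
 * it back into the MMC_VDD_* bitmask that the switch below matches
 * against (e.g. bit 21 corresponds to MMC_VDD_33_34).
 */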
void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
			   unsigned short vdd)
{
	u8 pwr = 0;

	if (mode != MMC_POWER_OFF) {
		switch (1 << vdd) {
		case MMC_VDD_165_195:
		/*
		 * Without a regulator, SDHCI does not support 2.0v
		 * so we only get here if the driver deliberately
		 * added the 2.0v range to ocr_avail. Map it to 1.8v
		 * for the purpose of turning on the power.
		 */
		case MMC_VDD_20_21:
			pwr = SDHCI_POWER_180;
			break;
		case MMC_VDD_29_30:
		case MMC_VDD_30_31:
			pwr = SDHCI_POWER_300;
			break;
		case MMC_VDD_32_33:
		case MMC_VDD_33_34:
			pwr = SDHCI_POWER_330;
			break;
		default:
			WARN(1, "%s: Invalid vdd %#x\n",
			     mmc_hostname(host->mmc), vdd);
			break;
		}
	}

	if (host->pwr == pwr)
		return;

	host->pwr = pwr;

	if (pwr == 0) {
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	} else {
		/*
		 * Spec says that we should clear the power reg before setting
		 * a new value. Some controllers don't seem to like this though.
		 */
		if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
			sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);

		/*
		 * At least the Marvell CaFe chip gets confused if we set the
		 * voltage and turn on the power at the same time, so set the
		 * voltage first.
		 */
		if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
			sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		pwr |= SDHCI_POWER_ON;

		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_on(host);

		/*
		 * Some controllers need an extra 10 ms delay before they
		 * can apply clock after applying power.
		 */
		if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
			mdelay(10);
	}
}
EXPORT_SYMBOL_GPL(sdhci_set_power_noreg);

void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
		     unsigned short vdd)
{
	if (IS_ERR(host->mmc->supply.vmmc))
		sdhci_set_power_noreg(host, mode, vdd);
	else
		sdhci_set_power_reg(host, mode, vdd);
}
EXPORT_SYMBOL_GPL(sdhci_set_power);

/*****************************************************************************\
 *                                                                           *
 * MMC callbacks                                                             *
 *                                                                           *
\*****************************************************************************/

static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host;
	int present;
	unsigned long flags;

	host = mmc_priv(mmc);

	/* Firstly check card presence */
	present = mmc->ops->get_cd(mmc);

	spin_lock_irqsave(&host->lock, flags);

	sdhci_led_activate(host);

	/*
	 * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
	 * requests if Auto-CMD12 is enabled.
	 */
	if (sdhci_auto_cmd12(host, mrq)) {
		if (mrq->stop) {
			mrq->data->stop = NULL;
			mrq->stop = NULL;
		}
	}

	if (!present || host->flags & SDHCI_DEVICE_DEAD) {
		mrq->cmd->error = -ENOMEDIUM;
		sdhci_finish_mrq(host, mrq);
	} else {
		if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
			sdhci_send_command(host, mrq->sbc);
		else
			sdhci_send_command(host, mrq->cmd);
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}

void sdhci_set_bus_width(struct sdhci_host *host, int width)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	if (width == MMC_BUS_WIDTH_8) {
		ctrl &= ~SDHCI_CTRL_4BITBUS;
		ctrl |= SDHCI_CTRL_8BITBUS;
	} else {
		if (host->mmc->caps & MMC_CAP_8_BIT_DATA)
			ctrl &= ~SDHCI_CTRL_8BITBUS;
		if (width == MMC_BUS_WIDTH_4)
			ctrl |= SDHCI_CTRL_4BITBUS;
		else
			ctrl &= ~SDHCI_CTRL_4BITBUS;
	}
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_set_bus_width);

void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
{
	u16 ctrl_2;

	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	/* Select Bus Speed Mode for host */
	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
	if ((timing == MMC_TIMING_MMC_HS200) ||
	    (timing == MMC_TIMING_UHS_SDR104))
		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
	else if (timing == MMC_TIMING_UHS_SDR12)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
	else if (timing == MMC_TIMING_UHS_SDR25)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
	else if (timing == MMC_TIMING_UHS_SDR50)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
	else if ((timing == MMC_TIMING_UHS_DDR50) ||
		 (timing == MMC_TIMING_MMC_DDR52))
		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
	else if (timing == MMC_TIMING_MMC_HS400)
		ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
}
EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);

void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u8 ctrl;

	if (ios->power_mode == MMC_POWER_UNDEFINED)
		return;

	if (host->flags & SDHCI_DEVICE_DEAD) {
		if (!IS_ERR(mmc->supply.vmmc) &&
		    ios->power_mode == MMC_POWER_OFF)
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		return;
	}

	/*
	 * Reset the chip on each power off.
	 * Should clear out any weird states.
	 */
	if (ios->power_mode == MMC_POWER_OFF) {
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
		sdhci_reinit(host);
	}

	if (host->version >= SDHCI_SPEC_300 &&
		(ios->power_mode == MMC_POWER_UP) &&
		!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
		sdhci_enable_preset_value(host, false);

	if (!ios->clock || ios->clock != host->clock) {
		host->ops->set_clock(host, ios->clock);
		host->clock = ios->clock;

		if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
		    host->clock) {
			host->timeout_clk = host->mmc->actual_clock ?
						host->mmc->actual_clock / 1000 :
						host->clock / 1000;
			host->mmc->max_busy_timeout =
				host->ops->get_max_timeout_count ?
				host->ops->get_max_timeout_count(host) :
				1 << 27;
			host->mmc->max_busy_timeout /= host->timeout_clk;
		}
	}

	if (host->ops->set_power)
		host->ops->set_power(host, ios->power_mode, ios->vdd);
	else
		sdhci_set_power(host, ios->power_mode, ios->vdd);

	if (host->ops->platform_send_init_74_clocks)
		host->ops->platform_send_init_74_clocks(host, ios->power_mode);

	host->ops->set_bus_width(host, ios->bus_width);

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	if (!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) {
		if (ios->timing == MMC_TIMING_SD_HS ||
		     ios->timing == MMC_TIMING_MMC_HS ||
		     ios->timing == MMC_TIMING_MMC_HS400 ||
		     ios->timing == MMC_TIMING_MMC_HS200 ||
		     ios->timing == MMC_TIMING_MMC_DDR52 ||
		     ios->timing == MMC_TIMING_UHS_SDR50 ||
		     ios->timing == MMC_TIMING_UHS_SDR104 ||
		     ios->timing == MMC_TIMING_UHS_DDR50 ||
		     ios->timing == MMC_TIMING_UHS_SDR25)
			ctrl |= SDHCI_CTRL_HISPD;
		else
			ctrl &= ~SDHCI_CTRL_HISPD;
	}

	if (host->version >= SDHCI_SPEC_300) {
		u16 clk, ctrl_2;

		if (!host->preset_enabled) {
			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
			/*
			 * We only need to set Driver Strength if the
			 * preset value enable is not set.
			 */
			ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
			if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
			else {
				pr_warn("%s: invalid driver type, default to driver type B\n",
					mmc_hostname(mmc));
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			}

			sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
		} else {
			/*
			 * According to SDHC Spec v3.00, if the Preset Value
			 * Enable in the Host Control 2 register is set, we
			 * need to reset SD Clock Enable before changing High
			 * Speed Enable to avoid generating clock glitches.
1756 			 */
1757 
1758 			/* Reset SD Clock Enable */
1759 			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1760 			clk &= ~SDHCI_CLOCK_CARD_EN;
1761 			sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1762 
1763 			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1764 
1765 			/* Re-enable SD Clock */
1766 			host->ops->set_clock(host, host->clock);
1767 		}
1768 
1769 		/* Reset SD Clock Enable */
1770 		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1771 		clk &= ~SDHCI_CLOCK_CARD_EN;
1772 		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1773 
1774 		host->ops->set_uhs_signaling(host, ios->timing);
1775 		host->timing = ios->timing;
1776 
1777 		if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
1778 				((ios->timing == MMC_TIMING_UHS_SDR12) ||
1779 				 (ios->timing == MMC_TIMING_UHS_SDR25) ||
1780 				 (ios->timing == MMC_TIMING_UHS_SDR50) ||
1781 				 (ios->timing == MMC_TIMING_UHS_SDR104) ||
1782 				 (ios->timing == MMC_TIMING_UHS_DDR50) ||
1783 				 (ios->timing == MMC_TIMING_MMC_DDR52))) {
1784 			u16 preset;
1785 
1786 			sdhci_enable_preset_value(host, true);
1787 			preset = sdhci_get_preset_value(host);
1788 			ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
1789 				>> SDHCI_PRESET_DRV_SHIFT;
1790 		}
1791 
1792 		/* Re-enable SD Clock */
1793 		host->ops->set_clock(host, host->clock);
1794 	} else
1795 		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1796 
1797 	/*
1798 	 * Some (ENE) controllers misbehave on some ios operations,
1799 	 * signalling timeout and CRC errors even on CMD0. Resetting
1800 	 * the controller on each ios seems to solve the problem.
1801 	 */
1802 	if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
1803 		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
1804 
1805 	mmiowb();
1806 }
1807 EXPORT_SYMBOL_GPL(sdhci_set_ios);
1808 
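/* Report whether a card is present; a card-detect GPIO takes precedence over the controller's native detection. */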
1809 static int sdhci_get_cd(struct mmc_host *mmc)
1810 {
1811 	struct sdhci_host *host = mmc_priv(mmc);
1812 	int gpio_cd = mmc_gpio_get_cd(mmc);
1813 
1814 	if (host->flags & SDHCI_DEVICE_DEAD)
1815 		return 0;
1816 
1817 	/* If nonremovable, assume that the card is always present. */
1818 	if (!mmc_card_is_removable(host->mmc))
1819 		return 1;
1820 
1821 	/*
1822 	 * Try slot gpio detect. If defined, it takes precedence
1823 	 * over built-in controller functionality.
1824 	 */
1825 	if (gpio_cd >= 0)
1826 		return !!gpio_cd;
1827 
1828 	/* If polling, assume that the card is always present. */
1829 	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
1830 		return 1;
1831 
1832 	/* Host native card detect */
1833 	return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
1834 }
1835 
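/* Sample the write-protect state once, honouring the inverted write-protect quirk. */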
1836 static int sdhci_check_ro(struct sdhci_host *host)
1837 {
1838 	unsigned long flags;
1839 	int is_readonly;
1840 
1841 	spin_lock_irqsave(&host->lock, flags);
1842 
1843 	if (host->flags & SDHCI_DEVICE_DEAD)
1844 		is_readonly = 0;
1845 	else if (host->ops->get_ro)
1846 		is_readonly = host->ops->get_ro(host);
1847 	else
1848 		is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
1849 				& SDHCI_WRITE_PROTECT);
1850 
1851 	spin_unlock_irqrestore(&host->lock, flags);
1852 
1853 	/* This quirk needs to be replaced by a callback-function later */
1854 	return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
1855 		!is_readonly : is_readonly;
1856 }
1857 
1858 #define SAMPLE_COUNT	5
1859 
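/* With the unstable-RO quirk, take a majority vote over SAMPLE_COUNT reads. */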
1860 static int sdhci_get_ro(struct mmc_host *mmc)
1861 {
1862 	struct sdhci_host *host = mmc_priv(mmc);
1863 	int i, ro_count;
1864 
1865 	if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
1866 		return sdhci_check_ro(host);
1867 
1868 	ro_count = 0;
1869 	for (i = 0; i < SAMPLE_COUNT; i++) {
1870 		if (sdhci_check_ro(host)) {
1871 			if (++ro_count > SAMPLE_COUNT / 2)
1872 				return 1;
1873 		}
1874 		msleep(30);
1875 	}
1876 	return 0;
1877 }
1878 
1879 static void sdhci_hw_reset(struct mmc_host *mmc)
1880 {
1881 	struct sdhci_host *host = mmc_priv(mmc);
1882 
1883 	if (host->ops && host->ops->hw_reset)
1884 		host->ops->hw_reset(host);
1885 }
1886 
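/* Caller must hold host->lock. */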
1887 static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
1888 {
1889 	if (!(host->flags & SDHCI_DEVICE_DEAD)) {
1890 		if (enable)
1891 			host->ier |= SDHCI_INT_CARD_INT;
1892 		else
1893 			host->ier &= ~SDHCI_INT_CARD_INT;
1894 
1895 		sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
1896 		sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
1897 		mmiowb();
1898 	}
1899 }
1900 
1901 void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
1902 {
1903 	struct sdhci_host *host = mmc_priv(mmc);
1904 	unsigned long flags;
1905 
1906 	if (enable)
1907 		pm_runtime_get_noresume(host->mmc->parent);
1908 
1909 	spin_lock_irqsave(&host->lock, flags);
1910 	if (enable)
1911 		host->flags |= SDHCI_SDIO_IRQ_ENABLED;
1912 	else
1913 		host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;
1914 
1915 	sdhci_enable_sdio_irq_nolock(host, enable);
1916 	spin_unlock_irqrestore(&host->lock, flags);
1917 
1918 	if (!enable)
1919 		pm_runtime_put_noidle(host->mmc->parent);
1920 }
1921 EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq);
1922 
1923 int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
1924 				      struct mmc_ios *ios)
1925 {
1926 	struct sdhci_host *host = mmc_priv(mmc);
1927 	u16 ctrl;
1928 	int ret;
1929 
1930 	/*
1931 	 * Signal Voltage Switching is only applicable for Host Controllers
1932 	 * v3.00 and above.
1933 	 */
1934 	if (host->version < SDHCI_SPEC_300)
1935 		return 0;
1936 
1937 	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1938 
1939 	switch (ios->signal_voltage) {
1940 	case MMC_SIGNAL_VOLTAGE_330:
1941 		if (!(host->flags & SDHCI_SIGNALING_330))
1942 			return -EINVAL;
1943 		/* Set 1.8V Signal Enable in the Host Control2 register to 0 */
1944 		ctrl &= ~SDHCI_CTRL_VDD_180;
1945 		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1946 
1947 		if (!IS_ERR(mmc->supply.vqmmc)) {
1948 			ret = mmc_regulator_set_vqmmc(mmc, ios);
1949 			if (ret) {
1950 				pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
1951 					mmc_hostname(mmc));
1952 				return -EIO;
1953 			}
1954 		}
1955 		/* Wait for 5ms */
1956 		usleep_range(5000, 5500);
1957 
1958 		/* 3.3V regulator output should be stable within 5 ms */
1959 		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1960 		if (!(ctrl & SDHCI_CTRL_VDD_180))
1961 			return 0;
1962 
1963 		pr_warn("%s: 3.3V regulator output did not become stable\n",
1964 			mmc_hostname(mmc));
1965 
1966 		return -EAGAIN;
1967 	case MMC_SIGNAL_VOLTAGE_180:
1968 		if (!(host->flags & SDHCI_SIGNALING_180))
1969 			return -EINVAL;
1970 		if (!IS_ERR(mmc->supply.vqmmc)) {
1971 			ret = mmc_regulator_set_vqmmc(mmc, ios);
1972 			if (ret) {
1973 				pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
1974 					mmc_hostname(mmc));
1975 				return -EIO;
1976 			}
1977 		}
1978 
1979 		/*
1980 		 * Enable 1.8V Signal Enable in the Host Control2
1981 		 * register
1982 		 */
1983 		ctrl |= SDHCI_CTRL_VDD_180;
1984 		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1985 
1986 		/* Some controllers need to do more when switching */
1987 		if (host->ops->voltage_switch)
1988 			host->ops->voltage_switch(host);
1989 
1990 		/* 1.8V regulator output should be stable within 5 ms */
1991 		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1992 		if (ctrl & SDHCI_CTRL_VDD_180)
1993 			return 0;
1994 
1995 		pr_warn("%s: 1.8V regulator output did not become stable\n",
1996 			mmc_hostname(mmc));
1997 
1998 		return -EAGAIN;
1999 	case MMC_SIGNAL_VOLTAGE_120:
2000 		if (!(host->flags & SDHCI_SIGNALING_120))
2001 			return -EINVAL;
2002 		if (!IS_ERR(mmc->supply.vqmmc)) {
2003 			ret = mmc_regulator_set_vqmmc(mmc, ios);
2004 			if (ret) {
2005 				pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
2006 					mmc_hostname(mmc));
2007 				return -EIO;
2008 			}
2009 		}
2010 		return 0;
2011 	default:
2012 		/* No signal voltage switch required */
2013 		return 0;
2014 	}
2015 }
2016 EXPORT_SYMBOL_GPL(sdhci_start_signal_voltage_switch);
2017 
2018 static int sdhci_card_busy(struct mmc_host *mmc)
2019 {
2020 	struct sdhci_host *host = mmc_priv(mmc);
2021 	u32 present_state;
2022 
2023 	/* Check whether DAT[0] is 0 */
2024 	present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
2025 
2026 	return !(present_state & SDHCI_DATA_0_LVL_MASK);
2027 }
2028 
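/* Flag that the next HS200 tuning pass is preparation for HS400. */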
2029 static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
2030 {
2031 	struct sdhci_host *host = mmc_priv(mmc);
2032 	unsigned long flags;
2033 
2034 	spin_lock_irqsave(&host->lock, flags);
2035 	host->flags |= SDHCI_HS400_TUNING;
2036 	spin_unlock_irqrestore(&host->lock, flags);
2037 
2038 	return 0;
2039 }
2040 
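/* Set Execute Tuning and leave only Buffer Read Ready interrupts enabled while tuning runs. */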
2041 static void sdhci_start_tuning(struct sdhci_host *host)
2042 {
2043 	u16 ctrl;
2044 
2045 	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2046 	ctrl |= SDHCI_CTRL_EXEC_TUNING;
2047 	if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
2048 		ctrl |= SDHCI_CTRL_TUNED_CLK;
2049 	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2050 
2051 	/*
2052 	 * As per the Host Controller spec v3.00, tuning command
2053 	 * generates Buffer Read Ready interrupt, so enable that.
2054 	 *
2055 	 * Note: The spec clearly says that when tuning sequence
2056 	 * is being performed, the controller does not generate
2057 	 * interrupts other than Buffer Read Ready interrupt. But
2058 	 * to make sure we don't hit a controller bug, we _only_
2059 	 * enable Buffer Read Ready interrupt here.
2060 	 */
2061 	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
2062 	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
2063 }
2064 
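/* Restore the interrupt enables that tuning temporarily replaced. */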
2065 static void sdhci_end_tuning(struct sdhci_host *host)
2066 {
2067 	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2068 	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2069 }
2070 
2071 static void sdhci_reset_tuning(struct sdhci_host *host)
2072 {
2073 	u16 ctrl;
2074 
2075 	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2076 	ctrl &= ~SDHCI_CTRL_TUNED_CLK;
2077 	ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
2078 	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2079 }
2080 
2081 static void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode)
2082 {
2083 	sdhci_reset_tuning(host);
2084 
2085 	sdhci_do_reset(host, SDHCI_RESET_CMD);
2086 	sdhci_do_reset(host, SDHCI_RESET_DATA);
2087 
2088 	sdhci_end_tuning(host);
2089 
2090 	mmc_abort_tuning(host->mmc, opcode);
2091 }
2092 
2093 /*
2094  * We use sdhci_send_tuning() because mmc_send_tuning() is not a good fit. SDHCI
2095  * tuning command does not have a data payload (or rather the hardware does it
2096  * automatically) so mmc_send_tuning() will return -EIO. Also the tuning command
2097  * interrupt setup is different to other commands and there is no timeout
2098  * interrupt so special handling is needed.
2099  */
2100 static void sdhci_send_tuning(struct sdhci_host *host, u32 opcode)
2101 {
2102 	struct mmc_host *mmc = host->mmc;
2103 	struct mmc_command cmd = {};
2104 	struct mmc_request mrq = {};
2105 	unsigned long flags;
2106 	u32 b = host->sdma_boundary;
2107 
2108 	spin_lock_irqsave(&host->lock, flags);
2109 
2110 	cmd.opcode = opcode;
2111 	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
2112 	cmd.mrq = &mrq;
2113 
2114 	mrq.cmd = &cmd;
2115 	/*
2116 	 * In response to CMD19, the card sends 64 bytes of tuning
2117 	 * block to the Host Controller. So we set the block size
2118 	 * to 64 here.
2119 	 */
2120 	if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 &&
2121 	    mmc->ios.bus_width == MMC_BUS_WIDTH_8)
2122 		sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 128), SDHCI_BLOCK_SIZE);
2123 	else
2124 		sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 64), SDHCI_BLOCK_SIZE);
2125 
2126 	/*
2127 	 * The tuning block is sent by the card to the host controller.
2128 	 * So we set the TRNS_READ bit in the Transfer Mode register.
2129 	 * This also takes care of setting DMA Enable and Multi Block
2130 	 * Select in the same register to 0.
2131 	 */
2132 	sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
2133 
2134 	sdhci_send_command(host, &cmd);
2135 
2136 	host->cmd = NULL;
2137 
2138 	sdhci_del_timer(host, &mrq);
2139 
2140 	host->tuning_done = 0;
2141 
2142 	mmiowb();
2143 	spin_unlock_irqrestore(&host->lock, flags);
2144 
2145 	/* Wait for Buffer Read Ready interrupt */
2146 	wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1),
2147 			   msecs_to_jiffies(50));
2149 }
2150 
2151 static void __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
2152 {
2153 	int i;
2154 
2155 	/*
2156 	 * Issue the opcode repeatedly until Execute Tuning is cleared to 0
2157 	 * or the number of loops reaches MAX_TUNING_LOOP (40).
2158 	 */
2159 	for (i = 0; i < MAX_TUNING_LOOP; i++) {
2160 		u16 ctrl;
2161 
2162 		sdhci_send_tuning(host, opcode);
2163 
2164 		if (!host->tuning_done) {
2165 			pr_debug("%s: Tuning timeout, falling back to fixed sampling clock\n",
2166 				 mmc_hostname(host->mmc));
2167 			sdhci_abort_tuning(host, opcode);
2168 			return;
2169 		}
2170 
2171 		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2172 		if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) {
2173 			if (ctrl & SDHCI_CTRL_TUNED_CLK)
2174 				return; /* Success! */
2175 			break;
2176 		}
2177 
2178 		/* Spec does not require a delay between tuning cycles */
2179 		if (host->tuning_delay > 0)
2180 			mdelay(host->tuning_delay);
2181 	}
2182 
2183 	pr_info("%s: Tuning failed, falling back to fixed sampling clock\n",
2184 		mmc_hostname(host->mmc));
2185 	sdhci_reset_tuning(host);
2186 }
2187 
2188 int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
2189 {
2190 	struct sdhci_host *host = mmc_priv(mmc);
2191 	int err = 0;
2192 	unsigned int tuning_count = 0;
2193 	bool hs400_tuning;
2194 
2195 	hs400_tuning = host->flags & SDHCI_HS400_TUNING;
2196 
2197 	if (host->tuning_mode == SDHCI_TUNING_MODE_1)
2198 		tuning_count = host->tuning_count;
2199 
2200 	/*
2201 	 * The Host Controller needs tuning in case of SDR104 and DDR50
2202 	 * mode, and for SDR50 mode when Use Tuning for SDR50 is set in
2203 	 * the Capabilities register.
2204 	 * If the Host Controller supports the HS200 mode then the
2205 	 * tuning function has to be executed.
2206 	 */
2207 	switch (host->timing) {
2208 	/* HS400 tuning is done in HS200 mode */
2209 	case MMC_TIMING_MMC_HS400:
2210 		err = -EINVAL;
2211 		goto out;
2212 
2213 	case MMC_TIMING_MMC_HS200:
2214 		/*
2215 		 * Periodic re-tuning for HS400 is not expected to be needed, so
2216 		 * disable it here.
2217 		 */
2218 		if (hs400_tuning)
2219 			tuning_count = 0;
2220 		break;
2221 
2222 	case MMC_TIMING_UHS_SDR104:
2223 	case MMC_TIMING_UHS_DDR50:
2224 		break;
2225 
2226 	case MMC_TIMING_UHS_SDR50:
2227 		if (host->flags & SDHCI_SDR50_NEEDS_TUNING)
2228 			break;
2229 		/* FALLTHROUGH */
2230 
2231 	default:
2232 		goto out;
2233 	}
2234 
2235 	if (host->ops->platform_execute_tuning) {
2236 		err = host->ops->platform_execute_tuning(host, opcode);
2237 		goto out;
2238 	}
2239 
2240 	host->mmc->retune_period = tuning_count;
2241 
2242 	if (host->tuning_delay < 0)
2243 		host->tuning_delay = opcode == MMC_SEND_TUNING_BLOCK;
2244 
2245 	sdhci_start_tuning(host);
2246 
2247 	__sdhci_execute_tuning(host, opcode);
2248 
2249 	sdhci_end_tuning(host);
2250 out:
2251 	host->flags &= ~SDHCI_HS400_TUNING;
2252 
2253 	return err;
2254 }
2255 EXPORT_SYMBOL_GPL(sdhci_execute_tuning);
2256 
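/* Toggle Preset Value Enable (v3.00+) and mirror the state in host->flags. */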
2257 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
2258 {
2259 	/* Host Controller v3.00 defines preset value registers */
2260 	if (host->version < SDHCI_SPEC_300)
2261 		return;
2262 
2263 	/*
2264 	 * We only enable or disable Preset Value if it is not already
2265 	 * enabled or disabled, respectively. Otherwise, we bail out.
2266 	 */
2267 	if (host->preset_enabled != enable) {
2268 		u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2269 
2270 		if (enable)
2271 			ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
2272 		else
2273 			ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
2274 
2275 		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2276 
2277 		if (enable)
2278 			host->flags |= SDHCI_PV_ENABLED;
2279 		else
2280 			host->flags &= ~SDHCI_PV_ENABLED;
2281 
2282 		host->preset_enabled = enable;
2283 	}
2284 }
2285 
2286 static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
2287 				int err)
2288 {
2289 	struct sdhci_host *host = mmc_priv(mmc);
2290 	struct mmc_data *data = mrq->data;
2291 
2292 	if (data->host_cookie != COOKIE_UNMAPPED)
2293 		dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
2294 			     mmc_get_dma_dir(data));
2295 
2296 	data->host_cookie = COOKIE_UNMAPPED;
2297 }
2298 
2299 static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
2300 {
2301 	struct sdhci_host *host = mmc_priv(mmc);
2302 
2303 	mrq->data->host_cookie = COOKIE_UNMAPPED;
2304 
2305 	/*
2306 	 * No pre-mapping in the pre hook if we're using the bounce buffer;
2307 	 * for that we would need two bounce buffers, since one buffer is
2308 	 * in flight when this is getting called.
2309 	 */
2310 	if (host->flags & SDHCI_REQ_USE_DMA && !host->bounce_buffer)
2311 		sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
2312 }
2313 
2314 static inline bool sdhci_has_requests(struct sdhci_host *host)
2315 {
2316 	return host->cmd || host->data_cmd;
2317 }
2318 
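/* Complete any requests still in flight with the given error. */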
2319 static void sdhci_error_out_mrqs(struct sdhci_host *host, int err)
2320 {
2321 	if (host->data_cmd) {
2322 		host->data_cmd->error = err;
2323 		sdhci_finish_mrq(host, host->data_cmd->mrq);
2324 	}
2325 
2326 	if (host->cmd) {
2327 		host->cmd->error = err;
2328 		sdhci_finish_mrq(host, host->cmd->mrq);
2329 	}
2330 }
2331 
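/* Handle a (possible) card insertion or removal; abort any transfer whose card has gone. */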
2332 static void sdhci_card_event(struct mmc_host *mmc)
2333 {
2334 	struct sdhci_host *host = mmc_priv(mmc);
2335 	unsigned long flags;
2336 	int present;
2337 
2338 	/* First check if client has provided their own card event */
2339 	if (host->ops->card_event)
2340 		host->ops->card_event(host);
2341 
2342 	present = mmc->ops->get_cd(mmc);
2343 
2344 	spin_lock_irqsave(&host->lock, flags);
2345 
2346 	/* Check sdhci_has_requests() first in case we are runtime suspended */
2347 	if (sdhci_has_requests(host) && !present) {
2348 		pr_err("%s: Card removed during transfer!\n",
2349 			mmc_hostname(host->mmc));
2350 		pr_err("%s: Resetting controller.\n",
2351 			mmc_hostname(host->mmc));
2352 
2353 		sdhci_do_reset(host, SDHCI_RESET_CMD);
2354 		sdhci_do_reset(host, SDHCI_RESET_DATA);
2355 
2356 		sdhci_error_out_mrqs(host, -ENOMEDIUM);
2357 	}
2358 
2359 	spin_unlock_irqrestore(&host->lock, flags);
2360 }
2361 
2362 static const struct mmc_host_ops sdhci_ops = {
2363 	.request	= sdhci_request,
2364 	.post_req	= sdhci_post_req,
2365 	.pre_req	= sdhci_pre_req,
2366 	.set_ios	= sdhci_set_ios,
2367 	.get_cd		= sdhci_get_cd,
2368 	.get_ro		= sdhci_get_ro,
2369 	.hw_reset	= sdhci_hw_reset,
2370 	.enable_sdio_irq = sdhci_enable_sdio_irq,
2371 	.start_signal_voltage_switch	= sdhci_start_signal_voltage_switch,
2372 	.prepare_hs400_tuning		= sdhci_prepare_hs400_tuning,
2373 	.execute_tuning			= sdhci_execute_tuning,
2374 	.card_event			= sdhci_card_event,
2375 	.card_busy	= sdhci_card_busy,
2376 };
2377 
2378 /*****************************************************************************\
2379  *                                                                           *
2380  * Tasklets                                                                  *
2381  *                                                                           *
2382 \*****************************************************************************/
2383 
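/* Finish one completed request; returns true when there is nothing (more) to do right now. */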
2384 static bool sdhci_request_done(struct sdhci_host *host)
2385 {
2386 	unsigned long flags;
2387 	struct mmc_request *mrq;
2388 	int i;
2389 
2390 	spin_lock_irqsave(&host->lock, flags);
2391 
2392 	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
2393 		mrq = host->mrqs_done[i];
2394 		if (mrq)
2395 			break;
2396 	}
2397 
2398 	if (!mrq) {
2399 		spin_unlock_irqrestore(&host->lock, flags);
2400 		return true;
2401 	}
2402 
2403 	sdhci_del_timer(host, mrq);
2404 
2405 	/*
2406 	 * Always unmap the data buffers if they were mapped by
2407 	 * sdhci_prepare_data() whenever we finish with a request.
2408 	 * This avoids leaking DMA mappings on error.
2409 	 */
2410 	if (host->flags & SDHCI_REQ_USE_DMA) {
2411 		struct mmc_data *data = mrq->data;
2412 
2413 		if (data && data->host_cookie == COOKIE_MAPPED) {
2414 			if (host->bounce_buffer) {
2415 				/*
2416 				 * On reads, copy the bounced data into the
2417 				 * sglist
2418 				 */
2419 				if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) {
2420 					unsigned int length = data->bytes_xfered;
2421 
2422 					if (length > host->bounce_buffer_size) {
2423 						pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n",
2424 						       mmc_hostname(host->mmc),
2425 						       host->bounce_buffer_size,
2426 						       data->bytes_xfered);
2427 						/* Cap it down and continue */
2428 						length = host->bounce_buffer_size;
2429 					}
2430 					dma_sync_single_for_cpu(
2431 						host->mmc->parent,
2432 						host->bounce_addr,
2433 						host->bounce_buffer_size,
2434 						DMA_FROM_DEVICE);
2435 					sg_copy_from_buffer(data->sg,
2436 						data->sg_len,
2437 						host->bounce_buffer,
2438 						length);
2439 				} else {
2440 					/* No copying, just switch ownership */
2441 					dma_sync_single_for_cpu(
2442 						host->mmc->parent,
2443 						host->bounce_addr,
2444 						host->bounce_buffer_size,
2445 						mmc_get_dma_dir(data));
2446 				}
2447 			} else {
2448 				/* Unmap the raw data */
2449 				dma_unmap_sg(mmc_dev(host->mmc), data->sg,
2450 					     data->sg_len,
2451 					     mmc_get_dma_dir(data));
2452 			}
2453 			data->host_cookie = COOKIE_UNMAPPED;
2454 		}
2455 	}
2456 
2457 	/*
2458 	 * The controller needs a reset of internal state machines
2459 	 * upon error conditions.
2460 	 */
2461 	if (sdhci_needs_reset(host, mrq)) {
2462 		/*
2463 		 * Do not finish until command and data lines are available for
2464 		 * reset. Note there can only be one other mrq, so it cannot
2465 		 * also be in mrqs_done, otherwise host->cmd and host->data_cmd
2466 		 * would both be null.
2467 		 */
2468 		if (host->cmd || host->data_cmd) {
2469 			spin_unlock_irqrestore(&host->lock, flags);
2470 			return true;
2471 		}
2472 
2473 		/* Some controllers need this kick or reset won't work here */
2474 		if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
2475 			/* This is to force an update */
2476 			host->ops->set_clock(host, host->clock);
2477 
2478 		/* Spec says we should do both at the same time, but Ricoh
2479 		   controllers do not like that. */
2480 		sdhci_do_reset(host, SDHCI_RESET_CMD);
2481 		sdhci_do_reset(host, SDHCI_RESET_DATA);
2482 
2483 		host->pending_reset = false;
2484 	}
2485 
2486 	if (!sdhci_has_requests(host))
2487 		sdhci_led_deactivate(host);
2488 
2489 	host->mrqs_done[i] = NULL;
2490 
2491 	mmiowb();
2492 	spin_unlock_irqrestore(&host->lock, flags);
2493 
2494 	mmc_request_done(host->mmc, mrq);
2495 
2496 	return false;
2497 }
2498 
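/* Bottom half: drain all requests completed by the interrupt handler. */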
2499 static void sdhci_tasklet_finish(unsigned long param)
2500 {
2501 	struct sdhci_host *host = (struct sdhci_host *)param;
2502 
2503 	while (!sdhci_request_done(host))
2504 		;
2505 }
2506 
2507 static void sdhci_timeout_timer(unsigned long data)
2508 {
2509 	struct sdhci_host *host;
2510 	unsigned long flags;
2511 
2512 	host = (struct sdhci_host *)data;
2513 
2514 	spin_lock_irqsave(&host->lock, flags);
2515 
2516 	if (host->cmd && !sdhci_data_line_cmd(host->cmd)) {
2517 		pr_err("%s: Timeout waiting for hardware cmd interrupt.\n",
2518 		       mmc_hostname(host->mmc));
2519 		sdhci_dumpregs(host);
2520 
2521 		host->cmd->error = -ETIMEDOUT;
2522 		sdhci_finish_mrq(host, host->cmd->mrq);
2523 	}
2524 
2525 	mmiowb();
2526 	spin_unlock_irqrestore(&host->lock, flags);
2527 }
2528 
2529 static void sdhci_timeout_data_timer(unsigned long data)
2530 {
2531 	struct sdhci_host *host;
2532 	unsigned long flags;
2533 
2534 	host = (struct sdhci_host *)data;
2535 
2536 	spin_lock_irqsave(&host->lock, flags);
2537 
2538 	if (host->data || host->data_cmd ||
2539 	    (host->cmd && sdhci_data_line_cmd(host->cmd))) {
2540 		pr_err("%s: Timeout waiting for hardware interrupt.\n",
2541 		       mmc_hostname(host->mmc));
2542 		sdhci_dumpregs(host);
2543 
2544 		if (host->data) {
2545 			host->data->error = -ETIMEDOUT;
2546 			sdhci_finish_data(host);
2547 		} else if (host->data_cmd) {
2548 			host->data_cmd->error = -ETIMEDOUT;
2549 			sdhci_finish_mrq(host, host->data_cmd->mrq);
2550 		} else {
2551 			host->cmd->error = -ETIMEDOUT;
2552 			sdhci_finish_mrq(host, host->cmd->mrq);
2553 		}
2554 	}
2555 
2556 	mmiowb();
2557 	spin_unlock_irqrestore(&host->lock, flags);
2558 }
2559 
2560 /*****************************************************************************\
2561  *                                                                           *
2562  * Interrupt handling                                                        *
2563  *                                                                           *
2564 \*****************************************************************************/
2565 
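/* Command interrupt handling; called with host->lock held from sdhci_irq(). */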
2566 static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p)
2567 {
2568 	/* Handle auto-CMD12 error */
2569 	if (intmask & SDHCI_INT_AUTO_CMD_ERR && host->data_cmd) {
2570 		struct mmc_request *mrq = host->data_cmd->mrq;
2571 		u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
2572 		int data_err_bit = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
2573 				   SDHCI_INT_DATA_TIMEOUT :
2574 				   SDHCI_INT_DATA_CRC;
2575 
2576 		/* Treat auto-CMD12 error the same as data error */
2577 		if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
2578 			*intmask_p |= data_err_bit;
2579 			return;
2580 		}
2581 	}
2582 
2583 	if (!host->cmd) {
2584 		/*
2585 		 * SDHCI recovers from errors by resetting the cmd and data
2586 		 * circuits.  Until that is done, there very well might be more
2587 		 * interrupts, so ignore them in that case.
2588 		 */
2589 		if (host->pending_reset)
2590 			return;
2591 		pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
2592 		       mmc_hostname(host->mmc), (unsigned)intmask);
2593 		sdhci_dumpregs(host);
2594 		return;
2595 	}
2596 
2597 	if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
2598 		       SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
2599 		if (intmask & SDHCI_INT_TIMEOUT)
2600 			host->cmd->error = -ETIMEDOUT;
2601 		else
2602 			host->cmd->error = -EILSEQ;
2603 
2604 		/* Treat data command CRC error the same as data CRC error */
2605 		if (host->cmd->data &&
2606 		    (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
2607 		     SDHCI_INT_CRC) {
2608 			host->cmd = NULL;
2609 			*intmask_p |= SDHCI_INT_DATA_CRC;
2610 			return;
2611 		}
2612 
2613 		sdhci_finish_mrq(host, host->cmd->mrq);
2614 		return;
2615 	}
2616 
2617 	/* Handle auto-CMD23 error */
2618 	if (intmask & SDHCI_INT_AUTO_CMD_ERR) {
2619 		struct mmc_request *mrq = host->cmd->mrq;
2620 		u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
2621 		int err = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
2622 			  -ETIMEDOUT :
2623 			  -EILSEQ;
2624 
2625 		if (mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
2626 			mrq->sbc->error = err;
2627 			sdhci_finish_mrq(host, mrq);
2628 			return;
2629 		}
2630 	}
2631 
2632 	if (intmask & SDHCI_INT_RESPONSE)
2633 		sdhci_finish_command(host);
2634 }
2635 
2636 static void sdhci_adma_show_error(struct sdhci_host *host)
2637 {
2638 	void *desc = host->adma_table;
2639 	dma_addr_t dma = host->adma_addr;
2640 
2641 	sdhci_dumpregs(host);
2642 
2643 	while (true) {
2644 		struct sdhci_adma2_64_desc *dma_desc = desc;
2645 
2646 		if (host->flags & SDHCI_USE_64_BIT_DMA)
2647 			SDHCI_DUMP("%08llx: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
2648 			    (unsigned long long)dma,
2649 			    le32_to_cpu(dma_desc->addr_hi),
2650 			    le32_to_cpu(dma_desc->addr_lo),
2651 			    le16_to_cpu(dma_desc->len),
2652 			    le16_to_cpu(dma_desc->cmd));
2653 		else
2654 			SDHCI_DUMP("%08llx: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
2655 			    (unsigned long long)dma,
2656 			    le32_to_cpu(dma_desc->addr_lo),
2657 			    le16_to_cpu(dma_desc->len),
2658 			    le16_to_cpu(dma_desc->cmd));
2659 
2660 		desc += host->desc_sz;
2661 		dma += host->desc_sz;
2662 
2663 		if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
2664 			break;
2665 	}
2666 }
2667 
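/* Data interrupt handling; called with host->lock held from sdhci_irq(). */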
2668 static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
2669 {
2670 	u32 command;
2671 
2672 	/* CMD19 generates _only_ Buffer Read Ready interrupt */
2673 	if (intmask & SDHCI_INT_DATA_AVAIL) {
2674 		command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
2675 		if (command == MMC_SEND_TUNING_BLOCK ||
2676 		    command == MMC_SEND_TUNING_BLOCK_HS200) {
2677 			host->tuning_done = 1;
2678 			wake_up(&host->buf_ready_int);
2679 			return;
2680 		}
2681 	}
2682 
2683 	if (!host->data) {
2684 		struct mmc_command *data_cmd = host->data_cmd;
2685 
2686 		/*
2687 		 * The "data complete" interrupt is also used to
2688 		 * indicate that a busy state has ended. See comment
2689 		 * above in sdhci_cmd_irq().
2690 		 */
2691 		if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) {
2692 			if (intmask & SDHCI_INT_DATA_TIMEOUT) {
2693 				host->data_cmd = NULL;
2694 				data_cmd->error = -ETIMEDOUT;
2695 				sdhci_finish_mrq(host, data_cmd->mrq);
2696 				return;
2697 			}
2698 			if (intmask & SDHCI_INT_DATA_END) {
2699 				host->data_cmd = NULL;
2700 				/*
2701 				 * Some cards handle busy-end interrupt
2702 				 * before the command completed, so make
2703 				 * sure we do things in the proper order.
2704 				 */
2705 				if (host->cmd == data_cmd)
2706 					return;
2707 
2708 				sdhci_finish_mrq(host, data_cmd->mrq);
2709 				return;
2710 			}
2711 		}
2712 
2713 		/*
2714 		 * SDHCI recovers from errors by resetting the cmd and data
2715 		 * circuits. Until that is done, there very well might be more
2716 		 * interrupts, so ignore them in that case.
2717 		 */
2718 		if (host->pending_reset)
2719 			return;
2720 
2721 		pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
2722 		       mmc_hostname(host->mmc), (unsigned)intmask);
2723 		sdhci_dumpregs(host);
2724 
2725 		return;
2726 	}
2727 
2728 	if (intmask & SDHCI_INT_DATA_TIMEOUT)
2729 		host->data->error = -ETIMEDOUT;
2730 	else if (intmask & SDHCI_INT_DATA_END_BIT)
2731 		host->data->error = -EILSEQ;
2732 	else if ((intmask & SDHCI_INT_DATA_CRC) &&
2733 		SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
2734 			!= MMC_BUS_TEST_R)
2735 		host->data->error = -EILSEQ;
2736 	else if (intmask & SDHCI_INT_ADMA_ERROR) {
2737 		pr_err("%s: ADMA error: 0x%08x\n", mmc_hostname(host->mmc),
2738 		       intmask);
2739 		sdhci_adma_show_error(host);
2740 		host->data->error = -EIO;
2741 		if (host->ops->adma_workaround)
2742 			host->ops->adma_workaround(host, intmask);
2743 	}
2744 
2745 	if (host->data->error)
2746 		sdhci_finish_data(host);
2747 	else {
2748 		if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
2749 			sdhci_transfer_pio(host);
2750 
2751 		/*
2752 		 * We currently don't do anything fancy with DMA
2753 		 * boundaries, but as we can't disable the feature
2754 		 * we need to at least restart the transfer.
2755 		 *
2756 		 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
2757 		 * should return a valid address to continue from, but as
2758 		 * some controllers are faulty, don't trust them.
2759 		 */
2760 		if (intmask & SDHCI_INT_DMA_END) {
2761 			u32 dmastart, dmanow;
2762 
2763 			dmastart = sdhci_sdma_address(host);
2764 			dmanow = dmastart + host->data->bytes_xfered;
2765 			/*
2766 			 * Force update to the next DMA block boundary.
2767 			 */
2768 			dmanow = (dmanow &
2769 				~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
2770 				SDHCI_DEFAULT_BOUNDARY_SIZE;
2771 			host->data->bytes_xfered = dmanow - dmastart;
2772 			DBG("DMA base 0x%08x, transferred 0x%06x bytes, next 0x%08x\n",
2773 			    dmastart, host->data->bytes_xfered, dmanow);
2774 			sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
2775 		}
2776 
2777 		if (intmask & SDHCI_INT_DATA_END) {
2778 			if (host->cmd == host->data_cmd) {
2779 				/*
2780 				 * Data managed to finish before the
2781 				 * command completed. Make sure we do
2782 				 * things in the proper order.
2783 				 */
2784 				host->data_early = 1;
2785 			} else {
2786 				sdhci_finish_data(host);
2787 			}
2788 		}
2789 	}
2790 }
2791 
2792 static irqreturn_t sdhci_irq(int irq, void *dev_id)
2793 {
2794 	irqreturn_t result = IRQ_NONE;
2795 	struct sdhci_host *host = dev_id;
2796 	u32 intmask, mask, unexpected = 0;
2797 	int max_loops = 16;
2798 
2799 	spin_lock(&host->lock);
2800 
2801 	if (host->runtime_suspended && !sdhci_sdio_irq_enabled(host)) {
2802 		spin_unlock(&host->lock);
2803 		return IRQ_NONE;
2804 	}
2805 
2806 	intmask = sdhci_readl(host, SDHCI_INT_STATUS);
2807 	if (!intmask || intmask == 0xffffffff) {
2808 		result = IRQ_NONE;
2809 		goto out;
2810 	}
2811 
2812 	do {
2813 		DBG("IRQ status 0x%08x\n", intmask);
2814 
2815 		if (host->ops->irq) {
2816 			intmask = host->ops->irq(host, intmask);
2817 			if (!intmask)
2818 				goto cont;
2819 		}
2820 
2821 		/* Clear selected interrupts. */
2822 		mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
2823 				  SDHCI_INT_BUS_POWER);
2824 		sdhci_writel(host, mask, SDHCI_INT_STATUS);
2825 
2826 		if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
2827 			u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
2828 				      SDHCI_CARD_PRESENT;
2829 
2830 			/*
2831 			 * There is an observation on i.MX eSDHC: the INSERT
2832 			 * bit will be immediately set again when it gets
2833 			 * cleared, if a card is inserted. We have to mask
2834 			 * the irq to prevent an interrupt storm which would
2835 			 * freeze the system. The REMOVE bit gets the
2836 			 * same treatment.
2837 			 *
2838 			 * More testing is needed here to ensure it works
2839 			 * for other platforms though.
2840 			 */
2841 			host->ier &= ~(SDHCI_INT_CARD_INSERT |
2842 				       SDHCI_INT_CARD_REMOVE);
2843 			host->ier |= present ? SDHCI_INT_CARD_REMOVE :
2844 					       SDHCI_INT_CARD_INSERT;
2845 			sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2846 			sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2847 
2848 			sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
2849 				     SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
2850 
2851 			host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
2852 						       SDHCI_INT_CARD_REMOVE);
2853 			result = IRQ_WAKE_THREAD;
2854 		}
2855 
2856 		if (intmask & SDHCI_INT_CMD_MASK)
2857 			sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK, &intmask);
2858 
2859 		if (intmask & SDHCI_INT_DATA_MASK)
2860 			sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
2861 
2862 		if (intmask & SDHCI_INT_BUS_POWER)
2863 			pr_err("%s: Card is consuming too much power!\n",
2864 				mmc_hostname(host->mmc));
2865 
2866 		if (intmask & SDHCI_INT_RETUNE)
2867 			mmc_retune_needed(host->mmc);
2868 
2869 		if ((intmask & SDHCI_INT_CARD_INT) &&
2870 		    (host->ier & SDHCI_INT_CARD_INT)) {
2871 			sdhci_enable_sdio_irq_nolock(host, false);
2872 			host->thread_isr |= SDHCI_INT_CARD_INT;
2873 			result = IRQ_WAKE_THREAD;
2874 		}
2875 
2876 		intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
2877 			     SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
2878 			     SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
2879 			     SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT);
2880 
2881 		if (intmask) {
2882 			unexpected |= intmask;
2883 			sdhci_writel(host, intmask, SDHCI_INT_STATUS);
2884 		}
2885 cont:
2886 		if (result == IRQ_NONE)
2887 			result = IRQ_HANDLED;
2888 
2889 		intmask = sdhci_readl(host, SDHCI_INT_STATUS);
2890 	} while (intmask && --max_loops);
2891 out:
2892 	spin_unlock(&host->lock);
2893 
2894 	if (unexpected) {
2895 		pr_err("%s: Unexpected interrupt 0x%08x.\n",
2896 			   mmc_hostname(host->mmc), unexpected);
2897 		sdhci_dumpregs(host);
2898 	}
2899 
2900 	return result;
2901 }
2902 
2903 static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
2904 {
2905 	struct sdhci_host *host = dev_id;
2906 	unsigned long flags;
2907 	u32 isr;
2908 
2909 	spin_lock_irqsave(&host->lock, flags);
2910 	isr = host->thread_isr;
2911 	host->thread_isr = 0;
2912 	spin_unlock_irqrestore(&host->lock, flags);
2913 
2914 	if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
2915 		struct mmc_host *mmc = host->mmc;
2916 
2917 		mmc->ops->card_event(mmc);
2918 		mmc_detect_change(mmc, msecs_to_jiffies(200));
2919 	}
2920 
2921 	if (isr & SDHCI_INT_CARD_INT) {
2922 		sdio_run_irqs(host->mmc);
2923 
2924 		spin_lock_irqsave(&host->lock, flags);
2925 		if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
2926 			sdhci_enable_sdio_irq_nolock(host, true);
2927 		spin_unlock_irqrestore(&host->lock, flags);
2928 	}
2929 
2930 	return isr ? IRQ_HANDLED : IRQ_NONE;
2931 }
2932 
2933 /*****************************************************************************\
2934  *                                                                           *
2935  * Suspend/resume                                                            *
2936  *                                                                           *
2937 \*****************************************************************************/
2938 
2939 #ifdef CONFIG_PM
2940 /*
2941  * To enable wakeup events, the corresponding events have to be enabled in
2942  * the Interrupt Status Enable register too. See 'Table 1-6: Wakeup Signal
2943  * Table' in the SD Host Controller Standard Specification.
2944  * It is useless to restore SDHCI_INT_ENABLE state in
2945  * sdhci_disable_irq_wakeups() since it will be set by
2946  * sdhci_enable_card_detection() or sdhci_init().
2947  */
2948 void sdhci_enable_irq_wakeups(struct sdhci_host *host)
2949 {
2950 	u8 val;
2951 	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
2952 			| SDHCI_WAKE_ON_INT;
2953 	u32 irq_val = SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
2954 		      SDHCI_INT_CARD_INT;
2955 
2956 	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
2957 	val |= mask;
2958 	/* Avoid spurious wake-ups */
2959 	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) {
2960 		val &= ~(SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE);
2961 		irq_val &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
2962 	}
2963 	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
2964 	sdhci_writel(host, irq_val, SDHCI_INT_ENABLE);
2965 }
2966 EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);
2967 
2968 static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
2969 {
2970 	u8 val;
2971 	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
2972 			| SDHCI_WAKE_ON_INT;
2973 
2974 	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
2975 	val &= ~mask;
2976 	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
2977 }
2978 
2979 int sdhci_suspend_host(struct sdhci_host *host)
2980 {
2981 	sdhci_disable_card_detection(host);
2982 
2983 	mmc_retune_timer_stop(host->mmc);
2984 
2985 	if (!device_may_wakeup(mmc_dev(host->mmc))) {
2986 		host->ier = 0;
2987 		sdhci_writel(host, 0, SDHCI_INT_ENABLE);
2988 		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
2989 		free_irq(host->irq, host);
2990 	} else {
2991 		sdhci_enable_irq_wakeups(host);
2992 		enable_irq_wake(host->irq);
2993 	}
2994 	return 0;
2995 }
2997 EXPORT_SYMBOL_GPL(sdhci_suspend_host);
2998 
2999 int sdhci_resume_host(struct sdhci_host *host)
3000 {
3001 	struct mmc_host *mmc = host->mmc;
3002 	int ret = 0;
3003 
3004 	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3005 		if (host->ops->enable_dma)
3006 			host->ops->enable_dma(host);
3007 	}
3008 
3009 	if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
3010 	    (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
3011 		/* Card keeps power but host controller does not */
3012 		sdhci_init(host, 0);
3013 		host->pwr = 0;
3014 		host->clock = 0;
3015 		mmc->ops->set_ios(mmc, &mmc->ios);
3016 	} else {
3017 		sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
3018 		mmiowb();
3019 	}
3020 
3021 	if (!device_may_wakeup(mmc_dev(host->mmc))) {
3022 		ret = request_threaded_irq(host->irq, sdhci_irq,
3023 					   sdhci_thread_irq, IRQF_SHARED,
3024 					   mmc_hostname(host->mmc), host);
3025 		if (ret)
3026 			return ret;
3027 	} else {
3028 		sdhci_disable_irq_wakeups(host);
3029 		disable_irq_wake(host->irq);
3030 	}
3031 
3032 	sdhci_enable_card_detection(host);
3033 
3034 	return ret;
3035 }
3037 EXPORT_SYMBOL_GPL(sdhci_resume_host);
3038 
3039 int sdhci_runtime_suspend_host(struct sdhci_host *host)
3040 {
3041 	unsigned long flags;
3042 
3043 	mmc_retune_timer_stop(host->mmc);
3044 
3045 	spin_lock_irqsave(&host->lock, flags);
3046 	host->ier &= SDHCI_INT_CARD_INT;
3047 	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3048 	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3049 	spin_unlock_irqrestore(&host->lock, flags);
3050 
3051 	synchronize_hardirq(host->irq);
3052 
3053 	spin_lock_irqsave(&host->lock, flags);
3054 	host->runtime_suspended = true;
3055 	spin_unlock_irqrestore(&host->lock, flags);
3056 
3057 	return 0;
3058 }
3059 EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
3060 
3061 int sdhci_runtime_resume_host(struct sdhci_host *host)
3062 {
3063 	struct mmc_host *mmc = host->mmc;
3064 	unsigned long flags;
3065 	int host_flags = host->flags;
3066 
3067 	if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3068 		if (host->ops->enable_dma)
3069 			host->ops->enable_dma(host);
3070 	}
3071 
3072 	sdhci_init(host, 0);
3073 
3074 	if (mmc->ios.power_mode != MMC_POWER_UNDEFINED &&
3075 	    mmc->ios.power_mode != MMC_POWER_OFF) {
3076 		/* Force clock and power re-program */
3077 		host->pwr = 0;
3078 		host->clock = 0;
3079 		mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
3080 		mmc->ops->set_ios(mmc, &mmc->ios);
3081 
3082 		if ((host_flags & SDHCI_PV_ENABLED) &&
3083 		    !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
3084 			spin_lock_irqsave(&host->lock, flags);
3085 			sdhci_enable_preset_value(host, true);
3086 			spin_unlock_irqrestore(&host->lock, flags);
3087 		}
3088 
3089 		if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
3090 		    mmc->ops->hs400_enhanced_strobe)
3091 			mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);
3092 	}
3093 
3094 	spin_lock_irqsave(&host->lock, flags);
3095 
3096 	host->runtime_suspended = false;
3097 
3098 	/* Enable SDIO IRQ */
3099 	if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
3100 		sdhci_enable_sdio_irq_nolock(host, true);
3101 
3102 	/* Enable Card Detection */
3103 	sdhci_enable_card_detection(host);
3104 
3105 	spin_unlock_irqrestore(&host->lock, flags);
3106 
3107 	return 0;
3108 }
3109 EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);
3110 
3111 #endif /* CONFIG_PM */
3112 
3113 /*****************************************************************************\
3114  *                                                                           *
3115  * Command Queue Engine (CQE) helpers                                        *
3116  *                                                                           *
3117 \*****************************************************************************/
3118 
3119 void sdhci_cqe_enable(struct mmc_host *mmc)
3120 {
3121 	struct sdhci_host *host = mmc_priv(mmc);
3122 	unsigned long flags;
3123 	u8 ctrl;
3124 
3125 	spin_lock_irqsave(&host->lock, flags);
3126 
3127 	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
3128 	ctrl &= ~SDHCI_CTRL_DMA_MASK;
3129 	if (host->flags & SDHCI_USE_64_BIT_DMA)
3130 		ctrl |= SDHCI_CTRL_ADMA64;
3131 	else
3132 		ctrl |= SDHCI_CTRL_ADMA32;
3133 	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
3134 
3135 	sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, 512),
3136 		     SDHCI_BLOCK_SIZE);
3137 
3138 	/* Set maximum timeout */
3139 	sdhci_writeb(host, 0xE, SDHCI_TIMEOUT_CONTROL);
3140 
3141 	host->ier = host->cqe_ier;
3142 
3143 	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3144 	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3145 
3146 	host->cqe_on = true;
3147 
3148 	pr_debug("%s: sdhci: CQE on, IRQ mask %#x, IRQ status %#x\n",
3149 		 mmc_hostname(mmc), host->ier,
3150 		 sdhci_readl(host, SDHCI_INT_STATUS));
3151 
3152 	mmiowb();
3153 	spin_unlock_irqrestore(&host->lock, flags);
3154 }
3155 EXPORT_SYMBOL_GPL(sdhci_cqe_enable);
3156 
3157 void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery)
3158 {
3159 	struct sdhci_host *host = mmc_priv(mmc);
3160 	unsigned long flags;
3161 
3162 	spin_lock_irqsave(&host->lock, flags);
3163 
3164 	sdhci_set_default_irqs(host);
3165 
3166 	host->cqe_on = false;
3167 
3168 	if (recovery) {
3169 		sdhci_do_reset(host, SDHCI_RESET_CMD);
3170 		sdhci_do_reset(host, SDHCI_RESET_DATA);
3171 	}
3172 
3173 	pr_debug("%s: sdhci: CQE off, IRQ mask %#x, IRQ status %#x\n",
3174 		 mmc_hostname(mmc), host->ier,
3175 		 sdhci_readl(host, SDHCI_INT_STATUS));
3176 
3177 	mmiowb();
3178 	spin_unlock_irqrestore(&host->lock, flags);
3179 }
3180 EXPORT_SYMBOL_GPL(sdhci_cqe_disable);
3181 
3182 bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
3183 		   int *data_error)
3184 {
3185 	u32 mask;
3186 
3187 	if (!host->cqe_on)
3188 		return false;
3189 
3190 	if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC))
3191 		*cmd_error = -EILSEQ;
3192 	else if (intmask & SDHCI_INT_TIMEOUT)
3193 		*cmd_error = -ETIMEDOUT;
3194 	else
3195 		*cmd_error = 0;
3196 
3197 	if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC))
3198 		*data_error = -EILSEQ;
3199 	else if (intmask & SDHCI_INT_DATA_TIMEOUT)
3200 		*data_error = -ETIMEDOUT;
3201 	else if (intmask & SDHCI_INT_ADMA_ERROR)
3202 		*data_error = -EIO;
3203 	else
3204 		*data_error = 0;
3205 
3206 	/* Clear selected interrupts. */
3207 	mask = intmask & host->cqe_ier;
3208 	sdhci_writel(host, mask, SDHCI_INT_STATUS);
3209 
3210 	if (intmask & SDHCI_INT_BUS_POWER)
3211 		pr_err("%s: Card is consuming too much power!\n",
3212 		       mmc_hostname(host->mmc));
3213 
3214 	intmask &= ~(host->cqe_ier | SDHCI_INT_ERROR);
3215 	if (intmask) {
3216 		sdhci_writel(host, intmask, SDHCI_INT_STATUS);
3217 		pr_err("%s: CQE: Unexpected interrupt 0x%08x.\n",
3218 		       mmc_hostname(host->mmc), intmask);
3219 		sdhci_dumpregs(host);
3220 	}
3221 
3222 	return true;
3223 }
3224 EXPORT_SYMBOL_GPL(sdhci_cqe_irq);
3225 
3226 /*****************************************************************************\
3227  *                                                                           *
3228  * Device allocation/registration                                            *
3229  *                                                                           *
3230 \*****************************************************************************/
3231 
3232 struct sdhci_host *sdhci_alloc_host(struct device *dev,
3233 	size_t priv_size)
3234 {
3235 	struct mmc_host *mmc;
3236 	struct sdhci_host *host;
3237 
3238 	WARN_ON(dev == NULL);
3239 
3240 	mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
3241 	if (!mmc)
3242 		return ERR_PTR(-ENOMEM);
3243 
3244 	host = mmc_priv(mmc);
3245 	host->mmc = mmc;
3246 	host->mmc_host_ops = sdhci_ops;
3247 	mmc->ops = &host->mmc_host_ops;
3248 
3249 	host->flags = SDHCI_SIGNALING_330;
3250 
3251 	host->cqe_ier     = SDHCI_CQE_INT_MASK;
3252 	host->cqe_err_ier = SDHCI_CQE_INT_ERR_MASK;
3253 
3254 	host->tuning_delay = -1;
3255 
3256 	host->sdma_boundary = SDHCI_DEFAULT_BOUNDARY_ARG;
3257 
3258 	return host;
3259 }
3261 EXPORT_SYMBOL_GPL(sdhci_alloc_host);
3262 
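/* Prefer a 64-bit DMA mask when the controller supports it, else fall back to 32-bit. */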
3263 static int sdhci_set_dma_mask(struct sdhci_host *host)
3264 {
3265 	struct mmc_host *mmc = host->mmc;
3266 	struct device *dev = mmc_dev(mmc);
3267 	int ret = -EINVAL;
3268 
3269 	if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA)
3270 		host->flags &= ~SDHCI_USE_64_BIT_DMA;
3271 
3272 	/* Try 64-bit mask if hardware is capable of it */
3273 	if (host->flags & SDHCI_USE_64_BIT_DMA) {
3274 		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
3275 		if (ret) {
3276 			pr_warn("%s: Failed to set 64-bit DMA mask.\n",
3277 				mmc_hostname(mmc));
3278 			host->flags &= ~SDHCI_USE_64_BIT_DMA;
3279 		}
3280 	}
3281 
3282 	/* 32-bit mask as default & fallback */
3283 	if (ret) {
3284 		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
3285 		if (ret)
3286 			pr_warn("%s: Failed to set 32-bit DMA mask.\n",
3287 				mmc_hostname(mmc));
3288 	}
3289 
3290 	return ret;
3291 }
3292 
3293 void __sdhci_read_caps(struct sdhci_host *host, u16 *ver, u32 *caps, u32 *caps1)
3294 {
3295 	u16 v;
3296 	u64 dt_caps_mask = 0;
3297 	u64 dt_caps = 0;
3298 
3299 	if (host->read_caps)
3300 		return;
3301 
3302 	host->read_caps = true;
3303 
3304 	if (debug_quirks)
3305 		host->quirks = debug_quirks;
3306 
3307 	if (debug_quirks2)
3308 		host->quirks2 = debug_quirks2;
3309 
3310 	sdhci_do_reset(host, SDHCI_RESET_ALL);
3311 
3312 	of_property_read_u64(mmc_dev(host->mmc)->of_node,
3313 			     "sdhci-caps-mask", &dt_caps_mask);
3314 	of_property_read_u64(mmc_dev(host->mmc)->of_node,
3315 			     "sdhci-caps", &dt_caps);
3316 
3317 	v = ver ? *ver : sdhci_readw(host, SDHCI_HOST_VERSION);
3318 	host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;
3319 
3320 	if (host->quirks & SDHCI_QUIRK_MISSING_CAPS)
3321 		return;
3322 
3323 	if (caps) {
3324 		host->caps = *caps;
3325 	} else {
3326 		host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
3327 		host->caps &= ~lower_32_bits(dt_caps_mask);
3328 		host->caps |= lower_32_bits(dt_caps);
3329 	}
3330 
3331 	if (host->version < SDHCI_SPEC_300)
3332 		return;
3333 
3334 	if (caps1) {
3335 		host->caps1 = *caps1;
3336 	} else {
3337 		host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
3338 		host->caps1 &= ~upper_32_bits(dt_caps_mask);
3339 		host->caps1 |= upper_32_bits(dt_caps);
3340 	}
3341 }
3342 EXPORT_SYMBOL_GPL(__sdhci_read_caps);
3343 
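/* Allocation failures here are not fatal: we simply fall back to max_segs == 1. */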
3344 static int sdhci_allocate_bounce_buffer(struct sdhci_host *host)
3345 {
3346 	struct mmc_host *mmc = host->mmc;
3347 	unsigned int max_blocks;
3348 	unsigned int bounce_size;
3349 	int ret;
3350 
3351 	/*
3352 	 * Cap the bounce buffer at 64KB. Using a bigger bounce buffer
3353 	 * has diminishing returns, probably because SD/MMC cards are
3354 	 * usually optimized to handle requests of this size.
3355 	 */
3356 	bounce_size = SZ_64K;
3357 	/*
3358 	 * Adjust downwards to maximum request size if this is less
3359 	 * than our segment size, else hammer down the maximum
3360 	 * request size to the maximum buffer size.
3361 	 */
3362 	if (mmc->max_req_size < bounce_size)
3363 		bounce_size = mmc->max_req_size;
3364 	max_blocks = bounce_size / 512;
3365 
3366 	/*
3367 	 * When we support just one segment, we can get significant
3368 	 * speedups with the help of a bounce buffer that groups
3369 	 * scattered reads/writes together.
3370 	 */
3371 	host->bounce_buffer = devm_kmalloc(mmc->parent,
3372 					   bounce_size,
3373 					   GFP_KERNEL);
3374 	if (!host->bounce_buffer) {
3375 		pr_err("%s: failed to allocate %u bytes for bounce buffer, falling back to single segments\n",
3376 		       mmc_hostname(mmc),
3377 		       bounce_size);
3378 		/*
3379 		 * Exiting with zero here makes sure we proceed with
3380 		 * mmc->max_segs == 1.
3381 		 */
3382 		return 0;
3383 	}
3384 
3385 	host->bounce_addr = dma_map_single(mmc->parent,
3386 					   host->bounce_buffer,
3387 					   bounce_size,
3388 					   DMA_BIDIRECTIONAL);
3389 	ret = dma_mapping_error(mmc->parent, host->bounce_addr);
3390 	if (ret)
3391 		/* Again fall back to max_segs == 1 */
3392 		return 0;
3393 	host->bounce_buffer_size = bounce_size;
3394 
3395 	/* Lie about this since we're bouncing */
3396 	mmc->max_segs = max_blocks;
3397 	mmc->max_seg_size = bounce_size;
3398 	mmc->max_req_size = bounce_size;
3399 
3400 	pr_info("%s: bounce up to %u segments into one, max segment size %u bytes\n",
3401 		mmc_hostname(mmc), max_blocks, bounce_size);
3402 
3403 	return 0;
3404 }
3405 
3406 int sdhci_setup_host(struct sdhci_host *host)
3407 {
3408 	struct mmc_host *mmc;
3409 	u32 max_current_caps;
3410 	unsigned int ocr_avail;
3411 	unsigned int override_timeout_clk;
3412 	u32 max_clk;
3413 	int ret;
3414 
3415 	WARN_ON(host == NULL);
3416 	if (host == NULL)
3417 		return -EINVAL;
3418 
3419 	mmc = host->mmc;
3420 
3421 	/*
3422 	 * If there are external regulators, get them. Note this must be done
3423 	 * early before resetting the host and reading the capabilities so that
3424 	 * the host can take the appropriate action if regulators are not
3425 	 * available.
3426 	 */
3427 	ret = mmc_regulator_get_supply(mmc);
3428 	if (ret == -EPROBE_DEFER)
3429 		return ret;
3430 
3431 	DBG("Version:   0x%08x | Present:  0x%08x\n",
3432 	    sdhci_readw(host, SDHCI_HOST_VERSION),
3433 	    sdhci_readl(host, SDHCI_PRESENT_STATE));
3434 	DBG("Caps:      0x%08x | Caps_1:   0x%08x\n",
3435 	    sdhci_readl(host, SDHCI_CAPABILITIES),
3436 	    sdhci_readl(host, SDHCI_CAPABILITIES_1));
3437 
3438 	sdhci_read_caps(host);
3439 
3440 	override_timeout_clk = host->timeout_clk;
3441 
3442 	if (host->version > SDHCI_SPEC_300) {
3443 		pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
3444 		       mmc_hostname(mmc), host->version);
3445 	}
3446 
3447 	if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
3448 		host->flags |= SDHCI_USE_SDMA;
3449 	else if (!(host->caps & SDHCI_CAN_DO_SDMA))
3450 		DBG("Controller doesn't have SDMA capability\n");
3451 	else
3452 		host->flags |= SDHCI_USE_SDMA;
3453 
3454 	if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
3455 		(host->flags & SDHCI_USE_SDMA)) {
3456 		DBG("Disabling DMA as it is marked broken\n");
3457 		host->flags &= ~SDHCI_USE_SDMA;
3458 	}
3459 
3460 	if ((host->version >= SDHCI_SPEC_200) &&
3461 		(host->caps & SDHCI_CAN_DO_ADMA2))
3462 		host->flags |= SDHCI_USE_ADMA;
3463 
3464 	if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
3465 		(host->flags & SDHCI_USE_ADMA)) {
3466 		DBG("Disabling ADMA as it is marked broken\n");
3467 		host->flags &= ~SDHCI_USE_ADMA;
3468 	}
3469 
3470 	/*
3471 	 * It is assumed that a 64-bit capable device has set a 64-bit DMA mask
3472 	 * and *must* do 64-bit DMA.  A driver has the opportunity to change
3473 	 * that during the first call to ->enable_dma().  Similarly
3474 	 * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to
3475 	 * implement.
3476 	 */
3477 	if (host->caps & SDHCI_CAN_64BIT)
3478 		host->flags |= SDHCI_USE_64_BIT_DMA;
3479 
3480 	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3481 		ret = sdhci_set_dma_mask(host);
3482 
3483 		if (!ret && host->ops->enable_dma)
3484 			ret = host->ops->enable_dma(host);
3485 
3486 		if (ret) {
3487 			pr_warn("%s: No suitable DMA available - falling back to PIO\n",
3488 				mmc_hostname(mmc));
3489 			host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
3490 
3491 			ret = 0;
3492 		}
3493 	}
3494 
3495 	/* SDMA does not support 64-bit DMA */
3496 	if (host->flags & SDHCI_USE_64_BIT_DMA)
3497 		host->flags &= ~SDHCI_USE_SDMA;
3498 
3499 	if (host->flags & SDHCI_USE_ADMA) {
3500 		dma_addr_t dma;
3501 		void *buf;
3502 
		/*
		 * The DMA descriptor table size is calculated as the maximum
		 * number of segments times 2, to allow for an alignment
		 * descriptor for each segment, plus 1 for a nop end descriptor,
		 * all multiplied by the descriptor size.
		 */
		if (host->flags & SDHCI_USE_64_BIT_DMA) {
			host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
					      SDHCI_ADMA2_64_DESC_SZ;
			host->desc_sz = SDHCI_ADMA2_64_DESC_SZ;
		} else {
			host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
					      SDHCI_ADMA2_32_DESC_SZ;
			host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
		}
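		/*
		 * Worked example (illustrative figures, assuming the usual
		 * SDHCI_MAX_SEGS of 128 with 12-byte 64-bit and 8-byte
		 * 32-bit descriptors): the table comes to
		 * (128 * 2 + 1) * 12 = 3084 bytes in 64-bit mode, or
		 * (128 * 2 + 1) * 8 = 2056 bytes otherwise.
		 */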

		host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
		buf = dma_alloc_coherent(mmc_dev(mmc), host->align_buffer_sz +
					 host->adma_table_sz, &dma, GFP_KERNEL);
		if (!buf) {
			pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_ADMA;
		} else if ((dma + host->align_buffer_sz) &
			   (SDHCI_ADMA2_DESC_ALIGN - 1)) {
			pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_ADMA;
			dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
					  host->adma_table_sz, buf, dma);
		} else {
			host->align_buffer = buf;
			host->align_addr = dma;

			host->adma_table = buf + host->align_buffer_sz;
			host->adma_addr = dma + host->align_buffer_sz;
		}
	}
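	/*
	 * A note on the alignment test above: for a power-of-two alignment A,
	 * "addr & (A - 1)" is non-zero exactly when addr is not A-aligned, so
	 * the descriptor table (which starts right after the align buffer) is
	 * rejected unless it lands on an SDHCI_ADMA2_DESC_ALIGN boundary.
	 */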

	/*
	 * If we use DMA, then it's up to the caller to set the DMA
	 * mask, but PIO does not need the hw shim so we set a new
	 * mask here in that case.
	 */
	if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
		host->dma_mask = DMA_BIT_MASK(64);
		mmc_dev(mmc)->dma_mask = &host->dma_mask;
	}

	if (host->version >= SDHCI_SPEC_300)
		host->max_clk = (host->caps & SDHCI_CLOCK_V3_BASE_MASK)
			>> SDHCI_CLOCK_BASE_SHIFT;
	else
		host->max_clk = (host->caps & SDHCI_CLOCK_BASE_MASK)
			>> SDHCI_CLOCK_BASE_SHIFT;

	host->max_clk *= 1000000;
	if (host->max_clk == 0 || host->quirks &
			SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
		if (!host->ops->get_max_clock) {
			pr_err("%s: Hardware doesn't specify base clock frequency.\n",
			       mmc_hostname(mmc));
			ret = -ENODEV;
			goto undma;
		}
		host->max_clk = host->ops->get_max_clock(host);
	}

	/*
	 * In case of Host Controller v3.00, find out whether clock
	 * multiplier is supported.
	 */
	host->clk_mul = (host->caps1 & SDHCI_CLOCK_MUL_MASK) >>
			SDHCI_CLOCK_MUL_SHIFT;

	/*
	 * In case the value in Clock Multiplier is 0, then programmable
	 * clock mode is not supported, otherwise the actual clock
	 * multiplier is one more than the value of Clock Multiplier
	 * in the Capabilities Register.
	 */
	if (host->clk_mul)
		host->clk_mul += 1;
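	/*
	 * Example, per the comment above: a Clock Multiplier field of 3
	 * yields an effective multiplier of 4, while a field of 0 leaves
	 * clk_mul at 0, i.e. programmable clock mode unsupported.
	 */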

	/*
	 * Set host parameters.
	 */
	max_clk = host->max_clk;

	if (host->ops->get_min_clock)
		mmc->f_min = host->ops->get_min_clock(host);
	else if (host->version >= SDHCI_SPEC_300) {
		if (host->clk_mul)
			max_clk = host->max_clk * host->clk_mul;
		/*
		 * Divided Clock Mode minimum clock rate is always less than
		 * Programmable Clock Mode minimum clock rate.
		 */
		mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
	} else
		mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;

	if (!mmc->f_max || mmc->f_max > max_clk)
		mmc->f_max = max_clk;
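	/*
	 * Illustration, assuming a 200 MHz base clock and the spec maximum
	 * divisors (2046 for v3.00, 256 before that): a v3.00 host gets
	 * f_min = 200000000 / 2046 ~= 97.8 kHz, whereas a pre-v3.00 host
	 * gets 200000000 / 256 ~= 781 kHz.
	 */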

	if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
		host->timeout_clk = (host->caps & SDHCI_TIMEOUT_CLK_MASK) >>
					SDHCI_TIMEOUT_CLK_SHIFT;

		if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
			host->timeout_clk *= 1000;

		if (host->timeout_clk == 0) {
			if (!host->ops->get_timeout_clock) {
				pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
					mmc_hostname(mmc));
				ret = -ENODEV;
				goto undma;
			}

			host->timeout_clk =
				DIV_ROUND_UP(host->ops->get_timeout_clock(host),
					     1000);
		}

		if (override_timeout_clk)
			host->timeout_clk = override_timeout_clk;

		mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
			host->ops->get_max_timeout_count(host) : 1 << 27;
		mmc->max_busy_timeout /= host->timeout_clk;
	}
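	/*
	 * Sketch of the arithmetic above: timeout_clk is kept in kHz, so
	 * with the default maximum count of 1 << 27 and, say, a 1 MHz
	 * timeout clock (timeout_clk == 1000), max_busy_timeout comes to
	 * roughly 134218 ms.
	 */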

	mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
	mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;

	if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
		host->flags |= SDHCI_AUTO_CMD12;

	/* Auto-CMD23 stuff only works in ADMA or PIO. */
	if ((host->version >= SDHCI_SPEC_300) &&
	    ((host->flags & SDHCI_USE_ADMA) ||
	     !(host->flags & SDHCI_USE_SDMA)) &&
	     !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
		host->flags |= SDHCI_AUTO_CMD23;
		DBG("Auto-CMD23 available\n");
	} else {
		DBG("Auto-CMD23 unavailable\n");
	}

	/*
	 * A controller may support 8-bit width, but the board itself
	 * might not have the pins brought out.  Boards that support
	 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
	 * their platform code before calling sdhci_add_host(), and we
	 * won't assume 8-bit width for hosts without that CAP.
	 */
	if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
		mmc->caps |= MMC_CAP_4_BIT_DATA;

	if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
		mmc->caps &= ~MMC_CAP_CMD23;

	if (host->caps & SDHCI_CAN_DO_HISPD)
		mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
	    mmc_card_is_removable(mmc) &&
	    mmc_gpio_get_cd(host->mmc) < 0)
		mmc->caps |= MMC_CAP_NEEDS_POLL;

	if (!IS_ERR(mmc->supply.vqmmc)) {
		ret = regulator_enable(mmc->supply.vqmmc);

		/* If vqmmc provides no 1.8V signalling, then there's no UHS */
		if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
						    1950000))
			host->caps1 &= ~(SDHCI_SUPPORT_SDR104 |
					 SDHCI_SUPPORT_SDR50 |
					 SDHCI_SUPPORT_DDR50);

		/* In eMMC case vqmmc might be a fixed 1.8V regulator */
		if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 2700000,
						    3600000))
			host->flags &= ~SDHCI_SIGNALING_330;

		if (ret) {
			pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
				mmc_hostname(mmc), ret);
			mmc->supply.vqmmc = ERR_PTR(-EINVAL);
		}
	}

	if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) {
		host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
				 SDHCI_SUPPORT_DDR50);
	}

	/* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
	if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
			   SDHCI_SUPPORT_DDR50))
		mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;

	/* SDR104 support also implies SDR50 support */
	if (host->caps1 & SDHCI_SUPPORT_SDR104) {
		mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
		/* SD3.0: SDR104 is supported so (for eMMC) the caps2
		 * field can be promoted to support HS200.
		 */
		if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
			mmc->caps2 |= MMC_CAP2_HS200;
	} else if (host->caps1 & SDHCI_SUPPORT_SDR50) {
		mmc->caps |= MMC_CAP_UHS_SDR50;
	}

	if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
	    (host->caps1 & SDHCI_SUPPORT_HS400))
		mmc->caps2 |= MMC_CAP2_HS400;

	if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
	    (IS_ERR(mmc->supply.vqmmc) ||
	     !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
					     1300000)))
		mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;

	if ((host->caps1 & SDHCI_SUPPORT_DDR50) &&
	    !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
		mmc->caps |= MMC_CAP_UHS_DDR50;

	/* Does the host need tuning for SDR50? */
	if (host->caps1 & SDHCI_USE_SDR50_TUNING)
		host->flags |= SDHCI_SDR50_NEEDS_TUNING;

	/* Driver Type(s) (A, C, D) supported by the host */
	if (host->caps1 & SDHCI_DRIVER_TYPE_A)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
	if (host->caps1 & SDHCI_DRIVER_TYPE_C)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
	if (host->caps1 & SDHCI_DRIVER_TYPE_D)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_D;

	/* Initial value for re-tuning timer count */
	host->tuning_count = (host->caps1 & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
			     SDHCI_RETUNING_TIMER_COUNT_SHIFT;

	/*
	 * In case Re-tuning Timer is not disabled, the actual value of
	 * re-tuning timer will be 2 ^ (n - 1).
	 */
	if (host->tuning_count)
		host->tuning_count = 1 << (host->tuning_count - 1);
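	/*
	 * Example: a Re-Tuning Timer Count field of 4 gives
	 * 1 << (4 - 1) = 8, i.e. (assuming the spec's encoding of the
	 * count in seconds) re-tuning roughly every 8 seconds.
	 */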

	/* Re-tuning mode supported by the Host Controller */
	host->tuning_mode = (host->caps1 & SDHCI_RETUNING_MODE_MASK) >>
			     SDHCI_RETUNING_MODE_SHIFT;

	ocr_avail = 0;

	/*
	 * According to SD Host Controller spec v3.00, if the Host System
	 * can afford more than 150mA, Host Driver should set XPC to 1. Also
	 * the value is meaningful only if Voltage Support in the Capabilities
	 * register is set. The actual current value is 4 times the register
	 * value.
	 */
	max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
	if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
		int curr = regulator_get_current_limit(mmc->supply.vmmc);
		if (curr > 0) {

			/* convert to SDHCI_MAX_CURRENT format */
			curr = curr/1000;  /* convert to mA */
			curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER;

			curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
			max_current_caps =
				(curr << SDHCI_MAX_CURRENT_330_SHIFT) |
				(curr << SDHCI_MAX_CURRENT_300_SHIFT) |
				(curr << SDHCI_MAX_CURRENT_180_SHIFT);
		}
	}
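	/*
	 * Illustration of that conversion: a regulator limit of 600000 uA
	 * becomes 600 mA, then 600 / 4 = 150 in register units, replicated
	 * into the 3.3V, 3.0V and 1.8V fields of max_current_caps.
	 */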

	if (host->caps & SDHCI_CAN_VDD_330) {
		ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;

		mmc->max_current_330 = ((max_current_caps &
				   SDHCI_MAX_CURRENT_330_MASK) >>
				   SDHCI_MAX_CURRENT_330_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	if (host->caps & SDHCI_CAN_VDD_300) {
		ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;

		mmc->max_current_300 = ((max_current_caps &
				   SDHCI_MAX_CURRENT_300_MASK) >>
				   SDHCI_MAX_CURRENT_300_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	if (host->caps & SDHCI_CAN_VDD_180) {
		ocr_avail |= MMC_VDD_165_195;

		mmc->max_current_180 = ((max_current_caps &
				   SDHCI_MAX_CURRENT_180_MASK) >>
				   SDHCI_MAX_CURRENT_180_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}
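	/*
	 * Decoding example for the blocks above: a per-voltage current
	 * field of 0x20 (32) corresponds to 32 * 4 = 128 mA, using the
	 * spec's 4x multiplier noted earlier.
	 */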

	/* If OCR set by host, use it instead. */
	if (host->ocr_mask)
		ocr_avail = host->ocr_mask;

	/* If OCR set by external regulators, give it highest prio. */
	if (mmc->ocr_avail)
		ocr_avail = mmc->ocr_avail;

	mmc->ocr_avail = ocr_avail;
	mmc->ocr_avail_sdio = ocr_avail;
	if (host->ocr_avail_sdio)
		mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
	mmc->ocr_avail_sd = ocr_avail;
	if (host->ocr_avail_sd)
		mmc->ocr_avail_sd &= host->ocr_avail_sd;
	else /* normal SD controllers don't support 1.8V */
		mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
	mmc->ocr_avail_mmc = ocr_avail;
	if (host->ocr_avail_mmc)
		mmc->ocr_avail_mmc &= host->ocr_avail_mmc;

	if (mmc->ocr_avail == 0) {
		pr_err("%s: Hardware doesn't report any supported voltages.\n",
		       mmc_hostname(mmc));
		ret = -ENODEV;
		goto unreg;
	}

	if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
			  MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
			  MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) ||
	    (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)))
		host->flags |= SDHCI_SIGNALING_180;

	if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
		host->flags |= SDHCI_SIGNALING_120;

	spin_lock_init(&host->lock);

	/*
	 * Maximum number of sectors in one transfer. Limited by SDMA boundary
	 * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
	 * is less anyway.
	 */
	mmc->max_req_size = 524288;

	/*
	 * Maximum number of segments. Depends on if the hardware
	 * can do scatter/gather or not.
	 */
	if (host->flags & SDHCI_USE_ADMA) {
		mmc->max_segs = SDHCI_MAX_SEGS;
	} else if (host->flags & SDHCI_USE_SDMA) {
		mmc->max_segs = 1;
		if (swiotlb_max_segment()) {
			unsigned int max_req_size = (1 << IO_TLB_SHIFT) *
						IO_TLB_SEGSIZE;
			mmc->max_req_size = min(mmc->max_req_size,
						max_req_size);
		}
	} else { /* PIO */
		mmc->max_segs = SDHCI_MAX_SEGS;
	}
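	/*
	 * Rough numbers for the swiotlb cap in the SDMA branch above,
	 * assuming the common geometry of 2 KiB slabs (IO_TLB_SHIFT == 11)
	 * and 128-slab segments (IO_TLB_SEGSIZE): requests are then capped
	 * at 2048 * 128 = 256 KiB.
	 */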

	/*
	 * Maximum segment size. Could be one segment with the maximum number
	 * of bytes. When doing hardware scatter/gather, each entry cannot
	 * be larger than 64 KiB though.
	 */
	if (host->flags & SDHCI_USE_ADMA) {
		if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
			mmc->max_seg_size = 65535;
		else
			mmc->max_seg_size = 65536;
	} else {
		mmc->max_seg_size = mmc->max_req_size;
	}

	/*
	 * Maximum block size. This varies from controller to controller and
	 * is specified in the capabilities register.
	 */
	if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
		mmc->max_blk_size = 2;
	} else {
		mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >>
				SDHCI_MAX_BLOCK_SHIFT;
		if (mmc->max_blk_size >= 3) {
			pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
				mmc_hostname(mmc));
			mmc->max_blk_size = 0;
		}
	}

	mmc->max_blk_size = 512 << mmc->max_blk_size;
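	/*
	 * So a capabilities field of 0 yields 512-byte blocks, 1 yields
	 * 1024 and 2 yields 2048 -- which is also why the
	 * SDHCI_QUIRK_FORCE_BLK_SZ_2048 quirk simply pins the field to 2.
	 */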

	/*
	 * Maximum block count.
	 */
	mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;

	if (mmc->max_segs == 1) {
		/* This may alter mmc->*_blk_* parameters */
		ret = sdhci_allocate_bounce_buffer(host);
		if (ret)
			goto unreg;
	}

	return 0;

unreg:
	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);
undma:
	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);
	host->adma_table = NULL;
	host->align_buffer = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(sdhci_setup_host);

void sdhci_cleanup_host(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;

	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);

	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);
	host->adma_table = NULL;
	host->align_buffer = NULL;
}
EXPORT_SYMBOL_GPL(sdhci_cleanup_host);

int __sdhci_add_host(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	int ret;

	/*
	 * Init tasklets.
	 */
	tasklet_init(&host->finish_tasklet,
		sdhci_tasklet_finish, (unsigned long)host);

	setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);
	setup_timer(&host->data_timer, sdhci_timeout_data_timer,
		    (unsigned long)host);

	init_waitqueue_head(&host->buf_ready_int);

	sdhci_init(host, 0);

	ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
				   IRQF_SHARED, mmc_hostname(mmc), host);
	if (ret) {
		pr_err("%s: Failed to request IRQ %d: %d\n",
		       mmc_hostname(mmc), host->irq, ret);
		goto untasklet;
	}

	ret = sdhci_led_register(host);
	if (ret) {
		pr_err("%s: Failed to register LED device: %d\n",
		       mmc_hostname(mmc), ret);
		goto unirq;
	}

	mmiowb();

	ret = mmc_add_host(mmc);
	if (ret)
		goto unled;

	pr_info("%s: SDHCI controller on %s [%s] using %s\n",
		mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
		(host->flags & SDHCI_USE_ADMA) ?
		(host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
		(host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");

	sdhci_enable_card_detection(host);

	return 0;

unled:
	sdhci_led_unregister(host);
unirq:
	sdhci_do_reset(host, SDHCI_RESET_ALL);
	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
	free_irq(host->irq, host);
untasklet:
	tasklet_kill(&host->finish_tasklet);

	return ret;
}
EXPORT_SYMBOL_GPL(__sdhci_add_host);

int sdhci_add_host(struct sdhci_host *host)
{
	int ret;

	ret = sdhci_setup_host(host);
	if (ret)
		return ret;

	ret = __sdhci_add_host(host);
	if (ret)
		goto cleanup;

	return 0;

cleanup:
	sdhci_cleanup_host(host);

	return ret;
}
EXPORT_SYMBOL_GPL(sdhci_add_host);

void sdhci_remove_host(struct sdhci_host *host, int dead)
{
	struct mmc_host *mmc = host->mmc;
	unsigned long flags;

	if (dead) {
		spin_lock_irqsave(&host->lock, flags);

		host->flags |= SDHCI_DEVICE_DEAD;

		if (sdhci_has_requests(host)) {
			pr_err("%s: Controller removed during transfer!\n",
			       mmc_hostname(mmc));
			sdhci_error_out_mrqs(host, -ENOMEDIUM);
		}

		spin_unlock_irqrestore(&host->lock, flags);
	}

	sdhci_disable_card_detection(host);

	mmc_remove_host(mmc);

	sdhci_led_unregister(host);

	if (!dead)
		sdhci_do_reset(host, SDHCI_RESET_ALL);

	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
	free_irq(host->irq, host);

	del_timer_sync(&host->timer);
	del_timer_sync(&host->data_timer);

	tasklet_kill(&host->finish_tasklet);

	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);

	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);

	host->adma_table = NULL;
	host->align_buffer = NULL;
}
EXPORT_SYMBOL_GPL(sdhci_remove_host);

void sdhci_free_host(struct sdhci_host *host)
{
	mmc_free_host(host->mmc);
}
EXPORT_SYMBOL_GPL(sdhci_free_host);

/*****************************************************************************\
 *                                                                           *
 * Driver init/exit                                                          *
 *                                                                           *
\*****************************************************************************/

static int __init sdhci_drv_init(void)
{
	pr_info(DRIVER_NAME
		": Secure Digital Host Controller Interface driver\n");
	pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");

	return 0;
}

static void __exit sdhci_drv_exit(void)
{
}

module_init(sdhci_drv_init);
module_exit(sdhci_drv_exit);

module_param(debug_quirks, uint, 0444);
module_param(debug_quirks2, uint, 0444);

MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
MODULE_LICENSE("GPL");

MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");