/*
 * Driver for sunxi SD/MMC host controllers
 * (C) Copyright 2007-2011 Reuuimlla Technology Co., Ltd.
 * (C) Copyright 2007-2011 Aaron Maoye <leafy.myeh@reuuimllatech.com>
 * (C) Copyright 2013-2014 O2S GmbH <www.o2s.ch>
 * (C) Copyright 2013-2014 David Lanzendörfer <david.lanzendoerfer@o2s.ch>
 * (C) Copyright 2013-2014 Hans de Goede <hdegoede@redhat.com>
 * (C) Copyright 2014-2016 lixiang <lixiang@allwinnertech>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/err.h>

#include <linux/clk.h>
#include <linux/reset/sunxi.h>
#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/reset.h>
/*#include <linux/sunxi-gpio.h>*/

#include <linux/of_address.h>
#include <linux/of_gpio.h>
#include <linux/of_platform.h>
#include <linux/regulator/consumer.h>

#include <linux/mmc/host.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/core.h>
#include <linux/mmc/card.h>
#include <linux/mmc/slot-gpio.h>
#include "../core/card.h"
#include <mmc_hsq.h>

/*#include <sunxi-sid.h>*/
#include "sunxi-mmc.h"
#include "sunxi-mmc-sun50iw1p1-2.h"
#include "sunxi-mmc-sun50iw1p1-0.h"
#include "sunxi-mmc-sun50iw1p1-1.h"
#include "sunxi-mmc-v4p1x.h"
#include "sunxi-mmc-v4p10x.h"
#include "sunxi-mmc-v4p00x.h"
#include "sunxi-mmc-v4p5x.h"
#include "sunxi-mmc-v5p3x.h"

#include "sunxi-mmc-debug.h"
#include "sunxi-mmc-export.h"
#include "sunxi-mmc-panic.h"
/* default retry times */
#define SUNXI_DEF_RETRY_TIMES		6
/* default value 10 min = 600000 ms; warning: not less than 20 */
#define SUNXI_DEF_MAX_R1B_TIMEOUT_MS	(600000U)
#define SUNXI_MIN_R1B_TIMEOUT_MS	(20)
#define SUNXI_TRANS_TIMEOUT		(5*HZ)
#define SUNXI_CMD11_TIMEOUT		(1*HZ)

/* test the encryption flag bit */
#define sunxi_crypt_flags(sg) (((sg->offset) \
	& (1 << ((sizeof(sg->offset) << 3) - 1))) ? 1 : 0)
/* clear the encryption flag bit */
#define sunxi_clear_crypt_flags(sg) ((sg->offset) \
	& ~(1 << ((sizeof(sg->offset) << 3) - 1)))
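
/*
 * Illustrative note (added, not from the original source): the flag lives
 * in the most significant bit of sg->offset, e.g. bit 31 for a 32-bit
 * offset field. A sketch of how a caller might use the pair of macros:
 *
 *	if (sunxi_crypt_flags(sg)) {			// MSB set: encrypted
 *		sg->offset = sunxi_clear_crypt_flags(sg); // true offset
 *		...
 *	}
 */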

/* check whether the host supports a hardware busy check on r1b commands */
#define sunxi_mmc_chk_hr1b_cap(host) (!host->sunxi_mmc_hw_busy \
	|| host->sunxi_mmc_hw_busy(host))
/* check whether a data request needs an r1b busy check */
#define sunxi_mmc_dreq_r1b_chk_need(host, data) \
	(data && (data->flags & MMC_DATA_WRITE) \
	&& !(host->ctl_spec_cap & NO_WBUSY_WR_END) \
	&& sunxi_mmc_chk_hr1b_cap(host))

/* check whether a cmd request needs an r1b busy check */
#define sunxi_mmc_creq_r1b_chk_need(host, cmd) \
	((cmd->flags & MMC_RSP_BUSY) \
	&& !(host->ctl_spec_cap & NO_WBUSY_WR_END) \
	&& sunxi_mmc_chk_hr1b_cap(host))

#define sunxi_mmc_host_des_addr(host, soc_phy_address) \
	((soc_phy_address) >> (host->des_addr_shift))

#define sunxi_mmc_clean_retry_cnt(host) \
	(host->retry_cnt = host->sunxi_ds_dl_cnt = host->sunxi_samp_dl_cnt = 0)

static void sunxi_mmc_regs_save(struct sunxi_mmc_host *host);
static void sunxi_mmc_regs_restore(struct sunxi_mmc_host *host);
static int sunxi_mmc_bus_clk_en(struct sunxi_mmc_host *host, int enable);
static void sunxi_mmc_parse_cmd(struct mmc_host *mmc, struct mmc_command *cmd,
				u32 *cval, u32 *im, bool *wdma);
static int sunxi_mmc_set_dat(struct sunxi_mmc_host *host, struct mmc_host *mmc,
			     struct mmc_data *data, bool atomic);
static void sunxi_mmc_exe_cmd(struct sunxi_mmc_host *host,
			      struct mmc_command *cmd, u32 cmd_val, u32 imask);
static irqreturn_t sunxi_mmc_handle_bottom_half(int irq, void *dev_id);
static irqreturn_t sunxi_mmc_handle_do_bottom_half(void *dev_id);
static void sunxi_mmc_request_done(struct mmc_host *mmc, struct mmc_request *mrq);

static void sunxi_cmd11_timerout_handle(struct work_struct *work)
{
	struct sunxi_mmc_host *host;
	unsigned long flags;
	struct mmc_request *mrq = NULL;

	host = container_of(work, struct sunxi_mmc_host, sunxi_timerout_work.work);
	mrq = host->mrq;
	SM_ERR(mmc_dev(host->mmc), "cmd11 timeout\n");
	spin_lock_irqsave(&host->lock, flags);

	mrq->cmd->error = -ETIMEDOUT;
	if (mrq->stop)
		mrq->stop->error = -ETIMEDOUT;
	host->mrq = NULL;
	host->int_sum = 0;
	host->wait_dma = false;

	/* reset host */
	sunxi_mmc_regs_save(host);
	spin_unlock_irqrestore(&host->lock, flags);
	/* gating/reset protects itself, so don't take host->lock here */
	sunxi_mmc_bus_clk_en(host, 0);
	sunxi_mmc_bus_clk_en(host, 1);
	sunxi_mmc_regs_restore(host);

	mmc_request_done(host->mmc, mrq);
}
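
/*
 * Transfer watchdog (descriptive note, inferred from the code below): the
 * delayed work re-arms itself every SUNXI_TRANS_TIMEOUT. If a request is
 * still outstanding it either waits out a long-busy write (polling the
 * REG_CBCR/REG_BBCR byte counters), resets the host when the card stays
 * busy too long, or hands the request to the retry path in the bottom half.
 */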

static void sunxi_timerout_handle(struct work_struct *work)
{
	struct sunxi_mmc_host *host;
	unsigned long flags;
	u32 rint = 0;
	u32 idma_int = 0;
	struct mmc_request *mrq = NULL;
	int rval = 0;

	host = container_of(work, struct sunxi_mmc_host, sunxi_timerout_work.work);
	spin_lock_irqsave(&host->lock, flags);
	SM_ERR(mmc_dev(host->mmc), "timer timeout\n");
	queue_delayed_work(system_wq,
			   &host->sunxi_timerout_work,
			   SUNXI_TRANS_TIMEOUT);
	if (host->mrq && !host->manual_stop_mrq && !host->mrq_busy && !host->mrq_retry) {
		rint = mmc_readl(host, REG_RINTR);
		idma_int = mmc_readl(host, REG_IDST);
		if ((rint & (SDXC_INTERRUPT_DONE_BIT | SDXC_INTERRUPT_ERROR_BIT))
		    || (idma_int & (SDXC_IDMAC_TRANSMIT_INTERRUPT | SDXC_IDMAC_RECEIVE_INTERRUPT))) {
			SM_INFO(mmc_dev(host->mmc), "transferring\n");
			if ((host->mrq->data) && (host->mrq->data->flags & MMC_DATA_WRITE)) {
				u32 cbcr = 0, bbcr = 0;
				int wait_time = 2;

				do {
					cbcr = mmc_readl(host, REG_CBCR);
					bbcr = mmc_readl(host, REG_BBCR);
					if ((bbcr - cbcr) >= 1024) {
						SM_ERR(mmc_dev(host->mmc), "card maybe busy too long %d\n", wait_time);
						spin_unlock_irqrestore(&host->lock, flags);
						sunxi_dump_reg(host->mmc);
						msleep(1000);
						spin_lock_irqsave(&host->lock, flags);
					}
				} while (wait_time--);
				if (wait_time < 0) {
					sunxi_mmc_regs_save(host);
					spin_unlock_irqrestore(&host->lock, flags);
					/* gating/reset protects itself, so don't take host->lock here */
					sunxi_mmc_bus_clk_en(host, 0);
					sunxi_mmc_bus_clk_en(host, 1);
					sunxi_mmc_regs_restore(host);
					/* use sunxi_mmc_oclk_en to update the clock */
					rval = host->sunxi_mmc_oclk_en(host, 1);
					SM_ERR(mmc_dev(host->mmc), "too busy: re-update clk\n");
					if (rval) {
						SM_ERR(mmc_dev(host->mmc), "retry: update clk failed %s %d\n",
						       __func__, __LINE__);
					}
					SM_ERR(mmc_dev(host->mmc), "too busy: host reset and reg recover ok\n");
					spin_lock_irqsave(&host->lock, flags);
					host->mrq->cmd->error = -ETIMEDOUT;
					if (host->mrq->data)
						host->mrq->data->error = -ETIMEDOUT;
					if (host->mrq->stop)
						host->mrq->stop->error = -ETIMEDOUT;
				}
				mrq = host->mrq;
				host->mrq = NULL;
				host->int_sum = 0;
				host->wait_dma = false;
				SM_ERR(mmc_dev(host->mmc), "too busy: done\n");
				sunxi_mmc_request_done(host->mmc, mrq);
			}
			goto timeout_out;
		}
		SM_INFO(mmc_dev(host->mmc), "timeout retry\n");
		mmc_writel(host, REG_IMASK, host->sdio_imask | host->dat3_imask);
		host->mrq_retry = host->mrq;
		spin_unlock_irqrestore(&host->lock, flags);
		sunxi_mmc_handle_do_bottom_half(host);
		return;
	} else
		SM_ERR(mmc_dev(host->mmc), "no request running\n");
timeout_out:
	spin_unlock_irqrestore(&host->lock, flags);
}

static void sunxi_mmc_request_done(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sunxi_mmc_host *host = mmc_priv(mmc);
	struct mmc_command *cmd = (mrq->sbc && !host->sunxi_mmc_opacmd23) ? mrq->sbc : mrq->cmd;

	if ((host->ctl_spec_cap & SUNXI_SC_EN_TIMEOUT_DETECT)
	    || (cmd->opcode == SD_SWITCH_VOLTAGE && (host->ctl_spec_cap & SUNXI_CMD11_TIMEOUT_DETECT))) {
		cancel_delayed_work(&host->sunxi_timerout_work);
	}
#if IS_ENABLED(CONFIG_MMC_HSQ)
	/* Check first whether the request came from the software queue. */
	if (host->sunxi_caps3 & MMC_SUNXI_CAP3_HSQ) {
		if (mmc_hsq_finalize_request(host->mmc, mrq))
			return;
	}
#endif

	mmc_request_done(mmc, mrq);
}

#if IS_ENABLED(CONFIG_REGULATOR)
/**
 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
 * @vdd: voltage (mV)
 * @low_bits: prefer low bits in boundary cases
 *
 * This function returns the OCR bit number according to the provided @vdd
 * value. If conversion is not possible a negative errno value is returned.
 *
 * Depending on the @low_bits flag the function prefers low or high OCR bits
 * on boundary voltages. For example,
 * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
 * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
 *
 * Any value in the [1951:1999] range translates to ilog2(MMC_VDD_20_21).
 */
static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
{
	const int max_bit = ilog2(MMC_VDD_35_36);
	int bit;

	if (vdd < 1650 || vdd > 3600)
		return -EINVAL;

	if (vdd >= 1650 && vdd <= 1950)
		return ilog2(MMC_VDD_165_195);

	if (low_bits)
		vdd -= 1;

	/* Base 2000 mV, step 100 mV, bit's base 8. */
	bit = (vdd - 2000) / 100 + 8;
	if (bit > max_bit)
		return max_bit;
	return bit;
}
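
/*
 * Worked example (illustrative, following the code above): for 3300 mV
 * with low_bits = true the function computes (3299 - 2000) / 100 + 8 = 20,
 * i.e. ilog2(MMC_VDD_32_33); with low_bits = false it computes
 * (3300 - 2000) / 100 + 8 = 21, i.e. ilog2(MMC_VDD_33_34).
 */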

/**
 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
 * @vdd_min: minimum voltage value (mV)
 * @vdd_max: maximum voltage value (mV)
 *
 * This function returns the OCR mask bits according to the provided @vdd_min
 * and @vdd_max values. If conversion is not possible the function returns 0.
 *
 * Notes wrt boundary cases:
 * This function sets the OCR bits for all boundary voltages, for example
 * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
 * MMC_VDD_34_35 mask.
 */
static u32 sunxi_mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
{
	u32 mask = 0;

	if (vdd_max < vdd_min)
		return 0;

	/* Prefer high bits for the boundary vdd_max values. */
	vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
	if (vdd_max < 0)
		return 0;

	/* Prefer low bits for the boundary vdd_min values. */
	vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
	if (vdd_min < 0)
		return 0;

	/* Fill the mask, from max bit to min bit. */
	while (vdd_max >= vdd_min)
		mask |= 1 << vdd_max--;

	return mask;
}
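
/*
 * Worked example (illustrative): sunxi_mmc_vddrange_to_ocrmask(3300, 3400)
 * maps vdd_min to bit 20 and vdd_max to bit 22, so the fill loop returns
 * MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_34_35, matching the
 * boundary-case note in the kernel-doc above.
 */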
/**
 * mmc_regulator_get_ocrmask - return mask of supported voltages
 * @supply: regulator to use
 *
 * This returns either a negative errno, or a mask of voltages that
 * can be provided to MMC/SD/SDIO devices using the specified voltage
 * regulator. This would normally be called before registering the
 * MMC host adapter.
 */
static int mmc_regulator_get_ocrmask(struct regulator *supply)
{
	int result = 0;
	int count;
	int i;
	int vdd_uV;
	int vdd_mV;

	count = regulator_count_voltages(supply);
	if (count < 0)
		return count;

	for (i = 0; i < count; i++) {
		vdd_uV = regulator_list_voltage(supply, i);
		if (vdd_uV <= 0)
			continue;

		vdd_mV = vdd_uV / 1000;
		result |= sunxi_mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
	}

	if (!result) {
		vdd_uV = regulator_get_voltage(supply);
		if (vdd_uV <= 0)
			return vdd_uV;

		vdd_mV = vdd_uV / 1000;
		result = sunxi_mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
	}

	return result;
}

/**
 * mmc_regulator_set_ocr - set regulator to match host->ios voltage
 * @mmc: the host to regulate
 * @supply: regulator to use
 * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
 *
 * Returns zero on success, else negative errno.
 *
 * MMC host drivers may use this to enable or disable a regulator using
 * a particular supply voltage. This would normally be called from the
 * set_ios() method.
 */
int sunxi_mmc_regulator_set_ocr(struct mmc_host *mmc,
				struct regulator *supply,
				unsigned short vdd_bit)
{
	int result = 0;
	/*int min_uV, max_uV;*/

	if (vdd_bit) {
		/* the sunxi platform avoids setting the vcc voltage */
		/*mmc_ocrbitnum_to_vdd(vdd_bit, &min_uV, &max_uV);*/

		/*result = regulator_set_voltage(supply, min_uV, max_uV);*/
		if (result == 0 && !mmc->regulator_enabled) {
			result = regulator_enable(supply);
			if (!result)
				mmc->regulator_enabled = true;
		}
	} else if (mmc->regulator_enabled) {
		result = regulator_disable(supply);
		if (result == 0)
			mmc->regulator_enabled = false;
	}

	if (result)
		SM_ERR(mmc_dev(mmc),
		       "could not set regulator OCR (%d)\n", result);
	return result;
}
#else
static inline int mmc_regulator_get_ocrmask(struct regulator *supply)
{
	return 0;
}

static inline int sunxi_mmc_regulator_set_ocr(struct mmc_host *mmc,
					      struct regulator *supply,
					      unsigned short vdd_bit)
{
	return 0;
}
#endif

static int sunxi_mmc_reset_host(struct sunxi_mmc_host *host)
{
	unsigned long expire = jiffies + msecs_to_jiffies(250);
	u32 rval;

	mmc_writel(host, REG_GCTRL, SDXC_HARDWARE_RESET);
	do {
		rval = mmc_readl(host, REG_GCTRL);
		if (!(rval & SDXC_HARDWARE_RESET))
			break;
		cond_resched();
	} while (time_before(jiffies, expire));

	if (rval & SDXC_HARDWARE_RESET) {
		SM_ERR(mmc_dev(host->mmc), "fatal err reset timeout\n");
		return -EIO;
	}

	return 0;
}
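
/*
 * The three reset helpers below follow the same pattern as
 * sunxi_mmc_reset_host(): set a self-clearing reset bit, then poll until
 * the controller clears it again, giving up after a 250 ms budget.
 */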

static int sunxi_mmc_reset_dmaif(struct sunxi_mmc_host *host)
{
	unsigned long expire = jiffies + msecs_to_jiffies(250);
	u32 rval;

	rval = mmc_readl(host, REG_GCTRL);
	mmc_writel(host, REG_GCTRL, rval | SDXC_DMA_RESET);
	do {
		rval = mmc_readl(host, REG_GCTRL);
		if (!(rval & SDXC_DMA_RESET))
			break;
		cond_resched();
	} while (time_before(jiffies, expire));

	if (rval & SDXC_DMA_RESET) {
		SM_ERR(mmc_dev(host->mmc),
		       "fatal err reset dma interface timeout\n");
		return -EIO;
	}

	return 0;
}

static int sunxi_mmc_reset_fifo(struct sunxi_mmc_host *host)
{
	unsigned long expire = jiffies + msecs_to_jiffies(250);
	u32 rval;

	rval = mmc_readl(host, REG_GCTRL);
	mmc_writel(host, REG_GCTRL, rval | SDXC_FIFO_RESET);
	do {
		rval = mmc_readl(host, REG_GCTRL);
		if (!(rval & SDXC_FIFO_RESET))
			break;
		cond_resched();
	} while (time_before(jiffies, expire));

	if (rval & SDXC_FIFO_RESET) {
		SM_ERR(mmc_dev(host->mmc), "fatal err reset fifo timeout\n");
		return -EIO;
	}

	return 0;
}

static int sunxi_mmc_reset_dmactl(struct sunxi_mmc_host *host)
{
	unsigned long expire = jiffies + msecs_to_jiffies(250);
	u32 rval;

	rval = mmc_readl(host, REG_DMAC);
	mmc_writel(host, REG_DMAC, rval | SDXC_IDMAC_SOFT_RESET);
	do {
		rval = mmc_readl(host, REG_DMAC);
		if (!(rval & SDXC_IDMAC_SOFT_RESET))
			break;
		cond_resched();
	} while (time_before(jiffies, expire));

	if (rval & SDXC_IDMAC_SOFT_RESET) {
		SM_ERR(mmc_dev(host->mmc),
		       "fatal err reset dma control timeout\n");
		return -EIO;
	}

	return 0;
}

void sunxi_mmc_set_a12a(struct sunxi_mmc_host *host)
{
	mmc_writel(host, REG_A12A, 0);
}
EXPORT_SYMBOL_GPL(sunxi_mmc_set_a12a);

static int sunxi_mmc_init_host(struct mmc_host *mmc)
{
	u32 rval;
	struct sunxi_mmc_host *host = mmc_priv(mmc);

	if (sunxi_mmc_reset_host(host))
		return -EIO;

	if (sunxi_mmc_reset_dmactl(host))
		return -EIO;

	mmc_writel(host, REG_FTRGL, host->dma_tl ? host->dma_tl : 0x20070008);
	SM_DBG(mmc_dev(host->mmc), "REG_FTRGL %x\n",
	       mmc_readl(host, REG_FTRGL));
	mmc_writel(host, REG_TMOUT, 0xffffffff);
	mmc_writel(host, REG_IMASK, host->sdio_imask | host->dat3_imask);
	mmc_writel(host, REG_RINTR, 0xffffffff);
	mmc_writel(host, REG_DBGC, 0xdeb);
	/*mmc_writel(host, REG_FUNS, SDXC_CEATA_ON);*/
	mmc_writel(host, REG_DLBA, sunxi_mmc_host_des_addr(host, host->sg_dma));

	rval = mmc_readl(host, REG_GCTRL);
	rval |= SDXC_INTERRUPT_ENABLE_BIT;
	rval &= ~SDXC_ACCESS_DONE_DIRECT;
	if (host->dat3_imask)
		rval |= SDXC_DEBOUNCE_ENABLE_BIT;

	mmc_writel(host, REG_GCTRL, rval);

	if (host->sunxi_mmc_set_acmda)
		host->sunxi_mmc_set_acmda(host);

	return 0;
}

static void sunxi_mmc_init_idma_des(struct sunxi_mmc_host *host,
				    struct mmc_data *data)
{
	struct sunxi_idma_des *pdes = (struct sunxi_idma_des *)host->sg_cpu;
	struct sunxi_idma_des *pdes_pa = (struct sunxi_idma_des *)host->sg_dma;
	struct mmc_host *mmc = host->mmc;
	int i = 0, j = 0;
	int count = 0;
	int cycles_count = 0;
	unsigned int remainder = 0;

	for (i = 0; i < data->sg_len; i++) {
		cycles_count = (data->sg[i].length - 1) / mmc->max_seg_size + 1;
		remainder = ((data->sg[i].length - 1) % (mmc->max_seg_size)) + 1;
		for (j = 0; j < cycles_count; j++) {
			pdes[i + count].config = SDXC_IDMAC_DES0_CH | SDXC_IDMAC_DES0_OWN |
						 SDXC_IDMAC_DES0_DIC;
			pdes[i + count].buf_size = (j != (cycles_count - 1)) ? (mmc->max_seg_size) : (remainder);
			pdes[i + count].buf_addr_ptr1 = sunxi_mmc_host_des_addr(host,
					sg_dma_address(&data->sg[i]) + j * mmc->max_seg_size);
			/* We use size_t only to avoid a compile warning */
			/*pdes[i].buf_addr_ptr2 = (u32) (size_t) &pdes_pa[i + 1];*/
			pdes[i + count].buf_addr_ptr2 = (u32)sunxi_mmc_host_des_addr(host, (size_t)&pdes_pa[i + count + 1]);
			count++;
		}
		count--;
	}

	if ((i + count) > mmc->max_segs) {
		SM_ERR(mmc_dev(mmc), "sg_len greater than max_segs\n");
	}

	pdes[0].config |= SDXC_IDMAC_DES0_FD;
	pdes[i + count - 1].config |= SDXC_IDMAC_DES0_LD;
	pdes[i + count - 1].config &= ~SDXC_IDMAC_DES0_DIC;

	/*
	 * Avoid the io-store starting the idmac hitting io-mem before the
	 * descriptors hit the main-mem.
	 */
	wmb();
}
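
/*
 * Illustrative example (values assumed): with max_seg_size = 64 KiB, a
 * single 128 KiB scatterlist entry gives cycles_count = 2 and
 * remainder = 64 KiB, i.e. two chained descriptors of 64 KiB each. Only
 * the final descriptor of the whole chain gets SDXC_IDMAC_DES0_LD and has
 * its "disable interrupt on completion" (DIC) bit cleared, so the IDMAC
 * raises a single interrupt per request.
 */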

static enum dma_data_direction sunxi_mmc_get_dma_dir(struct mmc_data *data)
{
	if (data->flags & MMC_DATA_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_FROM_DEVICE;
}

static int sunxi_mmc_map_dma(struct sunxi_mmc_host *host, struct mmc_data *data,
			     int cookie)
{
	u32 i = 0, dma_len;
	struct scatterlist *sg = NULL;
	int max_len = (1 << host->idma_des_size_bits);

	if (data->host_cookie == COOKIE_PRE_MAPPED) {
		SM_DBG(mmc_dev(host->mmc), "already pre-mapped\n");
		/*dump_stack();*/
		return 0;
	}

	dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
			     sunxi_mmc_get_dma_dir(data));
	if (dma_len == 0) {
		SM_ERR(mmc_dev(host->mmc), "dma_map_sg failed\n");
		return -ENOMEM;
	}

	/*
	 * According to DMA-API.txt, dma_len need not always equal
	 * data->sg_len, because the dma_map_sg implementation is free
	 * to merge several consecutive sglist entries into one.
	 * In practice, however, dma_len here is always equal to
	 * data->sg_len, so we don't change the code and only warn,
	 * to be safe.
	 */
	if (dma_len != data->sg_len) {
		SM_ERR(mmc_dev(host->mmc), "*******dma_len != data->sg_len*******\n");
		return -1;
	}

	if (dma_len > host->mmc->max_segs) {
		SM_ERR(mmc_dev(host->mmc), "*******dma_len > host->mmc->max_segs*******\n");
		return -1;
	}

	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (sg->offset & 3 || sg->length & 3) {
			SM_ERR(mmc_dev(host->mmc),
			       "unaligned scatterlist: os %x length %d\n",
			       sg->offset, sg->length);
			return -EINVAL;
		}
		if (data->sg_len > max_len) {
			SM_ERR(mmc_dev(host->mmc),
			       "******sg len is over one dma des transfer len******\n");
			return -1;
		}
	}

	data->host_cookie = cookie;
	SM_DBG(mmc_dev(host->mmc), "map dma %x %p\n", data->host_cookie, data);

	return 0;
}

static int sunxi_mmc_start_dma(struct sunxi_mmc_host *host,
			       struct mmc_data *data, bool atomic)
{
	u32 rval;

	sunxi_mmc_init_idma_des(host, data);

	if (!atomic) {
		sunxi_mmc_reset_fifo(host);
		sunxi_mmc_reset_dmaif(host);
		sunxi_mmc_reset_dmactl(host);
		SM_DBG(mmc_dev(host->mmc), "reset fifo dmaif dmactl\n");
	} else {
		SM_DBG(mmc_dev(host->mmc), "no reset fifo dmaif dmactl\n");
		rval = mmc_readl(host, REG_GCTRL);
		if (rval & (SDXC_FIFO_RESET | SDXC_DMA_RESET)) {
			SM_ERR(mmc_dev(host->mmc), " reset fifo dmaif timeout\n");
			return -EBUSY;
		}

		rval = mmc_readl(host, REG_DMAC);
		if (rval & SDXC_IDMAC_SOFT_RESET) {
			SM_ERR(mmc_dev(host->mmc), " reset dmac control timeout\n");
			return -EBUSY;
		}
	}

	rval = mmc_readl(host, REG_GCTRL);
	rval |= SDXC_DMA_ENABLE_BIT;
	mmc_writel(host, REG_GCTRL, rval);

	if (!(data->flags & MMC_DATA_WRITE))
		mmc_writel(host, REG_IDIE, SDXC_IDMAC_RECEIVE_INTERRUPT);

	mmc_writel(host, REG_DMAC, SDXC_IDMAC_FIX_BURST | SDXC_IDMAC_IDMA_ON);
	return 0;
}

static void sunxi_mmc_send_manual_stop(struct sunxi_mmc_host *host,
				       struct mmc_request *req)
{
	u32 arg, cmd_val, ri;
	unsigned long expire = jiffies + msecs_to_jiffies(1000);

	cmd_val = SDXC_START | SDXC_RESP_EXPECT |
		  SDXC_STOP_ABORT_CMD | SDXC_CHECK_RESPONSE_CRC;

	if (req->cmd->opcode == SD_IO_RW_EXTENDED) {
		cmd_val |= SD_IO_RW_DIRECT;
		arg = (1U << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
		      ((req->cmd->arg >> 28) & 0x7);
	} else {
		cmd_val |= MMC_STOP_TRANSMISSION;
		arg = 0;
	}

	mmc_writel(host, REG_CARG, arg);
	/* the argument must be written before the command */
	wmb();
	mmc_writel(host, REG_CMDR, cmd_val);

	do {
		ri = mmc_readl(host, REG_RINTR);
	} while (!(ri & (SDXC_COMMAND_DONE | SDXC_INTERRUPT_ERROR_BIT)) &&
		 time_before(jiffies, expire));

	if (!(ri & SDXC_COMMAND_DONE) || (ri & SDXC_INTERRUPT_ERROR_BIT)) {
		SM_ERR(mmc_dev(host->mmc),
		       "send manual stop command failed %x\n", (unsigned int)(ri & SDXC_INTERRUPT_ERROR_BIT));
		if (req->stop)
			req->stop->resp[0] = -ETIMEDOUT;
	} else {
		if (req->stop)
			req->stop->resp[0] = mmc_readl(host, REG_RESP0);
		SM_DBG(mmc_dev(host->mmc), "send manual stop command ok\n");
	}

	mmc_writel(host, REG_RINTR, 0xffff);
}
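
/*
 * Note on the stop path above (descriptive, per the SDIO usage in the
 * code): an interrupted CMD53 transfer is not stopped with CMD12; instead
 * a CMD52 write to function 0 is issued (bit 31 = write flag,
 * SDIO_CCCR_ABORT in the register-address field), and the number of the
 * function to abort, recovered from bits 28-30 of the original CMD53
 * argument, is written into the abort register's ASx bits.
 */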

static void sunxi_mmc_dump_errinfo(struct sunxi_mmc_host *host)
{
	struct mmc_command *cmd = host->mrq->cmd;
	struct mmc_data *data = host->mrq->data;

	/* For some cmds a timeout is normal with sd/mmc cards */
	/*
	 * if ((host->int_sum & SDXC_INTERRUPT_ERROR_BIT) ==
	 *     SDXC_RESP_TIMEOUT &&
	 *     (cmd->opcode == SD_IO_SEND_OP_COND || cmd->opcode == SD_IO_RW_DIRECT))
	 *	return;
	 */

	SM_ERR(mmc_dev(host->mmc),
	       "smc %d p%d err, cmd %d,%s%s%s%s%s%s%s%s%s%s !!\n",
	       host->mmc->index, host->phy_index, cmd->opcode,
	       data ? (data->flags & MMC_DATA_WRITE ? " WR" : " RD") : "",
	       host->int_sum & SDXC_RESP_ERROR ? " RE" : "",
	       host->int_sum & SDXC_RESP_CRC_ERROR ? " RCE" : "",
	       host->int_sum & SDXC_DATA_CRC_ERROR ? " DCE" : "",
	       host->int_sum & SDXC_RESP_TIMEOUT ? " RTO" : "",
	       host->int_sum & SDXC_DATA_TIMEOUT ? " DTO" : "",
	       host->int_sum & SDXC_FIFO_RUN_ERROR ? " FE" : "",
	       host->int_sum & SDXC_HARD_WARE_LOCKED ? " HL" : "",
	       host->int_sum & SDXC_START_BIT_ERROR ? " SBE" : "",
	       host->int_sum & SDXC_END_BIT_ERROR ? " EBE" : "");
	/*sunxi_mmc_dumphex32(host, "sunxi mmc", host->reg_base, 0x180); */
	/*sunxi_mmc_dump_des(host, host->sg_cpu, PAGE_SIZE); */
}

#define SUNXI_FINAL_CONT	1
#define SUNXI_FINAL_END		2
#define SUNXI_FINAL_BHALF	3
#define SUNXI_FINAL_NONE	0
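
/*
 * Return codes of sunxi_mmc_finalize_request() (summary added for
 * clarity): SUNXI_FINAL_END means the request can be completed directly;
 * SUNXI_FINAL_CONT means the data part of an sbc request was just issued
 * and the irq handler must keep waiting; SUNXI_FINAL_BHALF defers
 * completion to the threaded bottom half (manual stop, busy wait or retry).
 */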

/* Called in interrupt context! */
static int sunxi_mmc_finalize_request(struct sunxi_mmc_host *host)
{
	struct mmc_request *mrq = host->mrq;
	struct mmc_data *data = mrq->data;
	struct mmc_command *sbc = mrq->sbc;
	struct mmc_command *cmd = mrq->cmd;
	const struct mmc_host_ops *ops = host->mmc->ops;
	u32 imask = 0;
	u32 cmd_val = 0;
	u32 rval;
	bool wait_dma = false;
	bool cont_dat_cmd = false;
	int err_flags = SDXC_INTERRUPT_ERROR_BIT;

	if (data && data->flags & MMC_DATA_WRITE && (host->sunxi_mmc_hw_wbusy_wait))
		err_flags &= ~SDXC_START_BIT_ERROR;

	if (host->int_sum & err_flags) {
		sunxi_mmc_dump_errinfo(host);
		if (((host->ctl_spec_cap & SUNXI_SC_EN_RETRY) && data)
		    || ((host->ctl_spec_cap & SUNXI_SC_EN_RETRY_CMD) && !data)) {
			host->mrq_retry = mrq;
			host->errno_retry =
				host->int_sum & SDXC_INTERRUPT_ERROR_BIT;
		} else {
			mrq->cmd->error = -ETIMEDOUT;

			if (data) {
				data->error = -ETIMEDOUT;
				host->manual_stop_mrq = mrq;
			}

			if (mrq->stop)
				mrq->stop->error = -ETIMEDOUT;
		}
	} else {
		/*if (!sbc || (sbc && host->sunxi_mmc_opacmd23)) {*/
		if (!sbc || (host->sunxi_mmc_opacmd23)) {

			if (cmd->flags & MMC_RSP_136) {
				cmd->resp[0] = mmc_readl(host, REG_RESP3);
				cmd->resp[1] = mmc_readl(host, REG_RESP2);
				cmd->resp[2] = mmc_readl(host, REG_RESP1);
				cmd->resp[3] = mmc_readl(host, REG_RESP0);
			} else {
				cmd->resp[0] = mmc_readl(host, REG_RESP0);
			}

			if (data) {
				data->bytes_xfered = data->blocks * data->blksz;
				if (sbc && host->sunxi_mmc_opacmd23)
					host->sunxi_mmc_opacmd23(host, false, 0, sbc->resp);
			}

			/*
			 * To avoid "wait busy" and "manual stop"
			 * occurring at the same time,
			 * we wait busy only when no error occurred.
			 */
			if (sunxi_mmc_creq_r1b_chk_need(host, cmd)
			    || sunxi_mmc_dreq_r1b_chk_need(host, data)) {
				if ((ops->card_busy) && (ops->card_busy(host->mmc))) {
					host->mrq_busy = host->mrq;
					SM_DBG(mmc_dev(host->mmc),
					       "cmd%d,wb\n", cmd->opcode);
				}
			}
			/* clear the retry count if the retry succeeded */
			if (host->retry_cnt)
				printk("%d,end\n", host->retry_cnt);
			sunxi_mmc_clean_retry_cnt(host);
		} else {

			if (host->int_sum & SDXC_COMMAND_DONE) {
				sbc->resp[0] = mmc_readl(host, REG_RESP0);
				cont_dat_cmd = true;
				goto out;
			} else if (host->int_sum & SDXC_INTERRUPT_DDONE_BIT) {
				cmd->resp[0] = mmc_readl(host, REG_RESP0);
				data->bytes_xfered = data->blocks * data->blksz;

				/*
				 * To avoid "wait busy" and "manual stop"
				 * occurring at the same time,
				 * we wait busy only when no error occurred.
				 */
				if (sunxi_mmc_dreq_r1b_chk_need(host, data)) {
					if ((ops->card_busy) && (ops->card_busy(host->mmc))) {
						host->mrq_busy = host->mrq;
						SM_DBG(mmc_dev(host->mmc),
						       "cmd%d,wb\n", cmd->opcode);
					}
				}
				/* clear the retry count if the retry succeeded */
				sunxi_mmc_clean_retry_cnt(host);
			}
		}
	}

	if (data) {
		mmc_writel(host, REG_IDST, 0x337);
		mmc_writel(host, REG_IDIE, 0);
		mmc_writel(host, REG_DMAC, SDXC_IDMAC_SOFT_RESET);
		rval = mmc_readl(host, REG_GCTRL);
		rval |= SDXC_DMA_RESET;
		mmc_writel(host, REG_GCTRL, rval);
		rval &= ~SDXC_DMA_ENABLE_BIT;
		mmc_writel(host, REG_GCTRL, rval);
		rval |= SDXC_FIFO_RESET;
		mmc_writel(host, REG_GCTRL, rval);
		if (data->host_cookie != COOKIE_PRE_MAPPED) {
			dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
				     sunxi_mmc_get_dma_dir(data));
			data->host_cookie = COOKIE_UNMAPPED;
			SM_DBG(mmc_dev(host->mmc), "Unmapped %p", data);
		}
		if (host->sunxi_mmc_hw_wbusy_wait)
			host->sunxi_mmc_hw_wbusy_wait(host, data, false);

		sunxi_mmc_uperf_stat(host, data, host->mrq_busy, false);
		if (host->sunxi_mmc_on_off_emce && data->sg) {
			if (host->crypt_flag) {
				SM_DBG(mmc_dev(host->mmc), "emce is disabled\n");
				host->sunxi_mmc_on_off_emce(host, 0, 0, 0,
							    data->bytes_xfered, 1, 0);
			}
		}
		host->crypt_flag = 0;
	}

out:
	mmc_writel(host, REG_IMASK, host->sdio_imask | host->dat3_imask);
	mmc_writel(host, REG_RINTR, 0xffff);

	if (host->dat3_imask) {
		rval = mmc_readl(host, REG_GCTRL);
		mmc_writel(host, REG_GCTRL, rval | SDXC_DEBOUNCE_ENABLE_BIT);
	}

	host->mrq = NULL;
	host->int_sum = 0;
	host->wait_dma = false;

	if (cont_dat_cmd) {
		sunxi_mmc_parse_cmd(host->mmc,
				    cmd,
				    &cmd_val,
				    &imask,
				    &wait_dma);
		host->mrq = mrq;
		host->wait_dma = wait_dma;
		sunxi_mmc_exe_cmd(host, cmd, cmd_val, imask);
		return SUNXI_FINAL_CONT;
	}

	return (host->manual_stop_mrq
		|| host->mrq_busy
		|| host->mrq_retry) ? SUNXI_FINAL_BHALF : SUNXI_FINAL_END;
}

static irqreturn_t sunxi_mmc_irq(int irq, void *dev_id)
{
	struct sunxi_mmc_host *host = dev_id;
	struct mmc_request *mrq;
	u32 msk_int, idma_int;
	bool finalize = false;
	bool sdio_int = false;
	int final_ret = 0;
	irqreturn_t ret = IRQ_HANDLED;

	spin_lock(&host->lock);

	idma_int = mmc_readl(host, REG_IDST);
	msk_int = mmc_readl(host, REG_MISTA);

	SM_DBG(mmc_dev(host->mmc), "irq: rq %p mi %08x idi %08x\n",
	       host->mrq, msk_int, idma_int);

	if (host->dat3_imask) {
		if (msk_int & SDXC_CARD_INSERT) {
			mmc_writel(host, REG_RINTR, SDXC_CARD_INSERT);
			mmc_detect_change(host->mmc, msecs_to_jiffies(500));
			goto out;
		}
		if (msk_int & SDXC_CARD_REMOVE) {
			mmc_writel(host, REG_RINTR, SDXC_CARD_REMOVE);
			mmc_detect_change(host->mmc, msecs_to_jiffies(50));
			goto out;
		}
	}

	mrq = host->mrq;
	if (mrq) {
		if (idma_int & SDXC_IDMAC_RECEIVE_INTERRUPT)
			host->wait_dma = false;

		host->int_sum |= msk_int;

		/* Wait for COMMAND_DONE on RESPONSE_TIMEOUT before finalize */
		if ((host->int_sum & SDXC_RESP_TIMEOUT) &&
		    !(host->int_sum & SDXC_COMMAND_DONE))
			mmc_writel(host, REG_IMASK,
				   host->sdio_imask | host->dat3_imask | SDXC_COMMAND_DONE);
		/* Don't wait for dma on error */
		else if (host->int_sum & SDXC_INTERRUPT_ERROR_BIT)
			finalize = true;
		else if ((host->int_sum & SDXC_INTERRUPT_DONE_BIT) &&
			 !host->wait_dma)
			finalize = true;
	}

	if (msk_int & SDXC_SDIO_INTERRUPT)
		sdio_int = true;

	mmc_writel(host, REG_RINTR, msk_int);
	mmc_writel(host, REG_IDST, idma_int);

	if (finalize)
		final_ret = sunxi_mmc_finalize_request(host);
out:
	smp_wmb();
	spin_unlock(&host->lock);

	if (finalize && (final_ret == SUNXI_FINAL_END))
		sunxi_mmc_request_done(host->mmc, mrq);

	if (sdio_int)
		mmc_signal_sdio_irq(host->mmc);

	if (final_ret == SUNXI_FINAL_BHALF)
		ret = IRQ_WAKE_THREAD;

	return ret;
}

int sunxi_check_r1_ready(struct sunxi_mmc_host *smc_host, unsigned ms)
{
	unsigned long expire = jiffies + msecs_to_jiffies(ms);
	const struct mmc_host_ops *ops = smc_host->mmc->ops;

	SM_INFO(mmc_dev(smc_host->mmc), "wrd\n");
	do {
		if ((ops->card_busy) && (!(ops->card_busy(smc_host->mmc))))
			break;
	} while (time_before(jiffies, expire));

	if ((ops->card_busy) && ((ops->card_busy(smc_host->mmc)))) {
		SM_ERR(mmc_dev(smc_host->mmc), "wait r1 rdy %d ms timeout\n",
		       ms);
		return -1;
	} else {
		return 0;
	}
}

int sunxi_check_r1_ready_may_sleep(struct sunxi_mmc_host *smc_host)
{
	unsigned int cnt = 0;
	/*
	 * SUNXI_DEF_MAX_R1B_TIMEOUT - 10 ms (dead wait) - 10 ms
	 * (polling interval 10 us, 10 * 1000 us = 10 ms in total)
	 */
	unsigned int delay_max_cnt[2] = {0};
	int i = 0;
	unsigned long expire = jiffies + msecs_to_jiffies(10);
	const struct mmc_host_ops *ops = smc_host->mmc->ops;

	delay_max_cnt[0] = 1000; /* polling interval 10 us */
	/* polling interval 1 ms */
	delay_max_cnt[1] = smc_host->mmc->max_busy_timeout - 10 - 10;

	/* dead wait */
	do {
		if ((ops->card_busy) && (!(ops->card_busy(smc_host->mmc))))
			break;
		cond_resched();
	} while (time_before(jiffies, expire));

	if ((ops->card_busy) && (!(ops->card_busy(smc_host->mmc)))) {
		SM_DBG(mmc_dev(smc_host->mmc), "dead wait r1 rdy ok\n");
		return 0;
	}

	/* sleeping wait */
	for (i = 0; i < 2; i++, cnt = 0) {
		do {
			if ((ops->card_busy) && (!(ops->card_busy(smc_host->mmc)))) {
				SM_DBG(mmc_dev(smc_host->mmc),
				       "cmd%d Wait r1 rdy ok c%d i%d\n",
				       mmc_readl(smc_host, REG_CMDR) & 0x3F,
				       cnt, i);
				return 0;
			}

			/* wait for data0 busy... */
			if (i == 0) {
				if (((cnt % 500000) == 0) && cnt) {
					SM_INFO(mmc_dev(smc_host->mmc),
						"cmd%d Has wait r1 rdy c%d i%d\n",
						mmc_readl(smc_host,
							  REG_CMDR) & 0x3F,
						cnt, i);
				}
				usleep_range(10, 20);
			} else {
				if (((cnt % 5000) == 0) && cnt) {
					SM_INFO(mmc_dev(smc_host->mmc),
						"cmd%d Has wait r1 rdy c%d i%d\n",
						mmc_readl(smc_host,
							  REG_CMDR) & 0x3F,
						cnt, i);
				}
				usleep_range(1000, 1200);
			}
		} while ((cnt++) < delay_max_cnt[i]);
	}
	SM_ERR(mmc_dev(smc_host->mmc), "cmd%d Wait r1 rdy timeout\n",
	       mmc_readl(smc_host, REG_CMDR) & 0x3F);
	return -1;
}
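
/*
 * Wait strategy above (descriptive note): first a ~10 ms busy-poll with
 * cond_resched(), then up to 1000 sleeps of 10-20 us, and finally ~1 ms
 * sleeps until the max_busy_timeout budget is exhausted, printing progress
 * periodically while the card keeps data0 low.
 */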

static irqreturn_t sunxi_mmc_handle_bottom_half(int irq, void *dev_id)
{
	return sunxi_mmc_handle_do_bottom_half(dev_id);
}

static irqreturn_t sunxi_mmc_handle_do_bottom_half(void *dev_id)
{
	struct sunxi_mmc_host *host = dev_id;
	struct mmc_request *mrq_stop;
	struct mmc_request *mrq_busy = NULL;
	struct mmc_request *mrq_retry = NULL;
	struct mmc_host *mmc = host->mmc;
	int rval = 0;
	unsigned long iflags;

	spin_lock_irqsave(&host->lock, iflags);
	mrq_stop = host->manual_stop_mrq;
	mrq_busy = host->mrq_busy;
	mrq_retry = host->mrq_retry;
	spin_unlock_irqrestore(&host->lock, iflags);

	if (mrq_busy) {
		/*
		 * Here we don't use the timeout value in
		 * mrq_busy->busy_timeout, because this value may not be
		 * right, for example when using TRIM. So we wait up to
		 * 10 min and print the elapsed time every 5 seconds.
		 */
		rval = sunxi_check_r1_ready_may_sleep(host);
		spin_lock_irqsave(&host->lock, iflags);
		if (rval) {
			mrq_busy->cmd->error = -ETIMEDOUT;
			if (mrq_busy->data)
				mrq_busy->data->error = -ETIMEDOUT;
			if (mrq_busy->stop)
				mrq_busy->stop->error = -ETIMEDOUT;
		}
		host->mrq_busy = NULL;
		sunxi_mmc_uperf_stat(host, mrq_busy->data, mrq_busy, true);
		smp_wmb();
		spin_unlock_irqrestore(&host->lock, iflags);
		sunxi_mmc_request_done(mmc, mrq_busy);
		return IRQ_HANDLED;
	}
	SM_DBG(mmc_dev(mmc), "no request for busy\n");

	if (mrq_stop) {
		SM_ERR(mmc_dev(mmc), "data error, sending stop command\n");
		/* reset host */
		spin_lock_irqsave(&host->lock, iflags);
		sunxi_mmc_regs_save(host);
		spin_unlock_irqrestore(&host->lock, iflags);
		/* gating/reset protects itself, so don't take host->lock here */
		sunxi_mmc_bus_clk_en(host, 0);
		sunxi_mmc_bus_clk_en(host, 1);
		sunxi_mmc_regs_restore(host);
		SM_DBG(mmc_dev(host->mmc),
		       "stop: host reset and reg recover ok\n");

		/* use sunxi_mmc_oclk_en to update the clock */
		rval = host->sunxi_mmc_oclk_en(host, 1);
		SM_ERR(mmc_dev(host->mmc),
		       "stop: recover\n");
		if (rval) {
			SM_ERR(mmc_dev(mmc), "retry: update clk failed %s %d\n",
			       __func__, __LINE__);
		}

		/*
		 * We will never have more than one outstanding request,
		 * and we do not complete the request until after
		 * we've cleared host->manual_stop_mrq, so we do not need to
		 * spin lock this function.
		 * Additionally we have wait states within this function,
		 * so having it in a lock is a very bad idea.
		 */
		sunxi_mmc_send_manual_stop(host, mrq_stop);
		if (gpio_is_valid(host->card_pwr_gpio))
			gpio_set_value(host->card_pwr_gpio,
				       (host->ctl_spec_cap &
					CARD_PWR_GPIO_HIGH_ACTIVE) ? 0 : 1);

		/* reset host */
		sunxi_mmc_regs_save(host);
		sunxi_mmc_bus_clk_en(host, 0);
		sunxi_mmc_bus_clk_en(host, 1);
		sunxi_mmc_regs_restore(host);
		SM_INFO(mmc_dev(host->mmc),
			"reset: host reset and recover finish\n");
		/* update clk */
		rval = host->sunxi_mmc_oclk_en(host, 1);
		if (rval) {
			SM_ERR(mmc_dev(mmc), "reset: update clk failed %s %d\n",
			       __func__, __LINE__);
		}

		spin_lock_irqsave(&host->lock, iflags);
		host->manual_stop_mrq = NULL;
		smp_wmb();
		spin_unlock_irqrestore(&host->lock, iflags);

		sunxi_mmc_request_done(mmc, mrq_stop);
		return IRQ_HANDLED;
	}
	SM_DBG(mmc_dev(mmc), "no request for manual stop\n");

	if (mrq_retry) {
		bool wait_dma = false;
		u32 imask = 0;
		u32 cmd_val = 0;
		struct mmc_command *cmd = NULL;
		struct mmc_data *data = mrq_retry->data;

		cmd = (mrq_retry->sbc && !host->sunxi_mmc_opacmd23) ? mrq_retry->sbc : mrq_retry->cmd;

		SM_INFO(mmc_dev(host->mmc), "retry: start\n");

		/* Recover the device state and stop the host state machine */
		if (data) {
			SM_ERR(mmc_dev(mmc), "retry: stop\n");
			/* reset host */
			spin_lock_irqsave(&host->lock, iflags);
			sunxi_mmc_regs_save(host);
			spin_unlock_irqrestore(&host->lock, iflags);
			/* gating/reset protects itself, so don't take host->lock here */
			sunxi_mmc_bus_clk_en(host, 0);
			sunxi_mmc_bus_clk_en(host, 1);
			sunxi_mmc_regs_restore(host);
			SM_DBG(mmc_dev(host->mmc),
			       "retry: host reset and reg recover ok\n");

			/* use sunxi_mmc_oclk_en to update the clock */
			rval = host->sunxi_mmc_oclk_en(host, 1);
			SM_ERR(mmc_dev(host->mmc),
			       "retry: stop recover\n");
			if (rval) {
				SM_ERR(mmc_dev(mmc), "retry: update clk failed %s %d\n",
				       __func__, __LINE__);
			}

			sunxi_mmc_send_manual_stop(host, mrq_retry);
		}

		/* If the device is gone, there is no need to retry */
		/* to do: how to deal with data3 detect better here */
		if (!mmc_gpio_get_cd(mmc)) {
			SM_ERR(mmc_dev(mmc), "retry: no device\n");
			/* reset host */
			spin_lock_irqsave(&host->lock, iflags);
			sunxi_mmc_regs_save(host);
			spin_unlock_irqrestore(&host->lock, iflags);
			/* gating/reset protects itself, so don't take host->lock here */
			sunxi_mmc_bus_clk_en(host, 0);
			sunxi_mmc_bus_clk_en(host, 1);
			sunxi_mmc_regs_restore(host);
			SM_DBG(mmc_dev(host->mmc),
			       "no device retry: host reset and reg recover ok\n");

			/* use sunxi_mmc_oclk_en to update the clock */
			rval = host->sunxi_mmc_oclk_en(host, 1);
			SM_ERR(mmc_dev(host->mmc),
			       "no device retry: recover clk\n");
			if (rval) {
				SM_ERR(mmc_dev(mmc), "retry: update clk failed %s %d\n",
				       __func__, __LINE__);
			}

			goto retry_giveup;
		}

		/* wait for the device busy phase to end */
		rval = sunxi_mmc_check_r1_ready(mmc, 1000);
		if (rval) {
			SM_ERR(mmc_dev(host->mmc), "retry: busy timeout\n");
			/*goto retry_giveup;*/
		}

		/* reset host */
		spin_lock_irqsave(&host->lock, iflags);
		sunxi_mmc_regs_save(host);
		spin_unlock_irqrestore(&host->lock, iflags);
		/* gating/reset protects itself, so don't take host->lock here */
		sunxi_mmc_bus_clk_en(host, 0);
		sunxi_mmc_bus_clk_en(host, 1);
		sunxi_mmc_regs_restore(host);
		SM_DBG(mmc_dev(host->mmc),
		       "retry: host reset and reg recover ok\n");

		/* set phase/delay, not locked */
		if (host->sunxi_mmc_judge_retry) {
			rval = host->sunxi_mmc_judge_retry(host, NULL,
							   host->retry_cnt,
							   host->errno_retry,
							   NULL);
			if (rval) {
				SM_ERR(mmc_dev(mmc),
				       "retry: set phase failed or over retry times\n");
				goto reupdate_clk;
			}
		} else if (host->retry_cnt > SUNXI_DEF_RETRY_TIMES) {
			SM_ERR(mmc_dev(mmc),
			       "retry: over default retry times\n");
			goto reupdate_clk;
		}

		/* use sunxi_mmc_oclk_en to update the clock */
		rval = host->sunxi_mmc_oclk_en(host, 1);
		if (rval) {
			SM_ERR(mmc_dev(mmc), "retry: update clk failed %s %d\n",
			       __func__, __LINE__);
			goto retry_giveup;
		}

		if (data) {
			rval = sunxi_mmc_map_dma(host, data, COOKIE_MAPPED);
			if (rval < 0) {
				SM_ERR(mmc_dev(mmc), "map DMA failed\n");
				goto retry_giveup;
			}
		}

		sunxi_mmc_parse_cmd(mmc, cmd, &cmd_val, &imask, &wait_dma);
		if (data)
			sunxi_mmc_set_dat(host, mmc, data, false);
		spin_lock_irqsave(&host->lock, iflags);
		host->mrq = mrq_retry;
		host->mrq_retry = NULL;
		host->wait_dma = wait_dma;
		host->retry_cnt++;
		host->errno_retry = 0;
		sunxi_mmc_exe_cmd(host, cmd, cmd_val, imask);
		SM_INFO(mmc_dev(host->mmc), "*****retry: re-send cmd*****\n");
		smp_wmb();
		spin_unlock_irqrestore(&host->lock, iflags);
		return IRQ_HANDLED;
reupdate_clk:
		/* update clk so other cmds from the upper layer can be sent */
		rval = host->sunxi_mmc_oclk_en(host, 1);
		if (rval)
			SM_ERR(mmc_dev(mmc), "retry: update clk failed %s %d\n",
			       __func__, __LINE__);
retry_giveup:
		SM_ERR(mmc_dev(host->mmc), "retry: give up\n");
		spin_lock_irqsave(&host->lock, iflags);
		host->mrq_retry = NULL;
		host->mrq = NULL;
		host->int_sum = 0;
		host->wait_dma = false;
		host->errno_retry = 0;
		/* clear the retry count when the retry is given up */
		sunxi_mmc_clean_retry_cnt(host);
		cmd->error = -ETIMEDOUT;
		if (mrq_retry->sbc)
			mrq_retry->cmd->error = -ETIMEDOUT;
		if (data)
			data->error = -ETIMEDOUT;
		if (mrq_retry->stop)
			mrq_retry->stop->error = -ETIMEDOUT;
		smp_wmb();
		spin_unlock_irqrestore(&host->lock, iflags);
		sunxi_mmc_request_done(host->mmc, mrq_retry);
		return IRQ_HANDLED;
	}
	SM_DBG(mmc_dev(host->mmc), "no request for data retry\n");

	SM_ERR(mmc_dev(host->mmc), "no request in bottom half\n");

	return IRQ_HANDLED;
}

s32 sunxi_mmc_update_clk(struct sunxi_mmc_host *host)
{
	u32 rval;
	/* 1000 ms timeout */
	unsigned long expire = jiffies + msecs_to_jiffies(1000);
	s32 ret = 0;

	/* mask data0 while updating the clock */
	mmc_writel(host, REG_CLKCR,
		   mmc_readl(host, REG_CLKCR) | SDXC_MASK_DATA0);

	rval = SDXC_START | SDXC_UPCLK_ONLY | SDXC_WAIT_PRE_OVER;
	/*
	 * if (smc_host->voltage_switching)
	 *	rval |= SDXC_VolSwitch;
	 */
	mmc_writel(host, REG_CMDR, rval);

	do {
		rval = mmc_readl(host, REG_CMDR);
	} while (time_before(jiffies, expire) && (rval & SDXC_START));

	if (rval & SDXC_START) {
		SM_ERR(mmc_dev(host->mmc),
		       "update clock timeout, fatal error!!!\n");
		ret = -EIO;
	}

	/* release data0 after updating the clock */
	mmc_writel(host, REG_CLKCR,
		   mmc_readl(host, REG_CLKCR) & (~SDXC_MASK_DATA0));

	return ret;
}
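
/*
 * Background (added note): SDXC_START | SDXC_UPCLK_ONLY issues the
 * controller's internal "update clock" command, which reloads the card
 * clock dividers without sending anything to the card; the controller
 * clears SDXC_START once the update has taken effect. data0 is masked
 * meanwhile so a busy card does not stall the update.
 */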

static int sunxi_mmc_bus_clk_en(struct sunxi_mmc_host *host, int enable)
{
	int rval = 0;
	struct mmc_host *mmc = host->mmc;

	if (enable) {
		if (!IS_ERR(host->clk_rst)) {
			rval = reset_control_deassert(host->clk_rst);
			if (rval) {
				SM_ERR(mmc_dev(mmc), "reset err %d\n", rval);
				return -1;
			}
		}

		rval = clk_prepare_enable(host->clk_ahb);
		if (rval) {
			SM_ERR(mmc_dev(mmc), "Enable ahb clk err %d\n", rval);
			return -1;
		}
		rval = clk_prepare_enable(host->clk_mmc);
		if (rval) {
			SM_ERR(mmc_dev(mmc), "Enable mmc clk err %d\n", rval);
			return -1;
		}
	} else {
		clk_disable_unprepare(host->clk_mmc);
		clk_disable_unprepare(host->clk_ahb);
		if (!IS_ERR(host->clk_rst))
			reset_control_assert(host->clk_rst);
	}
	return 0;
}

static void sunxi_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sunxi_mmc_host *host = mmc_priv(mmc);
	u32 rval;
	static const char * const bus_mode[] = { "", "OD", "PP" };
	static const char * const pwr_mode[] = { "OFF", "UP", "ON", "udef" };
	static const char * const timing[] = {
		"LEGACY(SDR12)", "MMC-HS(SDR20)", "SD-HS(SDR25)", "UHS-SDR12",
		"UHS-SDR25",
		"UHS-SDR50", "UHS-SDR104", "UHS-DDR50", "MMC-DDR52",
		"MMC-HS200", "MMC-HS400"
	};
	static const char * const drv_type[] = { "B", "A", "C", "D" };

	WARN_ON(ios->bus_mode >= ARRAY_SIZE(bus_mode));
	WARN_ON(ios->power_mode >= ARRAY_SIZE(pwr_mode));
	WARN_ON(ios->timing >= ARRAY_SIZE(timing));
	SM_INFO(mmc_dev(mmc),
		"sdc set ios:clk %dHz bm %s pm %s vdd %d width %d timing %s dt %s\n",
		ios->clock, bus_mode[ios->bus_mode],
		pwr_mode[ios->power_mode], ios->vdd,
		1 << ios->bus_width, timing[ios->timing],
		drv_type[ios->drv_type]);

	/* Set the power state */
	switch (ios->power_mode) {
	case MMC_POWER_ON:
		break;

	case MMC_POWER_UP:
		if (host->power_on)
			break;

		if (!IS_ERR(mmc->supply.vmmc)) {
			rval = sunxi_mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
							   ios->vdd);
			if (rval)
				return;
		}
		if (!IS_ERR(mmc->supply.vqmmc)) {
			rval = regulator_enable(mmc->supply.vqmmc);
			if (rval < 0) {
				SM_ERR(mmc_dev(mmc),
				       "failed to enable vqmmc regulator\n");
				return;
			}
		}

		if (!IS_ERR(host->supply.vqmmc33sw)) {
			rval = regulator_enable(host->supply.vqmmc33sw);
			if (rval < 0) {
				SM_ERR(mmc_dev(mmc),
				       "failed to enable vqmmc33sw regulator\n");
				return;
			}
		}
		if (!IS_ERR(host->supply.vqmmc18sw)) {
			rval = regulator_enable(host->supply.vqmmc18sw);
			if (rval < 0) {
				SM_ERR(mmc_dev(mmc),
				       "failed to enable vqmmc18sw regulator\n");
				return;
			}
		}

		if (gpio_is_valid(host->card_pwr_gpio)) {
			if (!IS_ERR(host->pins_sleep)) {
				rval = pinctrl_select_state(host->pinctrl,
							    host->pins_sleep);
				if (rval) {
					SM_ERR(mmc_dev(mmc),
					       "could not set sleep pins\n");
					return;
				}
			}
			gpio_set_value(host->card_pwr_gpio,
				       (host->ctl_spec_cap &
					CARD_PWR_GPIO_HIGH_ACTIVE) ? 0 : 1);
			msleep(host->time_pwroff_ms);
			gpio_set_value(host->card_pwr_gpio,
				       (host->ctl_spec_cap &
					CARD_PWR_GPIO_HIGH_ACTIVE) ? 1 : 0);
			/* delay to ensure voltage stability */
			msleep(1);
		}

		if (!IS_ERR(host->pins_default)) {
			rval = pinctrl_select_state(host->pinctrl,
						    host->pins_default);
			if (rval) {
				SM_ERR(mmc_dev(mmc),
				       "could not set default pins\n");
				return;
			}
		}

		if (!IS_ERR(host->clk_rst)) {
			rval = reset_control_deassert(host->clk_rst);
			if (rval) {
				SM_ERR(mmc_dev(mmc), "reset err %d\n", rval);
				return;
			}
		}

		rval = clk_prepare_enable(host->clk_ahb);
		if (rval) {
			SM_ERR(mmc_dev(mmc), "Enable ahb clk err %d\n", rval);
			return;
		}
		rval = clk_prepare_enable(host->clk_mmc);
		if (rval) {
			SM_ERR(mmc_dev(mmc), "Enable mmc clk err %d\n", rval);
			return;
		}

		host->ferror = sunxi_mmc_init_host(mmc);
		if (host->ferror)
			return;

		enable_irq(host->irq);

		host->power_on = 1;
		SM_DBG(mmc_dev(mmc), "power on!\n");
		break;

	case MMC_POWER_OFF:
		if (!host->power_on || host->dat3_imask)
			break;

		disable_irq(host->irq);
		sunxi_mmc_reset_host(host);

		clk_disable_unprepare(host->clk_mmc);
		clk_disable_unprepare(host->clk_ahb);

		if (!IS_ERR(host->clk_rst))
			reset_control_assert(host->clk_rst);

		if (!IS_ERR(host->pins_sleep)) {
			rval = pinctrl_select_state(host->pinctrl,
						    host->pins_sleep);
			if (rval) {
				SM_ERR(mmc_dev(mmc),
				       "could not set sleep pins\n");
				return;
			}
		}

		if (gpio_is_valid(host->card_pwr_gpio)) {
			gpio_set_value(host->card_pwr_gpio,
				       (host->ctl_spec_cap &
					CARD_PWR_GPIO_HIGH_ACTIVE) ? 0 : 1);
			msleep(host->time_pwroff_ms);
		}

		if (!IS_ERR(host->pins_uart_jtag)) {
			rval = pinctrl_select_state(host->pinctrl,
						    host->pins_uart_jtag);
			if (rval) {
				SM_ERR(mmc_dev(mmc),
				       "could not set uart_jtag pins\n");
				return;
			}
		}

		if (!IS_ERR(host->supply.vqmmc18sw)) {
			rval = regulator_disable(host->supply.vqmmc18sw);
			if (rval) {
				SM_ERR(mmc_dev(mmc),
				       "Could not disable vqmmc18sw\n");
				return;
			}
		}

		/* SD PMU control */
		if (!IS_ERR(host->supply.vqmmc33sw)) {
			rval = regulator_disable(host->supply.vqmmc33sw);
			if (rval) {
				SM_ERR(mmc_dev(mmc),
				       "Could not disable vqmmc33sw\n");
				return;
			}
		}

		if (!IS_ERR(mmc->supply.vqmmc)) {
			rval = regulator_disable(mmc->supply.vqmmc);
			if (rval) {
				SM_ERR(mmc_dev(mmc),
				       "Could not disable vqmmc\n");
				return;
			}
		}

		if (!IS_ERR(mmc->supply.vmmc)) {
			rval = sunxi_mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
			if (rval)
				return;
		}

		host->power_on = 0;
		SM_DBG(mmc_dev(mmc), "power off!\n");
		break;
	}

	/* set bus width */
	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_1:
		mmc_writel(host, REG_WIDTH, SDXC_WIDTH1);
		break;
	case MMC_BUS_WIDTH_4:
		mmc_writel(host, REG_WIDTH, SDXC_WIDTH4);
		break;
	case MMC_BUS_WIDTH_8:
		mmc_writel(host, REG_WIDTH, SDXC_WIDTH8);
		break;
	}

	SM_DBG(mmc_dev(host->mmc), "REG_WIDTH: 0x%08x\n",
	       mmc_readl(host, REG_WIDTH));

	/* set ddr mode */
	if (host->power_on && ios->clock) {
		/* when changing ddr mode, mclk must be disabled first */
		clk_disable_unprepare(host->clk_mmc);
		rval = mmc_readl(host, REG_GCTRL);
		if (sunxi_mmc_ddr_timing(ios->timing))
			rval |= SDXC_DDR_MODE;
		else
			rval &= ~SDXC_DDR_MODE;
		mmc_writel(host, REG_GCTRL, rval);
		SM_DBG(mmc_dev(host->mmc), "REG_GCTRL: 0x%08x\n",
		       mmc_readl(host, REG_GCTRL));
		rval = clk_prepare_enable(host->clk_mmc);
		if (rval) {
			SM_ERR(mmc_dev(mmc), "Enable mmc clk err %d\n", rval);
			return;
		}
	}
	/* set up clock */
	if (ios->power_mode && host->sunxi_mmc_clk_set_rate) {
		host->ferror = host->sunxi_mmc_clk_set_rate(host, ios);
		/* Android code had a usleep_range(50000, 55000); here */
	}
}

static void sunxi_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct sunxi_mmc_host *host = mmc_priv(mmc);
	unsigned long flags;
	u32 imask;

	spin_lock_irqsave(&host->lock, flags);

	imask = mmc_readl(host, REG_IMASK);
	if (enable) {
		host->sdio_imask = SDXC_SDIO_INTERRUPT;
		imask |= SDXC_SDIO_INTERRUPT;
	} else {
		host->sdio_imask = 0;
		imask &= ~SDXC_SDIO_INTERRUPT;
	}
	mmc_writel(host, REG_IMASK, imask);
	spin_unlock_irqrestore(&host->lock, flags);
}

static void sunxi_mmc_hw_reset(struct mmc_host *mmc)
{
	struct sunxi_mmc_host *host = mmc_priv(mmc);

	mmc_writel(host, REG_HWRST, 0);
	udelay(10);
	mmc_writel(host, REG_HWRST, 1);
	udelay(300);
}
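
/*
 * Timing note (assumption, not from the original source): the 10 us low
 * pulse and the 300 us wait after release are presumably chosen to satisfy
 * the eMMC RST_n timing (at least 1 us low, then a wait before the next
 * command) with a generous margin.
 */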

static int sunxi_mmc_signal_voltage_switch(struct mmc_host *mmc,
					   struct mmc_ios *ios)
{
	int ret = 0;
	struct regulator *vqmmc = mmc->supply.vqmmc;
	struct device_node *np = NULL;
	bool disable_vol_switch = false;
	bool vol_switch_without_pmu = false;
	struct sunxi_mmc_host *host = mmc_priv(mmc);

	if (!mmc->parent || !mmc->parent->of_node) {
		SM_ERR(mmc_dev(mmc),
		       "no dts to parse signal switch fun, use default\n");
		return 0;
	}

	np = mmc->parent->of_node;
	disable_vol_switch =
		of_property_read_bool(np, "sunxi-dis-signal-vol-sw");

#if IS_ENABLED(CONFIG_REGULATOR)
	vol_switch_without_pmu = true;
#else
	vol_switch_without_pmu =
		of_property_read_bool(np, "sunxi-signal-vol-sw-without-pmu");
#endif
	/*
	 * For some eMMC parts the io voltage is fixed at 1.8 V (or another
	 * voltage), so we cannot switch the io voltage.
	 */
	/*
	 * Because the mmc core changes the io voltage to 3.3 V on power up,
	 * we must disable voltage switching in that case.
	 */
	SM_DBG(mmc_dev(mmc), "%d,%d\n", disable_vol_switch, vol_switch_without_pmu);

	if (disable_vol_switch || (!vol_switch_without_pmu)) {
		SM_DBG(mmc_dev(mmc), "disable signal voltage-switch\n");
		return 0;
	}

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:
		if (!IS_ERR(vqmmc)) {
			ret = regulator_set_voltage(vqmmc, 3300000, 3300000);
			if (ret) {
				SM_ERR(mmc_dev(mmc),
				       "Switching to 3.3V signalling voltage failed\n");
				return -EIO;
			} else
				SM_INFO(mmc_dev(mmc),
					"Switching to 3.3V signalling voltage ok\n");
		} else {
			SM_INFO(mmc_dev(mmc),
				"no vqmmc, check if there is a regulator\n");
		}

		if (!IS_ERR(host->pins_default)) {
			ret = pinctrl_select_state(host->pinctrl, host->pins_default);
			if (ret)
				SM_WARN(mmc_dev(mmc), "Cannot select 3.3v pio mode\n");
			else
				SM_DBG(mmc_dev(mmc), "select 3.3v pio mode\n");
		}

		return 0;
	case MMC_SIGNAL_VOLTAGE_180:
		if (!IS_ERR(vqmmc)) {
			ret = regulator_set_voltage(vqmmc, 1800000, 1800000);
			if (ret) {
				SM_ERR(mmc_dev(mmc),
				       "Switching to 1.8V signalling voltage failed\n");
				return -EIO;
			} else
				SM_INFO(mmc_dev(mmc),
					"Switching to 1.8V signalling voltage ok\n");
		} else {
			SM_INFO(mmc_dev(mmc),
				"no vqmmc, check if there is a regulator\n");
		}

		if (!IS_ERR(host->pins_bias_1v8)) {
			ret = pinctrl_select_state(host->pinctrl, host->pins_bias_1v8);
			if (ret)
				SM_WARN(mmc_dev(mmc), "Cannot select 1.8v pio mode\n");
			else
				SM_DBG(mmc_dev(mmc), "select 1.8v pio mode\n");
			/*
			 * {
			 *	int v = 0;
			 *	sunxi_remap_readl(0x300b000 + 0x340, &v);
			 *	sunxi_remap_writel(0x300b000 + 0x340, v | (1 << 5));
			 *	sunxi_dump_reg(mmc);
			 * }
			 */
		}

		return 0;
	case MMC_SIGNAL_VOLTAGE_120:
		if (!IS_ERR(vqmmc)) {
			ret = regulator_set_voltage(vqmmc, 1200000, 1200000);
			if (ret) {
				SM_ERR(mmc_dev(mmc),
				       "Switching to 1.2V signalling voltage failed\n");
				return -EIO;
			}
		} else {
			SM_INFO(mmc_dev(mmc),
				"no vqmmc, check if there is a regulator\n");
			return 0;
		}

		SM_ERR(mmc_dev(mmc), "*************Cannot support 1.2v now*************\n");

		return 0;
	default:
		/* No signal voltage switch required */
		SM_ERR(mmc_dev(mmc),
		       "unknown signal voltage switch request %x\n",
		       ios->signal_voltage);
		return -1;
	}
}
1846
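/*
 * Poll the card busy state.  Outside of a CMD11 voltage switch this
 * simply reflects the data-busy bit in REG_STAS; during a voltage
 * switch it instead waits up to 10ms for the controller's
 * "switch done" interrupt bit before reporting the card ready.
 */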
static int __sunxi_mmc_card_busy(struct mmc_host *mmc)
{
	u32 data_down;
	struct sunxi_mmc_host *host = mmc_priv(mmc);
	unsigned long expire = jiffies + msecs_to_jiffies(10);
	u32 rval;
	u32 ret;

	if (host->voltage_switching == 0) {
		ret = mmc_readl(host, REG_STAS) & SDXC_CARD_DATA_BUSY;
	} else {
		/*only the cmd11 voltage switch process comes here*/
		data_down = mmc_readl(host, REG_STAS);
		/* check data[3:0] */
		if ((data_down & SDXC_CARD_PRESENT)) {
			SM_DBG(mmc_dev(mmc), "now is present\n");
			/*wait for the voltage switch to finish*/
			do {
				rval = mmc_readl(host, REG_RINTR);
			} while (time_before(jiffies, expire) && ((rval & SDXC_SWITCH_DDONE_BIT) != (SDXC_SWITCH_DDONE_BIT)));

			host->voltage_switching = 0;
			mmc_writel(host, REG_RINTR, SDXC_SWITCH_DDONE_BIT);
			ret = ((rval & SDXC_SWITCH_DDONE_BIT) == SDXC_SWITCH_DDONE_BIT) ? 0 : 1;
		} else {
			SM_DBG(mmc_dev(mmc), "card is not present\n");
			ret = (!(data_down & SDXC_CARD_PRESENT));
		}
	}

	return ret;
}

static int sunxi_mmc_card_busy(struct mmc_host *mmc)
{
	struct sunxi_mmc_host *host = mmc_priv(mmc);

	if (!host->sunxi_mmc_dat0_busy) {
		/*host controller supports busy detect*/
		return __sunxi_mmc_card_busy(mmc);
	} else {
		/*host controller doesn't support busy detect;
		 *used on the v4p10x version of the driver
		 */
		return host->sunxi_mmc_dat0_busy(host);
	}
}

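/*
 * Translate an mmc_command into the controller command word (*cval)
 * and the interrupt mask to arm (*im).  *wdma is set for read
 * transfers so the caller also waits for the DMA completion
 * interrupt before finishing the request.
 */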
static void sunxi_mmc_parse_cmd(struct mmc_host *mmc, struct mmc_command *cmd,
				u32 *cval, u32 *im, bool *wdma)
{
	bool wait_dma = false;
	u32 imask = SDXC_INTERRUPT_ERROR_BIT;
	u32 cmd_val = SDXC_START | (cmd->opcode & 0x3f);
	struct sunxi_mmc_host *host = mmc_priv(mmc);

	if (cmd->opcode == MMC_GO_IDLE_STATE) {
		cmd_val |= SDXC_SEND_INIT_SEQUENCE;
		imask |= SDXC_COMMAND_DONE;
	}

	if (cmd->opcode == SD_SWITCH_VOLTAGE) {
		cmd_val |= SDXC_VOLTAGE_SWITCH;
		imask |= SDXC_VOLTAGE_CHANGE_DONE;
		host->voltage_switching = 1;
		/* switch controller to high power mode */
		if (host->sunxi_mmc_oclk_en) {
			host->sunxi_mmc_oclk_en(host, 1);
		} else {
			/*if the definition of sunxi_mmc_oclk_en is missing,
			 *the cmd11 process cannot be executed, because the
			 *controller must be switched to high power mode
			 *before the cmd11 process.
			 */
			SM_ERR(mmc_dev(mmc), "the definition of sunxi_mmc_oclk_en is missing\n");
		}
	}

	if (cmd->flags & MMC_RSP_PRESENT) {
		cmd_val |= SDXC_RESP_EXPECT;
		if (cmd->flags & MMC_RSP_136)
			cmd_val |= SDXC_LONG_RESPONSE;
		if (cmd->flags & MMC_RSP_CRC)
			cmd_val |= SDXC_CHECK_RESPONSE_CRC;

		if ((cmd->flags & MMC_CMD_MASK) == MMC_CMD_ADTC) {
			cmd_val |= SDXC_DATA_EXPECT | SDXC_WAIT_PRE_OVER;
			if (cmd->data->flags & MMC_DATA_STREAM) {
				imask |= SDXC_AUTO_COMMAND_DONE;
				cmd_val |= SDXC_SEQUENCE_MODE |
				    SDXC_SEND_AUTO_STOP;
			}

			if ((cmd->mrq->sbc == NULL) && cmd->data->stop) {
				if (host->sunxi_mmc_hw_wbusy_wait) {
					if (cmd->data->flags & MMC_DATA_READ)
						imask |= SDXC_AUTO_COMMAND_DONE;
				} else
					imask |= SDXC_AUTO_COMMAND_DONE;
				cmd_val |= SDXC_SEND_AUTO_STOP;
			} else {
				if (host->sunxi_mmc_hw_wbusy_wait) {
					if (cmd->data->flags & MMC_DATA_READ)
						imask |= SDXC_DATA_OVER;
				} else
					imask |= SDXC_DATA_OVER;
			}

			if (cmd->data->flags & MMC_DATA_WRITE)
				cmd_val |= SDXC_WRITE;
			else if (cmd->data->flags & MMC_DATA_READ)
				wait_dma = true;
			else
				SM_ERR(mmc_dev(mmc),
				       "unsupported cmd->data->flags %x\n",
				       cmd->data->flags);
		} else {
			imask |= SDXC_COMMAND_DONE;
		}
	} else {
		imask |= SDXC_COMMAND_DONE;
	}
	*im = imask;
	*cval = cmd_val;
	*wdma = wait_dma;
}

static int sunxi_mmc_set_dat(struct sunxi_mmc_host *host, struct mmc_host *mmc,
			     struct mmc_data *data, bool atomic)
{
	struct mmc_command *sbc = data->mrq->sbc;
	int ret = 0;

	mmc_writel(host, REG_BLKSZ, data->blksz);
	mmc_writel(host, REG_BCNTR, data->blksz * data->blocks);
	if (host->sunxi_mmc_thld_ctl)
		host->sunxi_mmc_thld_ctl(host, &mmc->ios, data);
	if (host->sunxi_mmc_hw_wbusy_wait)
		host->sunxi_mmc_hw_wbusy_wait(host, data, true);
	ret = sunxi_mmc_start_dma(host, data, atomic);
	if (ret)
		return -EBUSY;
	if (host->sunxi_mmc_opacmd23 && sbc)
		host->sunxi_mmc_opacmd23(host, true, sbc->arg, NULL);
	return 0;
}

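/*
 * Finally kick the command: program the interrupt mask and argument,
 * then write the command register.  The wmb() ensures the mask and
 * argument are visible to the controller before the START bit in
 * REG_CMDR triggers execution.
 */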
static void sunxi_mmc_exe_cmd(struct sunxi_mmc_host *host,
			      struct mmc_command *cmd, u32 cmd_val, u32 imask)
{
	u32 rval = 0;

	if (host->dat3_imask) {
		rval = mmc_readl(host, REG_GCTRL);
		rval &= ~SDXC_DEBOUNCE_ENABLE_BIT;
		mmc_writel(host, REG_GCTRL, rval);
	}
	mmc_writel(host, REG_IMASK,
		   host->sdio_imask | host->dat3_imask | imask);
	mmc_writel(host, REG_CARG, cmd->arg);
	/*********************************************************/
	wmb();
	mmc_writel(host, REG_CMDR, cmd_val);
}

static void sunxi_mmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
			       int err)
{
	struct mmc_data *data = mrq->data;
	/*SM_DBG(mmc_dev(mmc), "in post request\n");*/

	if (data->host_cookie != COOKIE_UNMAPPED) {
		dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len,
			     sunxi_mmc_get_dma_dir(data));
		data->host_cookie = COOKIE_UNMAPPED;
		SM_DBG(mmc_dev(mmc), "post request %p\n", data);
	}
}

static void sunxi_mmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sunxi_mmc_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	data->host_cookie = COOKIE_UNMAPPED;

	sunxi_mmc_map_dma(host, data, COOKIE_PRE_MAPPED);
	SM_DBG(mmc_dev(mmc), "prepare request %p\n", data);
}

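/*
 * .request handler: maps the data for DMA, converts the command via
 * sunxi_mmc_parse_cmd(), refuses to start while another request is
 * still in flight, optionally arms a timeout worker, and fires the
 * command.  When the variant code has no hardware CMD23 support, the
 * sbc is sent as a normal command first.
 */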
static void sunxi_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sunxi_mmc_host *host = mmc_priv(mmc);
	struct mmc_command *cmd = (mrq->sbc && !host->sunxi_mmc_opacmd23) ? mrq->sbc : mrq->cmd;
	struct mmc_data *data = mrq->data;
	unsigned long iflags;
	bool wait_dma = false;
	u32 imask = 0;
	u32 cmd_val = 0;
	u32 arg_pwr_off = ((MMC_SWITCH_MODE_WRITE_BYTE << 24) |
			   (EXT_CSD_POWER_OFF_NOTIFICATION << 16) |
			   (EXT_CSD_POWER_ON << 8) |
			   (EXT_CSD_CMD_SET_NORMAL << 0));
	int crypt_flags = 0;
	struct scatterlist *sg = NULL;
	int ret;
	int work_timeout;

	if (cmd->opcode == SD_SWITCH_VOLTAGE && host->ctl_spec_cap & SUNXI_CMD11_TIMEOUT_DETECT) {
		INIT_DELAYED_WORK(&host->sunxi_timerout_work, sunxi_cmd11_timerout_handle);
		work_timeout = SUNXI_CMD11_TIMEOUT;
	} else if (host->ctl_spec_cap & SUNXI_SC_EN_TIMEOUT_DETECT) {
		INIT_DELAYED_WORK(&host->sunxi_timerout_work, sunxi_timerout_handle);
		work_timeout = SUNXI_TRANS_TIMEOUT;
	}

	/* Check for set_ios errors (should never happen) */
	if (host->ferror) {
		mrq->cmd->error = host->ferror;
		sunxi_mmc_request_done(mmc, mrq);
		return;
	}

	/*prevent the card from switching to power_off_notification:0x01
	 *(POWERED_ON), which cannot tolerate a sudden power failure
	 */
	if ((cmd->opcode == MMC_SWITCH) && (cmd->arg == arg_pwr_off)) {
		SM_ERR(mmc_dev(mmc), "avoid switching power_off_notification to POWERED_ON(0x01)\n");
		mrq->cmd->error = -EBADMSG;
		sunxi_mmc_request_done(mmc, mrq);
		return;
	}

	if (host->sunxi_mmc_on_off_emce) {
		if (data && data->sg) {
			sg = data->sg;
			crypt_flags = sunxi_crypt_flags(sg);
			if (crypt_flags)
				sg->offset = sunxi_clear_crypt_flags(sg);
		}
	}

	if (data) {
		ret = sunxi_mmc_map_dma(host, data, COOKIE_MAPPED);
		if (ret < 0) {
			SM_ERR(mmc_dev(mmc), "map DMA failed\n");
			cmd->error = ret;
			data->error = ret;
			sunxi_mmc_request_done(mmc, mrq);
			return;
		}
	}

	sunxi_mmc_parse_cmd(mmc, cmd, &cmd_val, &imask, &wait_dma);

	/*
	 *if (host->ctl_spec_cap & SUNXI_SC_EN_TIMEOUT_DETECT)
	 *	cancel_delayed_work_sync(&host->sunxi_timerout_work);
	 */

	SM_DBG(mmc_dev(mmc), "cmd %d(%08x) arg %x ie 0x%08x len %d\n",
	       cmd_val & 0x3f, cmd_val, cmd->arg, imask,
	       mrq->data ? mrq->data->blksz * mrq->data->blocks : 0);

	spin_lock_irqsave(&host->lock, iflags);

	if (host->mrq || host->manual_stop_mrq
	    || host->mrq_busy || host->mrq_retry) {
		spin_unlock_irqrestore(&host->lock, iflags);

		if (data) {
			dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len,
				     sunxi_mmc_get_dma_dir(data));
			data->host_cookie = COOKIE_UNMAPPED;
		}

		SM_ERR(mmc_dev(mmc), "request already pending, %px,%px,%px %px\n", host->mrq,
		       host->manual_stop_mrq, host->mrq_busy,
		       host->mrq_retry);
		dump_stack();
		mrq->cmd->error = -EBUSY;
		sunxi_mmc_request_done(mmc, mrq);
		return;
	}

	if (host->sunxi_mmc_updata_pha) {
		spin_unlock_irqrestore(&host->lock, iflags);
		host->sunxi_mmc_updata_pha(host, cmd, data);
		spin_lock_irqsave(&host->lock, iflags);
	}

	if (host->sunxi_mmc_on_off_emce) {
		if (data && (mmc->card) && crypt_flags) {
			SM_DBG(mmc_dev(mmc), "emce is enabled\n");
			host->sunxi_mmc_on_off_emce(host, 1,
						    !mmc_card_blockaddr(mmc->card), 1,
						    data->blksz * data->blocks, 0, 1);
			host->crypt_flag = crypt_flags;
		}
	}

	if (data) {
		if (host->perf_enable && cmd->data)
			host->perf.start = ktime_get();
		spin_unlock_irqrestore(&host->lock, iflags);
		sunxi_mmc_set_dat(host, mmc, data, false);
		spin_lock_irqsave(&host->lock, iflags);
	}

	host->mrq = mrq;
	host->wait_dma = wait_dma;

	if ((cmd->opcode == SD_SWITCH_VOLTAGE && (host->ctl_spec_cap & SUNXI_CMD11_TIMEOUT_DETECT))
	    || (host->ctl_spec_cap & SUNXI_SC_EN_TIMEOUT_DETECT))
		queue_delayed_work(system_wq,
				   &host->sunxi_timerout_work,
				   work_timeout);
	sunxi_mmc_exe_cmd(host, cmd, cmd_val, imask);
	/******************************************************/
	smp_wmb();
	spin_unlock_irqrestore(&host->lock, iflags);
}

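/*
 * Atomic variant of the request handler used by the hsq path.  It
 * must not sleep: DMA mapping is expected to have happened in
 * .pre_req, and anything that would block (such as a phase update)
 * returns -EBUSY so the caller can retry from non-atomic context.
 */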
static int sunxi_mmc_request_atomic(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sunxi_mmc_host *host = mmc_priv(mmc);
	struct mmc_command *cmd = (mrq->sbc && !host->sunxi_mmc_opacmd23) ? mrq->sbc : mrq->cmd;
	struct mmc_data *data = mrq->data;
	unsigned long iflags;
	bool wait_dma = false;
	u32 imask = 0;
	u32 cmd_val = 0;
	u32 arg_pwr_off = ((MMC_SWITCH_MODE_WRITE_BYTE << 24) |
			   (EXT_CSD_POWER_OFF_NOTIFICATION << 16) |
			   (EXT_CSD_POWER_ON << 8) |
			   (EXT_CSD_CMD_SET_NORMAL << 0));
	int crypt_flags = 0;
	struct scatterlist *sg = NULL;
	int ret;
	int work_timeout;

	if (cmd->opcode == SD_SWITCH_VOLTAGE && host->ctl_spec_cap & SUNXI_CMD11_TIMEOUT_DETECT) {
		INIT_DELAYED_WORK(&host->sunxi_timerout_work, sunxi_cmd11_timerout_handle);
		work_timeout = SUNXI_CMD11_TIMEOUT;
	} else if (host->ctl_spec_cap & SUNXI_SC_EN_TIMEOUT_DETECT) {
		INIT_DELAYED_WORK(&host->sunxi_timerout_work, sunxi_timerout_handle);
		work_timeout = SUNXI_TRANS_TIMEOUT;
	}

	/* Check for set_ios errors (should never happen) */
	if (host->ferror) {
		mrq->cmd->error = host->ferror;
		sunxi_mmc_request_done(mmc, mrq);
		return -EIO;
	}

	/*prevent the card from switching to power_off_notification:0x01
	 *(POWERED_ON), which cannot tolerate a sudden power failure
	 */
	if ((cmd->opcode == MMC_SWITCH) && (cmd->arg == arg_pwr_off)) {
		SM_ERR(mmc_dev(mmc), "avoid switching power_off_notification to POWERED_ON(0x01)\n");
		mrq->cmd->error = -EBADMSG;
		sunxi_mmc_request_done(mmc, mrq);
		return -EBUSY;
	}

	if (host->sunxi_mmc_on_off_emce) {
		if (data && data->sg) {
			sg = data->sg;
			crypt_flags = sunxi_crypt_flags(sg);
			if (crypt_flags)
				sg->offset = sunxi_clear_crypt_flags(sg);
		}
	}

	/*
	if (data) {
		ret = sunxi_mmc_map_dma(host, data, COOKIE_MAPPED);
		if (ret < 0) {
			SM_ERR(mmc_dev(mmc), "map DMA failed\n");
			cmd->error = ret;
			data->error = ret;
			sunxi_mmc_request_done(mmc, mrq);
			return;
		}
	}
	*/

	sunxi_mmc_parse_cmd(mmc, cmd, &cmd_val, &imask, &wait_dma);

	/*
	 *if (host->ctl_spec_cap & SUNXI_SC_EN_TIMEOUT_DETECT)
	 *	cancel_delayed_work_sync(&host->sunxi_timerout_work);
	 */

	SM_DBG(mmc_dev(mmc), "hsq cmd %d(%08x) arg %x ie 0x%08x len %d\n",
	       cmd_val & 0x3f, cmd_val, cmd->arg, imask,
	       mrq->data ? mrq->data->blksz * mrq->data->blocks : 0);

	spin_lock_irqsave(&host->lock, iflags);

	if (host->mrq || host->manual_stop_mrq
	    || host->mrq_busy || host->mrq_retry) {
		spin_unlock_irqrestore(&host->lock, iflags);
		/*
		if (data) {
			dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len,
				     sunxi_mmc_get_dma_dir(data));
			data->host_cookie = COOKIE_UNMAPPED;
		}
		*/

		SM_ERR(mmc_dev(mmc), "request already pending, %px,%px,%px %px\n", host->mrq,
		       host->manual_stop_mrq, host->mrq_busy,
		       host->mrq_retry);
		dump_stack();
		mrq->cmd->error = -EBUSY;
		sunxi_mmc_request_done(mmc, mrq);
		return -EBUSY;
	}

	if (host->sunxi_mmc_updata_pha) {
		SM_DBG(mmc_dev(mmc), "do not update phase here\n");
		spin_unlock_irqrestore(&host->lock, iflags);
		return -EBUSY;

		/*
		spin_unlock_irqrestore(&host->lock, iflags);
		host->sunxi_mmc_updata_pha(host, cmd, data);
		spin_lock_irqsave(&host->lock, iflags);
		*/
	}

	if (host->sunxi_mmc_on_off_emce) {
		if (data && (mmc->card) && crypt_flags) {
			SM_DBG(mmc_dev(mmc), "emce is enabled\n");
			host->sunxi_mmc_on_off_emce(host, 1,
						    !mmc_card_blockaddr(mmc->card), 1,
						    data->blksz * data->blocks, 0, 1);
			host->crypt_flag = crypt_flags;
		}
	}

	if (data) {
		if (host->perf_enable && cmd->data)
			host->perf.start = ktime_get();
		SM_DBG(mmc_dev(mmc), "set data\n");
		ret = sunxi_mmc_set_dat(host, mmc, data, true);
		if (ret) {
			spin_unlock_irqrestore(&host->lock, iflags);
			return -EBUSY;
		}
	}

	host->mrq = mrq;
	host->wait_dma = wait_dma;

	if ((cmd->opcode == SD_SWITCH_VOLTAGE && (host->ctl_spec_cap & SUNXI_CMD11_TIMEOUT_DETECT))
	    || (host->ctl_spec_cap & SUNXI_SC_EN_TIMEOUT_DETECT))
		queue_delayed_work(system_wq,
				   &host->sunxi_timerout_work,
				   work_timeout);
	sunxi_mmc_exe_cmd(host, cmd, cmd_val, imask);
	/******************************************************/
	smp_wmb();
	spin_unlock_irqrestore(&host->lock, iflags);
	return 0;
}

/*we use our own mmc_regulator_get_supply because
 *our platform's regulator framework does not support supply names;
 */
/*it only supports regulator IDs,
 *while linux mmc's own mmc_regulator_get_supply uses supply names
 */
static int sunxi_mmc_regulator_get_supply(struct mmc_host *mmc)
{
	struct device *dev = mmc_dev(mmc);
	int ret = 0;
	struct sunxi_mmc_host *host = mmc_priv(mmc);

	mmc->supply.vmmc = regulator_get_optional(dev, "vmmc");
	mmc->supply.vqmmc = regulator_get_optional(dev, "vqmmc");
	host->supply.vdmmc = regulator_get_optional(dev, "vdmmc");
	host->supply.vdmmc33sw = regulator_get_optional(dev, "vdmmc33sw");
	host->supply.vdmmc18sw = regulator_get_optional(dev, "vdmmc18sw");
	host->supply.vqmmc33sw = regulator_get_optional(dev, "vqmmc33sw");
	host->supply.vqmmc18sw = regulator_get_optional(dev, "vqmmc18sw");

	if (IS_ERR(mmc->supply.vmmc)) {
		SM_INFO(dev, "No vmmc regulator found\n");
	} else {
		ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc);
		if (ret > 0)
			mmc->ocr_avail = ret;
		else
			SM_WARN(dev, "Failed getting OCR mask: %d\n", ret);
	}

	if (IS_ERR(mmc->supply.vqmmc))
		SM_INFO(dev, "No vqmmc regulator found\n");

	if (IS_ERR(host->supply.vdmmc))
		SM_INFO(dev, "No vdmmc regulator found\n");

	if (IS_ERR(host->supply.vdmmc33sw))
		SM_INFO(dev, "No vd33sw regulator found\n");

	if (IS_ERR(host->supply.vdmmc18sw))
		SM_INFO(dev, "No vd18sw regulator found\n");

	if (IS_ERR(host->supply.vqmmc33sw))
		SM_INFO(dev, "No vq33sw regulator found\n");

	if (IS_ERR(host->supply.vqmmc18sw))
		SM_INFO(dev, "No vq18sw regulator found\n");

	return 0;
}

/*Because our regulator driver does not support binding to the device tree,
 *we can not bind it to our dev
 *(for example regulator_get(dev, reg_str[0])
 *or devm_regulator_get(dev, reg_str[0]))
 */
/*so we must release it manually*/
static void sunxi_mmc_regulator_release_supply(struct mmc_host *mmc)
{
	struct sunxi_mmc_host *host = mmc_priv(mmc);

	if (!IS_ERR(host->supply.vdmmc18sw))
		regulator_put(host->supply.vdmmc18sw);

	if (!IS_ERR(host->supply.vdmmc33sw))
		regulator_put(host->supply.vdmmc33sw);

	if (!IS_ERR(host->supply.vdmmc))
		regulator_put(host->supply.vdmmc);

	if (!IS_ERR(host->supply.vqmmc18sw))
		regulator_put(host->supply.vqmmc18sw);

	if (!IS_ERR(host->supply.vqmmc33sw))
		regulator_put(host->supply.vqmmc33sw);

	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_put(mmc->supply.vqmmc);

	if (!IS_ERR(mmc->supply.vmmc))
		regulator_put(mmc->supply.vmmc);
}

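/*
 * Card-detect callback.  In gpio-irq mode it simply forwards to
 * mmc_gpio_get_cd().  In polling mode with SUNXI_DIS_KER_NAT_CD set,
 * it samples the cd gpio five times about 1ms apart and accepts a
 * new state only when all samples agree, debouncing the pin in
 * software; it then reports 1 only on an actual state change.
 */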
static int sunxi_mmc_gpio_get_cd(struct mmc_host *mmc)
{
	u32 present = 0;
	int i = 0;
	int gpio_val = 0;
	struct sunxi_mmc_host *host = mmc_priv(mmc);

	if (!(mmc->caps & MMC_CAP_NEEDS_POLL)
	    || ((mmc->caps & MMC_CAP_NEEDS_POLL)
		&& !(host->ctl_spec_cap & SUNXI_DIS_KER_NAT_CD)))
		return mmc_gpio_get_cd(mmc);

	for (i = 0; i < 5; i++) {
		gpio_val += mmc_gpio_get_cd(mmc);
		usleep_range(1000, 1500);
	}

	if (gpio_val == 5)
		present = 1;
	else if (gpio_val == 0)
		present = 0;

	pr_debug("*%s %s %d %d*\n", mmc_hostname(mmc),
		 __func__, __LINE__, present);
	/*return true only when the cd pin state has changed*/
	if (host->present ^ present) {
		host->present = present;
		pr_debug("*%s %s %d h%d*\n", mmc_hostname(mmc),
			 __func__, __LINE__, host->present);
		return 1;
	}

	return 0;
}

static const struct of_device_id sunxi_mmc_of_match[] = {
	{.compatible = "allwinner,sun4i-a10-mmc",},
	{.compatible = "allwinner,sun5i-a13-mmc",},
	{.compatible = "allwinner,sun8iw10p1-sdmmc3",},
	{.compatible = "allwinner,sun8iw10p1-sdmmc1",},
	{.compatible = "allwinner,sun8iw10p1-sdmmc0",},
	{.compatible = "allwinner,sun50i-sdmmc2",},
	{.compatible = "allwinner,sun50i-sdmmc1",},
	{.compatible = "allwinner,sun50i-sdmmc0",},
	{.compatible = "allwinner,sunxi-mmc-v4p1x",},
	{.compatible = "allwinner,sunxi-mmc-v4p10x",},
	{.compatible = "allwinner,sunxi-mmc-v4p00x",},
	{.compatible = "allwinner,sunxi-mmc-v4p5x",},
	{.compatible = "allwinner,sunxi-mmc-v4p6x",},
	{.compatible = "allwinner,sunxi-mmc-v5p3x",},
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, sunxi_mmc_of_match);

static struct mmc_host_ops sunxi_mmc_ops = {
	.post_req = sunxi_mmc_post_req,
	.pre_req = sunxi_mmc_pre_req,
	.request = sunxi_mmc_request,
	.request_atomic = sunxi_mmc_request_atomic,
	.set_ios = sunxi_mmc_set_ios,
	.get_ro = mmc_gpio_get_ro,
	.get_cd = sunxi_mmc_gpio_get_cd,
	.enable_sdio_irq = sunxi_mmc_enable_sdio_irq,
	.hw_reset = sunxi_mmc_hw_reset,
	.start_signal_voltage_switch = sunxi_mmc_signal_voltage_switch,
	.card_busy = sunxi_mmc_card_busy,
};

#if defined(MMC_FPGA) && defined(CONFIG_ARCH_SUN8IW10P1)
void disable_card2_dat_det(void)
{
	void __iomem *card2_int_sg_en =
	    ioremap(0x1c0f000 + 0x1000 * 2 + 0x38, 0x100);

	writel(0, card2_int_sg_en);
	iounmap(card2_int_sg_en);
}

void enable_card3(void)
{
	void __iomem *card3_en = ioremap(0x1c20800 + 0xB4, 0x100);
	/*void __iomem *card3_en = ioremap(0x1c20800 + 0x48, 0x100);*/

	writel(0x55555555, card3_en);
	writel(0x55555555, card3_en + 4);
	writel(0x55555555, card3_en + 8);
	writel(0x55555555, card3_en + 12);
	iounmap(card3_en);
}

#endif

#if 0
/*The following shutdown is only used on sdmmc2, to stay compatible with the a20*/

void sunxi_mmc_do_shutdown_com(struct platform_device *pdev)
{
	u32 ocr = 0;
	u32 err = 0;
	struct mmc_host *mmc = NULL;
	struct sunxi_mmc_host *host = NULL;
	u32 status = 0;

	mmc = platform_get_drvdata(pdev);
	if (mmc == NULL) {
		SM_ERR(&pdev->dev, "%s: mmc is NULL\n", __func__);
		goto out;
	}

	host = mmc_priv(mmc);
	if (host == NULL) {
		SM_ERR(&pdev->dev, "%s: host is NULL\n", __func__);
		goto out;
	}

	SM_INFO(mmc_dev(mmc), "try to disable cache\n");
	mmc_claim_host(mmc);
	err = mmc_flush_cache(mmc->card);
	mmc_release_host(mmc);
	if (err) {
		SM_ERR(mmc_dev(mmc), "disable cache failed\n");
		/*do not release the host, so android cannot read/write after shutdown */
		mmc_claim_host(mmc);
		goto out;
	}
	/*claim the host so android cannot read/write during shutdown*/
	SM_DBG(mmc_dev(mmc), "%s: claim host\n", __func__);
	mmc_claim_host(mmc);

	do {
		if (mmc_send_status(mmc->card, &status) != 0) {
			SM_ERR(mmc_dev(mmc), "%s: send status failed\n",
			       __func__);
			/*
			 *err_out;
			 *do not release the host, so android cannot read/write after shutdown
			 */
			goto out;
		}
	} while (status != 0x00000900);

	/*mmc_card_set_ddr_mode(card);*/
	mmc_set_timing(mmc, MMC_TIMING_LEGACY);
	mmc_set_bus_width(mmc, MMC_BUS_WIDTH_1);
	mmc_set_clock(mmc, 400000);
	err = mmc_go_idle(mmc);
	if (err) {
		SM_ERR(mmc_dev(mmc), "%s: mmc_go_idle err\n", __func__);
		/*
		 *err_out;
		 *do not release the host, so android cannot read/write after shutdown
		 */
		goto out;
	}
	/*cmd1 only applies to eMMC, so do not send it for other card types */
	if (mmc->card->type != MMC_TYPE_MMC)
		/*do not release the host, so android cannot read/write after shutdown */
		goto out;

	err = mmc_send_op_cond(mmc, 0, &ocr);
	if (err) {
		SM_ERR(mmc_dev(mmc), "%s: first mmc_send_op_cond err\n",
		       __func__);
		/*
		 *err_out;
		 *do not release the host, so android cannot read/write after shutdown
		 */
		goto out;
	}

	err = mmc_send_op_cond(mmc, ocr | (1 << 30), &ocr);
	if (err) {
		SM_ERR(mmc_dev(mmc), "%s: mmc_send_op_cond err\n",
		       __func__);
		/*err_out;
		 *do not release the host, so android cannot read/write after shutdown
		 */
		goto out;
	}
	/*
	 *do not release the host, so android cannot
	 *read/write after shutdown
	 */
	goto out;

out:
	SM_INFO(mmc_dev(mmc), "%s: mmc shutdown exit..ok\n", __func__);
}
#endif

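/*
 * Probe-time resource setup: select the per-variant callbacks from
 * the compatible string and the "device_type" property, then acquire
 * regulators, pinctrl states, MMIO, clocks, the reset line and the
 * irq.  Clocks and reset are turned off again at the end; they are
 * only brought up here for the host reset and the irq request.
 */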
static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
				      struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	int ret;
	u32 caps_val = 0;
	enum of_gpio_flags flags;
	struct device_node *apk_np = of_find_node_by_name(NULL, "auto_print");
	const char *apk_sta = NULL;

	ret = of_property_read_u32(np, "ctl-spec-caps", &caps_val);
	if (!ret) {
		host->ctl_spec_cap |= caps_val;
		SM_INFO(&pdev->dev, "ctl-spec-caps %x\n",
			host->ctl_spec_cap);
	}
#ifdef SUNXI_SDMMC3
	if (of_device_is_compatible(np, "allwinner,sun8iw10p1-sdmmc3")) {
		host->sunxi_mmc_clk_set_rate =
		    sunxi_mmc_clk_set_rate_for_sdmmc3;
		/*host->dma_tl = (0x3<<28)|(15<<16)|240;*/
		host->dma_tl = SUNXI_DMA_TL_SDMMC3;
		/*host->idma_des_size_bits = 12;*/
		host->idma_des_size_bits = SUNXI_DES_SIZE_SDMMC3;
		host->sunxi_mmc_thld_ctl = sunxi_mmc_thld_ctl_for_sdmmc3;
		host->sunxi_mmc_save_spec_reg = sunxi_mmc_save_spec_reg3;
		host->sunxi_mmc_restore_spec_reg = sunxi_mmc_restore_spec_reg3;
		host->sunxi_mmc_dump_dly_table = sunxi_mmc_dump_dly3;
		sunxi_mmc_reg_ex_res_inter(host, 3);
		host->sunxi_mmc_set_acmda = sunxi_mmc_set_a12a;
		host->phy_index = 3; /*2; */
	}
#if defined(MMC_FPGA) && defined(CONFIG_ARCH_SUN8IW10P1)
	enable_card3();
#endif /*defined(MMC_FPGA) && defined(CONFIG_ARCH_SUN8IW10P1) */

#endif

#ifdef SUNXI_SDMMC2
	if (of_device_is_compatible(np, "allwinner,sun50i-sdmmc2")) {
		host->sunxi_mmc_clk_set_rate =
		    sunxi_mmc_clk_set_rate_for_sdmmc2;
		/*host->dma_tl = (0x3<<28)|(15<<16)|240;*/
		host->dma_tl = SUNXI_DMA_TL_SDMMC2;
		/*host->idma_des_size_bits = 12;*/
		host->idma_des_size_bits = SUNXI_DES_SIZE_SDMMC2;
		host->sunxi_mmc_thld_ctl = sunxi_mmc_thld_ctl_for_sdmmc2;
		host->sunxi_mmc_save_spec_reg = sunxi_mmc_save_spec_reg2;
		host->sunxi_mmc_restore_spec_reg = sunxi_mmc_restore_spec_reg2;
		host->sunxi_mmc_dump_dly_table = sunxi_mmc_dump_dly2;
		sunxi_mmc_reg_ex_res_inter(host, 2);
		host->sunxi_mmc_set_acmda = sunxi_mmc_set_a12a;
		host->phy_index = 2;
		host->sunxi_mmc_oclk_en = sunxi_mmc_oclk_onoff_sdmmc2;
	}
#endif

#ifdef SUNXI_SDMMC0
	if (of_device_is_compatible(np, "allwinner,sun50i-sdmmc0")
	    || of_device_is_compatible(np, "allwinner,sun8iw10p1-sdmmc0")) {
		host->sunxi_mmc_clk_set_rate =
		    sunxi_mmc_clk_set_rate_for_sdmmc0;
		/*host->dma_tl = (0x2<<28)|(7<<16)|248;*/
		host->dma_tl = SUNXI_DMA_TL_SDMMC0;
		/*host->idma_des_size_bits = 15;*/
		host->idma_des_size_bits = SUNXI_DES_SIZE_SDMMC0;
		host->sunxi_mmc_thld_ctl = sunxi_mmc_thld_ctl_for_sdmmc0;
		host->sunxi_mmc_save_spec_reg = sunxi_mmc_save_spec_reg0;
		host->sunxi_mmc_restore_spec_reg = sunxi_mmc_restore_spec_reg0;
		sunxi_mmc_reg_ex_res_inter(host, 0);
		host->sunxi_mmc_set_acmda = sunxi_mmc_set_a12a;
		host->phy_index = 0;
		host->sunxi_mmc_oclk_en = sunxi_mmc_oclk_onoff_sdmmc0;
	}
#endif

#ifdef SUNXI_SDMMC1
	if (of_device_is_compatible(np, "allwinner,sun50i-sdmmc1")
	    || of_device_is_compatible(np, "allwinner,sun8iw10p1-sdmmc1")) {
		host->sunxi_mmc_clk_set_rate =
		    sunxi_mmc_clk_set_rate_for_sdmmc1;
		/*host->dma_tl = (0x3<<28)|(15<<16)|240;*/
		host->dma_tl = SUNXI_DMA_TL_SDMMC1;
		/*host->idma_des_size_bits = 15;*/
		host->idma_des_size_bits = SUNXI_DES_SIZE_SDMMC1;
		host->sunxi_mmc_thld_ctl = sunxi_mmc_thld_ctl_for_sdmmc1;
		host->sunxi_mmc_save_spec_reg = sunxi_mmc_save_spec_reg1;
		host->sunxi_mmc_restore_spec_reg = sunxi_mmc_restore_spec_reg1;
		sunxi_mmc_reg_ex_res_inter(host, 1);
		host->sunxi_mmc_set_acmda = sunxi_mmc_set_a12a;
		host->phy_index = 1;
		host->sunxi_mmc_oclk_en = sunxi_mmc_oclk_onoff_sdmmc1;
	}
#endif

#if IS_ENABLED(CONFIG_AW_MMC_V4P1X)
	if (of_device_is_compatible(np, "allwinner,sunxi-mmc-v4p1x")) {
		int phy_index = 0;

		if (of_property_match_string(np, "device_type", "sdc0") == 0) {
			phy_index = 0;
		} else if (of_property_match_string(np, "device_type", "sdc1")
			   == 0) {
			phy_index = 1;
		} else if (of_property_match_string(np, "device_type", "sdc2")
			   == 0) {
			phy_index = 2;
		} else if (of_property_match_string(np, "device_type", "sdc3")
			   == 0) {
			phy_index = 3;
		} else {
			SM_ERR(&pdev->dev, "No sdmmc device, check dts\n");
		}
		sunxi_mmc_init_priv_v4p1x(host, pdev, phy_index);
	}
#endif

#if IS_ENABLED(CONFIG_AW_MMC_V4P00X)
	if ((of_device_is_compatible(np,
				     "allwinner,sunxi-mmc-v4p00x")) ||
	    of_device_is_compatible(np,
				    "allwinner,sun3iw1p1-sdmmc1") ||
	    of_device_is_compatible(np,
				    "allwinner,sun3iw1p1-sdmmc0")) {
		int phy_index = 0;

		if (of_property_match_string(np,
					     "device_type", "sdc0") == 0)
			phy_index = 0;
		else if (of_property_match_string(np,
						  "device_type", "sdc1") == 0)
			phy_index = 1;
		else if (of_property_match_string(np,
						  "device_type", "sdc2") == 0)
			phy_index = 2;
		else if (of_property_match_string(np,
						  "device_type", "sdc3") == 0)
			phy_index = 3;
		else
			SM_ERR(&pdev->dev, "No sdmmc device, check dts\n");

		sunxi_mmc_init_priv_v4p00x(host, pdev, phy_index);
	}
#endif

#if IS_ENABLED(CONFIG_AW_MMC_V4P10X)
	if (of_device_is_compatible(np, "allwinner,sunxi-mmc-v4p10x")) {
		int phy_index = 0;

		if (of_property_match_string(np,
					     "device_type", "sdc0") == 0)
			phy_index = 0;
		else if (of_property_match_string(np,
						  "device_type", "sdc1") == 0)
			phy_index = 1;
		else if (of_property_match_string(np,
						  "device_type", "sdc2") == 0)
			phy_index = 2;
		else if (of_property_match_string(np,
						  "device_type", "sdc3") == 0)
			phy_index = 3;
		else
			SM_ERR(&pdev->dev, "No sdmmc device, check dts\n");

		sunxi_mmc_init_priv_v4p10x(host, pdev, phy_index);
	}
#endif

#if IS_ENABLED(CONFIG_AW_MMC_V4P5X)
	if (of_device_is_compatible(np, "allwinner,sunxi-mmc-v4p5x")) {
		int phy_index = 0;

		if (of_property_match_string(np, "device_type", "sdc0") == 0) {
			phy_index = 0;
		} else if (of_property_match_string(np, "device_type", "sdc1")
			   == 0) {
			phy_index = 1;
		} else if (of_property_match_string(np, "device_type", "sdc2")
			   == 0) {
			phy_index = 2;
		} else if (of_property_match_string(np, "device_type", "sdc3")
			   == 0) {
			phy_index = 3;
		} else {
			SM_ERR(&pdev->dev, "No sdmmc device, check dts\n");
		}
		sunxi_mmc_init_priv_v4p5x(host, pdev, phy_index);
	}
#endif

	/*ret = mmc_regulator_get_supply(host->mmc);*/
	ret = sunxi_mmc_regulator_get_supply(host->mmc);
	if (ret)
		return ret;

	/*On some platforms there is no regulator, so we set ocr_avail manually */
	if (!host->mmc->ocr_avail) {
		host->mmc->ocr_avail =
		    MMC_VDD_28_29 | MMC_VDD_29_30 | MMC_VDD_30_31 |
		    MMC_VDD_31_32 | MMC_VDD_32_33 | MMC_VDD_33_34;
		SM_ERR(mmc_dev(host->mmc), "manual set ocr\n");
	}

	/*enable card detect pin power*/
	if (!IS_ERR(host->supply.vdmmc)) {
		ret = regulator_enable(host->supply.vdmmc);
		if (ret < 0) {
			SM_ERR(mmc_dev(host->mmc),
			       "failed to enable vdmmc regulator\n");
			return ret;
		}
	}

	/*enable card detect pin power with SD PMU*/
	if (!IS_ERR(host->supply.vdmmc33sw)) {
		ret = regulator_enable(host->supply.vdmmc33sw);
		if (ret < 0) {
			SM_ERR(mmc_dev(host->mmc),
			       "failed to enable vdmmc33sw regulator\n");
			return ret;
		}
	}

	if (!IS_ERR(host->supply.vdmmc18sw)) {
		ret = regulator_enable(host->supply.vdmmc18sw);
		if (ret < 0) {
			SM_ERR(mmc_dev(host->mmc),
			       "failed to enable vdmmc18sw regulator\n");
			return ret;
		}
	}

	host->card_pwr_gpio =
	    of_get_named_gpio_flags(np, "card-pwr-gpios", 0,
				    (enum of_gpio_flags *)&flags);
	if (gpio_is_valid(host->card_pwr_gpio)) {
		ret =
		    devm_gpio_request_one(&pdev->dev, host->card_pwr_gpio,
					  GPIOF_DIR_OUT, "card-pwr-gpios");
		if (ret < 0)
			SM_ERR(&pdev->dev,
			       "could not get card-pwr-gpios gpio\n");
	}

	host->pinctrl = devm_pinctrl_get(&pdev->dev);
	if (IS_ERR(host->pinctrl)) {
		SM_WARN(&pdev->dev,
			"Could not get pinctrl, check if the pin is needed\n");
	}

	host->pins_default = pinctrl_lookup_state(host->pinctrl,
						  PINCTRL_STATE_DEFAULT);
	if (IS_ERR(host->pins_default)) {
		SM_WARN(&pdev->dev,
			"could not get default pinstate, check if the pin is needed\n");
	}

	if (apk_np
	    && !of_property_read_string(apk_np, "status", &apk_sta)
	    && !strcmp(apk_sta, "okay")) {
		host->pins_uart_jtag = pinctrl_lookup_state(host->pinctrl,
							    "uart_jtag");
		if (IS_ERR(host->pins_uart_jtag))
			SM_WARN(&pdev->dev, "Can't get uart0 pinstate, check if needed\n");
	} else {
		host->pins_uart_jtag = ERR_PTR(-ENODEV);
	}

	host->pins_sleep = pinctrl_lookup_state(host->pinctrl,
						PINCTRL_STATE_SLEEP);
	if (IS_ERR(host->pins_sleep))
		SM_WARN(&pdev->dev, "Can't get sleep pinstate, check if needed\n");

	host->pins_bias_1v8 = pinctrl_lookup_state(host->pinctrl,
						   "mmc_1v8");
	if (IS_ERR(host->pins_bias_1v8))
		SM_WARN(&pdev->dev, "Can't get bias hs pinstate, check if needed\n");

	host->reg_base = devm_ioremap_resource(&pdev->dev,
					       platform_get_resource(pdev,
								     IORESOURCE_MEM,
								     0));
	if (IS_ERR(host->reg_base)) {
		ret = PTR_ERR(host->reg_base);
		goto error_disable_regulator;
	}

	host->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(host->clk_ahb)) {
		SM_ERR(&pdev->dev, "Could not get ahb clock\n");
		ret = PTR_ERR(host->clk_ahb);
		goto error_disable_regulator;
	}

	host->clk_mmc = devm_clk_get(&pdev->dev, "mmc");
	if (IS_ERR(host->clk_mmc)) {
		SM_ERR(&pdev->dev, "Could not get mmc clock\n");
		ret = PTR_ERR(host->clk_mmc);
		goto error_disable_regulator;
	}

	host->clk_rst = devm_reset_control_get(&pdev->dev, "rst");
	if (IS_ERR(host->clk_rst))
		SM_WARN(&pdev->dev, "Could not get mmc rst\n");

	if (!IS_ERR(host->clk_rst)) {
		ret = reset_control_deassert(host->clk_rst);
		if (ret) {
			SM_ERR(&pdev->dev, "reset err %d\n", ret);
			goto error_disable_regulator;
		}
	}

	ret = clk_prepare_enable(host->clk_ahb);
	if (ret) {
		SM_ERR(&pdev->dev, "Enable ahb clk err %d\n", ret);
		goto error_assert_reset;
	}

	ret = clk_prepare_enable(host->clk_mmc);
	if (ret) {
		SM_ERR(&pdev->dev, "Enable mmc clk err %d\n", ret);
		goto error_disable_clk_ahb;
	}
#if defined(MMC_FPGA) && defined(CONFIG_ARCH_SUN8IW10P1)
	disable_card2_dat_det();
#endif
	/*
	 * Sometimes the controller asserts the irq on boot for some reason,
	 * make sure the controller is in a sane state before enabling irqs.
	 */
	ret = sunxi_mmc_reset_host(host);
	if (ret)
		goto error_disable_clk_mmc;

#ifdef CONFIG_AW_MMC_V4P5X
	if (of_device_is_compatible(np, "allwinner,sunxi-mmc-v4p6x")) {
		int phy_index = 0;

		if (of_property_match_string(np, "device_type", "sdc0") == 0)
			phy_index = 0;
		else if (of_property_match_string(np, "device_type", "sdc1") == 0)
			phy_index = 1;
		else if (of_property_match_string(np, "device_type", "sdc2") == 0)
			phy_index = 2;
		else if (of_property_match_string(np, "device_type", "sdc3") == 0)
			phy_index = 3;
		else
			SM_ERR(&pdev->dev, "No sdmmc device, check dts\n");
		sunxi_mmc_init_priv_v4p6x(host, pdev, phy_index);
	}
#endif

#ifdef CONFIG_AW_MMC_V5P3X
	if (of_device_is_compatible(np, "allwinner,sunxi-mmc-v5p3x")) {
		int phy_index = 0;

		if (of_property_match_string(np, "device_type", "sdc0") == 0) {
			phy_index = 0;
		} else if (of_property_match_string(np, "device_type", "sdc1")
			   == 0) {
			phy_index = 1;
		} else if (of_property_match_string(np, "device_type", "sdc2")
			   == 0) {
			phy_index = 2;
		} else if (of_property_match_string(np, "device_type", "sdc3")
			   == 0) {
			phy_index = 3;
		} else {
			SM_ERR(&pdev->dev, "No sdmmc device, check dts\n");
		}
		sunxi_mmc_init_priv_v5p3x(host, pdev, phy_index);
	}
#endif

	host->irq = platform_get_irq(pdev, 0);
	snprintf(host->name, sizeof(host->name), "mmc%d",
		 host->phy_index);
	ret = devm_request_threaded_irq(&pdev->dev, host->irq, sunxi_mmc_irq,
					sunxi_mmc_handle_bottom_half, 0,
					host->name, host);
	if (ret) {
		SM_ERR(&pdev->dev, "failed to request irq %d\n", ret);
		goto error_disable_clk_mmc;
	}

	disable_irq(host->irq);

	clk_disable_unprepare(host->clk_mmc);
	clk_disable_unprepare(host->clk_ahb);

	if (!IS_ERR(host->clk_rst))
		reset_control_assert(host->clk_rst);

	return ret;

error_disable_clk_mmc:
	clk_disable_unprepare(host->clk_mmc);
error_disable_clk_ahb:
	clk_disable_unprepare(host->clk_ahb);
error_assert_reset:
#if 0
	if (!IS_ERR(host->reset))
		reset_control_assert(host->reset);
#else
	if (!IS_ERR(host->clk_rst))
		reset_control_assert(host->clk_rst);
#endif
error_disable_regulator:
	if (!IS_ERR(host->supply.vdmmc18sw))	/*SD PMU control*/
		regulator_disable(host->supply.vdmmc18sw);
	if (!IS_ERR(host->supply.vdmmc33sw))	/*SD PMU control*/
		regulator_disable(host->supply.vdmmc33sw);
	if (!IS_ERR(host->supply.vdmmc))
		regulator_disable(host->supply.vdmmc);
	sunxi_mmc_regulator_release_supply(host->mmc);

	return ret;
}

static void sunxi_show_det_mode(struct mmc_host *mmc)
{
	struct sunxi_mmc_host *host = mmc_priv(mmc);

	if (host->sunxi_caps3 & MMC_SUNXI_CAP3_DAT3_DET)
		SM_INFO(mmc_dev(mmc), "detmode:data3\n");
	else if (mmc->caps & MMC_CAP_NEEDS_POLL)
		SM_INFO(mmc_dev(mmc), "detmode:gpio polling\n");
	else if (mmc->caps & MMC_CAP_NONREMOVABLE)
		SM_INFO(mmc_dev(mmc), "detmode:always in (non removable)\n");
	else if (mmc->slot.cd_irq >= 0)
		SM_INFO(mmc_dev(mmc), "detmode:gpio irq\n");
	else
		SM_INFO(mmc_dev(mmc), "detmode:manually by software\n");
}
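/*
 * Parse the sunxi-specific DT properties that mmc_of_parse() does
 * not handle.  As an illustrative sketch only (node label and values
 * are made up; property names are exactly those parsed below), a
 * board might use:
 *
 *	&sdc2 {
 *		cap-wait-while-busy;
 *		max-busy-timeout = <600000>;
 *		cap-hsq;
 *		filter_speed = <10000000>;
 *		time_pwroff_ms = <200>;
 *		req-page-count = <4>;
 *	};
 */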
static int sunxi_mmc_extra_of_parse(struct mmc_host *mmc)
{
	struct device_node *np;
	struct sunxi_mmc_host *host = mmc_priv(mmc);
	int perf_enable = 0;

	if (!mmc->parent || !mmc->parent->of_node)
		return 0;

	np = mmc->parent->of_node;

#if 0
	if (of_property_read_bool(np, "cap-erase"))
		mmc->caps |= MMC_CAP_ERASE;
#endif
	if (of_property_read_bool(np, "mmc-high-capacity-erase-size"))
		mmc->caps2 |= MMC_CAP2_HC_ERASE_SZ;
	if (sunxi_mmc_chk_hr1b_cap(host)
	    && of_property_read_bool(np, "cap-wait-while-busy")) {
		int max_busy_timeout = 0;

		mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
		if (of_property_read_u32(np,
					 "max-busy-timeout", &max_busy_timeout) < 0) {
			SM_DBG(mmc->parent,
			       "max-busy-timeout is missing, use default %d ms.\n",
			       SUNXI_DEF_MAX_R1B_TIMEOUT_MS);
			mmc->max_busy_timeout = SUNXI_DEF_MAX_R1B_TIMEOUT_MS;
		} else {
			if (max_busy_timeout < SUNXI_MIN_R1B_TIMEOUT_MS)
				max_busy_timeout = SUNXI_MIN_R1B_TIMEOUT_MS;
			mmc->max_busy_timeout = max_busy_timeout;
			SM_DBG(mmc->parent, "max-busy-timeout is %d ms\n",
			       max_busy_timeout);
		}
	}

	if (of_property_read_bool(np, "cap-cmd23"))
		mmc->caps |= MMC_CAP_CMD23;
	if (of_property_read_bool(np, "cap-pack-write"))
		mmc->caps2 |= MMC_CAP2_PACKED_WR;
	if (of_property_read_bool(np, "mmc-cache-ctrl"))
		mmc->caps2 |= MMC_CAP2_CACHE_CTRL;
	if (of_property_read_bool(np, "cd-used-24M"))
		host->sunxi_caps3 |= MMC_SUNXI_CAP3_CD_USED_24M;
	if (of_property_read_bool(np, "mmc-bootpart-noacc"))
		mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC;
	if (of_property_read_bool(np, "cap-hsq"))
		host->sunxi_caps3 |= MMC_SUNXI_CAP3_HSQ;
	if (of_property_read_u32(np, "debuglevel", &host->debuglevel) == 0)
		SM_INFO(mmc->parent, "Incoming debuglevel %d\n", host->debuglevel);

#ifdef CONFIG_ARCH_SUN8IW16P1
	if (sunxi_get_soc_ver() == SUN8IW16P1_REV_A) {
		if (host->phy_index == 0) {
			mmc->caps &= ~(MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
				       MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
				       MMC_CAP_UHS_DDR50);
		}
	}
#endif

	if (of_property_read_u32(np,
				 "filter_speed", &(host->filter_speed)) < 0) {
		SM_DBG(mmc->parent,
		       "filter speed is missing, function is not used\n");
	} else {
		SM_INFO(mmc->parent, "filter speed is %d B/s\n", host->filter_speed);
	}

	if (of_property_read_u32(np,
				 "filter_sector", &(host->filter_sector)) < 0) {
		SM_DBG(mmc->parent,
		       "filter sector is missing, function is not used\n");
	} else {
		SM_INFO(mmc->parent, "filter sector is %d sectors\n", host->filter_sector);
	}

	if (of_property_read_u32(np,
				 "perf_enable", &perf_enable) < 0) {
		SM_DBG(mmc->parent,
		       "perf_enable is missing, function is not used\n");
	} else {
		SM_INFO(mmc->parent, "Perf function is enabled\n");
		host->perf_enable = true;
	}

	if (of_property_read_u32(np,
				 "min-frequency", &mmc->f_min) < 0) {
		SM_DBG(mmc->parent,
		       "min-frequency used default:%d\n", mmc->f_min);
	} else {
		SM_INFO(mmc->parent, "min-frequency:%d\n", mmc->f_min);
	}

	/* delay time: pwr-gpio controlled poweroff->powerup */
	if (of_property_read_u32(np, "time_pwroff_ms", &host->time_pwroff_ms) < 0) {
		host->time_pwroff_ms = 200;
		SM_DBG(mmc->parent,
		       "powerctrl default delay time:%dms\n", host->time_pwroff_ms);
	}

	if (of_property_read_u32(np,
				 "req-page-count", &host->req_page_count) < 0) {
		host->req_page_count = 4;
		SM_DBG(mmc->parent,
		       "req-page-count used default:%d\n", host->req_page_count);
	} else {
		SM_DBG(mmc->parent,
		       "req-page-count used value:%d\n", host->req_page_count);
	}

	return 0;
}

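/*
 * Probe: allocate the mmc host, request resources, apply generic and
 * sunxi-specific DT properties, size the idma descriptor pool
 * (req_page_count pages, which bounds max_segs), optionally put a
 * software queue (hsq) in front of the host and register it.
 */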
static int sunxi_mmc_probe(struct platform_device *pdev)
{
	struct sunxi_mmc_host *host;
	struct mmc_host *mmc;
	struct mmc_gpio *ctx;
#if IS_ENABLED(CONFIG_MMC_HSQ)
	struct mmc_hsq *hsq;
#endif
	int ret;

	mmc = mmc_alloc_host(sizeof(struct sunxi_mmc_host), &pdev->dev);
	if (!mmc) {
		SM_ERR(&pdev->dev, "mmc alloc host failed\n");
		return -ENOMEM;
	}

	host = mmc_priv(mmc);
	host->mmc = mmc;
#ifdef CONFIG_AW_MMC_DEBUG
	host->debuglevel = CONFIG_AW_MMC_PRE_DBGLVL;
#endif
	platform_set_drvdata(pdev, mmc);
	SM_INFO(&pdev->dev, "%s\n", DRIVER_VERSION);

	spin_lock_init(&host->lock);

	ret = sunxi_mmc_resource_request(host, pdev);
	if (ret)
		goto error_free_host;

	/* 400kHz ~ 50MHz */
	mmc->f_min = 400000;
	mmc->f_max = 50000000;

	if (sunxi_mmc_chk_hr1b_cap(host)) {
		mmc->max_busy_timeout = SUNXI_DEF_MAX_R1B_TIMEOUT_MS;
		mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
		SM_DBG(&pdev->dev, "set host busy\n");
	} else
		SM_ERR(&pdev->dev, "non-existent host busy\n");

#if !IS_ENABLED(CONFIG_REGULATOR)
	/*Because the fpga has no regulator, set the ocr mask manually*/
	mmc->ocr_avail =
	    MMC_VDD_28_29 | MMC_VDD_29_30 | MMC_VDD_30_31 | MMC_VDD_31_32 |
	    MMC_VDD_32_33 | MMC_VDD_33_34;
	SM_INFO(&pdev->dev, "***set host ocr***\n");
#endif

	mmc_of_parse(mmc);
	sunxi_mmc_extra_of_parse(mmc);

	host->dma_mask = DMA_BIT_MASK(64);
	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
	pdev->dev.dma_mask = &host->dma_mask;
	host->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE * host->req_page_count,
					  &host->sg_dma, GFP_KERNEL);
	if (!host->sg_cpu) {
		SM_ERR(&pdev->dev, "Failed to allocate DMA descriptor mem\n");
		ret = -ENOMEM;
		goto error_free_host;
	}

	mmc->ops = &sunxi_mmc_ops;
	mmc->max_blk_count = 8192;
	mmc->max_blk_size = 4096;
	mmc->max_segs = PAGE_SIZE * host->req_page_count / sizeof(struct sunxi_idma_des);
	mmc->max_seg_size = (1 << host->idma_des_size_bits);
	mmc->max_req_size = mmc->max_seg_size * mmc->max_segs;
	if (host->sunxi_caps3 & MMC_SUNXI_CAP3_DAT3_DET)
		host->dat3_imask = SDXC_CARD_INSERT | SDXC_CARD_REMOVE;
	if (host->sunxi_caps3 & MMC_SUNXI_CAP3_CD_USED_24M) {
		ctx = (struct mmc_gpio *)mmc->slot.handler_priv;
		if (ctx && ctx->cd_gpio) {
			ret = gpiod_set_debounce(ctx->cd_gpio, 1);
			if (ret < 0)
				SM_INFO(&pdev->dev, "set cd-gpios as 24M fail\n");
		}
	}

	SM_DBG(&pdev->dev, "base:0x%p irq:%u\n", host->reg_base, host->irq);

#if IS_ENABLED(CONFIG_MMC_HSQ)
	if (host->sunxi_caps3 & MMC_SUNXI_CAP3_HSQ) {
		hsq = devm_kzalloc(&pdev->dev, sizeof(*hsq), GFP_KERNEL);
		if (!hsq) {
			ret = -ENOMEM;
			goto error_free_dma;
		}

		ret = mmc_hsq_init(hsq, host->mmc);
		if (ret)
			goto error_free_dma;
		else
			SM_INFO(&pdev->dev, "Hsq init ok\n");
	}
#endif

	ret = mmc_add_host(mmc);
	if (ret)
		goto error_free_dma;

	/*fix unbalanced pm_runtime_enable when an async resume occurs*/
	dev_pm_set_driver_flags(&mmc->class_dev, DPM_FLAG_NO_DIRECT_COMPLETE);
	sunxi_show_det_mode(mmc);
	ret = mmc_create_sys_fs(host, pdev);
	if (ret) {
		SM_ERR(&pdev->dev, "create sys fs failed\n");
		goto error_free_dma;
	}

	sunxi_mmc_panic_init_ps(NULL);

	return 0;

error_free_dma:
	dma_free_coherent(&pdev->dev, PAGE_SIZE * host->req_page_count, host->sg_cpu,
			  host->sg_dma);
error_free_host:
	mmc_free_host(mmc);
	return ret;
}

static int sunxi_mmc_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct sunxi_mmc_host *host = mmc_priv(mmc);

	mmc_remove_host(mmc);
	if (host->ctl_spec_cap & SUNXI_SC_EN_TIMEOUT_DETECT)
		cancel_delayed_work_sync(&host->sunxi_timerout_work);
	disable_irq(host->irq);
	sunxi_mmc_reset_host(host);

	mmc_remove_sys_fs(host, pdev);

	if (!IS_ERR(host->supply.vdmmc18sw))	/*SD PMU control*/
		regulator_disable(host->supply.vdmmc18sw);
	if (!IS_ERR(host->supply.vdmmc33sw))	/*SD PMU control*/
		regulator_disable(host->supply.vdmmc33sw);
	if (!IS_ERR(host->supply.vdmmc))
		regulator_disable(host->supply.vdmmc);
	sunxi_mmc_regulator_release_supply(mmc);

	dma_free_coherent(&pdev->dev, PAGE_SIZE * host->req_page_count, host->sg_cpu,
			  host->sg_dma);
	mmc_free_host(mmc);

	return 0;
}

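/*
 * The controller loses register state when clocks and reset are
 * dropped across suspend, so the public registers are snapshotted
 * into host->bak_regs and written back by sunxi_mmc_regs_restore();
 * variant-specific registers go through the spec_reg callbacks.
 */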
static void sunxi_mmc_regs_save(struct sunxi_mmc_host *host)
{
	struct sunxi_mmc_ctrl_regs *bak_regs = &host->bak_regs;

	/*save the public registers */
	bak_regs->gctrl = mmc_readl(host, REG_GCTRL);
	bak_regs->clkc = mmc_readl(host, REG_CLKCR);
	bak_regs->timeout = mmc_readl(host, REG_TMOUT);
	bak_regs->buswid = mmc_readl(host, REG_WIDTH);
	bak_regs->waterlvl = mmc_readl(host, REG_FTRGL);
	bak_regs->funcsel = mmc_readl(host, REG_FUNS);
	bak_regs->debugc = mmc_readl(host, REG_DBGC);
	bak_regs->idmacc = mmc_readl(host, REG_DMAC);
	bak_regs->dlba = mmc_readl(host, REG_DLBA);
	bak_regs->imask = mmc_readl(host, REG_IMASK);

	if (host->sunxi_mmc_save_spec_reg)
		host->sunxi_mmc_save_spec_reg(host);
	else
		SM_WARN(mmc_dev(host->mmc), "no spec reg save\n");
}

static void sunxi_mmc_regs_restore(struct sunxi_mmc_host *host)
{
	struct sunxi_mmc_ctrl_regs *bak_regs = &host->bak_regs;

	/*restore the public registers */
	mmc_writel(host, REG_GCTRL, bak_regs->gctrl);
	mmc_writel(host, REG_CLKCR, bak_regs->clkc);
	mmc_writel(host, REG_TMOUT, bak_regs->timeout);
	mmc_writel(host, REG_WIDTH, bak_regs->buswid);
	mmc_writel(host, REG_FTRGL, bak_regs->waterlvl);
	mmc_writel(host, REG_FUNS, bak_regs->funcsel);
	mmc_writel(host, REG_DBGC, bak_regs->debugc);
	mmc_writel(host, REG_DMAC, bak_regs->idmacc);
	mmc_writel(host, REG_DLBA, bak_regs->dlba);
	mmc_writel(host, REG_IMASK, bak_regs->imask);

	if (host->sunxi_mmc_restore_spec_reg)
		host->sunxi_mmc_restore_spec_reg(host);
	else
		SM_WARN(mmc_dev(host->mmc), "no spec reg restore\n");
	if (host->sunxi_mmc_set_acmda)
		host->sunxi_mmc_set_acmda(host);
}

#if IS_ENABLED(CONFIG_PM)

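/*
 * System suspend: drain the optional software queue, cut the
 * card-detect supplies, and, when the card keeps power (or dat3
 * detect is used), save the register file, gate clocks, assert
 * reset, enter the sleep pinctrl state and release the IO supplies.
 * sunxi_mmc_resume() performs the same steps in reverse.
 */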
static int sunxi_mmc_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct sunxi_mmc_host *host = mmc_priv(mmc);
	int ret = 0;

	if (mmc) {
		/*ret = mmc_suspend_host(mmc);*/
#if IS_ENABLED(CONFIG_MMC_HSQ)
		/* Let the software queue finish its pending requests first. */
		if (host->sunxi_caps3 & MMC_SUNXI_CAP3_HSQ)
			mmc_hsq_suspend(mmc);
#endif

		if (!ret) {
			if (!IS_ERR(host->supply.vdmmc18sw)) {
				ret = regulator_disable(host->supply.vdmmc18sw);
				if (ret) {
					SM_ERR(mmc_dev(mmc),
					       "disable vdmmc18sw failed in suspend\n");
					return ret;
				}
			}

			/*SD PMU control*/
			if (!IS_ERR(host->supply.vdmmc33sw)) {
				ret = regulator_disable(host->supply.vdmmc33sw);
				if (ret) {
					SM_ERR(mmc_dev(mmc),
					       "disable vdmmc33sw failed in suspend\n");
					return ret;
				}
			}

			if (!IS_ERR(host->supply.vdmmc)) {
				ret = regulator_disable(host->supply.vdmmc);
				if (ret) {
					SM_ERR(mmc_dev(mmc),
					       "disable vdmmc failed in suspend\n");
					return ret;
				}
			}

			if (mmc_card_keep_power(mmc) || host->dat3_imask) {
				disable_irq(host->irq);
				sunxi_mmc_regs_save(host);

				clk_disable_unprepare(host->clk_mmc);
				clk_disable_unprepare(host->clk_ahb);

				if (!IS_ERR(host->clk_rst))
					reset_control_assert(host->clk_rst);

				if (!IS_ERR(host->pins_sleep)) {
					ret = pinctrl_select_state(host->pinctrl,
								   host->pins_sleep);
					if (ret) {
						SM_ERR(mmc_dev(mmc),
						       "could not set sleep pins in suspend\n");
						return ret;
					}
				}
				if (!IS_ERR(host->supply.vqmmc18sw))
					regulator_disable(host->supply.vqmmc18sw);
				if (!IS_ERR(host->supply.vqmmc33sw))
					regulator_disable(host->supply.vqmmc33sw);
				if (!IS_ERR(mmc->supply.vqmmc))
					regulator_disable(mmc->supply.vqmmc);

				if (!IS_ERR(mmc->supply.vmmc)) {
					ret = sunxi_mmc_regulator_set_ocr(mmc,
									  mmc->supply.vmmc, 0);
					return ret;
				}
				SM_DBG(mmc_dev(mmc), "dat3_imask %x\n",
				       host->dat3_imask);
				/*dump_reg(host); */
			}
			/*sunxi_mmc_gpio_suspend_cd(mmc);*/
			/*sunxi_dump_reg(mmc); */
		}
	}

	return ret;
}

static int sunxi_mmc_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct sunxi_mmc_host *host = mmc_priv(mmc);
	int ret = 0;

	if (mmc) {
		/*sunxi_mmc_gpio_resume_cd(mmc);*/
		if (mmc_card_keep_power(mmc) || host->dat3_imask) {
			if (!IS_ERR(mmc->supply.vmmc)) {
				ret = sunxi_mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
								  mmc->ios.vdd);
				if (ret)
					return ret;
			}

			if (!IS_ERR(mmc->supply.vqmmc)) {
				ret = regulator_enable(mmc->supply.vqmmc);
				if (ret < 0) {
					SM_ERR(mmc_dev(mmc),
					       "failed to enable vqmmc regulator\n");
					return ret;
				}
			}
			/*SD PMU control*/
			if (!IS_ERR(host->supply.vqmmc33sw)) {
				ret = regulator_enable(host->supply.vqmmc33sw);
				if (ret < 0) {
					SM_ERR(mmc_dev(mmc),
					       "failed to enable vqmmc33sw regulator\n");
					return ret;
				}
			}
			/*SD PMU control*/
			if (!IS_ERR(host->supply.vqmmc18sw)) {
				ret = regulator_enable(host->supply.vqmmc18sw);
				if (ret < 0) {
					SM_ERR(mmc_dev(mmc),
					       "failed to enable vq18sw regulator\n");
					return ret;
				}
			}

			if (!IS_ERR(host->pins_default)) {
				ret = pinctrl_select_state(host->pinctrl,
							   host->pins_default);
				if (ret) {
					SM_ERR(mmc_dev(mmc),
					       "could not set default pins in resume\n");
					return ret;
				}
			}

			if (!IS_ERR(host->clk_rst)) {
				ret = reset_control_deassert(host->clk_rst);
				if (ret) {
					SM_ERR(mmc_dev(mmc), "reset err %d\n",
					       ret);
					return ret;
				}
			}

			ret = clk_prepare_enable(host->clk_ahb);
			if (ret) {
				SM_ERR(mmc_dev(mmc), "Enable ahb clk err %d\n",
				       ret);
				return ret;
			}
			ret = clk_prepare_enable(host->clk_mmc);
			if (ret) {
				SM_ERR(mmc_dev(mmc), "Enable mmc clk err %d\n",
				       ret);
				return ret;
			}

			host->ferror = sunxi_mmc_init_host(mmc);
			if (host->ferror)
				return -1;

			sunxi_mmc_regs_restore(host);
			host->ferror = sunxi_mmc_update_clk(host);
			if (host->ferror)
				return -1;

			enable_irq(host->irq);
			SM_INFO(mmc_dev(mmc), "dat3_imask %x\n",
				host->dat3_imask);
			/*dump_reg(host); */
		}
		/*enable card detect pin power*/
		if (!IS_ERR(host->supply.vdmmc)) {
			ret = regulator_enable(host->supply.vdmmc);
			if (ret < 0) {
				SM_ERR(mmc_dev(mmc),
				       "failed to enable vdmmc regulator\n");
				return ret;
			}
		}
		/*SD PMU control*/
		if (!IS_ERR(host->supply.vdmmc33sw)) {
			ret = regulator_enable(host->supply.vdmmc33sw);
			if (ret < 0) {
				SM_ERR(mmc_dev(mmc),
				       "failed to enable vdmmc33sw regulator\n");
				return ret;
			}
		}
		/*SD PMU control*/
		if (!IS_ERR(host->supply.vdmmc18sw)) {
			ret = regulator_enable(host->supply.vdmmc18sw);
			if (ret < 0) {
				SM_ERR(mmc_dev(mmc),
				       "failed to enable vdmmc18sw regulator\n");
				return ret;
			}
		}

		/*sunxi_dump_reg(mmc); */
		/*ret = mmc_resume_host(mmc);*/

#if IS_ENABLED(CONFIG_MMC_HSQ)
		/* Wake the software queue back up first. */
		if (host->sunxi_caps3 & MMC_SUNXI_CAP3_HSQ)
			mmc_hsq_resume(mmc);
#endif
	}

	return ret;
}

static const struct dev_pm_ops sunxi_mmc_pm = {
	.suspend = sunxi_mmc_suspend,
	.resume = sunxi_mmc_resume,
};

#define sunxi_mmc_pm_ops (&sunxi_mmc_pm)

#else /* CONFIG_PM */

#define sunxi_mmc_pm_ops NULL

#endif /* CONFIG_PM */

void sunxi_shutdown_mmc(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct sunxi_mmc_host *host = mmc_priv(mmc);

	if (host->sunxi_mmc_shutdown)
		host->sunxi_mmc_shutdown(pdev);
}

static struct platform_driver sunxi_mmc_driver = {
	.driver = {
		   .name = "sunxi-mmc",
		   .of_match_table = of_match_ptr(sunxi_mmc_of_match),
		   .pm = sunxi_mmc_pm_ops,
		   },
	.probe = sunxi_mmc_probe,
	.remove = sunxi_mmc_remove,
	.shutdown = sunxi_shutdown_mmc,
};

module_platform_driver(sunxi_mmc_driver);

MODULE_DESCRIPTION("Allwinner's SD/MMC Card Controller Driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("David Lanzendörfer <david.lanzendoerfer@o2s.ch> && lixiang@allwinnertech.com");
MODULE_ALIAS("platform:sunxi-mmc");