// SPDX-License-Identifier: GPL-2.0-only
/*
 * drivers/mmc/host/sdhci-msm.c - Qualcomm SDHCI Platform driver
 *
 * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/mmc/mmc.h>
#include <linux/pm_runtime.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/iopoll.h>
#include <linux/regulator/consumer.h>
#include <linux/interconnect.h>
#include <linux/pinctrl/consumer.h>

#include "sdhci-pltfm.h"
#include "cqhci.h"

#define CORE_MCI_VERSION 0x50
#define CORE_VERSION_MAJOR_SHIFT 28
#define CORE_VERSION_MAJOR_MASK (0xf << CORE_VERSION_MAJOR_SHIFT)
#define CORE_VERSION_MINOR_MASK 0xff

#define CORE_MCI_GENERICS 0x70
#define SWITCHABLE_SIGNALING_VOLTAGE BIT(29)

#define HC_MODE_EN 0x1
#define CORE_POWER 0x0
#define CORE_SW_RST BIT(7)
#define FF_CLK_SW_RST_DIS BIT(13)

#define CORE_PWRCTL_BUS_OFF BIT(0)
#define CORE_PWRCTL_BUS_ON BIT(1)
#define CORE_PWRCTL_IO_LOW BIT(2)
#define CORE_PWRCTL_IO_HIGH BIT(3)
#define CORE_PWRCTL_BUS_SUCCESS BIT(0)
#define CORE_PWRCTL_BUS_FAIL BIT(1)
#define CORE_PWRCTL_IO_SUCCESS BIT(2)
#define CORE_PWRCTL_IO_FAIL BIT(3)
#define REQ_BUS_OFF BIT(0)
#define REQ_BUS_ON BIT(1)
#define REQ_IO_LOW BIT(2)
#define REQ_IO_HIGH BIT(3)
#define INT_MASK 0xf
#define MAX_PHASES 16
#define CORE_DLL_LOCK BIT(7)
#define CORE_DDR_DLL_LOCK BIT(11)
#define CORE_DLL_EN BIT(16)
#define CORE_CDR_EN BIT(17)
#define CORE_CK_OUT_EN BIT(18)
#define CORE_CDR_EXT_EN BIT(19)
#define CORE_DLL_PDN BIT(29)
#define CORE_DLL_RST BIT(30)
#define CORE_CMD_DAT_TRACK_SEL BIT(0)

#define CORE_DDR_CAL_EN BIT(0)
#define CORE_FLL_CYCLE_CNT BIT(18)
#define CORE_DLL_CLOCK_DISABLE BIT(21)

#define DLL_USR_CTL_POR_VAL 0x10800
#define ENABLE_DLL_LOCK_STATUS BIT(26)
#define FINE_TUNE_MODE_EN BIT(27)
#define BIAS_OK_SIGNAL BIT(29)

#define DLL_CONFIG_3_LOW_FREQ_VAL 0x08
#define DLL_CONFIG_3_HIGH_FREQ_VAL 0x10

#define CORE_VENDOR_SPEC_POR_VAL 0xa9c
#define CORE_CLK_PWRSAVE BIT(1)
#define CORE_HC_MCLK_SEL_DFLT (2 << 8)
#define CORE_HC_MCLK_SEL_HS400 (3 << 8)
#define CORE_HC_MCLK_SEL_MASK (3 << 8)
#define CORE_IO_PAD_PWR_SWITCH_EN BIT(15)
#define CORE_IO_PAD_PWR_SWITCH BIT(16)
#define CORE_HC_SELECT_IN_EN BIT(18)
#define CORE_HC_SELECT_IN_HS400 (6 << 19)
#define CORE_HC_SELECT_IN_MASK (7 << 19)

#define CORE_3_0V_SUPPORT BIT(25)
#define CORE_1_8V_SUPPORT BIT(26)
#define CORE_VOLT_SUPPORT (CORE_3_0V_SUPPORT | CORE_1_8V_SUPPORT)

#define CORE_CSR_CDC_CTLR_CFG0 0x130
#define CORE_SW_TRIG_FULL_CALIB BIT(16)
#define CORE_HW_AUTOCAL_ENA BIT(17)

#define CORE_CSR_CDC_CTLR_CFG1 0x134
#define CORE_CSR_CDC_CAL_TIMER_CFG0 0x138
#define CORE_TIMER_ENA BIT(16)

#define CORE_CSR_CDC_CAL_TIMER_CFG1 0x13C
#define CORE_CSR_CDC_REFCOUNT_CFG 0x140
#define CORE_CSR_CDC_COARSE_CAL_CFG 0x144
#define CORE_CDC_OFFSET_CFG 0x14C
#define CORE_CSR_CDC_DELAY_CFG 0x150
#define CORE_CDC_SLAVE_DDA_CFG 0x160
#define CORE_CSR_CDC_STATUS0 0x164
#define CORE_CALIBRATION_DONE BIT(0)

#define CORE_CDC_ERROR_CODE_MASK 0x7000000

#define CORE_CSR_CDC_GEN_CFG 0x178
#define CORE_CDC_SWITCH_BYPASS_OFF BIT(0)
#define CORE_CDC_SWITCH_RC_EN BIT(1)

#define CORE_CDC_T4_DLY_SEL BIT(0)
#define CORE_CMDIN_RCLK_EN BIT(1)
#define CORE_START_CDC_TRAFFIC BIT(6)

#define CORE_PWRSAVE_DLL BIT(3)

#define DDR_CONFIG_POR_VAL 0x80040873

#define INVALID_TUNING_PHASE -1
#define SDHCI_MSM_MIN_CLOCK 400000
#define CORE_FREQ_100MHZ (100 * 1000 * 1000)

#define CDR_SELEXT_SHIFT 20
#define CDR_SELEXT_MASK (0xf << CDR_SELEXT_SHIFT)
#define CMUX_SHIFT_PHASE_SHIFT 24
#define CMUX_SHIFT_PHASE_MASK (7 << CMUX_SHIFT_PHASE_SHIFT)

#define MSM_MMC_AUTOSUSPEND_DELAY_MS 50

/* Timeout value to avoid infinite waiting for pwr_irq */
#define MSM_PWR_IRQ_TIMEOUT_MS 5000

/* Max load for eMMC Vdd-io supply */
#define MMC_VQMMC_MAX_LOAD_UA 325000

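/*
 * Accesses to the MCI-era vendor registers are funnelled through these
 * wrappers so the same code works whether the registers live in the
 * separate core_mem region (older parts) or in the main SDHCI register
 * space (v5 onwards); the per-variant ops below provide the implementation.
 */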
#define msm_host_readl(msm_host, host, offset) \
        msm_host->var_ops->msm_readl_relaxed(host, offset)

#define msm_host_writel(msm_host, val, host, offset) \
        msm_host->var_ops->msm_writel_relaxed(val, host, offset)

/* CQHCI vendor specific registers */
#define CQHCI_VENDOR_CFG1 0xA00
#define CQHCI_VENDOR_DIS_RST_ON_CQ_EN (0x3 << 13)

struct sdhci_msm_offset {
        u32 core_hc_mode;
        u32 core_mci_data_cnt;
        u32 core_mci_status;
        u32 core_mci_fifo_cnt;
        u32 core_mci_version;
        u32 core_generics;
        u32 core_testbus_config;
        u32 core_testbus_sel2_bit;
        u32 core_testbus_ena;
        u32 core_testbus_sel2;
        u32 core_pwrctl_status;
        u32 core_pwrctl_mask;
        u32 core_pwrctl_clear;
        u32 core_pwrctl_ctl;
        u32 core_sdcc_debug_reg;
        u32 core_dll_config;
        u32 core_dll_status;
        u32 core_vendor_spec;
        u32 core_vendor_spec_adma_err_addr0;
        u32 core_vendor_spec_adma_err_addr1;
        u32 core_vendor_spec_func2;
        u32 core_vendor_spec_capabilities0;
        u32 core_ddr_200_cfg;
        u32 core_vendor_spec3;
        u32 core_dll_config_2;
        u32 core_dll_config_3;
        u32 core_ddr_config_old; /* Applicable to sdcc minor ver < 0x49 */
        u32 core_ddr_config;
        u32 core_dll_usr_ctl; /* Present on SDCC5.1 onwards */
};

static const struct sdhci_msm_offset sdhci_msm_v5_offset = {
        .core_mci_data_cnt = 0x35c,
        .core_mci_status = 0x324,
        .core_mci_fifo_cnt = 0x308,
        .core_mci_version = 0x318,
        .core_generics = 0x320,
        .core_testbus_config = 0x32c,
        .core_testbus_sel2_bit = 3,
        .core_testbus_ena = (1 << 31),
        .core_testbus_sel2 = (1 << 3),
        .core_pwrctl_status = 0x240,
        .core_pwrctl_mask = 0x244,
        .core_pwrctl_clear = 0x248,
        .core_pwrctl_ctl = 0x24c,
        .core_sdcc_debug_reg = 0x358,
        .core_dll_config = 0x200,
        .core_dll_status = 0x208,
        .core_vendor_spec = 0x20c,
        .core_vendor_spec_adma_err_addr0 = 0x214,
        .core_vendor_spec_adma_err_addr1 = 0x218,
        .core_vendor_spec_func2 = 0x210,
        .core_vendor_spec_capabilities0 = 0x21c,
        .core_ddr_200_cfg = 0x224,
        .core_vendor_spec3 = 0x250,
        .core_dll_config_2 = 0x254,
        .core_dll_config_3 = 0x258,
        .core_ddr_config = 0x25c,
        .core_dll_usr_ctl = 0x388,
};

static const struct sdhci_msm_offset sdhci_msm_mci_offset = {
        .core_hc_mode = 0x78,
        .core_mci_data_cnt = 0x30,
        .core_mci_status = 0x34,
        .core_mci_fifo_cnt = 0x44,
        .core_mci_version = 0x050,
        .core_generics = 0x70,
        .core_testbus_config = 0x0cc,
        .core_testbus_sel2_bit = 4,
        .core_testbus_ena = (1 << 3),
        .core_testbus_sel2 = (1 << 4),
        .core_pwrctl_status = 0xdc,
        .core_pwrctl_mask = 0xe0,
        .core_pwrctl_clear = 0xe4,
        .core_pwrctl_ctl = 0xe8,
        .core_sdcc_debug_reg = 0x124,
        .core_dll_config = 0x100,
        .core_dll_status = 0x108,
        .core_vendor_spec = 0x10c,
        .core_vendor_spec_adma_err_addr0 = 0x114,
        .core_vendor_spec_adma_err_addr1 = 0x118,
        .core_vendor_spec_func2 = 0x110,
        .core_vendor_spec_capabilities0 = 0x11c,
        .core_ddr_200_cfg = 0x184,
        .core_vendor_spec3 = 0x1b0,
        .core_dll_config_2 = 0x1b4,
        .core_ddr_config_old = 0x1b8,
        .core_ddr_config = 0x1bc,
};

struct sdhci_msm_variant_ops {
        u32 (*msm_readl_relaxed)(struct sdhci_host *host, u32 offset);
        void (*msm_writel_relaxed)(u32 val, struct sdhci_host *host,
                        u32 offset);
};

/*
 * From V5, register spaces have changed. Wrap this info in a structure
 * and choose the data structure based on the version info mentioned in DT.
 */
struct sdhci_msm_variant_info {
        bool mci_removed;
        bool restore_dll_config;
        bool uses_tassadar_dll;
        const struct sdhci_msm_variant_ops *var_ops;
        const struct sdhci_msm_offset *offset;
};
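
/*
 * For illustration, a sketch of how a platform ties these together (the
 * actual tables are defined further down in this file and are selected
 * through of_device_id match data, e.g. for the "qcom,sdhci-msm-v5"
 * compatible):
 *
 *      static const struct sdhci_msm_variant_info sdhci_msm_v5_variant = {
 *              .mci_removed = true,
 *              .var_ops = &v5_var_ops,
 *              .offset = &sdhci_msm_v5_offset,
 *      };
 */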

struct sdhci_msm_host {
        struct platform_device *pdev;
        void __iomem *core_mem; /* MSM SDCC mapped address */
        int pwr_irq;            /* power irq */
        struct clk *bus_clk;    /* SDHC bus voter clock */
        struct clk *xo_clk;     /* TCXO clk needed for FLL feature of cm_dll */
        struct clk_bulk_data bulk_clks[4]; /* core, iface, cal, sleep clocks */
        unsigned long clk_rate;
        struct mmc_host *mmc;
        struct opp_table *opp_table;
        bool use_14lpp_dll_reset;
        bool tuning_done;
        bool calibration_done;
        u8 saved_tuning_phase;
        bool use_cdclp533;
        u32 curr_pwr_state;
        u32 curr_io_level;
        wait_queue_head_t pwr_irq_wait;
        bool pwr_irq_flag;
        u32 caps_0;
        bool mci_removed;
        bool restore_dll_config;
        const struct sdhci_msm_variant_ops *var_ops;
        const struct sdhci_msm_offset *offset;
        bool use_cdr;
        u32 transfer_mode;
        bool updated_ddr_cfg;
        bool uses_tassadar_dll;
        u32 dll_config;
        u32 ddr_config;
        bool vqmmc_enabled;
};

static const struct sdhci_msm_offset *sdhci_priv_msm_offset(struct sdhci_host *host)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

        return msm_host->offset;
}

/*
 * APIs to read/write to vendor specific registers which were there in the
 * core_mem region before MCI was removed.
 */
static u32 sdhci_msm_mci_variant_readl_relaxed(struct sdhci_host *host,
                u32 offset)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

        return readl_relaxed(msm_host->core_mem + offset);
}

static u32 sdhci_msm_v5_variant_readl_relaxed(struct sdhci_host *host,
                u32 offset)
{
        return readl_relaxed(host->ioaddr + offset);
}

static void sdhci_msm_mci_variant_writel_relaxed(u32 val,
                struct sdhci_host *host, u32 offset)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

        writel_relaxed(val, msm_host->core_mem + offset);
}

static void sdhci_msm_v5_variant_writel_relaxed(u32 val,
                struct sdhci_host *host, u32 offset)
{
        writel_relaxed(val, host->ioaddr + offset);
}

static unsigned int msm_get_clock_rate_for_bus_mode(struct sdhci_host *host,
                                                    unsigned int clock)
{
        struct mmc_ios ios = host->mmc->ios;
        /*
         * The SDHC requires the internal clock frequency to be double the
         * actual clock that will be set for DDR mode. The controller
         * uses the faster clock (100/400 MHz) for some of its parts and
         * sends the actual required clock (50/200 MHz) to the card.
         */
        if (ios.timing == MMC_TIMING_UHS_DDR50 ||
            ios.timing == MMC_TIMING_MMC_DDR52 ||
            ios.timing == MMC_TIMING_MMC_HS400 ||
            host->flags & SDHCI_HS400_TUNING)
                clock *= 2;
        return clock;
}
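
/*
 * E.g. a requested 200 MHz card clock in HS400 (or a 50 MHz card clock in
 * DDR52) makes the helper above return 400 MHz (100 MHz) for GCC.
 */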

static void msm_set_clock_rate_for_bus_mode(struct sdhci_host *host,
                                            unsigned int clock)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
        struct mmc_ios curr_ios = host->mmc->ios;
        struct clk *core_clk = msm_host->bulk_clks[0].clk;
        int rc;

        clock = msm_get_clock_rate_for_bus_mode(host, clock);
        rc = dev_pm_opp_set_rate(mmc_dev(host->mmc), clock);
        if (rc) {
                pr_err("%s: Failed to set clock at rate %u at timing %d\n",
                       mmc_hostname(host->mmc), clock,
                       curr_ios.timing);
                return;
        }
        msm_host->clk_rate = clock;
        pr_debug("%s: Setting clock at rate %lu at timing %d\n",
                 mmc_hostname(host->mmc), clk_get_rate(core_clk),
                 curr_ios.timing);
}

/* Platform specific tuning */
static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host, u8 poll)
{
        u32 wait_cnt = 50;
        u8 ck_out_en;
        struct mmc_host *mmc = host->mmc;
        const struct sdhci_msm_offset *msm_offset =
                                        sdhci_priv_msm_offset(host);

        /* Poll for CK_OUT_EN bit. max. poll time = 50us */
        ck_out_en = !!(readl_relaxed(host->ioaddr +
                        msm_offset->core_dll_config) & CORE_CK_OUT_EN);

        while (ck_out_en != poll) {
                if (--wait_cnt == 0) {
                        dev_err(mmc_dev(mmc), "%s: CK_OUT_EN bit is not %d\n",
                                mmc_hostname(mmc), poll);
                        return -ETIMEDOUT;
                }
                udelay(1);

                ck_out_en = !!(readl_relaxed(host->ioaddr +
                                msm_offset->core_dll_config) & CORE_CK_OUT_EN);
        }

        return 0;
}

static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
{
        int rc;
        static const u8 grey_coded_phase_table[] = {
                0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4,
                0xc, 0xd, 0xf, 0xe, 0xa, 0xb, 0x9, 0x8
        };
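        /*
         * The table above is the standard 4-bit Gray-code sequence, so
         * stepping through phases 0..15 changes only one CDR_SELEXT bit
         * at a time.
         */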
        unsigned long flags;
        u32 config;
        struct mmc_host *mmc = host->mmc;
        const struct sdhci_msm_offset *msm_offset =
                                        sdhci_priv_msm_offset(host);

        if (phase > 0xf)
                return -EINVAL;

        spin_lock_irqsave(&host->lock, flags);

        config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
        config &= ~(CORE_CDR_EN | CORE_CK_OUT_EN);
        config |= (CORE_CDR_EXT_EN | CORE_DLL_EN);
        writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);

        /* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '0' */
        rc = msm_dll_poll_ck_out_en(host, 0);
        if (rc)
                goto err_out;

        /*
         * Write the selected DLL clock output phase (0 ... 15)
         * to CDR_SELEXT bit field of DLL_CONFIG register.
         */
        config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
        config &= ~CDR_SELEXT_MASK;
        config |= grey_coded_phase_table[phase] << CDR_SELEXT_SHIFT;
        writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);

        config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
        config |= CORE_CK_OUT_EN;
        writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);

        /* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '1' */
        rc = msm_dll_poll_ck_out_en(host, 1);
        if (rc)
                goto err_out;

        config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
        config |= CORE_CDR_EN;
        config &= ~CORE_CDR_EXT_EN;
        writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);
        goto out;

err_out:
        dev_err(mmc_dev(mmc), "%s: Failed to set DLL phase: %d\n",
                mmc_hostname(mmc), phase);
out:
        spin_unlock_irqrestore(&host->lock, flags);
        return rc;
}

/*
 * Find out the greatest range of consecutive selected
 * DLL clock output phases that can be used as sampling
 * setting for SD3.0 UHS-I card read operation (in SDR104
 * timing mode) or for eMMC4.5 card read operation (in
 * HS400/HS200 timing mode).
 * Select the phase 3/4 of the way through that range and
 * configure the DLL with the selected DLL clock output phase.
 */
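/*
 * Worked example: if tuning passed at phases {0, 1, 2, 10, 11, 12, 13, 14,
 * 15}, the two windows wrap around 15->0 and are merged into one 9-phase
 * window {10, ..., 15, 0, 1, 2}; the 3/4 point of that merged window
 * selects phase 15.
 */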

static int msm_find_most_appropriate_phase(struct sdhci_host *host,
                                           u8 *phase_table, u8 total_phases)
{
        int ret;
        u8 ranges[MAX_PHASES][MAX_PHASES] = { {0}, {0} };
        u8 phases_per_row[MAX_PHASES] = { 0 };
        int row_index = 0, col_index = 0, selected_row_index = 0, curr_max = 0;
        int i, cnt, phase_0_raw_index = 0, phase_15_raw_index = 0;
        bool phase_0_found = false, phase_15_found = false;
        struct mmc_host *mmc = host->mmc;

        if (!total_phases || (total_phases > MAX_PHASES)) {
                dev_err(mmc_dev(mmc), "%s: Invalid argument: total_phases=%d\n",
                        mmc_hostname(mmc), total_phases);
                return -EINVAL;
        }

        for (cnt = 0; cnt < total_phases; cnt++) {
                ranges[row_index][col_index] = phase_table[cnt];
                phases_per_row[row_index] += 1;
                col_index++;

                if ((cnt + 1) == total_phases) {
                        continue;
                /* check if next phase in phase_table is consecutive or not */
                } else if ((phase_table[cnt] + 1) != phase_table[cnt + 1]) {
                        row_index++;
                        col_index = 0;
                }
        }

        if (row_index >= MAX_PHASES)
                return -EINVAL;

        /* Check if phase-0 is present in the first valid window */
        if (!ranges[0][0]) {
                phase_0_found = true;
                phase_0_raw_index = 0;
                /* Check if a cycle exists between the 2 valid windows */
                for (cnt = 1; cnt <= row_index; cnt++) {
                        if (phases_per_row[cnt]) {
                                for (i = 0; i < phases_per_row[cnt]; i++) {
                                        if (ranges[cnt][i] == 15) {
                                                phase_15_found = true;
                                                phase_15_raw_index = cnt;
                                                break;
                                        }
                                }
                        }
                }
        }

        /* If the 2 valid windows form a cycle then merge them as a single window */
        if (phase_0_found && phase_15_found) {
                /* number of phases in the row where phase 0 is present */
                u8 phases_0 = phases_per_row[phase_0_raw_index];
                /* number of phases in the row where phase 15 is present */
                u8 phases_15 = phases_per_row[phase_15_raw_index];

                if (phases_0 + phases_15 >= MAX_PHASES)
                        /*
                         * If there is more than one phase window, then the
                         * total number of phases in both windows should not
                         * be more than or equal to MAX_PHASES.
                         */
                        return -EINVAL;

                /* Merge the 2 cyclic windows */
                i = phases_15;
                for (cnt = 0; cnt < phases_0; cnt++) {
                        ranges[phase_15_raw_index][i] =
                                ranges[phase_0_raw_index][cnt];
                        if (++i >= MAX_PHASES)
                                break;
                }

                phases_per_row[phase_0_raw_index] = 0;
                phases_per_row[phase_15_raw_index] = phases_15 + phases_0;
        }

        for (cnt = 0; cnt <= row_index; cnt++) {
                if (phases_per_row[cnt] > curr_max) {
                        curr_max = phases_per_row[cnt];
                        selected_row_index = cnt;
                }
        }

        i = (curr_max * 3) / 4;
        if (i)
                i--;

        ret = ranges[selected_row_index][i];

        if (ret >= MAX_PHASES) {
                ret = -EINVAL;
                dev_err(mmc_dev(mmc), "%s: Invalid phase selected=%d\n",
                        mmc_hostname(mmc), ret);
        }

        return ret;
}

static inline void msm_cm_dll_set_freq(struct sdhci_host *host)
{
        u32 mclk_freq = 0, config;
        const struct sdhci_msm_offset *msm_offset =
                                        sdhci_priv_msm_offset(host);

        /* Program the MCLK value to MCLK_FREQ bit field */
        if (host->clock <= 112000000)
                mclk_freq = 0;
        else if (host->clock <= 125000000)
                mclk_freq = 1;
        else if (host->clock <= 137000000)
                mclk_freq = 2;
        else if (host->clock <= 150000000)
                mclk_freq = 3;
        else if (host->clock <= 162000000)
                mclk_freq = 4;
        else if (host->clock <= 175000000)
                mclk_freq = 5;
        else if (host->clock <= 187000000)
                mclk_freq = 6;
        else if (host->clock <= 200000000)
                mclk_freq = 7;
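        /*
         * E.g. a 200 MHz MCLK programs code 7; each band above spans
         * roughly 12.5 MHz, starting at 112 MHz.
         */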

        config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
        config &= ~CMUX_SHIFT_PHASE_MASK;
        config |= mclk_freq << CMUX_SHIFT_PHASE_SHIFT;
        writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);
}

/* Initialize the DLL (Programmable Delay Line) */
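/*
 * Sequence: force the bus clock on (PWRSAVE off), hold the DLL in reset and
 * power-down, program the output frequency (and, on use_14lpp_dll_reset
 * parts, the FLL cycle count derived from the TCXO rate), release reset and
 * power-down, then wait up to ~50us for DLL_LOCK.
 */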
static int msm_init_cm_dll(struct sdhci_host *host)
{
        struct mmc_host *mmc = host->mmc;
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
        int wait_cnt = 50;
        unsigned long flags, xo_clk = 0;
        u32 config;
        const struct sdhci_msm_offset *msm_offset =
                                        msm_host->offset;

        if (msm_host->use_14lpp_dll_reset && !IS_ERR_OR_NULL(msm_host->xo_clk))
                xo_clk = clk_get_rate(msm_host->xo_clk);

        spin_lock_irqsave(&host->lock, flags);

        /*
         * Make sure that clock is always enabled when DLL
         * tuning is in progress. Keeping PWRSAVE ON may
         * turn off the clock.
         */
        config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
        config &= ~CORE_CLK_PWRSAVE;
        writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);

        if (msm_host->dll_config)
                writel_relaxed(msm_host->dll_config,
                               host->ioaddr + msm_offset->core_dll_config);

        if (msm_host->use_14lpp_dll_reset) {
                config = readl_relaxed(host->ioaddr +
                                msm_offset->core_dll_config);
                config &= ~CORE_CK_OUT_EN;
                writel_relaxed(config, host->ioaddr +
                                msm_offset->core_dll_config);

                config = readl_relaxed(host->ioaddr +
                                msm_offset->core_dll_config_2);
                config |= CORE_DLL_CLOCK_DISABLE;
                writel_relaxed(config, host->ioaddr +
                                msm_offset->core_dll_config_2);
        }

        config = readl_relaxed(host->ioaddr +
                        msm_offset->core_dll_config);
        config |= CORE_DLL_RST;
        writel_relaxed(config, host->ioaddr +
                        msm_offset->core_dll_config);

        config = readl_relaxed(host->ioaddr +
                        msm_offset->core_dll_config);
        config |= CORE_DLL_PDN;
        writel_relaxed(config, host->ioaddr +
                        msm_offset->core_dll_config);

        if (!msm_host->dll_config)
                msm_cm_dll_set_freq(host);

        if (msm_host->use_14lpp_dll_reset &&
            !IS_ERR_OR_NULL(msm_host->xo_clk)) {
                u32 mclk_freq = 0;
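                /*
                 * CORE_FLL_CYCLE_CNT selects whether the FLL runs 8 or 4
                 * DLL cycles per xo_clk cycle; bits [17:10] are programmed
                 * with the matching MCLK/TCXO ratio (xo_clk is typically
                 * 19.2 MHz on Qualcomm SoCs).
                 */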

                config = readl_relaxed(host->ioaddr +
                                msm_offset->core_dll_config_2);
                config &= CORE_FLL_CYCLE_CNT;
                if (config)
                        mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 8),
                                        xo_clk);
                else
                        mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 4),
                                        xo_clk);

                config = readl_relaxed(host->ioaddr +
                                msm_offset->core_dll_config_2);
                config &= ~(0xFF << 10);
                config |= mclk_freq << 10;

                writel_relaxed(config, host->ioaddr +
                                msm_offset->core_dll_config_2);
                /* wait for 5us before enabling DLL clock */
                udelay(5);
        }

        config = readl_relaxed(host->ioaddr +
                        msm_offset->core_dll_config);
        config &= ~CORE_DLL_RST;
        writel_relaxed(config, host->ioaddr +
                        msm_offset->core_dll_config);

        config = readl_relaxed(host->ioaddr +
                        msm_offset->core_dll_config);
        config &= ~CORE_DLL_PDN;
        writel_relaxed(config, host->ioaddr +
                        msm_offset->core_dll_config);

        if (msm_host->use_14lpp_dll_reset) {
                if (!msm_host->dll_config)
                        msm_cm_dll_set_freq(host);
                config = readl_relaxed(host->ioaddr +
                                msm_offset->core_dll_config_2);
                config &= ~CORE_DLL_CLOCK_DISABLE;
                writel_relaxed(config, host->ioaddr +
                                msm_offset->core_dll_config_2);
        }

        /*
         * Configure DLL user control register to enable DLL status.
         * This setting is applicable to SDCC v5.1 onwards only.
         */
        if (msm_host->uses_tassadar_dll) {
                config = DLL_USR_CTL_POR_VAL | FINE_TUNE_MODE_EN |
                        ENABLE_DLL_LOCK_STATUS | BIAS_OK_SIGNAL;
                writel_relaxed(config, host->ioaddr +
                                msm_offset->core_dll_usr_ctl);

                config = readl_relaxed(host->ioaddr +
                                msm_offset->core_dll_config_3);
                config &= ~0xFF;
                if (msm_host->clk_rate < 150000000)
                        config |= DLL_CONFIG_3_LOW_FREQ_VAL;
                else
                        config |= DLL_CONFIG_3_HIGH_FREQ_VAL;
                writel_relaxed(config, host->ioaddr +
                                msm_offset->core_dll_config_3);
        }

        config = readl_relaxed(host->ioaddr +
                        msm_offset->core_dll_config);
        config |= CORE_DLL_EN;
        writel_relaxed(config, host->ioaddr +
                        msm_offset->core_dll_config);

        config = readl_relaxed(host->ioaddr +
                        msm_offset->core_dll_config);
        config |= CORE_CK_OUT_EN;
        writel_relaxed(config, host->ioaddr +
                        msm_offset->core_dll_config);

        /* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */
        while (!(readl_relaxed(host->ioaddr + msm_offset->core_dll_status) &
                 CORE_DLL_LOCK)) {
                /* max. wait of 50us for the LOCK bit to be set */
                if (--wait_cnt == 0) {
                        dev_err(mmc_dev(mmc), "%s: DLL failed to LOCK\n",
                                mmc_hostname(mmc));
                        spin_unlock_irqrestore(&host->lock, flags);
                        return -ETIMEDOUT;
                }
                udelay(1);
        }

        spin_unlock_irqrestore(&host->lock, flags);
        return 0;
}

static void msm_hc_select_default(struct sdhci_host *host)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
        u32 config;
        const struct sdhci_msm_offset *msm_offset =
                                        msm_host->offset;

        if (!msm_host->use_cdclp533) {
                config = readl_relaxed(host->ioaddr +
                                msm_offset->core_vendor_spec3);
                config &= ~CORE_PWRSAVE_DLL;
                writel_relaxed(config, host->ioaddr +
                                msm_offset->core_vendor_spec3);
        }

        config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
        config &= ~CORE_HC_MCLK_SEL_MASK;
        config |= CORE_HC_MCLK_SEL_DFLT;
        writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);

        /*
         * Disable HC_SELECT_IN to be able to use the UHS mode select
         * configuration from Host Control2 register for all other
         * modes.
         * Write 0 to HC_SELECT_IN and HC_SELECT_IN_EN field
         * in VENDOR_SPEC_FUNC.
         */
        config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
        config &= ~CORE_HC_SELECT_IN_EN;
        config &= ~CORE_HC_SELECT_IN_MASK;
        writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);

        /*
         * Make sure above writes impacting free running MCLK are completed
         * before changing the clk_rate at GCC.
         */
        wmb();
}

static void msm_hc_select_hs400(struct sdhci_host *host)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
        struct mmc_ios ios = host->mmc->ios;
        u32 config, dll_lock;
        int rc;
        const struct sdhci_msm_offset *msm_offset =
                                        msm_host->offset;

        /* Select the divided clock (free running MCLK/2) */
        config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
        config &= ~CORE_HC_MCLK_SEL_MASK;
        config |= CORE_HC_MCLK_SEL_HS400;

        writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);
        /*
         * Select HS400 mode using the HC_SELECT_IN from VENDOR SPEC
         * register
         */
        if ((msm_host->tuning_done || ios.enhanced_strobe) &&
            !msm_host->calibration_done) {
                config = readl_relaxed(host->ioaddr +
                                msm_offset->core_vendor_spec);
                config |= CORE_HC_SELECT_IN_HS400;
                config |= CORE_HC_SELECT_IN_EN;
                writel_relaxed(config, host->ioaddr +
                                msm_offset->core_vendor_spec);
        }
        if (!msm_host->clk_rate && !msm_host->use_cdclp533) {
                /*
                 * Poll on DLL_LOCK or DDR_DLL_LOCK bits in
                 * core_dll_status to be set. This should get set
                 * within 15 us at 200 MHz.
                 */
                rc = readl_relaxed_poll_timeout(host->ioaddr +
                                                msm_offset->core_dll_status,
                                                dll_lock,
                                                (dll_lock &
                                                (CORE_DLL_LOCK |
                                                CORE_DDR_DLL_LOCK)), 10,
                                                1000);
                if (rc == -ETIMEDOUT)
                        pr_err("%s: Unable to get DLL_LOCK/DDR_DLL_LOCK, dll_status: 0x%08x\n",
                               mmc_hostname(host->mmc), dll_lock);
        }
        /*
         * Make sure above writes impacting free running MCLK are completed
         * before changing the clk_rate at GCC.
         */
        wmb();
}

/*
 * sdhci_msm_hc_select_mode :- In general all timing modes are
 * controlled via UHS mode select in Host Control2 register.
 * eMMC-specific HS200/HS400 don't have their respective modes
 * defined here, hence we use these values.
 *
 * HS200 - SDR104 (since they both are equivalent in functionality)
 * HS400 - This involves multiple configurations
 *              Initially SDR104 - when tuning is required as HS200
 *              Then when switching to DDR @ 400MHz (HS400) we use
 *              the vendor specific HC_SELECT_IN to control the mode.
 *
 * In addition to controlling the modes we also need to select the
 * correct input clock for DLL depending on the mode.
 *
 * HS400 - divided clock (free running MCLK/2)
 * All other modes - default (free running MCLK)
 */
static void sdhci_msm_hc_select_mode(struct sdhci_host *host)
{
        struct mmc_ios ios = host->mmc->ios;

        if (ios.timing == MMC_TIMING_MMC_HS400 ||
            host->flags & SDHCI_HS400_TUNING)
                msm_hc_select_hs400(host);
        else
                msm_hc_select_default(host);
}

static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
        u32 config, calib_done;
        int ret;
        const struct sdhci_msm_offset *msm_offset =
                                        msm_host->offset;

        pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);

        /*
         * Retuning in HS400 (DDR mode) will fail, just reset the
         * tuning block and restore the saved tuning phase.
         */
        ret = msm_init_cm_dll(host);
        if (ret)
                goto out;

        /* Set the selected phase in delay line hw block */
        ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
        if (ret)
                goto out;

        config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
        config |= CORE_CMD_DAT_TRACK_SEL;
        writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);

        config = readl_relaxed(host->ioaddr + msm_offset->core_ddr_200_cfg);
        config &= ~CORE_CDC_T4_DLY_SEL;
        writel_relaxed(config, host->ioaddr + msm_offset->core_ddr_200_cfg);

        config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG);
        config &= ~CORE_CDC_SWITCH_BYPASS_OFF;
        writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_GEN_CFG);

        config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG);
        config |= CORE_CDC_SWITCH_RC_EN;
        writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_GEN_CFG);

        config = readl_relaxed(host->ioaddr + msm_offset->core_ddr_200_cfg);
        config &= ~CORE_START_CDC_TRAFFIC;
        writel_relaxed(config, host->ioaddr + msm_offset->core_ddr_200_cfg);

        /* Perform CDC Register Initialization Sequence */

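        /* The values below are fixed, vendor-provided calibration constants. */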
        writel_relaxed(0x11800EC, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
        writel_relaxed(0x3011111, host->ioaddr + CORE_CSR_CDC_CTLR_CFG1);
        writel_relaxed(0x1201000, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
        writel_relaxed(0x4, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG1);
        writel_relaxed(0xCB732020, host->ioaddr + CORE_CSR_CDC_REFCOUNT_CFG);
        writel_relaxed(0xB19, host->ioaddr + CORE_CSR_CDC_COARSE_CAL_CFG);
        writel_relaxed(0x4E2, host->ioaddr + CORE_CSR_CDC_DELAY_CFG);
        writel_relaxed(0x0, host->ioaddr + CORE_CDC_OFFSET_CFG);
        writel_relaxed(0x16334, host->ioaddr + CORE_CDC_SLAVE_DDA_CFG);

        /* CDC HW Calibration */

        config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
        config |= CORE_SW_TRIG_FULL_CALIB;
        writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

        config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
        config &= ~CORE_SW_TRIG_FULL_CALIB;
        writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

        config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
        config |= CORE_HW_AUTOCAL_ENA;
        writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

        config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
        config |= CORE_TIMER_ENA;
        writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);

        ret = readl_relaxed_poll_timeout(host->ioaddr + CORE_CSR_CDC_STATUS0,
                                         calib_done,
                                         (calib_done & CORE_CALIBRATION_DONE),
                                         1, 50);

        if (ret == -ETIMEDOUT) {
                pr_err("%s: %s: CDC calibration was not completed\n",
                       mmc_hostname(host->mmc), __func__);
                goto out;
        }

        ret = readl_relaxed(host->ioaddr + CORE_CSR_CDC_STATUS0)
                & CORE_CDC_ERROR_CODE_MASK;
        if (ret) {
                pr_err("%s: %s: CDC error code %d\n",
                       mmc_hostname(host->mmc), __func__, ret);
                ret = -EINVAL;
                goto out;
        }

        config = readl_relaxed(host->ioaddr + msm_offset->core_ddr_200_cfg);
        config |= CORE_START_CDC_TRAFFIC;
        writel_relaxed(config, host->ioaddr + msm_offset->core_ddr_200_cfg);
out:
        pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
                 __func__, ret);
        return ret;
}

static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
{
        struct mmc_host *mmc = host->mmc;
        u32 dll_status, config, ddr_cfg_offset;
        int ret;
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
        const struct sdhci_msm_offset *msm_offset =
                                        sdhci_priv_msm_offset(host);

        pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);

        /*
         * The core_ddr_config register currently defaults to the desired
         * configuration on reset. Reprogram the power-on reset (POR) value
         * anyway, in case it has been modified by bootloaders. In the
         * future, if the default changes, the desired values will need to
         * be programmed appropriately.
         */
        if (msm_host->updated_ddr_cfg)
                ddr_cfg_offset = msm_offset->core_ddr_config;
        else
                ddr_cfg_offset = msm_offset->core_ddr_config_old;
        writel_relaxed(msm_host->ddr_config, host->ioaddr + ddr_cfg_offset);

        if (mmc->ios.enhanced_strobe) {
                config = readl_relaxed(host->ioaddr +
                                msm_offset->core_ddr_200_cfg);
                config |= CORE_CMDIN_RCLK_EN;
                writel_relaxed(config, host->ioaddr +
                                msm_offset->core_ddr_200_cfg);
        }

        config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config_2);
        config |= CORE_DDR_CAL_EN;
        writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config_2);

        ret = readl_relaxed_poll_timeout(host->ioaddr +
                                         msm_offset->core_dll_status,
                                         dll_status,
                                         (dll_status & CORE_DDR_DLL_LOCK),
                                         10, 1000);

        if (ret == -ETIMEDOUT) {
                pr_err("%s: %s: CM_DLL_SDC4 calibration was not completed\n",
                       mmc_hostname(host->mmc), __func__);
                goto out;
        }

        /*
         * Set CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3.
         * When MCLK is gated OFF, it is not gated for less than 0.5us
         * and MCLK must be switched on for at least 1us before DATA
         * starts coming. Controllers with 14lpp and later tech DLL cannot
         * guarantee the above requirement. So PWRSAVE_DLL should not be
         * turned on for host controllers using this DLL.
         */
        if (!msm_host->use_14lpp_dll_reset) {
                config = readl_relaxed(host->ioaddr +
                                msm_offset->core_vendor_spec3);
                config |= CORE_PWRSAVE_DLL;
                writel_relaxed(config, host->ioaddr +
                                msm_offset->core_vendor_spec3);
        }

        /*
         * Drain the writebuffer to ensure the above DLL calibration
         * and PWRSAVE DLL writes have completed.
         */
        wmb();
out:
        pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
                 __func__, ret);
        return ret;
}

static int sdhci_msm_hs400_dll_calibration(struct sdhci_host *host)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
        struct mmc_host *mmc = host->mmc;
        int ret;
        u32 config;
        const struct sdhci_msm_offset *msm_offset =
                                        msm_host->offset;

        pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);

        /*
         * Retuning in HS400 (DDR mode) will fail, just reset the
         * tuning block and restore the saved tuning phase.
         */
        ret = msm_init_cm_dll(host);
        if (ret)
                goto out;

        if (!mmc->ios.enhanced_strobe) {
                /* Set the selected phase in delay line hw block */
                ret = msm_config_cm_dll_phase(host,
                                              msm_host->saved_tuning_phase);
                if (ret)
                        goto out;
                config = readl_relaxed(host->ioaddr +
                                msm_offset->core_dll_config);
                config |= CORE_CMD_DAT_TRACK_SEL;
                writel_relaxed(config, host->ioaddr +
                                msm_offset->core_dll_config);
        }

        if (msm_host->use_cdclp533)
                ret = sdhci_msm_cdclp533_calibration(host);
        else
                ret = sdhci_msm_cm_dll_sdc4_calibration(host);
out:
        pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
                 __func__, ret);
        return ret;
}

static bool sdhci_msm_is_tuning_needed(struct sdhci_host *host)
{
        struct mmc_ios *ios = &host->mmc->ios;

        /*
         * Tuning is required for SDR104, HS200 and HS400 modes, and only
         * when the clock frequency is greater than 100MHz. It is skipped
         * when enhanced strobe is in use.
         */
        if (host->clock <= CORE_FREQ_100MHZ ||
            !(ios->timing == MMC_TIMING_MMC_HS400 ||
              ios->timing == MMC_TIMING_MMC_HS200 ||
              ios->timing == MMC_TIMING_UHS_SDR104) ||
            ios->enhanced_strobe)
                return false;

        return true;
}

static int sdhci_msm_restore_sdr_dll_config(struct sdhci_host *host)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
        int ret;

        /*
         * The SDR DLL comes into the picture only for timing modes which
         * need tuning.
         */
        if (!sdhci_msm_is_tuning_needed(host))
                return 0;

        /* Reset the tuning block */
        ret = msm_init_cm_dll(host);
        if (ret)
                return ret;

        /* Restore the tuning block */
        ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);

        return ret;
}

static void sdhci_msm_set_cdr(struct sdhci_host *host, bool enable)
{
        const struct sdhci_msm_offset *msm_offset = sdhci_priv_msm_offset(host);
        u32 config, oldconfig = readl_relaxed(host->ioaddr +
                                              msm_offset->core_dll_config);

        config = oldconfig;
        if (enable) {
                config |= CORE_CDR_EN;
                config &= ~CORE_CDR_EXT_EN;
        } else {
                config &= ~CORE_CDR_EN;
                config |= CORE_CDR_EXT_EN;
        }

        if (config != oldconfig) {
                writel_relaxed(config, host->ioaddr +
                               msm_offset->core_dll_config);
        }
}

static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
        struct sdhci_host *host = mmc_priv(mmc);
        int tuning_seq_cnt = 10;
        u8 phase, tuned_phases[16], tuned_phase_cnt = 0;
        int rc;
        struct mmc_ios ios = host->mmc->ios;
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

        if (!sdhci_msm_is_tuning_needed(host)) {
                msm_host->use_cdr = false;
                sdhci_msm_set_cdr(host, false);
                return 0;
        }

        /* Clock-Data-Recovery used to dynamically adjust RX sampling point */
        msm_host->use_cdr = true;

        /*
         * Clear tuning_done flag before tuning to ensure proper
         * HS400 settings.
         */
        msm_host->tuning_done = 0;

        /*
         * HS400 tuning in HS200 timing requires:
         * - select MCLK/2 in VENDOR_SPEC
         * - program MCLK to 400MHz (or nearest supported) in GCC
         */
        if (host->flags & SDHCI_HS400_TUNING) {
                sdhci_msm_hc_select_mode(host);
                msm_set_clock_rate_for_bus_mode(host, ios.clock);
                host->flags &= ~SDHCI_HS400_TUNING;
        }

retry:
        /* First of all reset the tuning block */
        rc = msm_init_cm_dll(host);
        if (rc)
                return rc;

        phase = 0;
        do {
                /* Set the phase in delay line hw block */
                rc = msm_config_cm_dll_phase(host, phase);
                if (rc)
                        return rc;

                rc = mmc_send_tuning(mmc, opcode, NULL);
                if (!rc) {
                        /* Tuning is successful at this tuning point */
                        tuned_phases[tuned_phase_cnt++] = phase;
                        dev_dbg(mmc_dev(mmc), "%s: Found good phase = %d\n",
                                mmc_hostname(mmc), phase);
                }
        } while (++phase < ARRAY_SIZE(tuned_phases));

        if (tuned_phase_cnt) {
                if (tuned_phase_cnt == ARRAY_SIZE(tuned_phases)) {
                        /*
                         * All phases valid is _almost_ as bad as no phases
                         * valid. Probably all phases are not really reliable
                         * but we didn't detect where the unreliable place is.
                         * That means we'll essentially be guessing and hoping
                         * we get a good phase. Better to try a few times.
                         */
                        dev_dbg(mmc_dev(mmc), "%s: All phases valid; try again\n",
                                mmc_hostname(mmc));
                        if (--tuning_seq_cnt) {
                                tuned_phase_cnt = 0;
                                goto retry;
                        }
                }

                rc = msm_find_most_appropriate_phase(host, tuned_phases,
                                                     tuned_phase_cnt);
                if (rc < 0)
                        return rc;
                else
                        phase = rc;

                /*
                 * Finally set the selected phase in delay
                 * line hw block.
                 */
                rc = msm_config_cm_dll_phase(host, phase);
                if (rc)
                        return rc;
                msm_host->saved_tuning_phase = phase;
                dev_dbg(mmc_dev(mmc), "%s: Setting the tuning phase to %d\n",
                        mmc_hostname(mmc), phase);
        } else {
                if (--tuning_seq_cnt)
                        goto retry;
                /* Tuning failed */
                dev_dbg(mmc_dev(mmc), "%s: No tuning point found\n",
                        mmc_hostname(mmc));
                rc = -EIO;
        }

        if (!rc)
                msm_host->tuning_done = true;
        return rc;
}

/*
 * sdhci_msm_hs400 - Calibrate the DLL for HS400 bus speed mode operation.
 * This needs to be done for both tuning and enhanced_strobe mode.
 * DLL operation is only needed for clock > 100MHz. For clock <= 100MHz
 * a fixed feedback clock is used.
 */
static void sdhci_msm_hs400(struct sdhci_host *host, struct mmc_ios *ios)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
        int ret;

        if (host->clock > CORE_FREQ_100MHZ &&
            (msm_host->tuning_done || ios->enhanced_strobe) &&
            !msm_host->calibration_done) {
                ret = sdhci_msm_hs400_dll_calibration(host);
                if (!ret)
                        msm_host->calibration_done = true;
                else
                        pr_err("%s: Failed to calibrate DLL for hs400 mode (%d)\n",
                               mmc_hostname(host->mmc), ret);
        }
}

static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
                                        unsigned int uhs)
{
        struct mmc_host *mmc = host->mmc;
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
        u16 ctrl_2;
        u32 config;
        const struct sdhci_msm_offset *msm_offset =
                                        msm_host->offset;

        ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
        /* Select Bus Speed Mode for host */
        ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
        switch (uhs) {
        case MMC_TIMING_UHS_SDR12:
                ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
                break;
        case MMC_TIMING_UHS_SDR25:
                ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
                break;
        case MMC_TIMING_UHS_SDR50:
                ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
                break;
        case MMC_TIMING_MMC_HS400:
        case MMC_TIMING_MMC_HS200:
        case MMC_TIMING_UHS_SDR104:
                ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
                break;
        case MMC_TIMING_UHS_DDR50:
        case MMC_TIMING_MMC_DDR52:
                ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
                break;
        }

        /*
         * When clock frequency is less than 100MHz, the feedback clock must be
         * provided and DLL must not be used so that tuning can be skipped. To
         * provide feedback clock, the mode selection can be any value less
         * than 3'b011 in bits [2:0] of HOST CONTROL2 register.
         */
        if (host->clock <= CORE_FREQ_100MHZ) {
                if (uhs == MMC_TIMING_MMC_HS400 ||
                    uhs == MMC_TIMING_MMC_HS200 ||
                    uhs == MMC_TIMING_UHS_SDR104)
                        ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
                /*
                 * The DLL is not required for clock <= 100MHz.
                 * Thus, make sure it is disabled when not required.
                 */
                config = readl_relaxed(host->ioaddr +
                                msm_offset->core_dll_config);
                config |= CORE_DLL_RST;
                writel_relaxed(config, host->ioaddr +
                                msm_offset->core_dll_config);

                config = readl_relaxed(host->ioaddr +
                                msm_offset->core_dll_config);
                config |= CORE_DLL_PDN;
                writel_relaxed(config, host->ioaddr +
                                msm_offset->core_dll_config);

                /*
                 * The DLL needs to be restored and CDCLP533 recalibrated
                 * when the clock frequency is set back to 400MHz.
                 */
                msm_host->calibration_done = false;
        }

        dev_dbg(mmc_dev(mmc), "%s: clock=%u uhs=%u ctrl_2=0x%x\n",
                mmc_hostname(host->mmc), host->clock, uhs, ctrl_2);
        sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);

        if (mmc->ios.timing == MMC_TIMING_MMC_HS400)
                sdhci_msm_hs400(host, &mmc->ios);
}

static int sdhci_msm_set_pincfg(struct sdhci_msm_host *msm_host, bool level)
{
        struct platform_device *pdev = msm_host->pdev;
        int ret;

        if (level)
                ret = pinctrl_pm_select_default_state(&pdev->dev);
        else
                ret = pinctrl_pm_select_sleep_state(&pdev->dev);

        return ret;
}

static int sdhci_msm_set_vmmc(struct mmc_host *mmc)
{
        if (IS_ERR(mmc->supply.vmmc))
                return 0;

        return mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, mmc->ios.vdd);
}

static int msm_toggle_vqmmc(struct sdhci_msm_host *msm_host,
                            struct mmc_host *mmc, bool level)
{
        int ret;
        struct mmc_ios ios;

        if (msm_host->vqmmc_enabled == level)
                return 0;

        if (level) {
                /* Set the IO voltage regulator to default voltage level */
                if (msm_host->caps_0 & CORE_3_0V_SUPPORT)
                        ios.signal_voltage = MMC_SIGNAL_VOLTAGE_330;
                else if (msm_host->caps_0 & CORE_1_8V_SUPPORT)
                        ios.signal_voltage = MMC_SIGNAL_VOLTAGE_180;

                if (msm_host->caps_0 & CORE_VOLT_SUPPORT) {
                        ret = mmc_regulator_set_vqmmc(mmc, &ios);
                        if (ret < 0) {
                                dev_err(mmc_dev(mmc), "%s: vqmmc set voltage failed: %d\n",
                                        mmc_hostname(mmc), ret);
                                goto out;
                        }
                }
                ret = regulator_enable(mmc->supply.vqmmc);
        } else {
                ret = regulator_disable(mmc->supply.vqmmc);
        }

        if (ret)
                dev_err(mmc_dev(mmc), "%s: vqmmc %sable failed: %d\n",
                        mmc_hostname(mmc), level ? "en":"dis", ret);
        else
                msm_host->vqmmc_enabled = level;
out:
        return ret;
}

static int msm_config_vqmmc_mode(struct sdhci_msm_host *msm_host,
                                 struct mmc_host *mmc, bool hpm)
{
        int load, ret;

        load = hpm ? MMC_VQMMC_MAX_LOAD_UA : 0;
        ret = regulator_set_load(mmc->supply.vqmmc, load);
        if (ret)
                dev_err(mmc_dev(mmc), "%s: vqmmc set load failed: %d\n",
                        mmc_hostname(mmc), ret);
        return ret;
}

static int sdhci_msm_set_vqmmc(struct sdhci_msm_host *msm_host,
                               struct mmc_host *mmc, bool level)
{
        int ret;
        bool always_on;

        if (IS_ERR(mmc->supply.vqmmc) ||
            (mmc->ios.power_mode == MMC_POWER_UNDEFINED))
                return 0;
        /*
         * For eMMC, don't turn off Vqmmc; instead, just configure it in LPM
         * and HPM modes by setting the corresponding load.
         *
         * Until eMMC is initialized (i.e. always_on == 0), just turn on/off
         * Vqmmc. Vqmmc gets turned off only if init fails and mmc_power_off
         * gets invoked. Once eMMC is initialized (i.e. always_on == 1),
         * Vqmmc should remain ON, so just set the load instead of turning it
         * off/on.
         */
        always_on = !mmc_card_is_removable(mmc) &&
                    mmc->card && mmc_card_mmc(mmc->card);

        if (always_on)
                ret = msm_config_vqmmc_mode(msm_host, mmc, level);
        else
                ret = msm_toggle_vqmmc(msm_host, mmc, level);

        return ret;
}
1473
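/*
 * Power-IRQ handshake: callers that toggle bus power or the IO level block in
 * sdhci_msm_check_power_status() on pwr_irq_wait until sdhci_msm_pwr_irq()
 * sets pwr_irq_flag and wakes them up.
 */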
static inline void sdhci_msm_init_pwr_irq_wait(struct sdhci_msm_host *msm_host)
{
        init_waitqueue_head(&msm_host->pwr_irq_wait);
}

static inline void sdhci_msm_complete_pwr_irq_wait(
                struct sdhci_msm_host *msm_host)
{
        wake_up(&msm_host->pwr_irq_wait);
}

/*
 * sdhci_msm_check_power_status API should be called when register writes
 * which can toggle the sdhci IO bus ON/OFF or change the IO lines HIGH/LOW
 * happen. The state to which the register writes will change the IO lines
 * should be passed as the argument req_type. This API will check whether
 * the IO line's state is already the expected state and will wait for the
 * power irq only if the power irq is expected to be triggered based on the
 * current IO line state and the expected IO line state.
 */
static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
        bool done = false;
        u32 val = SWITCHABLE_SIGNALING_VOLTAGE;
        const struct sdhci_msm_offset *msm_offset =
                                        msm_host->offset;

        pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n",
                 mmc_hostname(host->mmc), __func__, req_type,
                 msm_host->curr_pwr_state, msm_host->curr_io_level);

        /*
         * The power interrupt will not be generated for signal voltage
         * switches if SWITCHABLE_SIGNALING_VOLTAGE in MCI_GENERICS is not set.
         * Since sdhci-msm-v5, this bit has been removed and SW must consider
         * it as always set.
         */
        if (!msm_host->mci_removed)
                val = msm_host_readl(msm_host, host,
                                     msm_offset->core_generics);
        if ((req_type & REQ_IO_HIGH || req_type & REQ_IO_LOW) &&
            !(val & SWITCHABLE_SIGNALING_VOLTAGE)) {
                return;
        }

        /*
         * The IRQ for request type IO High/LOW will be generated when
         * there is a state change in the 1.8V enable bit (bit 3) of the
         * SDHCI_HOST_CONTROL2 register. The reset state of that bit is 0,
         * which indicates 3.3V IO voltage. So, when the MMC core layer tries
         * to set it to 3.3V before card detection happens, the
         * IRQ doesn't get triggered as there is no state change in this bit.
         * The driver already handles this case by changing the IO voltage
         * level to high as part of the controller power-up sequence. Hence,
         * check for host->pwr to handle a case where an IO voltage high
         * request is issued even before controller power-up.
         */
        if ((req_type & REQ_IO_HIGH) && !host->pwr) {
                pr_debug("%s: do not wait for power IRQ that never comes, req_type: %d\n",
                         mmc_hostname(host->mmc), req_type);
                return;
        }
        if ((req_type & msm_host->curr_pwr_state) ||
            (req_type & msm_host->curr_io_level))
                done = true;
        /*
         * This is needed here to handle cases where register writes will
         * not change the current bus state or io level of the controller.
         * In this case, no power irq will be triggered and we should
         * not wait.
         */
        if (!done) {
                if (!wait_event_timeout(msm_host->pwr_irq_wait,
                                        msm_host->pwr_irq_flag,
                                        msecs_to_jiffies(MSM_PWR_IRQ_TIMEOUT_MS)))
                        dev_warn(&msm_host->pdev->dev,
                                 "%s: pwr_irq for req: (%d) timed out\n",
                                 mmc_hostname(host->mmc), req_type);
        }
        pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc),
                 __func__, req_type);
}

static void sdhci_msm_dump_pwr_ctrl_regs(struct sdhci_host *host)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
        const struct sdhci_msm_offset *msm_offset =
                                        msm_host->offset;

        pr_err("%s: PWRCTL_STATUS: 0x%08x | PWRCTL_MASK: 0x%08x | PWRCTL_CTL: 0x%08x\n",
               mmc_hostname(host->mmc),
               msm_host_readl(msm_host, host, msm_offset->core_pwrctl_status),
               msm_host_readl(msm_host, host, msm_offset->core_pwrctl_mask),
               msm_host_readl(msm_host, host, msm_offset->core_pwrctl_ctl));
}

static void sdhci_msm_handle_pwr_irq(struct sdhci_host *host, int irq)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
        struct mmc_host *mmc = host->mmc;
        u32 irq_status, irq_ack = 0;
        int retry = 10, ret;
        u32 pwr_state = 0, io_level = 0;
        u32 config;
        const struct sdhci_msm_offset *msm_offset = msm_host->offset;

        irq_status = msm_host_readl(msm_host, host,
                                    msm_offset->core_pwrctl_status);
        irq_status &= INT_MASK;

        msm_host_writel(msm_host, irq_status, host,
                        msm_offset->core_pwrctl_clear);

        /*
         * There is a rare HW scenario where the first clear pulse could be
         * lost when the actual reset and the clear/read of the status
         * register happen at the same time. Hence, retry at least 10 times
         * to make sure the status register is cleared. Otherwise, this will
         * result in a spurious power IRQ, resulting in system instability.
         */
        while (irq_status & msm_host_readl(msm_host, host,
                                           msm_offset->core_pwrctl_status)) {
                if (retry == 0) {
                        pr_err("%s: Timed out clearing (0x%x) pwrctl status register\n",
                               mmc_hostname(host->mmc), irq_status);
                        sdhci_msm_dump_pwr_ctrl_regs(host);
                        WARN_ON(1);
                        break;
                }
                msm_host_writel(msm_host, irq_status, host,
                                msm_offset->core_pwrctl_clear);
                retry--;
                udelay(10);
        }

        /* Handle BUS ON/OFF */
        if (irq_status & CORE_PWRCTL_BUS_ON) {
                pwr_state = REQ_BUS_ON;
                io_level = REQ_IO_HIGH;
        }
        if (irq_status & CORE_PWRCTL_BUS_OFF) {
                pwr_state = REQ_BUS_OFF;
                io_level = REQ_IO_LOW;
        }

        if (pwr_state) {
                ret = sdhci_msm_set_vmmc(mmc);
                if (!ret)
                        ret = sdhci_msm_set_vqmmc(msm_host, mmc,
                                        pwr_state & REQ_BUS_ON);
                if (!ret)
                        ret = sdhci_msm_set_pincfg(msm_host,
                                        pwr_state & REQ_BUS_ON);
                if (!ret)
                        irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
                else
                        irq_ack |= CORE_PWRCTL_BUS_FAIL;
        }

        /* Handle IO LOW/HIGH */
        if (irq_status & CORE_PWRCTL_IO_LOW)
                io_level = REQ_IO_LOW;

        if (irq_status & CORE_PWRCTL_IO_HIGH)
                io_level = REQ_IO_HIGH;

        if (io_level)
                irq_ack |= CORE_PWRCTL_IO_SUCCESS;

        if (io_level && !IS_ERR(mmc->supply.vqmmc) && !pwr_state) {
                ret = mmc_regulator_set_vqmmc(mmc, &mmc->ios);
                if (ret < 0) {
                        dev_err(mmc_dev(mmc), "%s: IO_level setting failed(%d). signal_voltage: %d, vdd: %d irq_status: 0x%08x\n",
                                mmc_hostname(mmc), ret,
                                mmc->ios.signal_voltage, mmc->ios.vdd,
                                irq_status);
                        irq_ack |= CORE_PWRCTL_IO_FAIL;
                }
        }

        /*
         * The driver has to acknowledge the interrupt, switch voltages and
         * report back to this register if it succeeded or not. The voltage
         * switches are handled by the sdhci core, so just report success.
         */
        msm_host_writel(msm_host, irq_ack, host,
                        msm_offset->core_pwrctl_ctl);

        /*
         * If we don't have info regarding the voltage levels supported by
         * regulators, don't change the IO PAD PWR SWITCH.
         */
        if (msm_host->caps_0 & CORE_VOLT_SUPPORT) {
                u32 new_config;
                /*
                 * We should unset the IO PAD PWR switch only if the register
                 * write can set the IO lines high and the regulator also
                 * switches to 3 V. Else, we should keep the IO PAD PWR
                 * switch set.
                 * This is applicable to certain targets where the eMMC vccq
                 * supply is only 1.8V. In such targets, even during
                 * REQ_IO_HIGH, the IO PAD PWR switch must be kept set to
                 * reflect the actual regulator voltage. This way, during
                 * initialization of controllers with only 1.8V, we will set
                 * the IO PAD bit without waiting for a REQ_IO_LOW.
                 */
                config = readl_relaxed(host->ioaddr +
                                msm_offset->core_vendor_spec);
                new_config = config;

                if ((io_level & REQ_IO_HIGH) &&
                    (msm_host->caps_0 & CORE_3_0V_SUPPORT))
                        new_config &= ~CORE_IO_PAD_PWR_SWITCH;
                else if ((io_level & REQ_IO_LOW) ||
                         (msm_host->caps_0 & CORE_1_8V_SUPPORT))
                        new_config |= CORE_IO_PAD_PWR_SWITCH;

                if (config ^ new_config)
                        writel_relaxed(new_config, host->ioaddr +
                                       msm_offset->core_vendor_spec);
        }

        if (pwr_state)
                msm_host->curr_pwr_state = pwr_state;
        if (io_level)
                msm_host->curr_io_level = io_level;

        dev_dbg(mmc_dev(mmc), "%s: %s: Handled IRQ(%d), irq_status=0x%x, ack=0x%x\n",
                mmc_hostname(msm_host->mmc), __func__, irq, irq_status,
                irq_ack);
}

static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
{
	struct sdhci_host *host = (struct sdhci_host *)data;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	sdhci_msm_handle_pwr_irq(host, irq);
	msm_host->pwr_irq_flag = 1;
	sdhci_msm_complete_pwr_irq_wait(msm_host);

	return IRQ_HANDLED;
}

static unsigned int sdhci_msm_get_max_clock(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct clk *core_clk = msm_host->bulk_clks[0].clk;

	return clk_round_rate(core_clk, ULONG_MAX);
}
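
/*
 * Note: passing ULONG_MAX asks the clock framework to round down to the
 * highest rate the "core" GCC clock supports; since this driver sets
 * SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, sdhci_setup_host() uses this value
 * as host->max_clk (and, absent a DT override, as mmc->f_max).
 */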

static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host)
{
	return SDHCI_MSM_MIN_CLOCK;
}

/*
 * __sdhci_msm_set_clock - sdhci_msm clock control.
 *
 * Description:
 * The MSM controller does not use the internal divider and instead
 * directly controls the GCC clock, as per HW recommendation.
 */
static void __sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
{
	u16 clk;
	/*
	 * Keep actual_clock as zero:
	 * - since no divider is used, there is no need for actual_clock.
	 * - the MSM controller uses SDCLK for the data timeout calculation;
	 *   if actual_clock is zero, host->clock is used instead.
	 */
	host->mmc->actual_clock = 0;

	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		return;

	/*
	 * The MSM controller does not use a clock divider.
	 * Thus read SDHCI_CLOCK_CONTROL and only enable the
	 * clock, with no divider value programmed.
	 */
	clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
	sdhci_enable_clk(host, clk);
}
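
/*
 * For reference (sdhci core behavior, not new logic): with the divider
 * field left at zero, sdhci_enable_clk() only sets SDHCI_CLOCK_INT_EN,
 * polls for SDHCI_CLOCK_INT_STABLE and then sets SDHCI_CLOCK_CARD_EN, so
 * SDCLK runs at whatever rate was programmed into the GCC clock above.
 */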

/* sdhci_msm_set_clock - Called with (host->lock) spinlock held. */
static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	if (!clock) {
		msm_host->clk_rate = clock;
		goto out;
	}

	sdhci_msm_hc_select_mode(host);

	msm_set_clock_rate_for_bus_mode(host, clock);
out:
	__sdhci_msm_set_clock(host, clock);
}

/*****************************************************************************\
 *                                                                           *
 * MSM Command Queue Engine (CQE)                                            *
 *                                                                           *
\*****************************************************************************/

static u32 sdhci_msm_cqe_irq(struct sdhci_host *host, u32 intmask)
{
	int cmd_error = 0;
	int data_error = 0;

	if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
		return intmask;

	cqhci_irq(host->mmc, intmask, cmd_error, data_error);
	return 0;
}

static void sdhci_msm_cqe_disable(struct mmc_host *mmc, bool recovery)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;
	u32 ctrl;

	/*
	 * When CQE is halted, the legacy SDHCI path operates only
	 * on 16-byte descriptors in 64-bit mode.
	 */
	if (host->flags & SDHCI_USE_64_BIT_DMA)
		host->desc_sz = 16;

	spin_lock_irqsave(&host->lock, flags);

	/*
	 * During CQE command transfers, the command complete bit gets
	 * latched. So s/w should clear the command complete interrupt
	 * status when CQE is either halted or disabled. Otherwise an
	 * unexpected SDHCI legacy interrupt gets triggered when CQE is
	 * halted/disabled.
	 */
	ctrl = sdhci_readl(host, SDHCI_INT_ENABLE);
	ctrl |= SDHCI_INT_RESPONSE;
	sdhci_writel(host, ctrl, SDHCI_INT_ENABLE);
	sdhci_writel(host, SDHCI_INT_RESPONSE, SDHCI_INT_STATUS);

	spin_unlock_irqrestore(&host->lock, flags);

	sdhci_cqe_disable(mmc, recovery);
}

static void sdhci_msm_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	u32 count, start = 15;

	__sdhci_set_timeout(host, cmd);
	count = sdhci_readb(host, SDHCI_TIMEOUT_CONTROL);
	/*
	 * Update the software timeout value if it is less than the hardware
	 * data timeout value. The Qcom SoC hardware data timeout value is
	 * calculated as 4 * MCLK * 2^(count + 13), where MCLK = 1 / host->clock.
	 */
	if (cmd && cmd->data && host->clock > 400000 &&
	    host->clock <= 50000000 &&
	    ((1 << (count + start)) > (10 * host->clock)))
		host->data_timeout = 22LL * NSEC_PER_SEC;
}
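
/*
 * Worked example of the check above: with host->clock = 50 MHz and
 * count = 14, the hardware timeout is 4 * 2^(14 + 13) / 50000000 ~= 10.7 s,
 * and (1 << (14 + 15)) = 536870912 > 10 * 50000000 = 500000000, so the
 * software timeout is raised to 22 s to keep it above the hardware one.
 */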

static const struct cqhci_host_ops sdhci_msm_cqhci_ops = {
	.enable		= sdhci_cqe_enable,
	.disable	= sdhci_msm_cqe_disable,
};

static int sdhci_msm_cqe_add_host(struct sdhci_host *host,
				  struct platform_device *pdev)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct cqhci_host *cq_host;
	bool dma64;
	u32 cqcfg;
	int ret;

	/*
	 * When CQE is halted, the SDHC operates only on 16-byte ADMA
	 * descriptors. So ensure the ADMA table is allocated for 16-byte
	 * descriptors.
	 */
	if (host->caps & SDHCI_CAN_64BIT)
		host->alloc_desc_sz = 16;

	ret = sdhci_setup_host(host);
	if (ret)
		return ret;

	cq_host = cqhci_pltfm_init(pdev);
	if (IS_ERR(cq_host)) {
		ret = PTR_ERR(cq_host);
		dev_err(&pdev->dev, "cqhci-pltfm init: failed: %d\n", ret);
		goto cleanup;
	}

	msm_host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
	cq_host->ops = &sdhci_msm_cqhci_ops;

	dma64 = host->flags & SDHCI_USE_64_BIT_DMA;

	ret = cqhci_init(cq_host, host->mmc, dma64);
	if (ret) {
		dev_err(&pdev->dev, "%s: CQE init: failed (%d)\n",
			mmc_hostname(host->mmc), ret);
		goto cleanup;
	}

	/* Disable CQE reset due to the CQE enable signal */
	cqcfg = cqhci_readl(cq_host, CQHCI_VENDOR_CFG1);
	cqcfg |= CQHCI_VENDOR_DIS_RST_ON_CQ_EN;
	cqhci_writel(cq_host, cqcfg, CQHCI_VENDOR_CFG1);

	/*
	 * The SDHC expects 12-byte ADMA descriptors until CQE is enabled.
	 * So limit desc_sz to 12 so that the data commands that are sent
	 * during card initialization (before CQE gets enabled) get
	 * executed without any issues.
	 */
	if (host->flags & SDHCI_USE_64_BIT_DMA)
		host->desc_sz = 12;

	ret = __sdhci_add_host(host);
	if (ret)
		goto cleanup;

	dev_info(&pdev->dev, "%s: CQE init: success\n",
		 mmc_hostname(host->mmc));
	return ret;

cleanup:
	sdhci_cleanup_host(host);
	return ret;
}
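
/*
 * The CQE path above is taken only when the board's device tree opts in
 * via the "supports-cqe" property (checked in sdhci_msm_probe() below).
 * Illustrative DT fragment, with a hypothetical node label:
 *
 *	&sdhc_1 {
 *		supports-cqe;
 *	};
 */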

/*
 * Platform specific register write functions. This is so that, if any
 * register write needs to be followed up by platform specific actions,
 * they can be added here. These functions can go to sleep when writes
 * to certain registers are done.
 * These functions rely on sdhci_set_ios not using a spinlock.
 */
static int __sdhci_msm_check_write(struct sdhci_host *host, u16 val, int reg)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	u32 req_type = 0;

	switch (reg) {
	case SDHCI_HOST_CONTROL2:
		req_type = (val & SDHCI_CTRL_VDD_180) ? REQ_IO_LOW :
			REQ_IO_HIGH;
		break;
	case SDHCI_SOFTWARE_RESET:
		if (host->pwr && (val & SDHCI_RESET_ALL))
			req_type = REQ_BUS_OFF;
		break;
	case SDHCI_POWER_CONTROL:
		req_type = !val ? REQ_BUS_OFF : REQ_BUS_ON;
		break;
	case SDHCI_TRANSFER_MODE:
		msm_host->transfer_mode = val;
		break;
	case SDHCI_COMMAND:
		if (!msm_host->use_cdr)
			break;
		if ((msm_host->transfer_mode & SDHCI_TRNS_READ) &&
		    SDHCI_GET_CMD(val) != MMC_SEND_TUNING_BLOCK_HS200 &&
		    SDHCI_GET_CMD(val) != MMC_SEND_TUNING_BLOCK)
			sdhci_msm_set_cdr(host, true);
		else
			sdhci_msm_set_cdr(host, false);
		break;
	}

	if (req_type) {
		msm_host->pwr_irq_flag = 0;
		/*
		 * Since this register write may trigger a power irq, ensure
		 * all previous register writes are complete by this point.
		 */
		mb();
	}
	return req_type;
}
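
/*
 * Example of the mapping implemented above: a write of 0 to
 * SDHCI_POWER_CONTROL (bus power off) yields REQ_BUS_OFF, while setting
 * SDHCI_CTRL_VDD_180 in SDHCI_HOST_CONTROL2 yields REQ_IO_LOW; the
 * returned req_type is then used by the wrappers below to wait for the
 * matching power IRQ via sdhci_msm_check_power_status().
 */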

/* This function may sleep */
static void sdhci_msm_writew(struct sdhci_host *host, u16 val, int reg)
{
	u32 req_type = 0;

	req_type = __sdhci_msm_check_write(host, val, reg);
	writew_relaxed(val, host->ioaddr + reg);

	if (req_type)
		sdhci_msm_check_power_status(host, req_type);
}

/* This function may sleep */
static void sdhci_msm_writeb(struct sdhci_host *host, u8 val, int reg)
{
	u32 req_type = 0;

	req_type = __sdhci_msm_check_write(host, val, reg);

	writeb_relaxed(val, host->ioaddr + reg);

	if (req_type)
		sdhci_msm_check_power_status(host, req_type);
}

static void sdhci_msm_set_regulator_caps(struct sdhci_msm_host *msm_host)
{
	struct mmc_host *mmc = msm_host->mmc;
	struct regulator *supply = mmc->supply.vqmmc;
	u32 caps = 0, config;
	struct sdhci_host *host = mmc_priv(mmc);
	const struct sdhci_msm_offset *msm_offset = msm_host->offset;

	if (!IS_ERR(mmc->supply.vqmmc)) {
		if (regulator_is_supported_voltage(supply, 1700000, 1950000))
			caps |= CORE_1_8V_SUPPORT;
		if (regulator_is_supported_voltage(supply, 2700000, 3600000))
			caps |= CORE_3_0V_SUPPORT;

		if (!caps)
			pr_warn("%s: 1.8/3V not supported for vqmmc\n",
				mmc_hostname(mmc));
	}

	if (caps) {
		/*
		 * Set the PAD_PWR_SWITCH_EN bit so that the PAD_PWR_SWITCH
		 * bit can be used as required later on.
		 */
		u32 io_level = msm_host->curr_io_level;

		config = readl_relaxed(host->ioaddr +
				msm_offset->core_vendor_spec);
		config |= CORE_IO_PAD_PWR_SWITCH_EN;

		if ((io_level & REQ_IO_HIGH) && (caps & CORE_3_0V_SUPPORT))
			config &= ~CORE_IO_PAD_PWR_SWITCH;
		else if ((io_level & REQ_IO_LOW) || (caps & CORE_1_8V_SUPPORT))
			config |= CORE_IO_PAD_PWR_SWITCH;

		writel_relaxed(config,
				host->ioaddr + msm_offset->core_vendor_spec);
	}
	msm_host->caps_0 |= caps;
	pr_debug("%s: supported caps: 0x%08x\n", mmc_hostname(mmc), caps);
}
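
/*
 * Example of the decision above: a vqmmc regulator constrained to
 * 1.7 V - 1.95 V only (a common eMMC setup) yields caps ==
 * CORE_1_8V_SUPPORT, so the IO PAD power switch is set immediately at
 * init instead of waiting for a later REQ_IO_LOW request.
 */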

static void sdhci_msm_reset(struct sdhci_host *host, u8 mask)
{
	if ((host->mmc->caps2 & MMC_CAP2_CQE) && (mask & SDHCI_RESET_ALL))
		cqhci_deactivate(host->mmc);
	sdhci_reset(host, mask);
}

static int sdhci_msm_register_vreg(struct sdhci_msm_host *msm_host)
{
	int ret;

	ret = mmc_regulator_get_supply(msm_host->mmc);
	if (ret)
		return ret;

	sdhci_msm_set_regulator_caps(msm_host);

	return 0;
}

static int sdhci_msm_start_signal_voltage_switch(struct mmc_host *mmc,
						 struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u16 ctrl, status;

	/*
	 * Signal Voltage Switching is only applicable for Host Controllers
	 * v3.00 and above.
	 */
	if (host->version < SDHCI_SPEC_300)
		return 0;

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:
		if (!(host->flags & SDHCI_SIGNALING_330))
			return -EINVAL;

		/* Set 1.8V Signal Enable in the Host Control2 register to 0 */
		ctrl &= ~SDHCI_CTRL_VDD_180;
		break;
	case MMC_SIGNAL_VOLTAGE_180:
		if (!(host->flags & SDHCI_SIGNALING_180))
			return -EINVAL;

		/* Enable 1.8V Signal Enable in the Host Control2 register */
		ctrl |= SDHCI_CTRL_VDD_180;
		break;

	default:
		return -EINVAL;
	}

	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

	/* Wait for 5ms */
	usleep_range(5000, 5500);

	/* regulator output should be stable within 5 ms */
	status = ctrl & SDHCI_CTRL_VDD_180;
	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	if ((ctrl & SDHCI_CTRL_VDD_180) == status)
		return 0;

	dev_warn(mmc_dev(mmc), "%s: Regulator output did not become stable\n",
		 mmc_hostname(mmc));

	return -EAGAIN;
}
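
/*
 * Note on the 5 ms wait above: the SD Host Controller spec has the host
 * set 1.8V Signal Enable, wait 5 ms, and then confirm the bit is still
 * set; if the controller has cleared it, the voltage switch failed, hence
 * the -EAGAIN so the mmc core can retry or fall back.
 */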

#define DRIVER_NAME "sdhci_msm"
#define SDHCI_MSM_DUMP(f, x...) \
	pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)

static void sdhci_msm_dump_vendor_regs(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_msm_offset *msm_offset = msm_host->offset;

	SDHCI_MSM_DUMP("----------- VENDOR REGISTER DUMP -----------\n");

	SDHCI_MSM_DUMP(
		"DLL sts: 0x%08x | DLL cfg: 0x%08x | DLL cfg2: 0x%08x\n",
		readl_relaxed(host->ioaddr + msm_offset->core_dll_status),
		readl_relaxed(host->ioaddr + msm_offset->core_dll_config),
		readl_relaxed(host->ioaddr + msm_offset->core_dll_config_2));
	SDHCI_MSM_DUMP(
		"DLL cfg3: 0x%08x | DLL usr ctl: 0x%08x | DDR cfg: 0x%08x\n",
		readl_relaxed(host->ioaddr + msm_offset->core_dll_config_3),
		readl_relaxed(host->ioaddr + msm_offset->core_dll_usr_ctl),
		readl_relaxed(host->ioaddr + msm_offset->core_ddr_config));
	SDHCI_MSM_DUMP(
		"Vndr func: 0x%08x | Vndr func2: 0x%08x | Vndr func3: 0x%08x\n",
		readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec),
		readl_relaxed(host->ioaddr +
			msm_offset->core_vendor_spec_func2),
		readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec3));
}

static const struct sdhci_msm_variant_ops mci_var_ops = {
	.msm_readl_relaxed = sdhci_msm_mci_variant_readl_relaxed,
	.msm_writel_relaxed = sdhci_msm_mci_variant_writel_relaxed,
};

static const struct sdhci_msm_variant_ops v5_var_ops = {
	.msm_readl_relaxed = sdhci_msm_v5_variant_readl_relaxed,
	.msm_writel_relaxed = sdhci_msm_v5_variant_writel_relaxed,
};

static const struct sdhci_msm_variant_info sdhci_msm_mci_var = {
	.var_ops = &mci_var_ops,
	.offset = &sdhci_msm_mci_offset,
};

static const struct sdhci_msm_variant_info sdhci_msm_v5_var = {
	.mci_removed = true,
	.var_ops = &v5_var_ops,
	.offset = &sdhci_msm_v5_offset,
};

static const struct sdhci_msm_variant_info sdm845_sdhci_var = {
	.mci_removed = true,
	.restore_dll_config = true,
	.var_ops = &v5_var_ops,
	.offset = &sdhci_msm_v5_offset,
};

static const struct sdhci_msm_variant_info sm8250_sdhci_var = {
	.mci_removed = true,
	.uses_tassadar_dll = true,
	.var_ops = &v5_var_ops,
	.offset = &sdhci_msm_v5_offset,
};

static const struct of_device_id sdhci_msm_dt_match[] = {
	{.compatible = "qcom,sdhci-msm-v4", .data = &sdhci_msm_mci_var},
	{.compatible = "qcom,sdhci-msm-v5", .data = &sdhci_msm_v5_var},
	{.compatible = "qcom,sdm845-sdhci", .data = &sdm845_sdhci_var},
	{.compatible = "qcom,sm8250-sdhci", .data = &sm8250_sdhci_var},
	{.compatible = "qcom,sc7180-sdhci", .data = &sdm845_sdhci_var},
	{},
};

MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);
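
/*
 * Boards typically list an SoC-specific compatible first, with the generic
 * v5 string as a fallback, so SoCs without an entry above still probe via
 * "qcom,sdhci-msm-v5". Illustrative DT fragment (node name is an example):
 *
 *	sdhc_2: sdhci@8804000 {
 *		compatible = "qcom,sdm845-sdhci", "qcom,sdhci-msm-v5";
 *	};
 */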

static const struct sdhci_ops sdhci_msm_ops = {
	.reset = sdhci_msm_reset,
	.set_clock = sdhci_msm_set_clock,
	.get_min_clock = sdhci_msm_get_min_clock,
	.get_max_clock = sdhci_msm_get_max_clock,
	.set_bus_width = sdhci_set_bus_width,
	.set_uhs_signaling = sdhci_msm_set_uhs_signaling,
	.write_w = sdhci_msm_writew,
	.write_b = sdhci_msm_writeb,
	.irq = sdhci_msm_cqe_irq,
	.dump_vendor_regs = sdhci_msm_dump_vendor_regs,
	.set_power = sdhci_set_power_noreg,
	.set_timeout = sdhci_msm_set_timeout,
};

static const struct sdhci_pltfm_data sdhci_msm_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
		  SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,

	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
	.ops = &sdhci_msm_ops,
};

static inline void sdhci_msm_get_of_property(struct platform_device *pdev,
					     struct sdhci_host *host)
{
	struct device_node *node = pdev->dev.of_node;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	if (of_property_read_u32(node, "qcom,ddr-config",
				 &msm_host->ddr_config))
		msm_host->ddr_config = DDR_CONFIG_POR_VAL;

	of_property_read_u32(node, "qcom,dll-config", &msm_host->dll_config);
}
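
/*
 * Illustrative DT fragment (values are examples only, not recommendations)
 * showing the two optional tuning properties parsed above; when
 * "qcom,ddr-config" is absent, the power-on-reset default is used:
 *
 *	&sdhc_2 {
 *		qcom,ddr-config = <0x80040868>;
 *		qcom,dll-config = <0x0007642c>;
 *	};
 */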

static int sdhci_msm_probe(struct platform_device *pdev)
{
	struct sdhci_host *host;
	struct sdhci_pltfm_host *pltfm_host;
	struct sdhci_msm_host *msm_host;
	struct clk *clk;
	int ret;
	u16 host_version, core_minor;
	u32 core_version, config;
	u8 core_major;
	const struct sdhci_msm_offset *msm_offset;
	const struct sdhci_msm_variant_info *var_info;
	struct device_node *node = pdev->dev.of_node;

	host = sdhci_pltfm_init(pdev, &sdhci_msm_pdata, sizeof(*msm_host));
	if (IS_ERR(host))
		return PTR_ERR(host);

	host->sdma_boundary = 0;
	pltfm_host = sdhci_priv(host);
	msm_host = sdhci_pltfm_priv(pltfm_host);
	msm_host->mmc = host->mmc;
	msm_host->pdev = pdev;

	ret = mmc_of_parse(host->mmc);
	if (ret)
		goto pltfm_free;

	/*
	 * Based on the compatible string, load the required msm host info
	 * from the data associated with the version info.
	 */
	var_info = of_device_get_match_data(&pdev->dev);

	msm_host->mci_removed = var_info->mci_removed;
	msm_host->restore_dll_config = var_info->restore_dll_config;
	msm_host->var_ops = var_info->var_ops;
	msm_host->offset = var_info->offset;
	msm_host->uses_tassadar_dll = var_info->uses_tassadar_dll;

	msm_offset = msm_host->offset;

	sdhci_get_of_property(pdev);
	sdhci_msm_get_of_property(pdev, host);

	msm_host->saved_tuning_phase = INVALID_TUNING_PHASE;

	/* Setup SDCC bus voter clock. */
	msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus");
	if (!IS_ERR(msm_host->bus_clk)) {
		/* Vote for max. clk rate for max. performance */
		ret = clk_set_rate(msm_host->bus_clk, INT_MAX);
		if (ret)
			goto pltfm_free;
		ret = clk_prepare_enable(msm_host->bus_clk);
		if (ret)
			goto pltfm_free;
	}

	/* Setup main peripheral bus clock */
	clk = devm_clk_get(&pdev->dev, "iface");
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		dev_err(&pdev->dev, "Peripheral clk setup failed (%d)\n", ret);
		goto bus_clk_disable;
	}
	msm_host->bulk_clks[1].clk = clk;

	/* Setup SDC MMC clock */
	clk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		dev_err(&pdev->dev, "SDC MMC clk setup failed (%d)\n", ret);
		goto bus_clk_disable;
	}
	msm_host->bulk_clks[0].clk = clk;

	/* Check for optional interconnect paths */
	ret = dev_pm_opp_of_find_icc_paths(&pdev->dev, NULL);
	if (ret)
		goto bus_clk_disable;

	msm_host->opp_table = dev_pm_opp_set_clkname(&pdev->dev, "core");
	if (IS_ERR(msm_host->opp_table)) {
		ret = PTR_ERR(msm_host->opp_table);
		goto bus_clk_disable;
	}

	/* OPP table is optional */
	ret = dev_pm_opp_of_add_table(&pdev->dev);
	if (ret && ret != -ENODEV) {
		dev_err(&pdev->dev, "Invalid OPP table in device tree\n");
		goto opp_put_clkname;
	}

	/* Vote for maximum clock rate for maximum performance */
	ret = dev_pm_opp_set_rate(&pdev->dev, INT_MAX);
	if (ret)
		dev_warn(&pdev->dev, "core clock boost failed\n");

	clk = devm_clk_get(&pdev->dev, "cal");
	if (IS_ERR(clk))
		clk = NULL;
	msm_host->bulk_clks[2].clk = clk;

	clk = devm_clk_get(&pdev->dev, "sleep");
	if (IS_ERR(clk))
		clk = NULL;
	msm_host->bulk_clks[3].clk = clk;

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
				      msm_host->bulk_clks);
	if (ret)
		goto opp_cleanup;

	/*
	 * The xo clock is needed for the FLL feature of cm_dll.
	 * If the xo clock is not specified in DT, warn and proceed.
	 */
	msm_host->xo_clk = devm_clk_get(&pdev->dev, "xo");
	if (IS_ERR(msm_host->xo_clk)) {
		ret = PTR_ERR(msm_host->xo_clk);
		dev_warn(&pdev->dev, "TCXO clk not present (%d)\n", ret);
	}

	if (!msm_host->mci_removed) {
		msm_host->core_mem = devm_platform_ioremap_resource(pdev, 1);
		if (IS_ERR(msm_host->core_mem)) {
			ret = PTR_ERR(msm_host->core_mem);
			goto clk_disable;
		}
	}

	/* Reset the vendor spec register to its power-on reset state */
	writel_relaxed(CORE_VENDOR_SPEC_POR_VAL,
		       host->ioaddr + msm_offset->core_vendor_spec);

	if (!msm_host->mci_removed) {
		/* Set HC_MODE_EN bit in HC_MODE register */
		msm_host_writel(msm_host, HC_MODE_EN, host,
				msm_offset->core_hc_mode);
		config = msm_host_readl(msm_host, host,
				msm_offset->core_hc_mode);
		config |= FF_CLK_SW_RST_DIS;
		msm_host_writel(msm_host, config, host,
				msm_offset->core_hc_mode);
	}

	host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
	dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n",
		host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >>
			       SDHCI_VENDOR_VER_SHIFT));

	core_version = msm_host_readl(msm_host, host,
			msm_offset->core_mci_version);
	core_major = (core_version & CORE_VERSION_MAJOR_MASK) >>
		     CORE_VERSION_MAJOR_SHIFT;
	core_minor = core_version & CORE_VERSION_MINOR_MASK;
	dev_dbg(&pdev->dev, "MCI Version: 0x%08x, major: 0x%04x, minor: 0x%02x\n",
		core_version, core_major, core_minor);

	if (core_major == 1 && core_minor >= 0x42)
		msm_host->use_14lpp_dll_reset = true;

	/*
	 * SDCC 5 controllers with major version 1, minor version 0x34 and
	 * later with HS400 mode support use the CM DLL instead of the
	 * CDC LP 533 DLL.
	 */
	if (core_major == 1 && core_minor < 0x34)
		msm_host->use_cdclp533 = true;

	/*
	 * Support for some capabilities is not advertised by newer
	 * controller versions and must be explicitly enabled.
	 */
	if (core_major >= 1 && core_minor != 0x11 && core_minor != 0x12) {
		config = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES);
		config |= SDHCI_CAN_VDD_300 | SDHCI_CAN_DO_8BIT;
		writel_relaxed(config, host->ioaddr +
			       msm_offset->core_vendor_spec_capabilities0);
	}

	if (core_major == 1 && core_minor >= 0x49)
		msm_host->updated_ddr_cfg = true;

	ret = sdhci_msm_register_vreg(msm_host);
	if (ret)
		goto clk_disable;

	/*
	 * The power-on reset state may trigger a power irq if the previous
	 * status of PWRCTL was either BUS_ON or IO_HIGH_V. So before
	 * enabling the pwr irq interrupt in the GIC, any pending power irq
	 * should be acknowledged. Otherwise the power irq interrupt handler
	 * would fire prematurely.
	 */
	sdhci_msm_handle_pwr_irq(host, 0);

	/*
	 * Ensure that the above writes are propagated before interrupt
	 * enablement in the GIC.
	 */
	mb();

	/* Setup IRQ for handling power/voltage tasks with PMIC */
	msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq");
	if (msm_host->pwr_irq < 0) {
		ret = msm_host->pwr_irq;
		goto clk_disable;
	}

	sdhci_msm_init_pwr_irq_wait(msm_host);
	/* Enable pwr irq interrupts */
	msm_host_writel(msm_host, INT_MASK, host,
			msm_offset->core_pwrctl_mask);

	ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL,
					sdhci_msm_pwr_irq, IRQF_ONESHOT,
					dev_name(&pdev->dev), host);
	if (ret) {
		dev_err(&pdev->dev, "Request IRQ failed (%d)\n", ret);
		goto clk_disable;
	}

	msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY;

	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev,
					 MSM_MMC_AUTOSUSPEND_DELAY_MS);
	pm_runtime_use_autosuspend(&pdev->dev);

	host->mmc_host_ops.start_signal_voltage_switch =
		sdhci_msm_start_signal_voltage_switch;
	host->mmc_host_ops.execute_tuning = sdhci_msm_execute_tuning;
	if (of_property_read_bool(node, "supports-cqe"))
		ret = sdhci_msm_cqe_add_host(host, pdev);
	else
		ret = sdhci_add_host(host);
	if (ret)
		goto pm_runtime_disable;

	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);

	return 0;

pm_runtime_disable:
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);
clk_disable:
	clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
				   msm_host->bulk_clks);
opp_cleanup:
	dev_pm_opp_of_remove_table(&pdev->dev);
opp_put_clkname:
	dev_pm_opp_put_clkname(msm_host->opp_table);
bus_clk_disable:
	if (!IS_ERR(msm_host->bus_clk))
		clk_disable_unprepare(msm_host->bus_clk);
pltfm_free:
	sdhci_pltfm_free(pdev);
	return ret;
}

static int sdhci_msm_remove(struct platform_device *pdev)
{
	struct sdhci_host *host = platform_get_drvdata(pdev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) ==
		    0xffffffff);

	sdhci_remove_host(host, dead);

	dev_pm_opp_of_remove_table(&pdev->dev);
	dev_pm_opp_put_clkname(msm_host->opp_table);
	pm_runtime_get_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);

	clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
				   msm_host->bulk_clks);
	if (!IS_ERR(msm_host->bus_clk))
		clk_disable_unprepare(msm_host->bus_clk);
	sdhci_pltfm_free(pdev);
	return 0;
}

static __maybe_unused int sdhci_msm_runtime_suspend(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	/* Drop the performance vote */
	dev_pm_opp_set_rate(dev, 0);
	clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
				   msm_host->bulk_clks);

	return 0;
}

static __maybe_unused int sdhci_msm_runtime_resume(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	int ret;

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
				      msm_host->bulk_clks);
	if (ret)
		return ret;
	/*
	 * Whenever the core clock is gated dynamically, the SDR DLL
	 * settings must be restored when the clock is ungated.
	 */
	if (msm_host->restore_dll_config && msm_host->clk_rate)
		ret = sdhci_msm_restore_sdr_dll_config(host);

	dev_pm_opp_set_rate(dev, msm_host->clk_rate);

	return ret;
}

static const struct dev_pm_ops sdhci_msm_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(sdhci_msm_runtime_suspend,
			   sdhci_msm_runtime_resume,
			   NULL)
};

static struct platform_driver sdhci_msm_driver = {
	.probe = sdhci_msm_probe,
	.remove = sdhci_msm_remove,
	.driver = {
		   .name = "sdhci_msm",
		   .of_match_table = sdhci_msm_dt_match,
		   .pm = &sdhci_msm_pm_ops,
		   .probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};

module_platform_driver(sdhci_msm_driver);

MODULE_DESCRIPTION("Qualcomm Secure Digital Host Controller Interface driver");
MODULE_LICENSE("GPL v2");