1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2019 MediaTek Inc.
4  * Authors:
5  *	Stanley Chu <stanley.chu@mediatek.com>
6  *	Peter Wang <peter.wang@mediatek.com>
7  */
8 
9 #include <linux/arm-smccc.h>
10 #include <linux/bitfield.h>
11 #include <linux/of.h>
12 #include <linux/of_address.h>
13 #include <linux/of_device.h>
14 #include <linux/phy/phy.h>
15 #include <linux/platform_device.h>
16 #include <linux/regulator/consumer.h>
17 #include <linux/reset.h>
18 #include <linux/soc/mediatek/mtk_sip_svc.h>
19 
20 #include "ufshcd.h"
21 #include "ufshcd-crypto.h"
22 #include "ufshcd-pltfrm.h"
23 #include "ufs_quirks.h"
24 #include "unipro.h"
25 #include "ufs-mediatek.h"
26 
27 #define CREATE_TRACE_POINTS
28 #include "ufs-mediatek-trace.h"
29 
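/*
 * Wrappers around MediaTek SiP SMC calls (MTK_SIP_UFS_CONTROL) used to ask
 * the secure firmware to switch VA09 power, the inline crypto engine, the
 * reference-clock state and the device reset line.
 */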
30 #define ufs_mtk_smc(cmd, val, res) \
31 	arm_smccc_smc(MTK_SIP_UFS_CONTROL, \
32 		      cmd, val, 0, 0, 0, 0, 0, &(res))
33 
34 #define ufs_mtk_va09_pwr_ctrl(res, on) \
35 	ufs_mtk_smc(UFS_MTK_SIP_VA09_PWR_CTRL, on, res)
36 
37 #define ufs_mtk_crypto_ctrl(res, enable) \
38 	ufs_mtk_smc(UFS_MTK_SIP_CRYPTO_CTRL, enable, res)
39 
40 #define ufs_mtk_ref_clk_notify(on, res) \
41 	ufs_mtk_smc(UFS_MTK_SIP_REF_CLK_NOTIFICATION, on, res)
42 
43 #define ufs_mtk_device_reset_ctrl(high, res) \
44 	ufs_mtk_smc(UFS_MTK_SIP_DEVICE_RESET, high, res)
45 
46 static struct ufs_dev_fix ufs_mtk_dev_fixups[] = {
47 	UFS_FIX(UFS_VENDOR_MICRON, UFS_ANY_MODEL,
48 		UFS_DEVICE_QUIRK_DELAY_AFTER_LPM),
49 	UFS_FIX(UFS_VENDOR_SKHYNIX, "H9HQ21AFAMZDAR",
50 		UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES),
51 	END_FIX
52 };
53 
54 static const struct of_device_id ufs_mtk_of_match[] = {
55 	{ .compatible = "mediatek,mt8183-ufshci" },
56 	{},
57 };
58 
59 static bool ufs_mtk_is_boost_crypt_enabled(struct ufs_hba *hba)
60 {
61 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
62 
63 	return !!(host->caps & UFS_MTK_CAP_BOOST_CRYPT_ENGINE);
64 }
65 
66 static bool ufs_mtk_is_va09_supported(struct ufs_hba *hba)
67 {
68 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
69 
70 	return !!(host->caps & UFS_MTK_CAP_VA09_PWR_CTRL);
71 }
72 
73 static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba)
74 {
75 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
76 
77 	return !!(host->caps & UFS_MTK_CAP_BROKEN_VCC);
78 }
79 
80 static bool ufs_mtk_is_pmc_via_fastauto(struct ufs_hba *hba)
81 {
82 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
83 
84 	return (host->caps & UFS_MTK_CAP_PMC_VIA_FASTAUTO);
85 }
86 
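/*
 * Enable or disable UniPro clock gating by updating the vendor-specific
 * VS_SAVEPOWERCONTROL and VS_DEBUGCLOCKENABLE attributes.
 */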
87 static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
88 {
89 	u32 tmp;
90 
91 	if (enable) {
92 		ufshcd_dme_get(hba,
93 			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
94 		tmp = tmp |
95 		      (1 << RX_SYMBOL_CLK_GATE_EN) |
96 		      (1 << SYS_CLK_GATE_EN) |
97 		      (1 << TX_CLK_GATE_EN);
98 		ufshcd_dme_set(hba,
99 			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
100 
101 		ufshcd_dme_get(hba,
102 			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
103 		tmp = tmp & ~(1 << TX_SYMBOL_CLK_REQ_FORCE);
104 		ufshcd_dme_set(hba,
105 			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
106 	} else {
107 		ufshcd_dme_get(hba,
108 			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
109 		tmp = tmp & ~((1 << RX_SYMBOL_CLK_GATE_EN) |
110 			      (1 << SYS_CLK_GATE_EN) |
111 			      (1 << TX_CLK_GATE_EN));
112 		ufshcd_dme_set(hba,
113 			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
114 
115 		ufshcd_dme_get(hba,
116 			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
117 		tmp = tmp | (1 << TX_SYMBOL_CLK_REQ_FORCE);
118 		ufshcd_dme_set(hba,
119 			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
120 	}
121 }
122 
123 static void ufs_mtk_crypto_enable(struct ufs_hba *hba)
124 {
125 	struct arm_smccc_res res;
126 
127 	ufs_mtk_crypto_ctrl(res, 1);
128 	if (res.a0) {
129 		dev_info(hba->dev, "%s: crypto enable failed, err: %lu\n",
130 			 __func__, res.a0);
131 		hba->caps &= ~UFSHCD_CAP_CRYPTO;
132 	}
133 }
134 
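/* Assert the HCI, crypto and UniPro resets, then release them in reverse order. */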
135 static void ufs_mtk_host_reset(struct ufs_hba *hba)
136 {
137 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
138 
139 	reset_control_assert(host->hci_reset);
140 	reset_control_assert(host->crypto_reset);
141 	reset_control_assert(host->unipro_reset);
142 
143 	usleep_range(100, 110);
144 
145 	reset_control_deassert(host->unipro_reset);
146 	reset_control_deassert(host->crypto_reset);
147 	reset_control_deassert(host->hci_reset);
148 }
149 
150 static void ufs_mtk_init_reset_control(struct ufs_hba *hba,
151 				       struct reset_control **rc,
152 				       char *str)
153 {
154 	*rc = devm_reset_control_get(hba->dev, str);
155 	if (IS_ERR(*rc)) {
156 		dev_info(hba->dev, "Failed to get reset control %s: %ld\n",
157 			 str, PTR_ERR(*rc));
158 		*rc = NULL;
159 	}
160 }
161 
162 static void ufs_mtk_init_reset(struct ufs_hba *hba)
163 {
164 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
165 
166 	ufs_mtk_init_reset_control(hba, &host->hci_reset,
167 				   "hci_rst");
168 	ufs_mtk_init_reset_control(hba, &host->unipro_reset,
169 				   "unipro_rst");
170 	ufs_mtk_init_reset_control(hba, &host->crypto_reset,
171 				   "crypto_rst");
172 }
173 
174 static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
175 				     enum ufs_notify_change_status status)
176 {
177 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
178 	unsigned long flags;
179 
180 	if (status == PRE_CHANGE) {
181 		if (host->unipro_lpm) {
182 			hba->vps->hba_enable_delay_us = 0;
183 		} else {
184 			hba->vps->hba_enable_delay_us = 600;
185 			ufs_mtk_host_reset(hba);
186 		}
187 
188 		if (hba->caps & UFSHCD_CAP_CRYPTO)
189 			ufs_mtk_crypto_enable(hba);
190 
191 		if (host->caps & UFS_MTK_CAP_DISABLE_AH8) {
192 			spin_lock_irqsave(hba->host->host_lock, flags);
193 			ufshcd_writel(hba, 0,
194 				      REG_AUTO_HIBERNATE_IDLE_TIMER);
195 			spin_unlock_irqrestore(hba->host->host_lock,
196 					       flags);
197 
198 			hba->capabilities &= ~MASK_AUTO_HIBERN8_SUPPORT;
199 			hba->ahit = 0;
200 		}
201 	}
202 
203 	return 0;
204 }
205 
206 static int ufs_mtk_bind_mphy(struct ufs_hba *hba)
207 {
208 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
209 	struct device *dev = hba->dev;
210 	struct device_node *np = dev->of_node;
211 	int err = 0;
212 
213 	host->mphy = devm_of_phy_get_by_index(dev, np, 0);
214 
215 	if (host->mphy == ERR_PTR(-EPROBE_DEFER)) {
216 		/*
217 		 * The UFS driver might be probed before the phy driver.
218 		 * In that case, return -EPROBE_DEFER so probing is retried later.
219 		 */
220 		err = -EPROBE_DEFER;
221 		dev_info(dev,
222 			 "%s: required phy hasn't probed yet. err = %d\n",
223 			__func__, err);
224 	} else if (IS_ERR(host->mphy)) {
225 		err = PTR_ERR(host->mphy);
226 		if (err != -ENODEV) {
227 			dev_info(dev, "%s: PHY get failed %d\n", __func__,
228 				 err);
229 		}
230 	}
231 
232 	if (err)
233 		host->mphy = NULL;
234 	/*
235 	 * Allow unbound mphy because not every platform needs specific
236 	 * mphy control.
237 	 */
238 	if (err == -ENODEV)
239 		err = 0;
240 
241 	return err;
242 }
243 
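/*
 * Request or release the device reference clock through REG_UFS_REFCLK_CTRL,
 * notifying the secure firmware around the transition and polling until the
 * ack bit follows the request bit (or the request times out).
 */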
244 static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
245 {
246 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
247 	struct arm_smccc_res res;
248 	ktime_t timeout, time_checked;
249 	u32 value;
250 
251 	if (host->ref_clk_enabled == on)
252 		return 0;
253 
254 	if (on) {
255 		ufs_mtk_ref_clk_notify(on, res);
256 		ufshcd_delay_us(host->ref_clk_ungating_wait_us, 10);
257 		ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL);
258 	} else {
259 		ufshcd_writel(hba, REFCLK_RELEASE, REG_UFS_REFCLK_CTRL);
260 	}
261 
262 	/* Wait for ack */
263 	timeout = ktime_add_us(ktime_get(), REFCLK_REQ_TIMEOUT_US);
264 	do {
265 		time_checked = ktime_get();
266 		value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL);
267 
268 		/* Wait until ack bit equals to req bit */
269 		/* Wait until the ack bit equals the req bit */
270 			goto out;
271 
272 		usleep_range(100, 200);
273 	} while (ktime_before(time_checked, timeout));
274 
275 	dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value);
276 
277 	ufs_mtk_ref_clk_notify(host->ref_clk_enabled, res);
278 
279 	return -ETIMEDOUT;
280 
281 out:
282 	host->ref_clk_enabled = on;
283 	if (!on) {
284 		ufshcd_delay_us(host->ref_clk_gating_wait_us, 10);
285 		ufs_mtk_ref_clk_notify(on, res);
286 	}
287 
288 	return 0;
289 }
290 
291 static void ufs_mtk_setup_ref_clk_wait_us(struct ufs_hba *hba,
292 					  u16 gating_us, u16 ungating_us)
293 {
294 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
295 
296 	if (hba->dev_info.clk_gating_wait_us) {
297 		host->ref_clk_gating_wait_us =
298 			hba->dev_info.clk_gating_wait_us;
299 	} else {
300 		host->ref_clk_gating_wait_us = gating_us;
301 	}
302 
303 	host->ref_clk_ungating_wait_us = ungating_us;
304 }
305 
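/*
 * Select which internal signals are routed to REG_UFS_PROBE; the selector
 * programming differs for controller IP versions 0x36 and later.
 */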
306 static void ufs_mtk_dbg_sel(struct ufs_hba *hba)
307 {
308 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
309 
310 	if (((host->ip_ver >> 16) & 0xFF) >= 0x36) {
311 		ufshcd_writel(hba, 0x820820, REG_UFS_DEBUG_SEL);
312 		ufshcd_writel(hba, 0x0, REG_UFS_DEBUG_SEL_B0);
313 		ufshcd_writel(hba, 0x55555555, REG_UFS_DEBUG_SEL_B1);
314 		ufshcd_writel(hba, 0xaaaaaaaa, REG_UFS_DEBUG_SEL_B2);
315 		ufshcd_writel(hba, 0xffffffff, REG_UFS_DEBUG_SEL_B3);
316 	} else {
317 		ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
318 	}
319 }
320 
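/*
 * Poll the host state machine through the debug probe and, if it is in a
 * Hibern8 enter/exit state, wait until it returns to the idle (HCE base)
 * state or the retry period expires.
 */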
321 static void ufs_mtk_wait_idle_state(struct ufs_hba *hba,
322 			    unsigned long retry_ms)
323 {
324 	u64 timeout, time_checked;
325 	u32 val, sm;
326 	bool wait_idle;
327 
328 	/* cannot use plain ktime_get() in suspend */
329 	timeout = ktime_get_mono_fast_ns() + retry_ms * 1000000UL;
330 
331 	/* allow a short settling time before sampling the state machine */
332 	udelay(10);
333 	wait_idle = false;
334 
335 	do {
336 		time_checked = ktime_get_mono_fast_ns();
337 		ufs_mtk_dbg_sel(hba);
338 		val = ufshcd_readl(hba, REG_UFS_PROBE);
339 
340 		sm = val & 0x1f;
341 
342 		/*
343 		 * If the state machine is in a Hibern8 enter/exit state,
344 		 * wait until it returns to the idle state.
345 		 */
346 		if ((sm >= VS_HIB_ENTER) && (sm <= VS_HIB_EXIT)) {
347 			wait_idle = true;
348 			udelay(50);
349 			continue;
350 		} else if (!wait_idle)
351 			break;
352 
353 		if (wait_idle && (sm == VS_HCE_BASE))
354 			break;
355 	} while (time_checked < timeout);
356 
357 	if (wait_idle && sm != VS_HCE_BASE)
358 		dev_info(hba->dev, "wait idle tmo: 0x%x\n", val);
359 }
360 
361 static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
362 				   unsigned long max_wait_ms)
363 {
364 	ktime_t timeout, time_checked;
365 	u32 val;
366 
367 	timeout = ktime_add_ms(ktime_get(), max_wait_ms);
368 	do {
369 		time_checked = ktime_get();
370 		ufs_mtk_dbg_sel(hba);
371 		val = ufshcd_readl(hba, REG_UFS_PROBE);
372 		val = val >> 28;
373 
374 		if (val == state)
375 			return 0;
376 
377 		/* Sleep for max. 200us */
378 		usleep_range(100, 200);
379 	} while (ktime_before(time_checked, timeout));
380 
381 	if (val == state)
382 		return 0;
383 
384 	return -ETIMEDOUT;
385 }
386 
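/*
 * Power the M-PHY on or off, sequencing the optional VA09 regulator and the
 * corresponding secure-firmware notification around the phy_power_* call.
 */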
387 static int ufs_mtk_mphy_power_on(struct ufs_hba *hba, bool on)
388 {
389 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
390 	struct phy *mphy = host->mphy;
391 	struct arm_smccc_res res;
392 	int ret = 0;
393 
394 	if (!mphy || !(on ^ host->mphy_powered_on))
395 		return 0;
396 
397 	if (on) {
398 		if (ufs_mtk_is_va09_supported(hba)) {
399 			ret = regulator_enable(host->reg_va09);
400 			if (ret < 0)
401 				goto out;
402 			/* wait 200 us to stabilize VA09 */
403 			usleep_range(200, 210);
404 			ufs_mtk_va09_pwr_ctrl(res, 1);
405 		}
406 		phy_power_on(mphy);
407 	} else {
408 		phy_power_off(mphy);
409 		if (ufs_mtk_is_va09_supported(hba)) {
410 			ufs_mtk_va09_pwr_ctrl(res, 0);
411 			ret = regulator_disable(host->reg_va09);
412 			if (ret < 0)
413 				goto out;
414 		}
415 	}
416 out:
417 	if (ret) {
418 		dev_info(hba->dev,
419 			 "failed to %s va09: %d\n",
420 			 on ? "enable" : "disable",
421 			 ret);
422 	} else {
423 		host->mphy_powered_on = on;
424 	}
425 
426 	return ret;
427 }
428 
429 static int ufs_mtk_get_host_clk(struct device *dev, const char *name,
430 				struct clk **clk_out)
431 {
432 	struct clk *clk;
433 	int err = 0;
434 
435 	clk = devm_clk_get(dev, name);
436 	if (IS_ERR(clk))
437 		err = PTR_ERR(clk);
438 	else
439 		*clk_out = clk;
440 
441 	return err;
442 }
443 
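/*
 * When the boost-crypt capability is enabled, switch the crypto clock mux
 * between its performance and low-power parents and raise or relax the
 * vcore voltage request accordingly.
 */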
444 static void ufs_mtk_boost_crypt(struct ufs_hba *hba, bool boost)
445 {
446 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
447 	struct ufs_mtk_crypt_cfg *cfg;
448 	struct regulator *reg;
449 	int volt, ret;
450 
451 	if (!ufs_mtk_is_boost_crypt_enabled(hba))
452 		return;
453 
454 	cfg = host->crypt;
455 	volt = cfg->vcore_volt;
456 	reg = cfg->reg_vcore;
457 
458 	ret = clk_prepare_enable(cfg->clk_crypt_mux);
459 	if (ret) {
460 		dev_info(hba->dev, "clk_prepare_enable(): %d\n",
461 			 ret);
462 		return;
463 	}
464 
465 	if (boost) {
466 		ret = regulator_set_voltage(reg, volt, INT_MAX);
467 		if (ret) {
468 			dev_info(hba->dev,
469 				 "failed to set vcore to %d\n", volt);
470 			goto out;
471 		}
472 
473 		ret = clk_set_parent(cfg->clk_crypt_mux,
474 				     cfg->clk_crypt_perf);
475 		if (ret) {
476 			dev_info(hba->dev,
477 				 "failed to set clk_crypt_perf\n");
478 			regulator_set_voltage(reg, 0, INT_MAX);
479 			goto out;
480 		}
481 	} else {
482 		ret = clk_set_parent(cfg->clk_crypt_mux,
483 				     cfg->clk_crypt_lp);
484 		if (ret) {
485 			dev_info(hba->dev,
486 				 "failed to set clk_crypt_lp\n");
487 			goto out;
488 		}
489 
490 		ret = regulator_set_voltage(reg, 0, INT_MAX);
491 		if (ret) {
492 			dev_info(hba->dev,
493 				 "failed to set vcore to MIN\n");
494 		}
495 	}
496 out:
497 	clk_disable_unprepare(cfg->clk_crypt_mux);
498 }
499 
500 static int ufs_mtk_init_host_clk(struct ufs_hba *hba, const char *name,
501 				 struct clk **clk)
502 {
503 	int ret;
504 
505 	ret = ufs_mtk_get_host_clk(hba->dev, name, clk);
506 	if (ret) {
507 		dev_info(hba->dev, "%s: failed to get %s: %d", __func__,
508 			 name, ret);
509 	}
510 
511 	return ret;
512 }
513 
514 static void ufs_mtk_init_boost_crypt(struct ufs_hba *hba)
515 {
516 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
517 	struct ufs_mtk_crypt_cfg *cfg;
518 	struct device *dev = hba->dev;
519 	struct regulator *reg;
520 	u32 volt;
521 
522 	host->crypt = devm_kzalloc(dev, sizeof(*(host->crypt)),
523 				   GFP_KERNEL);
524 	if (!host->crypt)
525 		goto disable_caps;
526 
527 	reg = devm_regulator_get_optional(dev, "dvfsrc-vcore");
528 	if (IS_ERR(reg)) {
529 		dev_info(dev, "failed to get dvfsrc-vcore: %ld",
530 			 PTR_ERR(reg));
531 		goto disable_caps;
532 	}
533 
534 	if (of_property_read_u32(dev->of_node, "boost-crypt-vcore-min",
535 				 &volt)) {
536 		dev_info(dev, "failed to get boost-crypt-vcore-min");
537 		goto disable_caps;
538 	}
539 
540 	cfg = host->crypt;
541 	if (ufs_mtk_init_host_clk(hba, "crypt_mux",
542 				  &cfg->clk_crypt_mux))
543 		goto disable_caps;
544 
545 	if (ufs_mtk_init_host_clk(hba, "crypt_lp",
546 				  &cfg->clk_crypt_lp))
547 		goto disable_caps;
548 
549 	if (ufs_mtk_init_host_clk(hba, "crypt_perf",
550 				  &cfg->clk_crypt_perf))
551 		goto disable_caps;
552 
553 	cfg->reg_vcore = reg;
554 	cfg->vcore_volt = volt;
555 	host->caps |= UFS_MTK_CAP_BOOST_CRYPT_ENGINE;
556 
557 disable_caps:
558 	return;
559 }
560 
561 static void ufs_mtk_init_va09_pwr_ctrl(struct ufs_hba *hba)
562 {
563 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
564 
565 	host->reg_va09 = regulator_get(hba->dev, "va09");
566 	if (IS_ERR(host->reg_va09))
567 		dev_info(hba->dev, "failed to get va09");
568 	else
569 		host->caps |= UFS_MTK_CAP_VA09_PWR_CTRL;
570 }
571 
572 static void ufs_mtk_init_host_caps(struct ufs_hba *hba)
573 {
574 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
575 	struct device_node *np = hba->dev->of_node;
576 
577 	if (of_property_read_bool(np, "mediatek,ufs-boost-crypt"))
578 		ufs_mtk_init_boost_crypt(hba);
579 
580 	if (of_property_read_bool(np, "mediatek,ufs-support-va09"))
581 		ufs_mtk_init_va09_pwr_ctrl(hba);
582 
583 	if (of_property_read_bool(np, "mediatek,ufs-disable-ah8"))
584 		host->caps |= UFS_MTK_CAP_DISABLE_AH8;
585 
586 	if (of_property_read_bool(np, "mediatek,ufs-broken-vcc"))
587 		host->caps |= UFS_MTK_CAP_BROKEN_VCC;
588 
589 	if (of_property_read_bool(np, "mediatek,ufs-pmc-via-fastauto"))
590 		host->caps |= UFS_MTK_CAP_PMC_VIA_FASTAUTO;
591 
592 	dev_info(hba->dev, "caps: 0x%x", host->caps);
593 }
594 
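/*
 * Scale host resources with link activity: boost or unboost the crypto
 * engine, request or release the reference clock and power the M-PHY
 * on or off.
 */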
595 static void ufs_mtk_scale_perf(struct ufs_hba *hba, bool up)
596 {
597 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
598 
599 	ufs_mtk_boost_crypt(hba, up);
600 	ufs_mtk_setup_ref_clk(hba, up);
601 
602 	if (up)
603 		phy_power_on(host->mphy);
604 	else
605 		phy_power_off(host->mphy);
606 }
607 
608 /**
609  * ufs_mtk_setup_clocks - enables/disables clocks
610  * @hba: host controller instance
611  * @on: If true, enable clocks else disable them.
612  * @status: PRE_CHANGE or POST_CHANGE notify
613  *
614  * Returns 0 on success, non-zero on failure.
615  */
616 static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
617 				enum ufs_notify_change_status status)
618 {
619 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
620 	bool clk_pwr_off = false;
621 	int ret = 0;
622 
623 	/*
624 	 * If ufs_mtk_init() has not completed yet, simply ignore this call;
625 	 * ufs_mtk_setup_clocks() will be called again from ufs_mtk_init()
626 	 * once initialization is done.
627 	 */
628 	if (!host)
629 		return 0;
630 
631 	if (!on && status == PRE_CHANGE) {
632 		if (ufshcd_is_link_off(hba)) {
633 			clk_pwr_off = true;
634 		} else if (ufshcd_is_link_hibern8(hba) ||
635 			 (!ufshcd_can_hibern8_during_gating(hba) &&
636 			 ufshcd_is_auto_hibern8_enabled(hba))) {
637 			/*
638 			 * Gate ref-clk and poweroff mphy if link state is in
639 			 * OFF or Hibern8 by either Auto-Hibern8 or
640 			 * ufshcd_link_state_transition().
641 			 */
642 			ret = ufs_mtk_wait_link_state(hba,
643 						      VS_LINK_HIBERN8,
644 						      15);
645 			if (!ret)
646 				clk_pwr_off = true;
647 		}
648 
649 		if (clk_pwr_off)
650 			ufs_mtk_scale_perf(hba, false);
651 	} else if (on && status == POST_CHANGE) {
652 		ufs_mtk_scale_perf(hba, true);
653 	}
654 
655 	return ret;
656 }
657 
658 static void ufs_mtk_get_controller_version(struct ufs_hba *hba)
659 {
660 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
661 	int ret, ver = 0;
662 
663 	if (host->hw_ver.major)
664 		return;
665 
666 	/* Set default (minimum) version anyway */
667 	host->hw_ver.major = 2;
668 
669 	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_LOCALVERINFO), &ver);
670 	if (!ret) {
671 		if (ver >= UFS_UNIPRO_VER_1_8) {
672 			host->hw_ver.major = 3;
673 			/*
674 			 * Fix HCI version for some platforms with
675 			 * incorrect version
676 			 */
677 			if (hba->ufs_version < ufshci_version(3, 0))
678 				hba->ufs_version = ufshci_version(3, 0);
679 		}
680 	}
681 }
682 
683 static u32 ufs_mtk_get_ufs_hci_version(struct ufs_hba *hba)
684 {
685 	return hba->ufs_version;
686 }
687 
688 /**
689  * ufs_mtk_init - initialize the MediaTek UFS host glue layer
690  * @hba: host controller instance
691  *
692  * Binds PHY with controller and powers up PHY enabling clocks
693  * and regulators.
694  *
695  * Returns -EPROBE_DEFER if binding fails, returns negative error
696  * on phy power up failure and returns zero on success.
697  */
698 static int ufs_mtk_init(struct ufs_hba *hba)
699 {
700 	const struct of_device_id *id;
701 	struct device *dev = hba->dev;
702 	struct ufs_mtk_host *host;
703 	int err = 0;
704 
705 	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
706 	if (!host) {
707 		err = -ENOMEM;
708 		dev_info(dev, "%s: no memory for mtk ufs host\n", __func__);
709 		goto out;
710 	}
711 
712 	host->hba = hba;
713 	ufshcd_set_variant(hba, host);
714 
715 	id = of_match_device(ufs_mtk_of_match, dev);
716 	if (!id) {
717 		err = -EINVAL;
718 		goto out;
719 	}
720 
721 	/* Initialize host capability */
722 	ufs_mtk_init_host_caps(hba);
723 
724 	err = ufs_mtk_bind_mphy(hba);
725 	if (err)
726 		goto out_variant_clear;
727 
728 	ufs_mtk_init_reset(hba);
729 
730 	/* Enable runtime autosuspend */
731 	hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
732 
733 	/* Enable clock-gating */
734 	hba->caps |= UFSHCD_CAP_CLK_GATING;
735 
736 	/* Enable inline encryption */
737 	hba->caps |= UFSHCD_CAP_CRYPTO;
738 
739 	/* Enable WriteBooster */
740 	hba->caps |= UFSHCD_CAP_WB_EN;
741 	hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL;
742 	hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80);
743 
744 	if (host->caps & UFS_MTK_CAP_DISABLE_AH8)
745 		hba->caps |= UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
746 
747 	/*
748 	 * ufshcd_vops_init() is invoked after
749 	 * ufshcd_setup_clock(true) in ufshcd_hba_init() thus
750 	 * phy clock setup is skipped.
751 	 *
752 	 * Enable phy clocks specifically here.
753 	 */
754 	ufs_mtk_mphy_power_on(hba, true);
755 	ufs_mtk_setup_clocks(hba, true, POST_CHANGE);
756 
757 	host->ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER);
758 
759 	goto out;
760 
761 out_variant_clear:
762 	ufshcd_set_variant(hba, NULL);
763 out:
764 	return err;
765 }
766 
767 static bool ufs_mtk_pmc_via_fastauto(struct ufs_hba *hba,
768 	struct ufs_pa_layer_attr *dev_req_params)
769 {
770 	if (!ufs_mtk_is_pmc_via_fastauto(hba))
771 		return false;
772 
773 	if (dev_req_params->hs_rate == hba->pwr_info.hs_rate)
774 		return false;
775 
776 	if ((dev_req_params->pwr_tx != FAST_MODE) &&
777 		(dev_req_params->gear_tx < UFS_HS_G4))
778 		return false;
779 
780 	if ((dev_req_params->pwr_rx != FAST_MODE) &&
781 		(dev_req_params->gear_rx < UFS_HS_G4))
782 		return false;
783 
784 	return true;
785 }
786 
787 static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
788 				  struct ufs_pa_layer_attr *dev_max_params,
789 				  struct ufs_pa_layer_attr *dev_req_params)
790 {
791 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
792 	struct ufs_dev_params host_cap;
793 	int ret;
794 
795 	ufshcd_init_pwr_dev_param(&host_cap);
796 	host_cap.hs_rx_gear = UFS_HS_G5;
797 	host_cap.hs_tx_gear = UFS_HS_G5;
798 
799 	ret = ufshcd_get_pwr_dev_param(&host_cap,
800 				       dev_max_params,
801 				       dev_req_params);
802 	if (ret) {
803 		pr_info("%s: failed to determine capabilities\n",
804 			__func__);
805 	}
806 
807 	if (ufs_mtk_pmc_via_fastauto(hba, dev_req_params)) {
808 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true);
809 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), UFS_HS_G1);
810 
811 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), true);
812 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), UFS_HS_G1);
813 
814 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
815 			dev_req_params->lane_tx);
816 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
817 			dev_req_params->lane_rx);
818 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
819 			dev_req_params->hs_rate);
820 
821 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXHSADAPTTYPE),
822 			PA_NO_ADAPT);
823 
824 		ret = ufshcd_uic_change_pwr_mode(hba,
825 			FASTAUTO_MODE << 4 | FASTAUTO_MODE);
826 
827 		if (ret) {
828 			dev_err(hba->dev, "%s: HSG1B FASTAUTO failed ret=%d\n",
829 				__func__, ret);
830 		}
831 	}
832 
833 	if (host->hw_ver.major >= 3) {
834 		ret = ufshcd_dme_configure_adapt(hba,
835 					   dev_req_params->gear_tx,
836 					   PA_INITIAL_ADAPT);
837 	}
838 
839 	return ret;
840 }
841 
842 static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
843 				     enum ufs_notify_change_status stage,
844 				     struct ufs_pa_layer_attr *dev_max_params,
845 				     struct ufs_pa_layer_attr *dev_req_params)
846 {
847 	int ret = 0;
848 
849 	switch (stage) {
850 	case PRE_CHANGE:
851 		ret = ufs_mtk_pre_pwr_change(hba, dev_max_params,
852 					     dev_req_params);
853 		break;
854 	case POST_CHANGE:
855 		break;
856 	default:
857 		ret = -EINVAL;
858 		break;
859 	}
860 
861 	return ret;
862 }
863 
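/* Enter or leave UniPro low-power mode via VS_UNIPROPOWERDOWNCONTROL. */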
864 static int ufs_mtk_unipro_set_lpm(struct ufs_hba *hba, bool lpm)
865 {
866 	int ret;
867 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
868 
869 	ret = ufshcd_dme_set(hba,
870 			     UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
871 			     lpm ? 1 : 0);
872 	if (!ret || !lpm) {
873 		/*
874 		 * If the UIC command failed, keep the host in non-LPM mode so
875 		 * that the default hba_enable_delay_us value is used when
876 		 * re-enabling the host.
877 		 */
878 		host->unipro_lpm = lpm;
879 	}
880 
881 	return ret;
882 }
883 
884 static int ufs_mtk_pre_link(struct ufs_hba *hba)
885 {
886 	int ret;
887 	u32 tmp;
888 
889 	ufs_mtk_get_controller_version(hba);
890 
891 	ret = ufs_mtk_unipro_set_lpm(hba, false);
892 	if (ret)
893 		return ret;
894 
895 	/*
896 	 * Setting PA_Local_TX_LCC_Enable to 0 before link startup
897 	 * to make sure that both host and device TX LCC are disabled
898 	 * once link startup is completed.
899 	 */
900 	ret = ufshcd_disable_host_tx_lcc(hba);
901 	if (ret)
902 		return ret;
903 
904 	/* disable deep stall */
905 	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
906 	if (ret)
907 		return ret;
908 
909 	tmp &= ~(1 << 6);
910 
911 	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
912 
913 	return ret;
914 }
915 
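/*
 * When clock gating is allowed, set the clock-gating delay to the
 * auto-hibern8 idle time plus a 5 ms margin.
 */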
916 static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
917 {
918 	unsigned long flags;
919 	u32 ah_ms;
920 
921 	if (ufshcd_is_clkgating_allowed(hba)) {
922 		if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit)
923 			ah_ms = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK,
924 					  hba->ahit);
925 		else
926 			ah_ms = 10;
927 		spin_lock_irqsave(hba->host->host_lock, flags);
928 		hba->clk_gating.delay_ms = ah_ms + 5;
929 		spin_unlock_irqrestore(hba->host->host_lock, flags);
930 	}
931 }
932 
933 static int ufs_mtk_post_link(struct ufs_hba *hba)
934 {
935 	/* enable unipro clock gating feature */
936 	ufs_mtk_cfg_unipro_cg(hba, true);
937 
938 	/* will be configured during hba probe */
939 	if (ufshcd_is_auto_hibern8_supported(hba))
940 		hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
941 			FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
942 
943 	ufs_mtk_setup_clk_gating(hba);
944 
945 	return 0;
946 }
947 
948 static int ufs_mtk_link_startup_notify(struct ufs_hba *hba,
949 				       enum ufs_notify_change_status stage)
950 {
951 	int ret = 0;
952 
953 	switch (stage) {
954 	case PRE_CHANGE:
955 		ret = ufs_mtk_pre_link(hba);
956 		break;
957 	case POST_CHANGE:
958 		ret = ufs_mtk_post_link(hba);
959 		break;
960 	default:
961 		ret = -EINVAL;
962 		break;
963 	}
964 
965 	return ret;
966 }
967 
968 static int ufs_mtk_device_reset(struct ufs_hba *hba)
969 {
970 	struct arm_smccc_res res;
971 
972 	/* disable hba before device reset */
973 	ufshcd_hba_stop(hba);
974 
975 	ufs_mtk_device_reset_ctrl(0, res);
976 
977 	/*
978 	 * The reset signal is active low. UFS devices shall detect
979 	 * more than or equal to 1us of positive or negative RST_n
980 	 * pulse width.
981 	 *
982 	 * To be on the safe side, keep the reset low for at least 10us.
983 	 */
984 	usleep_range(10, 15);
985 
986 	ufs_mtk_device_reset_ctrl(1, res);
987 
988 	/* Some devices may need time to respond to rst_n */
989 	usleep_range(10000, 15000);
990 
991 	dev_info(hba->dev, "device reset done\n");
992 
993 	return 0;
994 }
995 
996 static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
997 {
998 	int err;
999 
1000 	err = ufshcd_hba_enable(hba);
1001 	if (err)
1002 		return err;
1003 
1004 	err = ufs_mtk_unipro_set_lpm(hba, false);
1005 	if (err)
1006 		return err;
1007 
1008 	err = ufshcd_uic_hibern8_exit(hba);
1009 	if (!err)
1010 		ufshcd_set_link_active(hba);
1011 	else
1012 		return err;
1013 
1014 	err = ufshcd_make_hba_operational(hba);
1015 	if (err)
1016 		return err;
1017 
1018 	return 0;
1019 }
1020 
1021 static int ufs_mtk_link_set_lpm(struct ufs_hba *hba)
1022 {
1023 	int err;
1024 
1025 	err = ufs_mtk_unipro_set_lpm(hba, true);
1026 	if (err) {
1027 		/* Resume UniPro state for following error recovery */
1028 		ufs_mtk_unipro_set_lpm(hba, false);
1029 		return err;
1030 	}
1031 
1032 	return 0;
1033 }
1034 
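/*
 * Switch VCCQ2 between idle and normal regulator modes: idle while VCC is
 * off during low-power states, normal again on the way back up.
 */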
1035 static void ufs_mtk_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
1036 {
1037 	if (!hba->vreg_info.vccq2 || !hba->vreg_info.vcc)
1038 		return;
1039 
1040 	if (lpm && !hba->vreg_info.vcc->enabled)
1041 		regulator_set_mode(hba->vreg_info.vccq2->reg,
1042 				   REGULATOR_MODE_IDLE);
1043 	else if (!lpm)
1044 		regulator_set_mode(hba->vreg_info.vccq2->reg,
1045 				   REGULATOR_MODE_NORMAL);
1046 }
1047 
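/*
 * Disable the auto-hibern8 idle timer, then wait for the host state machine
 * to become idle and for the link to report the active (link-up) state.
 */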
1048 static void ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba)
1049 {
1050 	unsigned long flags;
1051 	int ret;
1052 
1053 	/* disable auto-hibern8 */
1054 	spin_lock_irqsave(hba->host->host_lock, flags);
1055 	ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER);
1056 	spin_unlock_irqrestore(hba->host->host_lock, flags);
1057 
1058 	/* wait host return to idle state when auto-hibern8 off */
1059 	ufs_mtk_wait_idle_state(hba, 5);
1060 
1061 	ret = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
1062 	if (ret)
1063 		dev_warn(hba->dev, "exit h8 state fail, ret=%d\n", ret);
1064 }
1065 
1066 static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
1067 	enum ufs_notify_change_status status)
1068 {
1069 	int err;
1070 	struct arm_smccc_res res;
1071 
1072 	if (status == PRE_CHANGE) {
1073 		if (!ufshcd_is_auto_hibern8_supported(hba))
1074 			return 0;
1075 		ufs_mtk_auto_hibern8_disable(hba);
1076 		return 0;
1077 	}
1078 
1079 	if (ufshcd_is_link_hibern8(hba)) {
1080 		err = ufs_mtk_link_set_lpm(hba);
1081 		if (err)
1082 			goto fail;
1083 	}
1084 
1085 	if (!ufshcd_is_link_active(hba)) {
1086 		/*
1087 		 * Make sure no error will be returned to prevent
1088 		 * ufshcd_suspend() re-enabling regulators while vreg is still
1089 		 * in low-power mode.
1090 		 */
1091 		err = ufs_mtk_mphy_power_on(hba, false);
1092 		if (err)
1093 			goto fail;
1094 	}
1095 
1096 	if (ufshcd_is_link_off(hba))
1097 		ufs_mtk_device_reset_ctrl(0, res);
1098 
1099 	return 0;
1100 fail:
1101 	/*
1102 	 * Forcibly set the link to the off state to trigger
1103 	 * ufshcd_host_reset_and_restore() in ufshcd_suspend()
1104 	 * so that the host is fully reset.
1105 	 */
1106 	ufshcd_set_link_off(hba);
1107 	return -EAGAIN;
1108 }
1109 
1110 static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
1111 {
1112 	int err;
1113 
1114 	if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
1115 		ufs_mtk_vreg_set_lpm(hba, false);
1116 
1117 	err = ufs_mtk_mphy_power_on(hba, true);
1118 	if (err)
1119 		goto fail;
1120 
1121 	if (ufshcd_is_link_hibern8(hba)) {
1122 		err = ufs_mtk_link_set_hpm(hba);
1123 		if (err)
1124 			goto fail;
1125 	}
1126 
1127 	return 0;
1128 fail:
1129 	return ufshcd_link_recovery(hba);
1130 }
1131 
1132 static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba)
1133 {
1134 	ufshcd_dump_regs(hba, REG_UFS_REFCLK_CTRL, 0x4, "Ref-Clk Ctrl ");
1135 
1136 	ufshcd_dump_regs(hba, REG_UFS_EXTREG, 0x4, "Ext Reg ");
1137 
1138 	ufshcd_dump_regs(hba, REG_UFS_MPHYCTRL,
1139 			 REG_UFS_REJECT_MON - REG_UFS_MPHYCTRL + 4,
1140 			 "MPHY Ctrl ");
1141 
1142 	/* Direct debugging information to REG_MTK_PROBE */
1143 	ufs_mtk_dbg_sel(hba);
1144 	ufshcd_dump_regs(hba, REG_UFS_PROBE, 0x4, "Debug Probe ");
1145 }
1146 
1147 static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
1148 {
1149 	struct ufs_dev_info *dev_info = &hba->dev_info;
1150 	u16 mid = dev_info->wmanufacturerid;
1151 
1152 	if (mid == UFS_VENDOR_SAMSUNG)
1153 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);
1154 
1155 	/*
1156 	 * Decide waiting time before gating reference clock and
1157 	 * after ungating reference clock according to vendors'
1158 	 * requirements.
1159 	 */
1160 	if (mid == UFS_VENDOR_SAMSUNG)
1161 		ufs_mtk_setup_ref_clk_wait_us(hba, 1, 1);
1162 	else if (mid == UFS_VENDOR_SKHYNIX)
1163 		ufs_mtk_setup_ref_clk_wait_us(hba, 30, 30);
1164 	else if (mid == UFS_VENDOR_TOSHIBA)
1165 		ufs_mtk_setup_ref_clk_wait_us(hba, 100, 32);
1166 
1167 	return 0;
1168 }
1169 
1170 static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
1171 {
1172 	ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);
1173 
1174 	if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc &&
1175 	    (hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)) {
1176 		hba->vreg_info.vcc->always_on = true;
1177 		/*
1178 		 * VCC will be kept always-on thus we don't
1179 		 * need any delay during regulator operations
1180 		 */
1181 		hba->dev_quirks &= ~(UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
1182 			UFS_DEVICE_QUIRK_DELAY_AFTER_LPM);
1183 	}
1184 }
1185 
1186 static void ufs_mtk_event_notify(struct ufs_hba *hba,
1187 				 enum ufs_event_type evt, void *data)
1188 {
1189 	unsigned int val = *(u32 *)data;
1190 
1191 	trace_ufs_mtk_event(evt, val);
1192 }
1193 
1194 /*
1195  * struct ufs_hba_mtk_vops - UFS MTK specific variant operations
1196  *
1197  * The variant operations configure the necessary controller and PHY
1198  * handshake during initialization.
1199  */
1200 static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
1201 	.name                = "mediatek.ufshci",
1202 	.init                = ufs_mtk_init,
1203 	.get_ufs_hci_version = ufs_mtk_get_ufs_hci_version,
1204 	.setup_clocks        = ufs_mtk_setup_clocks,
1205 	.hce_enable_notify   = ufs_mtk_hce_enable_notify,
1206 	.link_startup_notify = ufs_mtk_link_startup_notify,
1207 	.pwr_change_notify   = ufs_mtk_pwr_change_notify,
1208 	.apply_dev_quirks    = ufs_mtk_apply_dev_quirks,
1209 	.fixup_dev_quirks    = ufs_mtk_fixup_dev_quirks,
1210 	.suspend             = ufs_mtk_suspend,
1211 	.resume              = ufs_mtk_resume,
1212 	.dbg_register_dump   = ufs_mtk_dbg_register_dump,
1213 	.device_reset        = ufs_mtk_device_reset,
1214 	.event_notify        = ufs_mtk_event_notify,
1215 };
1216 
1217 /**
1218  * ufs_mtk_probe - probe routine of the driver
1219  * @pdev: pointer to Platform device handle
1220  *
1221  * Return zero for success and non-zero for failure
1222  */
1223 static int ufs_mtk_probe(struct platform_device *pdev)
1224 {
1225 	int err;
1226 	struct device *dev = &pdev->dev;
1227 	struct device_node *reset_node;
1228 	struct platform_device *reset_pdev;
1229 	struct device_link *link;
1230 
1231 	reset_node = of_find_compatible_node(NULL, NULL,
1232 					     "ti,syscon-reset");
1233 	if (!reset_node) {
1234 		dev_notice(dev, "find ti,syscon-reset fail\n");
1235 		goto skip_reset;
1236 	}
1237 	reset_pdev = of_find_device_by_node(reset_node);
1238 	if (!reset_pdev) {
1239 		dev_notice(dev, "find reset_pdev fail\n");
1240 		goto skip_reset;
1241 	}
1242 	link = device_link_add(dev, &reset_pdev->dev,
1243 		DL_FLAG_AUTOPROBE_CONSUMER);
1244 	if (!link) {
1245 		dev_notice(dev, "add reset device_link fail\n");
1246 		goto skip_reset;
1247 	}
1248 	/* supplier is not probed */
1249 	if (link->status == DL_STATE_DORMANT) {
1250 		err = -EPROBE_DEFER;
1251 		goto out;
1252 	}
1253 
1254 skip_reset:
1255 	/* perform generic probe */
1256 	err = ufshcd_pltfrm_init(pdev, &ufs_hba_mtk_vops);
1257 
1258 out:
1259 	if (err)
1260 		dev_info(dev, "probe failed %d\n", err);
1261 
1262 	of_node_put(reset_node);
1263 	return err;
1264 }
1265 
1266 /**
1267  * ufs_mtk_remove - set driver_data of the device to NULL
1268  * @pdev: pointer to platform device handle
1269  *
1270  * Always return 0
1271  */
1272 static int ufs_mtk_remove(struct platform_device *pdev)
1273 {
1274 	struct ufs_hba *hba =  platform_get_drvdata(pdev);
1275 
1276 	pm_runtime_get_sync(&(pdev)->dev);
1277 	ufshcd_remove(hba);
1278 	return 0;
1279 }
1280 
1281 #ifdef CONFIG_PM_SLEEP
1282 int ufs_mtk_system_suspend(struct device *dev)
1283 {
1284 	struct ufs_hba *hba = dev_get_drvdata(dev);
1285 	int ret;
1286 
1287 	ret = ufshcd_system_suspend(dev);
1288 	if (ret)
1289 		return ret;
1290 
1291 	ufs_mtk_vreg_set_lpm(hba, true);
1292 
1293 	return 0;
1294 }
1295 
1296 int ufs_mtk_system_resume(struct device *dev)
1297 {
1298 	struct ufs_hba *hba = dev_get_drvdata(dev);
1299 
1300 	ufs_mtk_vreg_set_lpm(hba, false);
1301 
1302 	return ufshcd_system_resume(dev);
1303 }
1304 #endif
1305 
1306 int ufs_mtk_runtime_suspend(struct device *dev)
1307 {
1308 	struct ufs_hba *hba = dev_get_drvdata(dev);
1309 	int ret = 0;
1310 
1311 	ret = ufshcd_runtime_suspend(dev);
1312 	if (ret)
1313 		return ret;
1314 
1315 	ufs_mtk_vreg_set_lpm(hba, true);
1316 
1317 	return 0;
1318 }
1319 
1320 int ufs_mtk_runtime_resume(struct device *dev)
1321 {
1322 	struct ufs_hba *hba = dev_get_drvdata(dev);
1323 
1324 	ufs_mtk_vreg_set_lpm(hba, false);
1325 
1326 	return ufshcd_runtime_resume(dev);
1327 }
1328 
1329 static const struct dev_pm_ops ufs_mtk_pm_ops = {
1330 	SET_SYSTEM_SLEEP_PM_OPS(ufs_mtk_system_suspend,
1331 				ufs_mtk_system_resume)
1332 	SET_RUNTIME_PM_OPS(ufs_mtk_runtime_suspend,
1333 			   ufs_mtk_runtime_resume, NULL)
1334 	.prepare	 = ufshcd_suspend_prepare,
1335 	.complete	 = ufshcd_resume_complete,
1336 };
1337 
1338 static struct platform_driver ufs_mtk_pltform = {
1339 	.probe      = ufs_mtk_probe,
1340 	.remove     = ufs_mtk_remove,
1341 	.shutdown   = ufshcd_pltfrm_shutdown,
1342 	.driver = {
1343 		.name   = "ufshcd-mtk",
1344 		.pm     = &ufs_mtk_pm_ops,
1345 		.of_match_table = ufs_mtk_of_match,
1346 	},
1347 };
1348 
1349 MODULE_AUTHOR("Stanley Chu <stanley.chu@mediatek.com>");
1350 MODULE_AUTHOR("Peter Wang <peter.wang@mediatek.com>");
1351 MODULE_DESCRIPTION("MediaTek UFS Host Driver");
1352 MODULE_LICENSE("GPL v2");
1353 
1354 module_platform_driver(ufs_mtk_pltform);
1355