1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Universal Flash Storage Host controller Platform bus based glue driver
4 * Copyright (C) 2011-2013 Samsung India Software Operations
5 *
6 * Authors:
7 * Santosh Yaraganavi <santosh.sy@samsung.com>
8 * Vinayak Holikatti <h.vinayak@samsung.com>
9 */
10
11 #include <linux/platform_device.h>
12 #include <linux/pm_runtime.h>
13 #include <linux/of.h>
14
15 #include "ufshcd.h"
16 #include "ufshcd-pltfrm.h"
17 #include "unipro.h"
18
19 #define UFSHCD_DEFAULT_LANES_PER_DIRECTION 2
20
/**
 * ufshcd_parse_clock_info - parse "clock-names"/"freq-table-hz" from DT
 * @hba: per adapter instance
 *
 * Builds hba->clk_list_head from the device tree: one ufs_clk_info entry
 * per clock name, with its min/max frequency taken from consecutive pairs
 * in "freq-table-hz".  Missing properties are treated as "clocks always
 * enabled" and return 0; malformed properties return a negative errno.
 * All allocations are devm-managed, so error paths need no manual frees.
 */
static int ufshcd_parse_clock_info(struct ufs_hba *hba)
{
	int ret = 0;
	int cnt;
	int i;
	struct device *dev = hba->dev;
	struct device_node *np = dev->of_node;
	char *name;
	u32 *clkfreq = NULL;
	struct ufs_clk_info *clki;
	int len = 0;
	size_t sz = 0;

	/* Non-DT platform: nothing to parse, succeed with an empty list. */
	if (!np)
		goto out;

	cnt = of_property_count_strings(np, "clock-names");
	if (!cnt || (cnt == -EINVAL)) {
		/* Property absent or empty: not an error, assume enabled. */
		dev_info(dev, "%s: Unable to find clocks, assuming enabled\n",
				__func__);
	} else if (cnt < 0) {
		/* Any other negative count is a real parse failure. */
		dev_err(dev, "%s: count clock strings failed, err %d\n",
				__func__, cnt);
		ret = cnt;
	}

	/* ret is still the error code from above when cnt < 0. */
	if (cnt <= 0)
		goto out;

	if (!of_get_property(np, "freq-table-hz", &len)) {
		dev_info(dev, "freq-table-hz property not specified\n");
		goto out;
	}

	if (len <= 0)
		goto out;

	/* Table must hold exactly one <min max> pair per named clock. */
	sz = len / sizeof(*clkfreq);
	if (sz != 2 * cnt) {
		dev_err(dev, "%s len mismatch\n", "freq-table-hz");
		ret = -EINVAL;
		goto out;
	}

	clkfreq = devm_kcalloc(dev, sz, sizeof(*clkfreq),
			       GFP_KERNEL);
	if (!clkfreq) {
		ret = -ENOMEM;
		goto out;
	}

	ret = of_property_read_u32_array(np, "freq-table-hz",
			clkfreq, sz);
	if (ret && (ret != -EINVAL)) {
		dev_err(dev, "%s: error reading array %d\n",
				"freq-table-hz", ret);
		return ret;
	}

	/* Walk the table two entries at a time: clkfreq[i] / clkfreq[i+1]. */
	for (i = 0; i < sz; i += 2) {
		ret = of_property_read_string_index(np,
				"clock-names", i/2, (const char **)&name);
		if (ret)
			goto out;

		clki = devm_kzalloc(dev, sizeof(*clki), GFP_KERNEL);
		if (!clki) {
			ret = -ENOMEM;
			goto out;
		}

		clki->min_freq = clkfreq[i];
		clki->max_freq = clkfreq[i+1];
		clki->name = devm_kstrdup(dev, name, GFP_KERNEL);
		if (!clki->name) {
			ret = -ENOMEM;
			goto out;
		}

		/* The reference clock must stay on while the link is active. */
		if (!strcmp(name, "ref_clk"))
			clki->keep_link_active = true;
		dev_dbg(dev, "%s: min %u max %u name %s\n", "freq-table-hz",
				clki->min_freq, clki->max_freq, clki->name);
		list_add_tail(&clki->list, &hba->clk_list_head);
	}
out:
	return ret;
}
109
phandle_exists(const struct device_node * np,const char * phandle_name,int index)110 static bool phandle_exists(const struct device_node *np,
111 const char *phandle_name, int index)
112 {
113 struct device_node *parse_np = of_parse_phandle(np, phandle_name, index);
114
115 if (parse_np)
116 of_node_put(parse_np);
117
118 return parse_np != NULL;
119 }
120
121 #define MAX_PROP_SIZE 32
ufshcd_populate_vreg(struct device * dev,const char * name,struct ufs_vreg ** out_vreg)122 int ufshcd_populate_vreg(struct device *dev, const char *name,
123 struct ufs_vreg **out_vreg)
124 {
125 char prop_name[MAX_PROP_SIZE];
126 struct ufs_vreg *vreg = NULL;
127 struct device_node *np = dev->of_node;
128
129 if (!np) {
130 dev_err(dev, "%s: non DT initialization\n", __func__);
131 goto out;
132 }
133
134 snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", name);
135 if (!phandle_exists(np, prop_name, 0)) {
136 dev_info(dev, "%s: Unable to find %s regulator, assuming enabled\n",
137 __func__, prop_name);
138 goto out;
139 }
140
141 vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
142 if (!vreg)
143 return -ENOMEM;
144
145 vreg->name = devm_kstrdup(dev, name, GFP_KERNEL);
146 if (!vreg->name)
147 return -ENOMEM;
148
149 snprintf(prop_name, MAX_PROP_SIZE, "%s-max-microamp", name);
150 if (of_property_read_u32(np, prop_name, &vreg->max_uA)) {
151 dev_info(dev, "%s: unable to find %s\n", __func__, prop_name);
152 vreg->max_uA = 0;
153 }
154 out:
155 *out_vreg = vreg;
156 return 0;
157 }
158 EXPORT_SYMBOL_GPL(ufshcd_populate_vreg);
159
160 /**
161 * ufshcd_parse_regulator_info - get regulator info from device tree
162 * @hba: per adapter instance
163 *
164 * Get regulator info from device tree for vcc, vccq, vccq2 power supplies.
165 * If any of the supplies are not defined it is assumed that they are always-on
166 * and hence return zero. If the property is defined but parsing is failed
167 * then return corresponding error.
168 */
ufshcd_parse_regulator_info(struct ufs_hba * hba)169 static int ufshcd_parse_regulator_info(struct ufs_hba *hba)
170 {
171 int err;
172 struct device *dev = hba->dev;
173 struct ufs_vreg_info *info = &hba->vreg_info;
174
175 err = ufshcd_populate_vreg(dev, "vdd-hba", &info->vdd_hba);
176 if (err)
177 goto out;
178
179 err = ufshcd_populate_vreg(dev, "vcc", &info->vcc);
180 if (err)
181 goto out;
182
183 err = ufshcd_populate_vreg(dev, "vccq", &info->vccq);
184 if (err)
185 goto out;
186
187 err = ufshcd_populate_vreg(dev, "vccq2", &info->vccq2);
188 out:
189 return err;
190 }
191
/*
 * ufshcd_pltfrm_shutdown - platform shutdown hook
 * @pdev: platform device whose drvdata holds the ufs_hba
 *
 * Forwards to the core ufshcd_shutdown().  No cast is needed:
 * platform_get_drvdata() returns void *.
 */
void ufshcd_pltfrm_shutdown(struct platform_device *pdev)
{
	struct ufs_hba *hba = platform_get_drvdata(pdev);

	ufshcd_shutdown(hba);
}
EXPORT_SYMBOL_GPL(ufshcd_pltfrm_shutdown);
197
ufshcd_init_lanes_per_dir(struct ufs_hba * hba)198 static void ufshcd_init_lanes_per_dir(struct ufs_hba *hba)
199 {
200 struct device *dev = hba->dev;
201 int ret;
202
203 ret = of_property_read_u32(dev->of_node, "lanes-per-direction",
204 &hba->lanes_per_direction);
205 if (ret) {
206 dev_dbg(hba->dev,
207 "%s: failed to read lanes-per-direction, ret=%d\n",
208 __func__, ret);
209 hba->lanes_per_direction = UFSHCD_DEFAULT_LANES_PER_DIRECTION;
210 }
211 }
212
213 /**
214 * ufshcd_get_pwr_dev_param - get finally agreed attributes for
215 * power mode change
216 * @pltfrm_param: pointer to platform parameters
217 * @dev_max: pointer to device attributes
218 * @agreed_pwr: returned agreed attributes
219 *
220 * Returns 0 on success, non-zero value on failure
221 */
int ufshcd_get_pwr_dev_param(struct ufs_dev_params *pltfrm_param,
			     struct ufs_pa_layer_attr *dev_max,
			     struct ufs_pa_layer_attr *agreed_pwr)
{
	int min_pltfrm_gear;
	int min_dev_gear;
	bool is_dev_sup_hs = false;
	bool is_pltfrm_max_hs = false;

	/* Device advertises high-speed support via FAST_MODE on its rx side. */
	if (dev_max->pwr_rx == FAST_MODE)
		is_dev_sup_hs = true;

	/* Platform's minimum gear in its preferred mode (HS or PWM). */
	if (pltfrm_param->desired_working_mode == UFS_HS_MODE) {
		is_pltfrm_max_hs = true;
		min_pltfrm_gear = min_t(u32, pltfrm_param->hs_rx_gear,
					pltfrm_param->hs_tx_gear);
	} else {
		min_pltfrm_gear = min_t(u32, pltfrm_param->pwm_rx_gear,
					pltfrm_param->pwm_tx_gear);
	}

	/*
	 * device doesn't support HS but
	 * pltfrm_param->desired_working_mode is HS,
	 * thus device and pltfrm_param don't agree
	 */
	if (!is_dev_sup_hs && is_pltfrm_max_hs) {
		pr_info("%s: device doesn't support HS\n",
			__func__);
		/* NOTE(review): -ENOTSUPP is kernel-internal; existing callers expect it. */
		return -ENOTSUPP;
	} else if (is_dev_sup_hs && is_pltfrm_max_hs) {
		/*
		 * since device supports HS, it supports FAST_MODE.
		 * since pltfrm_param->desired_working_mode is also HS
		 * then final decision (FAST/FASTAUTO) is done according
		 * to pltfrm_params as it is the restricting factor
		 */
		agreed_pwr->pwr_rx = pltfrm_param->rx_pwr_hs;
		agreed_pwr->pwr_tx = agreed_pwr->pwr_rx;
	} else {
		/*
		 * here pltfrm_param->desired_working_mode is PWM.
		 * it doesn't matter whether device supports HS or PWM,
		 * in both cases pltfrm_param->desired_working_mode will
		 * determine the mode
		 */
		agreed_pwr->pwr_rx = pltfrm_param->rx_pwr_pwm;
		agreed_pwr->pwr_tx = agreed_pwr->pwr_rx;
	}

	/*
	 * we would like tx to work in the minimum number of lanes
	 * between device capability and vendor preferences.
	 * the same decision will be made for rx
	 */
	agreed_pwr->lane_tx = min_t(u32, dev_max->lane_tx,
				    pltfrm_param->tx_lanes);
	agreed_pwr->lane_rx = min_t(u32, dev_max->lane_rx,
				    pltfrm_param->rx_lanes);

	/* device maximum gear is the minimum between device rx and tx gears */
	min_dev_gear = min_t(u32, dev_max->gear_rx, dev_max->gear_tx);

	/*
	 * if both device capabilities and vendor pre-defined preferences are
	 * both HS or both PWM then set the minimum gear to be the chosen
	 * working gear.
	 * if one is PWM and one is HS then the one that is PWM get to decide
	 * what is the gear, as it is the one that also decided previously what
	 * pwr the device will be configured to.
	 */
	if ((is_dev_sup_hs && is_pltfrm_max_hs) ||
	    (!is_dev_sup_hs && !is_pltfrm_max_hs)) {
		agreed_pwr->gear_rx =
			min_t(u32, min_dev_gear, min_pltfrm_gear);
	} else if (!is_dev_sup_hs) {
		agreed_pwr->gear_rx = min_dev_gear;
	} else {
		agreed_pwr->gear_rx = min_pltfrm_gear;
	}
	/* rx and tx are always configured symmetrically. */
	agreed_pwr->gear_tx = agreed_pwr->gear_rx;

	agreed_pwr->hs_rate = pltfrm_param->hs_rate;

	return 0;
}
EXPORT_SYMBOL_GPL(ufshcd_get_pwr_dev_param);
309
ufshcd_init_pwr_dev_param(struct ufs_dev_params * dev_param)310 void ufshcd_init_pwr_dev_param(struct ufs_dev_params *dev_param)
311 {
312 dev_param->tx_lanes = 2;
313 dev_param->rx_lanes = 2;
314 dev_param->hs_rx_gear = UFS_HS_G3;
315 dev_param->hs_tx_gear = UFS_HS_G3;
316 dev_param->pwm_rx_gear = UFS_PWM_G4;
317 dev_param->pwm_tx_gear = UFS_PWM_G4;
318 dev_param->rx_pwr_pwm = SLOW_MODE;
319 dev_param->tx_pwr_pwm = SLOW_MODE;
320 dev_param->rx_pwr_hs = FAST_MODE;
321 dev_param->tx_pwr_hs = FAST_MODE;
322 dev_param->hs_rate = PA_HS_MODE_B;
323 dev_param->desired_working_mode = UFS_HS_MODE;
324 }
325 EXPORT_SYMBOL_GPL(ufshcd_init_pwr_dev_param);
326
327 /**
328 * ufshcd_pltfrm_init - probe routine of the driver
329 * @pdev: pointer to Platform device handle
330 * @vops: pointer to variant ops
331 *
332 * Returns 0 on success, non-zero value on failure
333 */
ufshcd_pltfrm_init(struct platform_device * pdev,const struct ufs_hba_variant_ops * vops)334 int ufshcd_pltfrm_init(struct platform_device *pdev,
335 const struct ufs_hba_variant_ops *vops)
336 {
337 struct ufs_hba *hba;
338 void __iomem *mmio_base;
339 int irq, err;
340 struct device *dev = &pdev->dev;
341
342 mmio_base = devm_platform_ioremap_resource(pdev, 0);
343 if (IS_ERR(mmio_base)) {
344 err = PTR_ERR(mmio_base);
345 goto out;
346 }
347
348 irq = platform_get_irq(pdev, 0);
349 if (irq < 0) {
350 err = irq;
351 goto out;
352 }
353
354 err = ufshcd_alloc_host(dev, &hba);
355 if (err) {
356 dev_err(&pdev->dev, "Allocation failed\n");
357 goto out;
358 }
359
360 hba->vops = vops;
361
362 err = ufshcd_parse_clock_info(hba);
363 if (err) {
364 dev_err(&pdev->dev, "%s: clock parse failed %d\n",
365 __func__, err);
366 goto dealloc_host;
367 }
368 err = ufshcd_parse_regulator_info(hba);
369 if (err) {
370 dev_err(&pdev->dev, "%s: regulator init failed %d\n",
371 __func__, err);
372 goto dealloc_host;
373 }
374
375 ufshcd_init_lanes_per_dir(hba);
376
377 err = ufshcd_init(hba, mmio_base, irq);
378 if (err) {
379 dev_err(dev, "Initialization failed\n");
380 goto dealloc_host;
381 }
382
383 pm_runtime_set_active(&pdev->dev);
384 pm_runtime_enable(&pdev->dev);
385
386 return 0;
387
388 dealloc_host:
389 ufshcd_dealloc_host(hba);
390 out:
391 return err;
392 }
393 EXPORT_SYMBOL_GPL(ufshcd_pltfrm_init);
394
395 MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
396 MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
397 MODULE_DESCRIPTION("UFS host controller Platform bus based glue driver");
398 MODULE_LICENSE("GPL");
399 MODULE_VERSION(UFSHCD_DRIVER_VERSION);
400