// SPDX-License-Identifier: GPL-2.0-only
/*
 * Synopsys HSDK SDP Generic PLL clock driver
 *
 * Copyright (C) 2017 Synopsys
 */

#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#define CGU_PLL_CTRL	0x000 /* ARC PLL control register */
#define CGU_PLL_STATUS	0x004 /* ARC PLL status register */
#define CGU_PLL_FMEAS	0x008 /* ARC PLL frequency measurement register */
#define CGU_PLL_MON	0x00C /* ARC PLL monitor register */

#define CGU_PLL_CTRL_ODIV_SHIFT		2
#define CGU_PLL_CTRL_IDIV_SHIFT		4
#define CGU_PLL_CTRL_FBDIV_SHIFT	9
#define CGU_PLL_CTRL_BAND_SHIFT		20

#define CGU_PLL_CTRL_ODIV_MASK		GENMASK(3, CGU_PLL_CTRL_ODIV_SHIFT)
#define CGU_PLL_CTRL_IDIV_MASK		GENMASK(8, CGU_PLL_CTRL_IDIV_SHIFT)
#define CGU_PLL_CTRL_FBDIV_MASK		GENMASK(15, CGU_PLL_CTRL_FBDIV_SHIFT)

#define CGU_PLL_CTRL_PD			BIT(0)
#define CGU_PLL_CTRL_BYPASS		BIT(1)

#define CGU_PLL_STATUS_LOCK		BIT(0)
#define CGU_PLL_STATUS_ERR		BIT(1)

#define HSDK_PLL_MAX_LOCK_TIME		100 /* 100 us */

#define CGU_PLL_SOURCE_MAX		1

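/*
 * Divider values for the core interface clock, written to the extra
 * register region (spec_regs) by the core PLL: div-by-1 while the core
 * clock is at or below the 500 MHz threshold, div-by-2 above it.
 */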
#define CORE_IF_CLK_THRESHOLD_HZ	500000000
#define CREG_CORE_IF_CLK_DIV_1		0x0
#define CREG_CORE_IF_CLK_DIV_2		0x1

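/*
 * One rate-table entry: the target output rate and the raw idiv/fbdiv/odiv,
 * band and bypass values programmed into CGU_PLL_CTRL to produce it.
 */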
struct hsdk_pll_cfg {
	u32 rate;
	u32 idiv;
	u32 fbdiv;
	u32 odiv;
	u32 band;
	u32 bypass;
};

static const struct hsdk_pll_cfg asdt_pll_cfg[] = {
	{ 100000000,  0, 11, 3, 0, 0 },
	{ 133000000,  0, 15, 3, 0, 0 },
	{ 200000000,  1, 47, 3, 0, 0 },
	{ 233000000,  1, 27, 2, 0, 0 },
	{ 300000000,  1, 35, 2, 0, 0 },
	{ 333000000,  1, 39, 2, 0, 0 },
	{ 400000000,  1, 47, 2, 0, 0 },
	{ 500000000,  0, 14, 1, 0, 0 },
	{ 600000000,  0, 17, 1, 0, 0 },
	{ 700000000,  0, 20, 1, 0, 0 },
	{ 800000000,  0, 23, 1, 0, 0 },
	{ 900000000,  1, 26, 0, 0, 0 },
	{ 1000000000, 1, 29, 0, 0, 0 },
	{ 1100000000, 1, 32, 0, 0, 0 },
	{ 1200000000, 1, 35, 0, 0, 0 },
	{ 1300000000, 1, 38, 0, 0, 0 },
	{ 1400000000, 1, 41, 0, 0, 0 },
	{ 1500000000, 1, 44, 0, 0, 0 },
	{ 1600000000, 1, 47, 0, 0, 0 },
	{}
};

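/*
 * HDMI PLL rate table; the 27 MHz entry bypasses the PLL and passes the
 * input clock straight through.
 */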
static const struct hsdk_pll_cfg hdmi_pll_cfg[] = {
	{ 27000000,   0, 0,  0, 0, 1 },
	{ 148500000,  0, 21, 3, 0, 0 },
	{ 297000000,  0, 21, 2, 0, 0 },
	{ 540000000,  0, 19, 1, 0, 0 },
	{ 594000000,  0, 21, 1, 0, 0 },
	{}
};

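/*
 * Per-PLL instance data: the clk_hw handle, the PLL register block (regs),
 * an optional second register block used by the core PLL to program the
 * interface clock divider (spec_regs), and the matched device data.
 */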
struct hsdk_pll_clk {
	struct clk_hw hw;
	void __iomem *regs;
	void __iomem *spec_regs;
	const struct hsdk_pll_devdata *pll_devdata;
	struct device *dev;
};

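/*
 * Per-compatible data: the rate table to use and the update_rate callback
 * that programs a new configuration into the hardware.
 */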
struct hsdk_pll_devdata {
	const struct hsdk_pll_cfg *pll_cfg;
	int (*update_rate)(struct hsdk_pll_clk *clk, unsigned long rate,
			   const struct hsdk_pll_cfg *cfg);
};

static int hsdk_pll_core_update_rate(struct hsdk_pll_clk *, unsigned long,
				     const struct hsdk_pll_cfg *);
static int hsdk_pll_comm_update_rate(struct hsdk_pll_clk *, unsigned long,
				     const struct hsdk_pll_cfg *);

static const struct hsdk_pll_devdata core_pll_devdata = {
	.pll_cfg = asdt_pll_cfg,
	.update_rate = hsdk_pll_core_update_rate,
};

static const struct hsdk_pll_devdata sdt_pll_devdata = {
	.pll_cfg = asdt_pll_cfg,
	.update_rate = hsdk_pll_comm_update_rate,
};

static const struct hsdk_pll_devdata hdmi_pll_devdata = {
	.pll_cfg = hdmi_pll_cfg,
	.update_rate = hsdk_pll_comm_update_rate,
};

static inline void hsdk_pll_write(struct hsdk_pll_clk *clk, u32 reg, u32 val)
{
	iowrite32(val, clk->regs + reg);
}

static inline u32 hsdk_pll_read(struct hsdk_pll_clk *clk, u32 reg)
{
	return ioread32(clk->regs + reg);
}

static inline void hsdk_pll_set_cfg(struct hsdk_pll_clk *clk,
				    const struct hsdk_pll_cfg *cfg)
{
	u32 val = 0;

	if (cfg->bypass) {
		val = hsdk_pll_read(clk, CGU_PLL_CTRL);
		val |= CGU_PLL_CTRL_BYPASS;
	} else {
		/* Powerdown and Bypass bits should be cleared */
		val |= cfg->idiv << CGU_PLL_CTRL_IDIV_SHIFT;
		val |= cfg->fbdiv << CGU_PLL_CTRL_FBDIV_SHIFT;
		val |= cfg->odiv << CGU_PLL_CTRL_ODIV_SHIFT;
		val |= cfg->band << CGU_PLL_CTRL_BAND_SHIFT;
	}

	dev_dbg(clk->dev, "write configuration: %#x\n", val);

	hsdk_pll_write(clk, CGU_PLL_CTRL, val);
}

static inline bool hsdk_pll_is_locked(struct hsdk_pll_clk *clk)
{
	return !!(hsdk_pll_read(clk, CGU_PLL_STATUS) & CGU_PLL_STATUS_LOCK);
}

static inline bool hsdk_pll_is_err(struct hsdk_pll_clk *clk)
{
	return !!(hsdk_pll_read(clk, CGU_PLL_STATUS) & CGU_PLL_STATUS_ERR);
}

static inline struct hsdk_pll_clk *to_hsdk_pll_clk(struct clk_hw *hw)
{
	return container_of(hw, struct hsdk_pll_clk, hw);
}

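/*
 * Recalculate the current output rate from CGU_PLL_CTRL:
 *   rate = parent_rate * 2 * (fbdiv + 1) / ((idiv + 1) * 2^odiv)
 * A bypassed PLL outputs the parent rate; a powered-down PLL reports 0.
 */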
static unsigned long hsdk_pll_recalc_rate(struct clk_hw *hw,
					  unsigned long parent_rate)
{
	u32 val;
	u64 rate;
	u32 idiv, fbdiv, odiv;
	struct hsdk_pll_clk *clk = to_hsdk_pll_clk(hw);

	val = hsdk_pll_read(clk, CGU_PLL_CTRL);

	dev_dbg(clk->dev, "current configuration: %#x\n", val);

	/* Check if PLL is bypassed */
	if (val & CGU_PLL_CTRL_BYPASS)
		return parent_rate;

	/* Check if PLL is disabled */
	if (val & CGU_PLL_CTRL_PD)
		return 0;

	/* input divider = reg.idiv + 1 */
	idiv = 1 + ((val & CGU_PLL_CTRL_IDIV_MASK) >> CGU_PLL_CTRL_IDIV_SHIFT);
	/* fb divider = 2*(reg.fbdiv + 1) */
	fbdiv = 2 * (1 + ((val & CGU_PLL_CTRL_FBDIV_MASK) >> CGU_PLL_CTRL_FBDIV_SHIFT));
	/* output divider = 2^(reg.odiv) */
	odiv = 1 << ((val & CGU_PLL_CTRL_ODIV_MASK) >> CGU_PLL_CTRL_ODIV_SHIFT);

	rate = (u64)parent_rate * fbdiv;
	do_div(rate, idiv * odiv);

	return rate;
}

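/* Round the requested rate to the closest rate in the PLL's rate table. */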
static long hsdk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long *prate)
{
	int i;
	unsigned long best_rate;
	struct hsdk_pll_clk *clk = to_hsdk_pll_clk(hw);
	const struct hsdk_pll_cfg *pll_cfg = clk->pll_devdata->pll_cfg;

	if (pll_cfg[0].rate == 0)
		return -EINVAL;

	best_rate = pll_cfg[0].rate;

	for (i = 1; pll_cfg[i].rate != 0; i++) {
		if (abs(rate - pll_cfg[i].rate) < abs(rate - best_rate))
			best_rate = pll_cfg[i].rate;
	}

	dev_dbg(clk->dev, "chosen best rate: %lu\n", best_rate);

	return best_rate;
}

static int hsdk_pll_comm_update_rate(struct hsdk_pll_clk *clk,
				     unsigned long rate,
				     const struct hsdk_pll_cfg *cfg)
{
	hsdk_pll_set_cfg(clk, cfg);

	/*
	 * Wait for the CGU to relock, then check the error status.
	 * If the CGU is still unlocked after the timeout, return an error.
	 */
	udelay(HSDK_PLL_MAX_LOCK_TIME);
	if (!hsdk_pll_is_locked(clk))
		return -ETIMEDOUT;

	if (hsdk_pll_is_err(clk))
		return -EINVAL;

	return 0;
}

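/*
 * Core PLL rate change: switch the interface clock divider to div-by-2
 * before raising the core clock above the 500 MHz threshold, and switch
 * it back to div-by-1 only after the PLL has locked at or below the
 * threshold.
 */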
static int hsdk_pll_core_update_rate(struct hsdk_pll_clk *clk,
				     unsigned long rate,
				     const struct hsdk_pll_cfg *cfg)
{
	/*
	 * When core clock exceeds 500MHz, the divider for the interface
	 * clock must be programmed to div-by-2.
	 */
	if (rate > CORE_IF_CLK_THRESHOLD_HZ)
		iowrite32(CREG_CORE_IF_CLK_DIV_2, clk->spec_regs);

	hsdk_pll_set_cfg(clk, cfg);

	/*
	 * Wait for the CGU to relock, then check the error status.
	 * If the CGU is still unlocked after the timeout, return an error.
	 */
	udelay(HSDK_PLL_MAX_LOCK_TIME);
	if (!hsdk_pll_is_locked(clk))
		return -ETIMEDOUT;

	if (hsdk_pll_is_err(clk))
		return -EINVAL;

	/*
	 * Program the divider to div-by-1 if we successfully set the core
	 * clock below the 500MHz threshold.
	 */
	if (rate <= CORE_IF_CLK_THRESHOLD_HZ)
		iowrite32(CREG_CORE_IF_CLK_DIV_1, clk->spec_regs);

	return 0;
}

static int hsdk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
			     unsigned long parent_rate)
{
	int i;
	struct hsdk_pll_clk *clk = to_hsdk_pll_clk(hw);
	const struct hsdk_pll_cfg *pll_cfg = clk->pll_devdata->pll_cfg;

	for (i = 0; pll_cfg[i].rate != 0; i++) {
		if (pll_cfg[i].rate == rate) {
			return clk->pll_devdata->update_rate(clk, rate,
							     &pll_cfg[i]);
		}
	}

	dev_err(clk->dev, "invalid rate=%ld, parent_rate=%ld\n", rate,
			parent_rate);

	return -EINVAL;
}

static const struct clk_ops hsdk_pll_ops = {
	.recalc_rate = hsdk_pll_recalc_rate,
	.round_rate = hsdk_pll_round_rate,
	.set_rate = hsdk_pll_set_rate,
};

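/*
 * Platform driver probe path, used for the generic and HDMI PLLs matched
 * via hsdk_pll_clk_id below.
 */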
static int hsdk_pll_clk_probe(struct platform_device *pdev)
{
	int ret;
	struct resource *mem;
	const char *parent_name;
	unsigned int num_parents;
	struct hsdk_pll_clk *pll_clk;
	struct clk_init_data init = { };
	struct device *dev = &pdev->dev;

	pll_clk = devm_kzalloc(dev, sizeof(*pll_clk), GFP_KERNEL);
	if (!pll_clk)
		return -ENOMEM;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pll_clk->regs = devm_ioremap_resource(dev, mem);
	if (IS_ERR(pll_clk->regs))
		return PTR_ERR(pll_clk->regs);

	init.name = dev->of_node->name;
	init.ops = &hsdk_pll_ops;
	parent_name = of_clk_get_parent_name(dev->of_node, 0);
	init.parent_names = &parent_name;
	num_parents = of_clk_get_parent_count(dev->of_node);
	if (num_parents == 0 || num_parents > CGU_PLL_SOURCE_MAX) {
		dev_err(dev, "wrong clock parents number: %u\n", num_parents);
		return -EINVAL;
	}
	init.num_parents = num_parents;

	pll_clk->hw.init = &init;
	pll_clk->dev = dev;
	pll_clk->pll_devdata = of_device_get_match_data(dev);

	if (!pll_clk->pll_devdata) {
		dev_err(dev, "No OF match data provided\n");
		return -EINVAL;
	}

	ret = devm_clk_hw_register(dev, &pll_clk->hw);
	if (ret) {
		dev_err(dev, "failed to register %s clock\n", init.name);
		return ret;
	}

	return of_clk_add_hw_provider(dev->of_node, of_clk_hw_simple_get,
			&pll_clk->hw);
}

static int hsdk_pll_clk_remove(struct platform_device *pdev)
{
	of_clk_del_provider(pdev->dev.of_node);
	return 0;
}

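/*
 * Early OF setup path for the core PLL, registered with CLK_OF_DECLARE
 * below: the ARC CPU timers depend on it, so it must be available before
 * regular platform device probing.
 */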
static void __init of_hsdk_pll_clk_setup(struct device_node *node)
{
	int ret;
	const char *parent_name;
	unsigned int num_parents;
	struct hsdk_pll_clk *pll_clk;
	struct clk_init_data init = { };

	pll_clk = kzalloc(sizeof(*pll_clk), GFP_KERNEL);
	if (!pll_clk)
		return;

	pll_clk->regs = of_iomap(node, 0);
	if (!pll_clk->regs) {
		pr_err("failed to map pll registers\n");
		goto err_free_pll_clk;
	}

	pll_clk->spec_regs = of_iomap(node, 1);
	if (!pll_clk->spec_regs) {
		pr_err("failed to map pll registers\n");
		goto err_unmap_comm_regs;
	}

	init.name = node->name;
	init.ops = &hsdk_pll_ops;
	parent_name = of_clk_get_parent_name(node, 0);
	init.parent_names = &parent_name;
	num_parents = of_clk_get_parent_count(node);
	if (num_parents > CGU_PLL_SOURCE_MAX) {
		pr_err("too many clock parents: %u\n", num_parents);
		goto err_unmap_spec_regs;
	}
	init.num_parents = num_parents;

	pll_clk->hw.init = &init;
	pll_clk->pll_devdata = &core_pll_devdata;

	ret = clk_hw_register(NULL, &pll_clk->hw);
	if (ret) {
		pr_err("failed to register %pOFn clock\n", node);
		goto err_unmap_spec_regs;
	}

	ret = of_clk_add_hw_provider(node, of_clk_hw_simple_get, &pll_clk->hw);
	if (ret) {
		pr_err("failed to add hw provider for %pOFn clock\n", node);
		goto err_unmap_spec_regs;
	}

	return;

err_unmap_spec_regs:
	iounmap(pll_clk->spec_regs);
err_unmap_comm_regs:
	iounmap(pll_clk->regs);
err_free_pll_clk:
	kfree(pll_clk);
}

/* Core PLL needed early for ARC CPU timers */
CLK_OF_DECLARE(hsdk_pll_clock, "snps,hsdk-core-pll-clock",
	       of_hsdk_pll_clk_setup);

static const struct of_device_id hsdk_pll_clk_id[] = {
	{ .compatible = "snps,hsdk-gp-pll-clock", .data = &sdt_pll_devdata},
	{ .compatible = "snps,hsdk-hdmi-pll-clock", .data = &hdmi_pll_devdata},
	{ }
};

static struct platform_driver hsdk_pll_clk_driver = {
	.driver = {
		.name = "hsdk-gp-pll-clock",
		.of_match_table = hsdk_pll_clk_id,
	},
	.probe = hsdk_pll_clk_probe,
	.remove = hsdk_pll_clk_remove,
};
builtin_platform_driver(hsdk_pll_clk_driver);