/*
 * Copyright (c) 2012 National Instruments
 *
 * Josh Cartwright <josh.cartwright@ni.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/io.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/clk-provider.h>
#include <linux/clk/zynq.h>

static void __iomem *slcr_base;

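/**
 * struct zynq_pll_clk - per-PLL clock bookkeeping
 * @hw:		handle handed to the common clock framework
 * @pll_ctrl:	mapped PLL control register (holds the feedback divider)
 * @pll_cfg:	mapped PLL configuration register (stored but not used here)
 */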
struct zynq_pll_clk {
	struct clk_hw	hw;
	void __iomem	*pll_ctrl;
	void __iomem	*pll_cfg;
};

#define to_zynq_pll_clk(hw)	container_of(hw, struct zynq_pll_clk, hw)

#define CTRL_PLL_FDIV(x)	((x) >> 12)

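/*
 * The PLL output rate is the reference rate multiplied by the feedback
 * divider held in the control register.  As a rough worked example
 * (values are illustrative, not read from hardware): with a 33.33 MHz
 * reference and FDIV = 40, recalc_rate() returns
 * 33333333 * 40 = 1333333320 Hz, i.e. roughly 1.33 GHz.
 */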
static unsigned long zynq_pll_recalc_rate(struct clk_hw *hw,
					  unsigned long parent_rate)
{
	struct zynq_pll_clk *pll = to_zynq_pll_clk(hw);
	return parent_rate * CTRL_PLL_FDIV(ioread32(pll->pll_ctrl));
}

static const struct clk_ops zynq_pll_clk_ops = {
	.recalc_rate	= zynq_pll_recalc_rate,
};

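/*
 * Bound to "xlnx,zynq-pll" nodes via CLK_OF_DECLARE() below.  The two
 * "reg" cells are byte offsets of the PLL control and configuration
 * registers relative to the SLCR base handed to xilinx_zynq_clocks_init().
 * A node is expected to look roughly like the following sketch (the
 * label, register offsets and names are illustrative assumptions, not
 * taken from a binding document):
 *
 *	armpll: pll@100 {
 *		#clock-cells = <0>;
 *		compatible = "xlnx,zynq-pll";
 *		clocks = <&ps_clk>;
 *		reg = <0x100 0x110>;
 *		clock-output-names = "armpll";
 *	};
 */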
static void __init zynq_pll_clk_setup(struct device_node *np)
{
	struct clk_init_data init;
	struct zynq_pll_clk *pll;
	const char *parent_name;
	struct clk *clk;
	u32 regs[2];
	int ret;

	ret = of_property_read_u32_array(np, "reg", regs, ARRAY_SIZE(regs));
	if (WARN_ON(ret))
		return;

	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
	if (WARN_ON(!pll))
		return;

	pll->pll_ctrl = slcr_base + regs[0];
	pll->pll_cfg  = slcr_base + regs[1];

	of_property_read_string(np, "clock-output-names", &init.name);

	init.ops = &zynq_pll_clk_ops;
	parent_name = of_clk_get_parent_name(np, 0);
	init.parent_names = &parent_name;
	init.num_parents = 1;

	pll->hw.init = &init;

	clk = clk_register(NULL, &pll->hw);
	if (WARN_ON(IS_ERR(clk)))
		return;

	ret = of_clk_add_provider(np, of_clk_src_simple_get, clk);
	if (WARN_ON(ret))
		return;
}
CLK_OF_DECLARE(zynq_pll, "xlnx,zynq-pll", zynq_pll_clk_setup);

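/**
 * struct zynq_periph_clk - peripheral clock (mux + divider + gates)
 * @hw:			handle for the mux/divider itself
 * @onecell_data:	provider data exposing the downstream gate clocks
 * @gates:		up to two gate clocks fed by this clock
 * @clk_ctrl:		mapped peripheral clock control register
 * @clkact_lock:	protects the CLKACT gate bits shared by both gates
 */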
struct zynq_periph_clk {
	struct clk_hw		hw;
	struct clk_onecell_data	onecell_data;
	struct clk		*gates[2];
	void __iomem		*clk_ctrl;
	spinlock_t		clkact_lock;
};

#define to_zynq_periph_clk(hw)	container_of(hw, struct zynq_periph_clk, hw)

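/*
 * The SRCSEL field in bits [5:4] of the control register picks the
 * parent; hardware values 0 and 1 both select the first parent, hence
 * the lookup table below.  The divisor lives in bits [13:8].
 */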
static const u8 periph_clk_parent_map[] = {
	0, 0, 1, 2
};
#define PERIPH_CLK_CTRL_SRC(x)	(periph_clk_parent_map[((x) & 0x30) >> 4])
#define PERIPH_CLK_CTRL_DIV(x)	(((x) & 0x3F00) >> 8)

static unsigned long zynq_periph_recalc_rate(struct clk_hw *hw,
					     unsigned long parent_rate)
{
	struct zynq_periph_clk *periph = to_zynq_periph_clk(hw);
	return parent_rate / PERIPH_CLK_CTRL_DIV(ioread32(periph->clk_ctrl));
}

static u8 zynq_periph_get_parent(struct clk_hw *hw)
{
	struct zynq_periph_clk *periph = to_zynq_periph_clk(hw);
	return PERIPH_CLK_CTRL_SRC(ioread32(periph->clk_ctrl));
}

static const struct clk_ops zynq_periph_clk_ops = {
	.recalc_rate	= zynq_periph_recalc_rate,
	.get_parent	= zynq_periph_get_parent,
};

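/*
 * Registers the mux/divider clock for the node itself and then up to
 * two downstream gate clocks (bits 0 and 1 of the same control
 * register), exposed through a onecell provider so consumers can pick
 * a gate by index.
 */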
static void __init zynq_periph_clk_setup(struct device_node *np)
{
	struct zynq_periph_clk *periph;
	const char *parent_names[3];
	struct clk_init_data init;
	int clk_num = 0, err;
	const char *name;
	struct clk *clk;
	u32 reg;
	int i;

	err = of_property_read_u32(np, "reg", &reg);
	if (WARN_ON(err))
		return;

	periph = kzalloc(sizeof(*periph), GFP_KERNEL);
	if (WARN_ON(!periph))
		return;

	periph->clk_ctrl = slcr_base + reg;
	spin_lock_init(&periph->clkact_lock);

	init.name = np->name;
	init.ops = &zynq_periph_clk_ops;
	for (i = 0; i < ARRAY_SIZE(parent_names); i++)
		parent_names[i] = of_clk_get_parent_name(np, i);
	init.parent_names = parent_names;
	init.num_parents = ARRAY_SIZE(parent_names);

	periph->hw.init = &init;

	clk = clk_register(NULL, &periph->hw);
	if (WARN_ON(IS_ERR(clk)))
		return;

	err = of_clk_add_provider(np, of_clk_src_simple_get, clk);
	if (WARN_ON(err))
		return;

	err = of_property_read_string_index(np, "clock-output-names", 0,
					    &name);
	if (WARN_ON(err))
		return;

	periph->gates[0] = clk_register_gate(NULL, name, np->name, 0,
					     periph->clk_ctrl, 0, 0,
					     &periph->clkact_lock);
	if (WARN_ON(IS_ERR(periph->gates[0])))
		return;
	clk_num++;

	/* some periph clks have 2 downstream gates */
	err = of_property_read_string_index(np, "clock-output-names", 1,
					    &name);
	if (err != -ENODATA) {
		periph->gates[1] = clk_register_gate(NULL, name, np->name, 0,
						     periph->clk_ctrl, 1, 0,
						     &periph->clkact_lock);
		if (WARN_ON(IS_ERR(periph->gates[1])))
			return;
		clk_num++;
	}

	periph->onecell_data.clks = periph->gates;
	periph->onecell_data.clk_num = clk_num;

	err = of_clk_add_provider(np, of_clk_src_onecell_get,
				  &periph->onecell_data);
	if (WARN_ON(err))
		return;
}
CLK_OF_DECLARE(zynq_periph, "xlnx,zynq-periph-clock", zynq_periph_clk_setup);

/* The CPU clock domain is modelled as a mux with four child sub-clocks
 * whose derived rates depend on CLK_621_TRUE.
 */

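/**
 * struct zynq_cpu_clk - CPU clock domain (mux + divider)
 * @hw:			handle for the CPU mux/divider
 * @onecell_data:	provider data exposing the four sub-clocks
 * @subclks:		the 6x4x, 3x2x, 2x and 1x sub-clocks
 * @clk_ctrl:		mapped CPU clock control register
 * @clkact_lock:	initialised but not otherwise used by this driver
 */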
struct zynq_cpu_clk {
	struct clk_hw		hw;
	struct clk_onecell_data	onecell_data;
	struct clk		*subclks[4];
	void __iomem		*clk_ctrl;
	spinlock_t		clkact_lock;
};

#define to_zynq_cpu_clk(hw)	container_of(hw, struct zynq_cpu_clk, hw)

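/*
 * As with the peripheral clocks, SRCSEL sits in bits [5:4] and the
 * divisor in bits [13:8]; the table below folds the four hardware
 * source values onto three distinct parents (values 0 and 1 both
 * select the second parent).
 */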
static const u8 zynq_cpu_clk_parent_map[] = {
	1, 1, 2, 0
};
#define CPU_CLK_SRCSEL(x)	(zynq_cpu_clk_parent_map[(((x) & 0x30) >> 4)])
#define CPU_CLK_CTRL_DIV(x)	(((x) & 0x3F00) >> 8)

static u8 zynq_cpu_clk_get_parent(struct clk_hw *hw)
{
	struct zynq_cpu_clk *cpuclk = to_zynq_cpu_clk(hw);
	return CPU_CLK_SRCSEL(ioread32(cpuclk->clk_ctrl));
}

static unsigned long zynq_cpu_clk_recalc_rate(struct clk_hw *hw,
					      unsigned long parent_rate)
{
	struct zynq_cpu_clk *cpuclk = to_zynq_cpu_clk(hw);
	return parent_rate / CPU_CLK_CTRL_DIV(ioread32(cpuclk->clk_ctrl));
}

static const struct clk_ops zynq_cpu_clk_ops = {
	.get_parent	= zynq_cpu_clk_get_parent,
	.recalc_rate	= zynq_cpu_clk_recalc_rate,
};

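/**
 * struct zynq_cpu_subclk - one of the four fixed-ratio CPU sub-clocks
 * @hw:		handle handed to the common clock framework
 * @clk_621:	mapped CLK_621_TRUE register selecting the ratio mode
 * @which:	which of the 6x4x/3x2x/2x/1x outputs this instance models
 */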
struct zynq_cpu_subclk {
	struct clk_hw	hw;
	void __iomem	*clk_621;
	enum {
		CPU_SUBCLK_6X4X,
		CPU_SUBCLK_3X2X,
		CPU_SUBCLK_2X,
		CPU_SUBCLK_1X,
	} which;
};

#define CLK_621_TRUE(x)	((x) & 1)

#define to_zynq_cpu_subclk(hw)	container_of(hw, struct zynq_cpu_subclk, hw)

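/*
 * Bit 0 of CLK_621_TRUE selects between the 6:2:1 and 4:2:1 CPU clock
 * ratio modes; the per-sub-clock divisors below follow from that
 * choice (e.g. the 1x clock is the 6x4x rate divided by 6 in 6:2:1
 * mode, or by 4 in 4:2:1 mode).
 */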
static unsigned long zynq_cpu_subclk_recalc_rate(struct clk_hw *hw,
						 unsigned long parent_rate)
{
	unsigned long uninitialized_var(rate);
	struct zynq_cpu_subclk *subclk;
	bool is_621;

	subclk = to_zynq_cpu_subclk(hw);
	is_621 = CLK_621_TRUE(ioread32(subclk->clk_621));

	switch (subclk->which) {
	case CPU_SUBCLK_6X4X:
		rate = parent_rate;
		break;
	case CPU_SUBCLK_3X2X:
		rate = parent_rate / 2;
		break;
	case CPU_SUBCLK_2X:
		rate = parent_rate / (is_621 ? 3 : 2);
		break;
	case CPU_SUBCLK_1X:
		rate = parent_rate / (is_621 ? 6 : 4);
		break;
	}

	return rate;
}

static const struct clk_ops zynq_cpu_subclk_ops = {
	.recalc_rate	= zynq_cpu_subclk_recalc_rate,
};

static struct clk *zynq_cpu_subclk_setup(struct device_node *np, u8 which,
					 void __iomem *clk_621)
{
	struct zynq_cpu_subclk *subclk;
	struct clk_init_data init;
	struct clk *clk;
	int err;

	err = of_property_read_string_index(np, "clock-output-names",
					    which, &init.name);
	if (WARN_ON(err))
		goto err_read_output_name;

	subclk = kzalloc(sizeof(*subclk), GFP_KERNEL);
	if (!subclk)
		goto err_subclk_alloc;

	subclk->clk_621 = clk_621;
	subclk->which = which;

	init.ops = &zynq_cpu_subclk_ops;
	init.parent_names = &np->name;
	init.num_parents = 1;

	subclk->hw.init = &init;

	clk = clk_register(NULL, &subclk->hw);
	if (WARN_ON(IS_ERR(clk)))
		goto err_clk_register;

	return clk;

err_clk_register:
	kfree(subclk);
err_subclk_alloc:
err_read_output_name:
	return ERR_PTR(-EINVAL);
}

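/*
 * Registers the CPU mux/divider for the node and then the four
 * fixed-ratio sub-clocks, which are exposed to consumers through a
 * onecell provider indexed 0..3 (6x4x, 3x2x, 2x, 1x).
 */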
static void __init zynq_cpu_clk_setup(struct device_node *np)
{
	struct zynq_cpu_clk *cpuclk;
	const char *parent_names[3];
	struct clk_init_data init;
	void __iomem *clk_621;
	struct clk *clk;
	u32 reg[2];
	int err;
	int i;

	err = of_property_read_u32_array(np, "reg", reg, ARRAY_SIZE(reg));
	if (WARN_ON(err))
		return;

	cpuclk = kzalloc(sizeof(*cpuclk), GFP_KERNEL);
	if (WARN_ON(!cpuclk))
		return;

	cpuclk->clk_ctrl = slcr_base + reg[0];
	clk_621 = slcr_base + reg[1];
	spin_lock_init(&cpuclk->clkact_lock);

	init.name = np->name;
	init.ops = &zynq_cpu_clk_ops;
	for (i = 0; i < ARRAY_SIZE(parent_names); i++)
		parent_names[i] = of_clk_get_parent_name(np, i);
	init.parent_names = parent_names;
	init.num_parents = ARRAY_SIZE(parent_names);

	cpuclk->hw.init = &init;

	clk = clk_register(NULL, &cpuclk->hw);
	if (WARN_ON(IS_ERR(clk)))
		return;

	err = of_clk_add_provider(np, of_clk_src_simple_get, clk);
	if (WARN_ON(err))
		return;

	for (i = 0; i < 4; i++) {
		cpuclk->subclks[i] = zynq_cpu_subclk_setup(np, i, clk_621);
		if (WARN_ON(IS_ERR(cpuclk->subclks[i])))
			return;
	}

	cpuclk->onecell_data.clks = cpuclk->subclks;
	cpuclk->onecell_data.clk_num = i;

	err = of_clk_add_provider(np, of_clk_src_onecell_get,
				  &cpuclk->onecell_data);
	if (WARN_ON(err))
		return;
}
CLK_OF_DECLARE(zynq_cpu, "xlnx,zynq-cpu-clock", zynq_cpu_clk_setup);

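/*
 * Entry point for Zynq platform code: it is expected to be called once
 * the SLCR has been ioremapped, after which of_clk_init(NULL) walks the
 * device tree and runs the CLK_OF_DECLARE() hooks above.  A caller
 * would do something along these lines (the SLCR physical address and
 * mapping size are illustrative assumptions):
 *
 *	void __iomem *slcr = ioremap(0xF8000000, SZ_4K);
 *
 *	if (slcr)
 *		xilinx_zynq_clocks_init(slcr);
 */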
void __init xilinx_zynq_clocks_init(void __iomem *slcr)
{
	slcr_base = slcr;
	of_clk_init(NULL);
}