/*
 * Helper routines for SuperH Clock Pulse Generator blocks (CPG).
 *
 * Copyright (C) 2010 Magnus Damm
 * Copyright (C) 2010 - 2012 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/sh_clk.h>

#define CPG_CKSTP_BIT	BIT(8)
18
sh_clk_read(struct clk * clk)19 static unsigned int sh_clk_read(struct clk *clk)
20 {
21 if (clk->flags & CLK_ENABLE_REG_8BIT)
22 return ioread8(clk->mapped_reg);
23 else if (clk->flags & CLK_ENABLE_REG_16BIT)
24 return ioread16(clk->mapped_reg);
25
26 return ioread32(clk->mapped_reg);
27 }
28
sh_clk_write(int value,struct clk * clk)29 static void sh_clk_write(int value, struct clk *clk)
30 {
31 if (clk->flags & CLK_ENABLE_REG_8BIT)
32 iowrite8(value, clk->mapped_reg);
33 else if (clk->flags & CLK_ENABLE_REG_16BIT)
34 iowrite16(value, clk->mapped_reg);
35 else
36 iowrite32(value, clk->mapped_reg);
37 }
38
sh_clk_mstp_enable(struct clk * clk)39 static int sh_clk_mstp_enable(struct clk *clk)
40 {
41 sh_clk_write(sh_clk_read(clk) & ~(1 << clk->enable_bit), clk);
42 return 0;
43 }
44
sh_clk_mstp_disable(struct clk * clk)45 static void sh_clk_mstp_disable(struct clk *clk)
46 {
47 sh_clk_write(sh_clk_read(clk) | (1 << clk->enable_bit), clk);
48 }
49
/* MSTP "module stop" gate clocks: on/off only, rate tracks the parent. */
static struct sh_clk_ops sh_clk_mstp_clk_ops = {
	.enable		= sh_clk_mstp_enable,
	.disable	= sh_clk_mstp_disable,
	.recalc		= followparent_recalc,
};
55
sh_clk_mstp_register(struct clk * clks,int nr)56 int __init sh_clk_mstp_register(struct clk *clks, int nr)
57 {
58 struct clk *clkp;
59 int ret = 0;
60 int k;
61
62 for (k = 0; !ret && (k < nr); k++) {
63 clkp = clks + k;
64 clkp->ops = &sh_clk_mstp_clk_ops;
65 ret |= clk_register(clkp);
66 }
67
68 return ret;
69 }
70
71 /*
72 * Div/mult table lookup helpers
73 */
/* The div4/div6 clocks stash their clk_div_table in clk->priv. */
static inline struct clk_div_table *clk_to_div_table(struct clk *clk)
{
	return clk->priv;
}
78
/* Shorthand for the divisor/multiplier table hanging off clk->priv. */
static inline struct clk_div_mult_table *clk_to_div_mult_table(struct clk *clk)
{
	return clk_to_div_table(clk)->div_mult_table;
}
83
84 /*
85 * Common div ops
86 */
/* Round @rate to the closest entry in the clock's frequency table. */
static long sh_clk_div_round_rate(struct clk *clk, unsigned long rate)
{
	return clk_rate_table_round(clk, clk->freq_table, rate);
}
91
sh_clk_div_recalc(struct clk * clk)92 static unsigned long sh_clk_div_recalc(struct clk *clk)
93 {
94 struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
95 unsigned int idx;
96
97 clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
98 table, clk->arch_flags ? &clk->arch_flags : NULL);
99
100 idx = (sh_clk_read(clk) >> clk->enable_bit) & clk->div_mask;
101
102 return clk->freq_table[idx].frequency;
103 }
104
sh_clk_div_set_rate(struct clk * clk,unsigned long rate)105 static int sh_clk_div_set_rate(struct clk *clk, unsigned long rate)
106 {
107 struct clk_div_table *dt = clk_to_div_table(clk);
108 unsigned long value;
109 int idx;
110
111 idx = clk_rate_table_find(clk, clk->freq_table, rate);
112 if (idx < 0)
113 return idx;
114
115 value = sh_clk_read(clk);
116 value &= ~(clk->div_mask << clk->enable_bit);
117 value |= (idx << clk->enable_bit);
118 sh_clk_write(value, clk);
119
120 /* XXX: Should use a post-change notifier */
121 if (dt->kick)
122 dt->kick(clk);
123
124 return 0;
125 }
126
sh_clk_div_enable(struct clk * clk)127 static int sh_clk_div_enable(struct clk *clk)
128 {
129 if (clk->div_mask == SH_CLK_DIV6_MSK) {
130 int ret = sh_clk_div_set_rate(clk, clk->rate);
131 if (ret < 0)
132 return ret;
133 }
134
135 sh_clk_write(sh_clk_read(clk) & ~CPG_CKSTP_BIT, clk);
136 return 0;
137 }
138
sh_clk_div_disable(struct clk * clk)139 static void sh_clk_div_disable(struct clk *clk)
140 {
141 unsigned int val;
142
143 val = sh_clk_read(clk);
144 val |= CPG_CKSTP_BIT;
145
146 /*
147 * div6 clocks require the divisor field to be non-zero or the
148 * above CKSTP toggle silently fails. Ensure that the divisor
149 * array is reset to its initial state on disable.
150 */
151 if (clk->flags & CLK_MASK_DIV_ON_DISABLE)
152 val |= clk->div_mask;
153
154 sh_clk_write(val, clk);
155 }
156
/* div clocks that cannot be gated: rate control only. */
static struct sh_clk_ops sh_clk_div_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div_set_rate,
	.round_rate	= sh_clk_div_round_rate,
};
162
/* div clocks with a CKSTP gate bit: rate control plus enable/disable. */
static struct sh_clk_ops sh_clk_div_enable_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div_enable,
	.disable	= sh_clk_div_disable,
};
170
sh_clk_init_parent(struct clk * clk)171 static int __init sh_clk_init_parent(struct clk *clk)
172 {
173 u32 val;
174
175 if (clk->parent)
176 return 0;
177
178 if (!clk->parent_table || !clk->parent_num)
179 return 0;
180
181 if (!clk->src_width) {
182 pr_err("sh_clk_init_parent: cannot select parent clock\n");
183 return -EINVAL;
184 }
185
186 val = (sh_clk_read(clk) >> clk->src_shift);
187 val &= (1 << clk->src_width) - 1;
188
189 if (val >= clk->parent_num) {
190 pr_err("sh_clk_init_parent: parent table size failed\n");
191 return -EINVAL;
192 }
193
194 clk_reparent(clk, clk->parent_table[val]);
195 if (!clk->parent) {
196 pr_err("sh_clk_init_parent: unable to set parent");
197 return -EINVAL;
198 }
199
200 return 0;
201 }
202
/*
 * Register @nr div clocks, wiring in @ops and @table, and carving a
 * per-clock cpufreq frequency table out of one shared allocation.
 */
static int __init sh_clk_div_register_ops(struct clk *clks, int nr,
				struct clk_div_table *table, struct sh_clk_ops *ops)
{
	struct clk *clkp;
	void *freq_table;
	int nr_divs = table->div_mult_table->nr_divisors;
	int freq_table_size = sizeof(struct cpufreq_frequency_table);
	int ret = 0;
	int k;

	/* One extra slot per clock for the CPUFREQ_TABLE_END sentinel. */
	freq_table_size *= (nr_divs + 1);
	freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
	if (!freq_table) {
		pr_err("%s: unable to alloc memory\n", __func__);
		return -ENOMEM;
	}

	/*
	 * NOTE(review): freq_table is not freed if registration below
	 * fails part-way — presumably acceptable for __init code, but
	 * worth confirming.
	 */
	for (k = 0; !ret && (k < nr); k++) {
		clkp = clks + k;

		clkp->ops = ops;
		clkp->priv = table;

		/* Slice of the shared allocation owned by clock k. */
		clkp->freq_table = freq_table + (k * freq_table_size);
		clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;

		ret = clk_register(clkp);
		if (ret == 0)
			ret = sh_clk_init_parent(clkp);
	}

	return ret;
}
236
237 /*
238 * div6 support
239 */
/* div6 field values 0..63 map linearly onto divisors 1..64. */
static int sh_clk_div6_divisors[64] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
	17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
	33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
	49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
};
246
/* div6 clocks divide only — no multipliers. */
static struct clk_div_mult_table div6_div_mult_table = {
	.divisors = sh_clk_div6_divisors,
	.nr_divisors = ARRAY_SIZE(sh_clk_div6_divisors),
};
251
/* Shared table installed as clk->priv for every div6 clock. */
static struct clk_div_table sh_clk_div6_table = {
	.div_mult_table = &div6_div_mult_table,
};
255
sh_clk_div6_set_parent(struct clk * clk,struct clk * parent)256 static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent)
257 {
258 struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
259 u32 value;
260 int ret, i;
261
262 if (!clk->parent_table || !clk->parent_num)
263 return -EINVAL;
264
265 /* Search the parent */
266 for (i = 0; i < clk->parent_num; i++)
267 if (clk->parent_table[i] == parent)
268 break;
269
270 if (i == clk->parent_num)
271 return -ENODEV;
272
273 ret = clk_reparent(clk, parent);
274 if (ret < 0)
275 return ret;
276
277 value = sh_clk_read(clk) &
278 ~(((1 << clk->src_width) - 1) << clk->src_shift);
279
280 sh_clk_write(value | (i << clk->src_shift), clk);
281
282 /* Rebuild the frequency table */
283 clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
284 table, NULL);
285
286 return 0;
287 }
288
/* div6 clocks with runtime parent reselection. */
static struct sh_clk_ops sh_clk_div6_reparent_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.round_rate	= sh_clk_div_round_rate,
	.set_rate	= sh_clk_div_set_rate,
	.enable		= sh_clk_div_enable,
	.disable	= sh_clk_div_disable,
	.set_parent	= sh_clk_div6_set_parent,
};
297
/* Register div6 clocks with a fixed parent (no reselection). */
int __init sh_clk_div6_register(struct clk *clks, int nr)
{
	return sh_clk_div_register_ops(clks, nr, &sh_clk_div6_table,
				       &sh_clk_div_enable_clk_ops);
}
303
/* Register div6 clocks that support runtime parent reselection. */
int __init sh_clk_div6_reparent_register(struct clk *clks, int nr)
{
	return sh_clk_div_register_ops(clks, nr, &sh_clk_div6_table,
				       &sh_clk_div6_reparent_clk_ops);
}
309
310 /*
311 * div4 support
312 */
sh_clk_div4_set_parent(struct clk * clk,struct clk * parent)313 static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
314 {
315 struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
316 u32 value;
317 int ret;
318
319 /* we really need a better way to determine parent index, but for
320 * now assume internal parent comes with CLK_ENABLE_ON_INIT set,
321 * no CLK_ENABLE_ON_INIT means external clock...
322 */
323
324 if (parent->flags & CLK_ENABLE_ON_INIT)
325 value = sh_clk_read(clk) & ~(1 << 7);
326 else
327 value = sh_clk_read(clk) | (1 << 7);
328
329 ret = clk_reparent(clk, parent);
330 if (ret < 0)
331 return ret;
332
333 sh_clk_write(value, clk);
334
335 /* Rebiuld the frequency table */
336 clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
337 table, &clk->arch_flags);
338
339 return 0;
340 }
341
/* div4 clocks with runtime parent reselection. */
static struct sh_clk_ops sh_clk_div4_reparent_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div_enable,
	.disable	= sh_clk_div_disable,
	.set_parent	= sh_clk_div4_set_parent,
};
350
/* Register always-on div4 clocks (no gate, no reparenting). */
int __init sh_clk_div4_register(struct clk *clks, int nr,
				struct clk_div4_table *table)
{
	return sh_clk_div_register_ops(clks, nr, table, &sh_clk_div_clk_ops);
}
356
/* Register gateable div4 clocks (CKSTP bit, no reparenting). */
int __init sh_clk_div4_enable_register(struct clk *clks, int nr,
				struct clk_div4_table *table)
{
	return sh_clk_div_register_ops(clks, nr, table,
				       &sh_clk_div_enable_clk_ops);
}
363
/* Register div4 clocks that support runtime parent reselection. */
int __init sh_clk_div4_reparent_register(struct clk *clks, int nr,
				struct clk_div4_table *table)
{
	return sh_clk_div_register_ops(clks, nr, table,
				       &sh_clk_div4_reparent_clk_ops);
}
370
371 /* FSI-DIV */
fsidiv_recalc(struct clk * clk)372 static unsigned long fsidiv_recalc(struct clk *clk)
373 {
374 u32 value;
375
376 value = __raw_readl(clk->mapping->base);
377
378 value >>= 16;
379 if (value < 2)
380 return clk->parent->rate;
381
382 return clk->parent->rate / value;
383 }
384
/* Round @rate so the implied divisor fits the 16-bit field (1..0xffff). */
static long fsidiv_round_rate(struct clk *clk, unsigned long rate)
{
	return clk_rate_div_range_round(clk, 1, 0xffff, rate);
}
389
/* Writing 0 clears both the divisor and the enable bits. */
static void fsidiv_disable(struct clk *clk)
{
	__raw_writel(0, clk->mapping->base);
}
394
fsidiv_enable(struct clk * clk)395 static int fsidiv_enable(struct clk *clk)
396 {
397 u32 value;
398
399 value = __raw_readl(clk->mapping->base) >> 16;
400 if (value < 2)
401 return 0;
402
403 __raw_writel((value << 16) | 0x3, clk->mapping->base);
404
405 return 0;
406 }
407
fsidiv_set_rate(struct clk * clk,unsigned long rate)408 static int fsidiv_set_rate(struct clk *clk, unsigned long rate)
409 {
410 int idx;
411
412 idx = (clk->parent->rate / rate) & 0xffff;
413 if (idx < 2)
414 __raw_writel(0, clk->mapping->base);
415 else
416 __raw_writel(idx << 16, clk->mapping->base);
417
418 return 0;
419 }
420
/* FSI-DIV clocks: full rate control plus gating via the mapped register. */
static struct sh_clk_ops fsidiv_clk_ops = {
	.recalc		= fsidiv_recalc,
	.round_rate	= fsidiv_round_rate,
	.set_rate	= fsidiv_set_rate,
	.enable		= fsidiv_enable,
	.disable	= fsidiv_disable,
};
428
sh_clk_fsidiv_register(struct clk * clks,int nr)429 int __init sh_clk_fsidiv_register(struct clk *clks, int nr)
430 {
431 struct clk_mapping *map;
432 int i;
433
434 for (i = 0; i < nr; i++) {
435
436 map = kzalloc(sizeof(struct clk_mapping), GFP_KERNEL);
437 if (!map) {
438 pr_err("%s: unable to alloc memory\n", __func__);
439 return -ENOMEM;
440 }
441
442 /* clks[i].enable_reg came from SH_CLK_FSIDIV() */
443 map->phys = (phys_addr_t)clks[i].enable_reg;
444 map->len = 8;
445
446 clks[i].enable_reg = 0; /* remove .enable_reg */
447 clks[i].ops = &fsidiv_clk_ops;
448 clks[i].mapping = map;
449
450 clk_register(&clks[i]);
451 }
452
453 return 0;
454 }
455