1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Copyright (c) 2014 MundoReader S.L.
4 * Author: Heiko Stuebner <heiko@sntech.de>
5 *
6 * Copyright (c) 2016 Rockchip Electronics Co. Ltd.
7 * Author: Xing Zheng <zhengxing@rock-chips.com>
8 *
9 * based on
10 *
11 * samsung/clk.c
12 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
13 * Copyright (c) 2013 Linaro Ltd.
14 * Author: Thomas Abraham <thomas.ab@samsung.com>
15 */
16
17 #include <linux/slab.h>
18 #include <linux/clk.h>
19 #include <linux/clk-provider.h>
20 #include <linux/io.h>
21 #include <linux/mfd/syscon.h>
22 #include <linux/regmap.h>
23 #include <linux/reboot.h>
24 #include <linux/rational.h>
25 #include "clk.h"
26
27 /**
28 * Register a clock branch.
29 * Most clock branches have a form like
30 *
31 * src1 --|--\
32 * |M |--[GATE]-[DIV]-
33 * src2 --|--/
34 *
35 * sometimes without one of those components.
36 */
static struct clk *rockchip_clk_register_branch(const char *name, const char *const *parent_names, u8 num_parents,
                                                void __iomem *base, int muxdiv_offset, u8 mux_shift, u8 mux_width,
                                                u8 mux_flags, u32 *mux_table, int div_offset, u8 div_shift,
                                                u8 div_width, u8 div_flags, struct clk_div_table *div_table,
                                                int gate_offset, u8 gate_shift, u8 gate_flags, unsigned long flags,
                                                spinlock_t *lock)
{
    struct clk_hw *hw;
    struct clk_mux *mux = NULL;
    struct clk_gate *gate = NULL;
    struct clk_divider *div = NULL;
    const struct clk_ops *mux_ops = NULL, *div_ops = NULL, *gate_ops = NULL;
    int ret;

    /* a mux is only needed when there is more than one parent to select from */
    if (num_parents > 1) {
        mux = kzalloc(sizeof(*mux), GFP_KERNEL);
        if (!mux) {
            return ERR_PTR(-ENOMEM);
        }

        mux->reg = base + muxdiv_offset;
        mux->shift = mux_shift;
        mux->mask = BIT(mux_width) - 1;
        mux->flags = mux_flags;
        mux->table = mux_table;
        mux->lock = lock;
        mux_ops = (mux_flags & CLK_MUX_READ_ONLY) ? &clk_mux_ro_ops : &clk_mux_ops;
    }

    /* a negative gate_offset marks a branch without a gate */
    if (gate_offset >= 0) {
        gate = kzalloc(sizeof(*gate), GFP_KERNEL);
        if (!gate) {
            ret = -ENOMEM;
            goto err_gate;
        }

        gate->flags = gate_flags;
        gate->reg = base + gate_offset;
        gate->bit_idx = gate_shift;
        gate->lock = lock;
        gate_ops = &clk_gate_ops;
    }

    /* a zero div_width marks a branch without a divider */
    if (div_width > 0) {
        div = kzalloc(sizeof(*div), GFP_KERNEL);
        if (!div) {
            ret = -ENOMEM;
            goto err_div;
        }

        div->flags = div_flags;
        /* div_offset == 0 means the divider shares its register with the mux */
        if (div_offset) {
            div->reg = base + div_offset;
        } else {
            div->reg = base + muxdiv_offset;
        }
        div->shift = div_shift;
        div->width = div_width;
        div->lock = lock;
        div->table = div_table;
        div_ops = (div_flags & CLK_DIVIDER_READ_ONLY) ? &clk_divider_ro_ops : &clk_divider_ops;
    }

    hw = clk_hw_register_composite(NULL, name, parent_names, num_parents, mux ? &mux->hw : NULL, mux_ops,
                                   div ? &div->hw : NULL, div_ops, gate ? &gate->hw : NULL, gate_ops, flags);
    if (IS_ERR(hw)) {
        /*
         * Unwind through all three allocations; the original code leaked
         * 'mux' here by freeing only div and gate.
         */
        ret = PTR_ERR(hw);
        goto err_composite;
    }

    return hw->clk;
err_composite:
    kfree(div);
err_div:
    kfree(gate);
err_gate:
    kfree(mux);
    return ERR_PTR(ret);
}
115
/*
 * Bookkeeping for a fractional-divider branch and its optional child mux.
 * The embedded notifier temporarily re-parents the child mux around rate
 * changes of the fractional divider (see rockchip_clk_frac_notifier_cb).
 */
struct rockchip_clk_frac {
    struct notifier_block clk_nb;       /* rate-change notifier on the frac divider */
    struct clk_fractional_divider div;
    struct clk_gate gate;

    struct clk_mux mux;                 /* optional child mux fed by this divider */
    const struct clk_ops *mux_ops;
    int mux_frac_idx;                   /* parent index of the frac output in the child mux */

    bool rate_change_remuxed;           /* true while temporarily remuxed during a rate change */
    int rate_change_idx;                /* parent index to restore after the rate change */
};

#define to_rockchip_clk_frac_nb(nb) container_of(nb, struct rockchip_clk_frac, clk_nb)
130
/*
 * Rate-change notifier for a fractional divider that feeds a child mux.
 *
 * PRE_RATE_CHANGE: remember the mux's current parent and, if it is not the
 * fractional output, temporarily switch to it so the new rate programming
 * takes effect on a live path.
 * POST_RATE_CHANGE: restore the original parent if we remuxed.
 */
static int rockchip_clk_frac_notifier_cb(struct notifier_block *nb, unsigned long event, void *data)
{
    struct clk_notifier_data *ndata = data;
    struct rockchip_clk_frac *frac = to_rockchip_clk_frac_nb(nb);
    struct clk_mux *frac_mux = &frac->mux;
    int ret = 0;

    pr_debug("%s: event %lu, old_rate %lu, new_rate: %lu\n", __func__, event, ndata->old_rate, ndata->new_rate);
    if (event == PRE_RATE_CHANGE) {
        /* save the current parent so POST_RATE_CHANGE can restore it */
        frac->rate_change_idx = frac->mux_ops->get_parent(&frac_mux->hw);
        if (frac->rate_change_idx != frac->mux_frac_idx) {
            frac->mux_ops->set_parent(&frac_mux->hw, frac->mux_frac_idx);
            frac->rate_change_remuxed = 1;
        }
    } else if (event == POST_RATE_CHANGE) {
        /*
         * The POST_RATE_CHANGE notifier runs directly after the
         * divider clock is set in clk_change_rate, so we'll have
         * remuxed back to the original parent before clk_change_rate
         * reaches the mux itself.
         */
        if (frac->rate_change_remuxed) {
            frac->mux_ops->set_parent(&frac_mux->hw, frac->rate_change_idx);
            frac->rate_change_remuxed = 0;
        }
    }

    return notifier_from_errno(ret);
}
160
161 /**
162 * fractional divider must set that denominator is 20 times larger than
163 * numerator to generate precise clock frequency.
164 */
rockchip_fractional_approximation(struct clk_hw * hw,unsigned long rate,unsigned long * parent_rate,unsigned long * m,unsigned long * n)165 static void rockchip_fractional_approximation(struct clk_hw *hw, unsigned long rate, unsigned long *parent_rate,
166 unsigned long *m, unsigned long *n)
167 {
168 struct clk_fractional_divider *fd = to_clk_fd(hw);
169 unsigned long p_rate, p_parent_rate;
170 struct clk_hw *p_parent;
171 unsigned long scale;
172 u32 div;
173
174 p_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
175 if (((rate * 0x14 > p_rate) && (p_rate % rate != 0)) || (fd->max_prate && fd->max_prate < p_rate)) {
176 p_parent = clk_hw_get_parent(clk_hw_get_parent(hw));
177 if (!p_parent) {
178 *parent_rate = p_rate;
179 } else {
180 p_parent_rate = clk_hw_get_rate(p_parent);
181 *parent_rate = p_parent_rate;
182 if (fd->max_prate && p_parent_rate > fd->max_prate) {
183 div = DIV_ROUND_UP(p_parent_rate, fd->max_prate);
184 *parent_rate = p_parent_rate / div;
185 }
186 }
187
188 if (*parent_rate < rate * 0x14) {
189 /*
190 * Fractional frequency divider to do
191 * integer frequency divider does not
192 * need 20 times the limit.
193 */
194 if (!(*parent_rate % rate)) {
195 *m = 1;
196 *n = *parent_rate / rate;
197 return;
198 } else if (!(fd->flags & CLK_FRAC_DIVIDER_NO_LIMIT)) {
199 pr_warn("%s p_rate(%ld) is low than rate(%ld)*20, use integer or half-div\n", clk_hw_get_name(hw),
200 *parent_rate, rate);
201 *m = 0;
202 *n = 1;
203 return;
204 }
205 }
206 }
207
208 /*
209 * Get rate closer to *parent_rate to guarantee there is no overflow
210 * for m and n. In the result it will be the nearest rate left shifted
211 * by (scale - fd->nwidth) bits.
212 */
213 scale = fls_long(*parent_rate / rate - 1);
214 if (scale > fd->nwidth) {
215 rate <<= scale - fd->nwidth;
216 }
217
218 rational_best_approximation(rate, *parent_rate, GENMASK(fd->mwidth - 1, 0), GENMASK(fd->nwidth - 1, 0), m, n);
219 }
220
/*
 * Register a gated fractional-divider branch, optionally followed by a child
 * mux that can select this divider's output. When a child mux is present, a
 * rate-change notifier is installed so the mux is temporarily re-parented to
 * the fractional output while its rate is being changed.
 */
static struct clk *rockchip_clk_register_frac_branch(struct rockchip_clk_provider *ctx, const char *name,
                                                     const char *const *parent_names, u8 num_parents,
                                                     void __iomem *base, int muxdiv_offset, u8 div_flags,
                                                     int gate_offset, u8 gate_shift, u8 gate_flags, unsigned long flags,
                                                     struct rockchip_clk_branch *child, unsigned long max_prate,
                                                     spinlock_t *lock)
{
    struct clk_hw *hw;
    struct rockchip_clk_frac *frac;
    struct clk_gate *gate = NULL;
    struct clk_fractional_divider *div = NULL;
    const struct clk_ops *div_ops = NULL, *gate_ops = NULL;

    /* the fractional divider register itself is mandatory */
    if (muxdiv_offset < 0) {
        return ERR_PTR(-EINVAL);
    }

    if (child && child->branch_type != branch_mux) {
        pr_err("%s: fractional child clock for %s can only be a mux\n", __func__, name);
        return ERR_PTR(-EINVAL);
    }

    /* one allocation holds divider, gate, mux and notifier state together */
    frac = kzalloc(sizeof(*frac), GFP_KERNEL);
    if (!frac) {
        return ERR_PTR(-ENOMEM);
    }

    if (gate_offset >= 0) {
        gate = &frac->gate;
        gate->flags = gate_flags;
        gate->reg = base + gate_offset;
        gate->bit_idx = gate_shift;
        gate->lock = lock;
        gate_ops = &clk_gate_ops;
    }

    /* 16-bit numerator in [31:16], 16-bit denominator in [15:0] */
    div = &frac->div;
    div->flags = div_flags;
    div->reg = base + muxdiv_offset;
    div->mshift = 16;
    div->mwidth = 16;
    div->mmask = GENMASK(div->mwidth - 1, 0) << div->mshift;
    div->nshift = 0;
    div->nwidth = 16;
    div->nmask = GENMASK(div->nwidth - 1, 0) << div->nshift;
    div->lock = lock;
    div->approximation = rockchip_fractional_approximation;
    div->max_prate = max_prate;
    div_ops = &clk_fractional_divider_ops;

    /* keep the gate open while changing the rate, hence CLK_SET_RATE_UNGATE */
    hw = clk_hw_register_composite(NULL, name, parent_names, num_parents, NULL, NULL, &div->hw, div_ops,
                                   gate ? &gate->hw : NULL, gate_ops, flags | CLK_SET_RATE_UNGATE);
    if (IS_ERR(hw)) {
        kfree(frac);
        return ERR_CAST(hw);
    }

    if (child) {
        struct clk_mux *frac_mux = &frac->mux;
        struct clk_init_data init;
        struct clk *mux_clk;
        int ret;

        /* position of the fractional output among the child mux's parents */
        frac->mux_frac_idx = match_string(child->parent_names, child->num_parents, name);
        frac->mux_ops = &clk_mux_ops;
        frac->clk_nb.notifier_call = rockchip_clk_frac_notifier_cb;

        frac_mux->reg = base + child->muxdiv_offset;
        frac_mux->shift = child->mux_shift;
        frac_mux->mask = BIT(child->mux_width) - 1;
        frac_mux->flags = child->mux_flags;
        if (child->mux_table) {
            frac_mux->table = child->mux_table;
        }
        frac_mux->lock = lock;
        frac_mux->hw.init = &init;

        init.name = child->name;
        init.flags = child->flags | CLK_SET_RATE_PARENT;
        init.ops = frac->mux_ops;
        init.parent_names = child->parent_names;
        init.num_parents = child->num_parents;

        mux_clk = clk_register(NULL, &frac_mux->hw);
        if (IS_ERR(mux_clk)) {
            kfree(frac);
            return mux_clk;
        }

        rockchip_clk_add_lookup(ctx, mux_clk, child->id);

        /* notifier on the fraction divider to catch rate changes */
        if (frac->mux_frac_idx >= 0) {
            pr_debug("%s: found fractional parent in mux at pos %d\n", __func__, frac->mux_frac_idx);
            ret = clk_notifier_register(hw->clk, &frac->clk_nb);
            if (ret) {
                pr_err("%s: failed to register clock notifier for %s\n", __func__, name);
            }
        } else {
            pr_warn("%s: could not find %s as parent of %s, rate changes may not work\n", __func__, name, child->name);
        }
    }

    return hw->clk;
}
326
/*
 * Register a fixed-factor branch (rate = parent * mult / div), optionally
 * combined with a gate when gate_offset is non-zero.
 */
static struct clk *rockchip_clk_register_factor_branch(const char *name, const char *const *parent_names,
                                                       u8 num_parents, void __iomem *base, unsigned int mult,
                                                       unsigned int div, int gate_offset, u8 gate_shift, u8 gate_flags,
                                                       unsigned long flags, spinlock_t *lock)
{
    struct clk_fixed_factor *factor;
    struct clk_gate *gate;
    struct clk_hw *hw;

    /* without gate, register a simple factor clock */
    if (!gate_offset) {
        return clk_register_fixed_factor(NULL, name, parent_names[0], flags, mult, div);
    }

    gate = kzalloc(sizeof(*gate), GFP_KERNEL);
    if (!gate) {
        return ERR_PTR(-ENOMEM);
    }

    factor = kzalloc(sizeof(*factor), GFP_KERNEL);
    if (!factor) {
        kfree(gate);
        return ERR_PTR(-ENOMEM);
    }

    gate->flags = gate_flags;
    gate->reg = base + gate_offset;
    gate->bit_idx = gate_shift;
    gate->lock = lock;

    factor->mult = mult;
    factor->div = div;

    hw = clk_hw_register_composite(NULL, name, parent_names, num_parents, NULL, NULL, &factor->hw,
                                   &clk_fixed_factor_ops, &gate->hw, &clk_gate_ops, flags);
    if (IS_ERR(hw)) {
        kfree(factor);
        kfree(gate);
        return ERR_CAST(hw);
    }

    return hw->clk;
}
370
/*
 * Register a composite branch together with its half-divider "brother"
 * branch and cross-link the two composites via brother_hw.
 */
static struct clk *rockchip_clk_register_composite_brother_branch(
    struct rockchip_clk_provider *ctx, const char *name, const char *const *parent_names, u8 num_parents,
    void __iomem *base, int muxdiv_offset, u8 mux_shift, u8 mux_width, u8 mux_flags, u32 *mux_table, int div_offset,
    u8 div_shift, u8 div_width, u8 div_flags, struct clk_div_table *div_table, int gate_offset, u8 gate_shift,
    u8 gate_flags, unsigned long flags, struct rockchip_clk_branch *brother, spinlock_t *lock)
{
    struct clk *clk, *brother_clk;
    struct clk_composite *composite, *brother_composite;
    struct clk_hw *hw, *brother_hw;

    /*
     * 'brother' is dereferenced unconditionally below, so a missing brother
     * is a hard error (the original code NULL-checked it here but would
     * still have crashed on brother->name later).
     */
    if (!brother) {
        pr_err("%s: missing composite brother for %s\n", __func__, name);
        return ERR_PTR(-EINVAL);
    }

    if (brother->branch_type != branch_half_divider) {
        pr_err("%s: composite brother for %s can only be a halfdiv\n", __func__, name);
        return ERR_PTR(-EINVAL);
    }

    clk = rockchip_clk_register_branch(name, parent_names, num_parents, base, muxdiv_offset, mux_shift, mux_width,
                                       mux_flags, mux_table, div_offset, div_shift, div_width, div_flags, div_table,
                                       gate_offset, gate_shift, gate_flags, flags, lock);
    if (IS_ERR(clk)) {
        return clk;
    }

    brother_clk = rockchip_clk_register_halfdiv(
        brother->name, brother->parent_names, brother->num_parents, base, brother->muxdiv_offset, brother->mux_shift,
        brother->mux_width, brother->mux_flags, brother->div_offset, brother->div_shift, brother->div_width,
        brother->div_flags, brother->gate_offset, brother->gate_shift, brother->gate_flags, flags, lock);
    if (IS_ERR(brother_clk)) {
        return brother_clk;
    }
    rockchip_clk_add_lookup(ctx, brother_clk, brother->id);

    /* link the two composites so each knows its brother */
    hw = __clk_get_hw(clk);
    brother_hw = __clk_get_hw(brother_clk);
    if (hw && brother_hw) {
        composite = to_clk_composite(hw);
        brother_composite = to_clk_composite(brother_hw);
        composite->brother_hw = brother_hw;
        brother_composite->brother_hw = hw;
    }

    return clk;
}
413
/*
 * Allocate and initialize a clock provider context for one CRU instance:
 * the clk lookup table (all entries preset to -ENOENT), the register base,
 * the provider spinlock and the optional GRF/PMUGRF regmap handles.
 */
struct rockchip_clk_provider *rockchip_clk_init(struct device_node *np, void __iomem *base, unsigned long nr_clks)
{
    struct rockchip_clk_provider *ctx;
    struct clk **clk_table;
    unsigned long i; /* match nr_clks' type; the original signed int mixed signedness */

    ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
    if (!ctx) {
        return ERR_PTR(-ENOMEM);
    }

    clk_table = kcalloc(nr_clks, sizeof(struct clk *), GFP_KERNEL);
    if (!clk_table) {
        goto err_free;
    }

    /* unset entries must read as "no such clock", not NULL */
    for (i = 0; i < nr_clks; ++i) {
        clk_table[i] = ERR_PTR(-ENOENT);
    }

    ctx->reg_base = base;
    ctx->clk_data.clks = clk_table;
    ctx->clk_data.clk_num = nr_clks;
    ctx->cru_node = np;
    spin_lock_init(&ctx->lock);

    /* optional syscon handles; lookups may fail and are checked by users */
    ctx->grf = syscon_regmap_lookup_by_phandle(ctx->cru_node, "rockchip,grf");
    ctx->pmugrf = syscon_regmap_lookup_by_phandle(ctx->cru_node, "rockchip,pmugrf");

    return ctx;

err_free:
    kfree(ctx);
    return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(rockchip_clk_init);
450
rockchip_clk_of_add_provider(struct device_node * np,struct rockchip_clk_provider * ctx)451 void rockchip_clk_of_add_provider(struct device_node *np, struct rockchip_clk_provider *ctx)
452 {
453 if (of_clk_add_provider(np, of_clk_src_onecell_get, &ctx->clk_data)) {
454 pr_err("%s: could not register clk provider\n", __func__);
455 }
456 }
457 EXPORT_SYMBOL_GPL(rockchip_clk_of_add_provider);
458
rockchip_clk_add_lookup(struct rockchip_clk_provider * ctx,struct clk * clk,unsigned int id)459 void rockchip_clk_add_lookup(struct rockchip_clk_provider *ctx, struct clk *clk, unsigned int id)
460 {
461 if (ctx->clk_data.clks && id) {
462 ctx->clk_data.clks[id] = clk;
463 }
464 }
465 EXPORT_SYMBOL_GPL(rockchip_clk_add_lookup);
466
rockchip_clk_register_plls(struct rockchip_clk_provider * ctx,struct rockchip_pll_clock * list,unsigned int nr_pll,int grf_lock_offset)467 void rockchip_clk_register_plls(struct rockchip_clk_provider *ctx, struct rockchip_pll_clock *list, unsigned int nr_pll,
468 int grf_lock_offset)
469 {
470 struct clk *clk;
471 int idx;
472
473 for (idx = 0; idx < nr_pll; idx++, list++) {
474 clk = rockchip_clk_register_pll(ctx, list->type, list->name, list->parent_names, list->num_parents,
475 list->con_offset, grf_lock_offset, list->lock_shift, list->mode_offset,
476 list->mode_shift, list->rate_table, list->flags, list->pll_flags);
477 if (IS_ERR(clk)) {
478 pr_err("%s: failed to register clock %s\n", __func__, list->name);
479 continue;
480 }
481
482 rockchip_clk_add_lookup(ctx, clk, list->id);
483 }
484 }
485 EXPORT_SYMBOL_GPL(rockchip_clk_register_plls);
486
rockchip_clk_register_branches(struct rockchip_clk_provider * ctx,struct rockchip_clk_branch * list,unsigned int nr_clk)487 void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx, struct rockchip_clk_branch *list,
488 unsigned int nr_clk)
489 {
490 struct clk *clk = NULL;
491 unsigned int idx;
492 unsigned long flags;
493
494 for (idx = 0; idx < nr_clk; idx++, list++) {
495 flags = list->flags;
496
497 /* catch simple muxes */
498 switch (list->branch_type) {
499 case branch_mux:
500 if (list->mux_table) {
501 clk =
502 clk_register_mux_table(NULL, list->name, list->parent_names, list->num_parents, flags,
503 ctx->reg_base + list->muxdiv_offset, list->mux_shift,
504 BIT(list->mux_width) - 1, list->mux_flags, list->mux_table, &ctx->lock);
505 } else {
506 clk = clk_register_mux(NULL, list->name, list->parent_names, list->num_parents, flags,
507 ctx->reg_base + list->muxdiv_offset, list->mux_shift, list->mux_width,
508 list->mux_flags, &ctx->lock);
509 }
510 break;
511 case branch_muxgrf:
512 clk = rockchip_clk_register_muxgrf(list->name, list->parent_names, list->num_parents, flags, ctx->grf,
513 list->muxdiv_offset, list->mux_shift, list->mux_width,
514 list->mux_flags);
515 break;
516 case branch_muxpmugrf:
517 clk = rockchip_clk_register_muxgrf(list->name, list->parent_names, list->num_parents, flags,
518 ctx->pmugrf, list->muxdiv_offset, list->mux_shift, list->mux_width,
519 list->mux_flags);
520 break;
521 case branch_divider:
522 if (list->div_table) {
523 clk = clk_register_divider_table(NULL, list->name, list->parent_names[0], flags,
524 ctx->reg_base + list->muxdiv_offset, list->div_shift,
525 list->div_width, list->div_flags, list->div_table, &ctx->lock);
526 } else {
527 clk = clk_register_divider(NULL, list->name, list->parent_names[0], flags,
528 ctx->reg_base + list->muxdiv_offset, list->div_shift, list->div_width,
529 list->div_flags, &ctx->lock);
530 }
531 break;
532 case branch_fraction_divider:
533 clk = rockchip_clk_register_frac_branch(ctx, list->name, list->parent_names, list->num_parents,
534 ctx->reg_base, list->muxdiv_offset, list->div_flags,
535 list->gate_offset, list->gate_shift, list->gate_flags, flags,
536 list->child, list->max_prate, &ctx->lock);
537 break;
538 case branch_half_divider:
539 clk = rockchip_clk_register_halfdiv(list->name, list->parent_names, list->num_parents, ctx->reg_base,
540 list->muxdiv_offset, list->mux_shift, list->mux_width,
541 list->mux_flags, list->div_offset, list->div_shift, list->div_width,
542 list->div_flags, list->gate_offset, list->gate_shift,
543 list->gate_flags, flags, &ctx->lock);
544 break;
545 case branch_gate:
546 flags |= CLK_SET_RATE_PARENT;
547
548 clk =
549 clk_register_gate(NULL, list->name, list->parent_names[0], flags, ctx->reg_base + list->gate_offset,
550 list->gate_shift, list->gate_flags, &ctx->lock);
551 break;
552 case branch_composite:
553 clk = rockchip_clk_register_branch(list->name, list->parent_names, list->num_parents, ctx->reg_base,
554 list->muxdiv_offset, list->mux_shift, list->mux_width,
555 list->mux_flags, list->mux_table, list->div_offset, list->div_shift,
556 list->div_width, list->div_flags, list->div_table, list->gate_offset,
557 list->gate_shift, list->gate_flags, flags, &ctx->lock);
558 break;
559 case branch_composite_brother:
560 clk = rockchip_clk_register_composite_brother_branch(
561 ctx, list->name, list->parent_names, list->num_parents, ctx->reg_base, list->muxdiv_offset,
562 list->mux_shift, list->mux_width, list->mux_flags, list->mux_table, list->div_offset,
563 list->div_shift, list->div_width, list->div_flags, list->div_table, list->gate_offset,
564 list->gate_shift, list->gate_flags, flags, list->child, &ctx->lock);
565 break;
566 case branch_mmc:
567 clk = rockchip_clk_register_mmc(list->name, list->parent_names, list->num_parents,
568 ctx->reg_base + list->muxdiv_offset, list->div_shift);
569 break;
570 case branch_inverter:
571 clk = rockchip_clk_register_inverter(list->name, list->parent_names, list->num_parents,
572 ctx->reg_base + list->muxdiv_offset, list->div_shift,
573 list->div_flags, &ctx->lock);
574 break;
575 case branch_factor:
576 clk = rockchip_clk_register_factor_branch(
577 list->name, list->parent_names, list->num_parents, ctx->reg_base, list->div_shift, list->div_width,
578 list->gate_offset, list->gate_shift, list->gate_flags, flags, &ctx->lock);
579 break;
580 case branch_ddrclk:
581 clk = rockchip_clk_register_ddrclk(list->name, list->flags, list->parent_names, list->num_parents,
582 list->muxdiv_offset, list->mux_shift, list->mux_width,
583 list->div_shift, list->div_width, list->div_flags, ctx->reg_base);
584 break;
585 case branch_dclk_divider:
586 clk = rockchip_clk_register_dclk_branch(
587 list->name, list->parent_names, list->num_parents, ctx->reg_base, list->muxdiv_offset,
588 list->mux_shift, list->mux_width, list->mux_flags, list->div_offset, list->div_shift,
589 list->div_width, list->div_flags, list->div_table, list->gate_offset, list->gate_shift,
590 list->gate_flags, flags, list->max_prate, &ctx->lock);
591 break;
592 }
593
594 /* none of the cases above matched */
595 if (!clk) {
596 pr_err("%s: unknown clock type %d\n", __func__, list->branch_type);
597 continue;
598 }
599
600 if (IS_ERR(clk)) {
601 pr_err("%s: failed to register clock %s: %ld\n", __func__, list->name, PTR_ERR(clk));
602 continue;
603 }
604
605 rockchip_clk_add_lookup(ctx, clk, list->id);
606 }
607 }
608 EXPORT_SYMBOL_GPL(rockchip_clk_register_branches);
609
/* Register the CPU clock (armclk) and add it to the lookup table. */
void rockchip_clk_register_armclk(struct rockchip_clk_provider *ctx, unsigned int lookup_id, const char *name,
                                  u8 num_parents, struct clk *parent, struct clk *alt_parent,
                                  const struct rockchip_cpuclk_reg_data *reg_data,
                                  const struct rockchip_cpuclk_rate_table *rates, int nrates)
{
    struct clk *armclk;

    armclk = rockchip_clk_register_cpuclk(name, num_parents, parent, alt_parent, reg_data, rates, nrates,
                                          ctx->reg_base, &ctx->lock);
    if (IS_ERR(armclk)) {
        pr_err("%s: failed to register clock %s: %ld\n", __func__, name, PTR_ERR(armclk));
        return;
    }

    rockchip_clk_add_lookup(ctx, armclk, lookup_id);
}
EXPORT_SYMBOL_GPL(rockchip_clk_register_armclk);
627
/* Optional hook, installed by SoC-specific code, to dump CRU state on panic. */
void (*rk_dump_cru)(void);
EXPORT_SYMBOL(rk_dump_cru);

/* Panic notifier: dump clock controller registers if a dump hook is set. */
static int rk_clk_panic(struct notifier_block *this, unsigned long ev, void *ptr)
{
    if (rk_dump_cru) {
        rk_dump_cru();
    }
    return NOTIFY_DONE;
}

static struct notifier_block rk_clk_panic_block = {
    .notifier_call = rk_clk_panic,
};
642
/* Restart state, written once by rockchip_register_restart_notifier(). */
static void __iomem *rst_base;      /* CRU register base */
static unsigned int reg_restart;    /* offset of the global soft-reset register */
static void (*cb_restart)(void);    /* optional SoC-specific pre-reset callback */
static int rockchip_restart_notify(struct notifier_block *this, unsigned long mode, void *cmd)
{
    /* let SoC code do any last-minute work before the soft reset fires */
    if (cb_restart) {
        cb_restart();
    }

    /* 0xfdb9 — presumably the CRU global soft-reset magic; confirm per SoC TRM */
    writel(0xfdb9, rst_base + reg_restart);
    return NOTIFY_DONE;
}

static struct notifier_block rockchip_restart_handler = {
    .notifier_call = rockchip_restart_notify,
    .priority = 128,
};
660
/*
 * Install the system restart handler that writes the soft-reset magic to
 * 'reg' within this provider's register space, plus a panic notifier that
 * dumps CRU state.
 *
 * NOTE(review): the state is file-global and the panic notifier is
 * registered on every call — presumably this is only called once per boot;
 * verify for SoCs with multiple CRU instances.
 */
void rockchip_register_restart_notifier(struct rockchip_clk_provider *ctx, unsigned int reg, void (*cb)(void))
{
    int ret;

    rst_base = ctx->reg_base;
    reg_restart = reg;
    cb_restart = cb;
    ret = register_restart_handler(&rockchip_restart_handler);
    if (ret) {
        pr_err("%s: cannot register restart handler, %d\n", __func__, ret);
    }
    atomic_notifier_chain_register(&panic_notifier_list, &rk_clk_panic_block);
}
EXPORT_SYMBOL_GPL(rockchip_register_restart_notifier);
675