/*
 * Copyright (c) 2014 MundoReader S.L.
 * Author: Heiko Stuebner <heiko@sntech.de>
 *
 * based on
 *
 * samsung/clk.c
 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
 * Copyright (c) 2013 Linaro Ltd.
 * Author: Thomas Abraham <thomas.ab@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/reboot.h>
#include "clk.h"

/**
 * Register a clock branch.
 * Most clock branches have a form like
 *
 * src1 --|--\
 *        |M |--[GATE]-[DIV]-
 * src2 --|--/
 *
 * sometimes without one of those components.
 */
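/*
 * Illustrative only: with the branch helpers from clk.h (COMPOSITE() and
 * friends), an SoC driver would describe such a branch roughly as below.
 * The parent array name, register offsets, shifts and widths here are
 * made-up placeholder values, not taken from any real Rockchip SoC, and
 * MFLAGS/DFLAGS/GFLAGS are the per-driver flag shorthands:
 *
 *	COMPOSITE(0, "aclk_example", mux_example_p, 0,
 *			RK2928_CLKSEL_CON(1), 14, 2, MFLAGS, 8, 5, DFLAGS,
 *			RK2928_CLKGATE_CON(1), 3, GFLAGS),
 *
 * rockchip_clk_register_branches() below maps such an entry onto a call
 * to rockchip_clk_register_branch().
 */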
static struct clk *rockchip_clk_register_branch(const char *name,
		const char **parent_names, u8 num_parents, void __iomem *base,
		int muxdiv_offset, u8 mux_shift, u8 mux_width, u8 mux_flags,
		u8 div_shift, u8 div_width, u8 div_flags,
		struct clk_div_table *div_table, int gate_offset,
		u8 gate_shift, u8 gate_flags, unsigned long flags,
		spinlock_t *lock)
{
	struct clk *clk;
	struct clk_mux *mux = NULL;
	struct clk_gate *gate = NULL;
	struct clk_divider *div = NULL;
	const struct clk_ops *mux_ops = NULL, *div_ops = NULL,
			     *gate_ops = NULL;

	if (num_parents > 1) {
		mux = kzalloc(sizeof(*mux), GFP_KERNEL);
		if (!mux)
			return ERR_PTR(-ENOMEM);

		mux->reg = base + muxdiv_offset;
		mux->shift = mux_shift;
		mux->mask = BIT(mux_width) - 1;
		mux->flags = mux_flags;
		mux->lock = lock;
		mux_ops = (mux_flags & CLK_MUX_READ_ONLY) ? &clk_mux_ro_ops
							  : &clk_mux_ops;
	}

	if (gate_offset >= 0) {
		gate = kzalloc(sizeof(*gate), GFP_KERNEL);
		if (!gate)
			goto err_gate;

		gate->flags = gate_flags;
		gate->reg = base + gate_offset;
		gate->bit_idx = gate_shift;
		gate->lock = lock;
		gate_ops = &clk_gate_ops;
	}

	if (div_width > 0) {
		div = kzalloc(sizeof(*div), GFP_KERNEL);
		if (!div)
			goto err_div;

		div->flags = div_flags;
		div->reg = base + muxdiv_offset;
		div->shift = div_shift;
		div->width = div_width;
		div->lock = lock;
		div->table = div_table;
		div_ops = &clk_divider_ops;
	}

	clk = clk_register_composite(NULL, name, parent_names, num_parents,
				     mux ? &mux->hw : NULL, mux_ops,
				     div ? &div->hw : NULL, div_ops,
				     gate ? &gate->hw : NULL, gate_ops,
				     flags);

	return clk;
err_div:
	kfree(gate);
err_gate:
	kfree(mux);
	return ERR_PTR(-ENOMEM);
}

static struct clk *rockchip_clk_register_frac_branch(const char *name,
		const char **parent_names, u8 num_parents, void __iomem *base,
		int muxdiv_offset, u8 div_flags,
		int gate_offset, u8 gate_shift, u8 gate_flags,
		unsigned long flags, spinlock_t *lock)
{
	struct clk *clk;
	struct clk_gate *gate = NULL;
	struct clk_fractional_divider *div = NULL;
	const struct clk_ops *div_ops = NULL, *gate_ops = NULL;

	if (gate_offset >= 0) {
		gate = kzalloc(sizeof(*gate), GFP_KERNEL);
		if (!gate)
			return ERR_PTR(-ENOMEM);

		gate->flags = gate_flags;
		gate->reg = base + gate_offset;
		gate->bit_idx = gate_shift;
		gate->lock = lock;
		gate_ops = &clk_gate_ops;
	}

	if (muxdiv_offset < 0) {
		kfree(gate);
		return ERR_PTR(-EINVAL);
	}

	div = kzalloc(sizeof(*div), GFP_KERNEL);
	if (!div) {
		kfree(gate);
		return ERR_PTR(-ENOMEM);
	}

	div->flags = div_flags;
	div->reg = base + muxdiv_offset;
	div->mshift = 16;
	div->mmask = 0xffff0000;
	div->nshift = 0;
	div->nmask = 0xffff;
	div->lock = lock;
	div_ops = &clk_fractional_divider_ops;

	clk = clk_register_composite(NULL, name, parent_names, num_parents,
				     NULL, NULL,
				     &div->hw, div_ops,
				     gate ? &gate->hw : NULL, gate_ops,
				     flags);

	return clk;
}

static DEFINE_SPINLOCK(clk_lock);
static struct clk **clk_table;
static void __iomem *reg_base;
static struct clk_onecell_data clk_data;
static struct device_node *cru_node;
static struct regmap *grf;

void __init rockchip_clk_init(struct device_node *np, void __iomem *base,
			      unsigned long nr_clks)
{
	reg_base = base;
	cru_node = np;
	grf = ERR_PTR(-EPROBE_DEFER);

	clk_table = kcalloc(nr_clks, sizeof(struct clk *), GFP_KERNEL);
	if (!clk_table)
		pr_err("%s: could not allocate clock lookup table\n", __func__);

	clk_data.clks = clk_table;
	clk_data.clk_num = nr_clks;
	of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
}

struct regmap *rockchip_clk_get_grf(void)
{
	if (IS_ERR(grf))
		grf = syscon_regmap_lookup_by_phandle(cru_node, "rockchip,grf");
	return grf;
}

void rockchip_clk_add_lookup(struct clk *clk, unsigned int id)
{
	if (clk_table && id)
		clk_table[id] = clk;
}

void __init rockchip_clk_register_plls(struct rockchip_pll_clock *list,
				unsigned int nr_pll, int grf_lock_offset)
{
	struct clk *clk;
	int idx;

	for (idx = 0; idx < nr_pll; idx++, list++) {
		clk = rockchip_clk_register_pll(list->type, list->name,
				list->parent_names, list->num_parents,
				reg_base, list->con_offset, grf_lock_offset,
				list->lock_shift, list->mode_offset,
				list->mode_shift, list->rate_table, &clk_lock);
		if (IS_ERR(clk)) {
			pr_err("%s: failed to register clock %s\n", __func__,
				list->name);
			continue;
		}

		rockchip_clk_add_lookup(clk, list->id);
	}
}

void __init rockchip_clk_register_branches(
				      struct rockchip_clk_branch *list,
				      unsigned int nr_clk)
{
	struct clk *clk;
	unsigned int idx;
	unsigned long flags;

	for (idx = 0; idx < nr_clk; idx++, list++) {
		/* reset so an unhandled branch_type is caught below */
		clk = NULL;
		flags = list->flags;

		/* catch simple muxes */
		switch (list->branch_type) {
		case branch_mux:
			clk = clk_register_mux(NULL, list->name,
				list->parent_names, list->num_parents,
				flags, reg_base + list->muxdiv_offset,
				list->mux_shift, list->mux_width,
				list->mux_flags, &clk_lock);
			break;
		case branch_divider:
			if (list->div_table)
				clk = clk_register_divider_table(NULL,
					list->name, list->parent_names[0],
					flags, reg_base + list->muxdiv_offset,
					list->div_shift, list->div_width,
					list->div_flags, list->div_table,
					&clk_lock);
			else
				clk = clk_register_divider(NULL, list->name,
					list->parent_names[0], flags,
					reg_base + list->muxdiv_offset,
					list->div_shift, list->div_width,
					list->div_flags, &clk_lock);
			break;
		case branch_fraction_divider:
			/* keep all gates untouched for now */
			flags |= CLK_IGNORE_UNUSED;

			clk = rockchip_clk_register_frac_branch(list->name,
				list->parent_names, list->num_parents,
				reg_base, list->muxdiv_offset, list->div_flags,
				list->gate_offset, list->gate_shift,
				list->gate_flags, flags, &clk_lock);
			break;
		case branch_gate:
			flags |= CLK_SET_RATE_PARENT;

			/* keep all gates untouched for now */
			flags |= CLK_IGNORE_UNUSED;

			clk = clk_register_gate(NULL, list->name,
				list->parent_names[0], flags,
				reg_base + list->gate_offset,
				list->gate_shift, list->gate_flags, &clk_lock);
			break;
		case branch_composite:
			/* keep all gates untouched for now */
			flags |= CLK_IGNORE_UNUSED;

			clk = rockchip_clk_register_branch(list->name,
				list->parent_names, list->num_parents,
				reg_base, list->muxdiv_offset, list->mux_shift,
				list->mux_width, list->mux_flags,
				list->div_shift, list->div_width,
				list->div_flags, list->div_table,
				list->gate_offset, list->gate_shift,
				list->gate_flags, flags, &clk_lock);
			break;
		}

		/* none of the cases above matched */
		if (!clk) {
			pr_err("%s: unknown clock type %d\n",
			       __func__, list->branch_type);
			continue;
		}

		if (IS_ERR(clk)) {
			pr_err("%s: failed to register clock %s: %ld\n",
			       __func__, list->name, PTR_ERR(clk));
			continue;
		}

		rockchip_clk_add_lookup(clk, list->id);
	}
}

void __init rockchip_clk_register_armclk(unsigned int lookup_id,
			const char *name, const char **parent_names,
			u8 num_parents,
			const struct rockchip_cpuclk_reg_data *reg_data,
			const struct rockchip_cpuclk_rate_table *rates,
			int nrates)
{
	struct clk *clk;

	clk = rockchip_clk_register_cpuclk(name, parent_names, num_parents,
					   reg_data, rates, nrates, reg_base,
					   &clk_lock);
	if (IS_ERR(clk)) {
		pr_err("%s: failed to register clock %s: %ld\n",
		       __func__, name, PTR_ERR(clk));
		return;
	}

	rockchip_clk_add_lookup(clk, lookup_id);
}

void __init rockchip_clk_protect_critical(const char *clocks[], int nclocks)
{
	int i;

	/* Protect the clocks that need to stay on */
	for (i = 0; i < nclocks; i++) {
		struct clk *clk = __clk_lookup(clocks[i]);

		if (clk)
			clk_prepare_enable(clk);
	}
}

static unsigned int reg_restart;
static int rockchip_restart_notify(struct notifier_block *this,
				   unsigned long mode, void *cmd)
{
	/* writing the magic value triggers the CRU's global soft-reset */
	writel(0xfdb9, reg_base + reg_restart);
	return NOTIFY_DONE;
}

static struct notifier_block rockchip_restart_handler = {
	.notifier_call = rockchip_restart_notify,
	.priority = 128,
};

void __init rockchip_register_restart_notifier(unsigned int reg)
{
	int ret;

	reg_restart = reg;
	ret = register_restart_handler(&rockchip_restart_handler);
	if (ret)
		pr_err("%s: cannot register restart handler, %d\n",
		       __func__, ret);
}