// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Thomas Abraham <thomas.ab@samsung.com>
 *
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
 *
 * This file contains the utility function to register a CPU clock for Samsung
 * Exynos platforms. A CPU clock is defined as a clock supplied to a CPU or a
 * group of CPUs. The CPU clock is typically derived from a hierarchy of clock
 * blocks which includes mux and divider blocks. There are a number of other
 * auxiliary clocks supplied to the CPU domain, such as the debug blocks and
 * the AXI clock for the CPU domain. The rates of these auxiliary clocks are
 * related to the CPU clock rate and this relation is usually specified in the
 * hardware manual of the SoC or supplied after the SoC characterization.
 *
 * The CPU clock implementation below allows rate changes of the CPU clock and
 * the corresponding rate changes of the auxiliary clocks of the CPU domain.
 * The platform clock driver provides a clock register configuration for each
 * configurable rate which is then used to program the clock hardware
 * registers to achieve a fast coordinated rate change for all the CPU domain
 * clocks.
 *
 * On a rate change request for the CPU clock, the rate change is propagated
 * up to the PLL supplying the clock to the CPU domain clock blocks. While the
 * CPU domain PLL is reconfigured, the CPU domain clocks are driven using an
 * alternate clock source. If required, the alternate clock source is divided
 * down in order to keep the output clock rate within the previous OPP limits.
 */
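
/*
 * The resulting sequence, as implemented by the notifier handlers below, is:
 *   1. PRE_RATE_CHANGE: re-parent armclk to the alternate source (optionally
 *      divided down to stay within the current OPP) and program the new
 *      divider values for the CPU domain clocks.
 *   2. The parent PLL is then reconfigured to the new rate by the common
 *      clock framework (the rate change propagates via CLK_SET_RATE_PARENT).
 *   3. POST_RATE_CHANGE: re-parent armclk back to the reconfigured PLL output
 *      and remove the temporary 'safe' divider.
 */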

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include "clk-cpu.h"

#define E4210_SRC_CPU		0x0
#define E4210_STAT_CPU		0x200
#define E4210_DIV_CPU0		0x300
#define E4210_DIV_CPU1		0x304
#define E4210_DIV_STAT_CPU0	0x400
#define E4210_DIV_STAT_CPU1	0x404

#define E5433_MUX_SEL2		0x008
#define E5433_MUX_STAT2		0x208
#define E5433_DIV_CPU0		0x400
#define E5433_DIV_CPU1		0x404
#define E5433_DIV_STAT_CPU0	0x500
#define E5433_DIV_STAT_CPU1	0x504

#define E4210_DIV0_RATIO0_MASK	0x7
#define E4210_DIV1_HPM_MASK	(0x7 << 4)
#define E4210_DIV1_COPY_MASK	(0x7 << 0)
#define E4210_MUX_HPM_MASK	(1 << 20)
#define E4210_DIV0_ATB_SHIFT	16
#define E4210_DIV0_ATB_MASK	(DIV_MASK << E4210_DIV0_ATB_SHIFT)

#define MAX_DIV			8
#define DIV_MASK		7
#define DIV_MASK_ALL		0xffffffff
#define MUX_MASK		7
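
/*
 * Note: the CPU block divider fields used here are 3 bits wide (DIV_MASK is
 * 0x7) and a programmed value of n divides the input clock by n + 1, which is
 * why the maximum division factor MAX_DIV is 8.
 */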

/*
 * Helper function to wait until divider(s) have stabilized after the divider
 * value has changed.
 */
static void wait_until_divider_stable(void __iomem *div_reg, unsigned long mask)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(10);

	do {
		if (!(readl(div_reg) & mask))
			return;
	} while (time_before(jiffies, timeout));

	if (!(readl(div_reg) & mask))
		return;

	pr_err("%s: timeout in divider stabilization\n", __func__);
}

/*
 * Helper function to wait until mux has stabilized after the mux selection
 * value was changed.
 */
static void wait_until_mux_stable(void __iomem *mux_reg, u32 mux_pos,
				  unsigned long mux_value)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(10);

	do {
		if (((readl(mux_reg) >> mux_pos) & MUX_MASK) == mux_value)
			return;
	} while (time_before(jiffies, timeout));

	if (((readl(mux_reg) >> mux_pos) & MUX_MASK) == mux_value)
		return;

	pr_err("%s: re-parenting mux timed-out\n", __func__);
}

/* common round rate callback usable for all types of CPU clocks */
static long exynos_cpuclk_round_rate(struct clk_hw *hw,
			unsigned long drate, unsigned long *prate)
{
	struct clk_hw *parent = clk_hw_get_parent(hw);

	*prate = clk_hw_round_rate(parent, drate);
	return *prate;
}

/* common recalc rate callback usable for all types of CPU clocks */
static unsigned long exynos_cpuclk_recalc_rate(struct clk_hw *hw,
			unsigned long parent_rate)
{
	/*
	 * The CPU clock output (armclk) rate is the same as its parent
	 * rate. Although there exist certain dividers inside the CPU
	 * clock block that could be used to divide the parent clock,
	 * the driver does not make use of them currently, except during
	 * frequency transitions.
	 */
	return parent_rate;
}

static const struct clk_ops exynos_cpuclk_clk_ops = {
	.recalc_rate = exynos_cpuclk_recalc_rate,
	.round_rate = exynos_cpuclk_round_rate,
};

/*
 * Helper function to set the 'safe' dividers for the CPU clock. The parameters
 * div and mask contain the divider value and the register bit mask of the
 * dividers to be programmed.
 */
static void exynos_set_safe_div(void __iomem *base, unsigned long div,
				unsigned long mask)
{
	unsigned long div0;

	div0 = readl(base + E4210_DIV_CPU0);
	div0 = (div0 & ~mask) | (div & mask);
	writel(div0, base + E4210_DIV_CPU0);
	wait_until_divider_stable(base + E4210_DIV_STAT_CPU0, mask);
}

/* handler for pre-rate change notification from parent clock */
static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
			struct exynos_cpuclk *cpuclk, void __iomem *base)
{
	const struct exynos_cpuclk_cfg_data *cfg_data = cpuclk->cfg;
	unsigned long alt_prate = clk_hw_get_rate(cpuclk->alt_parent);
	unsigned long alt_div = 0, alt_div_mask = DIV_MASK;
	unsigned long div0, div1 = 0, mux_reg;
	unsigned long flags;

	/* find out the divider values to use for clock data */
	while ((cfg_data->prate * 1000) != ndata->new_rate) {
		if (cfg_data->prate == 0)
			return -EINVAL;
		cfg_data++;
	}
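
	/*
	 * Note: cfg_data->prate is given in kHz while the notifier rates are
	 * in Hz (hence the multiplication by 1000 above), and the
	 * configuration table is terminated by an entry with prate == 0.
	 */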

	spin_lock_irqsave(cpuclk->lock, flags);

	/*
	 * For the selected PLL clock frequency, get the pre-defined divider
	 * values. If the clock for sclk_hpm is not sourced from apll, then
	 * the values for the DIV_COPY and DIV_HPM dividers need not be set.
	 */
	div0 = cfg_data->div0;
	if (cpuclk->flags & CLK_CPU_HAS_DIV1) {
		div1 = cfg_data->div1;
		if (readl(base + E4210_SRC_CPU) & E4210_MUX_HPM_MASK)
			div1 = readl(base + E4210_DIV_CPU1) &
				(E4210_DIV1_HPM_MASK | E4210_DIV1_COPY_MASK);
	}

	/*
	 * If the old parent clock speed is less than the clock speed of
	 * the alternate parent, then it should be ensured that at no point
	 * the armclk speed is more than the old rate until the dividers are
	 * set. Also work around the issue of the dividers being set to lower
	 * values before the parent clock speed is set to the new lower speed
	 * (this can result in too high a speed of the armclk output clocks).
	 */
	if (alt_prate > ndata->old_rate || ndata->old_rate > ndata->new_rate) {
		unsigned long tmp_rate = min(ndata->old_rate, ndata->new_rate);

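		/*
		 * Example with hypothetical rates: alt_prate = 800 MHz and
		 * tmp_rate = 500 MHz give alt_div = DIV_ROUND_UP(800, 500) - 1
		 * = 1, i.e. a divide-by-2, so armclk runs at 400 MHz and never
		 * exceeds the 500 MHz limit while the PLL is reconfigured.
		 */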
		alt_div = DIV_ROUND_UP(alt_prate, tmp_rate) - 1;
		WARN_ON(alt_div >= MAX_DIV);

		if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) {
			/*
			 * In Exynos4210, the ATB clock parent is also
			 * mout_core, so the ATB clock also needs to be
			 * maintained at a safe speed.
			 */
			alt_div |= E4210_DIV0_ATB_MASK;
			alt_div_mask |= E4210_DIV0_ATB_MASK;
		}
		exynos_set_safe_div(base, alt_div, alt_div_mask);
		div0 |= alt_div;
	}

	/* select sclk_mpll as the alternate parent */
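	/*
	 * Setting bit 16 of E4210_SRC_CPU re-parents mout_core from mout_apll
	 * to sclk_mpll; the status field at the same bit position in
	 * E4210_STAT_CPU reads 2 once the mux has actually switched over.
	 */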
	mux_reg = readl(base + E4210_SRC_CPU);
	writel(mux_reg | (1 << 16), base + E4210_SRC_CPU);
	wait_until_mux_stable(base + E4210_STAT_CPU, 16, 2);

	/* alternate parent is active now. set the dividers */
	writel(div0, base + E4210_DIV_CPU0);
	wait_until_divider_stable(base + E4210_DIV_STAT_CPU0, DIV_MASK_ALL);

	if (cpuclk->flags & CLK_CPU_HAS_DIV1) {
		writel(div1, base + E4210_DIV_CPU1);
		wait_until_divider_stable(base + E4210_DIV_STAT_CPU1,
					  DIV_MASK_ALL);
	}

	spin_unlock_irqrestore(cpuclk->lock, flags);
	return 0;
}

/* handler for post-rate change notification from parent clock */
static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
			struct exynos_cpuclk *cpuclk, void __iomem *base)
{
	const struct exynos_cpuclk_cfg_data *cfg_data = cpuclk->cfg;
	unsigned long div = 0, div_mask = DIV_MASK;
	unsigned long mux_reg;
	unsigned long flags;

	/* find out the divider values to use for clock data */
	if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) {
		while ((cfg_data->prate * 1000) != ndata->new_rate) {
			if (cfg_data->prate == 0)
				return -EINVAL;
			cfg_data++;
		}
	}

	spin_lock_irqsave(cpuclk->lock, flags);

	/* re-select mout_apll (the reconfigured PLL output) as the parent */
	mux_reg = readl(base + E4210_SRC_CPU);
	writel(mux_reg & ~(1 << 16), base + E4210_SRC_CPU);
	wait_until_mux_stable(base + E4210_STAT_CPU, 16, 1);

	if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) {
		div |= (cfg_data->div0 & E4210_DIV0_ATB_MASK);
		div_mask |= E4210_DIV0_ATB_MASK;
	}

	exynos_set_safe_div(base, div, div_mask);
	spin_unlock_irqrestore(cpuclk->lock, flags);
	return 0;
}

/*
 * Helper function to set the 'safe' dividers for the CPU clock. The parameters
 * div and mask contain the divider value and the register bit mask of the
 * dividers to be programmed.
 */
static void exynos5433_set_safe_div(void __iomem *base, unsigned long div,
				    unsigned long mask)
{
	unsigned long div0;

	div0 = readl(base + E5433_DIV_CPU0);
	div0 = (div0 & ~mask) | (div & mask);
	writel(div0, base + E5433_DIV_CPU0);
	wait_until_divider_stable(base + E5433_DIV_STAT_CPU0, mask);
}

/* handler for pre-rate change notification from parent clock */
static int exynos5433_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
			struct exynos_cpuclk *cpuclk, void __iomem *base)
{
	const struct exynos_cpuclk_cfg_data *cfg_data = cpuclk->cfg;
	unsigned long alt_prate = clk_hw_get_rate(cpuclk->alt_parent);
	unsigned long alt_div = 0, alt_div_mask = DIV_MASK;
	unsigned long div0, div1 = 0, mux_reg;
	unsigned long flags;

	/* find out the divider values to use for clock data */
	while ((cfg_data->prate * 1000) != ndata->new_rate) {
		if (cfg_data->prate == 0)
			return -EINVAL;
		cfg_data++;
	}

	spin_lock_irqsave(cpuclk->lock, flags);

	/*
	 * For the selected PLL clock frequency, get the pre-defined divider
	 * values.
	 */
	div0 = cfg_data->div0;
	div1 = cfg_data->div1;

	/*
	 * If the old parent clock speed is less than the clock speed of
	 * the alternate parent, then it should be ensured that at no point
	 * the armclk speed is more than the old rate until the dividers are
	 * set. Also work around the issue of the dividers being set to lower
	 * values before the parent clock speed is set to the new lower speed
	 * (this can result in too high a speed of the armclk output clocks).
	 */
	if (alt_prate > ndata->old_rate || ndata->old_rate > ndata->new_rate) {
		unsigned long tmp_rate = min(ndata->old_rate, ndata->new_rate);

		alt_div = DIV_ROUND_UP(alt_prate, tmp_rate) - 1;
		WARN_ON(alt_div >= MAX_DIV);

		exynos5433_set_safe_div(base, alt_div, alt_div_mask);
		div0 |= alt_div;
	}

	/* select the alternate parent */
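	/*
	 * Here the low bit of E5433_MUX_SEL2 selects the alternate parent for
	 * armclk; the corresponding field in E5433_MUX_STAT2 reads 2 once the
	 * mux has actually switched over.
	 */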
	mux_reg = readl(base + E5433_MUX_SEL2);
	writel(mux_reg | 1, base + E5433_MUX_SEL2);
	wait_until_mux_stable(base + E5433_MUX_STAT2, 0, 2);

	/* alternate parent is active now. set the dividers */
	writel(div0, base + E5433_DIV_CPU0);
	wait_until_divider_stable(base + E5433_DIV_STAT_CPU0, DIV_MASK_ALL);

	writel(div1, base + E5433_DIV_CPU1);
	wait_until_divider_stable(base + E5433_DIV_STAT_CPU1, DIV_MASK_ALL);

	spin_unlock_irqrestore(cpuclk->lock, flags);
	return 0;
}

/* handler for post-rate change notification from parent clock */
static int exynos5433_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
			struct exynos_cpuclk *cpuclk, void __iomem *base)
{
	unsigned long div = 0, div_mask = DIV_MASK;
	unsigned long mux_reg;
	unsigned long flags;

	spin_lock_irqsave(cpuclk->lock, flags);

	/* re-select apll (the reconfigured PLL output) as the parent */
	mux_reg = readl(base + E5433_MUX_SEL2);
	writel(mux_reg & ~1, base + E5433_MUX_SEL2);
	wait_until_mux_stable(base + E5433_MUX_STAT2, 0, 1);

	exynos5433_set_safe_div(base, div, div_mask);
	spin_unlock_irqrestore(cpuclk->lock, flags);
	return 0;
}

/*
 * This notifier function is called for the pre-rate and post-rate change
 * notifications of the parent clock of cpuclk.
 */
static int exynos_cpuclk_notifier_cb(struct notifier_block *nb,
				     unsigned long event, void *data)
{
	struct clk_notifier_data *ndata = data;
	struct exynos_cpuclk *cpuclk;
	void __iomem *base;
	int err = 0;

	cpuclk = container_of(nb, struct exynos_cpuclk, clk_nb);
	base = cpuclk->ctrl_base;

	if (event == PRE_RATE_CHANGE)
		err = exynos_cpuclk_pre_rate_change(ndata, cpuclk, base);
	else if (event == POST_RATE_CHANGE)
		err = exynos_cpuclk_post_rate_change(ndata, cpuclk, base);

	return notifier_from_errno(err);
}

/*
 * This notifier function is called for the pre-rate and post-rate change
 * notifications of the parent clock of cpuclk.
 */
static int exynos5433_cpuclk_notifier_cb(struct notifier_block *nb,
					 unsigned long event, void *data)
{
	struct clk_notifier_data *ndata = data;
	struct exynos_cpuclk *cpuclk;
	void __iomem *base;
	int err = 0;

	cpuclk = container_of(nb, struct exynos_cpuclk, clk_nb);
	base = cpuclk->ctrl_base;

	if (event == PRE_RATE_CHANGE)
		err = exynos5433_cpuclk_pre_rate_change(ndata, cpuclk, base);
	else if (event == POST_RATE_CHANGE)
		err = exynos5433_cpuclk_post_rate_change(ndata, cpuclk, base);

	return notifier_from_errno(err);
}

/* helper function to register a CPU clock */
static int __init exynos_register_cpu_clock(struct samsung_clk_provider *ctx,
		unsigned int lookup_id, const char *name,
		const struct clk_hw *parent, const struct clk_hw *alt_parent,
		unsigned long offset, const struct exynos_cpuclk_cfg_data *cfg,
		unsigned long num_cfgs, unsigned long flags)
{
	struct exynos_cpuclk *cpuclk;
	struct clk_init_data init;
	const char *parent_name;
	int ret = 0;

	if (IS_ERR(parent) || IS_ERR(alt_parent)) {
		pr_err("%s: invalid parent clock(s)\n", __func__);
		return -EINVAL;
	}

	cpuclk = kzalloc(sizeof(*cpuclk), GFP_KERNEL);
	if (!cpuclk)
		return -ENOMEM;

	parent_name = clk_hw_get_name(parent);

	init.name = name;
	init.flags = CLK_SET_RATE_PARENT;
	init.parent_names = &parent_name;
	init.num_parents = 1;
	init.ops = &exynos_cpuclk_clk_ops;

	cpuclk->alt_parent = alt_parent;
	cpuclk->hw.init = &init;
	cpuclk->ctrl_base = ctx->reg_base + offset;
	cpuclk->lock = &ctx->lock;
	cpuclk->flags = flags;
	if (flags & CLK_CPU_HAS_E5433_REGS_LAYOUT)
		cpuclk->clk_nb.notifier_call = exynos5433_cpuclk_notifier_cb;
	else
		cpuclk->clk_nb.notifier_call = exynos_cpuclk_notifier_cb;

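	/*
	 * The rate change itself is propagated to the parent PLL via
	 * CLK_SET_RATE_PARENT above; registering the notifier on the parent
	 * clock is what lets the pre/post handlers re-parent armclk around
	 * the PLL reconfiguration.
	 */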
	ret = clk_notifier_register(parent->clk, &cpuclk->clk_nb);
	if (ret) {
		pr_err("%s: failed to register clock notifier for %s\n",
		       __func__, name);
		goto free_cpuclk;
	}

	cpuclk->cfg = kmemdup(cfg, sizeof(*cfg) * num_cfgs, GFP_KERNEL);
	if (!cpuclk->cfg) {
		ret = -ENOMEM;
		goto unregister_clk_nb;
	}

	ret = clk_hw_register(NULL, &cpuclk->hw);
	if (ret) {
		pr_err("%s: could not register cpuclk %s\n", __func__, name);
		goto free_cpuclk_data;
	}

	samsung_clk_add_lookup(ctx, &cpuclk->hw, lookup_id);
	return 0;

free_cpuclk_data:
	kfree(cpuclk->cfg);
unregister_clk_nb:
	clk_notifier_unregister(parent->clk, &cpuclk->clk_nb);
free_cpuclk:
	kfree(cpuclk);
	return ret;
}

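/*
 * samsung_clk_register_cpu() is the entry point used by the platform clock
 * drivers. Each samsung_cpu_clock entry names the armclk, its parent and
 * alternate parent (by lookup index into the provider's clk_hw table), the
 * offset of the CPU clock controller registers and a zero-terminated table of
 * per-rate divider configurations. A minimal sketch of such an entry (all
 * field values below are hypothetical, not taken from any real SoC):
 *
 *	static const struct exynos_cpuclk_cfg_data cpu_cfg[] __initconst = {
 *		{ 1000000, 0x00000000, 0x00000000 },	// prate (kHz), div0, div1
 *		{ 0 },					// terminator
 *	};
 *
 *	static const struct samsung_cpu_clock cpu_clks[] __initconst = {
 *		{
 *			.id		= 1,		// hypothetical lookup id
 *			.name		= "armclk",
 *			.parent_id	= 2,		// hypothetical mout_apll id
 *			.alt_parent_id	= 3,		// hypothetical mout_mpll id
 *			.flags		= CLK_CPU_NEEDS_DEBUG_ALT_DIV,
 *			.offset		= 0x14200,	// hypothetical CMU offset
 *			.cfg		= cpu_cfg,
 *		},
 *	};
 *
 *	samsung_clk_register_cpu(ctx, cpu_clks, ARRAY_SIZE(cpu_clks));
 */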
void __init samsung_clk_register_cpu(struct samsung_clk_provider *ctx,
		const struct samsung_cpu_clock *list, unsigned int nr_clk)
{
	unsigned int idx;
	unsigned int num_cfgs;
	struct clk_hw **hws = ctx->clk_data.hws;

	for (idx = 0; idx < nr_clk; idx++, list++) {
		/* find count of configuration rates in cfg */
		for (num_cfgs = 0; list->cfg[num_cfgs].prate != 0; )
			num_cfgs++;

		exynos_register_cpu_clock(ctx, list->id, list->name,
				hws[list->parent_id], hws[list->alt_parent_id],
				list->offset, list->cfg, num_cfgs, list->flags);
	}
}