// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2015 Atmel Corporation,
 *                    Nicolas Ferre <nicolas.ferre@atmel.com>
 *
 * Based on clk-programmable & clk-peripheral drivers by Boris BREZILLON.
 */

#include <linux/bitfield.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/clk/at91_pmc.h>
#include <linux/of.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>

#include "pmc.h"

#define GENERATED_MAX_DIV	255

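/*
 * struct clk_generated - a PMC generated clock (GCLK)
 * @hw:		handle between common and hardware-specific interfaces
 * @regmap:	PMC register map
 * @range:	allowed rate range; a max of 0 means "no limit"
 * @lock:	lock protecting the PMC PCR register accesses
 * @mux_table:	optional table translating a parent index into a GCKCSS value
 * @id:		peripheral ID used to index the PCR register
 * @gckdiv:	cached divider field; the output rate is parent / (gckdiv + 1)
 * @layout:	PCR register layout (offset, masks and write command bit)
 * @parent_id:	cached GCKCSS value of the currently selected parent
 * @chg_pid:	index of the one parent whose rate may be changed to satisfy
 *		a rate request, or a negative value if no parent may change
 */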
struct clk_generated {
	struct clk_hw hw;
	struct regmap *regmap;
	struct clk_range range;
	spinlock_t *lock;
	u32 *mux_table;
	u32 id;
	u32 gckdiv;
	const struct clk_pcr_layout *layout;
	u8 parent_id;
	int chg_pid;
};

#define to_clk_generated(hw) \
	container_of(hw, struct clk_generated, hw)

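/*
 * The PCR is an indexed register: the generated-clock ID is written first to
 * select the entry, then GCKCSS, GCKDIV and GCKEN are updated in a single
 * read-modify-write with the layout's write command bit set. set_parent()
 * and set_rate() only cache their values; the hardware is programmed here,
 * when the clock is enabled.
 */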
static int clk_generated_enable(struct clk_hw *hw)
{
	struct clk_generated *gck = to_clk_generated(hw);
	unsigned long flags;

	pr_debug("GCLK: %s, gckdiv = %d, parent id = %d\n",
		 __func__, gck->gckdiv, gck->parent_id);

	spin_lock_irqsave(gck->lock, flags);
	regmap_write(gck->regmap, gck->layout->offset,
		     (gck->id & gck->layout->pid_mask));
	regmap_update_bits(gck->regmap, gck->layout->offset,
			   AT91_PMC_PCR_GCKDIV_MASK | gck->layout->gckcss_mask |
			   gck->layout->cmd | AT91_PMC_PCR_GCKEN,
			   field_prep(gck->layout->gckcss_mask, gck->parent_id) |
			   gck->layout->cmd |
			   FIELD_PREP(AT91_PMC_PCR_GCKDIV_MASK, gck->gckdiv) |
			   AT91_PMC_PCR_GCKEN);
	spin_unlock_irqrestore(gck->lock, flags);
	return 0;
}

static void clk_generated_disable(struct clk_hw *hw)
{
	struct clk_generated *gck = to_clk_generated(hw);
	unsigned long flags;

	spin_lock_irqsave(gck->lock, flags);
	regmap_write(gck->regmap, gck->layout->offset,
		     (gck->id & gck->layout->pid_mask));
	regmap_update_bits(gck->regmap, gck->layout->offset,
			   gck->layout->cmd | AT91_PMC_PCR_GCKEN,
			   gck->layout->cmd);
	spin_unlock_irqrestore(gck->lock, flags);
}

static int clk_generated_is_enabled(struct clk_hw *hw)
{
	struct clk_generated *gck = to_clk_generated(hw);
	unsigned long flags;
	unsigned int status;

	spin_lock_irqsave(gck->lock, flags);
	regmap_write(gck->regmap, gck->layout->offset,
		     (gck->id & gck->layout->pid_mask));
	regmap_read(gck->regmap, gck->layout->offset, &status);
	spin_unlock_irqrestore(gck->lock, flags);

	return !!(status & AT91_PMC_PCR_GCKEN);
}

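/* The generated clock output is the parent rate divided by (gckdiv + 1). */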
static unsigned long
clk_generated_recalc_rate(struct clk_hw *hw,
			  unsigned long parent_rate)
{
	struct clk_generated *gck = to_clk_generated(hw);

	return DIV_ROUND_CLOSEST(parent_rate, gck->gckdiv + 1);
}

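/*
 * Record @parent with divider @div as the new best candidate when the
 * resulting rate is at least as close to the requested rate as the best
 * candidate seen so far. A @div of 0 is treated as a divider of 1.
 */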
static void clk_generated_best_diff(struct clk_rate_request *req,
				    struct clk_hw *parent,
				    unsigned long parent_rate, u32 div,
				    int *best_diff, long *best_rate)
{
	unsigned long tmp_rate;
	int tmp_diff;

	if (!div)
		tmp_rate = parent_rate;
	else
		tmp_rate = parent_rate / div;
	tmp_diff = abs(req->rate - tmp_rate);

	if (*best_diff < 0 || *best_diff >= tmp_diff) {
		*best_rate = tmp_rate;
		*best_diff = tmp_diff;
		req->best_parent_rate = parent_rate;
		req->best_parent_hw = parent;
	}
}

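/*
 * Pick the parent and divider giving the rate closest to the request: first
 * try each fixed-rate parent with its closest divider, then, if one parent
 * (chg_pid) is allowed to change its rate, also ask it for a matching rate
 * for every possible divider.
 */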
static int clk_generated_determine_rate(struct clk_hw *hw,
					struct clk_rate_request *req)
{
	struct clk_generated *gck = to_clk_generated(hw);
	struct clk_hw *parent = NULL;
	struct clk_rate_request req_parent = *req;
	long best_rate = -EINVAL;
	unsigned long min_rate, parent_rate;
	int best_diff = -1;
	int i;
	u32 div;

	/* do not look for a rate that is outside of our range */
	if (gck->range.max && req->rate > gck->range.max)
		req->rate = gck->range.max;
	if (gck->range.min && req->rate < gck->range.min)
		req->rate = gck->range.min;

	for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
		if (gck->chg_pid == i)
			continue;

		parent = clk_hw_get_parent_by_index(hw, i);
		if (!parent)
			continue;

		parent_rate = clk_hw_get_rate(parent);
		min_rate = DIV_ROUND_CLOSEST(parent_rate, GENERATED_MAX_DIV + 1);
		if (!parent_rate ||
		    (gck->range.max && min_rate > gck->range.max))
			continue;

		div = DIV_ROUND_CLOSEST(parent_rate, req->rate);
		if (div > GENERATED_MAX_DIV + 1)
			div = GENERATED_MAX_DIV + 1;

		clk_generated_best_diff(req, parent, parent_rate, div,
					&best_diff, &best_rate);

		if (!best_diff)
			break;
	}

	/*
	 * The audio_pll rate can be modified, unlike the five other clocks
	 * that should never be altered.
	 * The audio_pll can technically be used by multiple consumers. However,
	 * with the rate locking, the first consumer to enable the clock will be
	 * the one definitely setting the rate of the clock.
	 * Since audio IPs are most likely to request the same rate, we enforce
	 * that the only clks able to modify gck rate are those of audio IPs.
	 */

	if (gck->chg_pid < 0)
		goto end;

	parent = clk_hw_get_parent_by_index(hw, gck->chg_pid);
	if (!parent)
		goto end;

	for (div = 1; div < GENERATED_MAX_DIV + 2; div++) {
		req_parent.rate = req->rate * div;
		if (__clk_determine_rate(parent, &req_parent))
			continue;
		clk_generated_best_diff(req, parent, req_parent.rate, div,
					&best_diff, &best_rate);

		if (!best_diff)
			break;
	}

end:
	pr_debug("GCLK: %s, best_rate = %ld, parent clk: %s @ %ld\n",
		 __func__, best_rate,
		 __clk_get_name((req->best_parent_hw)->clk),
		 req->best_parent_rate);

	if (best_rate < 0 || (gck->range.max && best_rate > gck->range.max))
		return -EINVAL;

	req->rate = best_rate;
	return 0;
}

/* No modification of hardware as we have the flag CLK_SET_PARENT_GATE set */
static int clk_generated_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_generated *gck = to_clk_generated(hw);

	if (index >= clk_hw_get_num_parents(hw))
		return -EINVAL;

	if (gck->mux_table)
		gck->parent_id = clk_mux_index_to_val(gck->mux_table, 0, index);
	else
		gck->parent_id = index;

	return 0;
}

static u8 clk_generated_get_parent(struct clk_hw *hw)
{
	struct clk_generated *gck = to_clk_generated(hw);

	return gck->parent_id;
}

/* No modification of hardware as we have the flag CLK_SET_RATE_GATE set */
static int clk_generated_set_rate(struct clk_hw *hw,
				  unsigned long rate,
				  unsigned long parent_rate)
{
	struct clk_generated *gck = to_clk_generated(hw);
	u32 div;

	if (!rate)
		return -EINVAL;

	if (gck->range.max && rate > gck->range.max)
		return -EINVAL;

	div = DIV_ROUND_CLOSEST(parent_rate, rate);
	if (div > GENERATED_MAX_DIV + 1 || !div)
		return -EINVAL;

	gck->gckdiv = div - 1;
	return 0;
}

static const struct clk_ops generated_ops = {
	.enable = clk_generated_enable,
	.disable = clk_generated_disable,
	.is_enabled = clk_generated_is_enabled,
	.recalc_rate = clk_generated_recalc_rate,
	.determine_rate = clk_generated_determine_rate,
	.get_parent = clk_generated_get_parent,
	.set_parent = clk_generated_set_parent,
	.set_rate = clk_generated_set_rate,
};

/**
 * clk_generated_startup - Initialize a given clock to its default parent and
 * divisor parameter.
 *
 * @gck:	Generated clock to set the startup parameters for.
 *
 * Take parameters from the hardware and update local clock configuration
 * accordingly.
 */
static void clk_generated_startup(struct clk_generated *gck)
{
	u32 tmp;
	unsigned long flags;

	spin_lock_irqsave(gck->lock, flags);
	regmap_write(gck->regmap, gck->layout->offset,
		     (gck->id & gck->layout->pid_mask));
	regmap_read(gck->regmap, gck->layout->offset, &tmp);
	spin_unlock_irqrestore(gck->lock, flags);

	gck->parent_id = field_get(gck->layout->gckcss_mask, tmp);
	gck->gckdiv = FIELD_GET(AT91_PMC_PCR_GCKDIV_MASK, tmp);
}

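/*
 * Illustrative sketch only (not part of this driver): a SoC-specific PMC
 * setup file would typically register each generated clock roughly as below,
 * where soc_pcr_layout, pcr_lock, parent_names, num_parents, the clock name,
 * the peripheral ID and gck_range are hypothetical stand-ins for the
 * SoC-specific definitions:
 *
 *	hw = at91_clk_register_generated(regmap, &pcr_lock, &soc_pcr_layout,
 *					 "i2s0_gclk", parent_names, NULL,
 *					 num_parents, 54, &gck_range,
 *					 -EINVAL);	// negative: no parent rate change
 *	if (IS_ERR(hw))
 *		goto err_free;
 */

/*
 * at91_clk_register_generated - register a PMC generated clock (GCLK)
 * @regmap:	PMC register map
 * @lock:	lock protecting the PMC register accesses
 * @layout:	PCR register layout for this SoC
 * @name:	name of the clock
 * @parent_names: array of parent clock names
 * @mux_table:	optional mapping from parent index to GCKCSS value
 * @num_parents: number of entries in @parent_names
 * @id:		peripheral ID of the generated clock
 * @range:	allowed rate range for the clock
 * @chg_pid:	index of the parent whose rate may be changed to satisfy rate
 *		requests, or a negative value to forbid any parent rate change
 *
 * Returns a clk_hw pointer on success, or an ERR_PTR() on failure.
 */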
struct clk_hw * __init
at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock,
			    const struct clk_pcr_layout *layout,
			    const char *name, const char **parent_names,
			    u32 *mux_table, u8 num_parents, u8 id,
			    const struct clk_range *range,
			    int chg_pid)
{
	struct clk_generated *gck;
	struct clk_init_data init;
	struct clk_hw *hw;
	int ret;

	gck = kzalloc(sizeof(*gck), GFP_KERNEL);
	if (!gck)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &generated_ops;
	init.parent_names = parent_names;
	init.num_parents = num_parents;
	init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE;
	if (chg_pid >= 0)
		init.flags |= CLK_SET_RATE_PARENT;

	gck->id = id;
	gck->hw.init = &init;
	gck->regmap = regmap;
	gck->lock = lock;
	gck->range = *range;
	gck->chg_pid = chg_pid;
	gck->layout = layout;
	gck->mux_table = mux_table;

	clk_generated_startup(gck);
	hw = &gck->hw;
	ret = clk_hw_register(NULL, &gck->hw);
	if (ret) {
		kfree(gck);
		hw = ERR_PTR(ret);
	} else {
		pmc_register_id(id);
	}

	return hw;
}