// SPDX-License-Identifier: GPL-2.0-only
/*
 * drivers/clk/tegra/clk-emc.c
 *
 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
 *
 * Author:
 *	Mikko Perttunen <mperttunen@nvidia.com>
 */

#include <linux/clk-provider.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sort.h>
#include <linux/string.h>

#include <soc/tegra/fuse.h>
#include <soc/tegra/emc.h>

#include "clk.h"

#define CLK_SOURCE_EMC 0x19c

#define CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR_SHIFT 0
#define CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR_MASK 0xff
#define CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR(x) (((x) & CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR_MASK) << \
					      CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR_SHIFT)

#define CLK_SOURCE_EMC_EMC_2X_CLK_SRC_SHIFT 29
#define CLK_SOURCE_EMC_EMC_2X_CLK_SRC_MASK 0x7
#define CLK_SOURCE_EMC_EMC_2X_CLK_SRC(x) (((x) & CLK_SOURCE_EMC_EMC_2X_CLK_SRC_MASK) << \
					  CLK_SOURCE_EMC_EMC_2X_CLK_SRC_SHIFT)

static const char * const emc_parent_clk_names[] = {
	"pll_m", "pll_c", "pll_p", "clk_m", "pll_m_ud",
	"pll_c2", "pll_c3", "pll_c_ud"
};

/*
 * List of clock sources for various parents the EMC clock can have.
 * When we change the timing to a timing with a parent that has the same
 * clock source as the current parent, we must first change to a backup
 * timing that has a different clock source.
 */

#define EMC_SRC_PLL_M 0
#define EMC_SRC_PLL_C 1
#define EMC_SRC_PLL_P 2
#define EMC_SRC_CLK_M 3
#define EMC_SRC_PLL_C2 4
#define EMC_SRC_PLL_C3 5

static const char emc_parent_clk_sources[] = {
	EMC_SRC_PLL_M, EMC_SRC_PLL_C, EMC_SRC_PLL_P, EMC_SRC_CLK_M,
	EMC_SRC_PLL_M, EMC_SRC_PLL_C2, EMC_SRC_PLL_C3, EMC_SRC_PLL_C
};

struct emc_timing {
	unsigned long rate, parent_rate;
	u8 parent_index;
	struct clk *parent;
	u32 ram_code;
};

struct tegra_clk_emc {
	struct clk_hw hw;
	void __iomem *clk_regs;
	struct clk *prev_parent;
	bool changing_timing;

	struct device_node *emc_node;
	struct tegra_emc *emc;

	int num_timings;
	struct emc_timing *timings;
	spinlock_t *lock;
};

/* Common clock framework callback implementations */

static unsigned long emc_recalc_rate(struct clk_hw *hw,
				     unsigned long parent_rate)
{
	struct tegra_clk_emc *tegra;
	u32 val, div;

	tegra = container_of(hw, struct tegra_clk_emc, hw);

	/*
	 * CCF wrongly assumes that the parent won't change during set_rate,
	 * so get the parent rate explicitly.
	 */
	parent_rate = clk_hw_get_rate(clk_hw_get_parent(hw));

	val = readl(tegra->clk_regs + CLK_SOURCE_EMC);
	div = val & CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR_MASK;

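	/*
	 * The EMC_2X divisor field is programmed by emc_set_timing() as
	 * 2 * parent_rate / rate - 2, so the output rate works out to
	 * parent_rate * 2 / (div + 2).
	 */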
	return parent_rate / (div + 2) * 2;
}

/*
 * Rounds up unless no higher rate exists, in which case down. This way is
 * safer since things have EMC rate floors. Also don't touch parent_rate
 * since we don't want the CCF to play with our parent clocks.
 */
static int emc_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	struct tegra_clk_emc *tegra;
	u8 ram_code = tegra_read_ram_code();
	struct emc_timing *timing = NULL;
	int i, k, t;

	tegra = container_of(hw, struct tegra_clk_emc, hw);

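	/*
	 * Timings are grouped by RAM code, so [k, t) below ends up spanning
	 * exactly the entries that match the RAM code read from the fuses.
	 */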
	for (k = 0; k < tegra->num_timings; k++) {
		if (tegra->timings[k].ram_code == ram_code)
			break;
	}

	for (t = k; t < tegra->num_timings; t++) {
		if (tegra->timings[t].ram_code != ram_code)
			break;
	}

	for (i = k; i < t; i++) {
		timing = tegra->timings + i;

		if (timing->rate < req->rate && i != t - 1)
			continue;

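		/*
		 * The candidate exceeds the requested ceiling: fall back to
		 * the next lower timing, but never to one before the first
		 * entry for this RAM code.
		 */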
		if (timing->rate > req->max_rate) {
			i = max(i, k + 1);
			req->rate = tegra->timings[i - 1].rate;
			return 0;
		}

		if (timing->rate < req->min_rate)
			continue;

		req->rate = timing->rate;
		return 0;
	}

	if (timing) {
		req->rate = timing->rate;
		return 0;
	}

	req->rate = clk_hw_get_rate(hw);
	return 0;
}

static u8 emc_get_parent(struct clk_hw *hw)
{
	struct tegra_clk_emc *tegra;
	u32 val;

	tegra = container_of(hw, struct tegra_clk_emc, hw);

	val = readl(tegra->clk_regs + CLK_SOURCE_EMC);

	return (val >> CLK_SOURCE_EMC_EMC_2X_CLK_SRC_SHIFT)
		& CLK_SOURCE_EMC_EMC_2X_CLK_SRC_MASK;
}

static struct tegra_emc *emc_ensure_emc_driver(struct tegra_clk_emc *tegra)
{
	struct platform_device *pdev;

	if (tegra->emc)
		return tegra->emc;

	if (!tegra->emc_node)
		return NULL;

	pdev = of_find_device_by_node(tegra->emc_node);
	if (!pdev) {
		pr_err("%s: could not get external memory controller\n",
		       __func__);
		return NULL;
	}

	of_node_put(tegra->emc_node);
	tegra->emc_node = NULL;

	tegra->emc = platform_get_drvdata(pdev);
	if (!tegra->emc) {
		put_device(&pdev->dev);
		pr_err("%s: cannot find EMC driver\n", __func__);
		return NULL;
	}

	return tegra->emc;
}

static int emc_set_timing(struct tegra_clk_emc *tegra,
			  struct emc_timing *timing)
{
	int err;
	u8 div;
	u32 car_value;
	unsigned long flags = 0;
	struct tegra_emc *emc = emc_ensure_emc_driver(tegra);

	if (!emc)
		return -ENOENT;

	pr_debug("going to rate %ld prate %ld p %s\n", timing->rate,
		 timing->parent_rate, __clk_get_name(timing->parent));

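	/*
	 * A timing that keeps the current parent must also match its current
	 * rate: the rate of the clock the EMC is actively running from cannot
	 * be changed here, so emc_set_rate() is expected to have detoured
	 * through a backup timing first.
	 */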
	if (emc_get_parent(&tegra->hw) == timing->parent_index &&
	    clk_get_rate(timing->parent) != timing->parent_rate) {
		WARN_ONCE(1, "parent %s rate mismatch %lu %lu\n",
			  __clk_get_name(timing->parent),
			  clk_get_rate(timing->parent),
			  timing->parent_rate);
		return -EINVAL;
	}

	tegra->changing_timing = true;

	err = clk_set_rate(timing->parent, timing->parent_rate);
	if (err) {
		pr_err("cannot change parent %s rate to %ld: %d\n",
		       __clk_get_name(timing->parent), timing->parent_rate,
		       err);

		return err;
	}

	err = clk_prepare_enable(timing->parent);
	if (err) {
		pr_err("cannot enable parent clock: %d\n", err);
		return err;
	}

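	/* Compute the EMC_2X divisor field; inverse of emc_recalc_rate(). */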
	div = timing->parent_rate / (timing->rate / 2) - 2;

	err = tegra_emc_prepare_timing_change(emc, timing->rate);
	if (err)
		return err;

	spin_lock_irqsave(tegra->lock, flags);

	car_value = readl(tegra->clk_regs + CLK_SOURCE_EMC);

	car_value &= ~CLK_SOURCE_EMC_EMC_2X_CLK_SRC(~0);
	car_value |= CLK_SOURCE_EMC_EMC_2X_CLK_SRC(timing->parent_index);

	car_value &= ~CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR(~0);
	car_value |= CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR(div);

	writel(car_value, tegra->clk_regs + CLK_SOURCE_EMC);

	spin_unlock_irqrestore(tegra->lock, flags);

	tegra_emc_complete_timing_change(emc, timing->rate);

	clk_hw_reparent(&tegra->hw, __clk_get_hw(timing->parent));
	clk_disable_unprepare(tegra->prev_parent);

	tegra->prev_parent = timing->parent;
	tegra->changing_timing = false;

	return 0;
}

/*
 * Get backup timing to use as an intermediate step when a change between
 * two timings with the same clock source has been requested. First try to
 * find a timing with a higher clock rate to avoid a rate below any set rate
 * floors. If that is not possible, find a lower rate.
 */
static struct emc_timing *get_backup_timing(struct tegra_clk_emc *tegra,
					    int timing_index)
{
	int i;
	u32 ram_code = tegra_read_ram_code();
	struct emc_timing *timing;

	for (i = timing_index + 1; i < tegra->num_timings; i++) {
		timing = tegra->timings + i;
		if (timing->ram_code != ram_code)
			break;

		if (emc_parent_clk_sources[timing->parent_index] !=
		    emc_parent_clk_sources[
		      tegra->timings[timing_index].parent_index])
			return timing;
	}

	for (i = timing_index - 1; i >= 0; --i) {
		timing = tegra->timings + i;
		if (timing->ram_code != ram_code)
			break;

		if (emc_parent_clk_sources[timing->parent_index] !=
		    emc_parent_clk_sources[
		      tegra->timings[timing_index].parent_index])
			return timing;
	}

	return NULL;
}

static int emc_set_rate(struct clk_hw *hw, unsigned long rate,
			unsigned long parent_rate)
{
	struct tegra_clk_emc *tegra;
	struct emc_timing *timing = NULL;
	int i, err;
	u32 ram_code = tegra_read_ram_code();

	tegra = container_of(hw, struct tegra_clk_emc, hw);

	if (clk_hw_get_rate(hw) == rate)
		return 0;

	/*
	 * When emc_set_timing changes the parent rate, CCF will propagate
	 * that downward to us, so ignore any set_rate calls while a rate
	 * change is already going on.
	 */
	if (tegra->changing_timing)
		return 0;

	for (i = 0; i < tegra->num_timings; i++) {
		if (tegra->timings[i].rate == rate &&
		    tegra->timings[i].ram_code == ram_code) {
			timing = tegra->timings + i;
			break;
		}
	}

	if (!timing) {
		pr_err("cannot switch to rate %ld without emc table\n", rate);
		return -EINVAL;
	}

	if (emc_parent_clk_sources[emc_get_parent(hw)] ==
	    emc_parent_clk_sources[timing->parent_index] &&
	    clk_get_rate(timing->parent) != timing->parent_rate) {
		/*
		 * Parent clock source not changed but parent rate has changed,
		 * need to temporarily switch to another parent
		 */

		struct emc_timing *backup_timing;

		backup_timing = get_backup_timing(tegra, i);
		if (!backup_timing) {
			pr_err("cannot find backup timing\n");
			return -EINVAL;
		}

		pr_debug("using %ld as backup rate when going to %ld\n",
			 backup_timing->rate, rate);

		err = emc_set_timing(tegra, backup_timing);
		if (err) {
			pr_err("cannot set backup timing: %d\n", err);
			return err;
		}
	}

	return emc_set_timing(tegra, timing);
}

/* Initialization and deinitialization */

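/*
 * The loaders below consume a device tree layout roughly like the sketch
 * here. The property names match what is parsed below and in
 * tegra_clk_register_emc(); the node names and the specific values are only
 * illustrative.
 *
 *	emc-timings-0 {
 *		nvidia,ram-code = <0>;
 *
 *		timing-12750000 {
 *			clock-frequency = <12750000>;
 *			nvidia,parent-clock-frequency = <408000000>;
 *			clocks = <&tegra_car TEGRA124_CLK_PLL_P>;
 *			clock-names = "emc-parent";
 *		};
 *	};
 */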
static int load_one_timing_from_dt(struct tegra_clk_emc *tegra,
				   struct emc_timing *timing,
				   struct device_node *node)
{
	int err, i;
	u32 tmp;

	err = of_property_read_u32(node, "clock-frequency", &tmp);
	if (err) {
		pr_err("timing %pOF: failed to read rate\n", node);
		return err;
	}

	timing->rate = tmp;

	err = of_property_read_u32(node, "nvidia,parent-clock-frequency", &tmp);
	if (err) {
		pr_err("timing %pOF: failed to read parent rate\n", node);
		return err;
	}

	timing->parent_rate = tmp;

	timing->parent = of_clk_get_by_name(node, "emc-parent");
	if (IS_ERR(timing->parent)) {
		pr_err("timing %pOF: failed to get parent clock\n", node);
		return PTR_ERR(timing->parent);
	}

	timing->parent_index = 0xff;
	i = match_string(emc_parent_clk_names, ARRAY_SIZE(emc_parent_clk_names),
			 __clk_get_name(timing->parent));
	if (i < 0) {
		pr_err("timing %pOF: %s is not a valid parent\n",
		       node, __clk_get_name(timing->parent));
		clk_put(timing->parent);
		return -EINVAL;
	}

	timing->parent_index = i;
	return 0;
}

static int cmp_timings(const void *_a, const void *_b)
{
	const struct emc_timing *a = _a;
	const struct emc_timing *b = _b;

	if (a->rate < b->rate)
		return -1;
	else if (a->rate == b->rate)
		return 0;
	else
		return 1;
}

static int load_timings_from_dt(struct tegra_clk_emc *tegra,
				struct device_node *node,
				u32 ram_code)
{
	struct emc_timing *timings_ptr;
	struct device_node *child;
	int child_count = of_get_child_count(node);
	int i = 0, err;
	size_t size;

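	/*
	 * Grow the timing table in place; the entries for this RAM code are
	 * appended after any previously loaded ones and sorted by rate below.
	 */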
	size = (tegra->num_timings + child_count) * sizeof(struct emc_timing);

	tegra->timings = krealloc(tegra->timings, size, GFP_KERNEL);
	if (!tegra->timings)
		return -ENOMEM;

	timings_ptr = tegra->timings + tegra->num_timings;
	tegra->num_timings += child_count;

	for_each_child_of_node(node, child) {
		struct emc_timing *timing = timings_ptr + (i++);

		err = load_one_timing_from_dt(tegra, timing, child);
		if (err) {
			of_node_put(child);
			kfree(tegra->timings);
			return err;
		}

		timing->ram_code = ram_code;
	}

	sort(timings_ptr, child_count, sizeof(struct emc_timing),
	     cmp_timings, NULL);

	return 0;
}

static const struct clk_ops tegra_clk_emc_ops = {
	.recalc_rate = emc_recalc_rate,
	.determine_rate = emc_determine_rate,
	.set_rate = emc_set_rate,
	.get_parent = emc_get_parent,
};

struct clk *tegra_clk_register_emc(void __iomem *base, struct device_node *np,
				   spinlock_t *lock)
{
	struct tegra_clk_emc *tegra;
	struct clk_init_data init;
	struct device_node *node;
	u32 node_ram_code;
	struct clk *clk;
	int err;

	tegra = kcalloc(1, sizeof(*tegra), GFP_KERNEL);
	if (!tegra)
		return ERR_PTR(-ENOMEM);

	tegra->clk_regs = base;
	tegra->lock = lock;

	tegra->num_timings = 0;

	for_each_child_of_node(np, node) {
		err = of_property_read_u32(node, "nvidia,ram-code",
					   &node_ram_code);
		if (err)
			continue;

		/*
		 * Store timings for all ram codes as we cannot read the
		 * fuses until the apbmisc driver is loaded.
		 */
		err = load_timings_from_dt(tegra, node, node_ram_code);
		if (err) {
			of_node_put(node);
			kfree(tegra);
			return ERR_PTR(err);
		}
	}

	if (tegra->num_timings == 0)
		pr_warn("%s: no memory timings registered\n", __func__);

	tegra->emc_node = of_parse_phandle(np,
			"nvidia,external-memory-controller", 0);
	if (!tegra->emc_node)
		pr_warn("%s: couldn't find node for EMC driver\n", __func__);

	init.name = "emc";
	init.ops = &tegra_clk_emc_ops;
	init.flags = CLK_IS_CRITICAL;
	init.parent_names = emc_parent_clk_names;
	init.num_parents = ARRAY_SIZE(emc_parent_clk_names);

	tegra->hw.init = &init;

	clk = clk_register(NULL, &tegra->hw);
	if (IS_ERR(clk))
		return clk;

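	/*
	 * Record the boot-time parent so that the first timing change can
	 * disable it once the EMC has been reparented.
	 */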
	tegra->prev_parent = clk_hw_get_parent_by_index(
		&tegra->hw, emc_get_parent(&tegra->hw))->clk;
	tegra->changing_timing = false;

	/* Allow debugging tools to see the EMC clock */
	clk_register_clkdev(clk, "emc", "tegra-clk-debug");

	return clk;
};