// SPDX-License-Identifier: GPL-2.0-only
/*
 * drivers/clk/tegra/clk-emc.c
 *
 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
 *
 * Author:
 *	Mikko Perttunen <mperttunen@nvidia.com>
 */

#include <linux/clk-provider.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sort.h>
#include <linux/string.h>

#include <soc/tegra/fuse.h>
#include <soc/tegra/emc.h>

#include "clk.h"

#define CLK_SOURCE_EMC 0x19c

#define CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR_SHIFT 0
#define CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR_MASK 0xff
#define CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR(x) (((x) & CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR_MASK) << \
					      CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR_SHIFT)

#define CLK_SOURCE_EMC_EMC_2X_CLK_SRC_SHIFT 29
#define CLK_SOURCE_EMC_EMC_2X_CLK_SRC_MASK 0x7
#define CLK_SOURCE_EMC_EMC_2X_CLK_SRC(x) (((x) & CLK_SOURCE_EMC_EMC_2X_CLK_SRC_MASK) << \
					  CLK_SOURCE_EMC_EMC_2X_CLK_SRC_SHIFT)

static const char * const emc_parent_clk_names[] = {
	"pll_m", "pll_c", "pll_p", "clk_m", "pll_m_ud",
	"pll_c2", "pll_c3", "pll_c_ud"
};

/*
 * List of clock sources for various parents the EMC clock can have.
 * When we change the timing to a timing with a parent that has the same
 * clock source as the current parent, we must first change to a backup
 * timing that has a different clock source.
 */

#define EMC_SRC_PLL_M 0
#define EMC_SRC_PLL_C 1
#define EMC_SRC_PLL_P 2
#define EMC_SRC_CLK_M 3
#define EMC_SRC_PLL_C2 4
#define EMC_SRC_PLL_C3 5

static const char emc_parent_clk_sources[] = {
	EMC_SRC_PLL_M, EMC_SRC_PLL_C, EMC_SRC_PLL_P, EMC_SRC_CLK_M,
	EMC_SRC_PLL_M, EMC_SRC_PLL_C2, EMC_SRC_PLL_C3, EMC_SRC_PLL_C
};

struct emc_timing {
	unsigned long rate, parent_rate;
	u8 parent_index;
	struct clk *parent;
	u32 ram_code;
};

struct tegra_clk_emc {
	struct clk_hw hw;
	void __iomem *clk_regs;
	struct clk *prev_parent;
	bool changing_timing;

	struct device_node *emc_node;
	struct tegra_emc *emc;

	int num_timings;
	struct emc_timing *timings;
	spinlock_t *lock;
};

/* Common clock framework callback implementations */

static unsigned long emc_recalc_rate(struct clk_hw *hw,
				     unsigned long parent_rate)
{
	struct tegra_clk_emc *tegra;
	u32 val, div;

	tegra = container_of(hw, struct tegra_clk_emc, hw);

	/*
	 * CCF wrongly assumes that the parent won't change during set_rate,
	 * so get the parent rate explicitly.
	 */
	parent_rate = clk_hw_get_rate(clk_hw_get_parent(hw));

	val = readl(tegra->clk_regs + CLK_SOURCE_EMC);
	div = val & CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR_MASK;

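	/*
	 * The divisor field is programmed in half steps:
	 * rate = parent_rate * 2 / (div + 2).
	 */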
	return parent_rate / (div + 2) * 2;
}

/*
 * Rounds up unless no higher rate exists, in which case down. This way is
 * safer since things have EMC rate floors. Also don't touch parent_rate
 * since we don't want the CCF to play with our parent clocks.
 */
static int emc_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	struct tegra_clk_emc *tegra;
	u8 ram_code = tegra_read_ram_code();
	struct emc_timing *timing = NULL;
	int i, k, t;

	tegra = container_of(hw, struct tegra_clk_emc, hw);

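	/* Find the range [k, t) of timings that belong to the active RAM code. */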
	for (k = 0; k < tegra->num_timings; k++) {
		if (tegra->timings[k].ram_code == ram_code)
			break;
	}

	for (t = k; t < tegra->num_timings; t++) {
		if (tegra->timings[t].ram_code != ram_code)
			break;
	}

	for (i = k; i < t; i++) {
		timing = tegra->timings + i;

		if (timing->rate < req->rate && i != t - 1)
			continue;

		if (timing->rate > req->max_rate) {
			i = max(i, k + 1);
			req->rate = tegra->timings[i - 1].rate;
			return 0;
		}

		if (timing->rate < req->min_rate)
			continue;

		req->rate = timing->rate;
		return 0;
	}

	if (timing) {
		req->rate = timing->rate;
		return 0;
	}

	req->rate = clk_hw_get_rate(hw);
	return 0;
}

static u8 emc_get_parent(struct clk_hw *hw)
{
	struct tegra_clk_emc *tegra;
	u32 val;

	tegra = container_of(hw, struct tegra_clk_emc, hw);

	val = readl(tegra->clk_regs + CLK_SOURCE_EMC);

	return (val >> CLK_SOURCE_EMC_EMC_2X_CLK_SRC_SHIFT)
		& CLK_SOURCE_EMC_EMC_2X_CLK_SRC_MASK;
}

static struct tegra_emc *emc_ensure_emc_driver(struct tegra_clk_emc *tegra)
{
	struct platform_device *pdev;

	if (tegra->emc)
		return tegra->emc;

	if (!tegra->emc_node)
		return NULL;

	pdev = of_find_device_by_node(tegra->emc_node);
	if (!pdev) {
		pr_err("%s: could not get external memory controller\n",
		       __func__);
		return NULL;
	}

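	/* The node was only needed to look up the device; drop the reference. */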
	of_node_put(tegra->emc_node);
	tegra->emc_node = NULL;

	tegra->emc = platform_get_drvdata(pdev);
	if (!tegra->emc) {
		put_device(&pdev->dev);
		pr_err("%s: cannot find EMC driver\n", __func__);
		return NULL;
	}

	return tegra->emc;
}

static int emc_set_timing(struct tegra_clk_emc *tegra,
			  struct emc_timing *timing)
{
	int err;
	u8 div;
	u32 car_value;
	unsigned long flags = 0;
	struct tegra_emc *emc = emc_ensure_emc_driver(tegra);

	if (!emc)
		return -ENOENT;

	pr_debug("going to rate %ld prate %ld p %s\n", timing->rate,
		 timing->parent_rate, __clk_get_name(timing->parent));

	if (emc_get_parent(&tegra->hw) == timing->parent_index &&
	    clk_get_rate(timing->parent) != timing->parent_rate) {
		WARN_ONCE(1, "parent %s rate mismatch %lu %lu\n",
			  __clk_get_name(timing->parent),
			  clk_get_rate(timing->parent),
			  timing->parent_rate);
		return -EINVAL;
	}

	tegra->changing_timing = true;

	err = clk_set_rate(timing->parent, timing->parent_rate);
	if (err) {
		pr_err("cannot change parent %s rate to %ld: %d\n",
		       __clk_get_name(timing->parent), timing->parent_rate,
		       err);

		return err;
	}

	err = clk_prepare_enable(timing->parent);
	if (err) {
		pr_err("cannot enable parent clock: %d\n", err);
		return err;
	}

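	/*
	 * Inverse of the recalc formula: choose div so that
	 * rate = parent_rate * 2 / (div + 2).
	 */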
	div = timing->parent_rate / (timing->rate / 2) - 2;

	err = tegra_emc_prepare_timing_change(emc, timing->rate);
	if (err)
		return err;

	spin_lock_irqsave(tegra->lock, flags);

	car_value = readl(tegra->clk_regs + CLK_SOURCE_EMC);

	car_value &= ~CLK_SOURCE_EMC_EMC_2X_CLK_SRC(~0);
	car_value |= CLK_SOURCE_EMC_EMC_2X_CLK_SRC(timing->parent_index);

	car_value &= ~CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR(~0);
	car_value |= CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR(div);

	writel(car_value, tegra->clk_regs + CLK_SOURCE_EMC);

	spin_unlock_irqrestore(tegra->lock, flags);

	tegra_emc_complete_timing_change(emc, timing->rate);

	clk_hw_reparent(&tegra->hw, __clk_get_hw(timing->parent));
	clk_disable_unprepare(tegra->prev_parent);

	tegra->prev_parent = timing->parent;
	tegra->changing_timing = false;

	return 0;
}

/*
 * Get backup timing to use as an intermediate step when a change between
 * two timings with the same clock source has been requested. First try to
 * find a timing with a higher clock rate to avoid a rate below any set rate
 * floors. If that is not possible, find a lower rate.
 */
static struct emc_timing *get_backup_timing(struct tegra_clk_emc *tegra,
					    int timing_index)
{
	int i;
	u32 ram_code = tegra_read_ram_code();
	struct emc_timing *timing;

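	/* Prefer a higher-rate timing that uses a different clock source. */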
	for (i = timing_index + 1; i < tegra->num_timings; i++) {
		timing = tegra->timings + i;
		if (timing->ram_code != ram_code)
			break;

		if (emc_parent_clk_sources[timing->parent_index] !=
		    emc_parent_clk_sources[
		      tegra->timings[timing_index].parent_index])
			return timing;
	}

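	/* Otherwise fall back to a lower-rate timing on a different clock source. */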
	for (i = timing_index - 1; i >= 0; --i) {
		timing = tegra->timings + i;
		if (timing->ram_code != ram_code)
			break;

		if (emc_parent_clk_sources[timing->parent_index] !=
		    emc_parent_clk_sources[
		      tegra->timings[timing_index].parent_index])
			return timing;
	}

	return NULL;
}

static int emc_set_rate(struct clk_hw *hw, unsigned long rate,
			unsigned long parent_rate)
{
	struct tegra_clk_emc *tegra;
	struct emc_timing *timing = NULL;
	int i, err;
	u32 ram_code = tegra_read_ram_code();

	tegra = container_of(hw, struct tegra_clk_emc, hw);

	if (clk_hw_get_rate(hw) == rate)
		return 0;

	/*
	 * When emc_set_timing changes the parent rate, CCF will propagate
	 * that downward to us, so ignore any set_rate calls while a rate
	 * change is already going on.
	 */
	if (tegra->changing_timing)
		return 0;

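	/* Find the timing that matches both the requested rate and the RAM code. */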
	for (i = 0; i < tegra->num_timings; i++) {
		if (tegra->timings[i].rate == rate &&
		    tegra->timings[i].ram_code == ram_code) {
			timing = tegra->timings + i;
			break;
		}
	}

	if (!timing) {
		pr_err("cannot switch to rate %ld without emc table\n", rate);
		return -EINVAL;
	}

	if (emc_parent_clk_sources[emc_get_parent(hw)] ==
	    emc_parent_clk_sources[timing->parent_index] &&
	    clk_get_rate(timing->parent) != timing->parent_rate) {
		/*
		 * Parent clock source not changed but parent rate has changed,
		 * need to temporarily switch to another parent
		 */

		struct emc_timing *backup_timing;

		backup_timing = get_backup_timing(tegra, i);
		if (!backup_timing) {
			pr_err("cannot find backup timing\n");
			return -EINVAL;
		}

		pr_debug("using %ld as backup rate when going to %ld\n",
			 backup_timing->rate, rate);

		err = emc_set_timing(tegra, backup_timing);
		if (err) {
			pr_err("cannot set backup timing: %d\n", err);
			return err;
		}
	}

	return emc_set_timing(tegra, timing);
}

/* Initialization and deinitialization */

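/*
 * Each timing is a child node of a per-RAM-code node in the device tree.
 * The layout read below looks roughly like this sketch (node names, rates
 * and phandles are illustrative only):
 *
 *	emc-timings-3 {
 *		nvidia,ram-code = <3>;
 *
 *		timing-204000000 {
 *			clock-frequency = <204000000>;
 *			nvidia,parent-clock-frequency = <408000000>;
 *			clocks = <&tegra_car TEGRA124_CLK_PLL_P>;
 *			clock-names = "emc-parent";
 *		};
 *	};
 */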
static int load_one_timing_from_dt(struct tegra_clk_emc *tegra,
				   struct emc_timing *timing,
				   struct device_node *node)
{
	int err, i;
	u32 tmp;

	err = of_property_read_u32(node, "clock-frequency", &tmp);
	if (err) {
		pr_err("timing %pOF: failed to read rate\n", node);
		return err;
	}

	timing->rate = tmp;

	err = of_property_read_u32(node, "nvidia,parent-clock-frequency", &tmp);
	if (err) {
		pr_err("timing %pOF: failed to read parent rate\n", node);
		return err;
	}

	timing->parent_rate = tmp;

	timing->parent = of_clk_get_by_name(node, "emc-parent");
	if (IS_ERR(timing->parent)) {
		pr_err("timing %pOF: failed to get parent clock\n", node);
		return PTR_ERR(timing->parent);
	}

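	/* Map the parent clock name to its mux index; 0xff means "not found". */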
	timing->parent_index = 0xff;
	for (i = 0; i < ARRAY_SIZE(emc_parent_clk_names); i++) {
		if (!strcmp(emc_parent_clk_names[i],
			    __clk_get_name(timing->parent))) {
			timing->parent_index = i;
			break;
		}
	}
	if (timing->parent_index == 0xff) {
		pr_err("timing %pOF: %s is not a valid parent\n",
		       node, __clk_get_name(timing->parent));
		clk_put(timing->parent);
		return -EINVAL;
	}

	return 0;
}

static int cmp_timings(const void *_a, const void *_b)
{
	const struct emc_timing *a = _a;
	const struct emc_timing *b = _b;

	if (a->rate < b->rate)
		return -1;
	else if (a->rate == b->rate)
		return 0;
	else
		return 1;
}

static int load_timings_from_dt(struct tegra_clk_emc *tegra,
				struct device_node *node,
				u32 ram_code)
{
	struct emc_timing *timings_ptr;
	struct device_node *child;
	int child_count = of_get_child_count(node);
	int i = 0, err;
	size_t size;

	size = (tegra->num_timings + child_count) * sizeof(struct emc_timing);

	tegra->timings = krealloc(tegra->timings, size, GFP_KERNEL);
	if (!tegra->timings)
		return -ENOMEM;

	timings_ptr = tegra->timings + tegra->num_timings;
	tegra->num_timings += child_count;

	for_each_child_of_node(node, child) {
		struct emc_timing *timing = timings_ptr + (i++);

		err = load_one_timing_from_dt(tegra, timing, child);
		if (err) {
			of_node_put(child);
			kfree(tegra->timings);
			return err;
		}

		timing->ram_code = ram_code;
	}

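	/*
	 * Sort this RAM code's timings by ascending rate so that
	 * emc_determine_rate() can scan them in order.
	 */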
	sort(timings_ptr, child_count, sizeof(struct emc_timing),
	     cmp_timings, NULL);

	return 0;
}

static const struct clk_ops tegra_clk_emc_ops = {
	.recalc_rate = emc_recalc_rate,
	.determine_rate = emc_determine_rate,
	.set_rate = emc_set_rate,
	.get_parent = emc_get_parent,
};

struct clk *tegra_clk_register_emc(void __iomem *base, struct device_node *np,
				   spinlock_t *lock)
{
	struct tegra_clk_emc *tegra;
	struct clk_init_data init;
	struct device_node *node;
	u32 node_ram_code;
	struct clk *clk;
	int err;

	tegra = kcalloc(1, sizeof(*tegra), GFP_KERNEL);
	if (!tegra)
		return ERR_PTR(-ENOMEM);

	tegra->clk_regs = base;
	tegra->lock = lock;

	tegra->num_timings = 0;

	for_each_child_of_node(np, node) {
		err = of_property_read_u32(node, "nvidia,ram-code",
					   &node_ram_code);
		if (err)
			continue;

		/*
		 * Store timings for all ram codes as we cannot read the
		 * fuses until the apbmisc driver is loaded.
		 */
		err = load_timings_from_dt(tegra, node, node_ram_code);
		if (err) {
			of_node_put(node);
			kfree(tegra);
			return ERR_PTR(err);
		}
	}

	if (tegra->num_timings == 0)
		pr_warn("%s: no memory timings registered\n", __func__);

	tegra->emc_node = of_parse_phandle(np,
			"nvidia,external-memory-controller", 0);
	if (!tegra->emc_node)
		pr_warn("%s: couldn't find node for EMC driver\n", __func__);

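	/*
	 * The EMC clock feeds external memory and must never be gated, so
	 * register it as a critical clock.
	 */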
	init.name = "emc";
	init.ops = &tegra_clk_emc_ops;
	init.flags = CLK_IS_CRITICAL;
	init.parent_names = emc_parent_clk_names;
	init.num_parents = ARRAY_SIZE(emc_parent_clk_names);

	tegra->hw.init = &init;

	clk = clk_register(NULL, &tegra->hw);
	if (IS_ERR(clk))
		return clk;

	tegra->prev_parent = clk_hw_get_parent_by_index(
		&tegra->hw, emc_get_parent(&tegra->hw))->clk;
	tegra->changing_timing = false;

	/* Allow debugging tools to see the EMC clock */
	clk_register_clkdev(clk, "emc", "tegra-clk-debug");

	return clk;
}