// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018, The Linux Foundation. All rights reserved.

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/slab.h>

#include "clk-krait.h"

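/*
 * Map a clk framework parent index to the hardware mux source-select
 * value (consumed through ->parent_map by the mux ops in clk-krait.c).
 */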
static unsigned int sec_mux_map[] = {
	2,
	0,
};

static unsigned int pri_mux_map[] = {
	1,
	2,
	0,
};

/*
 * Notifier function for switching the muxes to a safe parent
 * while the HFPLL is being reprogrammed.
 */
static int krait_notifier_cb(struct notifier_block *nb,
			     unsigned long event,
			     void *data)
{
	int ret = 0;
	struct krait_mux_clk *mux = container_of(nb, struct krait_mux_clk,
						 clk_nb);
	/* Switch to safe parent */
	if (event == PRE_RATE_CHANGE) {
		mux->old_index = krait_mux_clk_ops.get_parent(&mux->hw);
		ret = krait_mux_clk_ops.set_parent(&mux->hw, mux->safe_sel);
		mux->reparent = false;
	/*
	 * By the time the POST_RATE_CHANGE notifier is called, the clk
	 * framework itself will already have changed the parent for the
	 * new rate. Only otherwise do we put it back to the old parent.
	 */
	} else if (event == POST_RATE_CHANGE) {
		if (!mux->reparent)
			ret = krait_mux_clk_ops.set_parent(&mux->hw,
							   mux->old_index);
	}

	return notifier_from_errno(ret);
}

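/* Register the safe-parent switch notifier above on a mux clock. */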
static int krait_notifier_register(struct device *dev, struct clk *clk,
				   struct krait_mux_clk *mux)
{
	int ret = 0;

	mux->clk_nb.notifier_call = krait_notifier_cb;
	ret = clk_notifier_register(clk, &mux->clk_nb);
	if (ret)
		dev_err(dev, "failed to register clock notifier: %d\n", ret);

	return ret;
}

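/*
 * Register the divide-by-2 clock ("hfpllX_div" or "hfpll_l2_div") fed by
 * the corresponding HFPLL.
 */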
static int
krait_add_div(struct device *dev, int id, const char *s, unsigned int offset)
{
	struct krait_div2_clk *div;
	struct clk_init_data init = {
		.num_parents = 1,
		.ops = &krait_div2_clk_ops,
		.flags = CLK_SET_RATE_PARENT,
	};
	const char *p_names[1];
	struct clk *clk;

	div = devm_kzalloc(dev, sizeof(*div), GFP_KERNEL);
	if (!div)
		return -ENOMEM;

	div->width = 2;
	div->shift = 6;
	div->lpl = id >= 0;
	div->offset = offset;
	div->hw.init = &init;

	init.name = kasprintf(GFP_KERNEL, "hfpll%s_div", s);
	if (!init.name)
		return -ENOMEM;

	init.parent_names = p_names;
	p_names[0] = kasprintf(GFP_KERNEL, "hfpll%s", s);
	if (!p_names[0]) {
		kfree(init.name);
		return -ENOMEM;
	}

	clk = devm_clk_register(dev, &div->hw);
	kfree(p_names[0]);
	kfree(init.name);

	return PTR_ERR_OR_ZERO(clk);
}

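/*
 * Register the secondary mux ("kraitX_sec_mux") that selects between the
 * aux clock and QSB.
 */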
static int
krait_add_sec_mux(struct device *dev, int id, const char *s,
		  unsigned int offset, bool unique_aux)
{
	int ret;
	struct krait_mux_clk *mux;
	static const char *sec_mux_list[] = {
		"acpu_aux",
		"qsb",
	};
	struct clk_init_data init = {
		.parent_names = sec_mux_list,
		.num_parents = ARRAY_SIZE(sec_mux_list),
		.ops = &krait_mux_clk_ops,
		.flags = CLK_SET_RATE_PARENT,
	};
	struct clk *clk;

	mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
	if (!mux)
		return -ENOMEM;

	mux->offset = offset;
	mux->lpl = id >= 0;
	mux->mask = 0x3;
	mux->shift = 2;
	mux->parent_map = sec_mux_map;
	mux->hw.init = &init;
	mux->safe_sel = 0;

	/*
	 * Checking for qcom,krait-cc-v1 or qcom,krait-cc-v2 is not enough to
	 * limit this to apq/ipq8064. Directly check the machine compatible
	 * to correctly handle this erratum.
	 */
	if (of_machine_is_compatible("qcom,ipq8064") ||
	    of_machine_is_compatible("qcom,apq8064"))
		mux->disable_sec_src_gating = true;

	init.name = kasprintf(GFP_KERNEL, "krait%s_sec_mux", s);
	if (!init.name)
		return -ENOMEM;

	if (unique_aux) {
		sec_mux_list[0] = kasprintf(GFP_KERNEL, "acpu%s_aux", s);
		if (!sec_mux_list[0]) {
			clk = ERR_PTR(-ENOMEM);
			goto err_aux;
		}
	}

	clk = devm_clk_register(dev, &mux->hw);

	ret = krait_notifier_register(dev, clk, mux);
	if (ret)
		goto unique_aux;

unique_aux:
	if (unique_aux)
		kfree(sec_mux_list[0]);
err_aux:
	kfree(init.name);
	return PTR_ERR_OR_ZERO(clk);
}

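/*
 * Register the primary mux ("kraitX_pri_mux") that selects between the
 * HFPLL, the HFPLL/2 divider and the secondary mux output.
 */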
static struct clk *
krait_add_pri_mux(struct device *dev, int id, const char *s,
		  unsigned int offset)
{
	int ret;
	struct krait_mux_clk *mux;
	const char *p_names[3];
	struct clk_init_data init = {
		.parent_names = p_names,
		.num_parents = ARRAY_SIZE(p_names),
		.ops = &krait_mux_clk_ops,
		.flags = CLK_SET_RATE_PARENT,
	};
	struct clk *clk;

	mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
	if (!mux)
		return ERR_PTR(-ENOMEM);

	mux->mask = 0x3;
	mux->shift = 0;
	mux->offset = offset;
	mux->lpl = id >= 0;
	mux->parent_map = pri_mux_map;
	mux->hw.init = &init;
	mux->safe_sel = 2;

	init.name = kasprintf(GFP_KERNEL, "krait%s_pri_mux", s);
	if (!init.name)
		return ERR_PTR(-ENOMEM);

	p_names[0] = kasprintf(GFP_KERNEL, "hfpll%s", s);
	if (!p_names[0]) {
		clk = ERR_PTR(-ENOMEM);
		goto err_p0;
	}

	p_names[1] = kasprintf(GFP_KERNEL, "hfpll%s_div", s);
	if (!p_names[1]) {
		clk = ERR_PTR(-ENOMEM);
		goto err_p1;
	}

	p_names[2] = kasprintf(GFP_KERNEL, "krait%s_sec_mux", s);
	if (!p_names[2]) {
		clk = ERR_PTR(-ENOMEM);
		goto err_p2;
	}

	clk = devm_clk_register(dev, &mux->hw);

	ret = krait_notifier_register(dev, clk, mux);
	if (ret)
		goto err_p3;
err_p3:
	kfree(p_names[2]);
err_p2:
	kfree(p_names[1]);
err_p1:
	kfree(p_names[0]);
err_p0:
	kfree(init.name);
	return clk;
}

/* id < 0 for L2, otherwise id == physical CPU number */
static struct clk *krait_add_clks(struct device *dev, int id, bool unique_aux)
{
	int ret;
	unsigned int offset;
	void *p = NULL;
	const char *s;
	struct clk *clk;

	if (id >= 0) {
		offset = 0x4501 + (0x1000 * id);
		s = p = kasprintf(GFP_KERNEL, "%d", id);
		if (!s)
			return ERR_PTR(-ENOMEM);
	} else {
		offset = 0x500;
		s = "_l2";
	}

	ret = krait_add_div(dev, id, s, offset);
	if (ret) {
		clk = ERR_PTR(ret);
		goto err;
	}

	ret = krait_add_sec_mux(dev, id, s, offset, unique_aux);
	if (ret) {
		clk = ERR_PTR(ret);
		goto err;
	}

	clk = krait_add_pri_mux(dev, id, s, offset);
err:
	kfree(p);
	return clk;
}

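/* Clock provider callback: indices 0-3 are the CPU clocks, 4 is the L2. */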
static struct clk *krait_of_get(struct of_phandle_args *clkspec, void *data)
{
	unsigned int idx = clkspec->args[0];
	struct clk **clks = data;

	if (idx >= 5) {
		pr_err("%s: invalid clock index %d\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	return clks[idx] ? : ERR_PTR(-ENODEV);
}

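/* A non-NULL .data marks the v1 variant; see krait_cc_probe(). */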
static const struct of_device_id krait_cc_match_table[] = {
	{ .compatible = "qcom,krait-cc-v1", (void *)1UL },
	{ .compatible = "qcom,krait-cc-v2" },
	{}
};
MODULE_DEVICE_TABLE(of, krait_cc_match_table);

static int krait_cc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct of_device_id *id;
	unsigned long cur_rate, aux_rate;
	int cpu;
	struct clk *clk;
	struct clk **clks;
	struct clk *l2_pri_mux_clk;

	id = of_match_device(krait_cc_match_table, dev);
	if (!id)
		return -ENODEV;

	/* Rate is 1 because 0 causes problems for __clk_mux_determine_rate */
	clk = clk_register_fixed_rate(dev, "qsb", NULL, 0, 1);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

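	/*
	 * On v1 (non-NULL match data) each CPU gets its own "acpuX_aux"
	 * parent in krait_add_sec_mux(); on v2 a single "acpu_aux" clock at
	 * gpll0_vote/2 is registered here and shared by all the muxes.
	 */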
	if (!id->data) {
		clk = clk_register_fixed_factor(dev, "acpu_aux",
						"gpll0_vote", 0, 1, 2);
		if (IS_ERR(clk))
			return PTR_ERR(clk);
	}

	/* Krait configurations have at most 4 CPUs and one L2 */
	clks = devm_kcalloc(dev, 5, sizeof(*clks), GFP_KERNEL);
	if (!clks)
		return -ENOMEM;

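	/* Register the per-CPU clock trees: div, sec mux and pri mux */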
	for_each_possible_cpu(cpu) {
		clk = krait_add_clks(dev, cpu, id->data);
		if (IS_ERR(clk))
			return PTR_ERR(clk);
		clks[cpu] = clk;
	}

	l2_pri_mux_clk = krait_add_clks(dev, -1, id->data);
	if (IS_ERR(l2_pri_mux_clk))
		return PTR_ERR(l2_pri_mux_clk);
	clks[4] = l2_pri_mux_clk;

	/*
	 * We don't want the CPU or L2 clocks to be turned off at late init
	 * if CPUFREQ or HOTPLUG configs are disabled. So, bump up the
	 * refcount of these clocks. Any cpufreq/hotplug manager can assume
	 * that the clocks have already been prepared and enabled by the time
	 * they take over.
	 */
	for_each_online_cpu(cpu) {
		clk_prepare_enable(l2_pri_mux_clk);
		WARN(clk_prepare_enable(clks[cpu]),
		     "Unable to turn on CPU%d clock", cpu);
	}

	/*
	 * Force reinit of HFPLLs and muxes to overwrite any potential
	 * incorrect configuration of HFPLLs and muxes by the bootloader.
	 * While at it, also make sure the cores are running at known rates
	 * and print the current rate.
	 *
	 * The clocks are set to aux clock rate first to make sure the
	 * secondary mux is not sourcing off of QSB. The rate is then set to
	 * two different rates to force a HFPLL reinit under all
	 * circumstances.
	 */
	cur_rate = clk_get_rate(l2_pri_mux_clk);
	aux_rate = 384000000;
	if (cur_rate == 1) {
		pr_info("L2 @ QSB rate. Forcing new rate.\n");
		cur_rate = aux_rate;
	}
	clk_set_rate(l2_pri_mux_clk, aux_rate);
	clk_set_rate(l2_pri_mux_clk, 2);
	clk_set_rate(l2_pri_mux_clk, cur_rate);
	pr_info("L2 @ %lu KHz\n", clk_get_rate(l2_pri_mux_clk) / 1000);
	for_each_possible_cpu(cpu) {
		clk = clks[cpu];
		cur_rate = clk_get_rate(clk);
		if (cur_rate == 1) {
			pr_info("CPU%d @ QSB rate. Forcing new rate.\n", cpu);
			cur_rate = aux_rate;
		}

		clk_set_rate(clk, aux_rate);
		clk_set_rate(clk, 2);
		clk_set_rate(clk, cur_rate);
		pr_info("CPU%d @ %lu KHz\n", cpu, clk_get_rate(clk) / 1000);
	}

	of_clk_add_provider(dev->of_node, krait_of_get, clks);

	return 0;
}

static struct platform_driver krait_cc_driver = {
	.probe = krait_cc_probe,
	.driver = {
		.name = "krait-cc",
		.of_match_table = krait_cc_match_table,
	},
};
module_platform_driver(krait_cc_driver);

MODULE_DESCRIPTION("Krait CPU Clock Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:krait-cc");