• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2022, 2023 Linaro Ltd.
4  */
5 #include <linux/bitfield.h>
6 #include <linux/clk.h>
7 #include <linux/clk-provider.h>
8 #include <linux/interconnect-clk.h>
9 #include <linux/interconnect-provider.h>
10 #include <linux/of.h>
11 #include <linux/module.h>
12 #include <linux/platform_device.h>
13 #include <linux/regmap.h>
14 
15 #include <dt-bindings/interconnect/qcom,msm8996-cbf.h>
16 
17 #include "clk-alpha-pll.h"
18 #include "clk-regmap.h"
19 
20 /* Need to match the order of clocks in DT binding */
enum {
	DT_XO,		/* clocks[0] in the DT binding */
	DT_APCS_AUX,	/* clocks[1] in the DT binding */
};

/* Parent indices of the CBF mux; must match cbf_mux_parent_data[] order. */
enum {
	CBF_XO_INDEX,
	CBF_PLL_INDEX,
	CBF_DIV_INDEX,
	CBF_APCS_AUX_INDEX,
};
32 
/* Below this rate the mux is fed from the PLL post-divider instead of the PLL */
#define DIV_THRESHOLD		600000000

#define CBF_MUX_OFFSET		0x18
/* Bits [1:0]: mux parent selection, see CBF_*_INDEX */
#define CBF_MUX_PARENT_MASK		GENMASK(1, 0)
/* Bits [5:4]: always-on source used while automatic clock selection is active */
#define CBF_MUX_AUTO_CLK_SEL_ALWAYS_ON_MASK GENMASK(5, 4)
#define CBF_MUX_AUTO_CLK_SEL_ALWAYS_ON_GPLL0_SEL \
	FIELD_PREP(CBF_MUX_AUTO_CLK_SEL_ALWAYS_ON_MASK, 0x03)
/* Bit 6: enable automatic clock selection */
#define CBF_MUX_AUTO_CLK_SEL_BIT	BIT(6)

/* Base of the CBF PLL register block within the mapped MMIO region */
#define CBF_PLL_OFFSET 0xf000
43 
/* Alpha PLL register layout for the CBF PLL, offsets relative to CBF_PLL_OFFSET */
static const u8 cbf_pll_regs[PLL_OFF_MAX_REGS] = {
	[PLL_OFF_L_VAL] = 0x08,
	[PLL_OFF_ALPHA_VAL] = 0x10,
	[PLL_OFF_USER_CTL] = 0x18,
	[PLL_OFF_CONFIG_CTL] = 0x20,
	[PLL_OFF_CONFIG_CTL_U] = 0x24,
	[PLL_OFF_TEST_CTL] = 0x30,
	[PLL_OFF_TEST_CTL_U] = 0x34,
	[PLL_OFF_STATUS] = 0x28,
};
54 
/*
 * Initial CBF PLL configuration applied in probe. Deliberately non-const:
 * the msm8996pro path in qcom_msm8996_cbf_probe() overrides post_div_val.
 */
static struct alpha_pll_config cbfpll_config = {
	.l = 72,
	.config_ctl_val = 0x200d4828,
	.config_ctl_hi_val = 0x006,
	.test_ctl_val = 0x1c000000,
	.test_ctl_hi_val = 0x00004000,
	.pre_div_mask = BIT(12),
	.post_div_mask = 0x3 << 8,
	.post_div_val = 0x1 << 8,	/* post-divider setting; pro variant uses 0x3 << 8 */
	.main_output_mask = BIT(0),
	.early_output_mask = BIT(3),
};
67 
/* The CBF PLL itself, clocked from the board XO, driven via FSM-mode ops */
static struct clk_alpha_pll cbf_pll = {
	.offset = CBF_PLL_OFFSET,
	.regs = cbf_pll_regs,
	.flags = SUPPORTS_DYNAMIC_UPDATE | SUPPORTS_FSM_MODE,
	.clkr.hw.init = &(struct clk_init_data){
		.name = "cbf_pll",
		.parent_data = (const struct clk_parent_data[]) {
			{ .index = DT_XO, },
		},
		.num_parents = 1,
		.ops = &clk_alpha_pll_hwfsm_ops,
	},
};
81 
/*
 * Fixed-factor divider on the PLL output (PLL/2). The msm8996pro path in
 * qcom_msm8996_cbf_probe() changes .div to 4 before the clock is registered.
 */
static struct clk_fixed_factor cbf_pll_postdiv = {
	.mult = 1,
	.div = 2,
	.hw.init = &(struct clk_init_data){
		.name = "cbf_pll_postdiv",
		.parent_hws = (const struct clk_hw*[]){
			&cbf_pll.clkr.hw
		},
		.num_parents = 1,
		.ops = &clk_fixed_factor_ops,
		.flags = CLK_SET_RATE_PARENT,
	},
};
95 
/* CBF mux parents; entry order must match the CBF_*_INDEX enum above */
static const struct clk_parent_data cbf_mux_parent_data[] = {
	{ .index = DT_XO },			/* CBF_XO_INDEX */
	{ .hw = &cbf_pll.clkr.hw },		/* CBF_PLL_INDEX */
	{ .hw = &cbf_pll_postdiv.hw },		/* CBF_DIV_INDEX */
	{ .index = DT_APCS_AUX },		/* CBF_APCS_AUX_INDEX */
};
102 
/*
 * CBF mux clock: a regmap-backed mux plus the clock notifier used to park
 * the mux on a safe parent while the PLL rate changes (see cbf_clk_notifier_cb).
 */
struct clk_cbf_8996_mux {
	u32 reg;			/* mux register offset (CBF_MUX_OFFSET) */
	struct notifier_block nb;	/* rate-change notifier */
	struct clk_regmap clkr;
};
108 
/* Retrieve the wrapping clk_cbf_8996_mux from its embedded clk_regmap. */
static struct clk_cbf_8996_mux *to_clk_cbf_8996_mux(struct clk_regmap *clkr)
{
	return container_of(clkr, struct clk_cbf_8996_mux, clkr);
}
113 
/* Forward declaration: cbf_mux's initializer below references the callback. */
static int cbf_clk_notifier_cb(struct notifier_block *nb, unsigned long event,
			       void *data);
116 
clk_cbf_8996_mux_get_parent(struct clk_hw * hw)117 static u8 clk_cbf_8996_mux_get_parent(struct clk_hw *hw)
118 {
119 	struct clk_regmap *clkr = to_clk_regmap(hw);
120 	struct clk_cbf_8996_mux *mux = to_clk_cbf_8996_mux(clkr);
121 	u32 val;
122 
123 	regmap_read(clkr->regmap, mux->reg, &val);
124 
125 	return FIELD_GET(CBF_MUX_PARENT_MASK, val);
126 }
127 
/* Program the parent-select field of the CBF mux register. */
static int clk_cbf_8996_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_cbf_8996_mux *mux = to_clk_cbf_8996_mux(to_clk_regmap(hw));

	return regmap_update_bits(mux->clkr.regmap, mux->reg,
				  CBF_MUX_PARENT_MASK,
				  FIELD_PREP(CBF_MUX_PARENT_MASK, index));
}
138 
clk_cbf_8996_mux_determine_rate(struct clk_hw * hw,struct clk_rate_request * req)139 static int clk_cbf_8996_mux_determine_rate(struct clk_hw *hw,
140 					   struct clk_rate_request *req)
141 {
142 	struct clk_hw *parent;
143 
144 	if (req->rate < (DIV_THRESHOLD / cbf_pll_postdiv.div))
145 		return -EINVAL;
146 
147 	if (req->rate < DIV_THRESHOLD)
148 		parent = clk_hw_get_parent_by_index(hw, CBF_DIV_INDEX);
149 	else
150 		parent = clk_hw_get_parent_by_index(hw, CBF_PLL_INDEX);
151 
152 	if (!parent)
153 		return -EINVAL;
154 
155 	req->best_parent_rate = clk_hw_round_rate(parent, req->rate);
156 	req->best_parent_hw = parent;
157 
158 	return 0;
159 }
160 
/* clk_ops for the CBF mux; parent choice is driven by determine_rate */
static const struct clk_ops clk_cbf_8996_mux_ops = {
	.set_parent = clk_cbf_8996_mux_set_parent,
	.get_parent = clk_cbf_8996_mux_get_parent,
	.determine_rate = clk_cbf_8996_mux_determine_rate,
};
166 
/* The CBF mux instance; its notifier parks the mux during PLL rate changes */
static struct clk_cbf_8996_mux cbf_mux = {
	.reg = CBF_MUX_OFFSET,
	.nb.notifier_call = cbf_clk_notifier_cb,
	.clkr.hw.init = &(struct clk_init_data) {
		.name = "cbf_mux",
		.parent_data = cbf_mux_parent_data,
		.num_parents = ARRAY_SIZE(cbf_mux_parent_data),
		.ops = &clk_cbf_8996_mux_ops,
		/* CPU clock is critical and should never be gated */
		.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
	},
};
179 
cbf_clk_notifier_cb(struct notifier_block * nb,unsigned long event,void * data)180 static int cbf_clk_notifier_cb(struct notifier_block *nb, unsigned long event,
181 			       void *data)
182 {
183 	struct clk_notifier_data *cnd = data;
184 
185 	switch (event) {
186 	case PRE_RATE_CHANGE:
187 		/*
188 		 * Avoid overvolting. clk_core_set_rate_nolock() walks from top
189 		 * to bottom, so it will change the rate of the PLL before
190 		 * chaging the parent of PMUX. This can result in pmux getting
191 		 * clocked twice the expected rate.
192 		 *
193 		 * Manually switch to PLL/2 here.
194 		 */
195 		if (cnd->old_rate > DIV_THRESHOLD &&
196 		    cnd->new_rate < DIV_THRESHOLD)
197 			clk_cbf_8996_mux_set_parent(&cbf_mux.clkr.hw, CBF_DIV_INDEX);
198 		break;
199 	case ABORT_RATE_CHANGE:
200 		/* Revert manual change */
201 		if (cnd->new_rate < DIV_THRESHOLD &&
202 		    cnd->old_rate > DIV_THRESHOLD)
203 			clk_cbf_8996_mux_set_parent(&cbf_mux.clkr.hw, CBF_PLL_INDEX);
204 		break;
205 	default:
206 		break;
207 	}
208 
209 	return notifier_from_errno(0);
210 };
211 
/* Plain clk_hw clocks registered via devm_clk_hw_register() in probe */
static struct clk_hw *cbf_msm8996_hw_clks[] = {
	&cbf_pll_postdiv.hw,
};
215 
/* Regmap-backed clocks registered via devm_clk_register_regmap() in probe */
static struct clk_regmap *cbf_msm8996_clks[] = {
	&cbf_pll.clkr,
	&cbf_mux.clkr,
};
220 
/* MMIO regmap covering the whole CBF block (mux at 0x18, PLL at 0xf000) */
static const struct regmap_config cbf_msm8996_regmap_config = {
	.reg_bits		= 32,
	.reg_stride		= 4,
	.val_bits		= 32,
	.max_register		= 0x10000,
	.fast_io		= true,
	.val_format_endian	= REGMAP_ENDIAN_LITTLE,
};
229 
230 #ifdef CONFIG_INTERCONNECT
231 
232 /* Random ID that doesn't clash with main qnoc and OSM */
233 #define CBF_MASTER_NODE 2000
234 
qcom_msm8996_cbf_icc_register(struct platform_device * pdev,struct clk_hw * cbf_hw)235 static int qcom_msm8996_cbf_icc_register(struct platform_device *pdev, struct clk_hw *cbf_hw)
236 {
237 	struct device *dev = &pdev->dev;
238 	struct clk *clk = devm_clk_hw_get_clk(dev, cbf_hw, "cbf");
239 	const struct icc_clk_data data[] = {
240 		{ .clk = clk, .name = "cbf", },
241 	};
242 	struct icc_provider *provider;
243 
244 	provider = icc_clk_register(dev, CBF_MASTER_NODE, ARRAY_SIZE(data), data);
245 	if (IS_ERR(provider))
246 		return PTR_ERR(provider);
247 
248 	platform_set_drvdata(pdev, provider);
249 
250 	return 0;
251 }
252 
/* Unregister the interconnect provider created in probe. Always succeeds. */
static int qcom_msm8996_cbf_icc_remove(struct platform_device *pdev)
{
	icc_clk_unregister(platform_get_drvdata(pdev));

	return 0;
}
261 #define qcom_msm8996_cbf_icc_sync_state icc_sync_state
262 #else
/* Interconnect support compiled out: CBF stays at the rate set in probe. */
static int qcom_msm8996_cbf_icc_register(struct platform_device *pdev,  struct clk_hw *cbf_hw)
{
	dev_warn(&pdev->dev, "CONFIG_INTERCONNECT is disabled, CBF clock is fixed\n");

	return 0;
}
269 #define qcom_msm8996_cbf_icc_remove(pdev) (0)
270 #define qcom_msm8996_cbf_icc_sync_state NULL
271 #endif
272 
/*
 * Probe: map the CBF region, bring the clock hierarchy up in a safe order
 * (park the mux on GPLL0, configure the PLL, enable auto selection, switch
 * to the PLL), then register clocks, the rate-change notifier, the clock
 * provider and the interconnect proxy. All registrations are devm-managed.
 */
static int qcom_msm8996_cbf_probe(struct platform_device *pdev)
{
	void __iomem *base;
	struct regmap *regmap;
	struct device *dev = &pdev->dev;
	int i, ret;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	regmap = devm_regmap_init_mmio(dev, base, &cbf_msm8996_regmap_config);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	/* Select GPLL0 for 300MHz for the CBF clock */
	regmap_write(regmap, CBF_MUX_OFFSET, 0x3);

	/* Ensure write goes through before PLLs are reconfigured */
	udelay(5);

	/* Set the auto clock sel always-on source to GPLL0/2 (300MHz) */
	regmap_update_bits(regmap, CBF_MUX_OFFSET,
			   CBF_MUX_AUTO_CLK_SEL_ALWAYS_ON_MASK,
			   CBF_MUX_AUTO_CLK_SEL_ALWAYS_ON_GPLL0_SEL);

	clk_alpha_pll_configure(&cbf_pll, regmap, &cbfpll_config);

	/* Wait for PLL(s) to lock */
	udelay(50);

	/* Enable auto clock selection for CBF */
	regmap_update_bits(regmap, CBF_MUX_OFFSET,
			   CBF_MUX_AUTO_CLK_SEL_BIT,
			   CBF_MUX_AUTO_CLK_SEL_BIT);

	/* Ensure write goes through before muxes are switched */
	udelay(5);

	/* Switch CBF to use the primary PLL */
	regmap_update_bits(regmap, CBF_MUX_OFFSET, CBF_MUX_PARENT_MASK, 0x1);

	/*
	 * msm8996pro uses a deeper post-divider (PLL/4 instead of PLL/2).
	 * NOTE(review): this runs after clk_alpha_pll_configure() above, so
	 * the updated post_div_val is not applied by that call — confirm the
	 * PLL is reconfigured later (e.g. on rate changes) or whether this
	 * override should precede the configure call.
	 */
	if (of_device_is_compatible(dev->of_node, "qcom,msm8996pro-cbf")) {
		cbfpll_config.post_div_val = 0x3 << 8;
		cbf_pll_postdiv.div = 4;
	}

	/* Register the fixed-factor post-divider */
	for (i = 0; i < ARRAY_SIZE(cbf_msm8996_hw_clks); i++) {
		ret = devm_clk_hw_register(dev, cbf_msm8996_hw_clks[i]);
		if (ret)
			return ret;
	}

	/* Register the regmap-backed clocks (PLL and mux) */
	for (i = 0; i < ARRAY_SIZE(cbf_msm8996_clks); i++) {
		ret = devm_clk_register_regmap(dev, cbf_msm8996_clks[i]);
		if (ret)
			return ret;
	}

	/* Park the mux on PLL/2 around downward rate changes */
	ret = devm_clk_notifier_register(dev, cbf_mux.clkr.hw.clk, &cbf_mux.nb);
	if (ret)
		return ret;

	ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, &cbf_mux.clkr.hw);
	if (ret)
		return ret;

	return qcom_msm8996_cbf_icc_register(pdev, &cbf_mux.clkr.hw);
}
342 
/* Remove: only the interconnect provider needs teardown; clocks are devm-managed. */
static int qcom_msm8996_cbf_remove(struct platform_device *pdev)
{
	return qcom_msm8996_cbf_icc_remove(pdev);
}
347 
/* Supported compatibles; the pro variant gets a different PLL post-divider */
static const struct of_device_id qcom_msm8996_cbf_match_table[] = {
	{ .compatible = "qcom,msm8996-cbf" },
	{ .compatible = "qcom,msm8996pro-cbf" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, qcom_msm8996_cbf_match_table);
354 
static struct platform_driver qcom_msm8996_cbf_driver = {
	.probe = qcom_msm8996_cbf_probe,
	.remove = qcom_msm8996_cbf_remove,
	.driver = {
		.name = "qcom-msm8996-cbf",
		.of_match_table = qcom_msm8996_cbf_match_table,
		/* Interconnect sync_state (NULL when CONFIG_INTERCONNECT=n) */
		.sync_state = qcom_msm8996_cbf_icc_sync_state,
	},
};
364 
/* Register early enough to fix the clock to be used for other cores */
static int __init qcom_msm8996_cbf_init(void)
{
	return platform_driver_register(&qcom_msm8996_cbf_driver);
}
postcore_initcall(qcom_msm8996_cbf_init);
371 
/* Module unload: unregister the platform driver. */
static void __exit qcom_msm8996_cbf_exit(void)
{
	platform_driver_unregister(&qcom_msm8996_cbf_driver);
}
module_exit(qcom_msm8996_cbf_exit);

MODULE_DESCRIPTION("QCOM MSM8996 CPU Bus Fabric Clock Driver");
MODULE_LICENSE("GPL");
380