// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
 *
 * Authors:
 *   Serge Semin <Sergey.Semin@baikalelectronics.ru>
 *   Dmitry Dunaev <dmitry.dunaev@baikalelectronics.ru>
 *
 * Baikal-T1 CCU Dividers interface driver
 */

#define pr_fmt(fmt) "bt1-ccu-div: " fmt

#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/slab.h>
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/spinlock.h>
#include <linux/regmap.h>
#include <linux/delay.h>
#include <linux/time64.h>
#include <linux/debugfs.h>

#include "ccu-div.h"

#define CCU_DIV_CTL			0x00
#define CCU_DIV_CTL_EN			BIT(0)
#define CCU_DIV_CTL_RST			BIT(1)
#define CCU_DIV_CTL_SET_CLKDIV		BIT(2)
#define CCU_DIV_CTL_CLKDIV_FLD		4
#define CCU_DIV_CTL_CLKDIV_MASK(_width) \
	GENMASK((_width) + CCU_DIV_CTL_CLKDIV_FLD - 1, CCU_DIV_CTL_CLKDIV_FLD)
#define CCU_DIV_CTL_LOCK_SHIFTED	BIT(27)
#define CCU_DIV_CTL_GATE_REF_BUF	BIT(28)
#define CCU_DIV_CTL_LOCK_NORMAL		BIT(31)

#define CCU_DIV_RST_DELAY_US		1
#define CCU_DIV_LOCK_CHECK_RETRIES	50

#define CCU_DIV_CLKDIV_MIN		0
#define CCU_DIV_CLKDIV_MAX(_mask) \
	((_mask) >> CCU_DIV_CTL_CLKDIV_FLD)
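
/*
 * For example, a 4-bit CLKDIV field gives CCU_DIV_CTL_CLKDIV_MASK(4) ==
 * GENMASK(7, 4) == 0xf0 and CCU_DIV_CLKDIV_MAX(0xf0) == 0xf, so such a
 * divider can be programmed with values 0..15 (illustrative width).
 */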

/*
 * Use the next two methods until generic field setter and getter
 * helpers with non-constant mask support become available.
 */
static inline u32 ccu_div_get(u32 mask, u32 val)
{
	return (val & mask) >> CCU_DIV_CTL_CLKDIV_FLD;
}

static inline u32 ccu_div_prep(u32 mask, u32 val)
{
	return (val << CCU_DIV_CTL_CLKDIV_FLD) & mask;
}

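/*
 * Return the duration of four periods of the divided clock in ns. It is
 * used as the delay quantum when polling for the divider lock below.
 * For instance (illustrative numbers), ref_clk = 25 MHz and div = 2 give
 * 4 * 2 * 1e9 / 25e6 = 320 ns.
 */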
static inline unsigned long ccu_div_lock_delay_ns(unsigned long ref_clk,
						  unsigned long div)
{
	u64 ns = 4ULL * (div ?: 1) * NSEC_PER_SEC;

	do_div(ns, ref_clk);

	return ns;
}

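/* Calculate the output rate. A zero divider is treated as divide-by-one. */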
static inline unsigned long ccu_div_calc_freq(unsigned long ref_clk,
					      unsigned long div)
{
	return ref_clk / (div ?: 1);
}

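/*
 * Command the hardware to apply the just-written CLKDIV value (SET_CLKDIV
 * bit) and poll for the divider lock. The worst-case wait is
 * CCU_DIV_LOCK_CHECK_RETRIES delays of four divided-clock periods each.
 */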
static int ccu_div_var_update_clkdiv(struct ccu_div *div,
				     unsigned long parent_rate,
				     unsigned long divider)
{
	unsigned long nd;
	u32 val = 0;
	u32 lock;
	int count;

	nd = ccu_div_lock_delay_ns(parent_rate, divider);

	if (div->features & CCU_DIV_LOCK_SHIFTED)
		lock = CCU_DIV_CTL_LOCK_SHIFTED;
	else
		lock = CCU_DIV_CTL_LOCK_NORMAL;

	regmap_update_bits(div->sys_regs, div->reg_ctl,
			   CCU_DIV_CTL_SET_CLKDIV, CCU_DIV_CTL_SET_CLKDIV);

	/*
	 * Until an nsec version of readl_poll_timeout() is available we
	 * have to implement the polling loop by hand.
	 */
	count = CCU_DIV_LOCK_CHECK_RETRIES;
	do {
		ndelay(nd);
		regmap_read(div->sys_regs, div->reg_ctl, &val);
		if (val & lock)
			return 0;
	} while (--count);

	return -ETIMEDOUT;
}

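/*
 * Enabling a variable divider requires the divider to (re-)lock at the
 * current CLKDIV setting before the EN bit may be set. An already enabled
 * divider is left untouched.
 */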
static int ccu_div_var_enable(struct clk_hw *hw)
{
	struct clk_hw *parent_hw = clk_hw_get_parent(hw);
	struct ccu_div *div = to_ccu_div(hw);
	unsigned long flags;
	u32 val = 0;
	int ret;

	if (!parent_hw) {
		pr_err("Can't enable '%s' with no parent\n", clk_hw_get_name(hw));
		return -EINVAL;
	}

	regmap_read(div->sys_regs, div->reg_ctl, &val);
	if (val & CCU_DIV_CTL_EN)
		return 0;

	spin_lock_irqsave(&div->lock, flags);
	ret = ccu_div_var_update_clkdiv(div, clk_hw_get_rate(parent_hw),
					ccu_div_get(div->mask, val));
	if (!ret)
		regmap_update_bits(div->sys_regs, div->reg_ctl,
				   CCU_DIV_CTL_EN, CCU_DIV_CTL_EN);
	spin_unlock_irqrestore(&div->lock, flags);
	if (ret)
		pr_err("Divider '%s' lock timed out\n", clk_hw_get_name(hw));

	return ret;
}

static int ccu_div_gate_enable(struct clk_hw *hw)
{
	struct ccu_div *div = to_ccu_div(hw);
	unsigned long flags;

	spin_lock_irqsave(&div->lock, flags);
	regmap_update_bits(div->sys_regs, div->reg_ctl,
			   CCU_DIV_CTL_EN, CCU_DIV_CTL_EN);
	spin_unlock_irqrestore(&div->lock, flags);

	return 0;
}

static void ccu_div_gate_disable(struct clk_hw *hw)
{
	struct ccu_div *div = to_ccu_div(hw);
	unsigned long flags;

	spin_lock_irqsave(&div->lock, flags);
	regmap_update_bits(div->sys_regs, div->reg_ctl, CCU_DIV_CTL_EN, 0);
	spin_unlock_irqrestore(&div->lock, flags);
}

static int ccu_div_gate_is_enabled(struct clk_hw *hw)
{
	struct ccu_div *div = to_ccu_div(hw);
	u32 val = 0;

	regmap_read(div->sys_regs, div->reg_ctl, &val);

	return !!(val & CCU_DIV_CTL_EN);
}

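/*
 * Note the reference buffer gate has inverted semantics: the buffer is
 * enabled by clearing CCU_DIV_CTL_GATE_REF_BUF and disabled by setting it.
 */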
static int ccu_div_buf_enable(struct clk_hw *hw)
{
	struct ccu_div *div = to_ccu_div(hw);
	unsigned long flags;

	spin_lock_irqsave(&div->lock, flags);
	regmap_update_bits(div->sys_regs, div->reg_ctl,
			   CCU_DIV_CTL_GATE_REF_BUF, 0);
	spin_unlock_irqrestore(&div->lock, flags);

	return 0;
}

static void ccu_div_buf_disable(struct clk_hw *hw)
{
	struct ccu_div *div = to_ccu_div(hw);
	unsigned long flags;

	spin_lock_irqsave(&div->lock, flags);
	regmap_update_bits(div->sys_regs, div->reg_ctl,
			   CCU_DIV_CTL_GATE_REF_BUF, CCU_DIV_CTL_GATE_REF_BUF);
	spin_unlock_irqrestore(&div->lock, flags);
}

static int ccu_div_buf_is_enabled(struct clk_hw *hw)
{
	struct ccu_div *div = to_ccu_div(hw);
	u32 val = 0;

	regmap_read(div->sys_regs, div->reg_ctl, &val);

	return !(val & CCU_DIV_CTL_GATE_REF_BUF);
}

static unsigned long ccu_div_var_recalc_rate(struct clk_hw *hw,
					     unsigned long parent_rate)
{
	struct ccu_div *div = to_ccu_div(hw);
	unsigned long divider;
	u32 val = 0;

	regmap_read(div->sys_regs, div->reg_ctl, &val);
	divider = ccu_div_get(div->mask, val);

	return ccu_div_calc_freq(parent_rate, divider);
}

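/*
 * The divider is found by integer division, so it is rounded down and,
 * for achievable rates, the resulting frequency is never lower than the
 * requested one. The value is then clamped to the CLKDIV field range.
 */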
static inline unsigned long ccu_div_var_calc_divider(unsigned long rate,
						     unsigned long parent_rate,
						     unsigned int mask)
{
	unsigned long divider;

	divider = parent_rate / rate;
	return clamp_t(unsigned long, divider, CCU_DIV_CLKDIV_MIN,
		       CCU_DIV_CLKDIV_MAX(mask));
}

static long ccu_div_var_round_rate(struct clk_hw *hw, unsigned long rate,
				   unsigned long *parent_rate)
{
	struct ccu_div *div = to_ccu_div(hw);
	unsigned long divider;

	divider = ccu_div_var_calc_divider(rate, *parent_rate, div->mask);

	return ccu_div_calc_freq(*parent_rate, divider);
}

/*
 * This method is used for the clock divider blocks which support
 * on-the-fly rate change. Since they lack the EN bit functionality,
 * they can't be gated before the rate adjustment.
 */
static int ccu_div_var_set_rate_slow(struct clk_hw *hw, unsigned long rate,
				     unsigned long parent_rate)
{
	struct ccu_div *div = to_ccu_div(hw);
	unsigned long flags, divider;
	u32 val;
	int ret;

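	/*
	 * Some divider blocks don't support small CLKDIV values (that's
	 * what the CCU_DIV_SKIP_* feature flags denote), so fall back to
	 * zero (treated as divide-by-one) or round up to the next
	 * supported value.
	 */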
	divider = ccu_div_var_calc_divider(rate, parent_rate, div->mask);
	if (divider == 1 && div->features & CCU_DIV_SKIP_ONE) {
		divider = 0;
	} else if (div->features & CCU_DIV_SKIP_ONE_TO_THREE) {
		if (divider == 1 || divider == 2)
			divider = 0;
		else if (divider == 3)
			divider = 4;
	}

	val = ccu_div_prep(div->mask, divider);

	spin_lock_irqsave(&div->lock, flags);
	regmap_update_bits(div->sys_regs, div->reg_ctl, div->mask, val);
	ret = ccu_div_var_update_clkdiv(div, parent_rate, divider);
	spin_unlock_irqrestore(&div->lock, flags);
	if (ret)
		pr_err("Divider '%s' lock timed out\n", clk_hw_get_name(hw));

	return ret;
}

/*
 * This method is used for the clock divider blocks which don't support
 * on-the-fly rate change.
 */
static int ccu_div_var_set_rate_fast(struct clk_hw *hw, unsigned long rate,
				     unsigned long parent_rate)
{
	struct ccu_div *div = to_ccu_div(hw);
	unsigned long flags, divider;
	u32 val;

	divider = ccu_div_var_calc_divider(rate, parent_rate, div->mask);
	val = ccu_div_prep(div->mask, divider);

	/*
	 * Also disable the clock divider block if it was enabled by default
	 * or by the bootloader.
	 */
	spin_lock_irqsave(&div->lock, flags);
	regmap_update_bits(div->sys_regs, div->reg_ctl,
			   div->mask | CCU_DIV_CTL_EN, val);
	spin_unlock_irqrestore(&div->lock, flags);

	return 0;
}

static unsigned long ccu_div_fixed_recalc_rate(struct clk_hw *hw,
					       unsigned long parent_rate)
{
	struct ccu_div *div = to_ccu_div(hw);

	return ccu_div_calc_freq(parent_rate, div->divider);
}

static long ccu_div_fixed_round_rate(struct clk_hw *hw, unsigned long rate,
				     unsigned long *parent_rate)
{
	struct ccu_div *div = to_ccu_div(hw);

	return ccu_div_calc_freq(*parent_rate, div->divider);
}

static int ccu_div_fixed_set_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long parent_rate)
{
	return 0;
}

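/*
 * Assert the divider domain reset and give it CCU_DIV_RST_DELAY_US to
 * settle. Only valid for dividers with the CCU_DIV_RESET_DOMAIN feature.
 */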
int ccu_div_reset_domain(struct ccu_div *div)
{
	unsigned long flags;

	if (!div || !(div->features & CCU_DIV_RESET_DOMAIN))
		return -EINVAL;

	spin_lock_irqsave(&div->lock, flags);
	regmap_update_bits(div->sys_regs, div->reg_ctl,
			   CCU_DIV_CTL_RST, CCU_DIV_CTL_RST);
	spin_unlock_irqrestore(&div->lock, flags);

	/* This delay must be long enough to cover all the resets. */
	udelay(CCU_DIV_RST_DELAY_US);

	return 0;
}

#ifdef CONFIG_DEBUG_FS

struct ccu_div_dbgfs_bit {
	struct ccu_div *div;
	const char *name;
	u32 mask;
};

#define CCU_DIV_DBGFS_BIT_ATTR(_name, _mask) {	\
		.name = _name,			\
		.mask = _mask			\
	}

static const struct ccu_div_dbgfs_bit ccu_div_bits[] = {
	CCU_DIV_DBGFS_BIT_ATTR("div_en", CCU_DIV_CTL_EN),
	CCU_DIV_DBGFS_BIT_ATTR("div_rst", CCU_DIV_CTL_RST),
	CCU_DIV_DBGFS_BIT_ATTR("div_bypass", CCU_DIV_CTL_SET_CLKDIV),
	CCU_DIV_DBGFS_BIT_ATTR("div_buf", CCU_DIV_CTL_GATE_REF_BUF),
	CCU_DIV_DBGFS_BIT_ATTR("div_lock", CCU_DIV_CTL_LOCK_NORMAL)
};

#define CCU_DIV_DBGFS_BIT_NUM	ARRAY_SIZE(ccu_div_bits)

/*
 * It can be dangerous to change the divider settings behind the clock
 * framework's back, so we don't provide any Kconfig-based compile-time
 * option to enable this feature.
 */
#undef CCU_DIV_ALLOW_WRITE_DEBUGFS
#ifdef CCU_DIV_ALLOW_WRITE_DEBUGFS

static int ccu_div_dbgfs_bit_set(void *priv, u64 val)
{
	const struct ccu_div_dbgfs_bit *bit = priv;
	struct ccu_div *div = bit->div;
	unsigned long flags;

	spin_lock_irqsave(&div->lock, flags);
	regmap_update_bits(div->sys_regs, div->reg_ctl,
			   bit->mask, val ? bit->mask : 0);
	spin_unlock_irqrestore(&div->lock, flags);

	return 0;
}

static int ccu_div_dbgfs_var_clkdiv_set(void *priv, u64 val)
{
	struct ccu_div *div = priv;
	unsigned long flags;
	u32 data;

	val = clamp_t(u64, val, CCU_DIV_CLKDIV_MIN,
		      CCU_DIV_CLKDIV_MAX(div->mask));
	data = ccu_div_prep(div->mask, val);

	spin_lock_irqsave(&div->lock, flags);
	regmap_update_bits(div->sys_regs, div->reg_ctl, div->mask, data);
	spin_unlock_irqrestore(&div->lock, flags);

	return 0;
}

#define ccu_div_dbgfs_mode		0644

#else /* !CCU_DIV_ALLOW_WRITE_DEBUGFS */

#define ccu_div_dbgfs_bit_set		NULL
#define ccu_div_dbgfs_var_clkdiv_set	NULL
#define ccu_div_dbgfs_mode		0444

#endif /* !CCU_DIV_ALLOW_WRITE_DEBUGFS */

static int ccu_div_dbgfs_bit_get(void *priv, u64 *val)
{
	const struct ccu_div_dbgfs_bit *bit = priv;
	struct ccu_div *div = bit->div;
	u32 data = 0;

	regmap_read(div->sys_regs, div->reg_ctl, &data);
	*val = !!(data & bit->mask);

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(ccu_div_dbgfs_bit_fops,
	ccu_div_dbgfs_bit_get, ccu_div_dbgfs_bit_set, "%llu\n");

static int ccu_div_dbgfs_var_clkdiv_get(void *priv, u64 *val)
{
	struct ccu_div *div = priv;
	u32 data = 0;

	regmap_read(div->sys_regs, div->reg_ctl, &data);
	*val = ccu_div_get(div->mask, data);

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(ccu_div_dbgfs_var_clkdiv_fops,
	ccu_div_dbgfs_var_clkdiv_get, ccu_div_dbgfs_var_clkdiv_set, "%llu\n");

static int ccu_div_dbgfs_fixed_clkdiv_get(void *priv, u64 *val)
{
	struct ccu_div *div = priv;

	*val = div->divider;

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(ccu_div_dbgfs_fixed_clkdiv_fops,
	ccu_div_dbgfs_fixed_clkdiv_get, NULL, "%llu\n");

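/*
 * Populate the clock's debugfs directory with the bit files applicable to
 * this divider (div_en, div_rst, div_bypass, div_lock) plus the div_clkdiv
 * value file. Bits irrelevant to the block's features are skipped.
 */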
static void ccu_div_var_debug_init(struct clk_hw *hw, struct dentry *dentry)
{
	struct ccu_div *div = to_ccu_div(hw);
	struct ccu_div_dbgfs_bit *bits;
	int didx, bidx, num = 2;
	const char *name;

	num += !!(div->flags & CLK_SET_RATE_GATE) +
		!!(div->features & CCU_DIV_RESET_DOMAIN);

	bits = kcalloc(num, sizeof(*bits), GFP_KERNEL);
	if (!bits)
		return;

	for (didx = 0, bidx = 0; bidx < CCU_DIV_DBGFS_BIT_NUM; ++bidx) {
		name = ccu_div_bits[bidx].name;
		if (!(div->flags & CLK_SET_RATE_GATE) &&
		    !strcmp("div_en", name)) {
			continue;
		}

		if (!(div->features & CCU_DIV_RESET_DOMAIN) &&
		    !strcmp("div_rst", name)) {
			continue;
		}

		if (!strcmp("div_buf", name))
			continue;

		bits[didx] = ccu_div_bits[bidx];
		bits[didx].div = div;

		if (div->features & CCU_DIV_LOCK_SHIFTED &&
		    !strcmp("div_lock", name)) {
			bits[didx].mask = CCU_DIV_CTL_LOCK_SHIFTED;
		}

		debugfs_create_file_unsafe(bits[didx].name, ccu_div_dbgfs_mode,
					   dentry, &bits[didx],
					   &ccu_div_dbgfs_bit_fops);
		++didx;
	}

	debugfs_create_file_unsafe("div_clkdiv", ccu_div_dbgfs_mode, dentry,
				   div, &ccu_div_dbgfs_var_clkdiv_fops);
}

static void ccu_div_gate_debug_init(struct clk_hw *hw, struct dentry *dentry)
{
	struct ccu_div *div = to_ccu_div(hw);
	struct ccu_div_dbgfs_bit *bit;

	bit = kmalloc(sizeof(*bit), GFP_KERNEL);
	if (!bit)
		return;

	*bit = ccu_div_bits[0];	/* "div_en" */
	bit->div = div;
	debugfs_create_file_unsafe(bit->name, ccu_div_dbgfs_mode, dentry, bit,
				   &ccu_div_dbgfs_bit_fops);

	debugfs_create_file_unsafe("div_clkdiv", 0400, dentry, div,
				   &ccu_div_dbgfs_fixed_clkdiv_fops);
}

static void ccu_div_buf_debug_init(struct clk_hw *hw, struct dentry *dentry)
{
	struct ccu_div *div = to_ccu_div(hw);
	struct ccu_div_dbgfs_bit *bit;

	bit = kmalloc(sizeof(*bit), GFP_KERNEL);
	if (!bit)
		return;

	*bit = ccu_div_bits[3];	/* "div_buf" */
	bit->div = div;
	debugfs_create_file_unsafe(bit->name, ccu_div_dbgfs_mode, dentry, bit,
				   &ccu_div_dbgfs_bit_fops);
}

static void ccu_div_fixed_debug_init(struct clk_hw *hw, struct dentry *dentry)
{
	struct ccu_div *div = to_ccu_div(hw);

	debugfs_create_file_unsafe("div_clkdiv", 0400, dentry, div,
				   &ccu_div_dbgfs_fixed_clkdiv_fops);
}

#else /* !CONFIG_DEBUG_FS */

#define ccu_div_var_debug_init NULL
#define ccu_div_gate_debug_init NULL
#define ccu_div_buf_debug_init NULL
#define ccu_div_fixed_debug_init NULL

#endif /* !CONFIG_DEBUG_FS */

static const struct clk_ops ccu_div_var_gate_to_set_ops = {
	.enable = ccu_div_var_enable,
	.disable = ccu_div_gate_disable,
	.is_enabled = ccu_div_gate_is_enabled,
	.recalc_rate = ccu_div_var_recalc_rate,
	.round_rate = ccu_div_var_round_rate,
	.set_rate = ccu_div_var_set_rate_fast,
	.debug_init = ccu_div_var_debug_init
};

static const struct clk_ops ccu_div_var_nogate_ops = {
	.recalc_rate = ccu_div_var_recalc_rate,
	.round_rate = ccu_div_var_round_rate,
	.set_rate = ccu_div_var_set_rate_slow,
	.debug_init = ccu_div_var_debug_init
};

static const struct clk_ops ccu_div_gate_ops = {
	.enable = ccu_div_gate_enable,
	.disable = ccu_div_gate_disable,
	.is_enabled = ccu_div_gate_is_enabled,
	.recalc_rate = ccu_div_fixed_recalc_rate,
	.round_rate = ccu_div_fixed_round_rate,
	.set_rate = ccu_div_fixed_set_rate,
	.debug_init = ccu_div_gate_debug_init
};

static const struct clk_ops ccu_div_buf_ops = {
	.enable = ccu_div_buf_enable,
	.disable = ccu_div_buf_disable,
	.is_enabled = ccu_div_buf_is_enabled,
	.debug_init = ccu_div_buf_debug_init
};

static const struct clk_ops ccu_div_fixed_ops = {
	.recalc_rate = ccu_div_fixed_recalc_rate,
	.round_rate = ccu_div_fixed_round_rate,
	.set_rate = ccu_div_fixed_set_rate,
	.debug_init = ccu_div_fixed_debug_init
};

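/*
 * A minimal registration sketch (illustrative, not from this file; the
 * ccu_div_init_data layout lives in ccu-div.h and all values below are
 * hypothetical):
 *
 *	static const struct ccu_div_init_data init = {
 *		.np = np,			// provider's OF node
 *		.sys_regs = sys_regs,		// system controller regmap
 *		.base = 0x010,			// divider CTL block offset
 *		.name = "some_clk",
 *		.parent_name = "ref_clk",
 *		.type = CCU_DIV_VAR,
 *		.width = 4,			// CLKDIV field width in bits
 *	};
 *	struct ccu_div *div = ccu_div_hw_register(&init);
 *	if (IS_ERR(div))
 *		return PTR_ERR(div);
 */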
struct ccu_div *ccu_div_hw_register(const struct ccu_div_init_data *div_init)
{
	struct clk_parent_data parent_data = { };
	struct clk_init_data hw_init = { };
	struct ccu_div *div;
	int ret;

	if (!div_init)
		return ERR_PTR(-EINVAL);

	div = kzalloc(sizeof(*div), GFP_KERNEL);
	if (!div)
		return ERR_PTR(-ENOMEM);

	/*
	 * Note: since the Baikal-T1 System Controller registers are
	 * MMIO-backed, we don't check the return status of the regmap IO
	 * operations, as it must be zero anyway.
	 */
	div->hw.init = &hw_init;
	div->id = div_init->id;
	div->reg_ctl = div_init->base + CCU_DIV_CTL;
	div->sys_regs = div_init->sys_regs;
	div->flags = div_init->flags;
	div->features = div_init->features;
	spin_lock_init(&div->lock);

	hw_init.name = div_init->name;
	hw_init.flags = div_init->flags;

	if (div_init->type == CCU_DIV_VAR) {
		if (hw_init.flags & CLK_SET_RATE_GATE)
			hw_init.ops = &ccu_div_var_gate_to_set_ops;
		else
			hw_init.ops = &ccu_div_var_nogate_ops;
		div->mask = CCU_DIV_CTL_CLKDIV_MASK(div_init->width);
	} else if (div_init->type == CCU_DIV_GATE) {
		hw_init.ops = &ccu_div_gate_ops;
		div->divider = div_init->divider;
	} else if (div_init->type == CCU_DIV_BUF) {
		hw_init.ops = &ccu_div_buf_ops;
	} else if (div_init->type == CCU_DIV_FIXED) {
		hw_init.ops = &ccu_div_fixed_ops;
		div->divider = div_init->divider;
	} else {
		ret = -EINVAL;
		goto err_free_div;
	}

	if (!div_init->parent_name) {
		ret = -EINVAL;
		goto err_free_div;
	}
	parent_data.fw_name = div_init->parent_name;
	parent_data.name = div_init->parent_name;
	hw_init.parent_data = &parent_data;
	hw_init.num_parents = 1;

	ret = of_clk_hw_register(div_init->np, &div->hw);
	if (ret)
		goto err_free_div;

	return div;

err_free_div:
	kfree(div);

	return ERR_PTR(ret);
}

void ccu_div_hw_unregister(struct ccu_div *div)
{
	clk_hw_unregister(&div->hw);

	kfree(div);
}