// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2016 Maxime Ripard
 * Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/clk-provider.h>
#include <linux/io.h>

#include "ccu_gate.h"
#include "ccu_nkmp.h"

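/*
 * Scratch structure for the factor search: the min and max fields
 * bound the search space, and n/k/m/p receive the best combination
 * found by ccu_nkmp_find_best().
 */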
struct _ccu_nkmp {
	unsigned long	n, min_n, max_n;
	unsigned long	k, min_k, max_k;
	unsigned long	m, min_m, max_m;
	unsigned long	p, min_p, max_p;
};

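/*
 * An NKMP clock multiplies its parent rate by the N and K factors and
 * divides it by the M and P factors:
 *
 *	rate = (parent * N * K) / (M * P)
 */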
static u64 ccu_nkmp_calc_rate(unsigned long parent,
			      unsigned long n, unsigned long k,
			      unsigned long m, unsigned long p)
{
	u64 rate = parent;

	rate *= n * k;
	do_div(rate, m * p);

	return rate;
}

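/*
 * Walk every allowed N/K/M value and every power-of-two P divider,
 * keeping the combination that gets closest to the requested rate
 * without overshooting it.
 */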
static void ccu_nkmp_find_best(unsigned long parent, u64 rate,
			       struct _ccu_nkmp *nkmp)
{
	u64 best_rate = 0;
	unsigned long best_n = 0, best_k = 0, best_m = 0, best_p = 0;
	unsigned long _n, _k, _m, _p;

	for (_k = nkmp->min_k; _k <= nkmp->max_k; _k++) {
		for (_n = nkmp->min_n; _n <= nkmp->max_n; _n++) {
			for (_m = nkmp->min_m; _m <= nkmp->max_m; _m++) {
				for (_p = nkmp->min_p; _p <= nkmp->max_p; _p <<= 1) {
					u64 tmp_rate;

					tmp_rate = ccu_nkmp_calc_rate(parent,
								      _n, _k,
								      _m, _p);

					if (tmp_rate > rate)
						continue;

					if ((rate - tmp_rate) < (rate - best_rate)) {
						best_rate = tmp_rate;
						best_n = _n;
						best_k = _k;
						best_m = _m;
						best_p = _p;
					}
				}
			}
		}
	}

	nkmp->n = best_n;
	nkmp->k = best_k;
	nkmp->m = best_m;
	nkmp->p = best_p;
}

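/* enable/disable/is_enabled only toggle the gate bit, via the gate helpers. */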
static void ccu_nkmp_disable(struct clk_hw *hw)
{
	struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);

	return ccu_gate_helper_disable(&nkmp->common, nkmp->enable);
}

static int ccu_nkmp_enable(struct clk_hw *hw)
{
	struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);

	return ccu_gate_helper_enable(&nkmp->common, nkmp->enable);
}

static int ccu_nkmp_is_enabled(struct clk_hw *hw)
{
	struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);

	return ccu_gate_helper_is_enabled(&nkmp->common, nkmp->enable);
}

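/*
 * Read the factors back from the register and recompute the current
 * rate. Factors of zero (after applying the offset) are clamped to 1,
 * P is stored as a power-of-two exponent, and the optional fixed
 * post-divider is applied last.
 */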
static unsigned long ccu_nkmp_recalc_rate(struct clk_hw *hw,
					  unsigned long parent_rate)
{
	struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);
	unsigned long n, m, k, p, rate;
	u32 reg;

	reg = readl(nkmp->common.base + nkmp->common.reg);

	n = reg >> nkmp->n.shift;
	n &= (1 << nkmp->n.width) - 1;
	n += nkmp->n.offset;
	if (!n)
		n++;

	k = reg >> nkmp->k.shift;
	k &= (1 << nkmp->k.width) - 1;
	k += nkmp->k.offset;
	if (!k)
		k++;

	m = reg >> nkmp->m.shift;
	m &= (1 << nkmp->m.width) - 1;
	m += nkmp->m.offset;
	if (!m)
		m++;

	p = reg >> nkmp->p.shift;
	p &= (1 << nkmp->p.width) - 1;

	rate = ccu_nkmp_calc_rate(parent_rate, n, k, m, 1 << p);
	if (nkmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
		rate /= nkmp->fixed_post_div;

	return rate;
}

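/*
 * Round a requested rate to the closest one the factors can produce.
 * The fixed post-divider, if any, is folded in before the search and
 * taken back out of the result; requests above max_rate are clamped.
 */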
static long ccu_nkmp_round_rate(struct clk_hw *hw, unsigned long _rate,
				unsigned long *parent_rate)
{
	struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);
	struct _ccu_nkmp _nkmp;
	u64 rate = _rate;

	if (nkmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
		rate *= nkmp->fixed_post_div;

	if (nkmp->max_rate && rate > nkmp->max_rate) {
		rate = nkmp->max_rate;
		if (nkmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
			do_div(rate, nkmp->fixed_post_div);
		return rate;
	}

	_nkmp.min_n = nkmp->n.min ?: 1;
	_nkmp.max_n = nkmp->n.max ?: 1 << nkmp->n.width;
	_nkmp.min_k = nkmp->k.min ?: 1;
	_nkmp.max_k = nkmp->k.max ?: 1 << nkmp->k.width;
	_nkmp.min_m = 1;
	_nkmp.max_m = nkmp->m.max ?: 1 << nkmp->m.width;
	_nkmp.min_p = 1;
	_nkmp.max_p = nkmp->p.max ?: 1 << ((1 << nkmp->p.width) - 1);

	ccu_nkmp_find_best(*parent_rate, rate, &_nkmp);

	rate = ccu_nkmp_calc_rate(*parent_rate, _nkmp.n, _nkmp.k,
				  _nkmp.m, _nkmp.p);
	if (nkmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
		do_div(rate, nkmp->fixed_post_div);

	return rate;
}

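/*
 * Find the best factors for the requested rate, program them into the
 * register under the CCU lock and wait for the PLL to lock again.
 */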
static int ccu_nkmp_set_rate(struct clk_hw *hw, unsigned long _rate,
			     unsigned long parent_rate)
{
	struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);
	u32 n_mask = 0, k_mask = 0, m_mask = 0, p_mask = 0;
	struct _ccu_nkmp _nkmp;
	unsigned long flags;
	u64 rate = _rate;
	u32 reg;

	if (nkmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
		rate = rate * nkmp->fixed_post_div;

	_nkmp.min_n = nkmp->n.min ?: 1;
	_nkmp.max_n = nkmp->n.max ?: 1 << nkmp->n.width;
	_nkmp.min_k = nkmp->k.min ?: 1;
	_nkmp.max_k = nkmp->k.max ?: 1 << nkmp->k.width;
	_nkmp.min_m = 1;
	_nkmp.max_m = nkmp->m.max ?: 1 << nkmp->m.width;
	_nkmp.min_p = 1;
	_nkmp.max_p = nkmp->p.max ?: 1 << ((1 << nkmp->p.width) - 1);

	ccu_nkmp_find_best(parent_rate, rate, &_nkmp);

	/*
	 * If the width is 0, GENMASK() may not generate the expected mask
	 * (0), since shifting by an amount equal to or greater than the
	 * width of the left operand is undefined behaviour in C. This is
	 * easily avoided by explicitly checking whether the width is 0.
	 */
	if (nkmp->n.width)
		n_mask = GENMASK(nkmp->n.width + nkmp->n.shift - 1,
				 nkmp->n.shift);
	if (nkmp->k.width)
		k_mask = GENMASK(nkmp->k.width + nkmp->k.shift - 1,
				 nkmp->k.shift);
	if (nkmp->m.width)
		m_mask = GENMASK(nkmp->m.width + nkmp->m.shift - 1,
				 nkmp->m.shift);
	if (nkmp->p.width)
		p_mask = GENMASK(nkmp->p.width + nkmp->p.shift - 1,
				 nkmp->p.shift);

	spin_lock_irqsave(nkmp->common.lock, flags);

	reg = readl(nkmp->common.base + nkmp->common.reg);
	reg &= ~(n_mask | k_mask | m_mask | p_mask);

	reg |= ((_nkmp.n - nkmp->n.offset) << nkmp->n.shift) & n_mask;
	reg |= ((_nkmp.k - nkmp->k.offset) << nkmp->k.shift) & k_mask;
	reg |= ((_nkmp.m - nkmp->m.offset) << nkmp->m.shift) & m_mask;
	reg |= (ilog2(_nkmp.p) << nkmp->p.shift) & p_mask;

	writel(reg, nkmp->common.base + nkmp->common.reg);

	spin_unlock_irqrestore(nkmp->common.lock, flags);

	ccu_helper_wait_for_lock(&nkmp->common, nkmp->lock);

	return 0;
}

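/* Exported so that the SoC-specific CCU drivers can reference these ops. */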
const struct clk_ops ccu_nkmp_ops = {
	.disable	= ccu_nkmp_disable,
	.enable		= ccu_nkmp_enable,
	.is_enabled	= ccu_nkmp_is_enabled,

	.recalc_rate	= ccu_nkmp_recalc_rate,
	.round_rate	= ccu_nkmp_round_rate,
	.set_rate	= ccu_nkmp_set_rate,
};
EXPORT_SYMBOL_GPL(ccu_nkmp_ops);