// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2016 Maxime Ripard
 * Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/clk-provider.h>
#include <linux/io.h>

#include "ccu_frac.h"
#include "ccu_gate.h"
#include "ccu_nm.h"

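/*
 * Scratch structure for factor computation: the min/max fields bound the
 * search space, and ccu_nm_find_best() fills in the selected n and m.
 */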
struct _ccu_nm {
        unsigned long n, min_n, max_n;
        unsigned long m, min_m, max_m;
};

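/*
 * An NM clock runs at parent * N / M. The multiplication is done in 64
 * bits so it cannot overflow on 32-bit systems: e.g. a 24 MHz parent with
 * N = 99 and M = 2 gives 24000000 * 99 / 2 = 1188000000 Hz.
 */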
static u64 ccu_nm_calc_rate(unsigned long parent,
                            unsigned long n, unsigned long m)
{
        u64 rate = parent;

        rate *= n;
        do_div(rate, m);

        return rate;
}

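/*
 * Walk every (N, M) pair in the allowed ranges and keep the combination
 * whose rate is closest to the target without exceeding it.
 */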
static void ccu_nm_find_best(unsigned long parent, u64 rate,
                             struct _ccu_nm *nm)
{
        u64 best_rate = 0;
        unsigned long best_n = 0, best_m = 0;
        unsigned long _n, _m;

        for (_n = nm->min_n; _n <= nm->max_n; _n++) {
                for (_m = nm->min_m; _m <= nm->max_m; _m++) {
                        u64 tmp_rate = ccu_nm_calc_rate(parent,
                                                        _n, _m);

                        if (tmp_rate > rate)
                                continue;

                        if ((rate - tmp_rate) < (rate - best_rate)) {
                                best_rate = tmp_rate;
                                best_n = _n;
                                best_m = _m;
                        }
                }
        }

        nm->n = best_n;
        nm->m = best_m;
}

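/* Gating is delegated to the common gate helpers acting on the enable mask. */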
static void ccu_nm_disable(struct clk_hw *hw)
{
        struct ccu_nm *nm = hw_to_ccu_nm(hw);

        return ccu_gate_helper_disable(&nm->common, nm->enable);
}

static int ccu_nm_enable(struct clk_hw *hw)
{
        struct ccu_nm *nm = hw_to_ccu_nm(hw);

        return ccu_gate_helper_enable(&nm->common, nm->enable);
}

static int ccu_nm_is_enabled(struct clk_hw *hw)
{
        struct ccu_nm *nm = hw_to_ccu_nm(hw);

        return ccu_gate_helper_is_enabled(&nm->common, nm->enable);
}

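/*
 * Compute the current rate from the hardware state: from the fractional or
 * sigma-delta helpers when those modes are active, otherwise from the N and
 * M factors read back from the register.
 */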
static unsigned long ccu_nm_recalc_rate(struct clk_hw *hw,
                                        unsigned long parent_rate)
{
        struct ccu_nm *nm = hw_to_ccu_nm(hw);
        u64 rate;
        unsigned long n, m;
        u32 reg;

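        /* In fractional mode the rate comes from the frac helper, not from N/M. */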
        if (ccu_frac_helper_is_enabled(&nm->common, &nm->frac)) {
                rate = ccu_frac_helper_read_rate(&nm->common, &nm->frac);

                if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
                        do_div(rate, nm->fixed_post_div);

                return rate;
        }

        reg = readl(nm->common.base + nm->common.reg);

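        /*
         * Extract N and M from their register fields and apply the offsets.
         * A zero N would yield a zero rate and a zero M a division by zero,
         * so clamp both factors to at least 1.
         */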
        n = reg >> nm->n.shift;
        n &= (1 << nm->n.width) - 1;
        n += nm->n.offset;
        if (!n)
                n++;

        m = reg >> nm->m.shift;
        m &= (1 << nm->m.width) - 1;
        m += nm->m.offset;
        if (!m)
                m++;

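        /* With sigma-delta modulation enabled, the helper reports the effective rate. */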
        if (ccu_sdm_helper_is_enabled(&nm->common, &nm->sdm))
                rate = ccu_sdm_helper_read_rate(&nm->common, &nm->sdm, m, n);
        else
                rate = ccu_nm_calc_rate(parent_rate, n, m);

        if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
                do_div(rate, nm->fixed_post_div);

        return rate;
}

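/*
 * Round a requested rate: scale it by the fixed post-divider, clamp it to
 * the clock's min/max rates, prefer an exact fractional or sigma-delta rate
 * when one exists, and otherwise pick the best N/M combination.
 */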
static long ccu_nm_round_rate(struct clk_hw *hw, unsigned long _rate,
                              unsigned long *parent_rate)
{
        struct ccu_nm *nm = hw_to_ccu_nm(hw);
        struct _ccu_nm _nm;
        u64 rate = _rate;

        if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
                rate *= nm->fixed_post_div;

        if (rate < nm->min_rate) {
                rate = nm->min_rate;
                if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
                        do_div(rate, nm->fixed_post_div);
                return rate;
        }

        if (nm->max_rate && rate > nm->max_rate) {
                rate = nm->max_rate;
                if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
                        do_div(rate, nm->fixed_post_div);
                return rate;
        }

        if (ccu_frac_helper_has_rate(&nm->common, &nm->frac, rate)) {
                if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
                        do_div(rate, nm->fixed_post_div);
                return rate;
        }

        if (ccu_sdm_helper_has_rate(&nm->common, &nm->sdm, rate)) {
                if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
                        do_div(rate, nm->fixed_post_div);
                return rate;
        }

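        /* Zero bounds in the factor description fall back to the field's full range. */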
        _nm.min_n = nm->n.min ?: 1;
        _nm.max_n = nm->n.max ?: 1 << nm->n.width;
        _nm.min_m = 1;
        _nm.max_m = nm->m.max ?: 1 << nm->m.width;

        ccu_nm_find_best(*parent_rate, rate, &_nm);
        rate = ccu_nm_calc_rate(*parent_rate, _nm.n, _nm.m);

        if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
                do_div(rate, nm->fixed_post_div);

        return rate;
}

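/*
 * Program the hardware for a new rate, preferring fractional mode, then
 * sigma-delta modulation, and falling back to plain N/M factors.
 */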
static int ccu_nm_set_rate(struct clk_hw *hw, unsigned long _rate,
                           unsigned long parent_rate)
{
        struct ccu_nm *nm = hw_to_ccu_nm(hw);
        struct _ccu_nm _nm;
        unsigned long flags;
        u32 reg;
        u64 rate = _rate;

        /* Adjust target rate according to post-dividers */
        if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
                rate = rate * nm->fixed_post_div;

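        /*
         * If the target rate is one of the fixed fractional rates, switch to
         * fractional mode instead of programming N and M.
         */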
        if (ccu_frac_helper_has_rate(&nm->common, &nm->frac, rate)) {
                spin_lock_irqsave(nm->common.lock, flags);

                /* most SoCs require M to be 0 if fractional mode is used */
                reg = readl(nm->common.base + nm->common.reg);
                reg &= ~GENMASK(nm->m.width + nm->m.shift - 1, nm->m.shift);
                writel(reg, nm->common.base + nm->common.reg);

                spin_unlock_irqrestore(nm->common.lock, flags);

                ccu_frac_helper_enable(&nm->common, &nm->frac);

                return ccu_frac_helper_set_rate(&nm->common, &nm->frac,
                                                rate, nm->lock);
        } else {
                ccu_frac_helper_disable(&nm->common, &nm->frac);
        }

        _nm.min_n = nm->n.min ?: 1;
        _nm.max_n = nm->n.max ?: 1 << nm->n.width;
        _nm.min_m = 1;
        _nm.max_m = nm->m.max ?: 1 << nm->m.width;

        if (ccu_sdm_helper_has_rate(&nm->common, &nm->sdm, rate)) {
                ccu_sdm_helper_enable(&nm->common, &nm->sdm, rate);

                /* Sigma delta modulation requires specific N and M factors */
                ccu_sdm_helper_get_factors(&nm->common, &nm->sdm, rate,
                                           &_nm.m, &_nm.n);
        } else {
                ccu_sdm_helper_disable(&nm->common, &nm->sdm);
                ccu_nm_find_best(parent_rate, rate, &_nm);
        }

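        /*
         * Program the new N and M factors in one locked read-modify-write,
         * then wait for the PLL to report a lock before returning.
         */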
        spin_lock_irqsave(nm->common.lock, flags);

        reg = readl(nm->common.base + nm->common.reg);
        reg &= ~GENMASK(nm->n.width + nm->n.shift - 1, nm->n.shift);
        reg &= ~GENMASK(nm->m.width + nm->m.shift - 1, nm->m.shift);

        reg |= (_nm.n - nm->n.offset) << nm->n.shift;
        reg |= (_nm.m - nm->m.offset) << nm->m.shift;
        writel(reg, nm->common.base + nm->common.reg);

        spin_unlock_irqrestore(nm->common.lock, flags);

        ccu_helper_wait_for_lock(&nm->common, nm->lock);

        return 0;
}

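/*
 * Generic operations for N/M-style PLLs, referenced by the per-SoC CCU
 * drivers from their struct ccu_nm clock definitions.
 */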
const struct clk_ops ccu_nm_ops = {
        .disable        = ccu_nm_disable,
        .enable         = ccu_nm_enable,
        .is_enabled     = ccu_nm_is_enabled,

        .recalc_rate    = ccu_nm_recalc_rate,
        .round_rate     = ccu_nm_round_rate,
        .set_rate       = ccu_nm_set_rate,
};
EXPORT_SYMBOL_GPL(ccu_nm_ops);