1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2011 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
4 * Copyright (C) 2011 Richard Zhao, Linaro <richard.zhao@linaro.org>
5 * Copyright (C) 2011-2012 Mike Turquette, Linaro Ltd <mturquette@linaro.org>
6 *
7 * Simple multiplexer clock implementation
8 */
9
10 #include <linux/clk-provider.h>
11 #include <linux/module.h>
12 #include <linux/slab.h>
13 #include <linux/io.h>
14 #include <linux/err.h>
15
16 /*
17 * DOC: basic adjustable multiplexer clock that cannot gate
18 *
19 * Traits of this clock:
20 * prepare - clk_prepare only ensures that parents are prepared
21 * enable - clk_enable only ensures that parents are enabled
22 * rate - rate is only affected by parent switching. No clk_set_rate support
23 * parent - parent is adjustable through clk_set_parent
24 */
25
clk_mux_readl(struct clk_mux * mux)26 static inline u32 clk_mux_readl(struct clk_mux *mux)
27 {
28 if (mux->flags & CLK_MUX_BIG_ENDIAN)
29 return ioread32be(mux->reg);
30
31 return readl(mux->reg);
32 }
33
/* Write the mux register, honouring the CLK_MUX_BIG_ENDIAN accessor flag. */
static inline void clk_mux_writel(struct clk_mux *mux, u32 val)
{
	if (!(mux->flags & CLK_MUX_BIG_ENDIAN)) {
		writel(val, mux->reg);
		return;
	}

	iowrite32be(val, mux->reg);
}
41
/*
 * Translate a raw register field value into a parent index.
 *
 * With a lookup table, the index is the table slot holding @val. Otherwise
 * the value is decoded per the CLK_MUX_INDEX_BIT / CLK_MUX_INDEX_ONE flags.
 * Returns a non-negative index, or -EINVAL if @val maps to no valid parent.
 */
int clk_mux_val_to_index(struct clk_hw *hw, u32 *table, unsigned int flags,
			 unsigned int val)
{
	int num_parents = clk_hw_get_num_parents(hw);
	int i;

	if (table) {
		for (i = 0; i < num_parents; i++) {
			if (table[i] == val)
				return i;
		}
		return -EINVAL;
	}

	/* one-hot encoding: index is the (only) set bit's position */
	if ((flags & CLK_MUX_INDEX_BIT) && val)
		val = ffs(val) - 1;

	/* register field counts parents starting from 1, not 0 */
	if ((flags & CLK_MUX_INDEX_ONE) && val)
		val--;

	return val < (unsigned int)num_parents ? val : -EINVAL;
}
EXPORT_SYMBOL_GPL(clk_mux_val_to_index);
68
/*
 * Translate a parent index into the raw register field value: the inverse
 * of clk_mux_val_to_index(). A lookup table wins over the flag-based
 * encodings (CLK_MUX_INDEX_BIT one-hot, CLK_MUX_INDEX_ONE offset-by-one).
 */
unsigned int clk_mux_index_to_val(u32 *table, unsigned int flags, u8 index)
{
	unsigned int val;

	if (table)
		return table[index];

	val = (flags & CLK_MUX_INDEX_BIT) ? (1 << index) : index;
	if (flags & CLK_MUX_INDEX_ONE)
		val++;

	return val;
}
EXPORT_SYMBOL_GPL(clk_mux_index_to_val);
86
clk_mux_get_parent(struct clk_hw * hw)87 static u8 clk_mux_get_parent(struct clk_hw *hw)
88 {
89 struct clk_mux *mux = to_clk_mux(hw);
90 u32 val;
91
92 val = clk_mux_readl(mux) >> mux->shift;
93 val &= mux->mask;
94
95 return clk_mux_val_to_index(hw, mux->table, mux->flags, val);
96 }
97
/*
 * Program the parent selection field. The read-modify-write (or, for
 * HIWORD muxes, the write-enable-masked write) is serialized with
 * mux->lock when one was provided; the __acquire/__release calls keep
 * sparse's context tracking balanced on the lockless path.
 */
static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_mux *mux = to_clk_mux(hw);
	u32 sel = clk_mux_index_to_val(mux->table, mux->flags, index);
	unsigned long flags = 0;
	u32 regval;

	if (mux->lock)
		spin_lock_irqsave(mux->lock, flags);
	else
		__acquire(mux->lock);

	if (mux->flags & CLK_MUX_HIWORD_MASK) {
		/* upper 16 bits are write-enables for the lower-half field */
		regval = mux->mask << (mux->shift + 16);
	} else {
		regval = clk_mux_readl(mux);
		regval &= ~(mux->mask << mux->shift);
	}
	regval |= sel << mux->shift;
	clk_mux_writel(mux, regval);

	if (mux->lock)
		spin_unlock_irqrestore(mux->lock, flags);
	else
		__release(mux->lock);

	return 0;
}
127
clk_mux_determine_rate(struct clk_hw * hw,struct clk_rate_request * req)128 static int clk_mux_determine_rate(struct clk_hw *hw,
129 struct clk_rate_request *req)
130 {
131 struct clk_mux *mux = to_clk_mux(hw);
132
133 return clk_mux_determine_rate_flags(hw, req, mux->flags);
134 }
135
/* Read/write mux operations: parent is selectable via clk_set_parent(). */
const struct clk_ops clk_mux_ops = {
	.get_parent = clk_mux_get_parent,
	.set_parent = clk_mux_set_parent,
	.determine_rate = clk_mux_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_mux_ops);
142
/* Read-only mux operations: the parent selection can only be observed. */
const struct clk_ops clk_mux_ro_ops = {
	.get_parent = clk_mux_get_parent,
};
EXPORT_SYMBOL_GPL(clk_mux_ro_ops);
147
/**
 * clk_hw_register_mux_table - register a mux clock with a value table
 * @dev: device registering this clock (may be NULL)
 * @name: name of this clock
 * @parent_names: array of possible parent clock names
 * @num_parents: number of entries in @parent_names
 * @flags: framework-level CLK_* flags
 * @reg: register address controlling the mux
 * @shift: bit shift of the selector field within @reg
 * @mask: unshifted mask of the selector field
 * @clk_mux_flags: hardware-specific CLK_MUX_* flags
 * @table: optional index-to-register-value lookup table (NULL for direct)
 * @lock: optional spinlock serializing register access (NULL if unneeded)
 *
 * Returns the registered clk_hw, or an ERR_PTR() on failure.
 */
struct clk_hw *clk_hw_register_mux_table(struct device *dev, const char *name,
		const char * const *parent_names, u8 num_parents,
		unsigned long flags,
		void __iomem *reg, u8 shift, u32 mask,
		u8 clk_mux_flags, u32 *table, spinlock_t *lock)
{
	struct clk_mux *mux;
	struct clk_hw *hw;
	/*
	 * Zero-initialize so any clk_init_data members we do not assign
	 * below are not handed to clk_hw_register() as stack garbage.
	 */
	struct clk_init_data init = {};
	u8 width = 0;
	int ret;

	if (clk_mux_flags & CLK_MUX_HIWORD_MASK) {
		/* field plus its write-enable bits must fit in the low word */
		width = fls(mask) - ffs(mask) + 1;
		if (width + shift > 16) {
			pr_err("mux value exceeds LOWORD field\n");
			return ERR_PTR(-EINVAL);
		}
	}

	/* allocate the mux */
	mux = kzalloc(sizeof(*mux), GFP_KERNEL);
	if (!mux)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	if (clk_mux_flags & CLK_MUX_READ_ONLY)
		init.ops = &clk_mux_ro_ops;
	else
		init.ops = &clk_mux_ops;
	init.flags = flags;
	init.parent_names = parent_names;
	init.num_parents = num_parents;

	/* struct clk_mux assignments */
	mux->reg = reg;
	mux->shift = shift;
	mux->mask = mask;
	mux->flags = clk_mux_flags;
	mux->lock = lock;
	mux->table = table;
	mux->hw.init = &init;

	hw = &mux->hw;
	ret = clk_hw_register(dev, hw);
	if (ret) {
		/* registration failed: mux was never exposed, safe to free */
		kfree(mux);
		hw = ERR_PTR(ret);
	}

	return hw;
}
EXPORT_SYMBOL_GPL(clk_hw_register_mux_table);
201
/*
 * Legacy struct clk wrapper around clk_hw_register_mux_table().
 * Returns a struct clk, or an ERR_PTR() on failure.
 */
struct clk *clk_register_mux_table(struct device *dev, const char *name,
		const char * const *parent_names, u8 num_parents,
		unsigned long flags,
		void __iomem *reg, u8 shift, u32 mask,
		u8 clk_mux_flags, u32 *table, spinlock_t *lock)
{
	struct clk_hw *hw = clk_hw_register_mux_table(dev, name, parent_names,
						      num_parents, flags, reg,
						      shift, mask,
						      clk_mux_flags, table,
						      lock);

	return IS_ERR(hw) ? ERR_CAST(hw) : hw->clk;
}
EXPORT_SYMBOL_GPL(clk_register_mux_table);
218
/*
 * Convenience wrapper: register a table-less mux described by a field
 * width instead of a mask. Returns a struct clk or an ERR_PTR().
 */
struct clk *clk_register_mux(struct device *dev, const char *name,
		const char * const *parent_names, u8 num_parents,
		unsigned long flags,
		void __iomem *reg, u8 shift, u8 width,
		u8 clk_mux_flags, spinlock_t *lock)
{
	/* a width-bit field has mask BIT(width) - 1 */
	return clk_register_mux_table(dev, name, parent_names, num_parents,
				      flags, reg, shift, BIT(width) - 1,
				      clk_mux_flags, NULL, lock);
}
EXPORT_SYMBOL_GPL(clk_register_mux);
232
/*
 * Convenience wrapper: register a table-less mux described by a field
 * width instead of a mask. Returns a clk_hw or an ERR_PTR().
 */
struct clk_hw *clk_hw_register_mux(struct device *dev, const char *name,
		const char * const *parent_names, u8 num_parents,
		unsigned long flags,
		void __iomem *reg, u8 shift, u8 width,
		u8 clk_mux_flags, spinlock_t *lock)
{
	/* a width-bit field has mask BIT(width) - 1 */
	return clk_hw_register_mux_table(dev, name, parent_names, num_parents,
					 flags, reg, shift, BIT(width) - 1,
					 clk_mux_flags, NULL, lock);
}
EXPORT_SYMBOL_GPL(clk_hw_register_mux);
246
/*
 * Unregister a mux previously created by clk_register_mux*() and free
 * the backing struct clk_mux. The container pointer is captured before
 * clk_unregister() tears down the core clock.
 */
void clk_unregister_mux(struct clk *clk)
{
	struct clk_hw *hw = __clk_get_hw(clk);
	struct clk_mux *mux;

	if (!hw)
		return;

	mux = to_clk_mux(hw);
	clk_unregister(clk);
	kfree(mux);
}
EXPORT_SYMBOL_GPL(clk_unregister_mux);
262
/*
 * Unregister a mux previously created by clk_hw_register_mux*() and free
 * the backing struct clk_mux. The container pointer is captured before
 * clk_hw_unregister() tears down the core clock.
 */
void clk_hw_unregister_mux(struct clk_hw *hw)
{
	struct clk_mux *mux = to_clk_mux(hw);

	clk_hw_unregister(hw);
	kfree(mux);
}
EXPORT_SYMBOL_GPL(clk_hw_unregister_mux);
273