// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2016 Maxime Ripard
 *
 * Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/iopoll.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/module.h>
#include <linux/clkdev.h>

#include "ccu_common.h"
#include "ccu_gate.h"
#include "ccu_reset.h"

static DEFINE_SPINLOCK(ccu_lock);

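/*
 * Poll until the PLL reports lock, warning after a 70 ms timeout. The
 * lock bit lives either in a dedicated lock register (when the CCU has
 * CCU_FEATURE_LOCK_REG) or in the PLL's own configuration register.
 */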
void ccu_helper_wait_for_lock(struct ccu_common *common, u32 lock)
{
	void __iomem *addr;
	u32 reg;

	if (!lock)
		return;

	if (common->features & CCU_FEATURE_LOCK_REG)
		addr = common->base + common->lock_reg;
	else
		addr = common->base + common->reg;

	WARN_ON(readl_relaxed_poll_timeout(addr, reg, reg & lock, 100, 70000));
}

/*
 * This clock notifier is called when the frequency of a PLL clock is
 * changed. In common PLL designs, changes to the dividers take effect
 * almost immediately, while changes to the multipliers (implemented
 * as dividers in the feedback loop) take a few cycles to work into
 * the feedback loop for the PLL to stabilize.
 *
 * Sometimes when the PLL clock rate is changed, the decrease in the
 * divider is too much for the decrease in the multiplier to catch up.
 * The PLL clock rate will spike, and in some cases, might lock up
 * completely.
 *
 * This notifier callback will gate and then ungate the clock,
 * effectively resetting it, so it proceeds to work. Care must be
 * taken to reparent consumers to other temporary clocks during the
 * rate change, and this notifier callback must be the first one
 * to be registered.
 */
static int ccu_pll_notifier_cb(struct notifier_block *nb,
			       unsigned long event, void *data)
{
	struct ccu_pll_nb *pll = to_ccu_pll_nb(nb);
	int ret = 0;

	if (event != POST_RATE_CHANGE)
		goto out;

	ccu_gate_helper_disable(pll->common, pll->enable);

	ret = ccu_gate_helper_enable(pll->common, pll->enable);
	if (ret)
		goto out;

	ccu_helper_wait_for_lock(pll->common, pll->lock);

out:
	return notifier_from_errno(ret);
}

int ccu_pll_notifier_register(struct ccu_pll_nb *pll_nb)
{
	pll_nb->clk_nb.notifier_call = ccu_pll_notifier_cb;

	return clk_notifier_register(pll_nb->common->hw.clk,
				     &pll_nb->clk_nb);
}
EXPORT_SYMBOL_GPL(ccu_pll_notifier_register);

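/*
 * Illustrative registration sketch. The struct name and bit positions
 * below are assumptions; a real driver uses its SoC's PLL gate and
 * lock bits:
 *
 *	static struct ccu_pll_nb example_pll_cpu_nb = {
 *		.common	= &pll_cpu_clk.common,
 *		.enable	= BIT(31),	(gate bit, assumed)
 *		.lock	= BIT(28),	(lock bit, assumed)
 *	};
 *
 *	ccu_pll_notifier_register(&example_pll_cpu_nb);
 */
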
#ifdef CONFIG_PM_SLEEP

static LIST_HEAD(ccu_reg_cache_list);

struct sunxi_clock_reg_cache {
	struct list_head node;
	void __iomem *reg_base;
	struct ccu_reg_dump *rdump;
	unsigned int rd_num;
	const struct ccu_reg_dump *rsuspend;
	unsigned int rsuspend_num;
};

static void ccu_save(void __iomem *base, struct ccu_reg_dump *rd,
		     unsigned int num_regs)
{
	for (; num_regs > 0; --num_regs, ++rd)
		rd->value = readl(base + rd->offset);
}

static void ccu_restore(void __iomem *base,
			const struct ccu_reg_dump *rd,
			unsigned int num_regs)
{
	for (; num_regs > 0; --num_regs, ++rd)
		writel(rd->value, base + rd->offset);
}

static struct ccu_reg_dump *ccu_alloc_reg_dump(struct ccu_common **rdump,
					       unsigned long nr_rdump)
{
	struct ccu_reg_dump *rd;
	unsigned int i;

	rd = kcalloc(nr_rdump, sizeof(*rd), GFP_KERNEL);
	if (!rd)
		return NULL;

	for (i = 0; i < nr_rdump; ++i) {
		struct ccu_common *ccu_clks = rdump[i];

		rd[i].offset = ccu_clks->reg;
	}

	return rd;
}

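/*
 * Syscore hooks: on suspend, snapshot the tracked clock registers and
 * then apply the board's suspend-time register values; on resume,
 * write the snapshot back.
 */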
static int ccu_suspend(void)
{
	struct sunxi_clock_reg_cache *reg_cache;

	list_for_each_entry(reg_cache, &ccu_reg_cache_list, node) {
		ccu_save(reg_cache->reg_base, reg_cache->rdump,
			 reg_cache->rd_num);
		ccu_restore(reg_cache->reg_base, reg_cache->rsuspend,
			    reg_cache->rsuspend_num);
	}
	return 0;
}

static void ccu_resume(void)
{
	struct sunxi_clock_reg_cache *reg_cache;

	list_for_each_entry(reg_cache, &ccu_reg_cache_list, node)
		ccu_restore(reg_cache->reg_base, reg_cache->rdump,
			    reg_cache->rd_num);
}

static struct syscore_ops sunxi_clk_syscore_ops = {
	.suspend = ccu_suspend,
	.resume = ccu_resume,
};

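/*
 * sunxi_ccu_sleep_init - track a CCU's registers across system suspend
 * @reg_base:	 CCU MMIO base
 * @rdump:	 clocks whose registers are saved at suspend and
 *		 restored at resume
 * @nr_rdump:	 number of entries in @rdump
 * @rsuspend:	 register values to program when entering suspend
 * @nr_rsuspend: number of entries in @rsuspend (may be 0)
 *
 * Illustrative call from a SoC driver (the desc fields are assumed):
 *
 *	sunxi_ccu_sleep_init(reg, desc->ccu_clks, desc->num_ccu_clks,
 *			     NULL, 0);
 */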
void sunxi_ccu_sleep_init(void __iomem *reg_base,
			  struct ccu_common **rdump,
			  unsigned long nr_rdump,
			  const struct ccu_reg_dump *rsuspend,
			  unsigned long nr_rsuspend)
{
	struct sunxi_clock_reg_cache *reg_cache;

	reg_cache = kzalloc(sizeof(struct sunxi_clock_reg_cache),
			    GFP_KERNEL);
	if (!reg_cache)
		panic("could not allocate register cache.\n");

	reg_cache->rdump = ccu_alloc_reg_dump(rdump, nr_rdump);
	if (!reg_cache->rdump)
		panic("could not allocate register dump storage.\n");

	if (list_empty(&ccu_reg_cache_list))
		register_syscore_ops(&sunxi_clk_syscore_ops);

	reg_cache->reg_base = reg_base;
	reg_cache->rd_num = nr_rdump;
	reg_cache->rsuspend = rsuspend;
	reg_cache->rsuspend_num = nr_rsuspend;
	list_add_tail(&reg_cache->node, &ccu_reg_cache_list);
}
#else
void sunxi_ccu_sleep_init(void __iomem *reg_base,
			  struct ccu_common **rdump,
			  unsigned long nr_rdump,
			  const struct ccu_reg_dump *rsuspend,
			  unsigned long nr_rsuspend)
{ }
#endif
EXPORT_SYMBOL_GPL(sunxi_ccu_sleep_init);

int sunxi_ccu_probe(struct device_node *node, void __iomem *reg,
		    const struct sunxi_ccu_desc *desc)
{
	struct ccu_reset *reset;
	int i, ret;

	for (i = 0; i < desc->num_ccu_clks; i++) {
		struct ccu_common *cclk = desc->ccu_clks[i];

		if (!cclk)
			continue;

		cclk->base = reg;
		cclk->lock = &ccu_lock;
	}

	for (i = 0; i < desc->hw_clks->num; i++) {
		struct clk_hw *hw = desc->hw_clks->hws[i];
		const char *name;

		if (!hw)
			continue;

		name = hw->init->name;
		ret = of_clk_hw_register(node, hw);
		if (ret) {
			pr_err("Couldn't register clock %d - %s\n", i, name);
			goto err_clk_unreg;
		}

		/* clkdev lookup added under this config for the SATA clock */
#ifdef CONFIG_COMMON_CLK_DEBUG
		clk_hw_register_clkdev(hw, name, NULL);
#endif
	}

	ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get,
				     desc->hw_clks);
	if (ret)
		goto err_clk_unreg;

	reset = kzalloc(sizeof(*reset), GFP_KERNEL);
	if (!reset) {
		ret = -ENOMEM;
		goto err_alloc_reset;
	}

	reset->rcdev.of_node = node;
	reset->rcdev.ops = &ccu_reset_ops;
	reset->rcdev.owner = THIS_MODULE;
	reset->rcdev.nr_resets = desc->num_resets;
	reset->base = reg;
	reset->lock = &ccu_lock;
	reset->reset_map = desc->resets;

	ret = reset_controller_register(&reset->rcdev);
	if (ret)
		goto err_of_clk_unreg;

	pr_info("%s: sunxi ccu init OK\n", node->name);

	return 0;

err_of_clk_unreg:
	kfree(reset);
err_alloc_reset:
	of_clk_del_provider(node);
err_clk_unreg:
	while (--i >= 0) {
		struct clk_hw *hw = desc->hw_clks->hws[i];

		if (!hw)
			continue;
		clk_hw_unregister(hw);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(sunxi_ccu_probe);

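/*
 * Read-modify-write helper: replace the @bw-bit-wide field at bit
 * offset @bs in the register at @addr with @val, leaving the other
 * bits untouched.
 */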
void set_reg(char __iomem *addr, u32 val, u8 bw, u8 bs)
{
	u32 mask = (1UL << bw) - 1UL;
	u32 tmp;

	tmp = readl(addr);
	tmp &= ~(mask << bs);

	writel(tmp | ((val & mask) << bs), addr);
}

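/*
 * Like set_reg(), but also writes a key field (@kbw bits at offset
 * @kbs) in the same access. Some Allwinner registers are
 * write-protected and only latch the data when a magic key value is
 * written alongside it.
 */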
void set_reg_key(char __iomem *addr,
		 u32 key, u8 kbw, u8 kbs,
		 u32 val, u8 bw, u8 bs)
{
	u32 mask = (1UL << bw) - 1UL;
	u32 kmask = (1UL << kbw) - 1UL;
	u32 tmp;

	tmp = readl(addr);
	tmp &= ~(mask << bs);

	writel(tmp | ((val & mask) << bs) | ((key & kmask) << kbs), addr);
}

MODULE_LICENSE("GPL");