// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2012 Freescale Semiconductor, Inc.
 * Copyright 2012 Linaro Ltd.
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/err.h>
#include "clk.h"

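/*
 * Poll the busy flag at bit @shift of @reg until the hardware clears it,
 * giving up after roughly 10 ms. Returns 0 on success or -ETIMEDOUT if
 * the flag never clears.
 */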
static int clk_busy_wait(void __iomem *reg, u8 shift)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(10);

	while (readl_relaxed(reg) & (1 << shift))
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;

	return 0;
}

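/*
 * A clk_busy_divider wraps a standard clk_divider and, after every rate
 * change, waits on a separate "busy" status bit so the rate change only
 * completes once the hardware has actually switched over.
 */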
struct clk_busy_divider {
	struct clk_divider div;
	const struct clk_ops *div_ops;
	void __iomem *reg;
	u8 shift;
};

static inline struct clk_busy_divider *to_clk_busy_divider(struct clk_hw *hw)
{
	struct clk_divider *div = to_clk_divider(hw);

	return container_of(div, struct clk_busy_divider, div);
}

static unsigned long clk_busy_divider_recalc_rate(struct clk_hw *hw,
						  unsigned long parent_rate)
{
	struct clk_busy_divider *busy = to_clk_busy_divider(hw);

	return busy->div_ops->recalc_rate(&busy->div.hw, parent_rate);
}

static long clk_busy_divider_round_rate(struct clk_hw *hw, unsigned long rate,
					unsigned long *prate)
{
	struct clk_busy_divider *busy = to_clk_busy_divider(hw);

	return busy->div_ops->round_rate(&busy->div.hw, rate, prate);
}

static int clk_busy_divider_set_rate(struct clk_hw *hw, unsigned long rate,
				     unsigned long parent_rate)
{
	struct clk_busy_divider *busy = to_clk_busy_divider(hw);
	int ret;

	ret = busy->div_ops->set_rate(&busy->div.hw, rate, parent_rate);
	if (!ret)
		ret = clk_busy_wait(busy->reg, busy->shift);

	return ret;
}

static const struct clk_ops clk_busy_divider_ops = {
	.recalc_rate = clk_busy_divider_recalc_rate,
	.round_rate = clk_busy_divider_round_rate,
	.set_rate = clk_busy_divider_set_rate,
};

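/**
 * imx_clk_hw_busy_divider - register a divider clock with a busy flag
 * @name: name of the clock
 * @parent_name: name of the parent clock
 * @reg: register holding the divider field
 * @shift: offset of the divider field within @reg
 * @shift: offset of the divider field within @reg
 * @width: width of the divider field in bits
 * @busy_reg: register holding the busy status bit
 * @busy_shift: offset of the busy bit within @busy_reg
 *
 * Return: the registered clk_hw on success, or an ERR_PTR() on failure.
 *
 * Illustrative call from a platform clock driver (the clock names,
 * register offsets and bit positions below are placeholders only, not
 * taken from any particular SoC):
 *
 *	hw = imx_clk_hw_busy_divider("axi", "axi_sel",
 *				     ccm_base + 0x14, 16, 3,
 *				     ccm_base + 0x48, 0);
 */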
struct clk_hw *imx_clk_hw_busy_divider(const char *name, const char *parent_name,
				       void __iomem *reg, u8 shift, u8 width,
				       void __iomem *busy_reg, u8 busy_shift)
{
	struct clk_busy_divider *busy;
	struct clk_hw *hw;
	struct clk_init_data init;
	int ret;

	busy = kzalloc(sizeof(*busy), GFP_KERNEL);
	if (!busy)
		return ERR_PTR(-ENOMEM);

	busy->reg = busy_reg;
	busy->shift = busy_shift;

	busy->div.reg = reg;
	busy->div.shift = shift;
	busy->div.width = width;
	busy->div.lock = &imx_ccm_lock;
	busy->div_ops = &clk_divider_ops;

	init.name = name;
	init.ops = &clk_busy_divider_ops;
	init.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	busy->div.hw.init = &init;

	hw = &busy->div.hw;

	ret = clk_hw_register(NULL, hw);
	if (ret) {
		kfree(busy);
		return ERR_PTR(ret);
	}

	return hw;
}

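/*
 * A clk_busy_mux wraps a standard clk_mux and, after every parent switch,
 * waits on a separate "busy" status bit so the reparent only completes
 * once the hardware handshake has finished.
 */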
struct clk_busy_mux {
	struct clk_mux mux;
	const struct clk_ops *mux_ops;
	void __iomem *reg;
	u8 shift;
};

static inline struct clk_busy_mux *to_clk_busy_mux(struct clk_hw *hw)
{
	struct clk_mux *mux = to_clk_mux(hw);

	return container_of(mux, struct clk_busy_mux, mux);
}

static u8 clk_busy_mux_get_parent(struct clk_hw *hw)
{
	struct clk_busy_mux *busy = to_clk_busy_mux(hw);

	return busy->mux_ops->get_parent(&busy->mux.hw);
}

static int clk_busy_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_busy_mux *busy = to_clk_busy_mux(hw);
	int ret;

	ret = busy->mux_ops->set_parent(&busy->mux.hw, index);
	if (!ret)
		ret = clk_busy_wait(busy->reg, busy->shift);

	return ret;
}

static const struct clk_ops clk_busy_mux_ops = {
	.get_parent = clk_busy_mux_get_parent,
	.set_parent = clk_busy_mux_set_parent,
};

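/**
 * imx_clk_hw_busy_mux - register a mux clock with a busy flag
 * @name: name of the clock
 * @reg: register holding the mux selector field
 * @shift: offset of the selector field within @reg
 * @width: width of the selector field in bits
 * @busy_reg: register holding the busy status bit
 * @busy_shift: offset of the busy bit within @busy_reg
 * @parent_names: array of possible parent clock names
 * @num_parents: number of entries in @parent_names
 *
 * Return: the registered clk_hw on success, or an ERR_PTR() on failure.
 *
 * Illustrative call (clock names, offsets and bit positions are
 * placeholders only):
 *
 *	static const char * const periph_sels[] = { "pll2_bus", "periph_clk2", };
 *
 *	hw = imx_clk_hw_busy_mux("periph", ccm_base + 0x14, 25, 1,
 *				 ccm_base + 0x48, 5,
 *				 periph_sels, ARRAY_SIZE(periph_sels));
 */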
struct clk_hw *imx_clk_hw_busy_mux(const char *name, void __iomem *reg, u8 shift,
				   u8 width, void __iomem *busy_reg, u8 busy_shift,
				   const char * const *parent_names, int num_parents)
{
	struct clk_busy_mux *busy;
	struct clk_hw *hw;
	struct clk_init_data init;
	int ret;

	busy = kzalloc(sizeof(*busy), GFP_KERNEL);
	if (!busy)
		return ERR_PTR(-ENOMEM);

	busy->reg = busy_reg;
	busy->shift = busy_shift;

	busy->mux.reg = reg;
	busy->mux.shift = shift;
	busy->mux.mask = BIT(width) - 1;
	busy->mux.lock = &imx_ccm_lock;
	busy->mux_ops = &clk_mux_ops;

	init.name = name;
	init.ops = &clk_busy_mux_ops;
	init.flags = CLK_IS_CRITICAL;
	init.parent_names = parent_names;
	init.num_parents = num_parents;

	busy->mux.hw.init = &init;

	hw = &busy->mux.hw;

	ret = clk_hw_register(NULL, hw);
	if (ret) {
		kfree(busy);
		return ERR_PTR(ret);
	}

	return hw;
}