• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, The Linux Foundation. All rights reserved.*/
3 
4 #include <linux/err.h>
5 #include <linux/init.h>
6 #include <linux/kernel.h>
7 #include <linux/mutex.h>
8 #include <linux/pm_domain.h>
9 #include <linux/slab.h>
10 #include <linux/of.h>
11 #include <linux/of_device.h>
12 #include <linux/platform_device.h>
13 #include <linux/pm_opp.h>
14 #include <soc/qcom/cmd-db.h>
15 #include <soc/qcom/rpmh.h>
16 #include <dt-bindings/power/qcom-rpmpd.h>
17 
/* Resolve a generic_pm_domain callback argument back to its rpmhpd. */
#define domain_to_rpmhpd(domain) container_of(domain, struct rpmhpd, pd)

/* Command DB aux data carries at most 16 two-byte level entries per ARC. */
#define RPMH_ARC_MAX_LEVELS	16

/**
 * struct rpmhpd - top level RPMh power domain resource data structure
 * @dev:		rpmh power domain controller device
 * @pd:			generic_pm_domain corresponding to the power domain
 * @parent:		generic_pm_domain of the parent power domain, added
 *			as a genpd subdomain link at probe time
 * @peer:		A peer power domain in case Active only Voting is
 *			supported
 * @active_only:	True if it represents an Active only peer
 * @corner:		current corner (index into @level)
 * @active_corner:	current active corner, i.e. the last corner sent as
 *			an ACTIVE_ONLY vote
 * @level:		An array of level (vlvl) to corner (hlvl) mappings
 *			derived from cmd-db
 * @level_count:	Number of levels supported by the power domain. max
 *			being 16 (0 - 15)
 * @enabled:		true if the power domain is enabled
 * @res_name:		Resource name used for cmd-db lookup
 * @addr:		Resource address as looked up using resource name from
 *			cmd-db
 */
struct rpmhpd {
	struct device	*dev;
	struct generic_pm_domain pd;
	struct generic_pm_domain *parent;
	struct rpmhpd	*peer;
	const bool	active_only;
	unsigned int	corner;
	unsigned int	active_corner;
	u32		level[RPMH_ARC_MAX_LEVELS];
	size_t		level_count;
	bool		enabled;
	const char	*res_name;
	u32		addr;
};
52 
/**
 * struct rpmhpd_desc - SoC-specific set of RPMh power domains
 * @rpmhpds:	array of power domain pointers, indexed by the DT binding
 *		constants from dt-bindings/power/qcom-rpmpd.h
 * @num_pds:	number of entries in @rpmhpds
 */
struct rpmhpd_desc {
	struct rpmhpd **rpmhpds;
	size_t num_pds;
};

/* Serializes corner aggregation and vote submission across all domains. */
static DEFINE_MUTEX(rpmhpd_lock);
59 
60 /* SDM845 RPMH powerdomains */
61 
62 static struct rpmhpd sdm845_ebi = {
63 	.pd = { .name = "ebi", },
64 	.res_name = "ebi.lvl",
65 };
66 
67 static struct rpmhpd sdm845_lmx = {
68 	.pd = { .name = "lmx", },
69 	.res_name = "lmx.lvl",
70 };
71 
72 static struct rpmhpd sdm845_lcx = {
73 	.pd = { .name = "lcx", },
74 	.res_name = "lcx.lvl",
75 };
76 
77 static struct rpmhpd sdm845_gfx = {
78 	.pd = { .name = "gfx", },
79 	.res_name = "gfx.lvl",
80 };
81 
82 static struct rpmhpd sdm845_mss = {
83 	.pd = { .name = "mss", },
84 	.res_name = "mss.lvl",
85 };
86 
87 static struct rpmhpd sdm845_mx_ao;
88 static struct rpmhpd sdm845_mx = {
89 	.pd = { .name = "mx", },
90 	.peer = &sdm845_mx_ao,
91 	.res_name = "mx.lvl",
92 };
93 
94 static struct rpmhpd sdm845_mx_ao = {
95 	.pd = { .name = "mx_ao", },
96 	.peer = &sdm845_mx,
97 	.res_name = "mx.lvl",
98 };
99 
100 static struct rpmhpd sdm845_cx_ao;
101 static struct rpmhpd sdm845_cx = {
102 	.pd = { .name = "cx", },
103 	.peer = &sdm845_cx_ao,
104 	.parent = &sdm845_mx.pd,
105 	.res_name = "cx.lvl",
106 };
107 
108 static struct rpmhpd sdm845_cx_ao = {
109 	.pd = { .name = "cx_ao", },
110 	.peer = &sdm845_cx,
111 	.parent = &sdm845_mx_ao.pd,
112 	.res_name = "cx.lvl",
113 };
114 
115 static struct rpmhpd *sdm845_rpmhpds[] = {
116 	[SDM845_EBI] = &sdm845_ebi,
117 	[SDM845_MX] = &sdm845_mx,
118 	[SDM845_MX_AO] = &sdm845_mx_ao,
119 	[SDM845_CX] = &sdm845_cx,
120 	[SDM845_CX_AO] = &sdm845_cx_ao,
121 	[SDM845_LMX] = &sdm845_lmx,
122 	[SDM845_LCX] = &sdm845_lcx,
123 	[SDM845_GFX] = &sdm845_gfx,
124 	[SDM845_MSS] = &sdm845_mss,
125 };
126 
/* Binds the SDM845 power-domain table to its DT compatible string. */
static const struct rpmhpd_desc sdm845_desc = {
	.rpmhpds = sdm845_rpmhpds,
	.num_pds = ARRAY_SIZE(sdm845_rpmhpds),
};

static const struct of_device_id rpmhpd_match_table[] = {
	{ .compatible = "qcom,sdm845-rpmhpd", .data = &sdm845_desc },
	{ }
};
136 
/*
 * Send a single corner vote for @pd's resource to RPMh for the given
 * @state (active/wake/sleep set).
 *
 * An ack from RPMh is awaited only when @sync is set, i.e. when the
 * caller is raising the performance state and must not proceed until the
 * new level is in effect; otherwise the vote is fired off asynchronously.
 */
static int rpmhpd_send_corner(struct rpmhpd *pd, int state,
			      unsigned int corner, bool sync)
{
	struct tcs_cmd cmd = {
		.addr = pd->addr,
		.data = corner,
	};

	return sync ? rpmh_write(pd->dev, state, &cmd, 1)
		    : rpmh_write_async(pd->dev, state, &cmd, 1);
}
154 
to_active_sleep(struct rpmhpd * pd,unsigned int corner,unsigned int * active,unsigned int * sleep)155 static void to_active_sleep(struct rpmhpd *pd, unsigned int corner,
156 			    unsigned int *active, unsigned int *sleep)
157 {
158 	*active = corner;
159 
160 	if (pd->active_only)
161 		*sleep = 0;
162 	else
163 		*sleep = *active;
164 }
165 
/*
 * This function is used to aggregate the votes across the active only
 * resources and its peers. The aggregated votes are sent to RPMh as
 * ACTIVE_ONLY votes (which take effect immediately), as WAKE_ONLY votes
 * (applied by RPMh on system wakeup) and as SLEEP votes (applied by RPMh
 * on system sleep).
 * We send ACTIVE_ONLY votes for resources without any peers. For others,
 * which have an active only peer, all 3 votes are sent.
 */
static int rpmhpd_aggregate_corner(struct rpmhpd *pd, unsigned int corner)
{
	int ret;
	struct rpmhpd *peer = pd->peer;
	unsigned int active_corner, sleep_corner;
	unsigned int this_active_corner = 0, this_sleep_corner = 0;
	unsigned int peer_active_corner = 0, peer_sleep_corner = 0;

	to_active_sleep(pd, corner, &this_active_corner, &this_sleep_corner);

	/* A disabled peer contributes no vote (its corners stay 0). */
	if (peer && peer->enabled)
		to_active_sleep(peer, peer->corner, &peer_active_corner,
				&peer_sleep_corner);

	active_corner = max(this_active_corner, peer_active_corner);

	/* Block for the ack only when the active vote is being raised. */
	ret = rpmhpd_send_corner(pd, RPMH_ACTIVE_ONLY_STATE, active_corner,
				 active_corner > pd->active_corner);
	if (ret)
		return ret;

	/* Record the aggregate so the next call can detect an increase. */
	pd->active_corner = active_corner;

	if (peer) {
		peer->active_corner = active_corner;

		/* WAKE vote: applied by RPMh on the next system wakeup. */
		ret = rpmhpd_send_corner(pd, RPMH_WAKE_ONLY_STATE,
					 active_corner, false);
		if (ret)
			return ret;

		sleep_corner = max(this_sleep_corner, peer_sleep_corner);

		/* SLEEP vote: applied by RPMh when the system sleeps. */
		return rpmhpd_send_corner(pd, RPMH_SLEEP_STATE, sleep_corner,
					  false);
	}

	return ret;
}
214 
rpmhpd_power_on(struct generic_pm_domain * domain)215 static int rpmhpd_power_on(struct generic_pm_domain *domain)
216 {
217 	struct rpmhpd *pd = domain_to_rpmhpd(domain);
218 	int ret = 0;
219 
220 	mutex_lock(&rpmhpd_lock);
221 
222 	if (pd->corner)
223 		ret = rpmhpd_aggregate_corner(pd, pd->corner);
224 
225 	if (!ret)
226 		pd->enabled = true;
227 
228 	mutex_unlock(&rpmhpd_lock);
229 
230 	return ret;
231 }
232 
rpmhpd_power_off(struct generic_pm_domain * domain)233 static int rpmhpd_power_off(struct generic_pm_domain *domain)
234 {
235 	struct rpmhpd *pd = domain_to_rpmhpd(domain);
236 	int ret = 0;
237 
238 	mutex_lock(&rpmhpd_lock);
239 
240 	ret = rpmhpd_aggregate_corner(pd, pd->level[0]);
241 
242 	if (!ret)
243 		pd->enabled = false;
244 
245 	mutex_unlock(&rpmhpd_lock);
246 
247 	return ret;
248 }
249 
rpmhpd_set_performance_state(struct generic_pm_domain * domain,unsigned int level)250 static int rpmhpd_set_performance_state(struct generic_pm_domain *domain,
251 					unsigned int level)
252 {
253 	struct rpmhpd *pd = domain_to_rpmhpd(domain);
254 	int ret = 0, i;
255 
256 	mutex_lock(&rpmhpd_lock);
257 
258 	for (i = 0; i < pd->level_count; i++)
259 		if (level <= pd->level[i])
260 			break;
261 
262 	/*
263 	 * If the level requested is more than that supported by the
264 	 * max corner, just set it to max anyway.
265 	 */
266 	if (i == pd->level_count)
267 		i--;
268 
269 	if (pd->enabled) {
270 		ret = rpmhpd_aggregate_corner(pd, i);
271 		if (ret)
272 			goto out;
273 	}
274 
275 	pd->corner = i;
276 out:
277 	mutex_unlock(&rpmhpd_lock);
278 
279 	return ret;
280 }
281 
/*
 * genpd ->opp_to_performance_state callback: the performance state of an
 * rpmhpd OPP is simply the OPP's level value.
 */
static unsigned int rpmhpd_get_performance_state(struct generic_pm_domain *genpd,
						 struct dev_pm_opp *opp)
{
	unsigned int level = dev_pm_opp_get_level(opp);

	return level;
}
287 
/*
 * Read the resource's auxiliary data from the RPMh command DB and build
 * the corner (array index, hlvl) -> level (value, vlvl) table used by
 * rpmhpd_set_performance_state().
 *
 * Returns 0 on success, a negative errno from the cmd-db lookup, or
 * -EINVAL if the resource advertises more levels than fit in @level.
 */
static int rpmhpd_update_level_mapping(struct rpmhpd *rpmhpd)
{
	int i;
	const u16 *buf;

	/* level_count is set to the aux-data byte length by this call. */
	buf = cmd_db_read_aux_data(rpmhpd->res_name, &rpmhpd->level_count);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* 2 bytes used for each command DB aux data entry */
	rpmhpd->level_count >>= 1;

	if (rpmhpd->level_count > RPMH_ARC_MAX_LEVELS)
		return -EINVAL;

	for (i = 0; i < rpmhpd->level_count; i++) {
		rpmhpd->level[i] = buf[i];

		/*
		 * The AUX data may be zero padded.  These 0 valued entries at
		 * the end of the map must be ignored.
		 */
		if (i > 0 && rpmhpd->level[i] == 0) {
			rpmhpd->level_count = i;
			break;
		}
		pr_debug("%s: ARC hlvl=%2d --> vlvl=%4u\n", rpmhpd->res_name, i,
			 rpmhpd->level[i]);
	}

	return 0;
}
320 
/*
 * Probe: for each power domain in the matched SoC descriptor, resolve its
 * resource in the RPMh command DB, build its level table, register it
 * with the genpd framework, wire up parent/child (subdomain) links, and
 * expose all domains through a onecell genpd provider.
 */
static int rpmhpd_probe(struct platform_device *pdev)
{
	int i, ret;
	size_t num_pds;
	struct device *dev = &pdev->dev;
	struct genpd_onecell_data *data;
	struct rpmhpd **rpmhpds;
	const struct rpmhpd_desc *desc;

	desc = of_device_get_match_data(dev);
	if (!desc)
		return -EINVAL;

	rpmhpds = desc->rpmhpds;
	num_pds = desc->num_pds;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->domains = devm_kcalloc(dev, num_pds, sizeof(*data->domains),
				     GFP_KERNEL);
	if (!data->domains)
		return -ENOMEM;

	data->num_domains = num_pds;

	for (i = 0; i < num_pds; i++) {
		/* Holes in the descriptor table are tolerated, not fatal. */
		if (!rpmhpds[i]) {
			dev_warn(dev, "rpmhpds[%d] is empty\n", i);
			continue;
		}

		rpmhpds[i]->dev = dev;
		rpmhpds[i]->addr = cmd_db_read_addr(rpmhpds[i]->res_name);
		if (!rpmhpds[i]->addr) {
			dev_err(dev, "Could not find RPMh address for resource %s\n",
				rpmhpds[i]->res_name);
			return -ENODEV;
		}

		/* Power domains must be backed by an ARC resource. */
		ret = cmd_db_read_slave_id(rpmhpds[i]->res_name);
		if (ret != CMD_DB_HW_ARC) {
			dev_err(dev, "RPMh slave ID mismatch\n");
			return -EINVAL;
		}

		ret = rpmhpd_update_level_mapping(rpmhpds[i]);
		if (ret)
			return ret;

		rpmhpds[i]->pd.power_off = rpmhpd_power_off;
		rpmhpds[i]->pd.power_on = rpmhpd_power_on;
		rpmhpds[i]->pd.set_performance_state = rpmhpd_set_performance_state;
		rpmhpds[i]->pd.opp_to_performance_state = rpmhpd_get_performance_state;
		/* Register the domain initially off (third argument true). */
		pm_genpd_init(&rpmhpds[i]->pd, NULL, true);

		data->domains[i] = &rpmhpds[i]->pd;
	}

	/* Add subdomains */
	for (i = 0; i < num_pds; i++) {
		if (!rpmhpds[i])
			continue;
		if (rpmhpds[i]->parent)
			pm_genpd_add_subdomain(rpmhpds[i]->parent,
					       &rpmhpds[i]->pd);
	}

	return of_genpd_add_provider_onecell(pdev->dev.of_node, data);
}
392 
static struct platform_driver rpmhpd_driver = {
	.driver = {
		.name = "qcom-rpmhpd",
		.of_match_table = rpmhpd_match_table,
		/* Power-domain providers must not be unbound at runtime. */
		.suppress_bind_attrs = true,
	},
	.probe = rpmhpd_probe,
};

/* core_initcall: register early so domains exist before device_initcall
 * consumers probe. No exit path — the driver cannot be unloaded. */
static int __init rpmhpd_init(void)
{
	return platform_driver_register(&rpmhpd_driver);
}
core_initcall(rpmhpd_init);
407