1 /*
2  * Generic OPP Interface
3  *
4  * Copyright (C) 2009-2010 Texas Instruments Incorporated.
5  *	Nishanth Menon
6  *	Romit Dasgupta
7  *	Kevin Hilman
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License version 2 as
11  * published by the Free Software Foundation.
12  */
13 
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15 
16 #include <linux/clk.h>
17 #include <linux/errno.h>
18 #include <linux/err.h>
19 #include <linux/slab.h>
20 #include <linux/device.h>
21 #include <linux/export.h>
22 #include <linux/regulator/consumer.h>
23 
24 #include "opp.h"
25 
26 /*
27  * The root of the list of all opp-tables. All opp_table structures branch off
28  * from here, with each opp_table containing the list of opps it supports in
29  * various states of availability.
30  */
31 LIST_HEAD(opp_tables);
32 /* Lock to allow exclusive modification to the device and opp lists */
33 DEFINE_MUTEX(opp_table_lock);
34 
35 #define opp_rcu_lockdep_assert()					\
36 do {									\
37 	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
38 			 !lockdep_is_held(&opp_table_lock),		\
39 			 "Missing rcu_read_lock() or "			\
40 			 "opp_table_lock protection");			\
41 } while (0)
42 
43 static struct opp_device *_find_opp_dev(const struct device *dev,
44 					struct opp_table *opp_table)
45 {
46 	struct opp_device *opp_dev;
47 
48 	list_for_each_entry(opp_dev, &opp_table->dev_list, node)
49 		if (opp_dev->dev == dev)
50 			return opp_dev;
51 
52 	return NULL;
53 }
54 
55 /**
56  * _find_opp_table() - find opp_table struct using device pointer
57  * @dev:	device pointer used to lookup OPP table
58  *
59  * Search OPP table for one containing matching device. Does a RCU reader
60  * operation to grab the pointer needed.
61  *
62  * Return: pointer to 'struct opp_table' if found, otherwise -ENODEV or
63  * -EINVAL based on type of error.
64  *
65  * Locking: For readers, this function must be called under rcu_read_lock().
66  * opp_table is a RCU protected pointer, which means that opp_table is valid
67  * as long as we are under RCU lock.
68  *
69  * For Writers, this function must be called with opp_table_lock held.
70  */
71 struct opp_table *_find_opp_table(struct device *dev)
72 {
73 	struct opp_table *opp_table;
74 
75 	opp_rcu_lockdep_assert();
76 
77 	if (IS_ERR_OR_NULL(dev)) {
78 		pr_err("%s: Invalid parameters\n", __func__);
79 		return ERR_PTR(-EINVAL);
80 	}
81 
82 	list_for_each_entry_rcu(opp_table, &opp_tables, node)
83 		if (_find_opp_dev(dev, opp_table))
84 			return opp_table;
85 
86 	return ERR_PTR(-ENODEV);
87 }
88 
89 /**
90  * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp
91  * @opp:	opp for which the voltage has to be returned
92  *
93  * Return: voltage in micro volt corresponding to the opp, else
94  * return 0
95  *
96  * Locking: This function must be called under rcu_read_lock(). opp is a rcu
97  * protected pointer. This means that opp which could have been fetched by
98  * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
99  * under RCU lock. The pointer returned by the opp_find_freq family must be
100  * used in the same section as the usage of this function with the pointer
101  * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
102  * pointer.
103  */
104 unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
105 {
106 	struct dev_pm_opp *tmp_opp;
107 	unsigned long v = 0;
108 
109 	opp_rcu_lockdep_assert();
110 
111 	tmp_opp = rcu_dereference(opp);
112 	if (IS_ERR_OR_NULL(tmp_opp))
113 		pr_err("%s: Invalid parameters\n", __func__);
114 	else
115 		v = tmp_opp->u_volt;
116 
117 	return v;
118 }
119 EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);
120 
121 /**
122  * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
123  * @opp:	opp for which the frequency has to be returned
124  *
125  * Return: frequency in hertz corresponding to the opp, else
126  * return 0
127  *
128  * Locking: This function must be called under rcu_read_lock(). opp is a rcu
129  * protected pointer. This means that opp which could have been fetched by
130  * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
131  * under RCU lock. The pointer returned by the opp_find_freq family must be
132  * used in the same section as the usage of this function with the pointer
133  * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
134  * pointer.
135  */
136 unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
137 {
138 	struct dev_pm_opp *tmp_opp;
139 	unsigned long f = 0;
140 
141 	opp_rcu_lockdep_assert();
142 
143 	tmp_opp = rcu_dereference(opp);
144 	if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available)
145 		pr_err("%s: Invalid parameters\n", __func__);
146 	else
147 		f = tmp_opp->rate;
148 
149 	return f;
150 }
151 EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
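
/*
 * Illustrative sketch (not part of this file): a typical reader looks up an
 * OPP and reads its frequency/voltage while holding the RCU read lock, as
 * required by the locking rules documented above. The device pointer 'dev'
 * and the start frequency are placeholders.
 *
 *	unsigned long freq = 1000000000, volt;
 *	struct dev_pm_opp *opp;
 *
 *	rcu_read_lock();
 *	opp = dev_pm_opp_find_freq_ceil(dev, &freq);
 *	if (!IS_ERR(opp)) {
 *		volt = dev_pm_opp_get_voltage(opp);
 *		freq = dev_pm_opp_get_freq(opp);
 *	}
 *	rcu_read_unlock();
 */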
152 
153 /**
154  * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not
155  * @opp: opp for which turbo mode is being verified
156  *
157  * Turbo OPPs are not for normal use, and can be enabled (under certain
158  * conditions) for short durations of time to finish high throughput work
159  * quickly. Running on them for longer times may overheat the chip.
160  *
161  * Return: true if opp is turbo opp, else false.
162  *
163  * Locking: This function must be called under rcu_read_lock(). opp is a rcu
164  * protected pointer. This means that opp which could have been fetched by
165  * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
166  * under RCU lock. The pointer returned by the opp_find_freq family must be
167  * used in the same section as the usage of this function with the pointer
168  * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
169  * pointer.
170  */
171 bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
172 {
173 	struct dev_pm_opp *tmp_opp;
174 
175 	opp_rcu_lockdep_assert();
176 
177 	tmp_opp = rcu_dereference(opp);
178 	if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) {
179 		pr_err("%s: Invalid parameters\n", __func__);
180 		return false;
181 	}
182 
183 	return tmp_opp->turbo;
184 }
185 EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);
186 
187 /**
188  * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds
189  * @dev:	device for which we do this operation
190  *
191  * Return: This function returns the max clock latency in nanoseconds.
192  *
193  * Locking: This function takes rcu_read_lock().
194  */
195 unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
196 {
197 	struct opp_table *opp_table;
198 	unsigned long clock_latency_ns;
199 
200 	rcu_read_lock();
201 
202 	opp_table = _find_opp_table(dev);
203 	if (IS_ERR(opp_table))
204 		clock_latency_ns = 0;
205 	else
206 		clock_latency_ns = opp_table->clock_latency_ns_max;
207 
208 	rcu_read_unlock();
209 	return clock_latency_ns;
210 }
211 EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
212 
213 /**
214  * dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds
215  * @dev: device for which we do this operation
216  *
217  * Return: This function returns the max voltage latency in nanoseconds.
218  *
219  * Locking: This function takes rcu_read_lock().
220  */
221 unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
222 {
223 	struct opp_table *opp_table;
224 	struct dev_pm_opp *opp;
225 	struct regulator *reg;
226 	unsigned long latency_ns = 0;
227 	unsigned long min_uV = ~0, max_uV = 0;
228 	int ret;
229 
230 	rcu_read_lock();
231 
232 	opp_table = _find_opp_table(dev);
233 	if (IS_ERR(opp_table)) {
234 		rcu_read_unlock();
235 		return 0;
236 	}
237 
238 	reg = opp_table->regulator;
239 	if (IS_ERR(reg)) {
240 		/* Regulator may not be required for device */
241 		rcu_read_unlock();
242 		return 0;
243 	}
244 
245 	list_for_each_entry_rcu(opp, &opp_table->opp_list, node) {
246 		if (!opp->available)
247 			continue;
248 
249 		if (opp->u_volt_min < min_uV)
250 			min_uV = opp->u_volt_min;
251 		if (opp->u_volt_max > max_uV)
252 			max_uV = opp->u_volt_max;
253 	}
254 
255 	rcu_read_unlock();
256 
257 	/*
258 	 * The caller needs to ensure that opp_table (and hence the regulator)
259 	 * isn't freed, while we are executing this routine.
260 	 */
261 	ret = regulator_set_voltage_time(reg, min_uV, max_uV);
262 	if (ret > 0)
263 		latency_ns = ret * 1000;
264 
265 	return latency_ns;
266 }
267 EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_volt_latency);
268 
269 /**
270  * dev_pm_opp_get_max_transition_latency() - Get max transition latency in
271  *					     nanoseconds
272  * @dev: device for which we do this operation
273  *
274  * Return: This function returns the max transition latency, in nanoseconds, to
275  * switch from one OPP to other.
276  *
277  * Locking: This function takes rcu_read_lock().
278  */
279 unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev)
280 {
281 	return dev_pm_opp_get_max_volt_latency(dev) +
282 		dev_pm_opp_get_max_clock_latency(dev);
283 }
284 EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency);
285 
286 /**
287  * dev_pm_opp_get_suspend_opp() - Get suspend opp
288  * @dev:	device for which we do this operation
289  *
290  * Return: This function returns pointer to the suspend opp if it is
291  * defined and available, otherwise it returns NULL.
292  *
293  * Locking: This function must be called under rcu_read_lock(). opp is a rcu
294  * protected pointer. The reason for the same is that the opp pointer which is
295  * returned will remain valid for use with opp_get_{voltage, freq} only while
296  * under the locked area. The pointer returned must be used prior to unlocking
297  * with rcu_read_unlock() to maintain the integrity of the pointer.
298  */
299 struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
300 {
301 	struct opp_table *opp_table;
302 
303 	opp_rcu_lockdep_assert();
304 
305 	opp_table = _find_opp_table(dev);
306 	if (IS_ERR(opp_table) || !opp_table->suspend_opp ||
307 	    !opp_table->suspend_opp->available)
308 		return NULL;
309 
310 	return opp_table->suspend_opp;
311 }
312 EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp);
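
/*
 * Illustrative sketch (not part of this file): reading the suspend frequency
 * for a hypothetical device 'dev'. The returned opp pointer is only valid
 * while the RCU read lock is held.
 *
 *	unsigned long suspend_freq = 0;
 *	struct dev_pm_opp *opp;
 *
 *	rcu_read_lock();
 *	opp = dev_pm_opp_get_suspend_opp(dev);
 *	if (opp)
 *		suspend_freq = dev_pm_opp_get_freq(opp);
 *	rcu_read_unlock();
 */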
313 
314 /**
315  * dev_pm_opp_get_opp_count() - Get number of opps available in the opp table
316  * @dev:	device for which we do this operation
317  *
318  * Return: This function returns the number of available opps if there are any,
319  * else returns 0 if none or the corresponding error value.
320  *
321  * Locking: This function takes rcu_read_lock().
322  */
323 int dev_pm_opp_get_opp_count(struct device *dev)
324 {
325 	struct opp_table *opp_table;
326 	struct dev_pm_opp *temp_opp;
327 	int count = 0;
328 
329 	rcu_read_lock();
330 
331 	opp_table = _find_opp_table(dev);
332 	if (IS_ERR(opp_table)) {
333 		count = PTR_ERR(opp_table);
334 		dev_dbg(dev, "%s: OPP table not found (%d)\n",
335 			__func__, count);
336 		goto out_unlock;
337 	}
338 
339 	list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
340 		if (temp_opp->available)
341 			count++;
342 	}
343 
344 out_unlock:
345 	rcu_read_unlock();
346 	return count;
347 }
348 EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
349 
350 /**
351  * dev_pm_opp_find_freq_exact() - search for an exact frequency
352  * @dev:		device for which we do this operation
353  * @freq:		frequency to search for
354  * @available:		true/false - match for available opp
355  *
356  * Return: Searches for exact match in the opp table and returns pointer to the
357  * matching opp if found, else returns ERR_PTR in case of error and should
358  * be handled using IS_ERR. Error return values can be:
359  * EINVAL:	for bad pointer
360  * ERANGE:	no match found for search
361  * ENODEV:	if device not found in list of registered devices
362  *
363  * Note: available is a modifier for the search. if available=true, then the
364  * match is for exact matching frequency and is available in the stored OPP
365  * table. if false, the match is for exact frequency which is not available.
366  *
367  * This provides a mechanism to enable an opp which is not available currently
368  * or the opposite as well.
369  *
370  * Locking: This function must be called under rcu_read_lock(). opp is a rcu
371  * protected pointer. The reason for the same is that the opp pointer which is
372  * returned will remain valid for use with opp_get_{voltage, freq} only while
373  * under the locked area. The pointer returned must be used prior to unlocking
374  * with rcu_read_unlock() to maintain the integrity of the pointer.
375  */
376 struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
377 					      unsigned long freq,
378 					      bool available)
379 {
380 	struct opp_table *opp_table;
381 	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
382 
383 	opp_rcu_lockdep_assert();
384 
385 	opp_table = _find_opp_table(dev);
386 	if (IS_ERR(opp_table)) {
387 		int r = PTR_ERR(opp_table);
388 
389 		dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
390 		return ERR_PTR(r);
391 	}
392 
393 	list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
394 		if (temp_opp->available == available &&
395 				temp_opp->rate == freq) {
396 			opp = temp_opp;
397 			break;
398 		}
399 	}
400 
401 	return opp;
402 }
403 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
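
/*
 * Illustrative sketch (not part of this file): checking whether a currently
 * unavailable OPP exists at an exact frequency and then enabling it. The
 * lookup is done under RCU, while dev_pm_opp_enable() must be called outside
 * of it. 'dev' and 'freq' are placeholders.
 *
 *	struct dev_pm_opp *opp;
 *	bool found;
 *
 *	rcu_read_lock();
 *	opp = dev_pm_opp_find_freq_exact(dev, freq, false);
 *	found = !IS_ERR(opp);
 *	rcu_read_unlock();
 *
 *	if (found)
 *		dev_pm_opp_enable(dev, freq);
 */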
404 
405 static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table,
406 						   unsigned long *freq)
407 {
408 	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
409 
410 	list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
411 		if (temp_opp->available && temp_opp->rate >= *freq) {
412 			opp = temp_opp;
413 			*freq = opp->rate;
414 			break;
415 		}
416 	}
417 
418 	return opp;
419 }
420 
421 /**
422  * dev_pm_opp_find_freq_ceil() - Search for a rounded ceil freq
423  * @dev:	device for which we do this operation
424  * @freq:	Start frequency
425  *
426  * Search for the matching ceil *available* OPP from a starting freq
427  * for a device.
428  *
429  * Return: matching *opp and refreshes *freq accordingly, else returns
430  * ERR_PTR in case of error and should be handled using IS_ERR. Error return
431  * values can be:
432  * EINVAL:	for bad pointer
433  * ERANGE:	no match found for search
434  * ENODEV:	if device not found in list of registered devices
435  *
436  * Locking: This function must be called under rcu_read_lock(). opp is a rcu
437  * protected pointer. The reason for the same is that the opp pointer which is
438  * returned will remain valid for use with opp_get_{voltage, freq} only while
439  * under the locked area. The pointer returned must be used prior to unlocking
440  * with rcu_read_unlock() to maintain the integrity of the pointer.
441  */
442 struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
443 					     unsigned long *freq)
444 {
445 	struct opp_table *opp_table;
446 
447 	opp_rcu_lockdep_assert();
448 
449 	if (!dev || !freq) {
450 		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
451 		return ERR_PTR(-EINVAL);
452 	}
453 
454 	opp_table = _find_opp_table(dev);
455 	if (IS_ERR(opp_table))
456 		return ERR_CAST(opp_table);
457 
458 	return _find_freq_ceil(opp_table, freq);
459 }
460 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
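
/*
 * Illustrative sketch (not part of this file): walking all available OPPs in
 * ascending frequency order, the usual pattern for building a frequency
 * table. 'dev' is a placeholder.
 *
 *	unsigned long freq;
 *	struct dev_pm_opp *opp;
 *
 *	rcu_read_lock();
 *	for (freq = 0; ; freq++) {
 *		opp = dev_pm_opp_find_freq_ceil(dev, &freq);
 *		if (IS_ERR(opp))
 *			break;
 *		pr_info("OPP: %lu Hz, %lu uV\n", freq,
 *			dev_pm_opp_get_voltage(opp));
 *	}
 *	rcu_read_unlock();
 */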
461 
462 /**
463  * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
464  * @dev:	device for which we do this operation
465  * @freq:	Start frequency
466  *
467  * Search for the matching floor *available* OPP from a starting freq
468  * for a device.
469  *
470  * Return: matching *opp and refreshes *freq accordingly, else returns
471  * ERR_PTR in case of error and should be handled using IS_ERR. Error return
472  * values can be:
473  * EINVAL:	for bad pointer
474  * ERANGE:	no match found for search
475  * ENODEV:	if device not found in list of registered devices
476  *
477  * Locking: This function must be called under rcu_read_lock(). opp is a rcu
478  * protected pointer. The reason for the same is that the opp pointer which is
479  * returned will remain valid for use with opp_get_{voltage, freq} only while
480  * under the locked area. The pointer returned must be used prior to unlocking
481  * with rcu_read_unlock() to maintain the integrity of the pointer.
482  */
483 struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
484 					      unsigned long *freq)
485 {
486 	struct opp_table *opp_table;
487 	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
488 
489 	opp_rcu_lockdep_assert();
490 
491 	if (!dev || !freq) {
492 		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
493 		return ERR_PTR(-EINVAL);
494 	}
495 
496 	opp_table = _find_opp_table(dev);
497 	if (IS_ERR(opp_table))
498 		return ERR_CAST(opp_table);
499 
500 	list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
501 		if (temp_opp->available) {
502 			/* go to the next node, before choosing prev */
503 			if (temp_opp->rate > *freq)
504 				break;
505 			else
506 				opp = temp_opp;
507 		}
508 	}
509 	if (!IS_ERR(opp))
510 		*freq = opp->rate;
511 
512 	return opp;
513 }
514 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
515 
516 /*
517  * The caller needs to ensure that opp_table (and hence the clk) isn't freed,
518  * while clk returned here is used.
519  */
520 static struct clk *_get_opp_clk(struct device *dev)
521 {
522 	struct opp_table *opp_table;
523 	struct clk *clk;
524 
525 	rcu_read_lock();
526 
527 	opp_table = _find_opp_table(dev);
528 	if (IS_ERR(opp_table)) {
529 		dev_err(dev, "%s: device opp doesn't exist\n", __func__);
530 		clk = ERR_CAST(opp_table);
531 		goto unlock;
532 	}
533 
534 	clk = opp_table->clk;
535 	if (IS_ERR(clk))
536 		dev_err(dev, "%s: No clock available for the device\n",
537 			__func__);
538 
539 unlock:
540 	rcu_read_unlock();
541 	return clk;
542 }
543 
544 static int _set_opp_voltage(struct device *dev, struct regulator *reg,
545 			    unsigned long u_volt, unsigned long u_volt_min,
546 			    unsigned long u_volt_max)
547 {
548 	int ret;
549 
550 	/* Regulator not available for device */
551 	if (IS_ERR(reg)) {
552 		dev_dbg(dev, "%s: regulator not available: %ld\n", __func__,
553 			PTR_ERR(reg));
554 		return 0;
555 	}
556 
557 	dev_dbg(dev, "%s: voltages (mV): %lu %lu %lu\n", __func__, u_volt_min,
558 		u_volt, u_volt_max);
559 
560 	ret = regulator_set_voltage_triplet(reg, u_volt_min, u_volt,
561 					    u_volt_max);
562 	if (ret)
563 		dev_err(dev, "%s: failed to set voltage (%lu %lu %lu mV): %d\n",
564 			__func__, u_volt_min, u_volt, u_volt_max, ret);
565 
566 	return ret;
567 }
568 
569 /**
570  * dev_pm_opp_set_rate() - Configure new OPP based on frequency
571  * @dev:	 device for which we do this operation
572  * @target_freq: frequency to achieve
573  *
574  * This configures the power-supplies and clock source to the levels specified
575  * by the OPP corresponding to the target_freq.
576  *
577  * Locking: This function takes rcu_read_lock().
578  */
579 int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
580 {
581 	struct opp_table *opp_table;
582 	struct dev_pm_opp *old_opp, *opp;
583 	struct regulator *reg;
584 	struct clk *clk;
585 	unsigned long freq, old_freq;
586 	unsigned long u_volt, u_volt_min, u_volt_max;
587 	unsigned long old_u_volt, old_u_volt_min, old_u_volt_max;
588 	int ret;
589 
590 	if (unlikely(!target_freq)) {
591 		dev_err(dev, "%s: Invalid target frequency %lu\n", __func__,
592 			target_freq);
593 		return -EINVAL;
594 	}
595 
596 	clk = _get_opp_clk(dev);
597 	if (IS_ERR(clk))
598 		return PTR_ERR(clk);
599 
600 	freq = clk_round_rate(clk, target_freq);
601 	if ((long)freq <= 0)
602 		freq = target_freq;
603 
604 	old_freq = clk_get_rate(clk);
605 
606 	/* Return early if nothing to do */
607 	if (old_freq == freq) {
608 		dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
609 			__func__, freq);
610 		return 0;
611 	}
612 
613 	rcu_read_lock();
614 
615 	opp_table = _find_opp_table(dev);
616 	if (IS_ERR(opp_table)) {
617 		dev_err(dev, "%s: device opp doesn't exist\n", __func__);
618 		rcu_read_unlock();
619 		return PTR_ERR(opp_table);
620 	}
621 
622 	old_opp = _find_freq_ceil(opp_table, &old_freq);
623 	if (IS_ERR(old_opp)) {
624 		dev_err(dev, "%s: failed to find current OPP for freq %lu (%ld)\n",
625 			__func__, old_freq, PTR_ERR(old_opp));
626 	}
627 
628 	opp = _find_freq_ceil(opp_table, &freq);
629 	if (IS_ERR(opp)) {
630 		ret = PTR_ERR(opp);
631 		dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n",
632 			__func__, freq, ret);
633 		rcu_read_unlock();
634 		return ret;
635 	}
636 
637 	if (IS_ERR(old_opp)) {
638 		old_u_volt = 0;
639 	} else {
640 		old_u_volt = old_opp->u_volt;
641 		old_u_volt_min = old_opp->u_volt_min;
642 		old_u_volt_max = old_opp->u_volt_max;
643 	}
644 
645 	u_volt = opp->u_volt;
646 	u_volt_min = opp->u_volt_min;
647 	u_volt_max = opp->u_volt_max;
648 
649 	reg = opp_table->regulator;
650 
651 	rcu_read_unlock();
652 
653 	/* Scaling up? Scale voltage before frequency */
654 	if (freq > old_freq) {
655 		ret = _set_opp_voltage(dev, reg, u_volt, u_volt_min,
656 				       u_volt_max);
657 		if (ret)
658 			goto restore_voltage;
659 	}
660 
661 	/* Change frequency */
662 
663 	dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n",
664 		__func__, old_freq, freq);
665 
666 	ret = clk_set_rate(clk, freq);
667 	if (ret) {
668 		dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
669 			ret);
670 		goto restore_voltage;
671 	}
672 
673 	/* Scaling down? Scale voltage after frequency */
674 	if (freq < old_freq) {
675 		ret = _set_opp_voltage(dev, reg, u_volt, u_volt_min,
676 				       u_volt_max);
677 		if (ret)
678 			goto restore_freq;
679 	}
680 
681 	return 0;
682 
683 restore_freq:
684 	if (clk_set_rate(clk, old_freq))
685 		dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
686 			__func__, old_freq);
687 restore_voltage:
688 	/* This shouldn't harm even if the voltages weren't updated earlier */
689 	if (old_u_volt) {
690 		_set_opp_voltage(dev, reg, old_u_volt, old_u_volt_min,
691 				 old_u_volt_max);
692 	}
693 
694 	return ret;
695 }
696 EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);
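
/*
 * Illustrative sketch (not part of this file): a DVFS transition from a
 * driver, e.g. a devfreq- or cpufreq-style target callback. It assumes the
 * device already has an OPP table and a clk (and optionally a regulator)
 * registered; the function name is made up.
 *
 *	static int foo_target(struct device *dev, unsigned long target_freq)
 *	{
 *		return dev_pm_opp_set_rate(dev, target_freq);
 *	}
 */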
697 
698 /* OPP-dev Helpers */
699 static void _kfree_opp_dev_rcu(struct rcu_head *head)
700 {
701 	struct opp_device *opp_dev;
702 
703 	opp_dev = container_of(head, struct opp_device, rcu_head);
704 	kfree_rcu(opp_dev, rcu_head);
705 }
706 
707 static void _remove_opp_dev(struct opp_device *opp_dev,
708 			    struct opp_table *opp_table)
709 {
710 	opp_debug_unregister(opp_dev, opp_table);
711 	list_del_rcu(&opp_dev->node);
712 	call_srcu(&opp_table->srcu_head.srcu, &opp_dev->rcu_head,
713 		  _kfree_opp_dev_rcu);
714 }
715 
716 struct opp_device *_add_opp_dev(const struct device *dev,
717 				struct opp_table *opp_table)
718 {
719 	struct opp_device *opp_dev;
720 	int ret;
721 
722 	opp_dev = kzalloc(sizeof(*opp_dev), GFP_KERNEL);
723 	if (!opp_dev)
724 		return NULL;
725 
726 	/* Initialize opp-dev */
727 	opp_dev->dev = dev;
728 	list_add_rcu(&opp_dev->node, &opp_table->dev_list);
729 
730 	/* Create debugfs entries for the opp_table */
731 	ret = opp_debug_register(opp_dev, opp_table);
732 	if (ret)
733 		dev_err(dev, "%s: Failed to register opp debugfs (%d)\n",
734 			__func__, ret);
735 
736 	return opp_dev;
737 }
738 
739 /**
740  * _add_opp_table() - Find OPP table or allocate a new one
741  * @dev:	device for which we do this operation
742  *
743  * It tries to find an existing table first, if it couldn't find one, it
744  * allocates a new OPP table and returns that.
745  *
746  * Return: valid opp_table pointer if success, else NULL.
747  */
748 static struct opp_table *_add_opp_table(struct device *dev)
749 {
750 	struct opp_table *opp_table;
751 	struct opp_device *opp_dev;
752 	int ret;
753 
754 	/* Check for existing table for 'dev' first */
755 	opp_table = _find_opp_table(dev);
756 	if (!IS_ERR(opp_table))
757 		return opp_table;
758 
759 	/*
760 	 * Allocate a new OPP table. In the infrequent case where a new
761 	 * device needs to be added, we pay this penalty.
762 	 */
763 	opp_table = kzalloc(sizeof(*opp_table), GFP_KERNEL);
764 	if (!opp_table)
765 		return NULL;
766 
767 	INIT_LIST_HEAD(&opp_table->dev_list);
768 
769 	opp_dev = _add_opp_dev(dev, opp_table);
770 	if (!opp_dev) {
771 		kfree(opp_table);
772 		return NULL;
773 	}
774 
775 	_of_init_opp_table(opp_table, dev);
776 
777 	/* Set regulator to a non-NULL error value */
778 	opp_table->regulator = ERR_PTR(-ENXIO);
779 
780 	/* Find clk for the device */
781 	opp_table->clk = clk_get(dev, NULL);
782 	if (IS_ERR(opp_table->clk)) {
783 		ret = PTR_ERR(opp_table->clk);
784 		if (ret != -EPROBE_DEFER)
785 			dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__,
786 				ret);
787 	}
788 
789 	srcu_init_notifier_head(&opp_table->srcu_head);
790 	INIT_LIST_HEAD(&opp_table->opp_list);
791 
792 	/* Secure the device table modification */
793 	list_add_rcu(&opp_table->node, &opp_tables);
794 	return opp_table;
795 }
796 
797 /**
798  * _kfree_device_rcu() - Free opp_table RCU handler
799  * @head:	RCU head
800  */
801 static void _kfree_device_rcu(struct rcu_head *head)
802 {
803 	struct opp_table *opp_table = container_of(head, struct opp_table,
804 						   rcu_head);
805 
806 	kfree_rcu(opp_table, rcu_head);
807 }
808 
809 /**
810  * _remove_opp_table() - Removes an OPP table
811  * @opp_table: OPP table to be removed.
812  *
813  * Removes/frees OPP table if it doesn't contain any OPPs.
814  */
815 static void _remove_opp_table(struct opp_table *opp_table)
816 {
817 	struct opp_device *opp_dev;
818 
819 	if (!list_empty(&opp_table->opp_list))
820 		return;
821 
822 	if (opp_table->supported_hw)
823 		return;
824 
825 	if (opp_table->prop_name)
826 		return;
827 
828 	if (!IS_ERR(opp_table->regulator))
829 		return;
830 
831 	/* Release clk */
832 	if (!IS_ERR(opp_table->clk))
833 		clk_put(opp_table->clk);
834 
835 	opp_dev = list_first_entry(&opp_table->dev_list, struct opp_device,
836 				   node);
837 
838 	_remove_opp_dev(opp_dev, opp_table);
839 
840 	/* dev_list must be empty now */
841 	WARN_ON(!list_empty(&opp_table->dev_list));
842 
843 	list_del_rcu(&opp_table->node);
844 	call_srcu(&opp_table->srcu_head.srcu, &opp_table->rcu_head,
845 		  _kfree_device_rcu);
846 }
847 
848 /**
849  * _kfree_opp_rcu() - Free OPP RCU handler
850  * @head:	RCU head
851  */
852 static void _kfree_opp_rcu(struct rcu_head *head)
853 {
854 	struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head);
855 
856 	kfree_rcu(opp, rcu_head);
857 }
858 
859 /**
860  * _opp_remove()  - Remove an OPP from a table definition
861  * @opp_table:	points back to the opp_table struct this opp belongs to
862  * @opp:	pointer to the OPP to remove
863  * @notify:	OPP_EVENT_REMOVE notification should be sent or not
864  *
865  * This function removes an opp definition from the opp table.
866  *
867  * Locking: The internal opp_table and opp structures are RCU protected.
868  * It is assumed that the caller holds required mutex for an RCU updater
869  * strategy.
870  */
871 void _opp_remove(struct opp_table *opp_table, struct dev_pm_opp *opp,
872 		 bool notify)
873 {
874 	/*
875 	 * Notify the changes in the availability of the operable
876 	 * frequency/voltage list.
877 	 */
878 	if (notify)
879 		srcu_notifier_call_chain(&opp_table->srcu_head,
880 					 OPP_EVENT_REMOVE, opp);
881 	opp_debug_remove_one(opp);
882 	list_del_rcu(&opp->node);
883 	call_srcu(&opp_table->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
884 
885 	_remove_opp_table(opp_table);
886 }
887 
888 /**
889  * dev_pm_opp_remove()  - Remove an OPP from OPP table
890  * @dev:	device for which we do this operation
891  * @freq:	OPP to remove with matching 'freq'
892  *
893  * This function removes an opp from the opp table.
894  *
895  * Locking: The internal opp_table and opp structures are RCU protected.
896  * Hence this function internally uses RCU updater strategy with mutex locks
897  * to keep the integrity of the internal data structures. Callers should ensure
898  * that this function is *NOT* called under RCU protection or in contexts where
899  * mutex cannot be locked.
900  */
901 void dev_pm_opp_remove(struct device *dev, unsigned long freq)
902 {
903 	struct dev_pm_opp *opp;
904 	struct opp_table *opp_table;
905 	bool found = false;
906 
907 	/* Hold our table modification lock here */
908 	mutex_lock(&opp_table_lock);
909 
910 	opp_table = _find_opp_table(dev);
911 	if (IS_ERR(opp_table))
912 		goto unlock;
913 
914 	list_for_each_entry(opp, &opp_table->opp_list, node) {
915 		if (opp->rate == freq) {
916 			found = true;
917 			break;
918 		}
919 	}
920 
921 	if (!found) {
922 		dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
923 			 __func__, freq);
924 		goto unlock;
925 	}
926 
927 	_opp_remove(opp_table, opp, true);
928 unlock:
929 	mutex_unlock(&opp_table_lock);
930 }
931 EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
932 
933 struct dev_pm_opp *_allocate_opp(struct device *dev,
934 				 struct opp_table **opp_table)
935 {
936 	struct dev_pm_opp *opp;
937 
938 	/* allocate new OPP node */
939 	opp = kzalloc(sizeof(*opp), GFP_KERNEL);
940 	if (!opp)
941 		return NULL;
942 
943 	INIT_LIST_HEAD(&opp->node);
944 
945 	*opp_table = _add_opp_table(dev);
946 	if (!*opp_table) {
947 		kfree(opp);
948 		return NULL;
949 	}
950 
951 	return opp;
952 }
953 
954 static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
955 					 struct opp_table *opp_table)
956 {
957 	struct regulator *reg = opp_table->regulator;
958 
959 	if (!IS_ERR(reg) &&
960 	    !regulator_is_supported_voltage(reg, opp->u_volt_min,
961 					    opp->u_volt_max)) {
962 		pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n",
963 			__func__, opp->u_volt_min, opp->u_volt_max);
964 		return false;
965 	}
966 
967 	return true;
968 }
969 
970 int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
971 	     struct opp_table *opp_table)
972 {
973 	struct dev_pm_opp *opp;
974 	struct list_head *head = &opp_table->opp_list;
975 	int ret;
976 
977 	/*
978 	 * Insert new OPP in order of increasing frequency and discard if
979 	 * already present.
980 	 *
981 	 * Need to use &opp_table->opp_list in the condition part of the 'for'
982 	 * loop, don't replace it with head otherwise it will become an infinite
983 	 * loop.
984 	 */
985 	list_for_each_entry_rcu(opp, &opp_table->opp_list, node) {
986 		if (new_opp->rate > opp->rate) {
987 			head = &opp->node;
988 			continue;
989 		}
990 
991 		if (new_opp->rate < opp->rate)
992 			break;
993 
994 		/* Duplicate OPPs */
995 		dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
996 			 __func__, opp->rate, opp->u_volt, opp->available,
997 			 new_opp->rate, new_opp->u_volt, new_opp->available);
998 
999 		return opp->available && new_opp->u_volt == opp->u_volt ?
1000 			0 : -EEXIST;
1001 	}
1002 
1003 	new_opp->opp_table = opp_table;
1004 	list_add_rcu(&new_opp->node, head);
1005 
1006 	ret = opp_debug_create_one(new_opp, opp_table);
1007 	if (ret)
1008 		dev_err(dev, "%s: Failed to register opp to debugfs (%d)\n",
1009 			__func__, ret);
1010 
1011 	if (!_opp_supported_by_regulators(new_opp, opp_table)) {
1012 		new_opp->available = false;
1013 		dev_warn(dev, "%s: OPP not supported by regulators (%lu)\n",
1014 			 __func__, new_opp->rate);
1015 	}
1016 
1017 	return 0;
1018 }
1019 
1020 /**
1021  * _opp_add_v1() - Allocate a OPP based on v1 bindings.
1022  * @dev:	device for which we do this operation
1023  * @freq:	Frequency in Hz for this OPP
1024  * @u_volt:	Voltage in uVolts for this OPP
1025  * @dynamic:	Dynamically added OPPs.
1026  *
1027  * This function adds an opp definition to the opp table and returns status.
1028  * The opp is made available by default and it can be controlled using
1029  * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
1030  *
1031  * NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table
1032  * and freed by dev_pm_opp_of_remove_table.
1033  *
1034  * Locking: The internal opp_table and opp structures are RCU protected.
1035  * Hence this function internally uses RCU updater strategy with mutex locks
1036  * to keep the integrity of the internal data structures. Callers should ensure
1037  * that this function is *NOT* called under RCU protection or in contexts where
1038  * mutex cannot be locked.
1039  *
1040  * Return:
1041  * 0		On success OR
1042  *		Duplicate OPPs (both freq and volt are same) and opp->available
1043  * -EEXIST	Freq are same and volt are different OR
1044  *		Duplicate OPPs (both freq and volt are same) and !opp->available
1045  * -ENOMEM	Memory allocation failure
1046  */
1047 int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt,
1048 		bool dynamic)
1049 {
1050 	struct opp_table *opp_table;
1051 	struct dev_pm_opp *new_opp;
1052 	unsigned long tol;
1053 	int ret;
1054 
1055 	/* Hold our table modification lock here */
1056 	mutex_lock(&opp_table_lock);
1057 
1058 	new_opp = _allocate_opp(dev, &opp_table);
1059 	if (!new_opp) {
1060 		ret = -ENOMEM;
1061 		goto unlock;
1062 	}
1063 
1064 	/* populate the opp table */
1065 	new_opp->rate = freq;
1066 	tol = u_volt * opp_table->voltage_tolerance_v1 / 100;
1067 	new_opp->u_volt = u_volt;
1068 	new_opp->u_volt_min = u_volt - tol;
1069 	new_opp->u_volt_max = u_volt + tol;
1070 	new_opp->available = true;
1071 	new_opp->dynamic = dynamic;
1072 
1073 	ret = _opp_add(dev, new_opp, opp_table);
1074 	if (ret)
1075 		goto free_opp;
1076 
1077 	mutex_unlock(&opp_table_lock);
1078 
1079 	/*
1080 	 * Notify the changes in the availability of the operable
1081 	 * frequency/voltage list.
1082 	 */
1083 	srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp);
1084 	return 0;
1085 
1086 free_opp:
1087 	_opp_remove(opp_table, new_opp, false);
1088 unlock:
1089 	mutex_unlock(&opp_table_lock);
1090 	return ret;
1091 }
1092 
1093 /**
1094  * dev_pm_opp_set_supported_hw() - Set supported platforms
1095  * @dev: Device for which supported-hw has to be set.
1096  * @versions: Array of hierarchy of versions to match.
1097  * @count: Number of elements in the array.
1098  *
1099  * This is required only for the V2 bindings, and it enables a platform to
1100  * specify the hierarchy of versions it supports. OPP layer will then enable
1101  * OPPs, which are available for those versions, based on its 'opp-supported-hw'
1102  * property.
1103  *
1104  * Locking: The internal opp_table and opp structures are RCU protected.
1105  * Hence this function internally uses RCU updater strategy with mutex locks
1106  * to keep the integrity of the internal data structures. Callers should ensure
1107  * that this function is *NOT* called under RCU protection or in contexts where
1108  * mutex cannot be locked.
1109  */
1110 int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions,
1111 				unsigned int count)
1112 {
1113 	struct opp_table *opp_table;
1114 	int ret = 0;
1115 
1116 	/* Hold our table modification lock here */
1117 	mutex_lock(&opp_table_lock);
1118 
1119 	opp_table = _add_opp_table(dev);
1120 	if (!opp_table) {
1121 		ret = -ENOMEM;
1122 		goto unlock;
1123 	}
1124 
1125 	/* Make sure there are no concurrent readers while updating opp_table */
1126 	WARN_ON(!list_empty(&opp_table->opp_list));
1127 
1128 	/* Do we already have a version hierarchy associated with opp_table? */
1129 	if (opp_table->supported_hw) {
1130 		dev_err(dev, "%s: Already have supported hardware list\n",
1131 			__func__);
1132 		ret = -EBUSY;
1133 		goto err;
1134 	}
1135 
1136 	opp_table->supported_hw = kmemdup(versions, count * sizeof(*versions),
1137 					GFP_KERNEL);
1138 	if (!opp_table->supported_hw) {
1139 		ret = -ENOMEM;
1140 		goto err;
1141 	}
1142 
1143 	opp_table->supported_hw_count = count;
1144 	mutex_unlock(&opp_table_lock);
1145 	return 0;
1146 
1147 err:
1148 	_remove_opp_table(opp_table);
1149 unlock:
1150 	mutex_unlock(&opp_table_lock);
1151 
1152 	return ret;
1153 }
1154 EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw);
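
/*
 * Illustrative sketch (not part of this file): a platform driver telling the
 * OPP core which hardware versions it runs on before parsing the DT table, so
 * that only OPPs with a matching 'opp-supported-hw' property get enabled. The
 * version values are made up for the example.
 *
 *	static const u32 foo_versions[] = { 0x3, 0x1 };
 *	int err;
 *
 *	err = dev_pm_opp_set_supported_hw(dev, foo_versions,
 *					  ARRAY_SIZE(foo_versions));
 *	if (!err)
 *		err = dev_pm_opp_of_add_table(dev);
 */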
1155 
1156 /**
1157  * dev_pm_opp_put_supported_hw() - Releases resources blocked for supported hw
1158  * @dev: Device for which supported-hw has to be put.
1159  *
1160  * This is required only for the V2 bindings, and is called for a matching
1161  * dev_pm_opp_set_supported_hw(). Until this is called, the opp_table structure
1162  * will not be freed.
1163  *
1164  * Locking: The internal opp_table and opp structures are RCU protected.
1165  * Hence this function internally uses RCU updater strategy with mutex locks
1166  * to keep the integrity of the internal data structures. Callers should ensure
1167  * that this function is *NOT* called under RCU protection or in contexts where
1168  * mutex cannot be locked.
1169  */
1170 void dev_pm_opp_put_supported_hw(struct device *dev)
1171 {
1172 	struct opp_table *opp_table;
1173 
1174 	/* Hold our table modification lock here */
1175 	mutex_lock(&opp_table_lock);
1176 
1177 	/* Check for existing table for 'dev' first */
1178 	opp_table = _find_opp_table(dev);
1179 	if (IS_ERR(opp_table)) {
1180 		dev_err(dev, "Failed to find opp_table: %ld\n",
1181 			PTR_ERR(opp_table));
1182 		goto unlock;
1183 	}
1184 
1185 	/* Make sure there are no concurrent readers while updating opp_table */
1186 	WARN_ON(!list_empty(&opp_table->opp_list));
1187 
1188 	if (!opp_table->supported_hw) {
1189 		dev_err(dev, "%s: Doesn't have supported hardware list\n",
1190 			__func__);
1191 		goto unlock;
1192 	}
1193 
1194 	kfree(opp_table->supported_hw);
1195 	opp_table->supported_hw = NULL;
1196 	opp_table->supported_hw_count = 0;
1197 
1198 	/* Try freeing opp_table if this was the last blocking resource */
1199 	_remove_opp_table(opp_table);
1200 
1201 unlock:
1202 	mutex_unlock(&opp_table_lock);
1203 }
1204 EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw);
1205 
1206 /**
1207  * dev_pm_opp_set_prop_name() - Set prop-extn name
1208  * @dev: Device for which the prop-name has to be set.
1209  * @name: name to postfix to properties.
1210  *
1211  * This is required only for the V2 bindings, and it enables a platform to
1212  * specify the extn to be used for certain property names. The properties to
1213  * which the extension will apply are opp-microvolt and opp-microamp. OPP core
1214  * should postfix the property name with -<name> while looking for them.
1215  *
1216  * Locking: The internal opp_table and opp structures are RCU protected.
1217  * Hence this function internally uses RCU updater strategy with mutex locks
1218  * to keep the integrity of the internal data structures. Callers should ensure
1219  * that this function is *NOT* called under RCU protection or in contexts where
1220  * mutex cannot be locked.
1221  */
1222 int dev_pm_opp_set_prop_name(struct device *dev, const char *name)
1223 {
1224 	struct opp_table *opp_table;
1225 	int ret = 0;
1226 
1227 	/* Hold our table modification lock here */
1228 	mutex_lock(&opp_table_lock);
1229 
1230 	opp_table = _add_opp_table(dev);
1231 	if (!opp_table) {
1232 		ret = -ENOMEM;
1233 		goto unlock;
1234 	}
1235 
1236 	/* Make sure there are no concurrent readers while updating opp_table */
1237 	WARN_ON(!list_empty(&opp_table->opp_list));
1238 
1239 	/* Do we already have a prop-name associated with opp_table? */
1240 	if (opp_table->prop_name) {
1241 		dev_err(dev, "%s: Already have prop-name %s\n", __func__,
1242 			opp_table->prop_name);
1243 		ret = -EBUSY;
1244 		goto err;
1245 	}
1246 
1247 	opp_table->prop_name = kstrdup(name, GFP_KERNEL);
1248 	if (!opp_table->prop_name) {
1249 		ret = -ENOMEM;
1250 		goto err;
1251 	}
1252 
1253 	mutex_unlock(&opp_table_lock);
1254 	return 0;
1255 
1256 err:
1257 	_remove_opp_table(opp_table);
1258 unlock:
1259 	mutex_unlock(&opp_table_lock);
1260 
1261 	return ret;
1262 }
1263 EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name);
1264 
1265 /**
1266  * dev_pm_opp_put_prop_name() - Releases resources blocked for prop-name
1267  * @dev: Device for which the prop-name has to be put.
1268  *
1269  * This is required only for the V2 bindings, and is called for a matching
1270  * dev_pm_opp_set_prop_name(). Until this is called, the opp_table structure
1271  * will not be freed.
1272  *
1273  * Locking: The internal opp_table and opp structures are RCU protected.
1274  * Hence this function internally uses RCU updater strategy with mutex locks
1275  * to keep the integrity of the internal data structures. Callers should ensure
1276  * that this function is *NOT* called under RCU protection or in contexts where
1277  * mutex cannot be locked.
1278  */
1279 void dev_pm_opp_put_prop_name(struct device *dev)
1280 {
1281 	struct opp_table *opp_table;
1282 
1283 	/* Hold our table modification lock here */
1284 	mutex_lock(&opp_table_lock);
1285 
1286 	/* Check for existing table for 'dev' first */
1287 	opp_table = _find_opp_table(dev);
1288 	if (IS_ERR(opp_table)) {
1289 		dev_err(dev, "Failed to find opp_table: %ld\n",
1290 			PTR_ERR(opp_table));
1291 		goto unlock;
1292 	}
1293 
1294 	/* Make sure there are no concurrent readers while updating opp_table */
1295 	WARN_ON(!list_empty(&opp_table->opp_list));
1296 
1297 	if (!opp_table->prop_name) {
1298 		dev_err(dev, "%s: Doesn't have a prop-name\n", __func__);
1299 		goto unlock;
1300 	}
1301 
1302 	kfree(opp_table->prop_name);
1303 	opp_table->prop_name = NULL;
1304 
1305 	/* Try freeing opp_table if this was the last blocking resource */
1306 	_remove_opp_table(opp_table);
1307 
1308 unlock:
1309 	mutex_unlock(&opp_table_lock);
1310 }
1311 EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);
1312 
1313 /**
1314  * dev_pm_opp_set_regulator() - Set regulator name for the device
1315  * @dev: Device for which regulator name is being set.
1316  * @name: Name of the regulator.
1317  *
1318  * In order to support OPP switching, OPP layer needs to know the name of the
1319  * device's regulator, as the core would be required to switch voltages as well.
1320  *
1321  * This must be called before any OPPs are initialized for the device.
1322  *
1323  * Locking: The internal opp_table and opp structures are RCU protected.
1324  * Hence this function internally uses RCU updater strategy with mutex locks
1325  * to keep the integrity of the internal data structures. Callers should ensure
1326  * that this function is *NOT* called under RCU protection or in contexts where
1327  * mutex cannot be locked.
1328  */
1329 struct opp_table *dev_pm_opp_set_regulator(struct device *dev, const char *name)
1330 {
1331 	struct opp_table *opp_table;
1332 	struct regulator *reg;
1333 	int ret;
1334 
1335 	mutex_lock(&opp_table_lock);
1336 
1337 	opp_table = _add_opp_table(dev);
1338 	if (!opp_table) {
1339 		ret = -ENOMEM;
1340 		goto unlock;
1341 	}
1342 
1343 	/* This should be called before OPPs are initialized */
1344 	if (WARN_ON(!list_empty(&opp_table->opp_list))) {
1345 		ret = -EBUSY;
1346 		goto err;
1347 	}
1348 
1349 	/* Already have a regulator set */
1350 	if (WARN_ON(!IS_ERR(opp_table->regulator))) {
1351 		ret = -EBUSY;
1352 		goto err;
1353 	}
1354 	/* Allocate the regulator */
1355 	reg = regulator_get_optional(dev, name);
1356 	if (IS_ERR(reg)) {
1357 		ret = PTR_ERR(reg);
1358 		if (ret != -EPROBE_DEFER)
1359 			dev_err(dev, "%s: no regulator (%s) found: %d\n",
1360 				__func__, name, ret);
1361 		goto err;
1362 	}
1363 
1364 	opp_table->regulator = reg;
1365 
1366 	mutex_unlock(&opp_table_lock);
1367 	return opp_table;
1368 
1369 err:
1370 	_remove_opp_table(opp_table);
1371 unlock:
1372 	mutex_unlock(&opp_table_lock);
1373 
1374 	return ERR_PTR(ret);
1375 }
1376 EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulator);
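
/*
 * Illustrative sketch (not part of this file): registering the device's
 * supply with the OPP core before any OPPs are added, and releasing it again
 * on the error path. The supply name "vdd" is a placeholder.
 *
 *	struct opp_table *opp_table;
 *	int ret;
 *
 *	opp_table = dev_pm_opp_set_regulator(dev, "vdd");
 *	if (IS_ERR(opp_table))
 *		return PTR_ERR(opp_table);
 *
 *	ret = dev_pm_opp_of_add_table(dev);
 *	if (ret)
 *		dev_pm_opp_put_regulator(opp_table);
 */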
1377 
1378 /**
1379  * dev_pm_opp_put_regulator() - Releases resources blocked for regulator
1380  * @opp_table: OPP table returned from dev_pm_opp_set_regulator().
1381  *
1382  * Locking: The internal opp_table and opp structures are RCU protected.
1383  * Hence this function internally uses RCU updater strategy with mutex locks
1384  * to keep the integrity of the internal data structures. Callers should ensure
1385  * that this function is *NOT* called under RCU protection or in contexts where
1386  * mutex cannot be locked.
1387  */
1388 void dev_pm_opp_put_regulator(struct opp_table *opp_table)
1389 {
1390 	mutex_lock(&opp_table_lock);
1391 
1392 	if (IS_ERR(opp_table->regulator)) {
1393 		pr_err("%s: Doesn't have regulator set\n", __func__);
1394 		goto unlock;
1395 	}
1396 
1397 	/* Make sure there are no concurrent readers while updating opp_table */
1398 	WARN_ON(!list_empty(&opp_table->opp_list));
1399 
1400 	regulator_put(opp_table->regulator);
1401 	opp_table->regulator = ERR_PTR(-ENXIO);
1402 
1403 	/* Try freeing opp_table if this was the last blocking resource */
1404 	_remove_opp_table(opp_table);
1405 
1406 unlock:
1407 	mutex_unlock(&opp_table_lock);
1408 }
1409 EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulator);
1410 
1411 /**
1412  * dev_pm_opp_add()  - Add an OPP table from a table definition
1413  * @dev:	device for which we do this operation
1414  * @freq:	Frequency in Hz for this OPP
1415  * @u_volt:	Voltage in uVolts for this OPP
1416  *
1417  * This function adds an opp definition to the opp table and returns status.
1418  * The opp is made available by default and it can be controlled using
1419  * dev_pm_opp_enable/disable functions.
1420  *
1421  * Locking: The internal opp_table and opp structures are RCU protected.
1422  * Hence this function internally uses RCU updater strategy with mutex locks
1423  * to keep the integrity of the internal data structures. Callers should ensure
1424  * that this function is *NOT* called under RCU protection or in contexts where
1425  * mutex cannot be locked.
1426  *
1427  * Return:
1428  * 0		On success OR
1429  *		Duplicate OPPs (both freq and volt are same) and opp->available
1430  * -EEXIST	Freq are same and volt are different OR
1431  *		Duplicate OPPs (both freq and volt are same) and !opp->available
1432  * -ENOMEM	Memory allocation failure
1433  */
1434 int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
1435 {
1436 	return _opp_add_v1(dev, freq, u_volt, true);
1437 }
1438 EXPORT_SYMBOL_GPL(dev_pm_opp_add);
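
/*
 * Illustrative sketch (not part of this file): a driver registering a small
 * OPP table at probe time without DT, using made-up frequency/voltage pairs.
 * Frequencies are in Hz, voltages in uV.
 *
 *	dev_pm_opp_add(dev,  500000000,  900000);
 *	dev_pm_opp_add(dev, 1000000000, 1100000);
 */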
1439 
1440 /**
1441  * _opp_set_availability() - helper to set the availability of an opp
1442  * @dev:		device for which we do this operation
1443  * @freq:		OPP frequency to modify availability
1444  * @availability_req:	availability status requested for this opp
1445  *
1446  * Set the availability of an OPP with an RCU operation, opp_{enable,disable}
1447  * share a common logic which is isolated here.
1448  *
1449  * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
1450  * copy operation, returns 0 if no modification was done OR modification was
1451  * successful.
1452  *
1453  * Locking: The internal opp_table and opp structures are RCU protected.
1454  * Hence this function internally uses RCU updater strategy with mutex locks to
1455  * keep the integrity of the internal data structures. Callers should ensure
1456  * that this function is *NOT* called under RCU protection or in contexts where
1457  * mutex locking or synchronize_rcu() blocking calls cannot be used.
1458  */
1459 static int _opp_set_availability(struct device *dev, unsigned long freq,
1460 				 bool availability_req)
1461 {
1462 	struct opp_table *opp_table;
1463 	struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
1464 	int r = 0;
1465 
1466 	/* keep the node allocated */
1467 	new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL);
1468 	if (!new_opp)
1469 		return -ENOMEM;
1470 
1471 	mutex_lock(&opp_table_lock);
1472 
1473 	/* Find the opp_table */
1474 	opp_table = _find_opp_table(dev);
1475 	if (IS_ERR(opp_table)) {
1476 		r = PTR_ERR(opp_table);
1477 		dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
1478 		goto unlock;
1479 	}
1480 
1481 	/* Do we have the frequency? */
1482 	list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
1483 		if (tmp_opp->rate == freq) {
1484 			opp = tmp_opp;
1485 			break;
1486 		}
1487 	}
1488 	if (IS_ERR(opp)) {
1489 		r = PTR_ERR(opp);
1490 		goto unlock;
1491 	}
1492 
1493 	/* Is update really needed? */
1494 	if (opp->available == availability_req)
1495 		goto unlock;
1496 	/* copy the old data over */
1497 	*new_opp = *opp;
1498 
1499 	/* plug in new node */
1500 	new_opp->available = availability_req;
1501 
1502 	list_replace_rcu(&opp->node, &new_opp->node);
1503 	mutex_unlock(&opp_table_lock);
1504 	call_srcu(&opp_table->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
1505 
1506 	/* Notify the change of the OPP availability */
1507 	if (availability_req)
1508 		srcu_notifier_call_chain(&opp_table->srcu_head,
1509 					 OPP_EVENT_ENABLE, new_opp);
1510 	else
1511 		srcu_notifier_call_chain(&opp_table->srcu_head,
1512 					 OPP_EVENT_DISABLE, new_opp);
1513 
1514 	return 0;
1515 
1516 unlock:
1517 	mutex_unlock(&opp_table_lock);
1518 	kfree(new_opp);
1519 	return r;
1520 }
1521 
1522 /**
1523  * dev_pm_opp_enable() - Enable a specific OPP
1524  * @dev:	device for which we do this operation
1525  * @freq:	OPP frequency to enable
1526  *
1527  * Enables a provided opp. If the operation is valid, this returns 0, else the
1528  * corresponding error value. It is meant to be used to make an OPP available
1529  * again after it was temporarily made unavailable with dev_pm_opp_disable.
1530  *
1531  * Locking: The internal opp_table and opp structures are RCU protected.
1532  * Hence this function indirectly uses RCU and mutex locks to keep the
1533  * integrity of the internal data structures. Callers should ensure that
1534  * this function is *NOT* called under RCU protection or in contexts where
1535  * mutex locking or synchronize_rcu() blocking calls cannot be used.
1536  *
1537  * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
1538  * copy operation, returns 0 if no modification was done OR modification was
1539  * successful.
1540  */
1541 int dev_pm_opp_enable(struct device *dev, unsigned long freq)
1542 {
1543 	return _opp_set_availability(dev, freq, true);
1544 }
1545 EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
1546 
1547 /**
1548  * dev_pm_opp_disable() - Disable a specific OPP
1549  * @dev:	device for which we do this operation
1550  * @freq:	OPP frequency to disable
1551  *
1552  * Disables a provided opp. If the operation is valid, this returns
1553  * 0, else the corresponding error value. It is meant to be a temporary
1554  * control by users to make this OPP not available until the circumstances are
1555  * right to make it available again (with a call to dev_pm_opp_enable).
1556  *
1557  * Locking: The internal opp_table and opp structures are RCU protected.
1558  * Hence this function indirectly uses RCU and mutex locks to keep the
1559  * integrity of the internal data structures. Callers should ensure that
1560  * this function is *NOT* called under RCU protection or in contexts where
1561  * mutex locking or synchronize_rcu() blocking calls cannot be used.
1562  *
1563  * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
1564  * copy operation, returns 0 if no modification was done OR modification was
1565  * successful.
1566  */
1567 int dev_pm_opp_disable(struct device *dev, unsigned long freq)
1568 {
1569 	return _opp_set_availability(dev, freq, false);
1570 }
1571 EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
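
/*
 * Illustrative sketch (not part of this file): temporarily dropping the
 * highest OPP, e.g. when a thermal driver detects overheating, and restoring
 * it later. 'dev' and the 1.2 GHz rate are placeholders.
 *
 *	dev_pm_opp_disable(dev, 1200000000);
 *	...
 *	dev_pm_opp_enable(dev, 1200000000);
 */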
1572 
1573 /**
1574  * dev_pm_opp_get_notifier() - find notifier_head of the device with opp
1575  * @dev:	device pointer used to lookup OPP table.
1576  *
1577  * Return: pointer to  notifier head if found, otherwise -ENODEV or
1578  * -EINVAL based on type of error casted as pointer. value must be checked
1579  *  with IS_ERR to determine valid pointer or error result.
1580  *
1581  * Locking: This function must be called under rcu_read_lock(). opp_table is a
1582  * RCU protected pointer. The reason for the same is that the opp pointer which
1583  * is returned will remain valid for use with opp_get_{voltage, freq} only while
1584  * under the locked area. The pointer returned must be used prior to unlocking
1585  * with rcu_read_unlock() to maintain the integrity of the pointer.
1586  */
1587 struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
1588 {
1589 	struct opp_table *opp_table = _find_opp_table(dev);
1590 
1591 	if (IS_ERR(opp_table))
1592 		return ERR_CAST(opp_table); /* matching type */
1593 
1594 	return &opp_table->srcu_head;
1595 }
1596 EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
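
/*
 * Illustrative sketch (not part of this file): subscribing to OPP
 * add/remove/enable/disable events, assuming a hypothetical notifier callback
 * 'foo_opp_notifier'. The head is looked up under RCU; the caller must ensure
 * the opp_table stays around while the notifier is registered.
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_opp_notifier,
 *	};
 *	struct srcu_notifier_head *nh;
 *
 *	rcu_read_lock();
 *	nh = dev_pm_opp_get_notifier(dev);
 *	rcu_read_unlock();
 *	if (!IS_ERR(nh))
 *		srcu_notifier_chain_register(nh, &foo_nb);
 */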
1597 
1598 /*
1599  * Free OPPs either created using static entries present in DT or even the
1600  * dynamically added entries based on remove_all param.
1601  */
1602 void _dev_pm_opp_remove_table(struct device *dev, bool remove_all)
1603 {
1604 	struct opp_table *opp_table;
1605 	struct dev_pm_opp *opp, *tmp;
1606 
1607 	/* Hold our table modification lock here */
1608 	mutex_lock(&opp_table_lock);
1609 
1610 	/* Check for existing table for 'dev' */
1611 	opp_table = _find_opp_table(dev);
1612 	if (IS_ERR(opp_table)) {
1613 		int error = PTR_ERR(opp_table);
1614 
1615 		if (error != -ENODEV)
1616 			WARN(1, "%s: opp_table: %d\n",
1617 			     IS_ERR_OR_NULL(dev) ?
1618 					"Invalid device" : dev_name(dev),
1619 			     error);
1620 		goto unlock;
1621 	}
1622 
1623 	/* Find if opp_table manages a single device */
1624 	if (list_is_singular(&opp_table->dev_list)) {
1625 		/* Free static OPPs */
1626 		list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
1627 			if (remove_all || !opp->dynamic)
1628 				_opp_remove(opp_table, opp, true);
1629 		}
1630 	} else {
1631 		_remove_opp_dev(_find_opp_dev(dev, opp_table), opp_table);
1632 	}
1633 
1634 unlock:
1635 	mutex_unlock(&opp_table_lock);
1636 }
1637 
1638 /**
1639  * dev_pm_opp_remove_table() - Free all OPPs associated with the device
1640  * @dev:	device pointer used to lookup OPP table.
1641  *
1642  * Free both OPPs created using static entries present in DT and the
1643  * dynamically added entries.
1644  *
1645  * Locking: The internal opp_table and opp structures are RCU protected.
1646  * Hence this function indirectly uses RCU updater strategy with mutex locks
1647  * to keep the integrity of the internal data structures. Callers should ensure
1648  * that this function is *NOT* called under RCU protection or in contexts where
1649  * mutex cannot be locked.
1650  */
1651 void dev_pm_opp_remove_table(struct device *dev)
1652 {
1653 	_dev_pm_opp_remove_table(dev, true);
1654 }
1655 EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table);
1656