/*
 * Generic OPP OF helpers
 *
 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
 *	Nishanth Menon
 *	Romit Dasgupta
 *	Kevin Hilman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/export.h>

#include "opp.h"

static struct opp_table *_managed_opp(const struct device_node *np)
{
	struct opp_table *opp_table;

	list_for_each_entry_rcu(opp_table, &opp_tables, node) {
		if (opp_table->np == np) {
			/*
			 * Multiple devices can point to the same OPP table and
			 * so will have same node-pointer, np.
			 *
			 * But the OPPs will be considered as shared only if the
			 * OPP table contains an "opp-shared" property.
			 */
			if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED)
				return opp_table;

			return NULL;
		}
	}

	return NULL;
}

void _of_init_opp_table(struct opp_table *opp_table, struct device *dev)
{
	struct device_node *np;

	/*
	 * Only required for backward compatibility with v1 bindings, but isn't
	 * harmful for other cases. And so we do it unconditionally.
	 */
	np = of_node_get(dev->of_node);
	if (np) {
		u32 val;

		if (!of_property_read_u32(np, "clock-latency", &val))
			opp_table->clock_latency_ns_max = val;
		of_property_read_u32(np, "voltage-tolerance",
				     &opp_table->voltage_tolerance_v1);
		of_node_put(np);
	}
}
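
/*
 * Illustrative v1 fragment (node name and values are assumptions, not taken
 * from this file): both properties read above live directly in the consumer's
 * device node, alongside the deprecated "operating-points" property that
 * _of_add_opp_table_v1() parses further below.
 *
 *	cpu@0 {
 *		...
 *		clock-latency = <61036>;	(transition latency, in ns)
 *		voltage-tolerance = <2>;	(in percent, per the v1 cpufreq bindings)
 *	};
 */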

static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table,
			      struct device_node *np)
{
	unsigned int count = opp_table->supported_hw_count;
	u32 version;
	int ret;

	if (!opp_table->supported_hw) {
		/*
		 * In the case that no supported_hw has been set by the
		 * platform but there is an opp-supported-hw value set for
		 * an OPP then the OPP should not be enabled as there is
		 * no way to see if the hardware supports it.
		 */
		if (of_find_property(np, "opp-supported-hw", NULL))
			return false;
		else
			return true;
	}

	while (count--) {
		ret = of_property_read_u32_index(np, "opp-supported-hw", count,
						 &version);
		if (ret) {
			dev_warn(dev, "%s: failed to read opp-supported-hw property at index %d: %d\n",
				 __func__, count, ret);
			return false;
		}

		/* Both of these are bitwise masks of the versions */
		if (!(version & opp_table->supported_hw[count]))
			return false;
	}

	return true;
}
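
/*
 * Worked example (illustrative, not from this file): if the platform has set
 * a single supported_hw entry of 0x4 (e.g. via dev_pm_opp_set_supported_hw()),
 * an OPP node carrying
 *
 *	opp-supported-hw = <0x6>;
 *
 * is accepted because 0x6 & 0x4 is non-zero, while one carrying
 * opp-supported-hw = <0x3> is rejected because 0x3 & 0x4 is zero. With
 * supported_hw_count > 1, every index must match for the OPP to be kept.
 */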

/* TODO: Support multiple regulators */
static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
			      struct opp_table *opp_table)
{
	u32 microvolt[3] = {0};
	u32 val;
	int count, ret;
	struct property *prop = NULL;
	char name[NAME_MAX];

	/* Search for "opp-microvolt-<name>" */
	if (opp_table->prop_name) {
		snprintf(name, sizeof(name), "opp-microvolt-%s",
			 opp_table->prop_name);
		prop = of_find_property(opp->np, name, NULL);
	}

	if (!prop) {
		/* Search for "opp-microvolt" */
		sprintf(name, "opp-microvolt");
		prop = of_find_property(opp->np, name, NULL);

		/* Missing property isn't a problem, but an invalid entry is */
		if (!prop)
			return 0;
	}

	count = of_property_count_u32_elems(opp->np, name);
	if (count < 0) {
		dev_err(dev, "%s: Invalid %s property (%d)\n",
			__func__, name, count);
		return count;
	}

	/* There can be one or three elements here */
	if (count != 1 && count != 3) {
		dev_err(dev, "%s: Invalid number of elements in %s property (%d)\n",
			__func__, name, count);
		return -EINVAL;
	}

	ret = of_property_read_u32_array(opp->np, name, microvolt, count);
	if (ret) {
		dev_err(dev, "%s: error parsing %s: %d\n", __func__, name, ret);
		return -EINVAL;
	}

	opp->u_volt = microvolt[0];

	if (count == 1) {
		opp->u_volt_min = opp->u_volt;
		opp->u_volt_max = opp->u_volt;
	} else {
		opp->u_volt_min = microvolt[1];
		opp->u_volt_max = microvolt[2];
	}

	/* Search for "opp-microamp-<name>" */
	prop = NULL;
	if (opp_table->prop_name) {
		snprintf(name, sizeof(name), "opp-microamp-%s",
			 opp_table->prop_name);
		prop = of_find_property(opp->np, name, NULL);
	}

	if (!prop) {
		/* Search for "opp-microamp" */
		sprintf(name, "opp-microamp");
		prop = of_find_property(opp->np, name, NULL);
	}

	if (prop && !of_property_read_u32(opp->np, name, &val))
		opp->u_amp = val;

	return 0;
}
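
/*
 * Illustrative OPP node (values are assumptions): "opp-microvolt" is either a
 * single target voltage or a <target min max> triplet, and the optional
 * "opp-microamp" gives the peak current. The "-<name>" variants are only
 * looked up when the platform has registered a prop_name (e.g. via
 * dev_pm_opp_set_prop_name()).
 *
 *	opp@1000000000 {
 *		opp-hz = /bits/ 64 <1000000000>;
 *		opp-microvolt = <1025000 980000 1088000>;	(target min max, in uV)
 *		opp-microamp = <90000>;
 *	};
 */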

/**
 * dev_pm_opp_of_remove_table() - Free OPP table entries created from static DT
 *				  entries
 * @dev:	device pointer used to lookup OPP table.
 *
 * Free OPPs created using static entries present in DT.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function indirectly uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
void dev_pm_opp_of_remove_table(struct device *dev)
{
	_dev_pm_opp_remove_table(dev, false);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);

/* Returns opp descriptor node for a device, caller must do of_node_put() */
struct device_node *_of_get_opp_desc_node(struct device *dev)
{
	/*
	 * TODO: Support for multiple OPP tables.
	 *
	 * There should be only ONE phandle present in "operating-points-v2"
	 * property.
	 */

	return of_parse_phandle(dev->of_node, "operating-points-v2", 0);
}
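
/*
 * Illustrative consumer node (labels are assumptions): the device carries a
 * single phandle to its OPP descriptor table, which is what the
 * of_parse_phandle() call above resolves. A matching descriptor table is
 * sketched after _of_add_opp_table_v2() below.
 *
 *	cpu0: cpu@0 {
 *		...
 *		operating-points-v2 = <&cpu0_opp_table>;
 *	};
 */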

/**
 * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
 * @dev:	device for which we do this operation
 * @np:		device node
 *
 * This function adds an opp definition to the opp table and returns status. The
 * opp can be controlled using dev_pm_opp_enable/disable functions and may be
 * removed by dev_pm_opp_remove.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 * -EINVAL	Failed parsing the OPP node
 */
static int _opp_add_static_v2(struct device *dev, struct device_node *np)
{
	struct opp_table *opp_table;
	struct dev_pm_opp *new_opp;
	u64 rate;
	u32 val;
	int ret;

	/* Hold our table modification lock here */
	mutex_lock(&opp_table_lock);

	new_opp = _allocate_opp(dev, &opp_table);
	if (!new_opp) {
		ret = -ENOMEM;
		goto unlock;
	}

	ret = of_property_read_u64(np, "opp-hz", &rate);
	if (ret < 0) {
		dev_err(dev, "%s: opp-hz not found\n", __func__);
		goto free_opp;
	}

	/* Check if the OPP supports hardware's hierarchy of versions or not */
	if (!_opp_is_supported(dev, opp_table, np)) {
		dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate);
		goto free_opp;
	}

	/*
	 * Rate is defined as an unsigned long in clk API, and so casting
	 * explicitly to its type. Must be fixed once rate is 64 bit
	 * guaranteed in clk API.
	 */
	new_opp->rate = (unsigned long)rate;
	new_opp->turbo = of_property_read_bool(np, "turbo-mode");

	new_opp->np = np;
	new_opp->dynamic = false;
	new_opp->available = true;

	if (!of_property_read_u32(np, "clock-latency-ns", &val))
		new_opp->clock_latency_ns = val;

	ret = opp_parse_supplies(new_opp, dev, opp_table);
	if (ret)
		goto free_opp;

	ret = _opp_add(dev, new_opp, opp_table);
	if (ret)
		goto free_opp;

	/* OPP to select on device suspend */
	if (of_property_read_bool(np, "opp-suspend")) {
		if (opp_table->suspend_opp) {
			dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n",
				 __func__, opp_table->suspend_opp->rate,
				 new_opp->rate);
		} else {
			new_opp->suspend = true;
			opp_table->suspend_opp = new_opp;
		}
	}

	if (new_opp->clock_latency_ns > opp_table->clock_latency_ns_max)
		opp_table->clock_latency_ns_max = new_opp->clock_latency_ns;

	mutex_unlock(&opp_table_lock);

	pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
		 __func__, new_opp->turbo, new_opp->rate, new_opp->u_volt,
		 new_opp->u_volt_min, new_opp->u_volt_max,
		 new_opp->clock_latency_ns);

	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp);
	return 0;

free_opp:
	_opp_remove(opp_table, new_opp, false);
unlock:
	mutex_unlock(&opp_table_lock);
	return ret;
}
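
/*
 * Illustrative v2 OPP node (values are assumptions): of the properties
 * consumed by _opp_add_static_v2() above, only "opp-hz" is mandatory.
 *
 *	opp@1200000000 {
 *		opp-hz = /bits/ 64 <1200000000>;
 *		opp-microvolt = <1200000>;
 *		clock-latency-ns = <200000>;
 *		turbo-mode;		(marks a boost OPP)
 *		opp-suspend;		(OPP to select on device suspend)
 *	};
 */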

/* Initializes OPP tables based on new bindings */
static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
{
	struct device_node *np;
	struct opp_table *opp_table;
	int ret = 0, count = 0;

	mutex_lock(&opp_table_lock);

	opp_table = _managed_opp(opp_np);
	if (opp_table) {
		/* OPPs are already managed */
		if (!_add_opp_dev(dev, opp_table))
			ret = -ENOMEM;
		mutex_unlock(&opp_table_lock);
		return ret;
	}
	mutex_unlock(&opp_table_lock);

	/* We have opp-table node now, iterate over it and add OPPs */
	for_each_available_child_of_node(opp_np, np) {
		count++;

		ret = _opp_add_static_v2(dev, np);
		if (ret) {
			dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
				ret);
			of_node_put(np);
			goto free_table;
		}
	}

	/* There should be one or more OPPs defined */
	if (WARN_ON(!count))
		return -ENOENT;

	mutex_lock(&opp_table_lock);

	opp_table = _find_opp_table(dev);
	if (WARN_ON(IS_ERR(opp_table))) {
		ret = PTR_ERR(opp_table);
		mutex_unlock(&opp_table_lock);
		goto free_table;
	}

	opp_table->np = opp_np;
	if (of_property_read_bool(opp_np, "opp-shared"))
		opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED;
	else
		opp_table->shared_opp = OPP_TABLE_ACCESS_EXCLUSIVE;

	mutex_unlock(&opp_table_lock);

	return 0;

free_table:
	dev_pm_opp_of_remove_table(dev);

	return ret;
}
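
/*
 * Illustrative descriptor table (labels and values are assumptions): the
 * opp_np node iterated over above, with "opp-shared" marking the table as
 * shared by every device that references it (OPP_TABLE_ACCESS_SHARED).
 *
 *	cpu0_opp_table: opp_table0 {
 *		compatible = "operating-points-v2";
 *		opp-shared;
 *
 *		opp@998400000 {
 *			opp-hz = /bits/ 64 <998400000>;
 *			opp-microvolt = <1075000>;
 *		};
 *		opp@1094400000 {
 *			opp-hz = /bits/ 64 <1094400000>;
 *			opp-microvolt = <1150000>;
 *		};
 *	};
 */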

/* Initializes OPP tables based on old-deprecated bindings */
static int _of_add_opp_table_v1(struct device *dev)
{
	const struct property *prop;
	const __be32 *val;
	int nr, ret;

	prop = of_find_property(dev->of_node, "operating-points", NULL);
	if (!prop)
		return -ENODEV;
	if (!prop->value)
		return -ENODATA;

	/*
	 * Each OPP is a set of tuples consisting of frequency and
	 * voltage like <freq-kHz vol-uV>.
	 */
	nr = prop->length / sizeof(u32);
	if (nr % 2) {
		dev_err(dev, "%s: Invalid OPP table\n", __func__);
		return -EINVAL;
	}

	val = prop->value;
	while (nr) {
		unsigned long freq = be32_to_cpup(val++) * 1000;
		unsigned long volt = be32_to_cpup(val++);

		ret = _opp_add_v1(dev, freq, volt, false);
		if (ret) {
			dev_err(dev, "%s: Failed to add OPP %ld (%d)\n",
				__func__, freq, ret);
			dev_pm_opp_of_remove_table(dev);
			return ret;
		}
		nr -= 2;
	}

	return 0;
}
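
/*
 * Worked example (values are assumptions): with the deprecated binding
 *
 *	operating-points = <998400 1100000>, <1094400 1150000>;
 *
 * the loop above registers 998400 * 1000 = 998400000 Hz at 1100000 uV and
 * 1094400 * 1000 = 1094400000 Hz at 1150000 uV.
 */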

/**
 * dev_pm_opp_of_add_table() - Initialize opp table from device tree
 * @dev:	device pointer used to lookup OPP table.
 *
 * Register the initial OPP table with the OPP library for given device.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function indirectly uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 * -ENODEV	when 'operating-points' property is not found or contains invalid
 *		data in the device node.
 * -ENODATA	when empty 'operating-points' property is found
 * -EINVAL	when invalid entries are found in opp-v2 table
 */
int dev_pm_opp_of_add_table(struct device *dev)
{
	struct device_node *opp_np;
	int ret;

	/*
	 * OPPs have two versions of bindings now. The older one is deprecated,
	 * try for the new binding first.
	 */
	opp_np = _of_get_opp_desc_node(dev);
	if (!opp_np) {
		/*
		 * Try old-deprecated bindings for backward compatibility with
		 * older dtbs.
		 */
		return _of_add_opp_table_v1(dev);
	}

	ret = _of_add_opp_table_v2(dev, opp_np);
	of_node_put(opp_np);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);
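
/*
 * Usage sketch (hypothetical driver, not part of this file): callers normally
 * pair dev_pm_opp_of_add_table() with dev_pm_opp_of_remove_table() in their
 * probe/error/remove paths.
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		int ret;
 *
 *		ret = dev_pm_opp_of_add_table(dev);
 *		if (ret)
 *			return ret;
 *
 *		ret = foo_setup(dev);		(hypothetical helper)
 *		if (ret)
 *			dev_pm_opp_of_remove_table(dev);
 *
 *		return ret;
 *	}
 */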

/* CPU device specific helpers */

/**
 * dev_pm_opp_of_cpumask_remove_table() - Removes OPP table for @cpumask
 * @cpumask:	cpumask for which OPP table needs to be removed
 *
 * This removes the OPP tables for CPUs present in the @cpumask.
 * This should be used only to remove static entries created from DT.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask)
{
	_dev_pm_opp_cpumask_remove_table(cpumask, true);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_remove_table);

/**
 * dev_pm_opp_of_cpumask_add_table() - Adds OPP table for @cpumask
 * @cpumask:	cpumask for which OPP table needs to be added.
 *
 * This adds the OPP tables for CPUs present in the @cpumask.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask)
{
	struct device *cpu_dev;
	int cpu, ret = 0;

	WARN_ON(cpumask_empty(cpumask));

	for_each_cpu(cpu, cpumask) {
		cpu_dev = get_cpu_device(cpu);
		if (!cpu_dev) {
			pr_err("%s: failed to get cpu%d device\n", __func__,
			       cpu);
			continue;
		}

		ret = dev_pm_opp_of_add_table(cpu_dev);
		if (ret) {
			pr_err("%s: couldn't find opp table for cpu:%d, %d\n",
			       __func__, cpu, ret);

			/* Free all other OPPs */
			dev_pm_opp_of_cpumask_remove_table(cpumask);
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table);

/**
 * dev_pm_opp_of_get_sharing_cpus() - Get cpumask of CPUs sharing OPPs with
 *				      @cpu_dev using operating-points-v2
 *				      bindings.
 * @cpu_dev:	CPU device for which we do this operation
 * @cpumask:	cpumask to update with information of sharing CPUs
 *
 * This updates the @cpumask with CPUs that are sharing OPPs with @cpu_dev.
 * It works only for operating-points-v2 bindings.
 *
 * Returns -ENOENT if operating-points-v2 isn't present for @cpu_dev.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev,
				   struct cpumask *cpumask)
{
	struct device_node *np, *tmp_np;
	struct device *tcpu_dev;
	int cpu, ret = 0;

	/* Get OPP descriptor node */
	np = _of_get_opp_desc_node(cpu_dev);
	if (!np) {
		dev_dbg(cpu_dev, "%s: Couldn't find cpu_dev node.\n", __func__);
		return -ENOENT;
	}

	cpumask_set_cpu(cpu_dev->id, cpumask);

	/* OPPs are shared ? */
	if (!of_property_read_bool(np, "opp-shared"))
		goto put_cpu_node;

	for_each_possible_cpu(cpu) {
		if (cpu == cpu_dev->id)
			continue;

		tcpu_dev = get_cpu_device(cpu);
		if (!tcpu_dev) {
			dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
				__func__, cpu);
			ret = -ENODEV;
			goto put_cpu_node;
		}

		/* Get OPP descriptor node */
		tmp_np = _of_get_opp_desc_node(tcpu_dev);
		if (!tmp_np) {
			dev_err(tcpu_dev, "%s: Couldn't find tcpu_dev node.\n",
				__func__);
			ret = -ENOENT;
			goto put_cpu_node;
		}

		/* CPUs are sharing opp node */
		if (np == tmp_np)
			cpumask_set_cpu(cpu, cpumask);

		of_node_put(tmp_np);
	}

put_cpu_node:
	of_node_put(np);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus);
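
/*
 * Usage sketch (hypothetical, not part of this file): a CPU frequency scaling
 * driver can combine the helpers above, discovering which CPUs share the boot
 * CPU's OPP table and then registering tables for the whole mask.
 *
 *	struct device *cpu_dev = get_cpu_device(0);
 *	struct cpumask shared_cpus;
 *	int ret;
 *
 *	cpumask_clear(&shared_cpus);
 *	ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, &shared_cpus);
 *	if (!ret)
 *		ret = dev_pm_opp_of_cpumask_add_table(&shared_cpus);
 */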