// SPDX-License-Identifier: GPL-2.0
/*
 * Interconnect framework core driver
 *
 * Copyright (c) 2017-2019, Linaro Ltd.
 * Author: Georgi Djakov <georgi.djakov@linaro.org>
 */

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/interconnect-provider.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/overflow.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

static DEFINE_IDR(icc_idr);
static LIST_HEAD(icc_providers);
static int providers_count;
static bool synced_state;
static DEFINE_MUTEX(icc_lock);
static struct dentry *icc_debugfs_dir;

static void icc_summary_show_one(struct seq_file *s, struct icc_node *n)
{
	if (!n)
		return;

	seq_printf(s, "%-42s %12u %12u\n",
		   n->name, n->avg_bw, n->peak_bw);
}

static int icc_summary_show(struct seq_file *s, void *data)
{
	struct icc_provider *provider;

	seq_puts(s, " node                                  tag          avg         peak\n");
	seq_puts(s, "--------------------------------------------------------------------\n");

	mutex_lock(&icc_lock);

	list_for_each_entry(provider, &icc_providers, provider_list) {
		struct icc_node *n;

		list_for_each_entry(n, &provider->nodes, node_list) {
			struct icc_req *r;

			icc_summary_show_one(s, n);
			hlist_for_each_entry(r, &n->req_list, req_node) {
				u32 avg_bw = 0, peak_bw = 0;

				if (!r->dev)
					continue;

				if (r->enabled) {
					avg_bw = r->avg_bw;
					peak_bw = r->peak_bw;
				}

				seq_printf(s, "  %-27s %12u %12u %12u\n",
					   dev_name(r->dev), r->tag, avg_bw, peak_bw);
			}
		}
	}

	mutex_unlock(&icc_lock);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(icc_summary);

static void icc_graph_show_link(struct seq_file *s, int level,
				struct icc_node *n, struct icc_node *m)
{
	seq_printf(s, "%s\"%d:%s\" -> \"%d:%s\"\n",
		   level == 2 ? "\t\t" : "\t",
		   n->id, n->name, m->id, m->name);
}

static void icc_graph_show_node(struct seq_file *s, struct icc_node *n)
{
	seq_printf(s, "\t\t\"%d:%s\" [label=\"%d:%s",
		   n->id, n->name, n->id, n->name);
	seq_printf(s, "\n\t\t\t|avg_bw=%ukBps", n->avg_bw);
	seq_printf(s, "\n\t\t\t|peak_bw=%ukBps", n->peak_bw);
	seq_puts(s, "\"]\n");
}

static int icc_graph_show(struct seq_file *s, void *data)
{
	struct icc_provider *provider;
	struct icc_node *n;
	int cluster_index = 0;
	int i;

	seq_puts(s, "digraph {\n\trankdir = LR\n\tnode [shape = record]\n");
	mutex_lock(&icc_lock);

	/* draw providers as cluster subgraphs */
	cluster_index = 0;
	list_for_each_entry(provider, &icc_providers, provider_list) {
		seq_printf(s, "\tsubgraph cluster_%d {\n", ++cluster_index);
		if (provider->dev)
			seq_printf(s, "\t\tlabel = \"%s\"\n",
				   dev_name(provider->dev));

		/* draw nodes */
		list_for_each_entry(n, &provider->nodes, node_list)
			icc_graph_show_node(s, n);

		/* draw internal links */
		list_for_each_entry(n, &provider->nodes, node_list)
			for (i = 0; i < n->num_links; ++i)
				if (n->provider == n->links[i]->provider)
					icc_graph_show_link(s, 2, n,
							    n->links[i]);

		seq_puts(s, "\t}\n");
	}

	/* draw external links */
	list_for_each_entry(provider, &icc_providers, provider_list)
		list_for_each_entry(n, &provider->nodes, node_list)
			for (i = 0; i < n->num_links; ++i)
				if (n->provider != n->links[i]->provider)
					icc_graph_show_link(s, 1, n,
							    n->links[i]);

	mutex_unlock(&icc_lock);
	seq_puts(s, "}");

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(icc_graph);

static struct icc_node *node_find(const int id)
{
	return idr_find(&icc_idr, id);
}

static struct icc_path *path_init(struct device *dev, struct icc_node *dst,
				  ssize_t num_nodes)
{
	struct icc_node *node = dst;
	struct icc_path *path;
	int i;

	path = kzalloc(struct_size(path, reqs, num_nodes), GFP_KERNEL);
	if (!path)
		return ERR_PTR(-ENOMEM);

	path->num_nodes = num_nodes;

	for (i = num_nodes - 1; i >= 0; i--) {
		node->provider->users++;
		hlist_add_head(&path->reqs[i].req_node, &node->req_list);
		path->reqs[i].node = node;
		path->reqs[i].dev = dev;
		path->reqs[i].enabled = true;
		/* reference to previous node was saved during path traversal */
		node = node->reverse;
	}

	return path;
}

static struct icc_path *path_find(struct device *dev, struct icc_node *src,
				  struct icc_node *dst)
{
	struct icc_path *path = ERR_PTR(-EPROBE_DEFER);
	struct icc_node *n, *node = NULL;
	struct list_head traverse_list;
	struct list_head edge_list;
	struct list_head visited_list;
	size_t i, depth = 1;
	bool found = false;

	INIT_LIST_HEAD(&traverse_list);
	INIT_LIST_HEAD(&edge_list);
	INIT_LIST_HEAD(&visited_list);

	list_add(&src->search_list, &traverse_list);
	src->reverse = NULL;

	do {
		list_for_each_entry_safe(node, n, &traverse_list, search_list) {
			if (node == dst) {
				found = true;
				list_splice_init(&edge_list, &visited_list);
				list_splice_init(&traverse_list, &visited_list);
				break;
			}
			for (i = 0; i < node->num_links; i++) {
				struct icc_node *tmp = node->links[i];

				if (!tmp) {
					path = ERR_PTR(-ENOENT);
					goto out;
				}

				if (tmp->is_traversed)
					continue;

				tmp->is_traversed = true;
				tmp->reverse = node;
				list_add_tail(&tmp->search_list, &edge_list);
			}
		}

		if (found)
			break;

		list_splice_init(&traverse_list, &visited_list);
		list_splice_init(&edge_list, &traverse_list);

		/* count the hops including the source */
		depth++;

	} while (!list_empty(&traverse_list));

out:

	/* reset the traversed state */
	list_for_each_entry_reverse(n, &visited_list, search_list)
		n->is_traversed = false;

	if (found)
		path = path_init(dev, dst, depth);

	return path;
}

/*
 * We want the path to honor all bandwidth requests, so the average and peak
 * bandwidth requirements from each consumer are aggregated at each node.
 * The aggregation is platform specific, so each platform can customize it by
 * implementing its own aggregate() function.
 */

static int aggregate_requests(struct icc_node *node)
{
	struct icc_provider *p = node->provider;
	struct icc_req *r;
	u32 avg_bw, peak_bw;

	node->avg_bw = 0;
	node->peak_bw = 0;

	if (p->pre_aggregate)
		p->pre_aggregate(node);

	hlist_for_each_entry(r, &node->req_list, req_node) {
		if (r->enabled) {
			avg_bw = r->avg_bw;
			peak_bw = r->peak_bw;
		} else {
			avg_bw = 0;
			peak_bw = 0;
		}
		p->aggregate(node, r->tag, avg_bw, peak_bw,
			     &node->avg_bw, &node->peak_bw);

		/* during boot use the initial bandwidth as a floor value */
		if (!synced_state) {
			node->avg_bw = max(node->avg_bw, node->init_avg);
			node->peak_bw = max(node->peak_bw, node->init_peak);
		}
	}

	return 0;
}

static int apply_constraints(struct icc_path *path)
{
	struct icc_node *next, *prev = NULL;
	struct icc_provider *p;
	int ret = -EINVAL;
	int i;

	for (i = 0; i < path->num_nodes; i++) {
		next = path->reqs[i].node;
		p = next->provider;

		/* both endpoints should be valid master-slave pairs */
		if (!prev || (p != prev->provider && !p->inter_set)) {
			prev = next;
			continue;
		}

		/* set the constraints */
		ret = p->set(prev, next);
		if (ret)
			goto out;

		prev = next;
	}
out:
	return ret;
}

int icc_std_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
		      u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
{
	*agg_avg += avg_bw;
	*agg_peak = max(*agg_peak, peak_bw);

	return 0;
}
EXPORT_SYMBOL_GPL(icc_std_aggregate);
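
/*
 * Providers with no special aggregation needs can simply point their
 * ->aggregate callback at the helper above (illustrative, assuming a
 * provider structure being filled in a driver's probe):
 *
 *	provider->aggregate = icc_std_aggregate;
 *
 * i.e. the average bandwidths of all requests on a node are summed and
 * the largest peak bandwidth is kept.
 */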

/* of_icc_xlate_onecell() - Translate function using a single index.
 * @spec: OF phandle args to map into an interconnect node.
 * @data: private data (pointer to struct icc_onecell_data)
 *
 * This is a generic translate function that can be used to model simple
 * interconnect providers that have one device tree node and provide
 * multiple interconnect nodes. A single cell is used as an index into
 * an array of icc nodes specified in the icc_onecell_data struct when
 * registering the provider.
 */
struct icc_node *of_icc_xlate_onecell(struct of_phandle_args *spec,
				      void *data)
{
	struct icc_onecell_data *icc_data = data;
	unsigned int idx = spec->args[0];

	if (idx >= icc_data->num_nodes) {
		pr_err("%s: invalid index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	return icc_data->nodes[idx];
}
EXPORT_SYMBOL_GPL(of_icc_xlate_onecell);
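
/*
 * Illustrative sketch (not part of the framework): wiring up
 * of_icc_xlate_onecell() in a provider driver. The EXAMPLE_* identifiers
 * and local variables are hypothetical; only the icc_onecell_data layout
 * and the xlate/data assignments come from this API. Since icc_onecell_data
 * ends in a flexible array, it is sized with struct_size():
 *
 *	struct icc_onecell_data *xlate_data;
 *
 *	xlate_data = devm_kzalloc(dev, struct_size(xlate_data, nodes,
 *						   EXAMPLE_NUM_NODES),
 *				  GFP_KERNEL);
 *	if (!xlate_data)
 *		return -ENOMEM;
 *	xlate_data->num_nodes = EXAMPLE_NUM_NODES;
 *	xlate_data->nodes[EXAMPLE_MASTER_CPU] = cpu_node;	(one entry per node)
 *
 *	provider->xlate = of_icc_xlate_onecell;
 *	provider->data = xlate_data;
 *
 * A consumer specifier such as <&example_noc EXAMPLE_MASTER_CPU> then
 * resolves to xlate_data->nodes[EXAMPLE_MASTER_CPU].
 */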

/**
 * of_icc_get_from_provider() - Look-up interconnect node
 * @spec: OF phandle args to use for look-up
 *
 * Looks for interconnect provider under the node specified by @spec and if
 * found, uses xlate function of the provider to map phandle args to node.
 *
 * Returns a valid pointer to struct icc_node_data on success or ERR_PTR()
 * on failure.
 */
struct icc_node_data *of_icc_get_from_provider(struct of_phandle_args *spec)
{
	struct icc_node *node = ERR_PTR(-EPROBE_DEFER);
	struct icc_node_data *data = NULL;
	struct icc_provider *provider;

	if (!spec)
		return ERR_PTR(-EINVAL);

	mutex_lock(&icc_lock);
	list_for_each_entry(provider, &icc_providers, provider_list) {
		if (provider->dev->of_node == spec->np) {
			if (provider->xlate_extended) {
				data = provider->xlate_extended(spec, provider->data);
				if (!IS_ERR(data)) {
					node = data->node;
					break;
				}
			} else {
				node = provider->xlate(spec, provider->data);
				if (!IS_ERR(node))
					break;
			}
		}
	}
	mutex_unlock(&icc_lock);

	if (!node)
		return ERR_PTR(-EINVAL);

	if (IS_ERR(node))
		return ERR_CAST(node);

	if (!data) {
		data = kzalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return ERR_PTR(-ENOMEM);
		data->node = node;
	}

	return data;
}
EXPORT_SYMBOL_GPL(of_icc_get_from_provider);

static void devm_icc_release(struct device *dev, void *res)
{
	icc_put(*(struct icc_path **)res);
}

struct icc_path *devm_of_icc_get(struct device *dev, const char *name)
{
	struct icc_path **ptr, *path;

	ptr = devres_alloc(devm_icc_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	path = of_icc_get(dev, name);
	if (!IS_ERR(path)) {
		*ptr = path;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return path;
}
EXPORT_SYMBOL_GPL(devm_of_icc_get);

/**
 * of_icc_get_by_index() - get a path handle from a DT node based on index
 * @dev: device pointer for the consumer device
 * @idx: interconnect path index
 *
 * This function will search for a path between two endpoints and return an
 * icc_path handle on success. Use icc_put() to release constraints when they
 * are not needed anymore.
 * If the interconnect API is disabled, NULL is returned and the consumer
 * drivers will still build. Drivers are free to handle this specifically,
 * but they don't have to.
 *
 * Return: icc_path pointer on success or ERR_PTR() on error. NULL is returned
 * when the API is disabled or the "interconnects" DT property is missing.
 */
struct icc_path *of_icc_get_by_index(struct device *dev, int idx)
{
	struct icc_path *path;
	struct icc_node_data *src_data, *dst_data;
	struct device_node *np;
	struct of_phandle_args src_args, dst_args;
	int ret;

	if (!dev || !dev->of_node)
		return ERR_PTR(-ENODEV);

	np = dev->of_node;

	/*
	 * When the consumer DT node does not have an "interconnects" property,
	 * return a NULL path to skip setting constraints.
	 */
	if (!of_find_property(np, "interconnects", NULL))
		return NULL;

	/*
	 * We use a combination of phandle and specifier for endpoint. For now
	 * let's support only global ids and extend this in the future if needed
	 * without breaking DT compatibility.
	 */
	ret = of_parse_phandle_with_args(np, "interconnects",
					 "#interconnect-cells", idx * 2,
					 &src_args);
	if (ret)
		return ERR_PTR(ret);

	of_node_put(src_args.np);

	ret = of_parse_phandle_with_args(np, "interconnects",
					 "#interconnect-cells", idx * 2 + 1,
					 &dst_args);
	if (ret)
		return ERR_PTR(ret);

	of_node_put(dst_args.np);

	src_data = of_icc_get_from_provider(&src_args);

	if (IS_ERR(src_data)) {
		dev_err_probe(dev, PTR_ERR(src_data), "error finding src node\n");
		return ERR_CAST(src_data);
	}

	dst_data = of_icc_get_from_provider(&dst_args);

	if (IS_ERR(dst_data)) {
		dev_err_probe(dev, PTR_ERR(dst_data), "error finding dst node\n");
		kfree(src_data);
		return ERR_CAST(dst_data);
	}

	mutex_lock(&icc_lock);
	path = path_find(dev, src_data->node, dst_data->node);
	mutex_unlock(&icc_lock);
	if (IS_ERR(path)) {
		dev_err(dev, "%s: invalid path=%ld\n", __func__, PTR_ERR(path));
		goto free_icc_data;
	}

	if (src_data->tag && src_data->tag == dst_data->tag)
		icc_set_tag(path, src_data->tag);

	path->name = kasprintf(GFP_KERNEL, "%s-%s",
			       src_data->node->name, dst_data->node->name);
	if (!path->name) {
		kfree(path);
		path = ERR_PTR(-ENOMEM);
	}

free_icc_data:
	kfree(src_data);
	kfree(dst_data);
	return path;
}
EXPORT_SYMBOL_GPL(of_icc_get_by_index);

/**
 * of_icc_get() - get a path handle from a DT node based on name
 * @dev: device pointer for the consumer device
 * @name: interconnect path name
 *
 * This function will search for a path between two endpoints and return an
 * icc_path handle on success. Use icc_put() to release constraints when they
 * are not needed anymore.
 * If the interconnect API is disabled, NULL is returned and the consumer
 * drivers will still build. Drivers are free to handle this specifically,
 * but they don't have to.
 *
 * Return: icc_path pointer on success or ERR_PTR() on error. NULL is returned
 * when the API is disabled or the "interconnects" DT property is missing.
 */
struct icc_path *of_icc_get(struct device *dev, const char *name)
{
	struct device_node *np;
	int idx = 0;

	if (!dev || !dev->of_node)
		return ERR_PTR(-ENODEV);

	np = dev->of_node;

	/*
	 * When the consumer DT node does not have an "interconnects" property,
	 * return a NULL path to skip setting constraints.
	 */
	if (!of_find_property(np, "interconnects", NULL))
		return NULL;

	/*
	 * We use a combination of phandle and specifier for endpoint. For now
	 * let's support only global ids and extend this in the future if needed
	 * without breaking DT compatibility.
	 */
	if (name) {
		idx = of_property_match_string(np, "interconnect-names", name);
		if (idx < 0)
			return ERR_PTR(idx);
	}

	return of_icc_get_by_index(dev, idx);
}
EXPORT_SYMBOL_GPL(of_icc_get);

/**
 * icc_set_tag() - set an optional tag on a path
 * @path: the path we want to tag
 * @tag: the tag value
 *
 * This function allows consumers to append a tag to the requests associated
 * with a path, so that a different aggregation could be done based on this tag.
 */
void icc_set_tag(struct icc_path *path, u32 tag)
{
	int i;

	if (!path)
		return;

	mutex_lock(&icc_lock);

	for (i = 0; i < path->num_nodes; i++)
		path->reqs[i].tag = tag;

	mutex_unlock(&icc_lock);
}
EXPORT_SYMBOL_GPL(icc_set_tag);

/**
 * icc_get_name() - Get name of the icc path
 * @path: reference to the path returned by icc_get()
 *
 * This function is used by an interconnect consumer to get the name of the icc
 * path.
 *
 * Returns a valid pointer on success, or NULL otherwise.
 */
const char *icc_get_name(struct icc_path *path)
{
	if (!path)
		return NULL;

	return path->name;
}
EXPORT_SYMBOL_GPL(icc_get_name);

/**
 * icc_set_bw() - set bandwidth constraints on an interconnect path
 * @path: reference to the path returned by icc_get()
 * @avg_bw: average bandwidth in kilobytes per second
 * @peak_bw: peak bandwidth in kilobytes per second
 *
 * This function is used by an interconnect consumer to express its own needs
 * in terms of bandwidth for a previously requested path between two endpoints.
 * The requests are aggregated and each node is updated accordingly. The entire
 * path is locked by a mutex to ensure that the set() is completed.
 * The @path can be NULL when the "interconnects" DT property is missing,
 * which will mean that no constraints will be set.
 *
 * Returns 0 on success, or an appropriate error code otherwise.
 */
int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
{
	struct icc_node *node;
	u32 old_avg, old_peak;
	size_t i;
	int ret;

	if (!path)
		return 0;

	if (WARN_ON(IS_ERR(path) || !path->num_nodes))
		return -EINVAL;

	mutex_lock(&icc_lock);

	old_avg = path->reqs[0].avg_bw;
	old_peak = path->reqs[0].peak_bw;

	for (i = 0; i < path->num_nodes; i++) {
		node = path->reqs[i].node;

		/* update the consumer request for this path */
		path->reqs[i].avg_bw = avg_bw;
		path->reqs[i].peak_bw = peak_bw;

		/* aggregate requests for this node */
		aggregate_requests(node);

		trace_icc_set_bw(path, node, i, avg_bw, peak_bw);
	}

	ret = apply_constraints(path);
	if (ret) {
		pr_debug("interconnect: error applying constraints (%d)\n",
			 ret);

		for (i = 0; i < path->num_nodes; i++) {
			node = path->reqs[i].node;
			path->reqs[i].avg_bw = old_avg;
			path->reqs[i].peak_bw = old_peak;
			aggregate_requests(node);
		}
		apply_constraints(path);
	}

	mutex_unlock(&icc_lock);

	trace_icc_set_bw_end(path, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(icc_set_bw);
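
/*
 * Illustrative consumer sketch (assumes a consumer device with an
 * "interconnects"/"interconnect-names" DT property containing a path named
 * "memory"; the bandwidth values are arbitrary). It only strings together
 * the public API implemented above and declared in <linux/interconnect.h>:
 *
 *	struct icc_path *path;
 *	int ret;
 *
 *	path = of_icc_get(dev, "memory");	(or devm_of_icc_get())
 *	if (IS_ERR(path))
 *		return PTR_ERR(path);
 *
 *	ret = icc_set_bw(path, MBps_to_icc(100), MBps_to_icc(200));
 *	if (ret)
 *		dev_err(dev, "failed to set bandwidth: %d\n", ret);
 *
 *	icc_set_bw(path, 0, 0);		(drop the request when idle)
 *	icc_put(path);			(release the path on remove)
 */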

static int __icc_enable(struct icc_path *path, bool enable)
{
	int i;

	if (!path)
		return 0;

	if (WARN_ON(IS_ERR(path) || !path->num_nodes))
		return -EINVAL;

	mutex_lock(&icc_lock);

	for (i = 0; i < path->num_nodes; i++)
		path->reqs[i].enabled = enable;

	mutex_unlock(&icc_lock);

	return icc_set_bw(path, path->reqs[0].avg_bw,
			  path->reqs[0].peak_bw);
}

int icc_enable(struct icc_path *path)
{
	return __icc_enable(path, true);
}
EXPORT_SYMBOL_GPL(icc_enable);

int icc_disable(struct icc_path *path)
{
	return __icc_enable(path, false);
}
EXPORT_SYMBOL_GPL(icc_disable);
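
/*
 * Illustrative sketch: icc_enable()/icc_disable() keep the requested
 * avg/peak values but mark the requests inactive, which fits naturally
 * into runtime PM callbacks. The example callbacks and "priv" structure
 * below are hypothetical:
 *
 *	static int example_runtime_suspend(struct device *dev)
 *	{
 *		struct example_priv *priv = dev_get_drvdata(dev);
 *
 *		return icc_disable(priv->path);
 *	}
 *
 *	static int example_runtime_resume(struct device *dev)
 *	{
 *		struct example_priv *priv = dev_get_drvdata(dev);
 *
 *		return icc_enable(priv->path);
 *	}
 */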

/**
 * icc_get() - return a handle for path between two endpoints
 * @dev: the device requesting the path
 * @src_id: source device port id
 * @dst_id: destination device port id
 *
 * This function will search for a path between two endpoints and return an
 * icc_path handle on success. Use icc_put() to release
 * constraints when they are not needed anymore.
 * If the interconnect API is disabled, NULL is returned and the consumer
 * drivers will still build. Drivers are free to handle this specifically,
 * but they don't have to.
 *
 * Return: icc_path pointer on success, ERR_PTR() on error or NULL if the
 * interconnect API is disabled.
 */
struct icc_path *icc_get(struct device *dev, const int src_id, const int dst_id)
{
	struct icc_node *src, *dst;
	struct icc_path *path = ERR_PTR(-EPROBE_DEFER);

	mutex_lock(&icc_lock);

	src = node_find(src_id);
	if (!src)
		goto out;

	dst = node_find(dst_id);
	if (!dst)
		goto out;

	path = path_find(dev, src, dst);
	if (IS_ERR(path)) {
		dev_err(dev, "%s: invalid path=%ld\n", __func__, PTR_ERR(path));
		goto out;
	}

	path->name = kasprintf(GFP_KERNEL, "%s-%s", src->name, dst->name);
	if (!path->name) {
		kfree(path);
		path = ERR_PTR(-ENOMEM);
	}
out:
	mutex_unlock(&icc_lock);
	return path;
}
EXPORT_SYMBOL_GPL(icc_get);

/**
 * icc_put() - release the reference to the icc_path
 * @path: interconnect path
 *
 * Use this function to release the constraints on a path when the path is
 * no longer needed. The constraints will be re-aggregated.
 */
void icc_put(struct icc_path *path)
{
	struct icc_node *node;
	size_t i;
	int ret;

	if (!path || WARN_ON(IS_ERR(path)))
		return;

	ret = icc_set_bw(path, 0, 0);
	if (ret)
		pr_err("%s: error (%d)\n", __func__, ret);

	mutex_lock(&icc_lock);
	for (i = 0; i < path->num_nodes; i++) {
		node = path->reqs[i].node;
		hlist_del(&path->reqs[i].req_node);
		if (!WARN_ON(!node->provider->users))
			node->provider->users--;
	}
	mutex_unlock(&icc_lock);

	kfree_const(path->name);
	kfree(path);
}
EXPORT_SYMBOL_GPL(icc_put);

static struct icc_node *icc_node_create_nolock(int id)
{
	struct icc_node *node;

	/* check if node already exists */
	node = node_find(id);
	if (node)
		return node;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return ERR_PTR(-ENOMEM);

	id = idr_alloc(&icc_idr, node, id, id + 1, GFP_KERNEL);
	if (id < 0) {
		WARN(1, "%s: couldn't get idr\n", __func__);
		kfree(node);
		return ERR_PTR(id);
	}

	node->id = id;

	return node;
}

/**
 * icc_node_create() - create a node
 * @id: node id
 *
 * Return: icc_node pointer on success, or ERR_PTR() on error
 */
struct icc_node *icc_node_create(int id)
{
	struct icc_node *node;

	mutex_lock(&icc_lock);

	node = icc_node_create_nolock(id);

	mutex_unlock(&icc_lock);

	return node;
}
EXPORT_SYMBOL_GPL(icc_node_create);

/**
 * icc_node_destroy() - destroy a node
 * @id: node id
 */
void icc_node_destroy(int id)
{
	struct icc_node *node;

	mutex_lock(&icc_lock);

	node = node_find(id);
	if (node) {
		idr_remove(&icc_idr, node->id);
		WARN_ON(!hlist_empty(&node->req_list));
	}

	mutex_unlock(&icc_lock);

	if (!node)
		return;

	kfree(node->links);
	kfree(node);
}
EXPORT_SYMBOL_GPL(icc_node_destroy);

/**
 * icc_link_create() - create a link between two nodes
 * @node: pointer to the source node
 * @dst_id: destination node id
 *
 * Create a link between two nodes. The nodes might belong to different
 * interconnect providers and the @dst_id node might not exist (if the
 * provider driver has not probed yet). So just create the @dst_id node
 * and when the actual provider driver is probed, the rest of the node
 * data is filled.
 *
 * Return: 0 on success, or an error code otherwise
 */
int icc_link_create(struct icc_node *node, const int dst_id)
{
	struct icc_node *dst;
	struct icc_node **new;
	int ret = 0;

	if (!node->provider)
		return -EINVAL;

	mutex_lock(&icc_lock);

	dst = node_find(dst_id);
	if (!dst) {
		dst = icc_node_create_nolock(dst_id);

		if (IS_ERR(dst)) {
			ret = PTR_ERR(dst);
			goto out;
		}
	}

	new = krealloc(node->links,
		       (node->num_links + 1) * sizeof(*node->links),
		       GFP_KERNEL);
	if (!new) {
		ret = -ENOMEM;
		goto out;
	}

	node->links = new;
	node->links[node->num_links++] = dst;

out:
	mutex_unlock(&icc_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(icc_link_create);

/**
 * icc_link_destroy() - destroy a link between two nodes
 * @src: pointer to source node
 * @dst: pointer to destination node
 *
 * Return: 0 on success, or an error code otherwise
 */
int icc_link_destroy(struct icc_node *src, struct icc_node *dst)
{
	struct icc_node **new;
	size_t slot;
	int ret = 0;

	if (IS_ERR_OR_NULL(src))
		return -EINVAL;

	if (IS_ERR_OR_NULL(dst))
		return -EINVAL;

	mutex_lock(&icc_lock);

	for (slot = 0; slot < src->num_links; slot++)
		if (src->links[slot] == dst)
			break;

	if (WARN_ON(slot == src->num_links)) {
		ret = -ENXIO;
		goto out;
	}

	src->links[slot] = src->links[--src->num_links];

	new = krealloc(src->links, src->num_links * sizeof(*src->links),
		       GFP_KERNEL);
	if (new)
		src->links = new;
	else
		ret = -ENOMEM;

out:
	mutex_unlock(&icc_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(icc_link_destroy);

/**
 * icc_node_add() - add interconnect node to interconnect provider
 * @node: pointer to the interconnect node
 * @provider: pointer to the interconnect provider
 */
void icc_node_add(struct icc_node *node, struct icc_provider *provider)
{
	if (WARN_ON(node->provider))
		return;

	mutex_lock(&icc_lock);

	node->provider = provider;
	list_add_tail(&node->node_list, &provider->nodes);

	/* get the initial bandwidth values and sync them with hardware */
	if (provider->get_bw) {
		provider->get_bw(node, &node->init_avg, &node->init_peak);
	} else {
		node->init_avg = INT_MAX;
		node->init_peak = INT_MAX;
	}
	node->avg_bw = node->init_avg;
	node->peak_bw = node->init_peak;

	if (provider->pre_aggregate)
		provider->pre_aggregate(node);

	if (provider->aggregate)
		provider->aggregate(node, 0, node->init_avg, node->init_peak,
				    &node->avg_bw, &node->peak_bw);

	provider->set(node, node);
	node->avg_bw = 0;
	node->peak_bw = 0;

	mutex_unlock(&icc_lock);
}
EXPORT_SYMBOL_GPL(icc_node_add);

/**
 * icc_node_del() - delete interconnect node from interconnect provider
 * @node: pointer to the interconnect node
 */
void icc_node_del(struct icc_node *node)
{
	mutex_lock(&icc_lock);

	list_del(&node->node_list);

	mutex_unlock(&icc_lock);
}
EXPORT_SYMBOL_GPL(icc_node_del);

/**
 * icc_nodes_remove() - remove all previously added nodes from provider
 * @provider: the interconnect provider we are removing nodes from
 *
 * Return: 0 on success, or an error code otherwise
 */
int icc_nodes_remove(struct icc_provider *provider)
{
	struct icc_node *n, *tmp;

	if (WARN_ON(IS_ERR_OR_NULL(provider)))
		return -EINVAL;

	list_for_each_entry_safe_reverse(n, tmp, &provider->nodes, node_list) {
		icc_node_del(n);
		icc_node_destroy(n->id);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(icc_nodes_remove);

/**
 * icc_provider_init() - initialize a new interconnect provider
 * @provider: the interconnect provider to initialize
 *
 * Must be called before adding nodes to the provider.
 */
void icc_provider_init(struct icc_provider *provider)
{
	WARN_ON(!provider->set);

	INIT_LIST_HEAD(&provider->nodes);
}
EXPORT_SYMBOL_GPL(icc_provider_init);

/**
 * icc_provider_register() - register a new interconnect provider
 * @provider: the interconnect provider to register
 *
 * Return: 0 on success, or an error code otherwise
 */
int icc_provider_register(struct icc_provider *provider)
{
	if (WARN_ON(!provider->xlate && !provider->xlate_extended))
		return -EINVAL;

	mutex_lock(&icc_lock);
	list_add_tail(&provider->provider_list, &icc_providers);
	mutex_unlock(&icc_lock);

	dev_dbg(provider->dev, "interconnect provider registered\n");

	return 0;
}
EXPORT_SYMBOL_GPL(icc_provider_register);

/**
 * icc_provider_deregister() - deregister an interconnect provider
 * @provider: the interconnect provider to deregister
 */
void icc_provider_deregister(struct icc_provider *provider)
{
	mutex_lock(&icc_lock);
	WARN_ON(provider->users);

	list_del(&provider->provider_list);
	mutex_unlock(&icc_lock);
}
EXPORT_SYMBOL_GPL(icc_provider_deregister);

int icc_provider_add(struct icc_provider *provider)
{
	icc_provider_init(provider);

	return icc_provider_register(provider);
}
EXPORT_SYMBOL_GPL(icc_provider_add);
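
/*
 * Illustrative provider registration sketch using the split init/register
 * API above. The "desc" node table, error handling and identifiers are
 * hypothetical; real drivers iterate over a platform-specific description:
 *
 *	icc_provider_init(provider);
 *
 *	node = icc_node_create(desc->id);
 *	if (IS_ERR(node))
 *		goto err;
 *	node->name = desc->name;
 *	icc_node_add(node, provider);
 *	for (i = 0; i < desc->num_links; i++)
 *		icc_link_create(node, desc->links[i]);
 *
 *	ret = icc_provider_register(provider);
 *	if (ret)
 *		goto err;
 *
 * On failure or removal, icc_nodes_remove() and icc_provider_deregister()
 * undo the steps above.
 */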

void icc_provider_del(struct icc_provider *provider)
{
	WARN_ON(!list_empty(&provider->nodes));

	icc_provider_deregister(provider);
}
EXPORT_SYMBOL_GPL(icc_provider_del);

static const struct of_device_id __maybe_unused ignore_list[] = {
	{ .compatible = "qcom,sc7180-ipa-virt" },
	{ .compatible = "qcom,sdx55-ipa-virt" },
	{ .compatible = "qcom,sm8150-ipa-virt" },
	{}
};

static int of_count_icc_providers(struct device_node *np)
{
	struct device_node *child;
	int count = 0;

	for_each_available_child_of_node(np, child) {
		if (of_property_read_bool(child, "#interconnect-cells") &&
		    likely(!of_match_node(ignore_list, child)))
			count++;
		count += of_count_icc_providers(child);
	}

	return count;
}

void icc_sync_state(struct device *dev)
{
	struct icc_provider *p;
	struct icc_node *n;
	static int count;

	count++;

	if (count < providers_count)
		return;

	mutex_lock(&icc_lock);
	synced_state = true;
	list_for_each_entry(p, &icc_providers, provider_list) {
		dev_dbg(p->dev, "interconnect provider is in synced state\n");
		list_for_each_entry(n, &p->nodes, node_list) {
			if (n->init_avg || n->init_peak) {
				n->init_avg = 0;
				n->init_peak = 0;
				aggregate_requests(n);
				p->set(n, n);
			}
		}
	}
	mutex_unlock(&icc_lock);
}
EXPORT_SYMBOL_GPL(icc_sync_state);
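
/*
 * Illustrative sketch: a provider driver opts into the handling above by
 * pointing its driver's .sync_state callback at icc_sync_state() (the
 * driver name below is hypothetical):
 *
 *	static struct platform_driver example_icc_driver = {
 *		.probe = example_icc_probe,
 *		.driver = {
 *			.name = "example-interconnect",
 *			.sync_state = icc_sync_state,
 *		},
 *	};
 *
 * Once all counted providers have reached sync_state, the initial
 * bandwidth floor (init_avg/init_peak) is dropped and only active
 * consumer requests remain.
 */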

static int __init icc_init(void)
{
	struct device_node *root = of_find_node_by_path("/");

	providers_count = of_count_icc_providers(root);
	of_node_put(root);

	icc_debugfs_dir = debugfs_create_dir("interconnect", NULL);
	debugfs_create_file("interconnect_summary", 0444,
			    icc_debugfs_dir, NULL, &icc_summary_fops);
	debugfs_create_file("interconnect_graph", 0444,
			    icc_debugfs_dir, NULL, &icc_graph_fops);
	return 0;
}

device_initcall(icc_init);

MODULE_AUTHOR("Georgi Djakov <georgi.djakov@linaro.org>");
MODULE_DESCRIPTION("Interconnect Driver Core");
MODULE_LICENSE("GPL v2");