1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Interconnect framework core driver
4 *
5 * Copyright (c) 2017-2019, Linaro Ltd.
6 * Author: Georgi Djakov <georgi.djakov@linaro.org>
7 */
8
9 #include <linux/debugfs.h>
10 #include <linux/device.h>
11 #include <linux/idr.h>
12 #include <linux/init.h>
13 #include <linux/interconnect.h>
14 #include <linux/interconnect-provider.h>
15 #include <linux/list.h>
16 #include <linux/sched/mm.h>
17 #include <linux/module.h>
18 #include <linux/mutex.h>
19 #include <linux/slab.h>
20 #include <linux/of.h>
21 #include <linux/overflow.h>
22
23 #include "internal.h"
24
25 #define CREATE_TRACE_POINTS
26 #include "trace.h"
27
/* Maps global node ids to struct icc_node; protected by icc_lock */
static DEFINE_IDR(icc_idr);
/* All registered interconnect providers; protected by icc_lock */
static LIST_HEAD(icc_providers);
/* Number of DT provider nodes counted at boot; consumed by icc_sync_state() */
static int providers_count;
/* True once all expected providers reached sync_state (drops init floors) */
static bool synced_state;
/* Single lock serializing topology changes and bandwidth aggregation */
static DEFINE_MUTEX(icc_lock);
/* debugfs "interconnect" directory */
static struct dentry *icc_debugfs_dir;
34
icc_summary_show_one(struct seq_file * s,struct icc_node * n)35 static void icc_summary_show_one(struct seq_file *s, struct icc_node *n)
36 {
37 if (!n)
38 return;
39
40 seq_printf(s, "%-42s %12u %12u\n",
41 n->name, n->avg_bw, n->peak_bw);
42 }
43
/*
 * debugfs "interconnect_summary": one line per node with its aggregated
 * bandwidth, followed by an indented line for each consumer request
 * attached to that node.
 */
static int icc_summary_show(struct seq_file *s, void *data)
{
	struct icc_provider *provider;

	seq_puts(s, " node tag avg peak\n");
	seq_puts(s, "--------------------------------------------------------------------\n");

	mutex_lock(&icc_lock);

	list_for_each_entry(provider, &icc_providers, provider_list) {
		struct icc_node *n;

		list_for_each_entry(n, &provider->nodes, node_list) {
			struct icc_req *r;

			icc_summary_show_one(s, n);
			hlist_for_each_entry(r, &n->req_list, req_node) {
				u32 avg_bw = 0, peak_bw = 0;

				/* skip requests without a consumer device */
				if (!r->dev)
					continue;

				/* disabled requests are reported as zero */
				if (r->enabled) {
					avg_bw = r->avg_bw;
					peak_bw = r->peak_bw;
				}

				seq_printf(s, " %-27s %12u %12u %12u\n",
					   dev_name(r->dev), r->tag, avg_bw, peak_bw);
			}
		}
	}

	mutex_unlock(&icc_lock);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(icc_summary);
82
icc_graph_show_link(struct seq_file * s,int level,struct icc_node * n,struct icc_node * m)83 static void icc_graph_show_link(struct seq_file *s, int level,
84 struct icc_node *n, struct icc_node *m)
85 {
86 seq_printf(s, "%s\"%d:%s\" -> \"%d:%s\"\n",
87 level == 2 ? "\t\t" : "\t",
88 n->id, n->name, m->id, m->name);
89 }
90
icc_graph_show_node(struct seq_file * s,struct icc_node * n)91 static void icc_graph_show_node(struct seq_file *s, struct icc_node *n)
92 {
93 seq_printf(s, "\t\t\"%d:%s\" [label=\"%d:%s",
94 n->id, n->name, n->id, n->name);
95 seq_printf(s, "\n\t\t\t|avg_bw=%ukBps", n->avg_bw);
96 seq_printf(s, "\n\t\t\t|peak_bw=%ukBps", n->peak_bw);
97 seq_puts(s, "\"]\n");
98 }
99
/*
 * debugfs "interconnect_graph": dump the whole topology in graphviz dot
 * format — one cluster subgraph per provider, then cross-provider edges.
 */
static int icc_graph_show(struct seq_file *s, void *data)
{
	struct icc_provider *provider;
	struct icc_node *n;
	int cluster_index = 0;
	int i;

	seq_puts(s, "digraph {\n\trankdir = LR\n\tnode [shape = record]\n");
	mutex_lock(&icc_lock);

	/* draw providers as cluster subgraphs */
	cluster_index = 0;
	list_for_each_entry(provider, &icc_providers, provider_list) {
		seq_printf(s, "\tsubgraph cluster_%d {\n", ++cluster_index);
		if (provider->dev)
			seq_printf(s, "\t\tlabel = \"%s\"\n",
				   dev_name(provider->dev));

		/* draw nodes */
		list_for_each_entry(n, &provider->nodes, node_list)
			icc_graph_show_node(s, n);

		/* draw internal links */
		list_for_each_entry(n, &provider->nodes, node_list)
			for (i = 0; i < n->num_links; ++i)
				if (n->provider == n->links[i]->provider)
					icc_graph_show_link(s, 2, n,
							    n->links[i]);

		seq_puts(s, "\t}\n");
	}

	/* draw external links (edges crossing provider boundaries) */
	list_for_each_entry(provider, &icc_providers, provider_list)
		list_for_each_entry(n, &provider->nodes, node_list)
			for (i = 0; i < n->num_links; ++i)
				if (n->provider != n->links[i]->provider)
					icc_graph_show_link(s, 1, n,
							    n->links[i]);

	mutex_unlock(&icc_lock);
	seq_puts(s, "}");

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(icc_graph);
146
/* Look up a node by its global id; caller must hold icc_lock. */
static struct icc_node *node_find(const int id)
{
	return idr_find(&icc_idr, id);
}
151
/*
 * path_init() - allocate an icc_path and attach a request to each node on it
 * @dev: consumer device the requests belong to (may be NULL)
 * @dst: destination node; the walk follows ->reverse back to the source
 * @num_nodes: number of hops including the source node
 *
 * Walks the ->reverse chain recorded by path_find() so that reqs[0] ends up
 * being the source and reqs[num_nodes - 1] the destination. Each node's
 * provider user count is incremented. Caller must hold icc_lock.
 *
 * Return: pointer to the new path, or ERR_PTR(-ENOMEM).
 */
static struct icc_path *path_init(struct device *dev, struct icc_node *dst,
				  ssize_t num_nodes)
{
	struct icc_node *node = dst;
	struct icc_path *path;
	int i;

	path = kzalloc(struct_size(path, reqs, num_nodes), GFP_KERNEL);
	if (!path)
		return ERR_PTR(-ENOMEM);

	path->num_nodes = num_nodes;

	for (i = num_nodes - 1; i >= 0; i--) {
		node->provider->users++;
		hlist_add_head(&path->reqs[i].req_node, &node->req_list);
		path->reqs[i].node = node;
		path->reqs[i].dev = dev;
		path->reqs[i].enabled = true;
		/* reference to previous node was saved during path traversal */
		node = node->reverse;
	}

	return path;
}
177
/*
 * path_find() - breadth-first search for a path from @src to @dst
 * @dev: consumer device, forwarded to path_init()
 * @src: source node
 * @dst: destination node
 *
 * BFS over the node graph, one depth level per outer loop iteration.
 * Each visited node records its predecessor in ->reverse, which
 * path_init() later walks backwards. The ->is_traversed markers are
 * cleared before returning. Caller must hold icc_lock.
 *
 * Return: the new path, ERR_PTR(-EPROBE_DEFER) when no path was found
 * (the target provider may not have probed yet), or ERR_PTR(-ENOENT)
 * when a link points to a NULL node.
 */
static struct icc_path *path_find(struct device *dev, struct icc_node *src,
				  struct icc_node *dst)
{
	struct icc_path *path = ERR_PTR(-EPROBE_DEFER);
	struct icc_node *n, *node = NULL;
	struct list_head traverse_list;
	struct list_head edge_list;
	struct list_head visited_list;
	size_t i, depth = 1;
	bool found = false;

	INIT_LIST_HEAD(&traverse_list);
	INIT_LIST_HEAD(&edge_list);
	INIT_LIST_HEAD(&visited_list);

	list_add(&src->search_list, &traverse_list);
	src->reverse = NULL;

	do {
		list_for_each_entry_safe(node, n, &traverse_list, search_list) {
			if (node == dst) {
				found = true;
				/* keep everything on visited_list for cleanup */
				list_splice_init(&edge_list, &visited_list);
				list_splice_init(&traverse_list, &visited_list);
				break;
			}
			for (i = 0; i < node->num_links; i++) {
				struct icc_node *tmp = node->links[i];

				if (!tmp) {
					path = ERR_PTR(-ENOENT);
					goto out;
				}

				if (tmp->is_traversed)
					continue;

				tmp->is_traversed = true;
				tmp->reverse = node;
				list_add_tail(&tmp->search_list, &edge_list);
			}
		}

		if (found)
			break;

		/* current frontier is done; descend to the next depth level */
		list_splice_init(&traverse_list, &visited_list);
		list_splice_init(&edge_list, &traverse_list);

		/* count the hops including the source */
		depth++;

	} while (!list_empty(&traverse_list));

out:

	/* reset the traversed state */
	list_for_each_entry_reverse(n, &visited_list, search_list)
		n->is_traversed = false;

	if (found)
		path = path_init(dev, dst, depth);

	return path;
}
243
/*
 * We want the path to honor all bandwidth requests, so the average and peak
 * bandwidth requirements from each consumer are aggregated at each node.
 * The aggregation is platform specific, so each platform can customize it by
 * implementing its own aggregate() function.
 */

/*
 * Re-run provider aggregation over every request attached to @node,
 * refreshing node->avg_bw / node->peak_bw. Caller must hold icc_lock.
 */
static int aggregate_requests(struct icc_node *node)
{
	struct icc_provider *p = node->provider;
	struct icc_req *r;
	u32 avg_bw, peak_bw;

	node->avg_bw = 0;
	node->peak_bw = 0;

	if (p->pre_aggregate)
		p->pre_aggregate(node);

	hlist_for_each_entry(r, &node->req_list, req_node) {
		/* disabled requests contribute zero bandwidth */
		if (r->enabled) {
			avg_bw = r->avg_bw;
			peak_bw = r->peak_bw;
		} else {
			avg_bw = 0;
			peak_bw = 0;
		}
		p->aggregate(node, r->tag, avg_bw, peak_bw,
			     &node->avg_bw, &node->peak_bw);

		/* during boot use the initial bandwidth as a floor value */
		if (!synced_state) {
			node->avg_bw = max(node->avg_bw, node->init_avg);
			node->peak_bw = max(node->peak_bw, node->init_peak);
		}
	}

	return 0;
}
283
/*
 * Walk the path source-to-destination and program each master/slave pair
 * via the provider's ->set() callback. Pairs that cross a provider
 * boundary are only programmed when the provider supports inter-provider
 * pairs (->inter_set). Caller must hold icc_lock.
 *
 * NOTE(review): if the loop never reaches a ->set() call (no eligible
 * pair on the path), -EINVAL is returned — presumably every valid path
 * contains at least one such pair; confirm against path_find()'s output.
 */
static int apply_constraints(struct icc_path *path)
{
	struct icc_node *next, *prev = NULL;
	struct icc_provider *p;
	int ret = -EINVAL;
	int i;

	for (i = 0; i < path->num_nodes; i++) {
		next = path->reqs[i].node;
		p = next->provider;

		/* both endpoints should be valid master-slave pairs */
		if (!prev || (p != prev->provider && !p->inter_set)) {
			prev = next;
			continue;
		}

		/* set the constraints */
		ret = p->set(prev, next);
		if (ret)
			goto out;

		prev = next;
	}
out:
	return ret;
}
311
/*
 * Default aggregation: sum the average bandwidths, keep the largest peak.
 * Used by providers that have no special aggregation needs.
 */
int icc_std_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
		      u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
{
	if (peak_bw > *agg_peak)
		*agg_peak = peak_bw;

	*agg_avg += avg_bw;

	return 0;
}
EXPORT_SYMBOL_GPL(icc_std_aggregate);
321
/**
 * of_icc_xlate_onecell() - Translate function using a single index.
 * @spec: OF phandle args to map into an interconnect node.
 * @data: private data (pointer to struct icc_onecell_data)
 *
 * This is a generic translate function that can be used to model simple
 * interconnect providers that have one device tree node and provide
 * multiple interconnect nodes. A single cell is used as an index into
 * an array of icc nodes specified in the icc_onecell_data struct when
 * registering the provider.
 *
 * Return: the icc_node at the requested index, or ERR_PTR(-EINVAL) when
 * the index is out of range.
 */
struct icc_node *of_icc_xlate_onecell(struct of_phandle_args *spec,
				      void *data)
{
	struct icc_onecell_data *icc_data = data;
	unsigned int idx = spec->args[0];

	if (idx >= icc_data->num_nodes) {
		pr_err("%s: invalid index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	return icc_data->nodes[idx];
}
EXPORT_SYMBOL_GPL(of_icc_xlate_onecell);
346
/**
 * of_icc_get_from_provider() - Look-up interconnect node
 * @spec: OF phandle args to use for look-up
 *
 * Looks for interconnect provider under the node specified by @spec and if
 * found, uses xlate function of the provider to map phandle args to node.
 *
 * Returns a valid pointer to struct icc_node_data on success or ERR_PTR()
 * on failure.
 */
struct icc_node_data *of_icc_get_from_provider(struct of_phandle_args *spec)
{
	struct icc_node *node = ERR_PTR(-EPROBE_DEFER);
	struct icc_node_data *data = NULL;
	struct icc_provider *provider;

	if (!spec)
		return ERR_PTR(-EINVAL);

	mutex_lock(&icc_lock);
	list_for_each_entry(provider, &icc_providers, provider_list) {
		if (provider->dev->of_node == spec->np) {
			/* extended xlate returns a provider-allocated wrapper */
			if (provider->xlate_extended) {
				data = provider->xlate_extended(spec, provider->data);
				if (!IS_ERR(data)) {
					node = data->node;
					break;
				}
			} else {
				node = provider->xlate(spec, provider->data);
				if (!IS_ERR(node))
					break;
			}
		}
	}
	mutex_unlock(&icc_lock);

	if (!node)
		return ERR_PTR(-EINVAL);

	if (IS_ERR(node))
		return ERR_CAST(node);

	/* legacy xlate: wrap the bare node so callers get icc_node_data */
	if (!data) {
		data = kzalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return ERR_PTR(-ENOMEM);
		data->node = node;
	}

	return data;
}
EXPORT_SYMBOL_GPL(of_icc_get_from_provider);
400
/* devres destructor: drop the path acquired by devm_of_icc_get(). */
static void devm_icc_release(struct device *dev, void *res)
{
	icc_put(*(struct icc_path **)res);
}
405
devm_of_icc_get(struct device * dev,const char * name)406 struct icc_path *devm_of_icc_get(struct device *dev, const char *name)
407 {
408 struct icc_path **ptr, *path;
409
410 ptr = devres_alloc(devm_icc_release, sizeof(*ptr), GFP_KERNEL);
411 if (!ptr)
412 return ERR_PTR(-ENOMEM);
413
414 path = of_icc_get(dev, name);
415 if (!IS_ERR(path)) {
416 *ptr = path;
417 devres_add(dev, ptr);
418 } else {
419 devres_free(ptr);
420 }
421
422 return path;
423 }
424 EXPORT_SYMBOL_GPL(devm_of_icc_get);
425
/**
 * of_icc_get_by_index() - get a path handle from a DT node based on index
 * @dev: device pointer for the consumer device
 * @idx: interconnect path index
 *
 * This function will search for a path between two endpoints and return an
 * icc_path handle on success. Use icc_put() to release constraints when they
 * are not needed anymore.
 * If the interconnect API is disabled, NULL is returned and the consumer
 * drivers will still build. Drivers are free to handle this specifically,
 * but they don't have to.
 *
 * Return: icc_path pointer on success or ERR_PTR() on error. NULL is returned
 * when the API is disabled or the "interconnects" DT property is missing.
 */
struct icc_path *of_icc_get_by_index(struct device *dev, int idx)
{
	struct icc_path *path;
	struct icc_node_data *src_data, *dst_data;
	struct device_node *np;
	struct of_phandle_args src_args, dst_args;
	int ret;

	if (!dev || !dev->of_node)
		return ERR_PTR(-ENODEV);

	np = dev->of_node;

	/*
	 * When the consumer DT node do not have "interconnects" property
	 * return a NULL path to skip setting constraints.
	 */
	if (!of_find_property(np, "interconnects", NULL))
		return NULL;

	/*
	 * We use a combination of phandle and specifier for endpoint. For now
	 * lets support only global ids and extend this in the future if needed
	 * without breaking DT compatibility.
	 */
	ret = of_parse_phandle_with_args(np, "interconnects",
					 "#interconnect-cells", idx * 2,
					 &src_args);
	if (ret)
		return ERR_PTR(ret);

	of_node_put(src_args.np);

	/* entries come in src/dst pairs: 2*idx is the source, 2*idx+1 the dest */
	ret = of_parse_phandle_with_args(np, "interconnects",
					 "#interconnect-cells", idx * 2 + 1,
					 &dst_args);
	if (ret)
		return ERR_PTR(ret);

	of_node_put(dst_args.np);

	src_data = of_icc_get_from_provider(&src_args);

	if (IS_ERR(src_data)) {
		dev_err_probe(dev, PTR_ERR(src_data), "error finding src node\n");
		return ERR_CAST(src_data);
	}

	dst_data = of_icc_get_from_provider(&dst_args);

	if (IS_ERR(dst_data)) {
		dev_err_probe(dev, PTR_ERR(dst_data), "error finding dst node\n");
		kfree(src_data);
		return ERR_CAST(dst_data);
	}

	mutex_lock(&icc_lock);
	path = path_find(dev, src_data->node, dst_data->node);
	mutex_unlock(&icc_lock);
	if (IS_ERR(path)) {
		dev_err(dev, "%s: invalid path=%ld\n", __func__, PTR_ERR(path));
		goto free_icc_data;
	}

	/* only propagate a DT tag when both endpoints agree on it */
	if (src_data->tag && src_data->tag == dst_data->tag)
		icc_set_tag(path, src_data->tag);

	path->name = kasprintf(GFP_KERNEL, "%s-%s",
			       src_data->node->name, dst_data->node->name);
	if (!path->name) {
		kfree(path);
		path = ERR_PTR(-ENOMEM);
	}

free_icc_data:
	/* node_data wrappers are only needed during lookup */
	kfree(src_data);
	kfree(dst_data);
	return path;
}
EXPORT_SYMBOL_GPL(of_icc_get_by_index);
521
522 /**
523 * of_icc_get() - get a path handle from a DT node based on name
524 * @dev: device pointer for the consumer device
525 * @name: interconnect path name
526 *
527 * This function will search for a path between two endpoints and return an
528 * icc_path handle on success. Use icc_put() to release constraints when they
529 * are not needed anymore.
530 * If the interconnect API is disabled, NULL is returned and the consumer
531 * drivers will still build. Drivers are free to handle this specifically,
532 * but they don't have to.
533 *
534 * Return: icc_path pointer on success or ERR_PTR() on error. NULL is returned
535 * when the API is disabled or the "interconnects" DT property is missing.
536 */
of_icc_get(struct device * dev,const char * name)537 struct icc_path *of_icc_get(struct device *dev, const char *name)
538 {
539 struct device_node *np;
540 int idx = 0;
541
542 if (!dev || !dev->of_node)
543 return ERR_PTR(-ENODEV);
544
545 np = dev->of_node;
546
547 /*
548 * When the consumer DT node do not have "interconnects" property
549 * return a NULL path to skip setting constraints.
550 */
551 if (!of_find_property(np, "interconnects", NULL))
552 return NULL;
553
554 /*
555 * We use a combination of phandle and specifier for endpoint. For now
556 * lets support only global ids and extend this in the future if needed
557 * without breaking DT compatibility.
558 */
559 if (name) {
560 idx = of_property_match_string(np, "interconnect-names", name);
561 if (idx < 0)
562 return ERR_PTR(idx);
563 }
564
565 return of_icc_get_by_index(dev, idx);
566 }
567 EXPORT_SYMBOL_GPL(of_icc_get);
568
569 /**
570 * icc_set_tag() - set an optional tag on a path
571 * @path: the path we want to tag
572 * @tag: the tag value
573 *
574 * This function allows consumers to append a tag to the requests associated
575 * with a path, so that a different aggregation could be done based on this tag.
576 */
icc_set_tag(struct icc_path * path,u32 tag)577 void icc_set_tag(struct icc_path *path, u32 tag)
578 {
579 int i;
580
581 if (!path)
582 return;
583
584 mutex_lock(&icc_lock);
585
586 for (i = 0; i < path->num_nodes; i++)
587 path->reqs[i].tag = tag;
588
589 mutex_unlock(&icc_lock);
590 }
591 EXPORT_SYMBOL_GPL(icc_set_tag);
592
593 /**
594 * icc_get_name() - Get name of the icc path
595 * @path: reference to the path returned by icc_get()
596 *
597 * This function is used by an interconnect consumer to get the name of the icc
598 * path.
599 *
600 * Returns a valid pointer on success, or NULL otherwise.
601 */
icc_get_name(struct icc_path * path)602 const char *icc_get_name(struct icc_path *path)
603 {
604 if (!path)
605 return NULL;
606
607 return path->name;
608 }
609 EXPORT_SYMBOL_GPL(icc_get_name);
610
/**
 * icc_set_bw() - set bandwidth constraints on an interconnect path
 * @path: reference to the path returned by icc_get()
 * @avg_bw: average bandwidth in kilobytes per second
 * @peak_bw: peak bandwidth in kilobytes per second
 *
 * This function is used by an interconnect consumer to express its own needs
 * in terms of bandwidth for a previously requested path between two endpoints.
 * The requests are aggregated and each node is updated accordingly. The entire
 * path is locked by a mutex to ensure that the set() is completed.
 * The @path can be NULL when the "interconnects" DT properties is missing,
 * which will mean that no constraints will be set.
 *
 * Returns 0 on success, or an appropriate error code otherwise.
 */
int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
{
	struct icc_node *node;
	u32 old_avg, old_peak;
	size_t i;
	int ret;

	if (!path)
		return 0;

	if (WARN_ON(IS_ERR(path) || !path->num_nodes))
		return -EINVAL;

	mutex_lock(&icc_lock);

	/*
	 * All reqs on a path carry the same consumer values, so reqs[0]
	 * is enough to remember the previous request for rollback.
	 */
	old_avg = path->reqs[0].avg_bw;
	old_peak = path->reqs[0].peak_bw;

	for (i = 0; i < path->num_nodes; i++) {
		node = path->reqs[i].node;

		/* update the consumer request for this path */
		path->reqs[i].avg_bw = avg_bw;
		path->reqs[i].peak_bw = peak_bw;

		/* aggregate requests for this node */
		aggregate_requests(node);

		trace_icc_set_bw(path, node, i, avg_bw, peak_bw);
	}

	ret = apply_constraints(path);
	if (ret) {
		pr_debug("interconnect: error applying constraints (%d)\n",
			 ret);

		/* roll back to the previous request and re-program */
		for (i = 0; i < path->num_nodes; i++) {
			node = path->reqs[i].node;
			path->reqs[i].avg_bw = old_avg;
			path->reqs[i].peak_bw = old_peak;
			aggregate_requests(node);
		}
		apply_constraints(path);
	}

	mutex_unlock(&icc_lock);

	trace_icc_set_bw_end(path, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(icc_set_bw);
678
/*
 * Flip the enabled flag on every request of @path, then re-apply the
 * path's last requested bandwidth so the change takes effect. Note that
 * icc_set_bw() takes icc_lock itself, so the flag update and the
 * re-aggregation happen under separate lock sections.
 */
static int __icc_enable(struct icc_path *path, bool enable)
{
	int i;

	if (!path)
		return 0;

	if (WARN_ON(IS_ERR(path) || !path->num_nodes))
		return -EINVAL;

	mutex_lock(&icc_lock);
	for (i = 0; i < path->num_nodes; i++)
		path->reqs[i].enabled = enable;
	mutex_unlock(&icc_lock);

	return icc_set_bw(path, path->reqs[0].avg_bw,
			  path->reqs[0].peak_bw);
}
699
/* Re-enable a previously disabled path's bandwidth requests. */
int icc_enable(struct icc_path *path)
{
	return __icc_enable(path, true);
}
EXPORT_SYMBOL_GPL(icc_enable);

/* Disable a path's requests; they aggregate as zero until re-enabled. */
int icc_disable(struct icc_path *path)
{
	return __icc_enable(path, false);
}
EXPORT_SYMBOL_GPL(icc_disable);
711
/**
 * icc_get() - return a handle for path between two endpoints
 * @dev: the device requesting the path
 * @src_id: source device port id
 * @dst_id: destination device port id
 *
 * This function will search for a path between two endpoints and return an
 * icc_path handle on success. Use icc_put() to release
 * constraints when they are not needed anymore.
 * If the interconnect API is disabled, NULL is returned and the consumer
 * drivers will still build. Drivers are free to handle this specifically,
 * but they don't have to.
 *
 * Return: icc_path pointer on success, ERR_PTR() on error or NULL if the
 * interconnect API is disabled.
 */
struct icc_path *icc_get(struct device *dev, const int src_id, const int dst_id)
{
	struct icc_node *src, *dst;
	struct icc_path *path = ERR_PTR(-EPROBE_DEFER);

	mutex_lock(&icc_lock);

	/* either endpoint missing means its provider hasn't probed yet */
	src = node_find(src_id);
	if (!src)
		goto out;

	dst = node_find(dst_id);
	if (!dst)
		goto out;

	path = path_find(dev, src, dst);
	if (IS_ERR(path)) {
		dev_err(dev, "%s: invalid path=%ld\n", __func__, PTR_ERR(path));
		goto out;
	}

	path->name = kasprintf(GFP_KERNEL, "%s-%s", src->name, dst->name);
	if (!path->name) {
		kfree(path);
		path = ERR_PTR(-ENOMEM);
	}
out:
	mutex_unlock(&icc_lock);
	return path;
}
EXPORT_SYMBOL_GPL(icc_get);
759
/**
 * icc_put() - release the reference to the icc_path
 * @path: interconnect path
 *
 * Use this function to release the constraints on a path when the path is
 * no longer needed. The constraints will be re-aggregated.
 */
void icc_put(struct icc_path *path)
{
	struct icc_node *node;
	size_t i;
	int ret;

	if (!path || WARN_ON(IS_ERR(path)))
		return;

	/* drop this consumer's bandwidth contribution before tearing down */
	ret = icc_set_bw(path, 0, 0);
	if (ret)
		pr_err("%s: error (%d)\n", __func__, ret);

	mutex_lock(&icc_lock);
	for (i = 0; i < path->num_nodes; i++) {
		node = path->reqs[i].node;
		/* unhook the request and release the provider reference */
		hlist_del(&path->reqs[i].req_node);
		if (!WARN_ON(!node->provider->users))
			node->provider->users--;
	}
	mutex_unlock(&icc_lock);

	/* name may be a constant for some paths, hence kfree_const() */
	kfree_const(path->name);
	kfree(path);
}
EXPORT_SYMBOL_GPL(icc_put);
793
/*
 * Create (or return an existing) node with global id @id and reserve the
 * id in the IDR. Caller must hold icc_lock.
 *
 * Return: the node, or ERR_PTR() on allocation/IDR failure.
 */
static struct icc_node *icc_node_create_nolock(int id)
{
	struct icc_node *node;

	/* check if node already exists */
	node = node_find(id);
	if (node)
		return node;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return ERR_PTR(-ENOMEM);

	/* reserve exactly the requested id */
	id = idr_alloc(&icc_idr, node, id, id + 1, GFP_KERNEL);
	if (id < 0) {
		WARN(1, "%s: couldn't get idr\n", __func__);
		kfree(node);
		return ERR_PTR(id);
	}

	node->id = id;

	return node;
}
818
819 /**
820 * icc_node_create() - create a node
821 * @id: node id
822 *
823 * Return: icc_node pointer on success, or ERR_PTR() on error
824 */
icc_node_create(int id)825 struct icc_node *icc_node_create(int id)
826 {
827 struct icc_node *node;
828
829 mutex_lock(&icc_lock);
830
831 node = icc_node_create_nolock(id);
832
833 mutex_unlock(&icc_lock);
834
835 return node;
836 }
837 EXPORT_SYMBOL_GPL(icc_node_create);
838
/**
 * icc_node_destroy() - destroy a node
 * @id: node id
 *
 * Removes the node from the IDR and frees it together with its links
 * array. Warns if consumer requests are still attached.
 */
void icc_node_destroy(int id)
{
	struct icc_node *node;

	mutex_lock(&icc_lock);

	node = node_find(id);
	if (node) {
		idr_remove(&icc_idr, node->id);
		WARN_ON(!hlist_empty(&node->req_list));
	}

	mutex_unlock(&icc_lock);

	if (!node)
		return;

	/* free outside the lock; the node is already unreachable */
	kfree(node->links);
	kfree(node);
}
EXPORT_SYMBOL_GPL(icc_node_destroy);
864
/**
 * icc_link_create() - create a link between two nodes
 * @node: source node id
 * @dst_id: destination node id
 *
 * Create a link between two nodes. The nodes might belong to different
 * interconnect providers and the @dst_id node might not exist (if the
 * provider driver has not probed yet). So just create the @dst_id node
 * and when the actual provider driver is probed, the rest of the node
 * data is filled.
 *
 * Return: 0 on success, or an error code otherwise
 */
int icc_link_create(struct icc_node *node, const int dst_id)
{
	struct icc_node *dst;
	struct icc_node **new;
	int ret = 0;

	if (!node->provider)
		return -EINVAL;

	mutex_lock(&icc_lock);

	dst = node_find(dst_id);
	if (!dst) {
		/* placeholder node; filled in when its provider probes */
		dst = icc_node_create_nolock(dst_id);

		if (IS_ERR(dst)) {
			ret = PTR_ERR(dst);
			goto out;
		}
	}

	/* grow the links array by one slot */
	new = krealloc(node->links,
		       (node->num_links + 1) * sizeof(*node->links),
		       GFP_KERNEL);
	if (!new) {
		ret = -ENOMEM;
		goto out;
	}

	node->links = new;
	node->links[node->num_links++] = dst;

out:
	mutex_unlock(&icc_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(icc_link_create);
916
/**
 * icc_link_destroy() - destroy a link between two nodes
 * @src: pointer to source node
 * @dst: pointer to destination node
 *
 * Return: 0 on success, or an error code otherwise
 */
int icc_link_destroy(struct icc_node *src, struct icc_node *dst)
{
	struct icc_node **new;
	size_t slot;
	int ret = 0;

	if (IS_ERR_OR_NULL(src))
		return -EINVAL;

	if (IS_ERR_OR_NULL(dst))
		return -EINVAL;

	mutex_lock(&icc_lock);

	/* locate @dst in the source's links array */
	for (slot = 0; slot < src->num_links; slot++)
		if (src->links[slot] == dst)
			break;

	if (WARN_ON(slot == src->num_links)) {
		ret = -ENXIO;
		goto out;
	}

	/* swap-remove: move the last link into the vacated slot */
	src->links[slot] = src->links[--src->num_links];

	/*
	 * NOTE(review): if this shrinking krealloc() fails we return
	 * -ENOMEM even though the link was already removed above —
	 * presumably acceptable since the old array remains valid; confirm.
	 */
	new = krealloc(src->links, src->num_links * sizeof(*src->links),
		       GFP_KERNEL);
	if (new)
		src->links = new;
	else
		ret = -ENOMEM;

out:
	mutex_unlock(&icc_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(icc_link_destroy);
962
/**
 * icc_node_add() - add interconnect node to interconnect provider
 * @node: pointer to the interconnect node
 * @provider: pointer to the interconnect provider
 *
 * Attaches @node to @provider's node list and programs the hardware with
 * the node's initial bandwidth so that traffic active before any consumer
 * request (e.g. a framebuffer set up by the bootloader) keeps working
 * until icc_sync_state().
 */
void icc_node_add(struct icc_node *node, struct icc_provider *provider)
{
	if (WARN_ON(node->provider))
		return;

	mutex_lock(&icc_lock);

	node->provider = provider;
	list_add_tail(&node->node_list, &provider->nodes);

	/* get the initial bandwidth values and sync them with hardware */
	if (provider->get_bw) {
		provider->get_bw(node, &node->init_avg, &node->init_peak);
	} else {
		/* no readback support: assume maximum until sync_state */
		node->init_avg = INT_MAX;
		node->init_peak = INT_MAX;
	}
	node->avg_bw = node->init_avg;
	node->peak_bw = node->init_peak;

	if (provider->pre_aggregate)
		provider->pre_aggregate(node);

	if (provider->aggregate)
		provider->aggregate(node, 0, node->init_avg, node->init_peak,
				    &node->avg_bw, &node->peak_bw);

	provider->set(node, node);
	/* clear the cached values so the first real request re-aggregates */
	node->avg_bw = 0;
	node->peak_bw = 0;

	mutex_unlock(&icc_lock);
}
EXPORT_SYMBOL_GPL(icc_node_add);
1002
1003 /**
1004 * icc_node_del() - delete interconnect node from interconnect provider
1005 * @node: pointer to the interconnect node
1006 */
icc_node_del(struct icc_node * node)1007 void icc_node_del(struct icc_node *node)
1008 {
1009 mutex_lock(&icc_lock);
1010
1011 list_del(&node->node_list);
1012
1013 mutex_unlock(&icc_lock);
1014 }
1015 EXPORT_SYMBOL_GPL(icc_node_del);
1016
/**
 * icc_nodes_remove() - remove all previously added nodes from provider
 * @provider: the interconnect provider we are removing nodes from
 *
 * Return: 0 on success, or an error code otherwise
 */
int icc_nodes_remove(struct icc_provider *provider)
{
	struct icc_node *n, *tmp;

	if (WARN_ON(IS_ERR_OR_NULL(provider)))
		return -EINVAL;

	/* reverse order, mirroring how the nodes were added */
	list_for_each_entry_safe_reverse(n, tmp, &provider->nodes, node_list) {
		icc_node_del(n);
		icc_node_destroy(n->id);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(icc_nodes_remove);
1038
/**
 * icc_provider_add() - add a new interconnect provider
 * @provider: the interconnect provider that will be added into topology
 *
 * The provider must supply a ->set() callback and at least one of
 * ->xlate() / ->xlate_extended().
 *
 * Return: 0 on success, or an error code otherwise
 */
int icc_provider_add(struct icc_provider *provider)
{
	if (WARN_ON(!provider->set))
		return -EINVAL;
	if (WARN_ON(!provider->xlate && !provider->xlate_extended))
		return -EINVAL;

	mutex_lock(&icc_lock);

	INIT_LIST_HEAD(&provider->nodes);
	list_add_tail(&provider->provider_list, &icc_providers);

	mutex_unlock(&icc_lock);

	dev_dbg(provider->dev, "interconnect provider added to topology\n");

	return 0;
}
EXPORT_SYMBOL_GPL(icc_provider_add);
1064
1065 /**
1066 * icc_provider_del() - delete previously added interconnect provider
1067 * @provider: the interconnect provider that will be removed from topology
1068 *
1069 * Return: 0 on success, or an error code otherwise
1070 */
icc_provider_del(struct icc_provider * provider)1071 int icc_provider_del(struct icc_provider *provider)
1072 {
1073 mutex_lock(&icc_lock);
1074 if (provider->users) {
1075 pr_warn("interconnect provider still has %d users\n",
1076 provider->users);
1077 mutex_unlock(&icc_lock);
1078 return -EBUSY;
1079 }
1080
1081 if (!list_empty(&provider->nodes)) {
1082 pr_warn("interconnect provider still has nodes\n");
1083 mutex_unlock(&icc_lock);
1084 return -EBUSY;
1085 }
1086
1087 list_del(&provider->provider_list);
1088 mutex_unlock(&icc_lock);
1089
1090 return 0;
1091 }
1092 EXPORT_SYMBOL_GPL(icc_provider_del);
1093
/*
 * Recursively count DT nodes carrying "#interconnect-cells", i.e. the
 * providers expected to register. Known stale/virtual compatibles on the
 * ignore_list never probe and are excluded so icc_sync_state() is not
 * blocked waiting for them.
 */
static int of_count_icc_providers(struct device_node *np)
{
	struct device_node *child;
	int count = 0;
	const struct of_device_id __maybe_unused ignore_list[] = {
		{ .compatible = "qcom,sc7180-ipa-virt" },
		{ .compatible = "qcom,sdx55-ipa-virt" },
		{}
	};

	for_each_available_child_of_node(np, child) {
		if (of_property_read_bool(child, "#interconnect-cells") &&
		    likely(!of_match_node(ignore_list, child)))
			count++;
		count += of_count_icc_providers(child);
	}

	return count;
}
1113
/*
 * icc_sync_state() - sync_state callback shared by all providers
 * @dev: the provider device reaching sync_state
 *
 * Once every expected provider (providers_count, counted from DT at boot)
 * has reached sync_state, drop the boot-time bandwidth floors and
 * re-aggregate so only real consumer requests remain.
 *
 * NOTE(review): the static @count is incremented without holding icc_lock —
 * presumably sync_state callbacks are serialized by the driver core; confirm.
 */
void icc_sync_state(struct device *dev)
{
	struct icc_provider *p;
	struct icc_node *n;
	static int count;

	count++;

	/* wait until all expected providers have reached sync_state */
	if (count < providers_count)
		return;

	mutex_lock(&icc_lock);
	synced_state = true;
	list_for_each_entry(p, &icc_providers, provider_list) {
		dev_dbg(p->dev, "interconnect provider is in synced state\n");
		list_for_each_entry(n, &p->nodes, node_list) {
			/* drop the init floor and re-program the node */
			if (n->init_avg || n->init_peak) {
				n->init_avg = 0;
				n->init_peak = 0;
				aggregate_requests(n);
				p->set(n, n);
			}
		}
	}
	mutex_unlock(&icc_lock);
}
EXPORT_SYMBOL_GPL(icc_sync_state);
1141
/*
 * Framework init: count the expected DT providers (for icc_sync_state())
 * and create the debugfs summary/graph files. Runs at device_initcall
 * time, before provider drivers probe.
 */
static int __init icc_init(void)
{
	struct device_node *root = of_find_node_by_path("/");

	providers_count = of_count_icc_providers(root);
	of_node_put(root);

	icc_debugfs_dir = debugfs_create_dir("interconnect", NULL);
	debugfs_create_file("interconnect_summary", 0444,
			    icc_debugfs_dir, NULL, &icc_summary_fops);
	debugfs_create_file("interconnect_graph", 0444,
			    icc_debugfs_dir, NULL, &icc_graph_fops);
	return 0;
}

device_initcall(icc_init);

MODULE_AUTHOR("Georgi Djakov <georgi.djakov@linaro.org>");
MODULE_DESCRIPTION("Interconnect Driver Core");
MODULE_LICENSE("GPL v2");
1162