// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 Linaro Ltd
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/interconnect-provider.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>

#include "smd-rpm.h"
#include "icc-common.h"
#include "icc-rpm.h"

/* QNOC QoS */
#define QNOC_QOS_MCTL_LOWn_ADDR(n)	(0x8 + (n * 0x1000))
#define QNOC_QOS_MCTL_DFLT_PRIO_MASK	0x70
#define QNOC_QOS_MCTL_DFLT_PRIO_SHIFT	4
#define QNOC_QOS_MCTL_URGFWD_EN_MASK	0x8
#define QNOC_QOS_MCTL_URGFWD_EN_SHIFT	3

/* BIMC QoS */
#define M_BKE_REG_BASE(n)		(0x300 + (0x4000 * n))
#define M_BKE_EN_ADDR(n)		(M_BKE_REG_BASE(n))
#define M_BKE_HEALTH_CFG_ADDR(i, n)	(M_BKE_REG_BASE(n) + 0x40 + (0x4 * i))

#define M_BKE_HEALTH_CFG_LIMITCMDS_MASK	0x80000000
#define M_BKE_HEALTH_CFG_AREQPRIO_MASK	0x300
#define M_BKE_HEALTH_CFG_PRIOLVL_MASK	0x3
#define M_BKE_HEALTH_CFG_AREQPRIO_SHIFT	0x8
#define M_BKE_HEALTH_CFG_LIMITCMDS_SHIFT	0x1f

#define M_BKE_EN_EN_BMASK		0x1

/* NoC QoS */
#define NOC_QOS_PRIORITYn_ADDR(n)	(0x8 + (n * 0x1000))
#define NOC_QOS_PRIORITY_P1_MASK	0xc
#define NOC_QOS_PRIORITY_P0_MASK	0x3
#define NOC_QOS_PRIORITY_P1_SHIFT	0x2

#define NOC_QOS_MODEn_ADDR(n)		(0xc + (n * 0x1000))
#define NOC_QOS_MODEn_MASK		0x3

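/**
 * qcom_icc_set_qnoc_qos - initialize QoS parameters for a QNoC port
 * @src: source icc node whose QoS settings are programmed
 * @max_bw: aggregated bandwidth in bps (not used for QNoC QoS programming)
 *
 * Programs the default priority and urgent-forwarding enable bits in the
 * QNOC_QOS_MCTL_LOWn register of the node's QoS port.
 */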
static int qcom_icc_set_qnoc_qos(struct icc_node *src, u64 max_bw)
{
	struct icc_provider *provider = src->provider;
	struct qcom_icc_provider *qp = to_qcom_provider(provider);
	struct qcom_icc_node *qn = src->data;
	struct qcom_icc_qos *qos = &qn->qos;
	int rc;

	rc = regmap_update_bits(qp->regmap,
			qp->qos_offset + QNOC_QOS_MCTL_LOWn_ADDR(qos->qos_port),
			QNOC_QOS_MCTL_DFLT_PRIO_MASK,
			qos->areq_prio << QNOC_QOS_MCTL_DFLT_PRIO_SHIFT);
	if (rc)
		return rc;

	return regmap_update_bits(qp->regmap,
			qp->qos_offset + QNOC_QOS_MCTL_LOWn_ADDR(qos->qos_port),
			QNOC_QOS_MCTL_URGFWD_EN_MASK,
			!!qos->urg_fwd_en << QNOC_QOS_MCTL_URGFWD_EN_SHIFT);
}

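/**
 * qcom_icc_bimc_set_qos_health - program one BIMC BKE health register
 * @qp: qcom icc provider the node belongs to
 * @qos: QoS configuration of the node
 * @regnum: index of the M_BKE_HEALTH_CFG register to program (0..3)
 *
 * Writes the priority level and areq priority fields, and, for all
 * registers except M_BKE_HEALTH_3, the limit-commands bit.
 */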
static int qcom_icc_bimc_set_qos_health(struct qcom_icc_provider *qp,
					struct qcom_icc_qos *qos,
					int regnum)
{
	u32 val;
	u32 mask;

	val = qos->prio_level;
	mask = M_BKE_HEALTH_CFG_PRIOLVL_MASK;

	val |= qos->areq_prio << M_BKE_HEALTH_CFG_AREQPRIO_SHIFT;
	mask |= M_BKE_HEALTH_CFG_AREQPRIO_MASK;

	/* LIMITCMDS is not present on M_BKE_HEALTH_3 */
	if (regnum != 3) {
		val |= qos->limit_commands << M_BKE_HEALTH_CFG_LIMITCMDS_SHIFT;
		mask |= M_BKE_HEALTH_CFG_LIMITCMDS_MASK;
	}

	return regmap_update_bits(qp->regmap,
				  qp->qos_offset + M_BKE_HEALTH_CFG_ADDR(regnum, qos->qos_port),
				  mask, val);
}

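/**
 * qcom_icc_set_bimc_qos - configure BIMC QoS for a master port
 * @src: source icc node whose QoS settings are programmed
 * @max_bw: aggregated bandwidth in bps (not used for BIMC QoS programming)
 *
 * When the node is not in bypass mode, all four BKE health registers are
 * programmed and the BKE is enabled; otherwise the BKE is disabled.
 */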
static int qcom_icc_set_bimc_qos(struct icc_node *src, u64 max_bw)
{
	struct qcom_icc_provider *qp;
	struct qcom_icc_node *qn;
	struct icc_provider *provider;
	u32 mode = NOC_QOS_MODE_BYPASS;
	u32 val = 0;
	int i, rc = 0;

	qn = src->data;
	provider = src->provider;
	qp = to_qcom_provider(provider);

	if (qn->qos.qos_mode != NOC_QOS_MODE_INVALID)
		mode = qn->qos.qos_mode;

	/*
	 * QoS Priority: the QoS Health parameters are only considered
	 * when we are not in Bypass Mode.
	 */
	if (mode != NOC_QOS_MODE_BYPASS) {
		for (i = 3; i >= 0; i--) {
			rc = qcom_icc_bimc_set_qos_health(qp,
							  &qn->qos, i);
			if (rc)
				return rc;
		}

		/* Set BKE_EN to 1 when Fixed, Regulator or Limiter Mode */
		val = 1;
	}

	return regmap_update_bits(qp->regmap,
				  qp->qos_offset + M_BKE_EN_ADDR(qn->qos.qos_port),
				  M_BKE_EN_EN_BMASK, val);
}

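/**
 * qcom_icc_noc_set_qos_priority - program the NoC QoS priority register
 * @qp: qcom icc provider the node belongs to
 * @qos: QoS configuration of the node
 *
 * Writes the P1 (areq_prio) and P0 (prio_level) fields of the
 * NOC_QOS_PRIORITYn register, one field at a time, P1 before P0.
 */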
static int qcom_icc_noc_set_qos_priority(struct qcom_icc_provider *qp,
					 struct qcom_icc_qos *qos)
{
	u32 val;
	int rc;

	/* Must be updated one at a time, P1 first, P0 last */
	val = qos->areq_prio << NOC_QOS_PRIORITY_P1_SHIFT;
	rc = regmap_update_bits(qp->regmap,
				qp->qos_offset + NOC_QOS_PRIORITYn_ADDR(qos->qos_port),
				NOC_QOS_PRIORITY_P1_MASK, val);
	if (rc)
		return rc;

	return regmap_update_bits(qp->regmap,
				qp->qos_offset + NOC_QOS_PRIORITYn_ADDR(qos->qos_port),
				NOC_QOS_PRIORITY_P0_MASK, qos->prio_level);
}

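/**
 * qcom_icc_set_noc_qos - configure NoC QoS for a master port
 * @src: source icc node whose QoS settings are programmed
 * @max_bw: aggregated bandwidth in bps (not used for NoC QoS programming)
 *
 * Nodes without a valid QoS port are skipped. For fixed mode the priority
 * register is programmed first, then the QoS mode register is updated.
 */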
static int qcom_icc_set_noc_qos(struct icc_node *src, u64 max_bw)
{
	struct qcom_icc_provider *qp;
	struct qcom_icc_node *qn;
	struct icc_provider *provider;
	u32 mode = NOC_QOS_MODE_BYPASS;
	int rc = 0;

	qn = src->data;
	provider = src->provider;
	qp = to_qcom_provider(provider);

	if (qn->qos.qos_port < 0) {
		dev_dbg(src->provider->dev,
			"NoC QoS: Skipping %s: vote aggregated on parent.\n",
			qn->name);
		return 0;
	}

	if (qn->qos.qos_mode != NOC_QOS_MODE_INVALID)
		mode = qn->qos.qos_mode;

	if (mode == NOC_QOS_MODE_FIXED) {
		dev_dbg(src->provider->dev, "NoC QoS: %s: Set Fixed mode\n",
			qn->name);
		rc = qcom_icc_noc_set_qos_priority(qp, &qn->qos);
		if (rc)
			return rc;
	} else if (mode == NOC_QOS_MODE_BYPASS) {
		dev_dbg(src->provider->dev, "NoC QoS: %s: Set Bypass mode\n",
			qn->name);
	}

	return regmap_update_bits(qp->regmap,
				  qp->qos_offset + NOC_QOS_MODEn_ADDR(qn->qos.qos_port),
				  NOC_QOS_MODEn_MASK, mode);
}

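/**
 * qcom_icc_qos_set - dispatch QoS programming based on the bus type
 * @node: icc node whose QoS settings are programmed
 * @sum_bw: aggregated bandwidth in bps, passed to the bus-specific helper
 *
 * Selects the BIMC, QNoC or generic NoC QoS helper according to the
 * provider type.
 */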
static int qcom_icc_qos_set(struct icc_node *node, u64 sum_bw)
{
	struct qcom_icc_provider *qp = to_qcom_provider(node->provider);
	struct qcom_icc_node *qn = node->data;

	dev_dbg(node->provider->dev, "Setting QoS for %s\n", qn->name);

	switch (qp->type) {
	case QCOM_ICC_BIMC:
		return qcom_icc_set_bimc_qos(node, sum_bw);
	case QCOM_ICC_QNOC:
		return qcom_icc_set_qnoc_qos(node, sum_bw);
	default:
		return qcom_icc_set_noc_qos(node, sum_bw);
	}
}

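/**
 * qcom_icc_rpm_set - send bandwidth votes to the RPM over SMD
 * @mas_rpm_id: RPM id of the master port, or -1 to skip the master vote
 * @slv_rpm_id: RPM id of the slave port, or -1 to skip the slave vote
 * @sum_bw: aggregated bandwidth to vote for, in bps
 */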
static int qcom_icc_rpm_set(int mas_rpm_id, int slv_rpm_id, u64 sum_bw)
{
	int ret = 0;

	if (mas_rpm_id != -1) {
		ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE,
					    RPM_BUS_MASTER_REQ,
					    mas_rpm_id,
					    sum_bw);
		if (ret) {
			pr_err("qcom_icc_rpm_smd_send mas %d error %d\n",
			       mas_rpm_id, ret);
			return ret;
		}
	}

	if (slv_rpm_id != -1) {
		ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE,
					    RPM_BUS_SLAVE_REQ,
					    slv_rpm_id,
					    sum_bw);
		if (ret) {
			pr_err("qcom_icc_rpm_smd_send slv %d error %d\n",
			       slv_rpm_id, ret);
			return ret;
		}
	}

	return ret;
}

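/**
 * __qcom_icc_set - apply an aggregated bandwidth vote to a single node
 * @n: icc node to apply the vote to
 * @qn: qcom-specific data of @n
 * @sum_bw: aggregated bandwidth in bps
 *
 * RPM-owned nodes are voted for through the RPM; AP-owned nodes with a
 * valid QoS mode are programmed directly from the AP.
 */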
static int __qcom_icc_set(struct icc_node *n, struct qcom_icc_node *qn,
			  u64 sum_bw)
{
	int ret;

	if (!qn->qos.ap_owned) {
		/* send bandwidth request message to the RPM processor */
		ret = qcom_icc_rpm_set(qn->mas_rpm_id, qn->slv_rpm_id, sum_bw);
		if (ret)
			return ret;
	} else if (qn->qos.qos_mode != -1) {
		/* set bandwidth directly from the AP */
		ret = qcom_icc_qos_set(n, sum_bw);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * qcom_icc_pre_bw_aggregate - clear stale values before requests are re-aggregated
 * @node: icc node to operate on
 */
static void qcom_icc_pre_bw_aggregate(struct icc_node *node)
{
	struct qcom_icc_node *qn;
	size_t i;

	qn = node->data;
	for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
		qn->sum_avg[i] = 0;
		qn->max_peak[i] = 0;
	}
}

/**
 * qcom_icc_bw_aggregate - aggregate bw for buckets indicated by tag
 * @node: node to aggregate
 * @tag: tag to indicate which buckets to aggregate
 * @avg_bw: new bw to sum aggregate
 * @peak_bw: new bw to max aggregate
 * @agg_avg: existing aggregate avg bw val
 * @agg_peak: existing aggregate peak bw val
 */
static int qcom_icc_bw_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
				 u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
{
	size_t i;
	struct qcom_icc_node *qn;

	qn = node->data;

	if (!tag)
		tag = QCOM_ICC_TAG_ALWAYS;

	for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
		if (tag & BIT(i)) {
			qn->sum_avg[i] += avg_bw;
			qn->max_peak[i] = max_t(u32, qn->max_peak[i], peak_bw);
		}
	}

	*agg_avg += avg_bw;
	*agg_peak = max_t(u32, *agg_peak, peak_bw);
	return 0;
}

/**
 * qcom_icc_bus_aggregate - aggregate bandwidth by traversing all nodes
 * @provider: generic interconnect provider
 * @agg_avg: an array for aggregated average bandwidth of buckets
 * @agg_peak: an array for aggregated peak bandwidth of buckets
 * @max_agg_avg: pointer to max value of aggregated average bandwidth
 */
static void qcom_icc_bus_aggregate(struct icc_provider *provider,
				   u64 *agg_avg, u64 *agg_peak,
				   u64 *max_agg_avg)
{
	struct icc_node *node;
	struct qcom_icc_node *qn;
	int i;

	/* Initialise aggregate values */
	for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
		agg_avg[i] = 0;
		agg_peak[i] = 0;
	}

	*max_agg_avg = 0;

	/*
	 * Iterate nodes on the interconnect and aggregate bandwidth
	 * requests for every bucket.
	 */
	list_for_each_entry(node, &provider->nodes, node_list) {
		qn = node->data;
		for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
			agg_avg[i] += qn->sum_avg[i];
			agg_peak[i] = max_t(u64, agg_peak[i], qn->max_peak[i]);
		}
	}

	/* Find maximum values across all buckets */
	for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++)
		*max_agg_avg = max_t(u64, *max_agg_avg, agg_avg[i]);
}

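/**
 * qcom_icc_set - .set callback of the icc provider
 * @src: source icc node of the path being set
 * @dst: destination icc node of the path being set, may be NULL
 *
 * Aggregates bandwidth across all nodes of the provider, applies the
 * resulting vote to both endpoints and scales the bus clocks to match
 * the per-bucket bandwidth requirements.
 */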
static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
{
	struct qcom_icc_provider *qp;
	struct qcom_icc_node *src_qn = NULL, *dst_qn = NULL;
	struct icc_provider *provider;
	u64 sum_bw;
	u64 rate;
	u64 agg_avg[QCOM_ICC_NUM_BUCKETS], agg_peak[QCOM_ICC_NUM_BUCKETS];
	u64 max_agg_avg;
	int ret, i;
	int bucket;

	src_qn = src->data;
	if (dst)
		dst_qn = dst->data;
	provider = src->provider;
	qp = to_qcom_provider(provider);

	qcom_icc_bus_aggregate(provider, agg_avg, agg_peak, &max_agg_avg);

	sum_bw = icc_units_to_bps(max_agg_avg);

	ret = __qcom_icc_set(src, src_qn, sum_bw);
	if (ret)
		return ret;
	if (dst_qn) {
		ret = __qcom_icc_set(dst, dst_qn, sum_bw);
		if (ret)
			return ret;
	}

	for (i = 0; i < qp->num_clks; i++) {
		/*
		 * Use the WAKE bucket for the active clock ("bus_a") and the
		 * SLEEP bucket for all other clocks. If a platform doesn't set
		 * interconnect path tags, the SLEEP bucket is used for all
		 * clocks by default.
		 *
		 * Note: the AMC bucket is not supported yet.
		 */
		if (!strcmp(qp->bus_clks[i].id, "bus_a"))
			bucket = QCOM_ICC_BUCKET_WAKE;
		else
			bucket = QCOM_ICC_BUCKET_SLEEP;

		rate = icc_units_to_bps(max(agg_avg[bucket], agg_peak[bucket]));
		do_div(rate, src_qn->buswidth);
		rate = min_t(u64, rate, LONG_MAX);

		if (qp->bus_clk_rate[i] == rate)
			continue;

		ret = clk_set_rate(qp->bus_clks[i].clk, rate);
		if (ret) {
			pr_err("%s clk_set_rate error: %d\n",
			       qp->bus_clks[i].id, ret);
			return ret;
		}
		qp->bus_clk_rate[i] = rate;
	}

	return 0;
}

static const char * const bus_clocks[] = {
	"bus", "bus_a",
};

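/**
 * qnoc_probe - common probe routine for RPM-based interconnect providers
 * @pdev: platform device of the NoC being probed
 *
 * Looks up the SoC-specific descriptor from the OF match data, sets up the
 * optional regmap and the bus clocks, creates and links the icc nodes,
 * registers the provider and populates any child NoC devices.
 */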
int qnoc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct qcom_icc_desc *desc;
	struct icc_onecell_data *data;
	struct icc_provider *provider;
	struct qcom_icc_node * const *qnodes;
	struct qcom_icc_provider *qp;
	struct icc_node *node;
	size_t num_nodes, i;
	const char * const *cds;
	int cd_num;
	int ret;

	/* wait for the RPM proxy */
	if (!qcom_icc_rpm_smd_available())
		return -EPROBE_DEFER;

	desc = of_device_get_match_data(dev);
	if (!desc)
		return -EINVAL;

	qnodes = desc->nodes;
	num_nodes = desc->num_nodes;

	if (desc->num_clocks) {
		cds = desc->clocks;
		cd_num = desc->num_clocks;
	} else {
		cds = bus_clocks;
		cd_num = ARRAY_SIZE(bus_clocks);
	}

	qp = devm_kzalloc(dev, struct_size(qp, bus_clks, cd_num), GFP_KERNEL);
	if (!qp)
		return -ENOMEM;

	qp->bus_clk_rate = devm_kcalloc(dev, cd_num, sizeof(*qp->bus_clk_rate),
					GFP_KERNEL);
	if (!qp->bus_clk_rate)
		return -ENOMEM;

	data = devm_kzalloc(dev, struct_size(data, nodes, num_nodes),
			    GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	for (i = 0; i < cd_num; i++)
		qp->bus_clks[i].id = cds[i];
	qp->num_clks = cd_num;

	qp->type = desc->type;
	qp->qos_offset = desc->qos_offset;

	if (desc->regmap_cfg) {
		struct resource *res;
		void __iomem *mmio;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!res) {
			/* Try parent's regmap */
			qp->regmap = dev_get_regmap(dev->parent, NULL);
			if (qp->regmap)
				goto regmap_done;
			return -ENODEV;
		}

		mmio = devm_ioremap_resource(dev, res);
		if (IS_ERR(mmio)) {
			dev_err(dev, "Cannot ioremap interconnect bus resource\n");
			return PTR_ERR(mmio);
		}

		qp->regmap = devm_regmap_init_mmio(dev, mmio, desc->regmap_cfg);
		if (IS_ERR(qp->regmap)) {
			dev_err(dev, "Cannot regmap interconnect bus resource\n");
			return PTR_ERR(qp->regmap);
		}
	}

regmap_done:
	ret = devm_clk_bulk_get(dev, qp->num_clks, qp->bus_clks);
	if (ret)
		return ret;

	ret = clk_bulk_prepare_enable(qp->num_clks, qp->bus_clks);
	if (ret)
		return ret;

	provider = &qp->provider;
	provider->dev = dev;
	provider->set = qcom_icc_set;
	provider->pre_aggregate = qcom_icc_pre_bw_aggregate;
	provider->aggregate = qcom_icc_bw_aggregate;
	provider->xlate_extended = qcom_icc_xlate_extended;
	provider->data = data;

	icc_provider_init(provider);

	for (i = 0; i < num_nodes; i++) {
		size_t j;

		node = icc_node_create(qnodes[i]->id);
		if (IS_ERR(node)) {
			ret = PTR_ERR(node);
			goto err_remove_nodes;
		}

		node->name = qnodes[i]->name;
		node->data = qnodes[i];
		icc_node_add(node, provider);

		for (j = 0; j < qnodes[i]->num_links; j++)
			icc_link_create(node, qnodes[i]->links[j]);

		data->nodes[i] = node;
	}
	data->num_nodes = num_nodes;

	ret = icc_provider_register(provider);
	if (ret)
		goto err_remove_nodes;

	platform_set_drvdata(pdev, qp);

	/* Populate child NoC devices if any */
	if (of_get_child_count(dev->of_node) > 0) {
		ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
		if (ret)
			goto err_deregister_provider;
	}

	return 0;

err_deregister_provider:
	icc_provider_deregister(provider);
err_remove_nodes:
	icc_nodes_remove(provider);
	clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);

	return ret;
}
EXPORT_SYMBOL(qnoc_probe);

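/**
 * qnoc_remove - common remove routine for RPM-based interconnect providers
 * @pdev: platform device of the NoC being removed
 *
 * Deregisters the provider, removes its nodes and disables the bus clocks
 * that were enabled in qnoc_probe().
 */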
int qnoc_remove(struct platform_device *pdev)
{
	struct qcom_icc_provider *qp = platform_get_drvdata(pdev);

	icc_provider_deregister(&qp->provider);
	icc_nodes_remove(&qp->provider);
	clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);

	return 0;
}
EXPORT_SYMBOL(qnoc_remove);