// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
 */

#include <asm/div64.h>
#include <linux/interconnect-provider.h>
#include <linux/list_sort.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

#include <soc/qcom/rpmh.h>
#include <soc/qcom/tcs.h>

#include "bcm-voter.h"
#include "icc-rpmh.h"

static LIST_HEAD(bcm_voters);
static DEFINE_MUTEX(bcm_voter_lock);

/**
 * struct bcm_voter - Bus Clock Manager voter
 * @dev: reference to the device that communicates with the BCM
 * @np: reference to the device node to match bcm voters
 * @lock: mutex to protect commit and wake/sleep lists in the voter
 * @commit_list: list containing bcms to be committed to hardware
 * @ws_list: list containing bcms that have different wake/sleep votes
 * @voter_node: list of bcm voters
 * @tcs_wait: mask for which buckets require TCS completion
 */
struct bcm_voter {
	struct device *dev;
	struct device_node *np;
	struct mutex lock;
	struct list_head commit_list;
	struct list_head ws_list;
	struct list_head voter_node;
	u32 tcs_wait;
};

static int cmp_vcd(void *priv, const struct list_head *a, const struct list_head *b)
{
	const struct qcom_icc_bcm *bcm_a =
			list_entry(a, struct qcom_icc_bcm, list);
	const struct qcom_icc_bcm *bcm_b =
			list_entry(b, struct qcom_icc_bcm, list);

	if (bcm_a->aux_data.vcd < bcm_b->aux_data.vcd)
		return -1;
	else if (bcm_a->aux_data.vcd == bcm_b->aux_data.vcd)
		return 0;
	else
		return 1;
}

static u64 bcm_div(u64 num, u32 base)
{
	/* Ensure that small votes aren't lost. */
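	/*
	 * Worked example (illustrative numbers only): num = 100 with
	 * base = 512 would truncate to 0 with a plain divide and silently
	 * drop the vote, so any non-zero request smaller than the base is
	 * clamped to 1 instead.
	 */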
	if (num && num < base)
		return 1;

	do_div(num, base);

	return num;
}
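
/*
 * Aggregate the requests of all nodes attached to a BCM: for each bucket,
 * scale each node's average and peak bandwidth to the BCM's width, keep the
 * maximum across nodes, then convert the result into the BCM's vote_x
 * (average) and vote_y (peak) votes using vote_scale and aux_data.unit.
 */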
static void bcm_aggregate(struct qcom_icc_bcm *bcm)
{
	struct qcom_icc_node *node;
	size_t i, bucket;
	u64 agg_avg[QCOM_ICC_NUM_BUCKETS] = {0};
	u64 agg_peak[QCOM_ICC_NUM_BUCKETS] = {0};
	u64 temp;

	for (bucket = 0; bucket < QCOM_ICC_NUM_BUCKETS; bucket++) {
		for (i = 0; i < bcm->num_nodes; i++) {
			node = bcm->nodes[i];
			temp = bcm_div(node->sum_avg[bucket] * bcm->aux_data.width,
				       node->buswidth * node->channels);
			agg_avg[bucket] = max(agg_avg[bucket], temp);

			temp = bcm_div(node->max_peak[bucket] * bcm->aux_data.width,
				       node->buswidth);
			agg_peak[bucket] = max(agg_peak[bucket], temp);
		}

		temp = agg_avg[bucket] * bcm->vote_scale;
		bcm->vote_x[bucket] = bcm_div(temp, bcm->aux_data.unit);

		temp = agg_peak[bucket] * bcm->vote_scale;
		bcm->vote_y[bucket] = bcm_div(temp, bcm->aux_data.unit);
	}
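
	/*
	 * Keepalive BCMs must not be allowed to vote all the way down to
	 * zero: if the aggregated active (AMC) vote is zero, hold a minimum
	 * vote of 1 in both the AMC and WAKE buckets.
	 */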
	if (bcm->keepalive && bcm->vote_x[QCOM_ICC_BUCKET_AMC] == 0 &&
	    bcm->vote_y[QCOM_ICC_BUCKET_AMC] == 0) {
		bcm->vote_x[QCOM_ICC_BUCKET_AMC] = 1;
		bcm->vote_x[QCOM_ICC_BUCKET_WAKE] = 1;
		bcm->vote_y[QCOM_ICC_BUCKET_AMC] = 1;
		bcm->vote_y[QCOM_ICC_BUCKET_WAKE] = 1;
	}
}
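
/*
 * Pack a single BCM vote into one TCS command. Votes larger than
 * BCM_TCS_CMD_VOTE_MASK are clamped to the maximum, and a request with both
 * votes at zero is encoded as not valid.
 */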
static inline void tcs_cmd_gen(struct tcs_cmd *cmd, u64 vote_x, u64 vote_y,
			       u32 addr, bool commit, bool wait)
{
	bool valid = true;

	if (!cmd)
		return;

	memset(cmd, 0, sizeof(*cmd));

	if (vote_x == 0 && vote_y == 0)
		valid = false;

	if (vote_x > BCM_TCS_CMD_VOTE_MASK)
		vote_x = BCM_TCS_CMD_VOTE_MASK;

	if (vote_y > BCM_TCS_CMD_VOTE_MASK)
		vote_y = BCM_TCS_CMD_VOTE_MASK;

	cmd->addr = addr;
	cmd->data = BCM_TCS_CMD(commit, valid, vote_x, vote_y);

	/*
	 * Set the wait-for-completion flag on commands that need to be
	 * completed before the next command is sent.
	 */
	cmd->wait = wait;
}
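
/*
 * Build one TCS command per BCM in the voter's commit list, which is assumed
 * to be pre-sorted by VCD. The commit flag is set only on the last BCM of
 * each VCD group, and n[] is filled with the number of commands in each RPMh
 * payload batch.
 */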
static void tcs_list_gen(struct bcm_voter *voter, int bucket,
			 struct tcs_cmd tcs_list[MAX_VCD],
			 int n[MAX_VCD + 1])
{
	struct list_head *bcm_list = &voter->commit_list;
	struct qcom_icc_bcm *bcm;
	bool commit, wait;
	size_t idx = 0, batch = 0, cur_vcd_size = 0;

	memset(n, 0, sizeof(int) * (MAX_VCD + 1));

	list_for_each_entry(bcm, bcm_list, list) {
		commit = false;
		cur_vcd_size++;
		if ((list_is_last(&bcm->list, bcm_list)) ||
		    bcm->aux_data.vcd != list_next_entry(bcm, list)->aux_data.vcd) {
			commit = true;
			cur_vcd_size = 0;
		}

		wait = commit && (voter->tcs_wait & BIT(bucket));

		tcs_cmd_gen(&tcs_list[idx], bcm->vote_x[bucket],
			    bcm->vote_y[bucket], bcm->addr, commit, wait);
		idx++;
		n[batch]++;
		/*
		 * Batch the BCMs in such a way that we do not split them in
		 * multiple payloads when they are under the same VCD. This is
		 * to ensure that every BCM is committed since we only set the
		 * commit bit on the last BCM request of every VCD.
		 */
		if (n[batch] >= MAX_RPMH_PAYLOAD) {
			if (!commit) {
				n[batch] -= cur_vcd_size;
				n[batch + 1] = cur_vcd_size;
			}
			batch++;
		}
	}
}

/**
 * of_bcm_voter_get - gets a bcm voter handle from DT node
 * @dev: device pointer for the consumer device
 * @name: name for the bcm voter device
 *
 * This function will match a device_node pointer for the phandle
 * specified in the device DT and return a bcm_voter handle on success.
 *
 * Returns bcm_voter pointer or ERR_PTR() on error. EPROBE_DEFER is returned
 * when the matching bcm voter has not been probed yet.
 */
struct bcm_voter *of_bcm_voter_get(struct device *dev, const char *name)
{
	struct bcm_voter *voter = ERR_PTR(-EPROBE_DEFER);
	struct bcm_voter *temp;
	struct device_node *np, *node;
	int idx = 0;

	if (!dev || !dev->of_node)
		return ERR_PTR(-ENODEV);

	np = dev->of_node;

	if (name) {
		idx = of_property_match_string(np, "qcom,bcm-voter-names", name);
		if (idx < 0)
			return ERR_PTR(idx);
	}

	node = of_parse_phandle(np, "qcom,bcm-voters", idx);

	mutex_lock(&bcm_voter_lock);
	list_for_each_entry(temp, &bcm_voters, voter_node) {
		if (temp->np == node) {
			voter = temp;
			break;
		}
	}
	mutex_unlock(&bcm_voter_lock);

	of_node_put(node);
	return voter;
}
EXPORT_SYMBOL_GPL(of_bcm_voter_get);
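
/*
 * Illustrative usage (not part of this file): a provider device tree node
 * typically references a voter by phandle, e.g.
 *
 *	qcom,bcm-voters = <&apps_bcm_voter>;
 *	qcom,bcm-voter-names = "hlos";
 *
 * and its driver resolves the handle with
 *
 *	voter = of_bcm_voter_get(dev, "hlos");
 *
 * The "apps_bcm_voter" label and the "hlos" name are example values.
 */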

/**
 * qcom_icc_bcm_voter_add - queues up the bcm nodes that require updates
 * @voter: voter that the bcms are being added to
 * @bcm: bcm to add to the commit and wake/sleep lists
 */
void qcom_icc_bcm_voter_add(struct bcm_voter *voter, struct qcom_icc_bcm *bcm)
{
	if (!voter)
		return;

	mutex_lock(&voter->lock);
	if (list_empty(&bcm->list))
		list_add_tail(&bcm->list, &voter->commit_list);

	if (list_empty(&bcm->ws_list))
		list_add_tail(&bcm->ws_list, &voter->ws_list);

	mutex_unlock(&voter->lock);
}
EXPORT_SYMBOL_GPL(qcom_icc_bcm_voter_add);

/**
 * qcom_icc_bcm_voter_commit - generates and commits tcs cmds based on bcms
 * @voter: voter that needs flushing
 *
 * This function generates a set of AMC commands and flushes to the BCM device
 * associated with the voter. It conditionally generates WAKE and SLEEP commands
 * based on deltas between WAKE/SLEEP requirements. The ws_list persists
 * through multiple commit requests and bcm nodes are removed only when the
 * requirements for WAKE match those for SLEEP.
 *
 * Returns 0 on success, or an appropriate error code otherwise.
 */
int qcom_icc_bcm_voter_commit(struct bcm_voter *voter)
{
	struct qcom_icc_bcm *bcm;
	struct qcom_icc_bcm *bcm_tmp;
	int commit_idx[MAX_VCD + 1];
	struct tcs_cmd cmds[MAX_BCMS];
	int ret = 0;

	if (!voter)
		return 0;

	mutex_lock(&voter->lock);
	list_for_each_entry(bcm, &voter->commit_list, list)
		bcm_aggregate(bcm);

	/*
	 * Pre-sort the BCMs based on VCD for ease of generating a command list
	 * that groups the BCMs with the same VCD together. VCDs are numbered
	 * with lowest being the most expensive time wise, ensuring that
	 * those commands are being sent the earliest in the queue. This needs
	 * to be sorted every commit since we can't guarantee the order in which
	 * the BCMs are added to the list.
	 */
	list_sort(NULL, &voter->commit_list, cmp_vcd);

	/*
	 * Construct the command list based on a pre-ordered list of BCMs
	 * based on VCD.
	 */
	tcs_list_gen(voter, QCOM_ICC_BUCKET_AMC, cmds, commit_idx);
	if (!commit_idx[0])
		goto out;

	rpmh_invalidate(voter->dev);

	ret = rpmh_write_batch(voter->dev, RPMH_ACTIVE_ONLY_STATE,
			       cmds, commit_idx);
	if (ret) {
		pr_err("Error sending AMC RPMH requests (%d)\n", ret);
		goto out;
	}

	list_for_each_entry_safe(bcm, bcm_tmp, &voter->commit_list, list)
		list_del_init(&bcm->list);

	list_for_each_entry_safe(bcm, bcm_tmp, &voter->ws_list, ws_list) {
		/*
		 * Only generate WAKE and SLEEP commands if a resource's
		 * requirements change as the execution environment transitions
		 * between different power states.
		 */
		if (bcm->vote_x[QCOM_ICC_BUCKET_WAKE] !=
		    bcm->vote_x[QCOM_ICC_BUCKET_SLEEP] ||
		    bcm->vote_y[QCOM_ICC_BUCKET_WAKE] !=
		    bcm->vote_y[QCOM_ICC_BUCKET_SLEEP])
			list_add_tail(&bcm->list, &voter->commit_list);
		else
			list_del_init(&bcm->ws_list);
	}

	if (list_empty(&voter->commit_list))
		goto out;

	list_sort(NULL, &voter->commit_list, cmp_vcd);

	tcs_list_gen(voter, QCOM_ICC_BUCKET_WAKE, cmds, commit_idx);

	ret = rpmh_write_batch(voter->dev, RPMH_WAKE_ONLY_STATE, cmds, commit_idx);
	if (ret) {
		pr_err("Error sending WAKE RPMH requests (%d)\n", ret);
		goto out;
	}

	tcs_list_gen(voter, QCOM_ICC_BUCKET_SLEEP, cmds, commit_idx);

	ret = rpmh_write_batch(voter->dev, RPMH_SLEEP_STATE, cmds, commit_idx);
	if (ret) {
		pr_err("Error sending SLEEP RPMH requests (%d)\n", ret);
		goto out;
	}

out:
	list_for_each_entry_safe(bcm, bcm_tmp, &voter->commit_list, list)
		list_del_init(&bcm->list);

	mutex_unlock(&voter->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(qcom_icc_bcm_voter_commit);
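
/*
 * For orientation (see the icc-rpmh provider code): a provider typically
 * calls qcom_icc_bcm_voter_add() from its aggregate callback for every BCM
 * touched by a bandwidth request, and then calls qcom_icc_bcm_voter_commit()
 * from its set callback to flush the aggregated votes to RPMh.
 */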

static int qcom_icc_bcm_voter_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct bcm_voter *voter;

	voter = devm_kzalloc(&pdev->dev, sizeof(*voter), GFP_KERNEL);
	if (!voter)
		return -ENOMEM;

	voter->dev = &pdev->dev;
	voter->np = np;

	if (of_property_read_u32(np, "qcom,tcs-wait", &voter->tcs_wait))
		voter->tcs_wait = QCOM_ICC_TAG_ACTIVE_ONLY;

	mutex_init(&voter->lock);
	INIT_LIST_HEAD(&voter->commit_list);
	INIT_LIST_HEAD(&voter->ws_list);

	mutex_lock(&bcm_voter_lock);
	list_add_tail(&voter->voter_node, &bcm_voters);
	mutex_unlock(&bcm_voter_lock);

	return 0;
}
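
/*
 * Example (illustrative) device tree node matched by this driver:
 *
 *	apps_bcm_voter: bcm-voter {
 *		compatible = "qcom,bcm-voter";
 *	};
 *
 * The optional "qcom,tcs-wait" property selects which buckets wait for TCS
 * completion; when it is absent, tcs_wait defaults to
 * QCOM_ICC_TAG_ACTIVE_ONLY.
 */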
static const struct of_device_id bcm_voter_of_match[] = {
	{ .compatible = "qcom,bcm-voter" },
	{ }
};
MODULE_DEVICE_TABLE(of, bcm_voter_of_match);

static struct platform_driver qcom_icc_bcm_voter_driver = {
	.probe = qcom_icc_bcm_voter_probe,
	.driver = {
		.name = "bcm_voter",
		.of_match_table = bcm_voter_of_match,
	},
};
module_platform_driver(qcom_icc_bcm_voter_driver);

MODULE_AUTHOR("David Dai <daidavid1@codeaurora.org>");
MODULE_DESCRIPTION("Qualcomm BCM Voter interconnect driver");
MODULE_LICENSE("GPL v2");