// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 */

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

#include <soc/qcom/rpmh.h>

#include "rpmh-internal.h"

#define RPMH_TIMEOUT_MS		msecs_to_jiffies(10000)

#define DEFINE_RPMH_MSG_ONSTACK(device, s, q, name)	\
	struct rpmh_request name = {			\
		.msg = {				\
			.state = s,			\
			.cmds = name.cmd,		\
			.num_cmds = 0,			\
			.wait_for_compl = true,		\
		},					\
		.cmd = { { 0 } },			\
		.completion = q,			\
		.dev = device,				\
		.needs_free = false,			\
	}

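/*
 * A minimal usage sketch for DEFINE_RPMH_MSG_ONSTACK (illustrative only,
 * mirroring how rpmh_write() below instantiates it): the macro builds a
 * fully-initialized synchronous request on the caller's stack; the caller
 * then fills in the payload and command count.
 *
 *	DECLARE_COMPLETION_ONSTACK(compl);
 *	DEFINE_RPMH_MSG_ONSTACK(dev, RPMH_ACTIVE_ONLY_STATE, &compl, rpm_msg);
 *
 *	rpm_msg.cmd[0].addr = addr;
 *	rpm_msg.cmd[0].data = data;
 *	rpm_msg.msg.num_cmds = 1;
 */
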
#define ctrlr_to_drv(ctrlr) container_of(ctrlr, struct rsc_drv, client)

/**
 * struct cache_req - the request object for caching
 *
 * @addr: the address of the resource
 * @sleep_val: the sleep vote
 * @wake_val: the wake vote
 * @list: linked list obj
 */
struct cache_req {
	u32 addr;
	u32 sleep_val;
	u32 wake_val;
	struct list_head list;
};

/**
 * struct batch_cache_req - An entry in our batch cache
 *
 * @list: linked list obj
 * @count: number of messages
 * @rpm_msgs: the messages
 */
struct batch_cache_req {
	struct list_head list;
	int count;
	struct rpmh_request rpm_msgs[];
};

static struct rpmh_ctrlr *get_rpmh_ctrlr(const struct device *dev)
{
	struct rsc_drv *drv = dev_get_drvdata(dev->parent);

	return &drv->client;
}

void rpmh_tx_done(const struct tcs_request *msg, int r)
{
	struct rpmh_request *rpm_msg = container_of(msg, struct rpmh_request,
						    msg);
	struct completion *compl = rpm_msg->completion;
	bool free = rpm_msg->needs_free;

	rpm_msg->err = r;

	if (r)
		dev_err(rpm_msg->dev, "RPMH TX fail in msg addr=%#x, err=%d\n",
			rpm_msg->msg.cmds[0].addr, r);

	/* Signal the blocking thread we are done */
	if (compl)
		complete(compl);

	if (free)
		kfree(rpm_msg);
}

static struct cache_req *__find_req(struct rpmh_ctrlr *ctrlr, u32 addr)
{
	struct cache_req *p, *req = NULL;

	list_for_each_entry(p, &ctrlr->cache, list) {
		if (p->addr == addr) {
			req = p;
			break;
		}
	}

	return req;
}

static struct cache_req *cache_rpm_request(struct rpmh_ctrlr *ctrlr,
					   enum rpmh_state state,
					   struct tcs_cmd *cmd)
{
	struct cache_req *req;
	unsigned long flags;
	u32 old_sleep_val, old_wake_val;

	spin_lock_irqsave(&ctrlr->cache_lock, flags);
	req = __find_req(ctrlr, cmd->addr);
	if (req)
		goto existing;

	req = kzalloc(sizeof(*req), GFP_ATOMIC);
	if (!req) {
		req = ERR_PTR(-ENOMEM);
		goto unlock;
	}

	req->addr = cmd->addr;
	req->sleep_val = req->wake_val = UINT_MAX;
	list_add_tail(&req->list, &ctrlr->cache);

existing:
	old_sleep_val = req->sleep_val;
	old_wake_val = req->wake_val;

	switch (state) {
	case RPMH_ACTIVE_ONLY_STATE:
	case RPMH_WAKE_ONLY_STATE:
		req->wake_val = cmd->data;
		break;
	case RPMH_SLEEP_STATE:
		req->sleep_val = cmd->data;
		break;
	}

	/*
	 * Mark the controller dirty only once both votes have been set
	 * (neither is still UINT_MAX) and at least one of them changed.
	 */
	ctrlr->dirty |= (req->sleep_val != old_sleep_val ||
			 req->wake_val != old_wake_val) &&
			 req->sleep_val != UINT_MAX &&
			 req->wake_val != UINT_MAX;

unlock:
	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);

	return req;
}

/**
 * __rpmh_write: Cache and send the RPMH request
 *
 * @dev: The device making the request
 * @state: Active/Sleep request type
 * @rpm_msg: The data that needs to be sent (cmds).
 *
 * Cache the RPMH request and send if the state is ACTIVE_ONLY.
 * SLEEP/WAKE_ONLY requests are not sent to the controller at
 * this time. Use rpmh_flush() to send them to the controller.
 */
static int __rpmh_write(const struct device *dev, enum rpmh_state state,
			struct rpmh_request *rpm_msg)
{
	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
	int ret = -EINVAL;
	struct cache_req *req;
	int i;

	rpm_msg->msg.state = state;

	/* Cache the request in our store and link the payload */
	for (i = 0; i < rpm_msg->msg.num_cmds; i++) {
		req = cache_rpm_request(ctrlr, state, &rpm_msg->msg.cmds[i]);
		if (IS_ERR(req))
			return PTR_ERR(req);
	}

	if (state == RPMH_ACTIVE_ONLY_STATE) {
		WARN_ON(irqs_disabled());
		ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msg->msg);
	} else {
		/* Clean up our call by spoofing tx_done */
		ret = 0;
		rpmh_tx_done(&rpm_msg->msg, ret);
	}

	return ret;
}

static int __fill_rpmh_msg(struct rpmh_request *req, enum rpmh_state state,
			   const struct tcs_cmd *cmd, u32 n)
{
	if (!cmd || !n || n > MAX_RPMH_PAYLOAD)
		return -EINVAL;

	memcpy(req->cmd, cmd, n * sizeof(*cmd));

	req->msg.state = state;
	req->msg.cmds = req->cmd;
	req->msg.num_cmds = n;

	return 0;
}

/**
 * rpmh_write_async: Write a set of RPMH commands
 *
 * @dev: The device making the request
 * @state: Active/sleep set
 * @cmd: The payload data
 * @n: The number of elements in payload
 *
 * Write a set of RPMH commands; the order of commands is maintained
 * and they will be sent as a single shot.
 */
int rpmh_write_async(const struct device *dev, enum rpmh_state state,
		     const struct tcs_cmd *cmd, u32 n)
{
	struct rpmh_request *rpm_msg;
	int ret;

	rpm_msg = kzalloc(sizeof(*rpm_msg), GFP_ATOMIC);
	if (!rpm_msg)
		return -ENOMEM;
	rpm_msg->needs_free = true;

	ret = __fill_rpmh_msg(rpm_msg, state, cmd, n);
	if (ret) {
		kfree(rpm_msg);
		return ret;
	}

	return __rpmh_write(dev, state, rpm_msg);
}
EXPORT_SYMBOL(rpmh_write_async);

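/*
 * Example (hypothetical client, sketch only): fire-and-forget write of a
 * single command. The request is heap-allocated with needs_free set, so
 * rpmh_tx_done() frees it once the accelerator acks; the caller must not
 * touch it after this call returns. The address and data are made up for
 * illustration.
 *
 *	struct tcs_cmd cmd = {
 *		.addr = 0x50000,
 *		.data = 0x1,
 *	};
 *	int ret = rpmh_write_async(dev, RPMH_ACTIVE_ONLY_STATE, &cmd, 1);
 */
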
/**
 * rpmh_write: Write a set of RPMH commands and block until response
 *
 * @dev: The device making the request
 * @state: Active/sleep set
 * @cmd: The payload data
 * @n: The number of elements in @cmd
 *
 * May sleep. Do not call from atomic contexts.
 */
int rpmh_write(const struct device *dev, enum rpmh_state state,
	       const struct tcs_cmd *cmd, u32 n)
{
	DECLARE_COMPLETION_ONSTACK(compl);
	DEFINE_RPMH_MSG_ONSTACK(dev, state, &compl, rpm_msg);
	int ret;

	if (!cmd || !n || n > MAX_RPMH_PAYLOAD)
		return -EINVAL;

	memcpy(rpm_msg.cmd, cmd, n * sizeof(*cmd));
	rpm_msg.msg.num_cmds = n;

	ret = __rpmh_write(dev, state, &rpm_msg);
	if (ret)
		return ret;

	ret = wait_for_completion_timeout(&compl, RPMH_TIMEOUT_MS);
	WARN_ON(!ret);
	return (ret > 0) ? 0 : -ETIMEDOUT;
}
EXPORT_SYMBOL(rpmh_write);

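/*
 * Example (hypothetical client, sketch only): blocking write that sleeps
 * until the accelerator acks or RPMH_TIMEOUT_MS expires. Must be called
 * from a context that can sleep. The addresses and data are made up for
 * illustration.
 *
 *	struct tcs_cmd cmds[2] = {
 *		{ .addr = 0x50000, .data = 0x1 },
 *		{ .addr = 0x50004, .data = 0x0 },
 *	};
 *	int ret = rpmh_write(dev, RPMH_ACTIVE_ONLY_STATE, cmds,
 *			     ARRAY_SIZE(cmds));
 */
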
static void cache_batch(struct rpmh_ctrlr *ctrlr, struct batch_cache_req *req)
{
	unsigned long flags;

	spin_lock_irqsave(&ctrlr->cache_lock, flags);
	list_add_tail(&req->list, &ctrlr->batch_cache);
	ctrlr->dirty = true;
	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
}

static int flush_batch(struct rpmh_ctrlr *ctrlr)
{
	struct batch_cache_req *req;
	const struct rpmh_request *rpm_msg;
	int ret = 0;
	int i;

	/* Send Sleep/Wake requests to the controller, expect no response */
	list_for_each_entry(req, &ctrlr->batch_cache, list) {
		for (i = 0; i < req->count; i++) {
			rpm_msg = req->rpm_msgs + i;
			ret = rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr),
						       &rpm_msg->msg);
			if (ret)
				break;
		}
	}

	return ret;
}

/**
 * rpmh_write_batch: Write multiple sets of RPMH commands and wait for the
 * batch to finish.
 *
 * @dev: the device making the request
 * @state: Active/sleep set
 * @cmd: The payload data
 * @n: The array of counts of elements in each batch, 0 terminated.
 *
 * Write a request to the RSC controller without caching. If the request
 * state is ACTIVE, then the requests are treated as completion requests
 * and sent to the controller immediately. The function waits until all the
 * commands are complete. If the request was to SLEEP or WAKE_ONLY, then the
 * request is sent as fire-n-forget and no ack is expected.
 *
 * May sleep. Do not call from atomic contexts for ACTIVE_ONLY requests.
 */
int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
		     const struct tcs_cmd *cmd, u32 *n)
{
	struct batch_cache_req *req;
	struct rpmh_request *rpm_msgs;
	struct completion *compls;
	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
	unsigned long time_left;
	int count = 0;
	int ret, i;
	void *ptr;

	if (!cmd || !n)
		return -EINVAL;

	while (n[count] > 0)
		count++;
	if (!count)
		return -EINVAL;

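	/*
	 * Allocate the batch_cache_req, its trailing rpm_msgs[] flexible
	 * array and the per-message completions in one block:
	 *
	 *	[batch_cache_req][rpm_msgs[0..count-1]][compls[0..count-1]]
	 *
	 * For the non-ACTIVE path the whole block is handed off to the
	 * batch cache; for the ACTIVE path it is freed before returning.
	 */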
	ptr = kzalloc(sizeof(*req) +
		      count * (sizeof(req->rpm_msgs[0]) + sizeof(*compls)),
		      GFP_ATOMIC);
	if (!ptr)
		return -ENOMEM;

	req = ptr;
	compls = ptr + sizeof(*req) + count * sizeof(*rpm_msgs);

	req->count = count;
	rpm_msgs = req->rpm_msgs;

	for (i = 0; i < count; i++) {
		__fill_rpmh_msg(rpm_msgs + i, state, cmd, n[i]);
		cmd += n[i];
	}

	if (state != RPMH_ACTIVE_ONLY_STATE) {
		cache_batch(ctrlr, req);
		return 0;
	}

	for (i = 0; i < count; i++) {
		struct completion *compl = &compls[i];

		init_completion(compl);
		rpm_msgs[i].completion = compl;
		ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msgs[i].msg);
		if (ret) {
			pr_err("Error(%d) sending RPMH message addr=%#x\n",
			       ret, rpm_msgs[i].msg.cmds[0].addr);
			break;
		}
	}

	time_left = RPMH_TIMEOUT_MS;
	while (i--) {
		time_left = wait_for_completion_timeout(&compls[i], time_left);
		if (!time_left) {
			/*
			 * Better hope they never finish because they'll signal
			 * the completion that we're going to free once
			 * we've returned from this function.
			 */
			WARN_ON(1);
			ret = -ETIMEDOUT;
			goto exit;
		}
	}

exit:
	kfree(ptr);

	return ret;
}
EXPORT_SYMBOL(rpmh_write_batch);

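/*
 * Example (hypothetical client, sketch only): two batches of commands,
 * described by a 0-terminated count array. With n = { 1, 2, 0 }, cmds[0]
 * forms the first batch and cmds[1..2] the second. The addresses and data
 * are made up for illustration.
 *
 *	struct tcs_cmd cmds[3] = {
 *		{ .addr = 0x50000, .data = 0x1 },
 *		{ .addr = 0x50004, .data = 0x2 },
 *		{ .addr = 0x50008, .data = 0x3 },
 *	};
 *	u32 n[] = { 1, 2, 0 };
 *	int ret = rpmh_write_batch(dev, RPMH_SLEEP_STATE, cmds, n);
 */
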
/* A cached request needs flushing only if both votes are set and differ */
static int is_req_valid(struct cache_req *req)
{
	return (req->sleep_val != UINT_MAX &&
		req->wake_val != UINT_MAX &&
		req->sleep_val != req->wake_val);
}

static int send_single(struct rpmh_ctrlr *ctrlr, enum rpmh_state state,
		       u32 addr, u32 data)
{
	DEFINE_RPMH_MSG_ONSTACK(NULL, state, NULL, rpm_msg);

	/* Wake sets are always complete and sleep sets are not */
	rpm_msg.msg.wait_for_compl = (state == RPMH_WAKE_ONLY_STATE);
	rpm_msg.cmd[0].addr = addr;
	rpm_msg.cmd[0].data = data;
	rpm_msg.msg.num_cmds = 1;

	return rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr), &rpm_msg.msg);
}

/**
 * rpmh_flush() - Flushes the buffered sleep and wake sets to TCSes
 *
 * @ctrlr: Controller making request to flush cached data
 *
 * Return:
 * * 0          - Success
 * * Error code - Otherwise
 */
int rpmh_flush(struct rpmh_ctrlr *ctrlr)
{
	struct cache_req *p;
	int ret = 0;

	lockdep_assert_irqs_disabled();

	/*
	 * Currently rpmh_flush() is only called when we think we're running
	 * on the last processor. If the lock is busy it means another
	 * processor is up and it's better to abort than spin.
	 */
	if (!spin_trylock(&ctrlr->cache_lock))
		return -EBUSY;

	if (!ctrlr->dirty) {
		pr_debug("Skipping flush, TCS has latest data.\n");
		goto exit;
	}

	/* Invalidate the TCSes first to avoid stale data */
	rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr));

	/* First flush the cached batch requests */
	ret = flush_batch(ctrlr);
	if (ret)
		goto exit;

	list_for_each_entry(p, &ctrlr->cache, list) {
		if (!is_req_valid(p)) {
			pr_debug("%s: skipping RPMH req: a:%#x s:%#x w:%#x",
				 __func__, p->addr, p->sleep_val, p->wake_val);
			continue;
		}
		ret = send_single(ctrlr, RPMH_SLEEP_STATE, p->addr,
				  p->sleep_val);
		if (ret)
			goto exit;
		ret = send_single(ctrlr, RPMH_WAKE_ONLY_STATE, p->addr,
				  p->wake_val);
		if (ret)
			goto exit;
	}

	ctrlr->dirty = false;

exit:
	spin_unlock(&ctrlr->cache_lock);
	return ret;
}

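/*
 * Sketch of the expected rpmh_flush() call site (illustrative, not a
 * definitive caller): per the comment above, a CPU idle/PM path would
 * invoke it with IRQs disabled once it believes the last CPU is going
 * down, treating -EBUSY as a race with another CPU coming back up:
 *
 *	if (rpmh_flush(ctrlr) == -EBUSY)
 *		return -EBUSY;	// abort the deep idle entry
 */
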
/**
 * rpmh_invalidate: Invalidate sleep and wake sets in batch_cache
 *
 * @dev: The device making the request
 *
 * Invalidate the sleep and wake values in batch_cache.
 */
void rpmh_invalidate(const struct device *dev)
{
	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
	struct batch_cache_req *req, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&ctrlr->cache_lock, flags);
	list_for_each_entry_safe(req, tmp, &ctrlr->batch_cache, list)
		kfree(req);
	INIT_LIST_HEAD(&ctrlr->batch_cache);
	ctrlr->dirty = true;
	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
}
EXPORT_SYMBOL(rpmh_invalidate);
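
/*
 * Example (hypothetical client, sketch only): drop previously batched
 * sleep/wake votes before programming a fresh set across a state change.
 * sleep_cmds/wake_cmds and their 0-terminated count arrays are assumed
 * to be defined by the caller.
 *
 *	rpmh_invalidate(dev);
 *	ret = rpmh_write_batch(dev, RPMH_SLEEP_STATE, sleep_cmds, sleep_n);
 *	if (!ret)
 *		ret = rpmh_write_batch(dev, RPMH_WAKE_ONLY_STATE,
 *				       wake_cmds, wake_n);
 */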
514