Lines Matching +full:tcs +full:- +full:wait (drivers/soc/qcom/rpmh.c)
1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
18 #include <linux/wait.h>
22 #include "rpmh-internal.h"
58 * struct batch_cache_req - An entry in our batch cache
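The match above shows only the kerneldoc header of the structure; a minimal sketch of the layout it describes, assuming the fields implied by how flush_batch() and rpmh_write_batch() below use req->count and req->rpm_msgs (check rpmh.c for the authoritative definition):

struct batch_cache_req {
	struct list_head list;		/* entry in ctrlr->batch_cache        */
	int count;			/* number of cached messages          */
	struct rpmh_request rpm_msgs[];	/* the messages themselves, count of them */
};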
73 struct rsc_drv *drv = dev_get_drvdata(dev->parent); in get_rpmh_ctrlr()
75 return &drv->client; in get_rpmh_ctrlr()
82 struct completion *compl = rpm_msg->completion; in rpmh_tx_done()
83 bool free = rpm_msg->needs_free; in rpmh_tx_done()
85 rpm_msg->err = r; in rpmh_tx_done()
88 dev_err(rpm_msg->dev, "RPMH TX fail in msg addr=%#x, err=%d\n", in rpmh_tx_done()
89 rpm_msg->msg.cmds[0].addr, r); in rpmh_tx_done()
106 list_for_each_entry(p, &ctrlr->cache, list) { in __find_req()
107 if (p->addr == addr) { in __find_req()
124 spin_lock_irqsave(&ctrlr->cache_lock, flags); in cache_rpm_request()
125 req = __find_req(ctrlr, cmd->addr); in cache_rpm_request()
131 req = ERR_PTR(-ENOMEM); in cache_rpm_request()
135 req->addr = cmd->addr; in cache_rpm_request()
136 req->sleep_val = req->wake_val = UINT_MAX; in cache_rpm_request()
137 list_add_tail(&req->list, &ctrlr->cache); in cache_rpm_request()
140 old_sleep_val = req->sleep_val; in cache_rpm_request()
141 old_wake_val = req->wake_val; in cache_rpm_request()
146 req->wake_val = cmd->data; in cache_rpm_request()
149 req->sleep_val = cmd->data; in cache_rpm_request()
153 ctrlr->dirty |= (req->sleep_val != old_sleep_val || in cache_rpm_request()
154 req->wake_val != old_wake_val) && in cache_rpm_request()
155 req->sleep_val != UINT_MAX && in cache_rpm_request()
156 req->wake_val != UINT_MAX; in cache_rpm_request()
159 spin_unlock_irqrestore(&ctrlr->cache_lock, flags); in cache_rpm_request()
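cache_rpm_request() keys each cached entry on cmd->addr, stores the payload under sleep_val or wake_val depending on the requested state, and only marks the controller dirty once a changed sleep/wake pair is fully populated. The tcs_cmd and rpmh_state types it consumes come from include/soc/qcom/tcs.h; a sketch of those definitions for context (reproduced from memory, verify against the header):

enum rpmh_state {
	RPMH_SLEEP_STATE,	/* request applied when the subsystem sleeps */
	RPMH_WAKE_ONLY_STATE,	/* request restored when it wakes            */
	RPMH_ACTIVE_ONLY_STATE,	/* request sent immediately while active     */
};

struct tcs_cmd {
	u32 addr;		/* address of the RPMh resource          */
	u32 data;		/* payload / vote value                  */
	u32 wait;		/* wait for this command to complete     */
};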
179 int ret = -EINVAL; in __rpmh_write()
183 rpm_msg->msg.state = state; in __rpmh_write()
186 for (i = 0; i < rpm_msg->msg.num_cmds; i++) { in __rpmh_write()
187 req = cache_rpm_request(ctrlr, state, &rpm_msg->msg.cmds[i]); in __rpmh_write()
192 rpm_msg->msg.state = state; in __rpmh_write()
196 ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msg->msg); in __rpmh_write()
199 &rpm_msg->msg); in __rpmh_write()
201 rpmh_tx_done(&rpm_msg->msg, ret); in __rpmh_write()
211 return -EINVAL; in __fill_rpmh_msg()
213 memcpy(req->cmd, cmd, n * sizeof(*cmd)); in __fill_rpmh_msg()
215 req->msg.state = state; in __fill_rpmh_msg()
216 req->msg.cmds = req->cmd; in __fill_rpmh_msg()
217 req->msg.num_cmds = n; in __fill_rpmh_msg()
241 return -ENOMEM; in rpmh_write_async()
242 rpm_msg->needs_free = true; in rpmh_write_async()
272 return -EINVAL; in rpmh_write()
283 return (ret > 0) ? 0 : -ETIMEDOUT; in rpmh_write()
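The lines above come from rpmh_write(), the blocking variant: invalid input is rejected with -EINVAL and a timed-out completion is converted to -ETIMEDOUT. A hypothetical client call, assuming a device whose parent is the RSC (as get_rpmh_ctrlr() requires) and a made-up resource address; real clients obtain addresses via cmd_db_read_addr():

#include <soc/qcom/rpmh.h>
#include <soc/qcom/tcs.h>

static int example_vote(struct device *dev)
{
	struct tcs_cmd cmd = {
		.addr = 0x30000,	/* placeholder resource address */
		.data = 0x1,		/* placeholder vote value       */
	};
	int ret;

	/* Blocking: waits on the completion signalled from rpmh_tx_done() */
	ret = rpmh_write(dev, RPMH_ACTIVE_ONLY_STATE, &cmd, 1);
	if (ret)
		return ret;

	/* Fire-and-forget: the rpmh_request is freed from rpmh_tx_done() */
	return rpmh_write_async(dev, RPMH_SLEEP_STATE, &cmd, 1);
}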
291 spin_lock_irqsave(&ctrlr->cache_lock, flags); in cache_batch()
292 list_add_tail(&req->list, &ctrlr->batch_cache); in cache_batch()
293 ctrlr->dirty = true; in cache_batch()
294 spin_unlock_irqrestore(&ctrlr->cache_lock, flags); in cache_batch()
306 spin_lock_irqsave(&ctrlr->cache_lock, flags); in flush_batch()
307 list_for_each_entry(req, &ctrlr->batch_cache, list) { in flush_batch()
308 for (i = 0; i < req->count; i++) { in flush_batch()
309 rpm_msg = req->rpm_msgs + i; in flush_batch()
311 &rpm_msg->msg); in flush_batch()
316 spin_unlock_irqrestore(&ctrlr->cache_lock, flags); in flush_batch()
322 * rpmh_write_batch: Write multiple sets of RPMH commands and wait for the
334 * request is sent as fire-n-forget and no ack is expected.
351 return -EINVAL; in rpmh_write_batch()
356 return -EINVAL; in rpmh_write_batch()
359 count * (sizeof(req->rpm_msgs[0]) + sizeof(*compls)), in rpmh_write_batch()
362 return -ENOMEM; in rpmh_write_batch()
367 req->count = count; in rpmh_write_batch()
368 rpm_msgs = req->rpm_msgs; in rpmh_write_batch()
394 while (i--) { in rpmh_write_batch()
403 ret = -ETIMEDOUT; in rpmh_write_batch()
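rpmh_write_batch() takes a flat command array plus a zero-terminated array of per-set counts; active-state batches block on one completion per set (the while (i--) loop above waits on each in turn and returns -ETIMEDOUT if one expires), while sleep/wake batches are only cached via cache_batch() until the next flush. A hypothetical call with placeholder addresses:

static int example_batch(struct device *dev)
{
	struct tcs_cmd cmds[] = {
		{ .addr = 0x30000, .data = 0x1 },	/* set 0, cmd 0 */
		{ .addr = 0x30004, .data = 0x2 },	/* set 0, cmd 1 */
		{ .addr = 0x30010, .data = 0x3 },	/* set 1, cmd 0 */
	};
	u32 num_cmds[] = { 2, 1, 0 };	/* two sets, terminated by 0 */

	return rpmh_write_batch(dev, RPMH_ACTIVE_ONLY_STATE, cmds, num_cmds);
}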
417 return (req->sleep_val != UINT_MAX && in is_req_valid()
418 req->wake_val != UINT_MAX && in is_req_valid()
419 req->sleep_val != req->wake_val); in is_req_valid()
438 * rpmh_flush: Flushes the buffered active and sleep sets to TCS
442 * Return: -EBUSY if the controller is busy, probably waiting on a response
455 if (!ctrlr->dirty) { in rpmh_flush()
456 pr_debug("Skipping flush, TCS has latest data.\n"); in rpmh_flush()
463 } while (ret == -EAGAIN); in rpmh_flush()
476 list_for_each_entry(p, &ctrlr->cache, list) { in rpmh_flush()
479 __func__, p->addr, p->sleep_val, p->wake_val); in rpmh_flush()
482 ret = send_single(dev, RPMH_SLEEP_STATE, p->addr, p->sleep_val); in rpmh_flush()
486 p->addr, p->wake_val); in rpmh_flush()
491 ctrlr->dirty = false; in rpmh_flush()
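rpmh_flush() is intended for the system PM path once no other RPMh traffic is in flight: it skips the write-out when nothing is dirty, retries the TCS invalidate while it returns -EAGAIN, then replays the batch cache and every valid sleep/wake pair via send_single(). A minimal caller sketch, assuming this version's device-pointer signature; the surrounding suspend hook is hypothetical:

static int example_suspend_noirq(struct device *dev)
{
	/* Push cached sleep and wake votes into the TCSes before the last
	 * CPU powers down; -EBUSY means a request is still outstanding. */
	return rpmh_flush(dev);
}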
510 spin_lock_irqsave(&ctrlr->cache_lock, flags); in rpmh_invalidate()
511 list_for_each_entry_safe(req, tmp, &ctrlr->batch_cache, list) in rpmh_invalidate()
513 INIT_LIST_HEAD(&ctrlr->batch_cache); in rpmh_invalidate()
514 ctrlr->dirty = true; in rpmh_invalidate()
515 spin_unlock_irqrestore(&ctrlr->cache_lock, flags); in rpmh_invalidate()
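rpmh_invalidate() only empties the batch cache (and marks the controller dirty); the per-address sleep/wake cache built by cache_rpm_request() is untouched. A typical client pattern is therefore to invalidate and re-submit fresh sleep and wake batches, sketched below with hypothetical command arrays:

static int example_update_low_power_votes(struct device *dev,
					  const struct tcs_cmd *cmds, u32 *n)
{
	int ret;

	rpmh_invalidate(dev);	/* drop previously cached batches */

	ret = rpmh_write_batch(dev, RPMH_SLEEP_STATE, cmds, n);
	if (ret)
		return ret;

	return rpmh_write_batch(dev, RPMH_WAKE_ONLY_STATE, cmds, n);
}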