// SPDX-License-Identifier: GPL-2.0-only
/*
 *  sst_pvt.c - Intel SST Driver for audio engine
 *
 *  Copyright (C) 2008-14 Intel Corp
 *  Authors:	Vinod Koul <vinod.koul@intel.com>
 *		Harsha Priya <priya.harsha@intel.com>
 *		Dharageswari R <dharageswari.r@intel.com>
 *		KP Jeeja <jeeja.kp@intel.com>
 *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#include <linux/kobject.h>
#include <linux/pci.h>
#include <linux/fs.h>
#include <linux/firmware.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <sound/asound.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
#include <sound/compress_driver.h>
#include <asm/platform_sst_audio.h>
#include "../sst-mfld-platform.h"
#include "sst.h"
#include "../../common/sst-dsp.h"

int sst_shim_write(void __iomem *addr, int offset, int value)
{
	writel(value, addr + offset);
	return 0;
}

u32 sst_shim_read(void __iomem *addr, int offset)
{
	return readl(addr + offset);
}

u64 sst_reg_read64(void __iomem *addr, int offset)
{
	u64 val = 0;

	memcpy_fromio(&val, addr + offset, sizeof(val));

	return val;
}

int sst_shim_write64(void __iomem *addr, int offset, u64 value)
{
	memcpy_toio(addr + offset, &value, sizeof(value));
	return 0;
}

u64 sst_shim_read64(void __iomem *addr, int offset)
{
	u64 val = 0;

	memcpy_fromio(&val, addr + offset, sizeof(val));
	return val;
}

void sst_set_fw_state_locked(
		struct intel_sst_drv *sst_drv_ctx, int sst_state)
{
	mutex_lock(&sst_drv_ctx->sst_lock);
	sst_drv_ctx->sst_state = sst_state;
	mutex_unlock(&sst_drv_ctx->sst_lock);
}

/*
 * sst_wait_interruptible - wait on event
 *
 * @sst_drv_ctx: Driver context
 * @block: Driver block to wait on
 *
 * This function waits without a timeout (and is interruptible) for a
 * given block event
 */
int sst_wait_interruptible(struct intel_sst_drv *sst_drv_ctx,
				struct sst_block *block)
{
	int retval = 0;

	if (!wait_event_interruptible(sst_drv_ctx->wait_queue,
				block->condition)) {
		/* event wake */
		if (block->ret_code < 0) {
			dev_err(sst_drv_ctx->dev,
				"stream failed %d\n", block->ret_code);
			retval = -EBUSY;
		} else {
			dev_dbg(sst_drv_ctx->dev, "event up\n");
			retval = 0;
		}
	} else {
		dev_err(sst_drv_ctx->dev, "signal interrupted\n");
		retval = -EINTR;
	}
	return retval;
}

/*
 * sst_wait_timeout - wait on event for timeout
 *
 * @sst_drv_ctx: Driver context
 * @block: Driver block to wait on
 *
 * This function waits with a timeout value (and is not interruptible) on a
 * given block event
 */
int sst_wait_timeout(struct intel_sst_drv *sst_drv_ctx, struct sst_block *block)
{
	int retval = 0;

	/*
	 * NOTE:
	 * Observed that FW processes the alloc msg and replies even
	 * before the alloc thread has finished execution
	 */
	dev_dbg(sst_drv_ctx->dev,
		"waiting for condition %x ipc %d drv_id %d\n",
		block->condition, block->msg_id, block->drv_id);
	if (wait_event_timeout(sst_drv_ctx->wait_queue,
				block->condition,
				msecs_to_jiffies(SST_BLOCK_TIMEOUT))) {
		/* event wake */
		dev_dbg(sst_drv_ctx->dev, "Event wake %x\n",
				block->condition);
		dev_dbg(sst_drv_ctx->dev, "message ret: %d\n",
				block->ret_code);
		retval = -block->ret_code;
	} else {
		block->on = false;
		dev_err(sst_drv_ctx->dev,
			"Wait timed-out condition:%#x, msg_id:%#x fw_state %#x\n",
			block->condition, block->msg_id, sst_drv_ctx->sst_state);
		sst_drv_ctx->sst_state = SST_RESET;

		retval = -EBUSY;
	}
	return retval;
}

/*
 * sst_create_ipc_msg - create an IPC message
 *
 * @arg: ipc message
 * @large: large or short message
 *
 * This function allocates the structures needed to send a large or short
 * message to the firmware
 */
int sst_create_ipc_msg(struct ipc_post **arg, bool large)
{
	struct ipc_post *msg;

	msg = kzalloc(sizeof(*msg), GFP_ATOMIC);
	if (!msg)
		return -ENOMEM;
	if (large) {
		msg->mailbox_data = kzalloc(SST_MAILBOX_SIZE, GFP_ATOMIC);
		if (!msg->mailbox_data) {
			kfree(msg);
			return -ENOMEM;
		}
	} else {
		msg->mailbox_data = NULL;
	}
	msg->is_large = large;
	*arg = msg;
	return 0;
}

/*
 * sst_create_block_and_ipc_msg - Creates IPC message and sst block
 * @arg: passed to the sst_create_ipc_msg API
 * @large: large or short message
 * @sst_drv_ctx: sst driver context
 * @block: return block allocated
 * @msg_id: IPC message id
 * @drv_id: stream id or private id
 */
int sst_create_block_and_ipc_msg(struct ipc_post **arg, bool large,
		struct intel_sst_drv *sst_drv_ctx, struct sst_block **block,
		u32 msg_id, u32 drv_id)
{
	int retval = 0;

	retval = sst_create_ipc_msg(arg, large);
	if (retval)
		return retval;
	*block = sst_create_block(sst_drv_ctx, msg_id, drv_id);
	if (*block == NULL) {
		kfree(*arg);
		return -ENOMEM;
	}
	return retval;
}

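/*
 * Illustrative only: a rough sketch of how the helpers above are expected to
 * be paired with the wait/free primitives used elsewhere in this file.  The
 * error handling is abbreviated, and msg_id/drv_id stand for whatever the
 * caller has at hand; sst_prepare_and_post_msg() below is the real in-tree
 * user of this pattern.
 *
 *	struct ipc_post *msg;
 *	struct sst_block *block;
 *	int ret;
 *
 *	ret = sst_create_block_and_ipc_msg(&msg, true, sst, &block,
 *					   msg_id, drv_id);
 *	if (ret)
 *		return ret;
 *	// fill msg->mrfld_header and msg->mailbox_data, then post it
 *	sst_add_to_dispatch_list_and_post(sst, msg);
 *	ret = sst_wait_timeout(sst, block);	// wait for the firmware reply
 *	sst_free_block(sst, block);
 */
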
/*
 * sst_clean_stream - clean the stream context
 *
 * @stream: stream structure
 *
 * This function resets the stream context;
 * it should be called when the stream is freed
 */
void sst_clean_stream(struct stream_info *stream)
{
	stream->status = STREAM_UN_INIT;
	stream->prev = STREAM_UN_INIT;
	mutex_lock(&stream->lock);
	stream->cumm_bytes = 0;
	mutex_unlock(&stream->lock);
}

int sst_prepare_and_post_msg(struct intel_sst_drv *sst,
		int task_id, int ipc_msg, int cmd_id, int pipe_id,
		size_t mbox_data_len, const void *mbox_data, void **data,
		bool large, bool fill_dsp, bool sync, bool response)
{
	struct ipc_post *msg = NULL;
	struct ipc_dsp_hdr dsp_hdr;
	struct sst_block *block;
	int ret = 0, pvt_id;

	pvt_id = sst_assign_pvt_id(sst);
	if (pvt_id < 0)
		return pvt_id;

	if (response)
		ret = sst_create_block_and_ipc_msg(
				&msg, large, sst, &block, ipc_msg, pvt_id);
	else
		ret = sst_create_ipc_msg(&msg, large);

	if (ret < 0) {
		test_and_clear_bit(pvt_id, &sst->pvt_id);
		return -ENOMEM;
	}

	dev_dbg(sst->dev, "pvt_id = %d, pipe id = %d, task = %d ipc_msg: %d\n",
		pvt_id, pipe_id, task_id, ipc_msg);
	sst_fill_header_mrfld(&msg->mrfld_header, ipc_msg,
					task_id, large, pvt_id);
	msg->mrfld_header.p.header_low_payload = sizeof(dsp_hdr) + mbox_data_len;
	msg->mrfld_header.p.header_high.part.res_rqd = !sync;
	dev_dbg(sst->dev, "header:%x\n",
			msg->mrfld_header.p.header_high.full);
	dev_dbg(sst->dev, "response rqd: %x",
			msg->mrfld_header.p.header_high.part.res_rqd);
	dev_dbg(sst->dev, "msg->mrfld_header.p.header_low_payload:%d",
			msg->mrfld_header.p.header_low_payload);
	if (fill_dsp) {
		sst_fill_header_dsp(&dsp_hdr, cmd_id, pipe_id, mbox_data_len);
		memcpy(msg->mailbox_data, &dsp_hdr, sizeof(dsp_hdr));
		if (mbox_data_len) {
			memcpy(msg->mailbox_data + sizeof(dsp_hdr),
					mbox_data, mbox_data_len);
		}
	}

	if (sync)
		sst->ops->post_message(sst, msg, true);
	else
		sst_add_to_dispatch_list_and_post(sst, msg);

	if (response) {
		ret = sst_wait_timeout(sst, block);
		if (ret < 0)
			goto out;

		if (data && block->data) {
			*data = kmemdup(block->data, block->size, GFP_KERNEL);
			if (!*data) {
				ret = -ENOMEM;
				goto out;
			}
		}
	}
out:
	if (response)
		sst_free_block(sst, block);
	test_and_clear_bit(pvt_id, &sst->pvt_id);
	return ret;
}

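/*
 * Usage sketch (hypothetical values, not taken from this file): a control
 * path that wants to send a large command to a pipe and wait for the
 * firmware's reply might invoke the helper above roughly like this, where
 * `cmd` stands for whatever command structure the caller filled in and the
 * task/ipc/cmd/pipe identifiers come from the platform headers:
 *
 *	ret = sst_prepare_and_post_msg(sst, task_id, ipc_msg, cmd_id, pipe_id,
 *				       sizeof(cmd), &cmd,  // mailbox payload
 *				       NULL,	// no reply data requested
 *				       true,	// large message
 *				       true,	// prepend ipc_dsp_hdr
 *				       false,	// post asynchronously
 *				       true);	// wait for the response
 */
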
int sst_pm_runtime_put(struct intel_sst_drv *sst_drv)
{
	int ret;

	pm_runtime_mark_last_busy(sst_drv->dev);
	ret = pm_runtime_put_autosuspend(sst_drv->dev);
	if (ret < 0)
		return ret;
	return 0;
}

void sst_fill_header_mrfld(union ipc_header_mrfld *header,
		int msg, int task_id, int large, int drv_id)
{
	header->full = 0;
	header->p.header_high.part.msg_id = msg;
	header->p.header_high.part.task_id = task_id;
	header->p.header_high.part.large = large;
	header->p.header_high.part.drv_id = drv_id;
	header->p.header_high.part.done = 0;
	header->p.header_high.part.busy = 1;
	header->p.header_high.part.res_rqd = 1;
}

void sst_fill_header_dsp(struct ipc_dsp_hdr *dsp, int msg,
		int pipe_id, int len)
{
	dsp->cmd_id = msg;
	dsp->mod_index_id = 0xff;
	dsp->pipe_id = pipe_id;
	dsp->length = len;
	dsp->mod_id = 0;
}

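/*
 * For a large message the two helpers above are used together: the MRFLD
 * header carries the routing information, while the mailbox buffer is laid
 * out as the DSP header immediately followed by the payload, as assembled in
 * sst_prepare_and_post_msg():
 *
 *	mailbox_data: [ struct ipc_dsp_hdr ][ mbox_data (mbox_data_len bytes) ]
 *	header_low_payload = sizeof(struct ipc_dsp_hdr) + mbox_data_len
 */
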
#define SST_MAX_BLOCKS 15
/*
 * sst_assign_pvt_id - assign a pvt id for stream
 *
 * @drv: driver context
 *
 * This function assigns a private id for calls that don't have a stream
 * context yet; it should be called with the lock held.
 * It uses bits for the id: the first free bit is found and assigned.
 */
int sst_assign_pvt_id(struct intel_sst_drv *drv)
{
	int local;

	spin_lock(&drv->block_lock);
	/* find first zero index from lsb */
	local = ffz(drv->pvt_id);
	dev_dbg(drv->dev, "pvt_id assigned --> %d\n", local);
	if (local >= SST_MAX_BLOCKS) {
		spin_unlock(&drv->block_lock);
		dev_err(drv->dev, "PVT _ID error: no free id blocks ");
		return -EINVAL;
	}
	/* toggle the index */
	change_bit(local, &drv->pvt_id);
	spin_unlock(&drv->block_lock);
	return local;
}

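/*
 * Worked example (illustrative): with drv->pvt_id == 0b0101, ffz() returns
 * bit index 1, so the caller gets id 1 and change_bit() leaves the bitmap as
 * 0b0111.  The id is released later with test_and_clear_bit(1, &drv->pvt_id),
 * as done in sst_prepare_and_post_msg() above.
 */
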
int sst_validate_strid(
		struct intel_sst_drv *sst_drv_ctx, int str_id)
{
	if (str_id <= 0 || str_id > sst_drv_ctx->info.max_streams) {
		dev_err(sst_drv_ctx->dev,
			"SST ERR: invalid stream id : %d, max %d\n",
			str_id, sst_drv_ctx->info.max_streams);
		return -EINVAL;
	}

	return 0;
}

struct stream_info *get_stream_info(
		struct intel_sst_drv *sst_drv_ctx, int str_id)
{
	if (sst_validate_strid(sst_drv_ctx, str_id))
		return NULL;
	return &sst_drv_ctx->streams[str_id];
}

int get_stream_id_mrfld(struct intel_sst_drv *sst_drv_ctx,
		u32 pipe_id)
{
	int i;

	for (i = 1; i <= sst_drv_ctx->info.max_streams; i++)
		if (pipe_id == sst_drv_ctx->streams[i].pipe_id)
			return i;

	dev_dbg(sst_drv_ctx->dev, "no such pipe_id(%u)", pipe_id);
	return -1;
}

u32 relocate_imr_addr_mrfld(u32 base_addr)
{
	/* Get the difference from 512MB aligned base addr */
	/* relocate the base */
	base_addr = MRFLD_FW_VIRTUAL_BASE + (base_addr % (512 * 1024 * 1024));
	return base_addr;
}
EXPORT_SYMBOL_GPL(relocate_imr_addr_mrfld);

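/*
 * Worked example (hypothetical address): with a physical base of 0x20400000,
 * the offset within the 512 MB (0x20000000) window is 0x00400000, so the
 * relocated address is MRFLD_FW_VIRTUAL_BASE + 0x00400000.
 */
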
void sst_add_to_dispatch_list_and_post(struct intel_sst_drv *sst,
		struct ipc_post *msg)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&sst->ipc_spin_lock, irq_flags);
	list_add_tail(&msg->node, &sst->ipc_dispatch_list);
	spin_unlock_irqrestore(&sst->ipc_spin_lock, irq_flags);
	sst->ops->post_message(sst, NULL, false);
}