// SPDX-License-Identifier: GPL-2.0-only
/*
 *  sst_pvt.c - Intel SST Driver for audio engine
 *
 *  Copyright (C) 2008-14	Intel Corp
 *  Authors:	Vinod Koul <vinod.koul@intel.com>
 *		Harsha Priya <priya.harsha@intel.com>
 *		Dharageswari R <dharageswari.r@intel.com>
 *		KP Jeeja <jeeja.kp@intel.com>
 *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#include <linux/kobject.h>
#include <linux/pci.h>
#include <linux/fs.h>
#include <linux/firmware.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <sound/asound.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
#include <sound/compress_driver.h>
#include <asm/platform_sst_audio.h>
#include "../sst-mfld-platform.h"
#include "sst.h"

int sst_shim_write(void __iomem *addr, int offset, int value)
{
	writel(value, addr + offset);
	return 0;
}

u32 sst_shim_read(void __iomem *addr, int offset)
{
	return readl(addr + offset);
}

u64 sst_reg_read64(void __iomem *addr, int offset)
{
	u64 val = 0;

	memcpy_fromio(&val, addr + offset, sizeof(val));

	return val;
}

int sst_shim_write64(void __iomem *addr, int offset, u64 value)
{
	memcpy_toio(addr + offset, &value, sizeof(value));
	return 0;
}

u64 sst_shim_read64(void __iomem *addr, int offset)
{
	u64 val = 0;

	memcpy_fromio(&val, addr + offset, sizeof(val));
	return val;
}

void sst_set_fw_state_locked(
		struct intel_sst_drv *sst_drv_ctx, int sst_state)
{
	mutex_lock(&sst_drv_ctx->sst_lock);
	sst_drv_ctx->sst_state = sst_state;
	mutex_unlock(&sst_drv_ctx->sst_lock);
}

/*
 * sst_wait_interruptible - wait on event
 *
 * @sst_drv_ctx: Driver context
 * @block: Driver block to wait on
 *
 * This function waits without a timeout (and is interruptible) for a
 * given block event
 */
int sst_wait_interruptible(struct intel_sst_drv *sst_drv_ctx,
				struct sst_block *block)
{
	int retval = 0;

	if (!wait_event_interruptible(sst_drv_ctx->wait_queue,
				block->condition)) {
		/* event wake */
		if (block->ret_code < 0) {
			dev_err(sst_drv_ctx->dev,
				"stream failed %d\n", block->ret_code);
			retval = -EBUSY;
		} else {
			dev_dbg(sst_drv_ctx->dev, "event up\n");
			retval = 0;
		}
	} else {
		dev_err(sst_drv_ctx->dev, "signal interrupted\n");
		retval = -EINTR;
	}
	return retval;
}

/*
 * sst_wait_timeout - wait on event for timeout
 *
 * @sst_drv_ctx: Driver context
 * @block: Driver block to wait on
 *
 * This function waits with a timeout value (and is not interruptible) on a
 * given block event
 */
int sst_wait_timeout(struct intel_sst_drv *sst_drv_ctx, struct sst_block *block)
{
	int retval = 0;

	/*
	 * NOTE:
	 * Observed that FW processes the alloc msg and replies even
	 * before the alloc thread has finished execution
	 */
	dev_dbg(sst_drv_ctx->dev,
		"waiting for condition %x ipc %d drv_id %d\n",
		block->condition, block->msg_id, block->drv_id);
	if (wait_event_timeout(sst_drv_ctx->wait_queue,
				block->condition,
				msecs_to_jiffies(SST_BLOCK_TIMEOUT))) {
		/* event wake */
		dev_dbg(sst_drv_ctx->dev, "Event wake %x\n",
				block->condition);
		dev_dbg(sst_drv_ctx->dev, "message ret: %d\n",
				block->ret_code);
		retval = -block->ret_code;
	} else {
		block->on = false;
		dev_err(sst_drv_ctx->dev,
			"Wait timed-out condition:%#x, msg_id:%#x fw_state %#x\n",
			block->condition, block->msg_id, sst_drv_ctx->sst_state);
		sst_drv_ctx->sst_state = SST_RESET;

		retval = -EBUSY;
	}
	return retval;
}
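
/*
 * Illustrative sketch (not compiled into the driver): a hypothetical caller
 * would pair sst_wait_timeout() with a reply block registered via
 * sst_create_block() before the message is posted, along the lines of:
 */
#if 0
static int example_post_and_wait(struct intel_sst_drv *sst,
				 struct ipc_post *msg, u32 msg_id, u32 drv_id)
{
	struct sst_block *block;
	int ret;

	/* register interest in the reply before posting the message */
	block = sst_create_block(sst, msg_id, drv_id);
	if (!block)
		return -ENOMEM;

	sst->ops->post_message(sst, msg, true);

	/* returns the (negated) firmware result, or -EBUSY on timeout */
	ret = sst_wait_timeout(sst, block);

	sst_free_block(sst, block);
	return ret;
}
#endif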

/*
 * sst_create_ipc_msg - create an IPC message
 *
 * @arg: ipc message
 * @large: large or short message
 *
 * This function allocates the structures needed to send a large or short
 * message to the firmware.
 */
int sst_create_ipc_msg(struct ipc_post **arg, bool large)
{
	struct ipc_post *msg;

	msg = kzalloc(sizeof(*msg), GFP_ATOMIC);
	if (!msg)
		return -ENOMEM;
	if (large) {
		msg->mailbox_data = kzalloc(SST_MAILBOX_SIZE, GFP_ATOMIC);
		if (!msg->mailbox_data) {
			kfree(msg);
			return -ENOMEM;
		}
	} else {
		msg->mailbox_data = NULL;
	}
	msg->is_large = large;
	*arg = msg;
	return 0;
}
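
/*
 * Illustrative sketch (not compiled into the driver): a "large" message
 * carries an SST_MAILBOX_SIZE payload buffer the caller fills before
 * posting, while a short message has no mailbox data. The helper below is
 * hypothetical and only shows the allocation/cleanup pairing.
 */
#if 0
static int example_alloc_large_msg(void)
{
	struct ipc_post *msg;
	int ret;

	ret = sst_create_ipc_msg(&msg, true);	/* allocates msg->mailbox_data */
	if (ret)
		return ret;

	/* ... fill msg->mrfld_header and msg->mailbox_data, then post ... */

	/* on an error path before posting, both allocations must be freed */
	kfree(msg->mailbox_data);
	kfree(msg);
	return 0;
}
#endif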

/*
 * sst_create_block_and_ipc_msg - Creates IPC message and sst block
 * @arg: passed to the sst_create_ipc_msg() API
 * @large: large or short message
 * @sst_drv_ctx: sst driver context
 * @block: returns the allocated block
 * @msg_id: IPC message ID
 * @drv_id: stream id or private id
 */
int sst_create_block_and_ipc_msg(struct ipc_post **arg, bool large,
		struct intel_sst_drv *sst_drv_ctx, struct sst_block **block,
		u32 msg_id, u32 drv_id)
{
	int retval;

	retval = sst_create_ipc_msg(arg, large);
	if (retval)
		return retval;
	*block = sst_create_block(sst_drv_ctx, msg_id, drv_id);
	if (*block == NULL) {
		kfree(*arg);
		return -ENOMEM;
	}
	return 0;
}

/*
 * sst_clean_stream - clean the stream context
 *
 * @stream: stream structure
 *
 * This function resets the stream context; it should be called when the
 * stream is freed.
 */
void sst_clean_stream(struct stream_info *stream)
{
	stream->status = STREAM_UN_INIT;
	stream->prev = STREAM_UN_INIT;
	mutex_lock(&stream->lock);
	stream->cumm_bytes = 0;
	mutex_unlock(&stream->lock);
}

int sst_prepare_and_post_msg(struct intel_sst_drv *sst,
		int task_id, int ipc_msg, int cmd_id, int pipe_id,
		size_t mbox_data_len, const void *mbox_data, void **data,
		bool large, bool fill_dsp, bool sync, bool response)
{
	struct sst_block *block = NULL;
	struct ipc_post *msg = NULL;
	struct ipc_dsp_hdr dsp_hdr;
	int ret = 0, pvt_id;

	pvt_id = sst_assign_pvt_id(sst);
	if (pvt_id < 0)
		return pvt_id;

	if (response)
		ret = sst_create_block_and_ipc_msg(
				&msg, large, sst, &block, ipc_msg, pvt_id);
	else
		ret = sst_create_ipc_msg(&msg, large);

	if (ret < 0) {
		test_and_clear_bit(pvt_id, &sst->pvt_id);
		return -ENOMEM;
	}

	dev_dbg(sst->dev, "pvt_id = %d, pipe id = %d, task = %d ipc_msg: %d\n",
		 pvt_id, pipe_id, task_id, ipc_msg);
	sst_fill_header_mrfld(&msg->mrfld_header, ipc_msg,
					task_id, large, pvt_id);
	msg->mrfld_header.p.header_low_payload = sizeof(dsp_hdr) + mbox_data_len;
	msg->mrfld_header.p.header_high.part.res_rqd = !sync;
	dev_dbg(sst->dev, "header:%x\n",
			msg->mrfld_header.p.header_high.full);
	dev_dbg(sst->dev, "response rqd: %x",
			msg->mrfld_header.p.header_high.part.res_rqd);
	dev_dbg(sst->dev, "msg->mrfld_header.p.header_low_payload:%d",
			msg->mrfld_header.p.header_low_payload);
	if (fill_dsp) {
		sst_fill_header_dsp(&dsp_hdr, cmd_id, pipe_id, mbox_data_len);
		memcpy(msg->mailbox_data, &dsp_hdr, sizeof(dsp_hdr));
		if (mbox_data_len) {
			memcpy(msg->mailbox_data + sizeof(dsp_hdr),
					mbox_data, mbox_data_len);
		}
	}

	if (sync)
		sst->ops->post_message(sst, msg, true);
	else
		sst_add_to_dispatch_list_and_post(sst, msg);

	if (response) {
		ret = sst_wait_timeout(sst, block);
		if (ret < 0)
			goto out;

		if (data && block->data) {
			*data = kmemdup(block->data, block->size, GFP_KERNEL);
			if (!*data) {
				ret = -ENOMEM;
				goto out;
			}
		}
	}
out:
	if (response)
		sst_free_block(sst, block);
	test_and_clear_bit(pvt_id, &sst->pvt_id);
	return ret;
}
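
/*
 * Illustrative sketch (not compiled into the driver): a hypothetical caller
 * sending a command synchronously and waiting for the firmware reply. All
 * ids and the payload below are made up for illustration; real callers pass
 * task, message, command and pipe ids defined elsewhere in the driver.
 */
#if 0
static int example_send_cmd(struct intel_sst_drv *sst, int task_id,
			    int ipc_msg, int cmd_id, int pipe_id,
			    const void *payload, size_t payload_len)
{
	/*
	 * large = true     -> a mailbox buffer is allocated for the payload
	 * fill_dsp = true  -> an ipc_dsp_hdr is prepended to the payload
	 * sync = true      -> posted directly instead of via the dispatch list
	 * response = true  -> a reply block is registered and waited on
	 */
	return sst_prepare_and_post_msg(sst, task_id, ipc_msg, cmd_id,
					pipe_id, payload_len, payload,
					NULL, true, true, true, true);
}
#endif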

int sst_pm_runtime_put(struct intel_sst_drv *sst_drv)
{
	int ret;

	pm_runtime_mark_last_busy(sst_drv->dev);
	ret = pm_runtime_put_autosuspend(sst_drv->dev);
	if (ret < 0)
		return ret;
	return 0;
}

void sst_fill_header_mrfld(union ipc_header_mrfld *header,
				int msg, int task_id, int large, int drv_id)
{
	header->full = 0;
	header->p.header_high.part.msg_id = msg;
	header->p.header_high.part.task_id = task_id;
	header->p.header_high.part.large = large;
	header->p.header_high.part.drv_id = drv_id;
	header->p.header_high.part.done = 0;
	header->p.header_high.part.busy = 1;
	header->p.header_high.part.res_rqd = 1;
}

void sst_fill_header_dsp(struct ipc_dsp_hdr *dsp, int msg,
					int pipe_id, int len)
{
	dsp->cmd_id = msg;
	dsp->mod_index_id = 0xff;
	dsp->pipe_id = pipe_id;
	dsp->length = len;
	dsp->mod_id = 0;
}

#define SST_MAX_BLOCKS 15
/*
 * sst_assign_pvt_id - assign a pvt id for stream
 *
 * @sst_drv_ctx : driver context
 *
 * This function assigns a private id for calls that don't have a stream
 * context yet; it should be called with the lock held. It uses a bitmap
 * for the ids, finds the first free bit and assigns it.
 */
int sst_assign_pvt_id(struct intel_sst_drv *drv)
{
	int local;

	spin_lock(&drv->block_lock);
	/* find first zero index from lsb */
	local = ffz(drv->pvt_id);
	dev_dbg(drv->dev, "pvt_id assigned --> %d\n", local);
	if (local >= SST_MAX_BLOCKS) {
		spin_unlock(&drv->block_lock);
		dev_err(drv->dev, "pvt_id error: no free id blocks\n");
		return -EINVAL;
	}
	/* toggle the index */
	change_bit(local, &drv->pvt_id);
	spin_unlock(&drv->block_lock);
	return local;
}
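
/*
 * Illustrative sketch (not compiled into the driver): every id handed out
 * by sst_assign_pvt_id() must be released again by clearing its bit, as
 * sst_prepare_and_post_msg() above does once the transaction completes.
 */
#if 0
static void example_pvt_id_lifecycle(struct intel_sst_drv *sst)
{
	int pvt_id = sst_assign_pvt_id(sst);

	if (pvt_id < 0)
		return;

	/* ... use pvt_id as the drv_id in the IPC header ... */

	test_and_clear_bit(pvt_id, &sst->pvt_id);
}
#endif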

int sst_validate_strid(
		struct intel_sst_drv *sst_drv_ctx, int str_id)
{
	if (str_id <= 0 || str_id > sst_drv_ctx->info.max_streams) {
		dev_err(sst_drv_ctx->dev,
			"SST ERR: invalid stream id : %d, max %d\n",
			str_id, sst_drv_ctx->info.max_streams);
		return -EINVAL;
	}

	return 0;
}

struct stream_info *get_stream_info(
		struct intel_sst_drv *sst_drv_ctx, int str_id)
{
	if (sst_validate_strid(sst_drv_ctx, str_id))
		return NULL;
	return &sst_drv_ctx->streams[str_id];
}

int get_stream_id_mrfld(struct intel_sst_drv *sst_drv_ctx,
		u32 pipe_id)
{
	int i;

	for (i = 1; i <= sst_drv_ctx->info.max_streams; i++)
		if (pipe_id == sst_drv_ctx->streams[i].pipe_id)
			return i;

	dev_dbg(sst_drv_ctx->dev, "no such pipe_id(%u)", pipe_id);
	return -1;
}

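/*
 * relocate_imr_addr_mrfld - rebase a firmware IMR address onto the SST
 * virtual address space
 *
 * Only the offset within the 512 MB aligned window is kept and rebased onto
 * MRFLD_FW_VIRTUAL_BASE. For example (offset is illustrative), an address at
 * window_start + 0x100000 relocates to MRFLD_FW_VIRTUAL_BASE + 0x100000.
 */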
u32 relocate_imr_addr_mrfld(u32 base_addr)
{
	/* Get the difference from 512MB aligned base addr */
	/* relocate the base */
	base_addr = MRFLD_FW_VIRTUAL_BASE + (base_addr % (512 * 1024 * 1024));
	return base_addr;
}
EXPORT_SYMBOL_GPL(relocate_imr_addr_mrfld);

void sst_add_to_dispatch_list_and_post(struct intel_sst_drv *sst,
						struct ipc_post *msg)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&sst->ipc_spin_lock, irq_flags);
	list_add_tail(&msg->node, &sst->ipc_dispatch_list);
	spin_unlock_irqrestore(&sst->ipc_spin_lock, irq_flags);
	sst->ops->post_message(sst, NULL, false);
}