// SPDX-License-Identifier: GPL-2.0+
// Copyright 2017-2021 NXP

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/rpmsg.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/dmaengine_pcm.h>
#include <sound/soc.h>

#include "imx-pcm.h"
#include "fsl_rpmsg.h"
#include "imx-pcm-rpmsg.h"

static struct snd_pcm_hardware imx_rpmsg_pcm_hardware = {
	.info = SNDRV_PCM_INFO_INTERLEAVED |
		SNDRV_PCM_INFO_BLOCK_TRANSFER |
		SNDRV_PCM_INFO_BATCH |
		SNDRV_PCM_INFO_MMAP |
		SNDRV_PCM_INFO_MMAP_VALID |
		SNDRV_PCM_INFO_NO_PERIOD_WAKEUP |
		SNDRV_PCM_INFO_PAUSE |
		SNDRV_PCM_INFO_RESUME,
	.buffer_bytes_max = IMX_DEFAULT_DMABUF_SIZE,
	.period_bytes_min = 512,
	.period_bytes_max = 65536,
	.periods_min = 2,
	.periods_max = 6000,
	.fifo_size = 0,
};

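/*
 * Send a command to the M core over rpmsg, serialized by msg_lock.
 * Except for TYPE_C (notification) messages, wait for the response
 * and store it in the per-command message slot.
 */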
static int imx_rpmsg_pcm_send_message(struct rpmsg_msg *msg,
				      struct rpmsg_info *info)
{
	struct rpmsg_device *rpdev = info->rpdev;
	int ret = 0;

	mutex_lock(&info->msg_lock);
	if (!rpdev) {
		dev_err(info->dev, "rpmsg channel not ready\n");
		mutex_unlock(&info->msg_lock);
		return -EINVAL;
	}

	dev_dbg(&rpdev->dev, "send cmd %d\n", msg->s_msg.header.cmd);

	if (!(msg->s_msg.header.type == MSG_TYPE_C))
		reinit_completion(&info->cmd_complete);

	ret = rpmsg_send(rpdev->ept, (void *)&msg->s_msg,
			 sizeof(struct rpmsg_s_msg));
	if (ret) {
		dev_err(&rpdev->dev, "rpmsg_send failed: %d\n", ret);
		mutex_unlock(&info->msg_lock);
		return ret;
	}

	/* No response is expected for a TYPE_C command */
	if (msg->s_msg.header.type == MSG_TYPE_C) {
		mutex_unlock(&info->msg_lock);
		return 0;
	}

	/* Wait for the response from the remote side */
	ret = wait_for_completion_timeout(&info->cmd_complete,
					  msecs_to_jiffies(RPMSG_TIMEOUT));
	if (!ret) {
		dev_err(&rpdev->dev, "rpmsg_send cmd %d timeout!\n",
			msg->s_msg.header.cmd);
		mutex_unlock(&info->msg_lock);
		return -ETIMEDOUT;
	}

	memcpy(&msg->r_msg, &info->r_msg, sizeof(struct rpmsg_r_msg));
	memcpy(&info->msg[msg->r_msg.header.cmd].r_msg,
	       &msg->r_msg, sizeof(struct rpmsg_r_msg));

	/*
	 * Reset the buffer pointer to zero. It has already been cleared
	 * in imx_rpmsg_terminate_all(), but if a timer task was still
	 * queued, the buffer pointer may change again once that task
	 * runs, so reset it once more when handling a TERMINATE command.
	 */
	switch (msg->s_msg.header.cmd) {
	case TX_TERMINATE:
		info->msg[TX_POINTER].r_msg.param.buffer_offset = 0;
		break;
	case RX_TERMINATE:
		info->msg[RX_POINTER].r_msg.param.buffer_offset = 0;
		break;
	default:
		break;
	}

	dev_dbg(&rpdev->dev, "cmd:%d, resp %d\n", msg->s_msg.header.cmd,
		info->r_msg.param.resp);

	mutex_unlock(&info->msg_lock);

	return 0;
}

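/*
 * Copy the message into the circular work list and queue it on the
 * ordered workqueue; the actual rpmsg send happens later, in process
 * context, from imx_rpmsg_pcm_work(). Returns -EPIPE and counts a
 * dropped message when the list is full.
 */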
static int imx_rpmsg_insert_workqueue(struct snd_pcm_substream *substream,
				      struct rpmsg_msg *msg,
				      struct rpmsg_info *info)
{
	unsigned long flags;
	int ret = 0;

	/*
	 * Queue the work to the workqueue.
	 * If the queue is full, drop the message.
	 */
	spin_lock_irqsave(&info->wq_lock, flags);
	if (info->work_write_index != info->work_read_index) {
		int index = info->work_write_index;

		memcpy(&info->work_list[index].msg, msg,
		       sizeof(struct rpmsg_s_msg));

		queue_work(info->rpmsg_wq, &info->work_list[index].work);
		info->work_write_index++;
		info->work_write_index %= WORK_MAX_NUM;
	} else {
		info->msg_drop_count[substream->stream]++;
		ret = -EPIPE;
	}
	spin_unlock_irqrestore(&info->wq_lock, flags);

	return ret;
}

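/*
 * Translate the ALSA hw_params (rate, format, channels) into a
 * TX/RX_HW_PARAM command and send it to the M core.
 */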
static int imx_rpmsg_pcm_hw_params(struct snd_soc_component *component,
				   struct snd_pcm_substream *substream,
				   struct snd_pcm_hw_params *params)
{
	struct rpmsg_info *info = dev_get_drvdata(component->dev);
	struct rpmsg_msg *msg;
	int ret = 0;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		msg = &info->msg[TX_HW_PARAM];
		msg->s_msg.header.cmd = TX_HW_PARAM;
	} else {
		msg = &info->msg[RX_HW_PARAM];
		msg->s_msg.header.cmd = RX_HW_PARAM;
	}

	msg->s_msg.param.rate = params_rate(params);

	switch (params_format(params)) {
	case SNDRV_PCM_FORMAT_S16_LE:
		msg->s_msg.param.format = RPMSG_S16_LE;
		break;
	case SNDRV_PCM_FORMAT_S24_LE:
		msg->s_msg.param.format = RPMSG_S24_LE;
		break;
	case SNDRV_PCM_FORMAT_DSD_U16_LE:
		msg->s_msg.param.format = RPMSG_DSD_U16_LE;
		break;
	case SNDRV_PCM_FORMAT_DSD_U32_LE:
		msg->s_msg.param.format = RPMSG_DSD_U32_LE;
		break;
	default:
		msg->s_msg.param.format = RPMSG_S32_LE;
		break;
	}

	switch (params_channels(params)) {
	case 1:
		msg->s_msg.param.channels = RPMSG_CH_LEFT;
		break;
	case 2:
		msg->s_msg.param.channels = RPMSG_CH_STEREO;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	info->send_message(msg, info);

	return ret;
}

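/*
 * Derive the current position from the buffer_tail (period counter)
 * last reported by the M core in the PERIOD_DONE response.
 */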
static snd_pcm_uframes_t imx_rpmsg_pcm_pointer(struct snd_soc_component *component,
					       struct snd_pcm_substream *substream)
{
	struct rpmsg_info *info = dev_get_drvdata(component->dev);
	struct rpmsg_msg *msg;
	unsigned int pos = 0;
	int buffer_tail = 0;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		msg = &info->msg[TX_PERIOD_DONE + MSG_TYPE_A_NUM];
	else
		msg = &info->msg[RX_PERIOD_DONE + MSG_TYPE_A_NUM];

	buffer_tail = msg->r_msg.param.buffer_tail;
	pos = buffer_tail * snd_pcm_lib_period_bytes(substream);

	return bytes_to_frames(substream->runtime, pos);
}

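/*
 * Timer callback used in LPA mode: queue a PERIOD_DONE work item so
 * that the pending pointer notification is flushed to the M core.
 */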
static void imx_rpmsg_timer_callback(struct timer_list *t)
{
	struct stream_timer *stream_timer =
			from_timer(stream_timer, t, timer);
	struct snd_pcm_substream *substream = stream_timer->substream;
	struct rpmsg_info *info = stream_timer->info;
	struct rpmsg_msg *msg;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		msg = &info->msg[TX_PERIOD_DONE + MSG_TYPE_A_NUM];
		msg->s_msg.header.cmd = TX_PERIOD_DONE;
	} else {
		msg = &info->msg[RX_PERIOD_DONE + MSG_TYPE_A_NUM];
		msg->s_msg.header.cmd = RX_PERIOD_DONE;
	}

	imx_rpmsg_insert_workqueue(substream, msg, info);
}

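/*
 * Open the stream on the M core side, reset the local period and
 * pointer bookkeeping and set up the per-stream timer used for
 * deferred pointer notifications.
 */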
static int imx_rpmsg_pcm_open(struct snd_soc_component *component,
			      struct snd_pcm_substream *substream)
{
	struct rpmsg_info *info = dev_get_drvdata(component->dev);
	struct rpmsg_msg *msg;
	int ret = 0;
	int cmd;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		msg = &info->msg[TX_OPEN];
		msg->s_msg.header.cmd = TX_OPEN;

		/* Reinitialize the buffer counters */
		cmd = TX_PERIOD_DONE + MSG_TYPE_A_NUM;
		info->msg[cmd].s_msg.param.buffer_tail = 0;
		info->msg[cmd].r_msg.param.buffer_tail = 0;
		info->msg[TX_POINTER].r_msg.param.buffer_offset = 0;
	} else {
		msg = &info->msg[RX_OPEN];
		msg->s_msg.header.cmd = RX_OPEN;

		/* Reinitialize the buffer counters */
		cmd = RX_PERIOD_DONE + MSG_TYPE_A_NUM;
		info->msg[cmd].s_msg.param.buffer_tail = 0;
		info->msg[cmd].r_msg.param.buffer_tail = 0;
		info->msg[RX_POINTER].r_msg.param.buffer_offset = 0;
	}

	info->send_message(msg, info);

	imx_rpmsg_pcm_hardware.period_bytes_max =
			imx_rpmsg_pcm_hardware.buffer_bytes_max / 2;

	snd_soc_set_runtime_hwparams(substream, &imx_rpmsg_pcm_hardware);

	ret = snd_pcm_hw_constraint_integer(substream->runtime,
					    SNDRV_PCM_HW_PARAM_PERIODS);
	if (ret < 0)
		return ret;

	info->msg_drop_count[substream->stream] = 0;

	/* Create the per-stream timer */
	info->stream_timer[substream->stream].info = info;
	info->stream_timer[substream->stream].substream = substream;
	timer_setup(&info->stream_timer[substream->stream].timer,
		    imx_rpmsg_timer_callback, 0);
	return ret;
}

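/*
 * Flush any queued work, then tell the M core to close the stream and
 * stop the per-stream timer.
 */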
static int imx_rpmsg_pcm_close(struct snd_soc_component *component,
			       struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
	struct rpmsg_info *info = dev_get_drvdata(component->dev);
	struct rpmsg_msg *msg;
	int ret = 0;

	/* Flush the workqueue so that TX_CLOSE/RX_CLOSE is the last message */
	flush_workqueue(info->rpmsg_wq);

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		msg = &info->msg[TX_CLOSE];
		msg->s_msg.header.cmd = TX_CLOSE;
	} else {
		msg = &info->msg[RX_CLOSE];
		msg->s_msg.header.cmd = RX_CLOSE;
	}

	info->send_message(msg, info);

	del_timer(&info->stream_timer[substream->stream].timer);

	rtd->dai_link->ignore_suspend = 0;

	if (info->msg_drop_count[substream->stream])
		dev_warn(rtd->dev, "Dropped %d messages\n",
			 info->msg_drop_count[substream->stream]);

	return ret;
}

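/*
 * Decide at prepare time whether low power audio (LPA) can be used for
 * this stream, based on the access mode and the device tree setting.
 */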
static int imx_rpmsg_pcm_prepare(struct snd_soc_component *component,
				 struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
	struct fsl_rpmsg *rpmsg = dev_get_drvdata(cpu_dai->dev);

	/*
	 * Four conditions determine whether LPA is used: non-MMAP
	 * access mode, NONBLOCK mode, version 2 of the protocol, and
	 * LPA enabled in the device tree.
	 */
	if ((runtime->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED ||
	     runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) &&
	    rpmsg->enable_lpa) {
		/*
		 * Ignore the suspend operation in low power mode; the
		 * M core continues playback while the A core is suspended.
		 */
		rtd->dai_link->ignore_suspend = 1;
		rpmsg->force_lpa = 1;
	} else {
		rpmsg->force_lpa = 0;
	}

	return 0;
}

static void imx_rpmsg_pcm_dma_complete(void *arg)
{
	struct snd_pcm_substream *substream = arg;

	snd_pcm_period_elapsed(substream);
}

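/*
 * Describe the DMA buffer (address, buffer and period size) to the
 * M core with a TX/RX_BUFFER command and register the period-elapsed
 * callback.
 */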
static int imx_rpmsg_prepare_and_submit(struct snd_soc_component *component,
					struct snd_pcm_substream *substream)
{
	struct rpmsg_info *info = dev_get_drvdata(component->dev);
	struct rpmsg_msg *msg;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		msg = &info->msg[TX_BUFFER];
		msg->s_msg.header.cmd = TX_BUFFER;
	} else {
		msg = &info->msg[RX_BUFFER];
		msg->s_msg.header.cmd = RX_BUFFER;
	}

	/* Send the buffer address and buffer size */
	msg->s_msg.param.buffer_addr = substream->runtime->dma_addr;
	msg->s_msg.param.buffer_size = snd_pcm_lib_buffer_bytes(substream);
	msg->s_msg.param.period_size = snd_pcm_lib_period_bytes(substream);
	msg->s_msg.param.buffer_tail = 0;

	info->num_period[substream->stream] = msg->s_msg.param.buffer_size /
					      msg->s_msg.param.period_size;

	info->callback[substream->stream] = imx_rpmsg_pcm_dma_complete;
	info->callback_param[substream->stream] = substream;

	return imx_rpmsg_insert_workqueue(substream, msg, info);
}

static int imx_rpmsg_async_issue_pending(struct snd_soc_component *component,
					 struct snd_pcm_substream *substream)
{
	struct rpmsg_info *info = dev_get_drvdata(component->dev);
	struct rpmsg_msg *msg;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		msg = &info->msg[TX_START];
		msg->s_msg.header.cmd = TX_START;
	} else {
		msg = &info->msg[RX_START];
		msg->s_msg.header.cmd = RX_START;
	}

	return imx_rpmsg_insert_workqueue(substream, msg, info);
}

static int imx_rpmsg_restart(struct snd_soc_component *component,
			     struct snd_pcm_substream *substream)
{
	struct rpmsg_info *info = dev_get_drvdata(component->dev);
	struct rpmsg_msg *msg;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		msg = &info->msg[TX_RESTART];
		msg->s_msg.header.cmd = TX_RESTART;
	} else {
		msg = &info->msg[RX_RESTART];
		msg->s_msg.header.cmd = RX_RESTART;
	}

	return imx_rpmsg_insert_workqueue(substream, msg, info);
}

static int imx_rpmsg_pause(struct snd_soc_component *component,
			   struct snd_pcm_substream *substream)
{
	struct rpmsg_info *info = dev_get_drvdata(component->dev);
	struct rpmsg_msg *msg;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		msg = &info->msg[TX_PAUSE];
		msg->s_msg.header.cmd = TX_PAUSE;
	} else {
		msg = &info->msg[RX_PAUSE];
		msg->s_msg.header.cmd = RX_PAUSE;
	}

	return imx_rpmsg_insert_workqueue(substream, msg, info);
}

static int imx_rpmsg_terminate_all(struct snd_soc_component *component,
				   struct snd_pcm_substream *substream)
{
	struct rpmsg_info *info = dev_get_drvdata(component->dev);
	struct rpmsg_msg *msg;
	int cmd;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		msg = &info->msg[TX_TERMINATE];
		msg->s_msg.header.cmd = TX_TERMINATE;
		/* Clear the buffer counters */
		cmd = TX_PERIOD_DONE + MSG_TYPE_A_NUM;
		info->msg[cmd].s_msg.param.buffer_tail = 0;
		info->msg[cmd].r_msg.param.buffer_tail = 0;
		info->msg[TX_POINTER].r_msg.param.buffer_offset = 0;
	} else {
		msg = &info->msg[RX_TERMINATE];
		msg->s_msg.header.cmd = RX_TERMINATE;
		/* Clear the buffer counters */
		cmd = RX_PERIOD_DONE + MSG_TYPE_A_NUM;
		info->msg[cmd].s_msg.param.buffer_tail = 0;
		info->msg[cmd].r_msg.param.buffer_tail = 0;
		info->msg[RX_POINTER].r_msg.param.buffer_offset = 0;
	}

	del_timer(&info->stream_timer[substream->stream].timer);

	return imx_rpmsg_insert_workqueue(substream, msg, info);
}

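/*
 * Map ALSA trigger commands onto rpmsg commands. In LPA mode, suspend
 * and resume are ignored so the M core keeps the stream running while
 * the A core sleeps.
 */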
static int imx_rpmsg_pcm_trigger(struct snd_soc_component *component,
				 struct snd_pcm_substream *substream, int cmd)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
	struct fsl_rpmsg *rpmsg = dev_get_drvdata(cpu_dai->dev);
	int ret = 0;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		ret = imx_rpmsg_prepare_and_submit(component, substream);
		if (ret)
			return ret;
		ret = imx_rpmsg_async_issue_pending(component, substream);
		break;
	case SNDRV_PCM_TRIGGER_RESUME:
		if (rpmsg->force_lpa)
			break;
		fallthrough;
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		ret = imx_rpmsg_restart(component, substream);
		break;
	case SNDRV_PCM_TRIGGER_SUSPEND:
		if (!rpmsg->force_lpa) {
			if (runtime->info & SNDRV_PCM_INFO_PAUSE)
				ret = imx_rpmsg_pause(component, substream);
			else
				ret = imx_rpmsg_terminate_all(component, substream);
		}
		break;
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		ret = imx_rpmsg_pause(component, substream);
		break;
	case SNDRV_PCM_TRIGGER_STOP:
		ret = imx_rpmsg_terminate_all(component, substream);
		break;
	default:
		return -EINVAL;
	}

	if (ret)
		return ret;

	return 0;
}

/*
 * imx_rpmsg_pcm_ack
 *
 * Send the period index to the M core through rpmsg, but not for
 * every period; skipping unnecessary messages reduces the pressure
 * on rpmsg bandwidth.
 */
static int imx_rpmsg_pcm_ack(struct snd_soc_component *component,
			     struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
	struct fsl_rpmsg *rpmsg = dev_get_drvdata(cpu_dai->dev);
	struct rpmsg_info *info = dev_get_drvdata(component->dev);
	snd_pcm_uframes_t period_size = runtime->period_size;
	snd_pcm_sframes_t avail;
	struct timer_list *timer;
	struct rpmsg_msg *msg;
	unsigned long flags;
	int buffer_tail = 0;
	int written_num;

	if (!rpmsg->force_lpa)
		return 0;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		msg = &info->msg[TX_PERIOD_DONE + MSG_TYPE_A_NUM];
		msg->s_msg.header.cmd = TX_PERIOD_DONE;
	} else {
		msg = &info->msg[RX_PERIOD_DONE + MSG_TYPE_A_NUM];
		msg->s_msg.header.cmd = RX_PERIOD_DONE;
	}

	msg->s_msg.header.type = MSG_TYPE_C;

	buffer_tail = (frames_to_bytes(runtime, runtime->control->appl_ptr) %
		       snd_pcm_lib_buffer_bytes(substream));
	buffer_tail = buffer_tail / snd_pcm_lib_period_bytes(substream);

	/* The period index has been updated */
	if (buffer_tail != msg->s_msg.param.buffer_tail) {
		written_num = buffer_tail - msg->s_msg.param.buffer_tail;
		if (written_num < 0)
			written_num += runtime->periods;

		msg->s_msg.param.buffer_tail = buffer_tail;

		/* Update the notification message to the latest state */
		spin_lock_irqsave(&info->lock[substream->stream], flags);
		memcpy(&info->notify[substream->stream], msg,
		       sizeof(struct rpmsg_s_msg));
		info->notify_updated[substream->stream] = true;
		spin_unlock_irqrestore(&info->lock[substream->stream], flags);

		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
			avail = snd_pcm_playback_hw_avail(runtime);
		else
			avail = snd_pcm_capture_hw_avail(runtime);

		timer = &info->stream_timer[substream->stream].timer;
		/*
		 * If less than one period of data was in the buffer before
		 * this fill, the M core may be running short of data, so
		 * send the message immediately to let it know the pointer
		 * has been updated.
		 * If more than one period of data was already buffered, the
		 * M core has enough data, so delay the message by one period
		 * (using the timer) to reduce the number of messages in the
		 * workqueue; the pointer may be updated again by a later
		 * ack, so the latest pointer is what reaches the M core.
		 */
		if ((avail - written_num * period_size) <= period_size) {
			imx_rpmsg_insert_workqueue(substream, msg, info);
		} else if (rpmsg->force_lpa && !timer_pending(timer)) {
			int time_msec;

			time_msec = (int)(runtime->period_size * 1000 / runtime->rate);
			mod_timer(timer, jiffies + msecs_to_jiffies(time_msec));
		}
	}

	return 0;
}

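/*
 * Pre-allocate the DMA buffer for all substreams; the buffer size
 * comes from the CPU DAI driver data and also caps the reported
 * buffer_bytes_max.
 */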
static int imx_rpmsg_pcm_new(struct snd_soc_component *component,
			     struct snd_soc_pcm_runtime *rtd)
{
	struct snd_card *card = rtd->card->snd_card;
	struct snd_pcm *pcm = rtd->pcm;
	struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
	struct fsl_rpmsg *rpmsg = dev_get_drvdata(cpu_dai->dev);
	int ret;

	ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	imx_rpmsg_pcm_hardware.buffer_bytes_max = rpmsg->buffer_size;
	return snd_pcm_set_fixed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV_WC,
					    pcm->card->dev, rpmsg->buffer_size);
}

static const struct snd_soc_component_driver imx_rpmsg_soc_component = {
	.name		= IMX_PCM_DRV_NAME,
	.pcm_construct	= imx_rpmsg_pcm_new,
	.open		= imx_rpmsg_pcm_open,
	.close		= imx_rpmsg_pcm_close,
	.hw_params	= imx_rpmsg_pcm_hw_params,
	.trigger	= imx_rpmsg_pcm_trigger,
	.pointer	= imx_rpmsg_pcm_pointer,
	.ack		= imx_rpmsg_pcm_ack,
	.prepare	= imx_rpmsg_pcm_prepare,
};

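/*
 * Workqueue handler: flush any pending pointer notifications first so
 * the M core is not starved of data, then send the queued command and
 * advance the read index of the work list.
 */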
static void imx_rpmsg_pcm_work(struct work_struct *work)
{
	struct work_of_rpmsg *work_of_rpmsg;
	bool is_notification = false;
	struct rpmsg_info *info;
	struct rpmsg_msg msg;
	unsigned long flags;

	work_of_rpmsg = container_of(work, struct work_of_rpmsg, work);
	info = work_of_rpmsg->info;

	/*
	 * For every work item, first check whether a period-done
	 * notification is pending; the M core may be short of data,
	 * so it needs to learn about updated data immediately.
	 */
	spin_lock_irqsave(&info->lock[TX], flags);
	if (info->notify_updated[TX]) {
		memcpy(&msg, &info->notify[TX], sizeof(struct rpmsg_s_msg));
		info->notify_updated[TX] = false;
		spin_unlock_irqrestore(&info->lock[TX], flags);
		info->send_message(&msg, info);
	} else {
		spin_unlock_irqrestore(&info->lock[TX], flags);
	}

	spin_lock_irqsave(&info->lock[RX], flags);
	if (info->notify_updated[RX]) {
		memcpy(&msg, &info->notify[RX], sizeof(struct rpmsg_s_msg));
		info->notify_updated[RX] = false;
		spin_unlock_irqrestore(&info->lock[RX], flags);
		info->send_message(&msg, info);
	} else {
		spin_unlock_irqrestore(&info->lock[RX], flags);
	}

	/* Skip the notification message since it has been processed above */
	if (work_of_rpmsg->msg.s_msg.header.type == MSG_TYPE_C &&
	    (work_of_rpmsg->msg.s_msg.header.cmd == TX_PERIOD_DONE ||
	     work_of_rpmsg->msg.s_msg.header.cmd == RX_PERIOD_DONE))
		is_notification = true;

	if (!is_notification)
		info->send_message(&work_of_rpmsg->msg, info);

	/* Update the read index */
	spin_lock_irqsave(&info->wq_lock, flags);
	info->work_read_index++;
	info->work_read_index %= WORK_MAX_NUM;
	spin_unlock_irqrestore(&info->wq_lock, flags);
}

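/*
 * Probe: create the ordered workqueue, initialize the message headers,
 * locks and completion, and register the PCM component.
 */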
static int imx_rpmsg_pcm_probe(struct platform_device *pdev)
{
	struct snd_soc_component *component;
	struct rpmsg_info *info;
	int ret, i;

	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	platform_set_drvdata(pdev, info);

	info->rpdev = container_of(pdev->dev.parent, struct rpmsg_device, dev);
	info->dev = &pdev->dev;
	/* Set up the work queue */
	info->rpmsg_wq = alloc_ordered_workqueue("rpmsg_audio",
						 WQ_HIGHPRI |
						 WQ_UNBOUND |
						 WQ_FREEZABLE);
	if (!info->rpmsg_wq) {
		dev_err(&pdev->dev, "workqueue create failed\n");
		return -ENOMEM;
	}

	/* Initialize the write index to 1 so it differs from the read index */
	info->work_write_index = 1;
	info->send_message = imx_rpmsg_pcm_send_message;

	for (i = 0; i < WORK_MAX_NUM; i++) {
		INIT_WORK(&info->work_list[i].work, imx_rpmsg_pcm_work);
		info->work_list[i].info = info;
	}

	/* Initialize the common fields of every message */
	for (i = 0; i < MSG_MAX_NUM; i++) {
		info->msg[i].s_msg.header.cate = IMX_RPMSG_AUDIO;
		info->msg[i].s_msg.header.major = IMX_RMPSG_MAJOR;
		info->msg[i].s_msg.header.minor = IMX_RMPSG_MINOR;
		info->msg[i].s_msg.header.type = MSG_TYPE_A;
		info->msg[i].s_msg.param.audioindex = 0;
	}

	init_completion(&info->cmd_complete);
	mutex_init(&info->msg_lock);
	spin_lock_init(&info->lock[TX]);
	spin_lock_init(&info->lock[RX]);
	spin_lock_init(&info->wq_lock);

	ret = devm_snd_soc_register_component(&pdev->dev,
					      &imx_rpmsg_soc_component,
					      NULL, 0);
	if (ret)
		goto fail;

	component = snd_soc_lookup_component(&pdev->dev, IMX_PCM_DRV_NAME);
	if (!component) {
		ret = -EINVAL;
		goto fail;
	}
#ifdef CONFIG_DEBUG_FS
	component->debugfs_prefix = "rpmsg";
#endif

	return 0;

fail:
	if (info->rpmsg_wq)
		destroy_workqueue(info->rpmsg_wq);

	return ret;
}

static int imx_rpmsg_pcm_remove(struct platform_device *pdev)
{
	struct rpmsg_info *info = platform_get_drvdata(pdev);

	if (info->rpmsg_wq)
		destroy_workqueue(info->rpmsg_wq);

	return 0;
}

#ifdef CONFIG_PM
static int imx_rpmsg_pcm_runtime_resume(struct device *dev)
{
	struct rpmsg_info *info = dev_get_drvdata(dev);

	cpu_latency_qos_add_request(&info->pm_qos_req, 0);

	return 0;
}

static int imx_rpmsg_pcm_runtime_suspend(struct device *dev)
{
	struct rpmsg_info *info = dev_get_drvdata(dev);

	cpu_latency_qos_remove_request(&info->pm_qos_req);

	return 0;
}
#endif

#ifdef CONFIG_PM_SLEEP
static int imx_rpmsg_pcm_suspend(struct device *dev)
{
	struct rpmsg_info *info = dev_get_drvdata(dev);
	struct rpmsg_msg *rpmsg_tx;
	struct rpmsg_msg *rpmsg_rx;

	rpmsg_tx = &info->msg[TX_SUSPEND];
	rpmsg_rx = &info->msg[RX_SUSPEND];

	rpmsg_tx->s_msg.header.cmd = TX_SUSPEND;
	info->send_message(rpmsg_tx, info);

	rpmsg_rx->s_msg.header.cmd = RX_SUSPEND;
	info->send_message(rpmsg_rx, info);

	return 0;
}

static int imx_rpmsg_pcm_resume(struct device *dev)
{
	struct rpmsg_info *info = dev_get_drvdata(dev);
	struct rpmsg_msg *rpmsg_tx;
	struct rpmsg_msg *rpmsg_rx;

	rpmsg_tx = &info->msg[TX_RESUME];
	rpmsg_rx = &info->msg[RX_RESUME];

	rpmsg_tx->s_msg.header.cmd = TX_RESUME;
	info->send_message(rpmsg_tx, info);

	rpmsg_rx->s_msg.header.cmd = RX_RESUME;
	info->send_message(rpmsg_rx, info);

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops imx_rpmsg_pcm_pm_ops = {
	SET_RUNTIME_PM_OPS(imx_rpmsg_pcm_runtime_suspend,
			   imx_rpmsg_pcm_runtime_resume,
			   NULL)
	SET_SYSTEM_SLEEP_PM_OPS(imx_rpmsg_pcm_suspend,
				imx_rpmsg_pcm_resume)
};

static struct platform_driver imx_pcm_rpmsg_driver = {
	.probe  = imx_rpmsg_pcm_probe,
	.remove = imx_rpmsg_pcm_remove,
	.driver = {
		.name = IMX_PCM_DRV_NAME,
		.pm = &imx_rpmsg_pcm_pm_ops,
	},
};
module_platform_driver(imx_pcm_rpmsg_driver);

MODULE_DESCRIPTION("Freescale SoC Audio RPMSG PCM interface");
MODULE_AUTHOR("Shengjiu Wang <shengjiu.wang@nxp.com>");
MODULE_ALIAS("platform:" IMX_PCM_DRV_NAME);
MODULE_LICENSE("GPL v2");