/*
 * Renesas R-Car Audio DMAC support
 *
 * Copyright (C) 2015 Renesas Electronics Corp.
 * Copyright (c) 2015 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/delay.h>
#include <linux/of_dma.h>
#include "rsnd.h"

/*
 * Audio DMAC peri peri register
 */
#define PDMASAR		0x00
#define PDMADAR		0x04
#define PDMACHCR	0x0c

/* PDMACHCR */
#define PDMACHCR_DE	(1 << 0)
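
/*
 * Note: as assembled by rsnd_dmapp_get_chcr()/rsnd_dmapp_attach() below,
 * PDMACHCR carries the "from" slave ID in bits [31:24], the "to" slave ID
 * in bits [23:16], and the transfer enable bit PDMACHCR_DE in bit 0.
 */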

struct rsnd_dmaen {
	struct dma_chan *chan;
	dma_cookie_t cookie;
	dma_addr_t dma_buf;
	unsigned int dma_len;
	unsigned int dma_period;
	unsigned int dma_cnt;
};

struct rsnd_dmapp {
	int dmapp_id;
	u32 chcr;
};

struct rsnd_dma {
	struct rsnd_mod mod;
	struct rsnd_mod *mod_from;
	struct rsnd_mod *mod_to;
	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	union {
		struct rsnd_dmaen en;
		struct rsnd_dmapp pp;
	} dma;
};

struct rsnd_dma_ctrl {
	void __iomem *base;
	int dmaen_num;
	int dmapp_num;
};

#define rsnd_priv_to_dmac(p)	((struct rsnd_dma_ctrl *)(p)->dma)
#define rsnd_mod_to_dma(_mod)	container_of((_mod), struct rsnd_dma, mod)
#define rsnd_dma_to_dmaen(dma)	(&(dma)->dma.en)
#define rsnd_dma_to_dmapp(dma)	(&(dma)->dma.pp)
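
/*
 * One of the two union members above is used per stream: rsnd_dma_attach()
 * below picks Audio DMAC peri peri (pp) when both ends of the transfer are
 * on-chip modules (mod_from and mod_to), and Audio DMAC via DMAEngine (en)
 * when one end is memory. Gen1 always uses the DMAEngine flavor.
 */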

/*
 * Audio DMAC
 */
#define rsnd_dmaen_sync(dmaen, io, i)	__rsnd_dmaen_sync(dmaen, io, i, 1)
#define rsnd_dmaen_unsync(dmaen, io, i)	__rsnd_dmaen_sync(dmaen, io, i, 0)
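
/*
 * The whole ring buffer is mapped once with dma_map_single() in
 * rsnd_dmaen_start(); these helpers then pass ownership of a single
 * period to the device (sync) or back to the CPU (unsync).
 */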
static void __rsnd_dmaen_sync(struct rsnd_dmaen *dmaen, struct rsnd_dai_stream *io,
			      int i, int sync)
{
	struct device *dev = dmaen->chan->device->dev;
	enum dma_data_direction dir;
	int is_play = rsnd_io_is_play(io);
	dma_addr_t buf;
	int len, max;
	size_t period;

	len = dmaen->dma_len;
	period = dmaen->dma_period;
	max = len / period;
	i = i % max;
	buf = dmaen->dma_buf + (period * i);

	dir = is_play ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

	if (sync)
		dma_sync_single_for_device(dev, buf, period, dir);
	else
		dma_sync_single_for_cpu(dev, buf, period, dir);
}

static void __rsnd_dmaen_complete(struct rsnd_mod *mod,
				  struct rsnd_dai_stream *io)
{
	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
	bool elapsed = false;
	unsigned long flags;

	/*
	 * Renesas sound Gen1 needs one DMAC,
	 * Gen2 needs two DMACs.
	 * In the Gen2 case they are the Audio-DMAC and the Audio-DMAC-peri-peri.
	 * Audio-DMAC-peri-peri doesn't have an interrupt,
	 * and this driver assumes that here.
	 */
	spin_lock_irqsave(&priv->lock, flags);

	if (rsnd_io_is_working(io)) {
		rsnd_dmaen_unsync(dmaen, io, dmaen->dma_cnt);

		/*
		 * The next period has already started.
		 * Let's sync the period after next.
		 * see
		 *	rsnd_dmaen_start()
		 */
		rsnd_dmaen_sync(dmaen, io, dmaen->dma_cnt + 2);

		elapsed = true;

		dmaen->dma_cnt++;
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	if (elapsed)
		rsnd_dai_period_elapsed(io);
}
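
/*
 * Illustrative timeline of period ownership (derived from the sync calls
 * above and in rsnd_dmaen_start()):
 *
 *	start:			sync(0), sync(1)
 *	period 0 complete:	unsync(0), sync(2)
 *	period 1 complete:	unsync(1), sync(3)
 *	...
 *
 * so the period currently being transferred and the one after it are
 * always owned by the device.
 */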

static void rsnd_dmaen_complete(void *data)
{
	struct rsnd_mod *mod = data;

	rsnd_mod_interrupt(mod, __rsnd_dmaen_complete);
}

static struct dma_chan *rsnd_dmaen_request_channel(struct rsnd_dai_stream *io,
						    struct rsnd_mod *mod_from,
						    struct rsnd_mod *mod_to)
{
	if ((!mod_from && !mod_to) ||
	    (mod_from && mod_to))
		return NULL;

	if (mod_from)
		return rsnd_mod_dma_req(io, mod_from);
	else
		return rsnd_mod_dma_req(io, mod_to);
}

static int rsnd_dmaen_stop(struct rsnd_mod *mod,
			   struct rsnd_dai_stream *io,
			   struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);

	if (dmaen->chan) {
		int is_play = rsnd_io_is_play(io);

		dmaengine_terminate_all(dmaen->chan);
		dma_unmap_single(dmaen->chan->device->dev,
				 dmaen->dma_buf, dmaen->dma_len,
				 is_play ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	}

	return 0;
}

static int rsnd_dmaen_nolock_stop(struct rsnd_mod *mod,
				  struct rsnd_dai_stream *io,
				  struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);

	/*
	 * DMAEngine release uses a mutex lock.
	 * Thus, it shouldn't be called under spinlock.
	 * Let's call it under nolock_stop.
	 */
	if (dmaen->chan)
		dma_release_channel(dmaen->chan);

	dmaen->chan = NULL;

	return 0;
}

static int rsnd_dmaen_nolock_start(struct rsnd_mod *mod,
				   struct rsnd_dai_stream *io,
				   struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
	struct device *dev = rsnd_priv_to_dev(priv);

	if (dmaen->chan) {
		dev_err(dev, "it already has dma channel\n");
		return -EIO;
	}

	/*
	 * DMAEngine request uses a mutex lock.
	 * Thus, it shouldn't be called under spinlock.
	 * Let's call it under nolock_start.
	 */
	dmaen->chan = rsnd_dmaen_request_channel(io,
						 dma->mod_from,
						 dma->mod_to);
	if (IS_ERR_OR_NULL(dmaen->chan)) {
		int ret = PTR_ERR(dmaen->chan);

		dmaen->chan = NULL;
		dev_err(dev, "can't get dma channel\n");
		return ret;
	}

	return 0;
}

static int rsnd_dmaen_start(struct rsnd_mod *mod,
			    struct rsnd_dai_stream *io,
			    struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
	struct snd_pcm_substream *substream = io->substream;
	struct device *dev = rsnd_priv_to_dev(priv);
	struct dma_async_tx_descriptor *desc;
	struct dma_slave_config cfg = {};
	dma_addr_t buf;
	size_t len;
	size_t period;
	int is_play = rsnd_io_is_play(io);
	int i;
	int ret;

	cfg.direction = is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
	cfg.src_addr = dma->src_addr;
	cfg.dst_addr = dma->dst_addr;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	dev_dbg(dev, "%s[%d] %pad -> %pad\n",
		rsnd_mod_name(mod), rsnd_mod_id(mod),
		&cfg.src_addr, &cfg.dst_addr);

	ret = dmaengine_slave_config(dmaen->chan, &cfg);
	if (ret < 0)
		return ret;

	len = snd_pcm_lib_buffer_bytes(substream);
	period = snd_pcm_lib_period_bytes(substream);
	buf = dma_map_single(dmaen->chan->device->dev,
			     substream->runtime->dma_area,
			     len,
			     is_play ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	if (dma_mapping_error(dmaen->chan->device->dev, buf)) {
		dev_err(dev, "dma map failed\n");
		return -EIO;
	}

	desc = dmaengine_prep_dma_cyclic(dmaen->chan,
					 buf, len, period,
					 is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	if (!desc) {
		dev_err(dev, "dmaengine_prep_dma_cyclic() fail\n");
		return -EIO;
	}

	desc->callback = rsnd_dmaen_complete;
	desc->callback_param = rsnd_mod_get(dma);

	dmaen->dma_buf = buf;
	dmaen->dma_len = len;
	dmaen->dma_period = period;
	dmaen->dma_cnt = 0;

	/*
	 * Synchronize this and the next period.
	 * see
	 *	__rsnd_dmaen_complete()
	 */
	for (i = 0; i < 2; i++)
		rsnd_dmaen_sync(dmaen, io, i);

	dmaen->cookie = dmaengine_submit(desc);
	if (dmaen->cookie < 0) {
		dev_err(dev, "dmaengine_submit() fail\n");
		return -EIO;
	}

	dma_async_issue_pending(dmaen->chan);

	return 0;
}
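
/*
 * Note: dmaengine_prep_dma_cyclic() above builds a descriptor that loops
 * over the whole buffer in period-sized chunks; with DMA_PREP_INTERRUPT
 * set, rsnd_dmaen_complete() is invoked once per completed period, which
 * drives the period bookkeeping in __rsnd_dmaen_complete().
 */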

struct dma_chan *rsnd_dma_request_channel(struct device_node *of_node,
					  struct rsnd_mod *mod, char *name)
{
	struct dma_chan *chan = NULL;
	struct device_node *np;
	int i = 0;

	for_each_child_of_node(of_node, np) {
		if (i == rsnd_mod_id(mod) && (!chan))
			chan = of_dma_request_slave_channel(np, name);
		i++;
	}

	/* of_node came from rsnd_xxx_of_node(), so it must be put here */
	of_node_put(of_node);

	return chan;
}

static int rsnd_dmaen_attach(struct rsnd_dai_stream *io,
			     struct rsnd_dma *dma,
			     struct rsnd_mod *mod_from, struct rsnd_mod *mod_to)
{
	struct rsnd_priv *priv = rsnd_io_to_priv(io);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	struct dma_chan *chan;

	/* try to get DMAEngine channel */
	chan = rsnd_dmaen_request_channel(io, mod_from, mod_to);
	if (IS_ERR_OR_NULL(chan)) {
		/* propagate -EPROBE_DEFER as-is */
		if (PTR_ERR(chan) == -EPROBE_DEFER)
			return PTR_ERR(chan);

		/*
		 * DMA failed. Fall back to PIO mode.
		 * see
		 *	rsnd_ssi_fallback()
		 *	rsnd_rdai_continuance_probe()
		 */
		return -EAGAIN;
	}

	dma_release_channel(chan);

	dmac->dmaen_num++;

	return 0;
}

static int rsnd_dmaen_pointer(struct rsnd_mod *mod,
			      struct rsnd_dai_stream *io,
			      snd_pcm_uframes_t *pointer)
{
	struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
	struct dma_tx_state state;
	enum dma_status status;
	unsigned int pos = 0;

	status = dmaengine_tx_status(dmaen->chan, dmaen->cookie, &state);
	if (status == DMA_IN_PROGRESS || status == DMA_PAUSED) {
		if (state.residue > 0 && state.residue <= dmaen->dma_len)
			pos = dmaen->dma_len - state.residue;
	}
	*pointer = bytes_to_frames(runtime, pos);

	return 0;
}
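
/*
 * The hardware pointer is derived from the DMAEngine residue: with a
 * cyclic descriptor of dma_len bytes, (dma_len - residue) is how far the
 * current pass through the ring has progressed, converted to frames above.
 */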

static struct rsnd_mod_ops rsnd_dmaen_ops = {
	.name = "audmac",
	.nolock_start = rsnd_dmaen_nolock_start,
	.nolock_stop = rsnd_dmaen_nolock_stop,
	.start = rsnd_dmaen_start,
	.stop = rsnd_dmaen_stop,
	.pointer = rsnd_dmaen_pointer,
};

/*
 * Audio DMAC peri peri
 */
static const u8 gen2_id_table_ssiu[] = {
	0x00, /* SSI00 */
	0x04, /* SSI10 */
	0x08, /* SSI20 */
	0x0c, /* SSI3 */
	0x0d, /* SSI4 */
	0x0e, /* SSI5 */
	0x0f, /* SSI6 */
	0x10, /* SSI7 */
	0x11, /* SSI8 */
	0x12, /* SSI90 */
};
static const u8 gen2_id_table_scu[] = {
	0x2d, /* SCU_SRCI0 */
	0x2e, /* SCU_SRCI1 */
	0x2f, /* SCU_SRCI2 */
	0x30, /* SCU_SRCI3 */
	0x31, /* SCU_SRCI4 */
	0x32, /* SCU_SRCI5 */
	0x33, /* SCU_SRCI6 */
	0x34, /* SCU_SRCI7 */
	0x35, /* SCU_SRCI8 */
	0x36, /* SCU_SRCI9 */
};
static const u8 gen2_id_table_cmd[] = {
	0x37, /* SCU_CMD0 */
	0x38, /* SCU_CMD1 */
};
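
/*
 * These tables map a module instance id to its Audio DMAC peri peri
 * slave id; rsnd_dmapp_get_id() below looks the id up per connected
 * module (SSIU / SRC / CMD) and rsnd_dmapp_get_chcr() packs the result
 * into PDMACHCR.
 */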

static u32 rsnd_dmapp_get_id(struct rsnd_dai_stream *io,
			     struct rsnd_mod *mod)
{
	struct rsnd_mod *ssi = rsnd_io_to_mod_ssi(io);
	struct rsnd_mod *src = rsnd_io_to_mod_src(io);
	struct rsnd_mod *dvc = rsnd_io_to_mod_dvc(io);
	const u8 *entry = NULL;
	int id = rsnd_mod_id(mod);
	int size = 0;

	if (mod == ssi) {
		entry = gen2_id_table_ssiu;
		size = ARRAY_SIZE(gen2_id_table_ssiu);
	} else if (mod == src) {
		entry = gen2_id_table_scu;
		size = ARRAY_SIZE(gen2_id_table_scu);
	} else if (mod == dvc) {
		entry = gen2_id_table_cmd;
		size = ARRAY_SIZE(gen2_id_table_cmd);
	}

	if ((!entry) || (size <= id)) {
		struct device *dev = rsnd_priv_to_dev(rsnd_io_to_priv(io));

		dev_err(dev, "unknown connection (%s[%d])\n",
			rsnd_mod_name(mod), rsnd_mod_id(mod));

		/* return a non-prohibited SRS number on error */
		return 0x00; /* SSI00 */
	}

	return entry[id];
}

static u32 rsnd_dmapp_get_chcr(struct rsnd_dai_stream *io,
			       struct rsnd_mod *mod_from,
			       struct rsnd_mod *mod_to)
{
	return	(rsnd_dmapp_get_id(io, mod_from) << 24) +
		(rsnd_dmapp_get_id(io, mod_to) << 16);
}

#define rsnd_dmapp_addr(dmac, dma, reg) \
	(dmac->base + 0x20 + reg + \
	 (0x10 * rsnd_dma_to_dmapp(dma)->dmapp_id))
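
/*
 * Each peri peri channel thus owns a 0x10-byte register window starting at
 * base + 0x20. For example (illustrative only), dmapp_id 1 with PDMACHCR
 * (0x0c) resolves to base + 0x20 + 0x0c + 0x10 = base + 0x3c.
 */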
static void rsnd_dmapp_write(struct rsnd_dma *dma, u32 data, u32 reg)
{
	struct rsnd_mod *mod = rsnd_mod_get(dma);
	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	struct device *dev = rsnd_priv_to_dev(priv);

	dev_dbg(dev, "w %p : %08x\n", rsnd_dmapp_addr(dmac, dma, reg), data);

	iowrite32(data, rsnd_dmapp_addr(dmac, dma, reg));
}

static u32 rsnd_dmapp_read(struct rsnd_dma *dma, u32 reg)
{
	struct rsnd_mod *mod = rsnd_mod_get(dma);
	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);

	return ioread32(rsnd_dmapp_addr(dmac, dma, reg));
}

static void rsnd_dmapp_bset(struct rsnd_dma *dma, u32 data, u32 mask, u32 reg)
{
	struct rsnd_mod *mod = rsnd_mod_get(dma);
	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	void __iomem *addr = rsnd_dmapp_addr(dmac, dma, reg);
	u32 val = ioread32(addr);

	val &= ~mask;
	val |= (data & mask);

	iowrite32(val, addr);
}

static int rsnd_dmapp_stop(struct rsnd_mod *mod,
			   struct rsnd_dai_stream *io,
			   struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	int i;

	rsnd_dmapp_bset(dma, 0, PDMACHCR_DE, PDMACHCR);

	for (i = 0; i < 1024; i++) {
		if (0 == (rsnd_dmapp_read(dma, PDMACHCR) & PDMACHCR_DE))
			return 0;
		udelay(1);
	}

	return -EIO;
}

static int rsnd_dmapp_start(struct rsnd_mod *mod,
			    struct rsnd_dai_stream *io,
			    struct rsnd_priv *priv)
{
	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
	struct rsnd_dmapp *dmapp = rsnd_dma_to_dmapp(dma);

	rsnd_dmapp_write(dma, dma->src_addr, PDMASAR);
	rsnd_dmapp_write(dma, dma->dst_addr, PDMADAR);
	rsnd_dmapp_write(dma, dmapp->chcr, PDMACHCR);

	return 0;
}

static int rsnd_dmapp_attach(struct rsnd_dai_stream *io,
			     struct rsnd_dma *dma,
			     struct rsnd_mod *mod_from, struct rsnd_mod *mod_to)
{
	struct rsnd_dmapp *dmapp = rsnd_dma_to_dmapp(dma);
	struct rsnd_priv *priv = rsnd_io_to_priv(io);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	struct device *dev = rsnd_priv_to_dev(priv);

	dmapp->dmapp_id = dmac->dmapp_num;
	dmapp->chcr = rsnd_dmapp_get_chcr(io, mod_from, mod_to) | PDMACHCR_DE;

	dmac->dmapp_num++;

	dev_dbg(dev, "id/src/dst/chcr = %d/%pad/%pad/%08x\n",
		dmapp->dmapp_id, &dma->src_addr, &dma->dst_addr, dmapp->chcr);

	return 0;
}

static struct rsnd_mod_ops rsnd_dmapp_ops = {
	.name = "audmac-pp",
	.start = rsnd_dmapp_start,
	.stop = rsnd_dmapp_stop,
	.quit = rsnd_dmapp_stop,
};

/*
 * Common DMAC Interface
 */

/*
 * DMA read/write register offset
 *
 *	RSND_xxx_I_N	for Audio DMAC input
 *	RSND_xxx_O_N	for Audio DMAC output
 *	RSND_xxx_I_P	for Audio DMAC peri peri input
 *	RSND_xxx_O_P	for Audio DMAC peri peri output
 *
 * ex) R-Car H2 case
 *	      mod        / DMAC in    / DMAC out   / DMAC PP in / DMAC PP out
 *	SSI : 0xec541000 / 0xec241008 / 0xec24100c
 *	SSIU: 0xec541000 / 0xec100000 / 0xec100000 / 0xec400000 / 0xec400000
 *	SCU : 0xec500000 / 0xec000000 / 0xec004000 / 0xec300000 / 0xec304000
 *	CMD : 0xec500000 /            / 0xec008000 /            / 0xec308000
 */
#define RDMA_SSI_I_N(addr, i)	(addr ##_reg - 0x00300000 + (0x40 * i) + 0x8)
#define RDMA_SSI_O_N(addr, i)	(addr ##_reg - 0x00300000 + (0x40 * i) + 0xc)

#define RDMA_SSIU_I_N(addr, i)	(addr ##_reg - 0x00441000 + (0x1000 * i))
#define RDMA_SSIU_O_N(addr, i)	(addr ##_reg - 0x00441000 + (0x1000 * i))

#define RDMA_SSIU_I_P(addr, i)	(addr ##_reg - 0x00141000 + (0x1000 * i))
#define RDMA_SSIU_O_P(addr, i)	(addr ##_reg - 0x00141000 + (0x1000 * i))

#define RDMA_SRC_I_N(addr, i)	(addr ##_reg - 0x00500000 + (0x400 * i))
#define RDMA_SRC_O_N(addr, i)	(addr ##_reg - 0x004fc000 + (0x400 * i))

#define RDMA_SRC_I_P(addr, i)	(addr ##_reg - 0x00200000 + (0x400 * i))
#define RDMA_SRC_O_P(addr, i)	(addr ##_reg - 0x001fc000 + (0x400 * i))

#define RDMA_CMD_O_N(addr, i)	(addr ##_reg - 0x004f8000 + (0x400 * i))
#define RDMA_CMD_O_P(addr, i)	(addr ##_reg - 0x001f8000 + (0x400 * i))
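
/*
 * Sanity check against the R-Car H2 table above (illustrative):
 * with ssi_reg = 0xec541000, RDMA_SSI_I_N(ssi, 0) = 0xec541000 - 0x00300000
 * + 0x8 = 0xec241008 and RDMA_SSI_O_N(ssi, 0) = 0xec24100c, matching the
 * "DMAC in" / "DMAC out" columns for SSI.
 */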

static dma_addr_t
rsnd_gen2_dma_addr(struct rsnd_dai_stream *io,
		   struct rsnd_mod *mod,
		   int is_play, int is_from)
{
	struct rsnd_priv *priv = rsnd_io_to_priv(io);
	struct device *dev = rsnd_priv_to_dev(priv);
	phys_addr_t ssi_reg = rsnd_gen_get_phy_addr(priv, RSND_GEN2_SSI);
	phys_addr_t src_reg = rsnd_gen_get_phy_addr(priv, RSND_GEN2_SCU);
	int is_ssi = !!(rsnd_io_to_mod_ssi(io) == mod);
	int use_src = !!rsnd_io_to_mod_src(io);
	int use_cmd = !!rsnd_io_to_mod_dvc(io) ||
		      !!rsnd_io_to_mod_mix(io) ||
		      !!rsnd_io_to_mod_ctu(io);
	int id = rsnd_mod_id(mod);
	struct dma_addr {
		dma_addr_t out_addr;
		dma_addr_t in_addr;
	} dma_addrs[3][2][3] = {
		/* SRC */
		/* Capture */
		{{{ 0,				0 },
		  { RDMA_SRC_O_N(src, id),	RDMA_SRC_I_P(src, id) },
		  { RDMA_CMD_O_N(src, id),	RDMA_SRC_I_P(src, id) } },
		 /* Playback */
		 {{ 0,				0, },
		  { RDMA_SRC_O_P(src, id),	RDMA_SRC_I_N(src, id) },
		  { RDMA_CMD_O_P(src, id),	RDMA_SRC_I_N(src, id) } }
		},
		/* SSI */
		/* Capture */
		{{{ RDMA_SSI_O_N(ssi, id),	0 },
		  { RDMA_SSIU_O_P(ssi, id),	0 },
		  { RDMA_SSIU_O_P(ssi, id),	0 } },
		 /* Playback */
		 {{ 0,				RDMA_SSI_I_N(ssi, id) },
		  { 0,				RDMA_SSIU_I_P(ssi, id) },
		  { 0,				RDMA_SSIU_I_P(ssi, id) } }
		},
		/* SSIU */
		/* Capture */
		{{{ RDMA_SSIU_O_N(ssi, id),	0 },
		  { RDMA_SSIU_O_P(ssi, id),	0 },
		  { RDMA_SSIU_O_P(ssi, id),	0 } },
		 /* Playback */
		 {{ 0,				RDMA_SSIU_I_N(ssi, id) },
		  { 0,				RDMA_SSIU_I_P(ssi, id) },
		  { 0,				RDMA_SSIU_I_P(ssi, id) } } },
	};
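
	/*
	 * dma_addrs[][][] is indexed as [is_ssi][is_play][use_src + use_cmd]:
	 * 0/1/2 selects SRC / SSI / SSIU (SSIU when BUSIF is used, see below),
	 * then playback vs capture, then how much of the SCU path is in use
	 * (none / SRC only / SRC + CMD).
	 */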

	/* it shouldn't happen */
	if (use_cmd && !use_src)
		dev_err(dev, "DVC is selected without SRC\n");

	/* use SSIU or SSI ? */
	if (is_ssi && rsnd_ssi_use_busif(io))
		is_ssi++;

	return (is_from) ?
		dma_addrs[is_ssi][is_play][use_src + use_cmd].out_addr :
		dma_addrs[is_ssi][is_play][use_src + use_cmd].in_addr;
}

static dma_addr_t rsnd_dma_addr(struct rsnd_dai_stream *io,
				struct rsnd_mod *mod,
				int is_play, int is_from)
{
	struct rsnd_priv *priv = rsnd_io_to_priv(io);

	/*
	 * gen1 uses default DMA addr
	 */
	if (rsnd_is_gen1(priv))
		return 0;

	if (!mod)
		return 0;

	return rsnd_gen2_dma_addr(io, mod, is_play, is_from);
}

#define MOD_MAX (RSND_MOD_MAX + 1)	/* +Memory */
static void rsnd_dma_of_path(struct rsnd_mod *this,
			     struct rsnd_dai_stream *io,
			     int is_play,
			     struct rsnd_mod **mod_from,
			     struct rsnd_mod **mod_to)
{
	struct rsnd_mod *ssi = rsnd_io_to_mod_ssi(io);
	struct rsnd_mod *src = rsnd_io_to_mod_src(io);
	struct rsnd_mod *ctu = rsnd_io_to_mod_ctu(io);
	struct rsnd_mod *mix = rsnd_io_to_mod_mix(io);
	struct rsnd_mod *dvc = rsnd_io_to_mod_dvc(io);
	struct rsnd_mod *mod[MOD_MAX];
	struct rsnd_mod *mod_start, *mod_end;
	struct rsnd_priv *priv = rsnd_mod_to_priv(this);
	struct device *dev = rsnd_priv_to_dev(priv);
	int nr, i, idx;

	if (!ssi)
		return;

	nr = 0;
	for (i = 0; i < MOD_MAX; i++) {
		mod[i] = NULL;
		nr += !!rsnd_io_to_mod(io, i);
	}

	/*
	 * [S] -*-> [E]
	 * [S] -*-> SRC -o-> [E]
	 * [S] -*-> SRC -> DVC -o-> [E]
	 * [S] -*-> SRC -> CTU -> MIX -> DVC -o-> [E]
	 *
	 * playback	[S] = mem
	 *		[E] = SSI
	 *
	 * capture	[S] = SSI
	 *		[E] = mem
	 *
	 * -*-> Audio DMAC
	 * -o-> Audio DMAC peri peri
	 */
	mod_start = (is_play) ? NULL : ssi;
	mod_end = (is_play) ? ssi : NULL;

	idx = 0;
	mod[idx++] = mod_start;
	for (i = 1; i < nr; i++) {
		if (src) {
			mod[idx++] = src;
			src = NULL;
		} else if (ctu) {
			mod[idx++] = ctu;
			ctu = NULL;
		} else if (mix) {
			mod[idx++] = mix;
			mix = NULL;
		} else if (dvc) {
			mod[idx++] = dvc;
			dvc = NULL;
		}
	}
	mod[idx] = mod_end;

	/*
	 *           | SSI | SRC |
	 *  ---------+-----+-----+
	 *   is_play |  o  |  *  |
	 *  !is_play |  *  |  o  |
	 */
	if ((this == ssi) == (is_play)) {
		*mod_from = mod[idx - 1];
		*mod_to = mod[idx];
	} else {
		*mod_from = mod[0];
		*mod_to = mod[1];
	}

	dev_dbg(dev, "module connection (this is %s[%d])\n",
		rsnd_mod_name(this), rsnd_mod_id(this));
	for (i = 0; i <= idx; i++) {
		dev_dbg(dev, " %s[%d]%s\n",
			rsnd_mod_name(mod[i]), rsnd_mod_id(mod[i]),
			(mod[i] == *mod_from) ? " from" :
			(mod[i] == *mod_to) ? " to" : "");
	}
}
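
/*
 * For example (illustrative), playback through SSI + SRC + DVC gives
 * mod[] = { mem, SRC, DVC, SSI }: when "this" is the SSI the chosen pair is
 * DVC -> SSI (Audio DMAC peri peri), and when "this" is the SRC the pair is
 * mem -> SRC (Audio DMAC), matching the diagram above.
 */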

int rsnd_dma_attach(struct rsnd_dai_stream *io, struct rsnd_mod *mod,
		    struct rsnd_mod **dma_mod)
{
	struct rsnd_mod *mod_from = NULL;
	struct rsnd_mod *mod_to = NULL;
	struct rsnd_priv *priv = rsnd_io_to_priv(io);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	struct device *dev = rsnd_priv_to_dev(priv);
	struct rsnd_mod_ops *ops;
	enum rsnd_mod_type type;
	int (*attach)(struct rsnd_dai_stream *io, struct rsnd_dma *dma,
		      struct rsnd_mod *mod_from, struct rsnd_mod *mod_to);
	int is_play = rsnd_io_is_play(io);
	int ret, dma_id;

	/*
	 * DMA is not available. Fall back to PIO mode.
	 * see
	 *	rsnd_ssi_fallback()
	 *	rsnd_rdai_continuance_probe()
	 */
	if (!dmac)
		return -EAGAIN;

	rsnd_dma_of_path(mod, io, is_play, &mod_from, &mod_to);

	/* for Gen2 */
	if (mod_from && mod_to) {
		ops = &rsnd_dmapp_ops;
		attach = rsnd_dmapp_attach;
		dma_id = dmac->dmapp_num;
		type = RSND_MOD_AUDMAPP;
	} else {
		ops = &rsnd_dmaen_ops;
		attach = rsnd_dmaen_attach;
		dma_id = dmac->dmaen_num;
		type = RSND_MOD_AUDMA;
	}

	/* for Gen1, overwrite */
	if (rsnd_is_gen1(priv)) {
		ops = &rsnd_dmaen_ops;
		attach = rsnd_dmaen_attach;
		dma_id = dmac->dmaen_num;
		type = RSND_MOD_AUDMA;
	}

	if (!(*dma_mod)) {
		struct rsnd_dma *dma;

		dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
		if (!dma)
			return -ENOMEM;

		*dma_mod = rsnd_mod_get(dma);

		ret = rsnd_mod_init(priv, *dma_mod, ops, NULL,
				    rsnd_mod_get_status, type, dma_id);
		if (ret < 0)
			return ret;

		dev_dbg(dev, "%s[%d] %s[%d] -> %s[%d]\n",
			rsnd_mod_name(*dma_mod), rsnd_mod_id(*dma_mod),
			rsnd_mod_name(mod_from), rsnd_mod_id(mod_from),
			rsnd_mod_name(mod_to), rsnd_mod_id(mod_to));

		ret = attach(io, dma, mod_from, mod_to);
		if (ret < 0)
			return ret;

		dma->src_addr = rsnd_dma_addr(io, mod_from, is_play, 1);
		dma->dst_addr = rsnd_dma_addr(io, mod_to, is_play, 0);
		dma->mod_from = mod_from;
		dma->mod_to = mod_to;
	}

	ret = rsnd_dai_connect(*dma_mod, io, type);
	if (ret < 0)
		return ret;

	return 0;
}

int rsnd_dma_probe(struct rsnd_priv *priv)
{
	struct platform_device *pdev = rsnd_priv_to_pdev(priv);
	struct device *dev = rsnd_priv_to_dev(priv);
	struct rsnd_dma_ctrl *dmac;
	struct resource *res;

	/*
	 * for Gen1
	 */
	if (rsnd_is_gen1(priv))
		return 0;

	/*
	 * for Gen2
	 */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "audmapp");
	dmac = devm_kzalloc(dev, sizeof(*dmac), GFP_KERNEL);
	if (!dmac || !res) {
		dev_err(dev, "dma allocate failed\n");
		return 0; /* it will be PIO mode */
	}

	dmac->dmapp_num = 0;
	dmac->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(dmac->base))
		return PTR_ERR(dmac->base);

	priv->dma = dmac;

	return 0;
}