// SPDX-License-Identifier: GPL-2.0+
//
// drivers/dma/imx-dma.c
//
// This file contains a driver for the Freescale i.MX DMA engine
// found on i.MX1/21/27
//
// Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
// Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>

#include <linux/err.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>

#include <asm/irq.h>
#include <linux/platform_data/dma-imx.h>

#include "dmaengine.h"
#define IMXDMA_MAX_CHAN_DESCRIPTORS	16
#define IMX_DMA_CHANNELS	16

#define IMX_DMA_2D_SLOTS	2
#define IMX_DMA_2D_SLOT_A	0
#define IMX_DMA_2D_SLOT_B	1

#define IMX_DMA_LENGTH_LOOP	((unsigned int)-1)
#define IMX_DMA_MEMSIZE_32	(0 << 4)
#define IMX_DMA_MEMSIZE_8	(1 << 4)
#define IMX_DMA_MEMSIZE_16	(2 << 4)
#define IMX_DMA_TYPE_LINEAR	(0 << 10)
#define IMX_DMA_TYPE_2D		(1 << 10)
#define IMX_DMA_TYPE_FIFO	(2 << 10)

#define IMX_DMA_ERR_BURST	(1 << 0)
#define IMX_DMA_ERR_REQUEST	(1 << 1)
#define IMX_DMA_ERR_TRANSFER	(1 << 2)
#define IMX_DMA_ERR_BUFFER	(1 << 3)
#define IMX_DMA_ERR_TIMEOUT	(1 << 4)

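/*
 * IMX_DMA_LENGTH_LOOP marks a cyclic descriptor: imxdma_sg_next() skips
 * the length bookkeeping for it, so the transfer loops until terminated.
 * The IMX_DMA_MEMSIZE_x and IMX_DMA_TYPE_x values mirror the CCR source
 * field layout (bits 4-5 and 10-11) and can therefore be shifted left
 * by two to form the corresponding destination fields.
 */
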
#define DMA_DCR		0x00	/* Control Register */
#define DMA_DISR	0x04	/* Interrupt status Register */
#define DMA_DIMR	0x08	/* Interrupt mask Register */
#define DMA_DBTOSR	0x0c	/* Burst timeout status Register */
#define DMA_DRTOSR	0x10	/* Request timeout Register */
#define DMA_DSESR	0x14	/* Transfer Error Status Register */
#define DMA_DBOSR	0x18	/* Buffer overflow status Register */
#define DMA_DBTOCR	0x1c	/* Burst timeout control Register */
#define DMA_WSRA	0x40	/* W-Size Register A */
#define DMA_XSRA	0x44	/* X-Size Register A */
#define DMA_YSRA	0x48	/* Y-Size Register A */
#define DMA_WSRB	0x4c	/* W-Size Register B */
#define DMA_XSRB	0x50	/* X-Size Register B */
#define DMA_YSRB	0x54	/* Y-Size Register B */
#define DMA_SAR(x)	(0x80 + ((x) << 6))	/* Source Address Registers */
#define DMA_DAR(x)	(0x84 + ((x) << 6))	/* Destination Address Registers */
#define DMA_CNTR(x)	(0x88 + ((x) << 6))	/* Count Registers */
#define DMA_CCR(x)	(0x8c + ((x) << 6))	/* Control Registers */
#define DMA_RSSR(x)	(0x90 + ((x) << 6))	/* Request source select Registers */
#define DMA_BLR(x)	(0x94 + ((x) << 6))	/* Burst length Registers */
#define DMA_RTOR(x)	(0x98 + ((x) << 6))	/* Request timeout Registers */
#define DMA_BUCR(x)	(0x98 + ((x) << 6))	/* Bus Utilization Registers */
#define DMA_CCNR(x)	(0x9C + ((x) << 6))	/* Channel counter Registers */

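/*
 * The per-channel registers start at 0x80 and repeat every 0x40 bytes,
 * hence the "(x) << 6" channel stride above. DMA_RTOR and DMA_BUCR
 * share offset 0x98 by design: the register acts as the request
 * timeout when request mode (CCR_REN) is enabled and as the bus
 * utilization counter otherwise.
 */
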
#define DCR_DRST	(1<<1)
#define DCR_DEN		(1<<0)
#define DBTOCR_EN	(1<<15)
#define DBTOCR_CNT(x)	((x) & 0x7fff)
#define CNTR_CNT(x)	((x) & 0xffffff)
#define CCR_ACRPT	(1<<14)
#define CCR_DMOD_LINEAR	(0x0 << 12)
#define CCR_DMOD_2D	(0x1 << 12)
#define CCR_DMOD_FIFO	(0x2 << 12)
#define CCR_DMOD_EOBFIFO	(0x3 << 12)
#define CCR_SMOD_LINEAR	(0x0 << 10)
#define CCR_SMOD_2D	(0x1 << 10)
#define CCR_SMOD_FIFO	(0x2 << 10)
#define CCR_SMOD_EOBFIFO	(0x3 << 10)
#define CCR_MDIR_DEC	(1<<9)
#define CCR_MSEL_B	(1<<8)
#define CCR_DSIZ_32	(0x0 << 6)
#define CCR_DSIZ_8	(0x1 << 6)
#define CCR_DSIZ_16	(0x2 << 6)
#define CCR_SSIZ_32	(0x0 << 4)
#define CCR_SSIZ_8	(0x1 << 4)
#define CCR_SSIZ_16	(0x2 << 4)
#define CCR_REN		(1<<3)
#define CCR_RPT		(1<<2)
#define CCR_FRC		(1<<1)
#define CCR_CEN		(1<<0)
#define RTOR_EN		(1<<15)
#define RTOR_CLK	(1<<14)
#define RTOR_PSC	(1<<13)

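/*
 * A channel's CCR value is composed from the mode/size fields above.
 * As an illustration, a 16-bit peripheral-to-memory transfer uses
 *
 *	CCR_SMOD_FIFO | CCR_SSIZ_16 | CCR_DMOD_LINEAR | CCR_DSIZ_32 | CCR_REN
 *
 * which is exactly what imxdma_config_write() builds into
 * ccr_from_device for a 2-byte bus width.
 */
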
enum imxdma_prep_type {
	IMXDMA_DESC_MEMCPY,
	IMXDMA_DESC_INTERLEAVED,
	IMXDMA_DESC_SLAVE_SG,
	IMXDMA_DESC_CYCLIC,
};

struct imx_dma_2d_config {
	u16 xsr;
	u16 ysr;
	u16 wsr;
	int count;
};

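/*
 * The engine provides two global 2D addressing slots (A and B) shared
 * by all channels; slots_2d[] reference-counts their users so that
 * concurrent interleaved transfers with identical x/y/w geometry can
 * share a slot (see imxdma_xfer_desc()).
 */
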
struct imxdma_desc {
	struct list_head node;
	struct dma_async_tx_descriptor desc;
	enum dma_status status;
	dma_addr_t src;
	dma_addr_t dest;
	size_t len;
	enum dma_transfer_direction direction;
	enum imxdma_prep_type type;
	/* For memcpy and interleaved */
	unsigned int config_port;
	unsigned int config_mem;
	/* For interleaved transfers */
	unsigned int x;
	unsigned int y;
	unsigned int w;
	/* For slave sg and cyclic */
	struct scatterlist *sg;
	unsigned int sgcount;
};

struct imxdma_channel {
	int hw_chaining;
	struct timer_list watchdog;
	struct imxdma_engine *imxdma;
	unsigned int channel;

	struct tasklet_struct dma_tasklet;
	struct list_head ld_free;
	struct list_head ld_queue;
	struct list_head ld_active;
	int descs_allocated;
	enum dma_slave_buswidth word_size;
	dma_addr_t per_address;
	u32 watermark_level;
	struct dma_chan chan;
	struct dma_async_tx_descriptor desc;
	enum dma_status status;
	int dma_request;
	struct scatterlist *sg_list;
	u32 ccr_from_device;
	u32 ccr_to_device;
	bool enabled_2d;
	int slot_2d;
	unsigned int irq;
	struct dma_slave_config config;
};

enum imx_dma_type {
	IMX1_DMA,
	IMX21_DMA,
	IMX27_DMA,
};

struct imxdma_engine {
	struct device *dev;
	struct dma_device dma_device;
	void __iomem *base;
	struct clk *dma_ahb;
	struct clk *dma_ipg;
	spinlock_t lock;
	struct imx_dma_2d_config slots_2d[IMX_DMA_2D_SLOTS];
	struct imxdma_channel channel[IMX_DMA_CHANNELS];
	enum imx_dma_type devtype;
	unsigned int irq;
	unsigned int irq_err;
};

struct imxdma_filter_data {
	struct imxdma_engine *imxdma;
	int request;
};

static const struct of_device_id imx_dma_of_dev_id[] = {
	{
		.compatible = "fsl,imx1-dma", .data = (const void *)IMX1_DMA,
	}, {
		.compatible = "fsl,imx21-dma", .data = (const void *)IMX21_DMA,
	}, {
		.compatible = "fsl,imx27-dma", .data = (const void *)IMX27_DMA,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(of, imx_dma_of_dev_id);

static inline int is_imx1_dma(struct imxdma_engine *imxdma)
{
	return imxdma->devtype == IMX1_DMA;
}

static inline int is_imx27_dma(struct imxdma_engine *imxdma)
{
	return imxdma->devtype == IMX27_DMA;
}

static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct imxdma_channel, chan);
}

static inline bool imxdma_chan_is_doing_cyclic(struct imxdma_channel *imxdmac)
{
	struct imxdma_desc *desc;

	if (!list_empty(&imxdmac->ld_active)) {
		desc = list_first_entry(&imxdmac->ld_active,
					struct imxdma_desc, node);
		if (desc->type == IMXDMA_DESC_CYCLIC)
			return true;
	}
	return false;
}

static void imx_dmav1_writel(struct imxdma_engine *imxdma, unsigned val,
			     unsigned offset)
{
	__raw_writel(val, imxdma->base + offset);
}

static unsigned imx_dmav1_readl(struct imxdma_engine *imxdma, unsigned offset)
{
	return __raw_readl(imxdma->base + offset);
}

static int imxdma_hw_chain(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;

	if (is_imx27_dma(imxdma))
		return imxdmac->hw_chaining;
	else
		return 0;
}

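/*
 * Hardware descriptor chaining (auto-repeat via CCR_RPT/CCR_ACRPT) is
 * only available on i.MX27; on the earlier parts every scatter-gather
 * chunk is reprogrammed from the completion interrupt instead (see
 * dma_irq_handle_channel()). Note that imxdma_config_write() currently
 * leaves hw_chaining disabled for slave channels.
 */
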
/*
 * imxdma_sg_next - prepare next chunk for scatter-gather DMA emulation
 */
static inline void imxdma_sg_next(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct scatterlist *sg = d->sg;
	size_t now;

	now = min_t(size_t, d->len, sg_dma_len(sg));
	if (d->len != IMX_DMA_LENGTH_LOOP)
		d->len -= now;

	if (d->direction == DMA_DEV_TO_MEM)
		imx_dmav1_writel(imxdma, sg->dma_address,
				 DMA_DAR(imxdmac->channel));
	else
		imx_dmav1_writel(imxdma, sg->dma_address,
				 DMA_SAR(imxdmac->channel));

	imx_dmav1_writel(imxdma, now, DMA_CNTR(imxdmac->channel));

	dev_dbg(imxdma->dev, " %s channel: %d dst 0x%08x, src 0x%08x, "
		"size 0x%08x\n", __func__, imxdmac->channel,
		imx_dmav1_readl(imxdma, DMA_DAR(imxdmac->channel)),
		imx_dmav1_readl(imxdma, DMA_SAR(imxdmac->channel)),
		imx_dmav1_readl(imxdma, DMA_CNTR(imxdmac->channel)));
}

static void imxdma_enable_hw(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;
	unsigned long flags;

	dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);

	local_irq_save(flags);

	imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) &
			 ~(1 << channel), DMA_DIMR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) |
			 CCR_CEN | CCR_ACRPT, DMA_CCR(channel));

	if (!is_imx1_dma(imxdma) &&
			d->sg && imxdma_hw_chain(imxdmac)) {
		d->sg = sg_next(d->sg);
		if (d->sg) {
			u32 tmp;
			imxdma_sg_next(d);
			tmp = imx_dmav1_readl(imxdma, DMA_CCR(channel));
			imx_dmav1_writel(imxdma, tmp | CCR_RPT | CCR_ACRPT,
					 DMA_CCR(channel));
		}
	}

	local_irq_restore(flags);
}

static void imxdma_disable_hw(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;
	unsigned long flags;

	dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);

	if (imxdma_hw_chain(imxdmac))
		del_timer(&imxdmac->watchdog);

	local_irq_save(flags);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) |
			 (1 << channel), DMA_DIMR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) &
			 ~CCR_CEN, DMA_CCR(channel));
	imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
	local_irq_restore(flags);
}

static void imxdma_watchdog(struct timer_list *t)
{
	struct imxdma_channel *imxdmac = from_timer(imxdmac, t, watchdog);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;

	imx_dmav1_writel(imxdma, 0, DMA_CCR(channel));

	/* Tasklet watchdog error handler */
	tasklet_schedule(&imxdmac->dma_tasklet);
	dev_dbg(imxdma->dev, "channel %d: watchdog timeout!\n",
		imxdmac->channel);
}

static irqreturn_t imxdma_err_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	unsigned int err_mask;
	int i, disr;
	int errcode;

	disr = imx_dmav1_readl(imxdma, DMA_DISR);

	err_mask = imx_dmav1_readl(imxdma, DMA_DBTOSR) |
		   imx_dmav1_readl(imxdma, DMA_DRTOSR) |
		   imx_dmav1_readl(imxdma, DMA_DSESR) |
		   imx_dmav1_readl(imxdma, DMA_DBOSR);

	if (!err_mask)
		return IRQ_HANDLED;

	imx_dmav1_writel(imxdma, disr & err_mask, DMA_DISR);

	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (!(err_mask & (1 << i)))
			continue;
		errcode = 0;

		if (imx_dmav1_readl(imxdma, DMA_DBTOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DBTOSR);
			errcode |= IMX_DMA_ERR_BURST;
		}
		if (imx_dmav1_readl(imxdma, DMA_DRTOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DRTOSR);
			errcode |= IMX_DMA_ERR_REQUEST;
		}
		if (imx_dmav1_readl(imxdma, DMA_DSESR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DSESR);
			errcode |= IMX_DMA_ERR_TRANSFER;
		}
		if (imx_dmav1_readl(imxdma, DMA_DBOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DBOSR);
			errcode |= IMX_DMA_ERR_BUFFER;
		}
		/* Tasklet error handler */
		tasklet_schedule(&imxdma->channel[i].dma_tasklet);

		dev_warn(imxdma->dev,
			 "DMA timeout on channel %d -%s%s%s%s\n", i,
			 errcode & IMX_DMA_ERR_BURST ? " burst" : "",
			 errcode & IMX_DMA_ERR_REQUEST ? " request" : "",
			 errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
			 errcode & IMX_DMA_ERR_BUFFER ? " buffer" : "");
	}
	return IRQ_HANDLED;
}

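/*
 * The four error status registers polled above are write-one-to-clear,
 * which is why each branch writes the per-channel bit straight back
 * after reading it.
 */
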
static void dma_irq_handle_channel(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int chno = imxdmac->channel;
	struct imxdma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);
	if (list_empty(&imxdmac->ld_active)) {
		spin_unlock_irqrestore(&imxdma->lock, flags);
		goto out;
	}

	desc = list_first_entry(&imxdmac->ld_active,
				struct imxdma_desc,
				node);
	spin_unlock_irqrestore(&imxdma->lock, flags);

	if (desc->sg) {
		u32 tmp;
		desc->sg = sg_next(desc->sg);

		if (desc->sg) {
			imxdma_sg_next(desc);

			tmp = imx_dmav1_readl(imxdma, DMA_CCR(chno));

			if (imxdma_hw_chain(imxdmac)) {
				/* FIXME: The timeout should probably be
				 * configurable
				 */
				mod_timer(&imxdmac->watchdog,
					  jiffies + msecs_to_jiffies(500));

				tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT;
				imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));
			} else {
				imx_dmav1_writel(imxdma, tmp & ~CCR_CEN,
						 DMA_CCR(chno));
				tmp |= CCR_CEN;
			}

			imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));

			if (imxdma_chan_is_doing_cyclic(imxdmac))
				/* Tasklet progression */
				tasklet_schedule(&imxdmac->dma_tasklet);

			return;
		}

		if (imxdma_hw_chain(imxdmac)) {
			del_timer(&imxdmac->watchdog);
			return;
		}
	}

out:
	imx_dmav1_writel(imxdma, 0, DMA_CCR(chno));
	/* Tasklet irq */
	tasklet_schedule(&imxdmac->dma_tasklet);
}

static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	int i, disr;

	if (!is_imx1_dma(imxdma))
		imxdma_err_handler(irq, dev_id);

	disr = imx_dmav1_readl(imxdma, DMA_DISR);

	dev_dbg(imxdma->dev, "%s called, disr=0x%08x\n", __func__, disr);

	imx_dmav1_writel(imxdma, disr, DMA_DISR);
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (disr & (1 << i))
			dma_irq_handle_channel(&imxdma->channel[i]);
	}

	return IRQ_HANDLED;
}

static int imxdma_xfer_desc(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int slot = -1;
	int i;

	/* Configure and enable */
	switch (d->type) {
	case IMXDMA_DESC_INTERLEAVED:
		/* Try to get a free 2D slot */
		for (i = 0; i < IMX_DMA_2D_SLOTS; i++) {
			if ((imxdma->slots_2d[i].count > 0) &&
			((imxdma->slots_2d[i].xsr != d->x) ||
			(imxdma->slots_2d[i].ysr != d->y) ||
			(imxdma->slots_2d[i].wsr != d->w)))
				continue;
			slot = i;
			break;
		}
		if (slot < 0)
			return -EBUSY;

		imxdma->slots_2d[slot].xsr = d->x;
		imxdma->slots_2d[slot].ysr = d->y;
		imxdma->slots_2d[slot].wsr = d->w;
		imxdma->slots_2d[slot].count++;

		imxdmac->slot_2d = slot;
		imxdmac->enabled_2d = true;

		if (slot == IMX_DMA_2D_SLOT_A) {
			d->config_mem &= ~CCR_MSEL_B;
			d->config_port &= ~CCR_MSEL_B;
			imx_dmav1_writel(imxdma, d->x, DMA_XSRA);
			imx_dmav1_writel(imxdma, d->y, DMA_YSRA);
			imx_dmav1_writel(imxdma, d->w, DMA_WSRA);
		} else {
			d->config_mem |= CCR_MSEL_B;
			d->config_port |= CCR_MSEL_B;
			imx_dmav1_writel(imxdma, d->x, DMA_XSRB);
			imx_dmav1_writel(imxdma, d->y, DMA_YSRB);
			imx_dmav1_writel(imxdma, d->w, DMA_WSRB);
		}
		/*
		 * We fall-through here intentionally, since a 2D transfer is
		 * similar to MEMCPY just adding the 2D slot configuration.
		 */
		fallthrough;
	case IMXDMA_DESC_MEMCPY:
		imx_dmav1_writel(imxdma, d->src, DMA_SAR(imxdmac->channel));
		imx_dmav1_writel(imxdma, d->dest, DMA_DAR(imxdmac->channel));
		imx_dmav1_writel(imxdma, d->config_mem | (d->config_port << 2),
				 DMA_CCR(imxdmac->channel));

		imx_dmav1_writel(imxdma, d->len, DMA_CNTR(imxdmac->channel));

		dev_dbg(imxdma->dev,
			"%s channel: %d dest=0x%08llx src=0x%08llx dma_length=%zu\n",
			__func__, imxdmac->channel,
			(unsigned long long)d->dest,
			(unsigned long long)d->src, d->len);

		break;
	/* Cyclic transfer is the same as slave_sg with special sg configuration. */
	case IMXDMA_DESC_CYCLIC:
	case IMXDMA_DESC_SLAVE_SG:
		if (d->direction == DMA_DEV_TO_MEM) {
			imx_dmav1_writel(imxdma, imxdmac->per_address,
					 DMA_SAR(imxdmac->channel));
			imx_dmav1_writel(imxdma, imxdmac->ccr_from_device,
					 DMA_CCR(imxdmac->channel));

			dev_dbg(imxdma->dev,
				"%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (dev2mem)\n",
				__func__, imxdmac->channel,
				d->sg, d->sgcount, d->len,
				(unsigned long long)imxdmac->per_address);
		} else if (d->direction == DMA_MEM_TO_DEV) {
			imx_dmav1_writel(imxdma, imxdmac->per_address,
					 DMA_DAR(imxdmac->channel));
			imx_dmav1_writel(imxdma, imxdmac->ccr_to_device,
					 DMA_CCR(imxdmac->channel));

			dev_dbg(imxdma->dev,
				"%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (mem2dev)\n",
				__func__, imxdmac->channel,
				d->sg, d->sgcount, d->len,
				(unsigned long long)imxdmac->per_address);
		} else {
			dev_err(imxdma->dev, "%s channel: %d bad dma mode\n",
				__func__, imxdmac->channel);
			return -EINVAL;
		}

		imxdma_sg_next(d);

		break;
	default:
		return -EINVAL;
	}
	imxdma_enable_hw(d);
	return 0;
}

static void imxdma_tasklet(struct tasklet_struct *t)
{
	struct imxdma_channel *imxdmac = from_tasklet(imxdmac, t, dma_tasklet);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc, *next_desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);

	if (list_empty(&imxdmac->ld_active)) {
		/* Someone might have called terminate all */
		spin_unlock_irqrestore(&imxdma->lock, flags);
		return;
	}
	desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);

	/* If we are dealing with a cyclic descriptor, keep it on ld_active
	 * and dont mark the descriptor as complete.
	 * Only in non-cyclic cases it would be marked as complete
	 */
	if (imxdma_chan_is_doing_cyclic(imxdmac))
		goto out;
	else
		dma_cookie_complete(&desc->desc);

	/* Free 2D slot if it was an interleaved transfer */
	if (imxdmac->enabled_2d) {
		imxdma->slots_2d[imxdmac->slot_2d].count--;
		imxdmac->enabled_2d = false;
	}

	list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);

	if (!list_empty(&imxdmac->ld_queue)) {
		next_desc = list_first_entry(&imxdmac->ld_queue,
					     struct imxdma_desc, node);
		list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
		if (imxdma_xfer_desc(next_desc) < 0)
			dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
				 __func__, imxdmac->channel);
	}
out:
	spin_unlock_irqrestore(&imxdma->lock, flags);

	dmaengine_desc_get_callback_invoke(&desc->desc, NULL);
}

static int imxdma_terminate_all(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	unsigned long flags;

	imxdma_disable_hw(imxdmac);

	spin_lock_irqsave(&imxdma->lock, flags);
	list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
	list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
	spin_unlock_irqrestore(&imxdma->lock, flags);
	return 0;
}

static int imxdma_config_write(struct dma_chan *chan,
			       struct dma_slave_config *dmaengine_cfg,
			       enum dma_transfer_direction direction)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	unsigned int mode = 0;

	if (direction == DMA_DEV_TO_MEM) {
		imxdmac->per_address = dmaengine_cfg->src_addr;
		imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
		imxdmac->word_size = dmaengine_cfg->src_addr_width;
	} else {
		imxdmac->per_address = dmaengine_cfg->dst_addr;
		imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
		imxdmac->word_size = dmaengine_cfg->dst_addr_width;
	}

	switch (imxdmac->word_size) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		mode = IMX_DMA_MEMSIZE_8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		mode = IMX_DMA_MEMSIZE_16;
		break;
	default:
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		mode = IMX_DMA_MEMSIZE_32;
		break;
	}

	imxdmac->hw_chaining = 0;

	imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) |
		((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) |
		CCR_REN;
	imxdmac->ccr_to_device =
		(IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) |
		((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN;
	imx_dmav1_writel(imxdma, imxdmac->dma_request,
			 DMA_RSSR(imxdmac->channel));

	/* Set burst length */
	imx_dmav1_writel(imxdma, imxdmac->watermark_level *
			 imxdmac->word_size, DMA_BLR(imxdmac->channel));

	return 0;
}

static int imxdma_config(struct dma_chan *chan,
			 struct dma_slave_config *dmaengine_cfg)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);

	memcpy(&imxdmac->config, dmaengine_cfg, sizeof(*dmaengine_cfg));

	return 0;
}

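/*
 * imxdma_config() only caches the slave configuration; the hardware is
 * actually programmed by imxdma_config_write() when a slave_sg or
 * cyclic descriptor is prepared, so the configuration in effect is the
 * one most recently passed to dmaengine_slave_config().
 */
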
static enum dma_status imxdma_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}

static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);
	list_move_tail(imxdmac->ld_free.next, &imxdmac->ld_queue);
	cookie = dma_cookie_assign(tx);
	spin_unlock_irqrestore(&imxdma->lock, flags);

	return cookie;
}

static int imxdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imx_dma_data *data = chan->private;

	if (data != NULL)
		imxdmac->dma_request = data->dma_request;

	while (imxdmac->descs_allocated < IMXDMA_MAX_CHAN_DESCRIPTORS) {
		struct imxdma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc)
			break;
		memset(&desc->desc, 0, sizeof(struct dma_async_tx_descriptor));
		dma_async_tx_descriptor_init(&desc->desc, chan);
		desc->desc.tx_submit = imxdma_tx_submit;
		/* txd.flags will be overwritten in prep funcs */
		desc->desc.flags = DMA_CTRL_ACK;
		desc->status = DMA_COMPLETE;

		list_add_tail(&desc->node, &imxdmac->ld_free);
		imxdmac->descs_allocated++;
	}

	if (!imxdmac->descs_allocated)
		return -ENOMEM;

	return imxdmac->descs_allocated;
}

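/*
 * Each channel owns a fixed pool of IMXDMA_MAX_CHAN_DESCRIPTORS
 * descriptors allocated above; the prep functions below return NULL
 * once ld_free is empty, so descriptors must complete (and be moved
 * back to ld_free by the tasklet) before new work can be prepared.
 */
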
static void imxdma_free_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);

	imxdma_disable_hw(imxdmac);
	list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
	list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);

	spin_unlock_irqrestore(&imxdma->lock, flags);

	list_for_each_entry_safe(desc, _desc, &imxdmac->ld_free, node) {
		kfree(desc);
		imxdmac->descs_allocated--;
	}
	INIT_LIST_HEAD(&imxdmac->ld_free);

	kfree(imxdmac->sg_list);
	imxdmac->sg_list = NULL;
}

static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct scatterlist *sg;
	int i, dma_length = 0;
	struct imxdma_desc *desc;

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	for_each_sg(sgl, sg, sg_len, i) {
		dma_length += sg_dma_len(sg);
	}

	imxdma_config_write(chan, &imxdmac->config, direction);

	switch (imxdmac->word_size) {
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		if (sg_dma_len(sgl) & 3 || sgl->dma_address & 3)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		if (sg_dma_len(sgl) & 1 || sgl->dma_address & 1)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		break;
	default:
		return NULL;
	}

	desc->type = IMXDMA_DESC_SLAVE_SG;
	desc->sg = sgl;
	desc->sgcount = sg_len;
	desc->len = dma_length;
	desc->direction = direction;
	if (direction == DMA_DEV_TO_MEM)
		desc->src = imxdmac->per_address;
	else
		desc->dest = imxdmac->per_address;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

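/*
 * Note: the buswidth alignment check above only inspects the first
 * scatterlist element; callers are expected to keep every element
 * aligned to the configured word size.
 */
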
static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	int i;
	unsigned int periods = buf_len / period_len;

	dev_dbg(imxdma->dev, "%s channel: %d buf_len=%zu period_len=%zu\n",
		__func__, imxdmac->channel, buf_len, period_len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	kfree(imxdmac->sg_list);

	imxdmac->sg_list = kcalloc(periods + 1,
			sizeof(struct scatterlist), GFP_ATOMIC);
	if (!imxdmac->sg_list)
		return NULL;

	sg_init_table(imxdmac->sg_list, periods);

	for (i = 0; i < periods; i++) {
		sg_assign_page(&imxdmac->sg_list[i], NULL);
		imxdmac->sg_list[i].offset = 0;
		imxdmac->sg_list[i].dma_address = dma_addr;
		sg_dma_len(&imxdmac->sg_list[i]) = period_len;
		dma_addr += period_len;
	}

	/* close the loop */
	sg_chain(imxdmac->sg_list, periods + 1, imxdmac->sg_list);

	desc->type = IMXDMA_DESC_CYCLIC;
	desc->sg = imxdmac->sg_list;
	desc->sgcount = periods;
	desc->len = IMX_DMA_LENGTH_LOOP;
	desc->direction = direction;
	if (direction == DMA_DEV_TO_MEM)
		desc->src = imxdmac->per_address;
	else
		desc->dest = imxdmac->per_address;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	imxdma_config_write(chan, &imxdmac->config, direction);

	return &desc->desc;
}

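/*
 * Illustrative dmaengine client usage of the cyclic API (sketch only,
 * not part of this driver; "fifo_phys", "buf_dma" and "period_done"
 * are hypothetical):
 *
 *	struct dma_slave_config cfg = {
 *		.src_addr	= fifo_phys,
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,
 *		.src_maxburst	= 4,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = dmaengine_prep_dma_cyclic(chan, buf_dma, buf_len,
 *					 period_len, DMA_DEV_TO_MEM, 0);
 *	desc->callback = period_done;
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */
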
static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest,
	dma_addr_t src, size_t len, unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	dev_dbg(imxdma->dev, "%s channel: %d src=0x%llx dst=0x%llx len=%zu\n",
		__func__, imxdmac->channel, (unsigned long long)src,
		(unsigned long long)dest, len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	desc->type = IMXDMA_DESC_MEMCPY;
	desc->src = src;
	desc->dest = dest;
	desc->len = len;
	desc->direction = DMA_MEM_TO_MEM;
	desc->config_port = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->config_mem = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

static struct dma_async_tx_descriptor *imxdma_prep_dma_interleaved(
	struct dma_chan *chan, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	dev_dbg(imxdma->dev, "%s channel: %d src_start=0x%llx dst_start=0x%llx\n"
		"   src_sgl=%s dst_sgl=%s numf=%zu frame_size=%zu\n", __func__,
		imxdmac->channel, (unsigned long long)xt->src_start,
		(unsigned long long) xt->dst_start,
		xt->src_sgl ? "true" : "false", xt->dst_sgl ? "true" : "false",
		xt->numf, xt->frame_size);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	if (xt->frame_size != 1 || xt->numf <= 0 || xt->dir != DMA_MEM_TO_MEM)
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	desc->type = IMXDMA_DESC_INTERLEAVED;
	desc->src = xt->src_start;
	desc->dest = xt->dst_start;
	desc->x = xt->sgl[0].size;
	desc->y = xt->numf;
	desc->w = xt->sgl[0].icg + desc->x;
	desc->len = desc->x * desc->y;
	desc->direction = DMA_MEM_TO_MEM;
	desc->config_port = IMX_DMA_MEMSIZE_32;
	desc->config_mem = IMX_DMA_MEMSIZE_32;
	if (xt->src_sgl)
		desc->config_mem |= IMX_DMA_TYPE_2D;
	if (xt->dst_sgl)
		desc->config_port |= IMX_DMA_TYPE_2D;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

static void imxdma_issue_pending(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);
	if (list_empty(&imxdmac->ld_active) &&
	    !list_empty(&imxdmac->ld_queue)) {
		desc = list_first_entry(&imxdmac->ld_queue,
					struct imxdma_desc, node);

		if (imxdma_xfer_desc(desc) < 0) {
			dev_warn(imxdma->dev,
				 "%s: channel: %d couldn't issue DMA xfer\n",
				 __func__, imxdmac->channel);
		} else {
			list_move_tail(imxdmac->ld_queue.next,
				       &imxdmac->ld_active);
		}
	}
	spin_unlock_irqrestore(&imxdma->lock, flags);
}

static bool imxdma_filter_fn(struct dma_chan *chan, void *param)
{
	struct imxdma_filter_data *fdata = param;
	struct imxdma_channel *imxdma_chan = to_imxdma_chan(chan);

	if (chan->device->dev != fdata->imxdma->dev)
		return false;

	imxdma_chan->dma_request = fdata->request;
	chan->private = NULL;

	return true;
}

static struct dma_chan *imxdma_xlate(struct of_phandle_args *dma_spec,
				     struct of_dma *ofdma)
{
	int count = dma_spec->args_count;
	struct imxdma_engine *imxdma = ofdma->of_dma_data;
	struct imxdma_filter_data fdata = {
		.imxdma = imxdma,
	};

	if (count != 1)
		return NULL;

	fdata.request = dma_spec->args[0];

	return dma_request_channel(imxdma->dma_device.cap_mask,
				   imxdma_filter_fn, &fdata);
}

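/*
 * The single DT cell consumed by imxdma_xlate() is the request line,
 * e.g. (illustrative device tree snippet, request number hypothetical):
 *
 *	dmas = <&dma 8>;
 *	dma-names = "rx";
 */
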
static int __init imxdma_probe(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma;
	struct resource *res;
	int ret, i;
	int irq, irq_err;

	imxdma = devm_kzalloc(&pdev->dev, sizeof(*imxdma), GFP_KERNEL);
	if (!imxdma)
		return -ENOMEM;

	imxdma->dev = &pdev->dev;
	imxdma->devtype = (uintptr_t)of_device_get_match_data(&pdev->dev);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	imxdma->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(imxdma->base))
		return PTR_ERR(imxdma->base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	imxdma->dma_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(imxdma->dma_ipg))
		return PTR_ERR(imxdma->dma_ipg);

	imxdma->dma_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(imxdma->dma_ahb))
		return PTR_ERR(imxdma->dma_ahb);

	ret = clk_prepare_enable(imxdma->dma_ipg);
	if (ret)
		return ret;
	ret = clk_prepare_enable(imxdma->dma_ahb);
	if (ret)
		goto disable_dma_ipg_clk;

	/* reset DMA module */
	imx_dmav1_writel(imxdma, DCR_DRST, DMA_DCR);

	if (is_imx1_dma(imxdma)) {
		ret = devm_request_irq(&pdev->dev, irq,
				       dma_irq_handler, 0, "DMA", imxdma);
		if (ret) {
			dev_warn(imxdma->dev, "Can't register IRQ for DMA\n");
			goto disable_dma_ahb_clk;
		}
		imxdma->irq = irq;

		irq_err = platform_get_irq(pdev, 1);
		if (irq_err < 0) {
			ret = irq_err;
			goto disable_dma_ahb_clk;
		}

		ret = devm_request_irq(&pdev->dev, irq_err,
				       imxdma_err_handler, 0, "DMA", imxdma);
		if (ret) {
			dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n");
			goto disable_dma_ahb_clk;
		}
		imxdma->irq_err = irq_err;
	}

	/* enable DMA module */
	imx_dmav1_writel(imxdma, DCR_DEN, DMA_DCR);

	/* clear all interrupts */
	imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DISR);

	/* disable interrupts */
	imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR);

	INIT_LIST_HEAD(&imxdma->dma_device.channels);

	dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_MEMCPY, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_INTERLEAVE, imxdma->dma_device.cap_mask);

	/* Initialize 2D global parameters */
	for (i = 0; i < IMX_DMA_2D_SLOTS; i++)
		imxdma->slots_2d[i].count = 0;

	spin_lock_init(&imxdma->lock);

	/* Initialize channel parameters */
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];

		if (!is_imx1_dma(imxdma)) {
			ret = devm_request_irq(&pdev->dev, irq + i,
					       dma_irq_handler, 0, "DMA", imxdma);
			if (ret) {
				dev_warn(imxdma->dev, "Can't register IRQ %d "
					 "for DMA channel %d\n",
					 irq + i, i);
				goto disable_dma_ahb_clk;
			}

			imxdmac->irq = irq + i;
			timer_setup(&imxdmac->watchdog, imxdma_watchdog, 0);
		}

		imxdmac->imxdma = imxdma;

		INIT_LIST_HEAD(&imxdmac->ld_queue);
		INIT_LIST_HEAD(&imxdmac->ld_free);
		INIT_LIST_HEAD(&imxdmac->ld_active);

		tasklet_setup(&imxdmac->dma_tasklet, imxdma_tasklet);
		imxdmac->chan.device = &imxdma->dma_device;
		dma_cookie_init(&imxdmac->chan);
		imxdmac->channel = i;

		/* Add the channel to the DMAC list */
		list_add_tail(&imxdmac->chan.device_node,
			      &imxdma->dma_device.channels);
	}

	imxdma->dma_device.dev = &pdev->dev;

	imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources;
	imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources;
	imxdma->dma_device.device_tx_status = imxdma_tx_status;
	imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg;
	imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
	imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy;
	imxdma->dma_device.device_prep_interleaved_dma = imxdma_prep_dma_interleaved;
	imxdma->dma_device.device_config = imxdma_config;
	imxdma->dma_device.device_terminate_all = imxdma_terminate_all;
	imxdma->dma_device.device_issue_pending = imxdma_issue_pending;

	platform_set_drvdata(pdev, imxdma);

	imxdma->dma_device.copy_align = DMAENGINE_ALIGN_4_BYTES;
	dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);

	ret = dma_async_device_register(&imxdma->dma_device);
	if (ret) {
		dev_err(&pdev->dev, "unable to register\n");
		goto disable_dma_ahb_clk;
	}

	if (pdev->dev.of_node) {
		ret = of_dma_controller_register(pdev->dev.of_node,
						 imxdma_xlate, imxdma);
		if (ret) {
			dev_err(&pdev->dev, "unable to register of_dma_controller\n");
			goto err_of_dma_controller;
		}
	}

	return 0;

err_of_dma_controller:
	dma_async_device_unregister(&imxdma->dma_device);
disable_dma_ahb_clk:
	clk_disable_unprepare(imxdma->dma_ahb);
disable_dma_ipg_clk:
	clk_disable_unprepare(imxdma->dma_ipg);
	return ret;
}

static void imxdma_free_irq(struct platform_device *pdev, struct imxdma_engine *imxdma)
{
	int i;

	if (is_imx1_dma(imxdma)) {
		disable_irq(imxdma->irq);
		disable_irq(imxdma->irq_err);
	}

	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];

		if (!is_imx1_dma(imxdma))
			disable_irq(imxdmac->irq);

		tasklet_kill(&imxdmac->dma_tasklet);
	}
}

static int imxdma_remove(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma = platform_get_drvdata(pdev);

	imxdma_free_irq(pdev, imxdma);

	dma_async_device_unregister(&imxdma->dma_device);

	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);

	clk_disable_unprepare(imxdma->dma_ipg);
	clk_disable_unprepare(imxdma->dma_ahb);

	return 0;
}

static struct platform_driver imxdma_driver = {
	.driver		= {
		.name	= "imx-dma",
		.of_match_table = imx_dma_of_dev_id,
	},
	.remove		= imxdma_remove,
};

static int __init imxdma_module_init(void)
{
	return platform_driver_probe(&imxdma_driver, imxdma_probe);
}
subsys_initcall(imxdma_module_init);

MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX dma driver");
MODULE_LICENSE("GPL");