/*
 * drivers/dma/fsl-edma.c
 *
 * Copyright 2013-2014 Freescale Semiconductor, Inc.
 *
 * Driver for the Freescale eDMA engine with flexible channel multiplexing
 * capability for DMA request sources. The eDMA block can be found on some
 * Vybrid and Layerscape SoCs.
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_dma.h>

#include "virt-dma.h"

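/*
 * Register offsets below follow the eDMA memory map: global control and
 * status registers sit at the base of the block, while the per-channel
 * Transfer Control Descriptors (TCDs) form an array of 32-byte entries
 * starting at offset 0x1000.
 */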
#define EDMA_CR			0x00
#define EDMA_ES			0x04
#define EDMA_ERQ		0x0C
#define EDMA_EEI		0x14
#define EDMA_SERQ		0x1B
#define EDMA_CERQ		0x1A
#define EDMA_SEEI		0x19
#define EDMA_CEEI		0x18
#define EDMA_CINT		0x1F
#define EDMA_CERR		0x1E
#define EDMA_SSRT		0x1D
#define EDMA_CDNE		0x1C
#define EDMA_INTR		0x24
#define EDMA_ERR		0x2C

#define EDMA_TCD_SADDR(x)	(0x1000 + 32 * (x))
#define EDMA_TCD_SOFF(x)	(0x1004 + 32 * (x))
#define EDMA_TCD_ATTR(x)	(0x1006 + 32 * (x))
#define EDMA_TCD_NBYTES(x)	(0x1008 + 32 * (x))
#define EDMA_TCD_SLAST(x)	(0x100C + 32 * (x))
#define EDMA_TCD_DADDR(x)	(0x1010 + 32 * (x))
#define EDMA_TCD_DOFF(x)	(0x1014 + 32 * (x))
#define EDMA_TCD_CITER_ELINK(x)	(0x1016 + 32 * (x))
#define EDMA_TCD_CITER(x)	(0x1016 + 32 * (x))
#define EDMA_TCD_DLAST_SGA(x)	(0x1018 + 32 * (x))
#define EDMA_TCD_CSR(x)		(0x101C + 32 * (x))
#define EDMA_TCD_BITER_ELINK(x)	(0x101E + 32 * (x))
#define EDMA_TCD_BITER(x)	(0x101E + 32 * (x))

#define EDMA_CR_EDBG		BIT(1)
#define EDMA_CR_ERCA		BIT(2)
#define EDMA_CR_ERGA		BIT(3)
#define EDMA_CR_HOE		BIT(4)
#define EDMA_CR_HALT		BIT(5)
#define EDMA_CR_CLM		BIT(6)
#define EDMA_CR_EMLM		BIT(7)
#define EDMA_CR_ECX		BIT(16)
#define EDMA_CR_CX		BIT(17)

#define EDMA_SEEI_SEEI(x)	((x) & 0x1F)
#define EDMA_CEEI_CEEI(x)	((x) & 0x1F)
#define EDMA_CINT_CINT(x)	((x) & 0x1F)
#define EDMA_CERR_CERR(x)	((x) & 0x1F)

#define EDMA_TCD_ATTR_DSIZE(x)		((x) & 0x0007)
#define EDMA_TCD_ATTR_DMOD(x)		(((x) & 0x001F) << 3)
#define EDMA_TCD_ATTR_SSIZE(x)		(((x) & 0x0007) << 8)
#define EDMA_TCD_ATTR_SMOD(x)		(((x) & 0x001F) << 11)
#define EDMA_TCD_ATTR_SSIZE_8BIT	(0x0000)
#define EDMA_TCD_ATTR_SSIZE_16BIT	(0x0100)
#define EDMA_TCD_ATTR_SSIZE_32BIT	(0x0200)
#define EDMA_TCD_ATTR_SSIZE_64BIT	(0x0300)
#define EDMA_TCD_ATTR_SSIZE_32BYTE	(0x0500)
#define EDMA_TCD_ATTR_DSIZE_8BIT	(0x0000)
#define EDMA_TCD_ATTR_DSIZE_16BIT	(0x0001)
#define EDMA_TCD_ATTR_DSIZE_32BIT	(0x0002)
#define EDMA_TCD_ATTR_DSIZE_64BIT	(0x0003)
#define EDMA_TCD_ATTR_DSIZE_32BYTE	(0x0005)

#define EDMA_TCD_SOFF_SOFF(x)		(x)
#define EDMA_TCD_NBYTES_NBYTES(x)	(x)
#define EDMA_TCD_SLAST_SLAST(x)		(x)
#define EDMA_TCD_DADDR_DADDR(x)		(x)
#define EDMA_TCD_CITER_CITER(x)		((x) & 0x7FFF)
#define EDMA_TCD_DOFF_DOFF(x)		(x)
#define EDMA_TCD_DLAST_SGA_DLAST_SGA(x)	(x)
#define EDMA_TCD_BITER_BITER(x)		((x) & 0x7FFF)

#define EDMA_TCD_CSR_START		BIT(0)
#define EDMA_TCD_CSR_INT_MAJOR		BIT(1)
#define EDMA_TCD_CSR_INT_HALF		BIT(2)
#define EDMA_TCD_CSR_D_REQ		BIT(3)
#define EDMA_TCD_CSR_E_SG		BIT(4)
#define EDMA_TCD_CSR_E_LINK		BIT(5)
#define EDMA_TCD_CSR_ACTIVE		BIT(6)
#define EDMA_TCD_CSR_DONE		BIT(7)

#define EDMAMUX_CHCFG_DIS		0x0
#define EDMAMUX_CHCFG_ENBL		0x80
#define EDMAMUX_CHCFG_SOURCE(n)		((n) & 0x3F)

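/*
 * Number of DMAMUX blocks. DMA request sources are routed to the eDMA
 * channels through these muxes, each mux serving an equal share of the
 * channels (see fsl_edma_chan_mux()).
 */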
#define DMAMUX_NR	2

#define FSL_EDMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))

enum fsl_edma_pm_state {
	RUNNING = 0,
	SUSPENDED,
};

struct fsl_edma_hw_tcd {
	__le32	saddr;
	__le16	soff;
	__le16	attr;
	__le32	nbytes;
	__le32	slast;
	__le32	daddr;
	__le16	doff;
	__le16	citer;
	__le32	dlast_sga;
	__le16	csr;
	__le16	biter;
};

struct fsl_edma_sw_tcd {
	dma_addr_t			ptcd;
	struct fsl_edma_hw_tcd		*vtcd;
};

struct fsl_edma_slave_config {
	enum dma_transfer_direction	dir;
	enum dma_slave_buswidth		addr_width;
	u32				dev_addr;
	u32				burst;
	u32				attr;
};

struct fsl_edma_chan {
	struct virt_dma_chan		vchan;
	enum dma_status			status;
	enum fsl_edma_pm_state		pm_state;
	bool				idle;
	u32				slave_id;
	struct fsl_edma_engine		*edma;
	struct fsl_edma_desc		*edesc;
	struct fsl_edma_slave_config	fsc;
	struct dma_pool			*tcd_pool;
};

struct fsl_edma_desc {
	struct virt_dma_desc		vdesc;
	struct fsl_edma_chan		*echan;
	bool				iscyclic;
	unsigned int			n_tcds;
	struct fsl_edma_sw_tcd		tcd[];
};

struct fsl_edma_engine {
	struct dma_device	dma_dev;
	void __iomem		*membase;
	void __iomem		*muxbase[DMAMUX_NR];
	struct clk		*muxclk[DMAMUX_NR];
	struct mutex		fsl_edma_mutex;
	u32			n_chans;
	int			txirq;
	int			errirq;
	bool			big_endian;
	struct fsl_edma_chan	chans[];
};

/*
 * R/W functions for big- or little-endian registers:
 * The eDMA controller's endianness is independent of the CPU core's
 * endianness. For a big-endian IP module, the offsets of the 8-bit and
 * 16-bit registers must additionally be byte-swapped relative to the
 * little-endian layout.
 */

static u32 edma_readl(struct fsl_edma_engine *edma, void __iomem *addr)
{
	if (edma->big_endian)
		return ioread32be(addr);
	else
		return ioread32(addr);
}

static void edma_writeb(struct fsl_edma_engine *edma, u8 val, void __iomem *addr)
{
	/* swap the reg offset for these in big-endian mode */
	if (edma->big_endian)
		iowrite8(val, (void __iomem *)((unsigned long)addr ^ 0x3));
	else
		iowrite8(val, addr);
}

static void edma_writew(struct fsl_edma_engine *edma, u16 val, void __iomem *addr)
{
	/* swap the reg offset for these in big-endian mode */
	if (edma->big_endian)
		iowrite16be(val, (void __iomem *)((unsigned long)addr ^ 0x2));
	else
		iowrite16(val, addr);
}

static void edma_writel(struct fsl_edma_engine *edma, u32 val, void __iomem *addr)
{
	if (edma->big_endian)
		iowrite32be(val, addr);
	else
		iowrite32(val, addr);
}

static struct fsl_edma_chan *to_fsl_edma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct fsl_edma_chan, vchan.chan);
}

static struct fsl_edma_desc *to_fsl_edma_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct fsl_edma_desc, vdesc);
}

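/*
 * Channel enable/disable: SEEI/CEEI gate the per-channel error interrupt
 * and SERQ/CERQ gate the hardware DMA request. These are single-byte
 * "engage" registers that take the channel number as their value.
 */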
static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
{
	void __iomem *addr = fsl_chan->edma->membase;
	u32 ch = fsl_chan->vchan.chan.chan_id;

	edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), addr + EDMA_SEEI);
	edma_writeb(fsl_chan->edma, ch, addr + EDMA_SERQ);
}

static void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
{
	void __iomem *addr = fsl_chan->edma->membase;
	u32 ch = fsl_chan->vchan.chan.chan_id;

	edma_writeb(fsl_chan->edma, ch, addr + EDMA_CERQ);
	edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), addr + EDMA_CEEI);
}

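/*
 * Route (or unroute) a DMA request source to this channel. Channels are
 * split evenly across the DMAMUX blocks, so the mux instance is selected
 * by the channel number and the slot within it by the channel offset.
 */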
static void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
			unsigned int slot, bool enable)
{
	u32 ch = fsl_chan->vchan.chan.chan_id;
	void __iomem *muxaddr;
	unsigned int chans_per_mux, ch_off;

	chans_per_mux = fsl_chan->edma->n_chans / DMAMUX_NR;
	ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;
	muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
	slot = EDMAMUX_CHCFG_SOURCE(slot);

	if (enable)
		iowrite8(EDMAMUX_CHCFG_ENBL | slot, muxaddr + ch_off);
	else
		iowrite8(EDMAMUX_CHCFG_DIS, muxaddr + ch_off);
}

static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width)
{
	switch (addr_width) {
	case 1:
		return EDMA_TCD_ATTR_SSIZE_8BIT | EDMA_TCD_ATTR_DSIZE_8BIT;
	case 2:
		return EDMA_TCD_ATTR_SSIZE_16BIT | EDMA_TCD_ATTR_DSIZE_16BIT;
	case 4:
		return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
	case 8:
		return EDMA_TCD_ATTR_SSIZE_64BIT | EDMA_TCD_ATTR_DSIZE_64BIT;
	default:
		return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
	}
}

static void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
{
	struct fsl_edma_desc *fsl_desc;
	int i;

	fsl_desc = to_fsl_edma_desc(vdesc);
	for (i = 0; i < fsl_desc->n_tcds; i++)
		dma_pool_free(fsl_desc->echan->tcd_pool, fsl_desc->tcd[i].vtcd,
			      fsl_desc->tcd[i].ptcd);
	kfree(fsl_desc);
}

static int fsl_edma_terminate_all(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	fsl_edma_disable_request(fsl_chan);
	fsl_chan->edesc = NULL;
	fsl_chan->idle = true;
	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
	return 0;
}

static int fsl_edma_pause(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	if (fsl_chan->edesc) {
		fsl_edma_disable_request(fsl_chan);
		fsl_chan->status = DMA_PAUSED;
		fsl_chan->idle = true;
	}
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	return 0;
}

static int fsl_edma_resume(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	if (fsl_chan->edesc) {
		fsl_edma_enable_request(fsl_chan);
		fsl_chan->status = DMA_IN_PROGRESS;
		fsl_chan->idle = false;
	}
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	return 0;
}

static int fsl_edma_slave_config(struct dma_chan *chan,
				 struct dma_slave_config *cfg)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);

	fsl_chan->fsc.dir = cfg->direction;
	if (cfg->direction == DMA_DEV_TO_MEM) {
		fsl_chan->fsc.dev_addr = cfg->src_addr;
		fsl_chan->fsc.addr_width = cfg->src_addr_width;
		fsl_chan->fsc.burst = cfg->src_maxburst;
		fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->src_addr_width);
	} else if (cfg->direction == DMA_MEM_TO_DEV) {
		fsl_chan->fsc.dev_addr = cfg->dst_addr;
		fsl_chan->fsc.addr_width = cfg->dst_addr_width;
		fsl_chan->fsc.burst = cfg->dst_maxburst;
		fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->dst_addr_width);
	} else {
		return -EINVAL;
	}
	return 0;
}

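/*
 * The residue is computed from the software TCD list: the total size of a
 * descriptor is the sum of nbytes * biter over all TCDs. For an in-flight
 * descriptor, the hardware source (MEM_TO_DEV) or destination (DEV_TO_MEM)
 * address register identifies the TCD currently being executed, and the
 * untransferred remainder of that TCD plus all following TCDs is returned.
 */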
static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
		struct virt_dma_desc *vdesc, bool in_progress)
{
	struct fsl_edma_desc *edesc = fsl_chan->edesc;
	void __iomem *addr = fsl_chan->edma->membase;
	u32 ch = fsl_chan->vchan.chan.chan_id;
	enum dma_transfer_direction dir = fsl_chan->fsc.dir;
	dma_addr_t cur_addr, dma_addr;
	size_t len, size;
	int i;

	/* calculate the total size in this desc */
	for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++)
		len += le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
			* le16_to_cpu(edesc->tcd[i].vtcd->biter);

	if (!in_progress)
		return len;

	if (dir == DMA_MEM_TO_DEV)
		cur_addr = edma_readl(fsl_chan->edma, addr + EDMA_TCD_SADDR(ch));
	else
		cur_addr = edma_readl(fsl_chan->edma, addr + EDMA_TCD_DADDR(ch));

	/* figure out which TCDs have finished and calculate the residue */
	for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
		size = le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
			* le16_to_cpu(edesc->tcd[i].vtcd->biter);
		if (dir == DMA_MEM_TO_DEV)
			dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->saddr);
		else
			dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->daddr);

		len -= size;
		if (cur_addr >= dma_addr && cur_addr < dma_addr + size) {
			len += dma_addr + size - cur_addr;
			break;
		}
	}

	return len;
}

static enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	unsigned long flags;

	status = dma_cookie_status(chan, cookie, txstate);
	if (status == DMA_COMPLETE)
		return status;

	if (!txstate)
		return fsl_chan->status;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	vdesc = vchan_find_desc(&fsl_chan->vchan, cookie);
	if (fsl_chan->edesc && cookie == fsl_chan->edesc->vdesc.tx.cookie)
		txstate->residue = fsl_edma_desc_residue(fsl_chan, vdesc, true);
	else if (vdesc)
		txstate->residue = fsl_edma_desc_residue(fsl_chan, vdesc, false);
	else
		txstate->residue = 0;

	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

	return fsl_chan->status;
}

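/*
 * Program the hardware TCD registers for one software TCD. CSR is cleared
 * first and written last, which keeps the channel from starting on a
 * half-programmed TCD.
 */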
static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
				  struct fsl_edma_hw_tcd *tcd)
{
	struct fsl_edma_engine *edma = fsl_chan->edma;
	void __iomem *addr = fsl_chan->edma->membase;
	u32 ch = fsl_chan->vchan.chan.chan_id;

	/*
	 * TCD parameters are stored in struct fsl_edma_hw_tcd in little-
	 * endian format. However, the TCD registers must be loaded in big-
	 * or little-endian order according to the eDMA engine's endianness
	 * model.
	 */
	edma_writew(edma, 0, addr + EDMA_TCD_CSR(ch));
	edma_writel(edma, le32_to_cpu(tcd->saddr), addr + EDMA_TCD_SADDR(ch));
	edma_writel(edma, le32_to_cpu(tcd->daddr), addr + EDMA_TCD_DADDR(ch));

	edma_writew(edma, le16_to_cpu(tcd->attr), addr + EDMA_TCD_ATTR(ch));
	edma_writew(edma, le16_to_cpu(tcd->soff), addr + EDMA_TCD_SOFF(ch));

	edma_writel(edma, le32_to_cpu(tcd->nbytes), addr + EDMA_TCD_NBYTES(ch));
	edma_writel(edma, le32_to_cpu(tcd->slast), addr + EDMA_TCD_SLAST(ch));

	edma_writew(edma, le16_to_cpu(tcd->citer), addr + EDMA_TCD_CITER(ch));
	edma_writew(edma, le16_to_cpu(tcd->biter), addr + EDMA_TCD_BITER(ch));
	edma_writew(edma, le16_to_cpu(tcd->doff), addr + EDMA_TCD_DOFF(ch));

	edma_writel(edma, le32_to_cpu(tcd->dlast_sga), addr + EDMA_TCD_DLAST_SGA(ch));

	edma_writew(edma, le16_to_cpu(tcd->csr), addr + EDMA_TCD_CSR(ch));
}

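/*
 * Fill one in-memory TCD. citer/biter are the current and beginning major
 * loop counts (iterations of nbytes-sized minor loops); soff/doff are the
 * per-read/per-write address increments; dlast_sga points at the next TCD
 * when scatter-gather chaining is enabled via EDMA_TCD_CSR_E_SG.
 */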
static inline
void fsl_edma_fill_tcd(struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
		       u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
		       u16 biter, u16 doff, u32 dlast_sga, bool major_int,
		       bool disable_req, bool enable_sg)
{
	u16 csr = 0;

	/*
	 * eDMA hardware SGs require the TCDs to be stored in little-endian
	 * format irrespective of the register endianness model. So we put
	 * the value in little endian in memory and leave it to
	 * fsl_edma_set_tcd_regs to do the swap.
	 */
	tcd->saddr = cpu_to_le32(src);
	tcd->daddr = cpu_to_le32(dst);

	tcd->attr = cpu_to_le16(attr);

	tcd->soff = cpu_to_le16(EDMA_TCD_SOFF_SOFF(soff));

	tcd->nbytes = cpu_to_le32(EDMA_TCD_NBYTES_NBYTES(nbytes));
	tcd->slast = cpu_to_le32(EDMA_TCD_SLAST_SLAST(slast));

	tcd->citer = cpu_to_le16(EDMA_TCD_CITER_CITER(citer));
	tcd->doff = cpu_to_le16(EDMA_TCD_DOFF_DOFF(doff));

	tcd->dlast_sga = cpu_to_le32(EDMA_TCD_DLAST_SGA_DLAST_SGA(dlast_sga));

	tcd->biter = cpu_to_le16(EDMA_TCD_BITER_BITER(biter));

	if (major_int)
		csr |= EDMA_TCD_CSR_INT_MAJOR;

	if (disable_req)
		csr |= EDMA_TCD_CSR_D_REQ;

	if (enable_sg)
		csr |= EDMA_TCD_CSR_E_SG;

	tcd->csr = cpu_to_le16(csr);
}

static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
		int sg_len)
{
	struct fsl_edma_desc *fsl_desc;
	int i;

	fsl_desc = kzalloc(sizeof(*fsl_desc) + sizeof(struct fsl_edma_sw_tcd) * sg_len,
				GFP_NOWAIT);
	if (!fsl_desc)
		return NULL;

	fsl_desc->echan = fsl_chan;
	fsl_desc->n_tcds = sg_len;
	for (i = 0; i < sg_len; i++) {
		fsl_desc->tcd[i].vtcd = dma_pool_alloc(fsl_chan->tcd_pool,
					GFP_NOWAIT, &fsl_desc->tcd[i].ptcd);
		if (!fsl_desc->tcd[i].vtcd)
			goto err;
	}
	return fsl_desc;

err:
	while (--i >= 0)
		dma_pool_free(fsl_chan->tcd_pool, fsl_desc->tcd[i].vtcd,
				fsl_desc->tcd[i].ptcd);
	kfree(fsl_desc);
	return NULL;
}

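/*
 * A cyclic transfer is built as a ring of TCDs, one per period: each TCD's
 * dlast_sga points at the next TCD and the last one wraps back to the
 * first, while major_int raises an interrupt after every period.
 */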
static struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_desc *fsl_desc;
	dma_addr_t dma_buf_next;
	int sg_len, i;
	u32 src_addr, dst_addr, last_sg, nbytes;
	u16 soff, doff, iter;

	if (!is_slave_direction(fsl_chan->fsc.dir))
		return NULL;

	sg_len = buf_len / period_len;
	fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
	if (!fsl_desc)
		return NULL;
	fsl_desc->iscyclic = true;

	dma_buf_next = dma_addr;
	nbytes = fsl_chan->fsc.addr_width * fsl_chan->fsc.burst;
	iter = period_len / nbytes;

	for (i = 0; i < sg_len; i++) {
		if (dma_buf_next >= dma_addr + buf_len)
			dma_buf_next = dma_addr;

		/* get next sg's physical address */
		last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;

		if (fsl_chan->fsc.dir == DMA_MEM_TO_DEV) {
			src_addr = dma_buf_next;
			dst_addr = fsl_chan->fsc.dev_addr;
			soff = fsl_chan->fsc.addr_width;
			doff = 0;
		} else {
			src_addr = fsl_chan->fsc.dev_addr;
			dst_addr = dma_buf_next;
			soff = 0;
			doff = fsl_chan->fsc.addr_width;
		}

		fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr, dst_addr,
				  fsl_chan->fsc.attr, soff, nbytes, 0, iter,
				  iter, doff, last_sg, true, false, true);
		dma_buf_next += period_len;
	}

	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}

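/*
 * A slave scatter-gather transfer uses one TCD per scatterlist entry,
 * chained through dlast_sga. Only the final TCD requests an interrupt and
 * disables the channel's hardware request (D_REQ) when it completes.
 */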
static struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_desc *fsl_desc;
	struct scatterlist *sg;
	u32 src_addr, dst_addr, last_sg, nbytes;
	u16 soff, doff, iter;
	int i;

	if (!is_slave_direction(fsl_chan->fsc.dir))
		return NULL;

	fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
	if (!fsl_desc)
		return NULL;
	fsl_desc->iscyclic = false;

	nbytes = fsl_chan->fsc.addr_width * fsl_chan->fsc.burst;
	for_each_sg(sgl, sg, sg_len, i) {
		if (fsl_chan->fsc.dir == DMA_MEM_TO_DEV) {
			src_addr = sg_dma_address(sg);
			dst_addr = fsl_chan->fsc.dev_addr;
			soff = fsl_chan->fsc.addr_width;
			doff = 0;
		} else {
			src_addr = fsl_chan->fsc.dev_addr;
			dst_addr = sg_dma_address(sg);
			soff = 0;
			doff = fsl_chan->fsc.addr_width;
		}

		iter = sg_dma_len(sg) / nbytes;
		if (i < sg_len - 1) {
			/* chain to the next sg's TCD */
			last_sg = fsl_desc->tcd[(i + 1)].ptcd;
			fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
					  dst_addr, fsl_chan->fsc.attr, soff,
					  nbytes, 0, iter, iter, doff, last_sg,
					  false, false, true);
		} else {
			last_sg = 0;
			fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
					  dst_addr, fsl_chan->fsc.attr, soff,
					  nbytes, 0, iter, iter, doff, last_sg,
					  true, true, false);
		}
	}

	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}

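/* Load the next pending descriptor's first TCD and start the channel. */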
static void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
{
	struct virt_dma_desc *vdesc;

	vdesc = vchan_next_desc(&fsl_chan->vchan);
	if (!vdesc)
		return;
	fsl_chan->edesc = to_fsl_edma_desc(vdesc);
	fsl_edma_set_tcd_regs(fsl_chan, fsl_chan->edesc->tcd[0].vtcd);
	fsl_edma_enable_request(fsl_chan);
	fsl_chan->status = DMA_IN_PROGRESS;
	fsl_chan->idle = false;
}

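/*
 * Transfer-complete interrupt: EDMA_INTR has one status bit per channel.
 * Each asserted channel is acknowledged through EDMA_CINT, its descriptor
 * is completed (or its cyclic callback run), and the next pending
 * descriptor, if any, is started.
 */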
static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
{
	struct fsl_edma_engine *fsl_edma = dev_id;
	unsigned int intr, ch;
	void __iomem *base_addr;
	struct fsl_edma_chan *fsl_chan;

	base_addr = fsl_edma->membase;

	intr = edma_readl(fsl_edma, base_addr + EDMA_INTR);
	if (!intr)
		return IRQ_NONE;

	for (ch = 0; ch < fsl_edma->n_chans; ch++) {
		if (intr & (0x1 << ch)) {
			edma_writeb(fsl_edma, EDMA_CINT_CINT(ch),
				base_addr + EDMA_CINT);

			fsl_chan = &fsl_edma->chans[ch];

			spin_lock(&fsl_chan->vchan.lock);

			if (!fsl_chan->edesc) {
				/* terminate_all may have already run */
				spin_unlock(&fsl_chan->vchan.lock);
				continue;
			}

			if (!fsl_chan->edesc->iscyclic) {
				list_del(&fsl_chan->edesc->vdesc.node);
				vchan_cookie_complete(&fsl_chan->edesc->vdesc);
				fsl_chan->edesc = NULL;
				fsl_chan->status = DMA_COMPLETE;
				fsl_chan->idle = true;
			} else {
				vchan_cyclic_callback(&fsl_chan->edesc->vdesc);
			}

			if (!fsl_chan->edesc)
				fsl_edma_xfer_desc(fsl_chan);

			spin_unlock(&fsl_chan->vchan.lock);
		}
	}
	return IRQ_HANDLED;
}

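/*
 * Error interrupt: for every channel flagged in EDMA_ERR, stop the
 * hardware request, acknowledge the error through EDMA_CERR and mark the
 * channel DMA_ERROR.
 */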
static irqreturn_t fsl_edma_err_handler(int irq, void *dev_id)
{
	struct fsl_edma_engine *fsl_edma = dev_id;
	unsigned int err, ch;

	err = edma_readl(fsl_edma, fsl_edma->membase + EDMA_ERR);
	if (!err)
		return IRQ_NONE;

	for (ch = 0; ch < fsl_edma->n_chans; ch++) {
		if (err & (0x1 << ch)) {
			fsl_edma_disable_request(&fsl_edma->chans[ch]);
			edma_writeb(fsl_edma, EDMA_CERR_CERR(ch),
				fsl_edma->membase + EDMA_CERR);
			fsl_edma->chans[ch].status = DMA_ERROR;
			fsl_edma->chans[ch].idle = true;
		}
	}
	return IRQ_HANDLED;
}

static irqreturn_t fsl_edma_irq_handler(int irq, void *dev_id)
{
	if (fsl_edma_tx_handler(irq, dev_id) == IRQ_HANDLED)
		return IRQ_HANDLED;

	return fsl_edma_err_handler(irq, dev_id);
}

static void fsl_edma_issue_pending(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);

	if (unlikely(fsl_chan->pm_state != RUNNING)) {
		spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
		/* cannot submit due to suspend */
		return;
	}

	if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc)
		fsl_edma_xfer_desc(fsl_chan);

	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
}

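/*
 * Translate a two-cell DT DMA specifier into a channel: the first cell
 * selects the DMAMUX block, the second the request source to route to the
 * allocated channel. A client node would look something like this
 * (hypothetical example, not taken from a real device tree):
 *
 *	dmas = <&edma0 0 24>;	(mux block 0, request source 24)
 */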
static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec,
		struct of_dma *ofdma)
{
	struct fsl_edma_engine *fsl_edma = ofdma->of_dma_data;
	struct dma_chan *chan, *_chan;
	struct fsl_edma_chan *fsl_chan;
	unsigned long chans_per_mux = fsl_edma->n_chans / DMAMUX_NR;

	if (dma_spec->args_count != 2)
		return NULL;

	mutex_lock(&fsl_edma->fsl_edma_mutex);
	list_for_each_entry_safe(chan, _chan, &fsl_edma->dma_dev.channels, device_node) {
		if (chan->client_count)
			continue;
		if ((chan->chan_id / chans_per_mux) == dma_spec->args[0]) {
			chan = dma_get_slave_channel(chan);
			if (chan) {
				chan->device->privatecnt++;
				fsl_chan = to_fsl_edma_chan(chan);
				fsl_chan->slave_id = dma_spec->args[1];
				fsl_edma_chan_mux(fsl_chan, fsl_chan->slave_id,
						true);
				mutex_unlock(&fsl_edma->fsl_edma_mutex);
				return chan;
			}
		}
	}
	mutex_unlock(&fsl_edma->fsl_edma_mutex);
	return NULL;
}

static int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);

	fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
				sizeof(struct fsl_edma_hw_tcd),
				32, 0);
	return 0;
}

static void fsl_edma_free_chan_resources(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	fsl_edma_disable_request(fsl_chan);
	fsl_edma_chan_mux(fsl_chan, 0, false);
	fsl_chan->edesc = NULL;
	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
	dma_pool_destroy(fsl_chan->tcd_pool);
	fsl_chan->tcd_pool = NULL;
}

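/*
 * On some SoCs the transfer-complete and error interrupts share one line;
 * in that case a single combined handler is registered, otherwise the tx
 * and err handlers each get their own line.
 */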
static int
fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
{
	int ret;

	fsl_edma->txirq = platform_get_irq_byname(pdev, "edma-tx");
	if (fsl_edma->txirq < 0) {
		dev_err(&pdev->dev, "Can't get edma-tx irq.\n");
		return fsl_edma->txirq;
	}

	fsl_edma->errirq = platform_get_irq_byname(pdev, "edma-err");
	if (fsl_edma->errirq < 0) {
		dev_err(&pdev->dev, "Can't get edma-err irq.\n");
		return fsl_edma->errirq;
	}

	if (fsl_edma->txirq == fsl_edma->errirq) {
		ret = devm_request_irq(&pdev->dev, fsl_edma->txirq,
				fsl_edma_irq_handler, 0, "eDMA", fsl_edma);
		if (ret) {
			dev_err(&pdev->dev, "Can't register eDMA IRQ.\n");
			return ret;
		}
	} else {
		ret = devm_request_irq(&pdev->dev, fsl_edma->txirq,
				fsl_edma_tx_handler, 0, "eDMA tx", fsl_edma);
		if (ret) {
			dev_err(&pdev->dev, "Can't register eDMA tx IRQ.\n");
			return ret;
		}

		ret = devm_request_irq(&pdev->dev, fsl_edma->errirq,
				fsl_edma_err_handler, 0, "eDMA err", fsl_edma);
		if (ret) {
			dev_err(&pdev->dev, "Can't register eDMA err IRQ.\n");
			return ret;
		}
	}

	return 0;
}

static void fsl_edma_irq_exit(
		struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
{
	if (fsl_edma->txirq == fsl_edma->errirq) {
		devm_free_irq(&pdev->dev, fsl_edma->txirq, fsl_edma);
	} else {
		devm_free_irq(&pdev->dev, fsl_edma->txirq, fsl_edma);
		devm_free_irq(&pdev->dev, fsl_edma->errirq, fsl_edma);
	}
}

static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma, int nr_clocks)
{
	int i;

	for (i = 0; i < nr_clocks; i++)
		clk_disable_unprepare(fsl_edma->muxclk[i]);
}

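/*
 * Probe: map the eDMA register block and the DMAMUX blocks, enable the
 * mux clocks, initialize one virt-dma channel per hardware channel with
 * its TCD cleared and its mux disabled, hook up the IRQs, and register
 * the engine with the dmaengine core and the OF DMA translation layer.
 */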
static int fsl_edma_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct fsl_edma_engine *fsl_edma;
	struct fsl_edma_chan *fsl_chan;
	struct resource *res;
	int len, chans;
	int ret, i;

	ret = of_property_read_u32(np, "dma-channels", &chans);
	if (ret) {
		dev_err(&pdev->dev, "Can't get dma-channels.\n");
		return ret;
	}

	len = sizeof(*fsl_edma) + sizeof(*fsl_chan) * chans;
	fsl_edma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
	if (!fsl_edma)
		return -ENOMEM;

	fsl_edma->n_chans = chans;
	mutex_init(&fsl_edma->fsl_edma_mutex);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	fsl_edma->membase = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(fsl_edma->membase))
		return PTR_ERR(fsl_edma->membase);

	for (i = 0; i < DMAMUX_NR; i++) {
		char clkname[32];

		res = platform_get_resource(pdev, IORESOURCE_MEM, 1 + i);
		fsl_edma->muxbase[i] = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(fsl_edma->muxbase[i])) {
			/* on error: disable all previously enabled clks */
			fsl_disable_clocks(fsl_edma, i);
			return PTR_ERR(fsl_edma->muxbase[i]);
		}

		sprintf(clkname, "dmamux%d", i);
		fsl_edma->muxclk[i] = devm_clk_get(&pdev->dev, clkname);
		if (IS_ERR(fsl_edma->muxclk[i])) {
			dev_err(&pdev->dev, "Missing DMAMUX block clock.\n");
			/* on error: disable all previously enabled clks */
			fsl_disable_clocks(fsl_edma, i);
			return PTR_ERR(fsl_edma->muxclk[i]);
		}

		ret = clk_prepare_enable(fsl_edma->muxclk[i]);
		if (ret) {
			/* on error: disable all previously enabled clks */
			fsl_disable_clocks(fsl_edma, i);
			return ret;
		}
	}

	fsl_edma->big_endian = of_property_read_bool(np, "big-endian");

	INIT_LIST_HEAD(&fsl_edma->dma_dev.channels);
	for (i = 0; i < fsl_edma->n_chans; i++) {
		struct fsl_edma_chan *fsl_chan = &fsl_edma->chans[i];

		fsl_chan->edma = fsl_edma;
		fsl_chan->pm_state = RUNNING;
		fsl_chan->slave_id = 0;
		fsl_chan->idle = true;
		fsl_chan->vchan.desc_free = fsl_edma_free_desc;
		vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev);

		edma_writew(fsl_edma, 0x0, fsl_edma->membase + EDMA_TCD_CSR(i));
		fsl_edma_chan_mux(fsl_chan, 0, false);
	}

	edma_writel(fsl_edma, ~0, fsl_edma->membase + EDMA_INTR);
	ret = fsl_edma_irq_init(pdev, fsl_edma);
	if (ret)
		return ret;

	dma_cap_set(DMA_PRIVATE, fsl_edma->dma_dev.cap_mask);
	dma_cap_set(DMA_SLAVE, fsl_edma->dma_dev.cap_mask);
	dma_cap_set(DMA_CYCLIC, fsl_edma->dma_dev.cap_mask);

	fsl_edma->dma_dev.dev = &pdev->dev;
	fsl_edma->dma_dev.device_alloc_chan_resources
		= fsl_edma_alloc_chan_resources;
	fsl_edma->dma_dev.device_free_chan_resources
		= fsl_edma_free_chan_resources;
	fsl_edma->dma_dev.device_tx_status = fsl_edma_tx_status;
	fsl_edma->dma_dev.device_prep_slave_sg = fsl_edma_prep_slave_sg;
	fsl_edma->dma_dev.device_prep_dma_cyclic = fsl_edma_prep_dma_cyclic;
	fsl_edma->dma_dev.device_config = fsl_edma_slave_config;
	fsl_edma->dma_dev.device_pause = fsl_edma_pause;
	fsl_edma->dma_dev.device_resume = fsl_edma_resume;
	fsl_edma->dma_dev.device_terminate_all = fsl_edma_terminate_all;
	fsl_edma->dma_dev.device_issue_pending = fsl_edma_issue_pending;

	fsl_edma->dma_dev.src_addr_widths = FSL_EDMA_BUSWIDTHS;
	fsl_edma->dma_dev.dst_addr_widths = FSL_EDMA_BUSWIDTHS;
	fsl_edma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);

	platform_set_drvdata(pdev, fsl_edma);

	ret = dma_async_device_register(&fsl_edma->dma_dev);
	if (ret) {
		dev_err(&pdev->dev,
			"Can't register Freescale eDMA engine. (%d)\n", ret);
		fsl_disable_clocks(fsl_edma, DMAMUX_NR);
		return ret;
	}

	ret = of_dma_controller_register(np, fsl_edma_xlate, fsl_edma);
	if (ret) {
		dev_err(&pdev->dev,
			"Can't register Freescale eDMA of_dma. (%d)\n", ret);
		dma_async_device_unregister(&fsl_edma->dma_dev);
		fsl_disable_clocks(fsl_edma, DMAMUX_NR);
		return ret;
	}

	/* enable round robin arbitration */
	edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, fsl_edma->membase + EDMA_CR);

	return 0;
}

static void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
{
	struct fsl_edma_chan *chan, *_chan;

	list_for_each_entry_safe(chan, _chan,
				&dmadev->channels, vchan.chan.device_node) {
		list_del(&chan->vchan.chan.device_node);
		tasklet_kill(&chan->vchan.task);
	}
}

static int fsl_edma_remove(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct fsl_edma_engine *fsl_edma = platform_get_drvdata(pdev);

	fsl_edma_irq_exit(pdev, fsl_edma);
	fsl_edma_cleanup_vchan(&fsl_edma->dma_dev);
	of_dma_controller_free(np);
	dma_async_device_unregister(&fsl_edma->dma_dev);
	fsl_disable_clocks(fsl_edma, DMAMUX_NR);

	return 0;
}

static int fsl_edma_suspend_late(struct device *dev)
{
	struct fsl_edma_engine *fsl_edma = dev_get_drvdata(dev);
	struct fsl_edma_chan *fsl_chan;
	unsigned long flags;
	int i;

	for (i = 0; i < fsl_edma->n_chans; i++) {
		fsl_chan = &fsl_edma->chans[i];
		spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
		/* Make sure the channel is idle, or force-disable it. */
		if (unlikely(!fsl_chan->idle)) {
			dev_warn(dev, "WARN: There is a non-idle channel.");
			fsl_edma_disable_request(fsl_chan);
			fsl_edma_chan_mux(fsl_chan, 0, false);
		}

		fsl_chan->pm_state = SUSPENDED;
		spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	}

	return 0;
}

static int fsl_edma_resume_early(struct device *dev)
{
	struct fsl_edma_engine *fsl_edma = dev_get_drvdata(dev);
	struct fsl_edma_chan *fsl_chan;
	int i;

	for (i = 0; i < fsl_edma->n_chans; i++) {
		fsl_chan = &fsl_edma->chans[i];
		fsl_chan->pm_state = RUNNING;
		edma_writew(fsl_edma, 0x0, fsl_edma->membase + EDMA_TCD_CSR(i));
		if (fsl_chan->slave_id != 0)
			fsl_edma_chan_mux(fsl_chan, fsl_chan->slave_id, true);
	}

	edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA,
			fsl_edma->membase + EDMA_CR);

	return 0;
}

/*
 * eDMA provides services to other devices, so it should suspend late and
 * resume early. When eDMA is suspended, all of its clients should have
 * stopped their DMA transfers and left the channels idle.
 */
static const struct dev_pm_ops fsl_edma_pm_ops = {
	.suspend_late   = fsl_edma_suspend_late,
	.resume_early   = fsl_edma_resume_early,
};

static const struct of_device_id fsl_edma_dt_ids[] = {
	{ .compatible = "fsl,vf610-edma", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids);

static struct platform_driver fsl_edma_driver = {
	.driver		= {
		.name	= "fsl-edma",
		.of_match_table = fsl_edma_dt_ids,
		.pm     = &fsl_edma_pm_ops,
	},
	.probe          = fsl_edma_probe,
	.remove		= fsl_edma_remove,
};

static int __init fsl_edma_init(void)
{
	return platform_driver_register(&fsl_edma_driver);
}
subsys_initcall(fsl_edma_init);

static void __exit fsl_edma_exit(void)
{
	platform_driver_unregister(&fsl_edma_driver);
}
module_exit(fsl_edma_exit);

MODULE_ALIAS("platform:fsl-edma");
MODULE_DESCRIPTION("Freescale eDMA engine driver");
MODULE_LICENSE("GPL v2");