1 /*
2  * EDMA3 support for DaVinci
3  *
4  * Copyright (C) 2006-2009 Texas Instruments.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, write to the Free Software
18  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19  */
20 #include <linux/err.h>
21 #include <linux/kernel.h>
22 #include <linux/init.h>
23 #include <linux/module.h>
24 #include <linux/interrupt.h>
25 #include <linux/platform_device.h>
26 #include <linux/io.h>
27 #include <linux/slab.h>
28 #include <linux/edma.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/of_address.h>
31 #include <linux/of_device.h>
32 #include <linux/of_dma.h>
33 #include <linux/of_irq.h>
34 #include <linux/pm_runtime.h>
35 
36 #include <linux/platform_data/edma.h>
37 
38 /* Offsets matching "struct edmacc_param" */
39 #define PARM_OPT		0x00
40 #define PARM_SRC		0x04
41 #define PARM_A_B_CNT		0x08
42 #define PARM_DST		0x0c
43 #define PARM_SRC_DST_BIDX	0x10
44 #define PARM_LINK_BCNTRLD	0x14
45 #define PARM_SRC_DST_CIDX	0x18
46 #define PARM_CCNT		0x1c
47 
48 #define PARM_SIZE		0x20
49 
50 /* Offsets for EDMA CC global channel registers and their shadows */
51 #define SH_ER		0x00	/* 64 bits */
52 #define SH_ECR		0x08	/* 64 bits */
53 #define SH_ESR		0x10	/* 64 bits */
54 #define SH_CER		0x18	/* 64 bits */
55 #define SH_EER		0x20	/* 64 bits */
56 #define SH_EECR		0x28	/* 64 bits */
57 #define SH_EESR		0x30	/* 64 bits */
58 #define SH_SER		0x38	/* 64 bits */
59 #define SH_SECR		0x40	/* 64 bits */
60 #define SH_IER		0x50	/* 64 bits */
61 #define SH_IECR		0x58	/* 64 bits */
62 #define SH_IESR		0x60	/* 64 bits */
63 #define SH_IPR		0x68	/* 64 bits */
64 #define SH_ICR		0x70	/* 64 bits */
65 #define SH_IEVAL	0x78
66 #define SH_QER		0x80
67 #define SH_QEER		0x84
68 #define SH_QEECR	0x88
69 #define SH_QEESR	0x8c
70 #define SH_QSER		0x90
71 #define SH_QSECR	0x94
72 #define SH_SIZE		0x200
73 
74 /* Offsets for EDMA CC global registers */
75 #define EDMA_REV	0x0000
76 #define EDMA_CCCFG	0x0004
77 #define EDMA_QCHMAP	0x0200	/* 8 registers */
78 #define EDMA_DMAQNUM	0x0240	/* 8 registers (4 on OMAP-L1xx) */
79 #define EDMA_QDMAQNUM	0x0260
80 #define EDMA_QUETCMAP	0x0280
81 #define EDMA_QUEPRI	0x0284
82 #define EDMA_EMR	0x0300	/* 64 bits */
83 #define EDMA_EMCR	0x0308	/* 64 bits */
84 #define EDMA_QEMR	0x0310
85 #define EDMA_QEMCR	0x0314
86 #define EDMA_CCERR	0x0318
87 #define EDMA_CCERRCLR	0x031c
88 #define EDMA_EEVAL	0x0320
89 #define EDMA_DRAE	0x0340	/* 4 x 64 bits */
90 #define EDMA_QRAE	0x0380	/* 4 registers */
91 #define EDMA_QUEEVTENTRY	0x0400	/* 2 x 16 registers */
92 #define EDMA_QSTAT	0x0600	/* 2 registers */
93 #define EDMA_QWMTHRA	0x0620
94 #define EDMA_QWMTHRB	0x0624
95 #define EDMA_CCSTAT	0x0640
96 
97 #define EDMA_M		0x1000	/* global channel registers */
98 #define EDMA_ECR	0x1008
99 #define EDMA_ECRH	0x100C
100 #define EDMA_SHADOW0	0x2000	/* 4 regions shadowing global channels */
101 #define EDMA_PARM	0x4000	/* 128 param entries */
102 
103 #define PARM_OFFSET(param_no)	(EDMA_PARM + ((param_no) << 5))
104 
105 #define EDMA_DCHMAP	0x0100  /* 64 registers */
106 
107 /* CCCFG register */
108 #define GET_NUM_DMACH(x)	(x & 0x7) /* bits 0-2 */
109 #define GET_NUM_PAENTRY(x)	((x & 0x7000) >> 12) /* bits 12-14 */
110 #define GET_NUM_EVQUE(x)	((x & 0x70000) >> 16) /* bits 16-18 */
111 #define GET_NUM_REGN(x)		((x & 0x300000) >> 20) /* bits 20-21 */
112 #define CHMAP_EXIST		BIT(24)
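
/*
 * Worked example (hypothetical register value): a CCCFG of 0x234005 gives
 * GET_NUM_DMACH = 5, GET_NUM_PAENTRY = 4, GET_NUM_EVQUE = 3 and
 * GET_NUM_REGN = 2, which edma_setup_from_hw() below turns into 64 DMA
 * channels, 256 PaRAM slots, 4 transfer controllers and 4 shadow regions.
 */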
113 
114 #define EDMA_MAX_DMACH           64
115 #define EDMA_MAX_PARAMENTRY     512
116 
117 /*****************************************************************************/
118 
119 static void __iomem *edmacc_regs_base[EDMA_MAX_CC];
120 
121 static inline unsigned int edma_read(unsigned ctlr, int offset)
122 {
123 	return (unsigned int)__raw_readl(edmacc_regs_base[ctlr] + offset);
124 }
125 
126 static inline void edma_write(unsigned ctlr, int offset, int val)
127 {
128 	__raw_writel(val, edmacc_regs_base[ctlr] + offset);
129 }
130 static inline void edma_modify(unsigned ctlr, int offset, unsigned and,
131 		unsigned or)
132 {
133 	unsigned val = edma_read(ctlr, offset);
134 	val &= and;
135 	val |= or;
136 	edma_write(ctlr, offset, val);
137 }
138 static inline void edma_and(unsigned ctlr, int offset, unsigned and)
139 {
140 	unsigned val = edma_read(ctlr, offset);
141 	val &= and;
142 	edma_write(ctlr, offset, val);
143 }
144 static inline void edma_or(unsigned ctlr, int offset, unsigned or)
145 {
146 	unsigned val = edma_read(ctlr, offset);
147 	val |= or;
148 	edma_write(ctlr, offset, val);
149 }
150 static inline unsigned int edma_read_array(unsigned ctlr, int offset, int i)
151 {
152 	return edma_read(ctlr, offset + (i << 2));
153 }
154 static inline void edma_write_array(unsigned ctlr, int offset, int i,
155 		unsigned val)
156 {
157 	edma_write(ctlr, offset + (i << 2), val);
158 }
159 static inline void edma_modify_array(unsigned ctlr, int offset, int i,
160 		unsigned and, unsigned or)
161 {
162 	edma_modify(ctlr, offset + (i << 2), and, or);
163 }
164 static inline void edma_or_array(unsigned ctlr, int offset, int i, unsigned or)
165 {
166 	edma_or(ctlr, offset + (i << 2), or);
167 }
168 static inline void edma_or_array2(unsigned ctlr, int offset, int i, int j,
169 		unsigned or)
170 {
171 	edma_or(ctlr, offset + ((i*2 + j) << 2), or);
172 }
173 static inline void edma_write_array2(unsigned ctlr, int offset, int i, int j,
174 		unsigned val)
175 {
176 	edma_write(ctlr, offset + ((i*2 + j) << 2), val);
177 }
178 static inline unsigned int edma_shadow0_read(unsigned ctlr, int offset)
179 {
180 	return edma_read(ctlr, EDMA_SHADOW0 + offset);
181 }
182 static inline unsigned int edma_shadow0_read_array(unsigned ctlr, int offset,
183 		int i)
184 {
185 	return edma_read(ctlr, EDMA_SHADOW0 + offset + (i << 2));
186 }
187 static inline void edma_shadow0_write(unsigned ctlr, int offset, unsigned val)
188 {
189 	edma_write(ctlr, EDMA_SHADOW0 + offset, val);
190 }
191 static inline void edma_shadow0_write_array(unsigned ctlr, int offset, int i,
192 		unsigned val)
193 {
194 	edma_write(ctlr, EDMA_SHADOW0 + offset + (i << 2), val);
195 }
196 static inline unsigned int edma_parm_read(unsigned ctlr, int offset,
197 		int param_no)
198 {
199 	return edma_read(ctlr, EDMA_PARM + offset + (param_no << 5));
200 }
201 static inline void edma_parm_write(unsigned ctlr, int offset, int param_no,
202 		unsigned val)
203 {
204 	edma_write(ctlr, EDMA_PARM + offset + (param_no << 5), val);
205 }
206 static inline void edma_parm_modify(unsigned ctlr, int offset, int param_no,
207 		unsigned and, unsigned or)
208 {
209 	edma_modify(ctlr, EDMA_PARM + offset + (param_no << 5), and, or);
210 }
211 static inline void edma_parm_and(unsigned ctlr, int offset, int param_no,
212 		unsigned and)
213 {
214 	edma_and(ctlr, EDMA_PARM + offset + (param_no << 5), and);
215 }
216 static inline void edma_parm_or(unsigned ctlr, int offset, int param_no,
217 		unsigned or)
218 {
219 	edma_or(ctlr, EDMA_PARM + offset + (param_no << 5), or);
220 }
221 
222 static inline void set_bits(int offset, int len, unsigned long *p)
223 {
224 	for (; len > 0; len--)
225 		set_bit(offset + (len - 1), p);
226 }
227 
228 static inline void clear_bits(int offset, int len, unsigned long *p)
229 {
230 	for (; len > 0; len--)
231 		clear_bit(offset + (len - 1), p);
232 }
233 
234 /*****************************************************************************/
235 
236 /* actual number of DMA channels and slots on this silicon */
237 struct edma {
238 	/* how many dma resources of each type */
239 	unsigned	num_channels;
240 	unsigned	num_region;
241 	unsigned	num_slots;
242 	unsigned	num_tc;
243 	enum dma_event_q 	default_queue;
244 
245 	/* list of channels with no event trigger; terminated by "-1" */
246 	const s8	*noevent;
247 
248 	/* The edma_inuse bit for each PaRAM slot is clear unless the
249 	 * channel is in use ... by ARM or DSP, for QDMA, or whatever.
250 	 */
251 	DECLARE_BITMAP(edma_inuse, EDMA_MAX_PARAMENTRY);
252 
253 	/* The edma_unused bit for each channel is set unless the
254 	 * channel is in use on this platform; working that out relies
255 	 * on SoC-specific initialization code.
256 	 */
257 	DECLARE_BITMAP(edma_unused, EDMA_MAX_DMACH);
258 
259 	unsigned	irq_res_start;
260 	unsigned	irq_res_end;
261 
262 	struct dma_interrupt_data {
263 		void (*callback)(unsigned channel, unsigned short ch_status,
264 				void *data);
265 		void *data;
266 	} intr_data[EDMA_MAX_DMACH];
267 };
268 
269 static struct edma *edma_cc[EDMA_MAX_CC];
270 static int arch_num_cc;
271 
272 /* dummy param set used to (re)initialize parameter RAM slots */
273 static const struct edmacc_param dummy_paramset = {
274 	.link_bcntrld = 0xffff,
275 	.ccnt = 1,
276 };
277 
278 static const struct of_device_id edma_of_ids[] = {
279 	{ .compatible = "ti,edma3", },
280 	{}
281 };
282 
283 /*****************************************************************************/
284 
285 static void map_dmach_queue(unsigned ctlr, unsigned ch_no,
286 		enum dma_event_q queue_no)
287 {
288 	int bit = (ch_no & 0x7) * 4;
289 
290 	/* default to low priority queue */
291 	if (queue_no == EVENTQ_DEFAULT)
292 		queue_no = edma_cc[ctlr]->default_queue;
293 
294 	queue_no &= 7;
295 	edma_modify_array(ctlr, EDMA_DMAQNUM, (ch_no >> 3),
296 			~(0x7 << bit), queue_no << bit);
297 }
298 
299 static void __init assign_priority_to_queue(unsigned ctlr, int queue_no,
300 		int priority)
301 {
302 	int bit = queue_no * 4;
303 	edma_modify(ctlr, EDMA_QUEPRI, ~(0x7 << bit),
304 			((priority & 0x7) << bit));
305 }
306 
307 /**
308  * map_dmach_param - Maps channel number to param entry number
309  *
310  * This maps the DMA channel number to the param entry number. In
311  * other words, using the DMA channel mapping registers, a param entry
312  * can be mapped to any channel
313  *
314  * Callers are responsible for ensuring the channel mapping logic is
315  * included in that particular EDMA variant (e.g. dm646x)
316  *
317  */
318 static void __init map_dmach_param(unsigned ctlr)
319 {
320 	int i;
321 	for (i = 0; i < EDMA_MAX_DMACH; i++)
322 		edma_write_array(ctlr, EDMA_DCHMAP, i, (i << 5));
323 }
324 
325 static inline void
326 setup_dma_interrupt(unsigned lch,
327 	void (*callback)(unsigned channel, u16 ch_status, void *data),
328 	void *data)
329 {
330 	unsigned ctlr;
331 
332 	ctlr = EDMA_CTLR(lch);
333 	lch = EDMA_CHAN_SLOT(lch);
334 
335 	if (!callback)
336 		edma_shadow0_write_array(ctlr, SH_IECR, lch >> 5,
337 				BIT(lch & 0x1f));
338 
339 	edma_cc[ctlr]->intr_data[lch].callback = callback;
340 	edma_cc[ctlr]->intr_data[lch].data = data;
341 
342 	if (callback) {
343 		edma_shadow0_write_array(ctlr, SH_ICR, lch >> 5,
344 				BIT(lch & 0x1f));
345 		edma_shadow0_write_array(ctlr, SH_IESR, lch >> 5,
346 				BIT(lch & 0x1f));
347 	}
348 }
349 
350 static int irq2ctlr(int irq)
351 {
352 	if (irq >= edma_cc[0]->irq_res_start && irq <= edma_cc[0]->irq_res_end)
353 		return 0;
354 	else if (irq >= edma_cc[1]->irq_res_start &&
355 		irq <= edma_cc[1]->irq_res_end)
356 		return 1;
357 
358 	return -1;
359 }
360 
361 /******************************************************************************
362  *
363  * DMA interrupt handler
364  *
365  *****************************************************************************/
366 static irqreturn_t dma_irq_handler(int irq, void *data)
367 {
368 	int ctlr;
369 	u32 sh_ier;
370 	u32 sh_ipr;
371 	u32 bank;
372 
373 	ctlr = irq2ctlr(irq);
374 	if (ctlr < 0)
375 		return IRQ_NONE;
376 
377 	dev_dbg(data, "dma_irq_handler\n");
378 
379 	sh_ipr = edma_shadow0_read_array(ctlr, SH_IPR, 0);
380 	if (!sh_ipr) {
381 		sh_ipr = edma_shadow0_read_array(ctlr, SH_IPR, 1);
382 		if (!sh_ipr)
383 			return IRQ_NONE;
384 		sh_ier = edma_shadow0_read_array(ctlr, SH_IER, 1);
385 		bank = 1;
386 	} else {
387 		sh_ier = edma_shadow0_read_array(ctlr, SH_IER, 0);
388 		bank = 0;
389 	}
390 
391 	do {
392 		u32 slot;
393 		u32 channel;
394 
395 		dev_dbg(data, "IPR%d %08x\n", bank, sh_ipr);
396 
397 		slot = __ffs(sh_ipr);
398 		sh_ipr &= ~(BIT(slot));
399 
400 		if (sh_ier & BIT(slot)) {
401 			channel = (bank << 5) | slot;
402 			/* Clear the corresponding IPR bits */
403 			edma_shadow0_write_array(ctlr, SH_ICR, bank,
404 					BIT(slot));
405 			if (edma_cc[ctlr]->intr_data[channel].callback)
406 				edma_cc[ctlr]->intr_data[channel].callback(
407 					channel, EDMA_DMA_COMPLETE,
408 					edma_cc[ctlr]->intr_data[channel].data);
409 		}
410 	} while (sh_ipr);
411 
412 	edma_shadow0_write(ctlr, SH_IEVAL, 1);
413 	return IRQ_HANDLED;
414 }
415 
416 /******************************************************************************
417  *
418  * DMA error interrupt handler
419  *
420  *****************************************************************************/
421 static irqreturn_t dma_ccerr_handler(int irq, void *data)
422 {
423 	int i;
424 	int ctlr;
425 	unsigned int cnt = 0;
426 
427 	ctlr = irq2ctlr(irq);
428 	if (ctlr < 0)
429 		return IRQ_NONE;
430 
431 	dev_dbg(data, "dma_ccerr_handler\n");
432 
433 	if ((edma_read_array(ctlr, EDMA_EMR, 0) == 0) &&
434 	    (edma_read_array(ctlr, EDMA_EMR, 1) == 0) &&
435 	    (edma_read(ctlr, EDMA_QEMR) == 0) &&
436 	    (edma_read(ctlr, EDMA_CCERR) == 0))
437 		return IRQ_NONE;
438 
439 	while (1) {
440 		int j = -1;
441 		if (edma_read_array(ctlr, EDMA_EMR, 0))
442 			j = 0;
443 		else if (edma_read_array(ctlr, EDMA_EMR, 1))
444 			j = 1;
445 		if (j >= 0) {
446 			dev_dbg(data, "EMR%d %08x\n", j,
447 					edma_read_array(ctlr, EDMA_EMR, j));
448 			for (i = 0; i < 32; i++) {
449 				int k = (j << 5) + i;
450 				if (edma_read_array(ctlr, EDMA_EMR, j) &
451 							BIT(i)) {
452 					/* Clear the corresponding EMR bits */
453 					edma_write_array(ctlr, EDMA_EMCR, j,
454 							BIT(i));
455 					/* Clear any SER */
456 					edma_shadow0_write_array(ctlr, SH_SECR,
457 								j, BIT(i));
458 					if (edma_cc[ctlr]->intr_data[k].
459 								callback) {
460 						edma_cc[ctlr]->intr_data[k].
461 						callback(k,
462 						EDMA_DMA_CC_ERROR,
463 						edma_cc[ctlr]->intr_data
464 						[k].data);
465 					}
466 				}
467 			}
468 		} else if (edma_read(ctlr, EDMA_QEMR)) {
469 			dev_dbg(data, "QEMR %02x\n",
470 				edma_read(ctlr, EDMA_QEMR));
471 			for (i = 0; i < 8; i++) {
472 				if (edma_read(ctlr, EDMA_QEMR) & BIT(i)) {
473 					/* Clear the corresponding IPR bits */
474 					edma_write(ctlr, EDMA_QEMCR, BIT(i));
475 					edma_shadow0_write(ctlr, SH_QSECR,
476 								BIT(i));
477 
478 					/* NOTE:  not reported!! */
479 				}
480 			}
481 		} else if (edma_read(ctlr, EDMA_CCERR)) {
482 			dev_dbg(data, "CCERR %08x\n",
483 				edma_read(ctlr, EDMA_CCERR));
484 			/* FIXME:  CCERR.BIT(16) ignored!  much better
485 			 * to just write CCERRCLR with CCERR value...
486 			 */
487 			for (i = 0; i < 8; i++) {
488 				if (edma_read(ctlr, EDMA_CCERR) & BIT(i)) {
489 					/* Clear the corresponding IPR bits */
490 					edma_write(ctlr, EDMA_CCERRCLR, BIT(i));
491 
492 					/* NOTE:  not reported!! */
493 				}
494 			}
495 		}
496 		if ((edma_read_array(ctlr, EDMA_EMR, 0) == 0) &&
497 		    (edma_read_array(ctlr, EDMA_EMR, 1) == 0) &&
498 		    (edma_read(ctlr, EDMA_QEMR) == 0) &&
499 		    (edma_read(ctlr, EDMA_CCERR) == 0))
500 			break;
501 		cnt++;
502 		if (cnt > 10)
503 			break;
504 	}
505 	edma_write(ctlr, EDMA_EEVAL, 1);
506 	return IRQ_HANDLED;
507 }
508 
509 static int reserve_contiguous_slots(int ctlr, unsigned int id,
510 				     unsigned int num_slots,
511 				     unsigned int start_slot)
512 {
513 	int i, j;
514 	unsigned int count = num_slots;
515 	int stop_slot = start_slot;
516 	DECLARE_BITMAP(tmp_inuse, EDMA_MAX_PARAMENTRY);
517 
518 	for (i = start_slot; i < edma_cc[ctlr]->num_slots; ++i) {
519 		j = EDMA_CHAN_SLOT(i);
520 		if (!test_and_set_bit(j, edma_cc[ctlr]->edma_inuse)) {
521 			/* Record our current beginning slot */
522 			if (count == num_slots)
523 				stop_slot = i;
524 
525 			count--;
526 			set_bit(j, tmp_inuse);
527 
528 			if (count == 0)
529 				break;
530 		} else {
531 			clear_bit(j, tmp_inuse);
532 
533 			if (id == EDMA_CONT_PARAMS_FIXED_EXACT) {
534 				stop_slot = i;
535 				break;
536 			} else {
537 				count = num_slots;
538 			}
539 		}
540 	}
541 
542 	/*
543 	 * We have to clear any bits that we set
544 	 * if we run out of parameter RAM slots, i.e. we do find a set
545 	 * of contiguous parameter RAM slots but do not find the exact number
546 	 * requested, as we may reach the total number of parameter RAM slots
547 	 */
548 	if (i == edma_cc[ctlr]->num_slots)
549 		stop_slot = i;
550 
551 	j = start_slot;
552 	for_each_set_bit_from(j, tmp_inuse, stop_slot)
553 		clear_bit(j, edma_cc[ctlr]->edma_inuse);
554 
555 	if (count)
556 		return -EBUSY;
557 
558 	for (j = i - num_slots + 1; j <= i; ++j)
559 		memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(j),
560 			&dummy_paramset, PARM_SIZE);
561 
562 	return EDMA_CTLR_CHAN(ctlr, i - num_slots + 1);
563 }
564 
565 static int prepare_unused_channel_list(struct device *dev, void *data)
566 {
567 	struct platform_device *pdev = to_platform_device(dev);
568 	int i, count, ctlr;
569 	struct of_phandle_args  dma_spec;
570 
571 	if (dev->of_node) {
572 		count = of_property_count_strings(dev->of_node, "dma-names");
573 		if (count < 0)
574 			return 0;
575 		for (i = 0; i < count; i++) {
576 			if (of_parse_phandle_with_args(dev->of_node, "dmas",
577 						       "#dma-cells", i,
578 						       &dma_spec))
579 				continue;
580 
581 			if (!of_match_node(edma_of_ids, dma_spec.np)) {
582 				of_node_put(dma_spec.np);
583 				continue;
584 			}
585 
586 			clear_bit(EDMA_CHAN_SLOT(dma_spec.args[0]),
587 				  edma_cc[0]->edma_unused);
588 			of_node_put(dma_spec.np);
589 		}
590 		return 0;
591 	}
592 
593 	/* For non-OF case */
594 	for (i = 0; i < pdev->num_resources; i++) {
595 		if ((pdev->resource[i].flags & IORESOURCE_DMA) &&
596 				(int)pdev->resource[i].start >= 0) {
597 			ctlr = EDMA_CTLR(pdev->resource[i].start);
598 			clear_bit(EDMA_CHAN_SLOT(pdev->resource[i].start),
599 				  edma_cc[ctlr]->edma_unused);
600 		}
601 	}
602 
603 	return 0;
604 }
605 
606 /*-----------------------------------------------------------------------*/
607 
608 static bool unused_chan_list_done;
609 
610 /* Resource alloc/free:  dma channels, parameter RAM slots */
611 
612 /**
613  * edma_alloc_channel - allocate DMA channel and paired parameter RAM
614  * @channel: specific channel to allocate; negative for "any unmapped channel"
615  * @callback: optional; to be issued on DMA completion or errors
616  * @data: passed to callback
617  * @eventq_no: an EVENTQ_* constant, used to choose which Transfer
618  *	Controller (TC) executes requests using this channel.  Use
619  *	EVENTQ_DEFAULT unless you really need a high priority queue.
620  *
621  * This allocates a DMA channel and its associated parameter RAM slot.
622  * The parameter RAM is initialized to hold a dummy transfer.
623  *
624  * Normal use is to pass a specific channel number as @channel, to make
625  * use of hardware events mapped to that channel.  When the channel will
626  * be used only for software triggering or event chaining, channels not
627  * mapped to hardware events (or mapped to unused events) are preferable.
628  *
629  * DMA transfers start from a channel using edma_start(), or by
630  * chaining.  When the transfer described in that channel's parameter RAM
631  * slot completes, that slot's data may be reloaded through a link.
632  *
633  * DMA errors are only reported to the @callback associated with the
634  * channel driving that transfer, but transfer completion callbacks can
635  * be sent to another channel under control of the TCC field in
636  * the option word of the transfer's parameter RAM set.  Drivers must not
637  * use DMA transfer completion callbacks for channels they did not allocate.
638  * (The same applies to TCC codes used in transfer chaining.)
639  *
640  * Returns the number of the channel, else negative errno.
641  */
642 int edma_alloc_channel(int channel,
643 		void (*callback)(unsigned channel, u16 ch_status, void *data),
644 		void *data,
645 		enum dma_event_q eventq_no)
646 {
647 	unsigned i, done = 0, ctlr = 0;
648 	int ret = 0;
649 
650 	if (!unused_chan_list_done) {
651 		/*
652 		 * Scan all the platform devices to find out the EDMA channels
653 		 * used and clear them in the unused list, making the rest
654 		 * available for ARM usage.
655 		 */
656 		ret = bus_for_each_dev(&platform_bus_type, NULL, NULL,
657 				prepare_unused_channel_list);
658 		if (ret < 0)
659 			return ret;
660 
661 		unused_chan_list_done = true;
662 	}
663 
664 	if (channel >= 0) {
665 		ctlr = EDMA_CTLR(channel);
666 		channel = EDMA_CHAN_SLOT(channel);
667 	}
668 
669 	if (channel < 0) {
670 		for (i = 0; i < arch_num_cc; i++) {
671 			channel = 0;
672 			for (;;) {
673 				channel = find_next_bit(edma_cc[i]->edma_unused,
674 						edma_cc[i]->num_channels,
675 						channel);
676 				if (channel == edma_cc[i]->num_channels)
677 					break;
678 				if (!test_and_set_bit(channel,
679 						edma_cc[i]->edma_inuse)) {
680 					done = 1;
681 					ctlr = i;
682 					break;
683 				}
684 				channel++;
685 			}
686 			if (done)
687 				break;
688 		}
689 		if (!done)
690 			return -ENOMEM;
691 	} else if (channel >= edma_cc[ctlr]->num_channels) {
692 		return -EINVAL;
693 	} else if (test_and_set_bit(channel, edma_cc[ctlr]->edma_inuse)) {
694 		return -EBUSY;
695 	}
696 
697 	/* ensure access through shadow region 0 */
698 	edma_or_array2(ctlr, EDMA_DRAE, 0, channel >> 5, BIT(channel & 0x1f));
699 
700 	/* ensure no events are pending */
701 	edma_stop(EDMA_CTLR_CHAN(ctlr, channel));
702 	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(channel),
703 			&dummy_paramset, PARM_SIZE);
704 
705 	if (callback)
706 		setup_dma_interrupt(EDMA_CTLR_CHAN(ctlr, channel),
707 					callback, data);
708 
709 	map_dmach_queue(ctlr, channel, eventq_no);
710 
711 	return EDMA_CTLR_CHAN(ctlr, channel);
712 }
713 EXPORT_SYMBOL(edma_alloc_channel);
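
/*
 * Illustrative usage sketch (not part of this driver): a client would
 * typically pair edma_alloc_channel() with a completion callback roughly
 * as below.  The function and variable names are hypothetical; the
 * negative @channel simply requests "any unmapped channel" as described
 * in the kernel-doc above.
 *
 *	static void example_dma_done(unsigned channel, u16 ch_status, void *data)
 *	{
 *		if (ch_status == EDMA_DMA_COMPLETE)
 *			complete(data);
 *	}
 *
 *	channel = edma_alloc_channel(-1, example_dma_done, &done, EVENTQ_DEFAULT);
 *	if (channel < 0)
 *		return channel;
 *	... program the channel's PaRAM slot, then call edma_start(channel)
 */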
714 
715 
716 /**
717  * edma_free_channel - deallocate DMA channel
718  * @channel: dma channel returned from edma_alloc_channel()
719  *
720  * This deallocates the DMA channel and associated parameter RAM slot
721  * allocated by edma_alloc_channel().
722  *
723  * Callers are responsible for ensuring the channel is inactive, and
724  * will not be reactivated by linking, chaining, or software calls to
725  * edma_start().
726  */
727 void edma_free_channel(unsigned channel)
728 {
729 	unsigned ctlr;
730 
731 	ctlr = EDMA_CTLR(channel);
732 	channel = EDMA_CHAN_SLOT(channel);
733 
734 	if (channel >= edma_cc[ctlr]->num_channels)
735 		return;
736 
737 	setup_dma_interrupt(channel, NULL, NULL);
738 	/* REVISIT should probably take out of shadow region 0 */
739 
740 	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(channel),
741 			&dummy_paramset, PARM_SIZE);
742 	clear_bit(channel, edma_cc[ctlr]->edma_inuse);
743 }
744 EXPORT_SYMBOL(edma_free_channel);
745 
746 /**
747  * edma_alloc_slot - allocate DMA parameter RAM
748  * @slot: specific slot to allocate; negative for "any unused slot"
749  *
750  * This allocates a parameter RAM slot, initializing it to hold a
751  * dummy transfer.  Slots allocated using this routine have not been
752  * mapped to a hardware DMA channel, and will normally be used by
753  * linking to them from a slot associated with a DMA channel.
754  *
755  * Normal use is to pass EDMA_SLOT_ANY as the @slot, but specific
756  * slots may be allocated on behalf of DSP firmware.
757  *
758  * Returns the number of the slot, else negative errno.
759  */
760 int edma_alloc_slot(unsigned ctlr, int slot)
761 {
762 	if (!edma_cc[ctlr])
763 		return -EINVAL;
764 
765 	if (slot >= 0)
766 		slot = EDMA_CHAN_SLOT(slot);
767 
768 	if (slot < 0) {
769 		slot = edma_cc[ctlr]->num_channels;
770 		for (;;) {
771 			slot = find_next_zero_bit(edma_cc[ctlr]->edma_inuse,
772 					edma_cc[ctlr]->num_slots, slot);
773 			if (slot == edma_cc[ctlr]->num_slots)
774 				return -ENOMEM;
775 			if (!test_and_set_bit(slot, edma_cc[ctlr]->edma_inuse))
776 				break;
777 		}
778 	} else if (slot < edma_cc[ctlr]->num_channels ||
779 			slot >= edma_cc[ctlr]->num_slots) {
780 		return -EINVAL;
781 	} else if (test_and_set_bit(slot, edma_cc[ctlr]->edma_inuse)) {
782 		return -EBUSY;
783 	}
784 
785 	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
786 			&dummy_paramset, PARM_SIZE);
787 
788 	return EDMA_CTLR_CHAN(ctlr, slot);
789 }
790 EXPORT_SYMBOL(edma_alloc_slot);
791 
792 /**
793  * edma_free_slot - deallocate DMA parameter RAM
794  * @slot: parameter RAM slot returned from edma_alloc_slot()
795  *
796  * This deallocates the parameter RAM slot allocated by edma_alloc_slot().
797  * Callers are responsible for ensuring the slot is inactive, and will
798  * not be activated.
799  */
800 void edma_free_slot(unsigned slot)
801 {
802 	unsigned ctlr;
803 
804 	ctlr = EDMA_CTLR(slot);
805 	slot = EDMA_CHAN_SLOT(slot);
806 
807 	if (slot < edma_cc[ctlr]->num_channels ||
808 		slot >= edma_cc[ctlr]->num_slots)
809 		return;
810 
811 	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
812 			&dummy_paramset, PARM_SIZE);
813 	clear_bit(slot, edma_cc[ctlr]->edma_inuse);
814 }
815 EXPORT_SYMBOL(edma_free_slot);
816 
817 
818 /**
819  * edma_alloc_cont_slots - allocate contiguous parameter RAM slots
820  * The API will return the starting slot of the requested set of
821  * contiguous parameter RAM slots
822  *
823  * @id: can only be EDMA_CONT_PARAMS_ANY or EDMA_CONT_PARAMS_FIXED_EXACT
824  * or EDMA_CONT_PARAMS_FIXED_NOT_EXACT
825  * @count: number of contiguous Parameter RAM slots
826  * @slot: the starting Parameter RAM slot that should be passed if @id
827  * is EDMA_CONT_PARAMS_FIXED_EXACT or EDMA_CONT_PARAMS_FIXED_NOT_EXACT
828  *
829  * If id is EDMA_CONT_PARAMS_ANY then the API starts looking for a set of
830  * contiguous Parameter RAM slots from parameter RAM 64 in the case of
831  * DaVinci SOCs and 32 in the case of DA8xx SOCs.
832  *
833  * If id is EDMA_CONT_PARAMS_FIXED_EXACT then the API starts looking for a
834  * set of contiguous parameter RAM slots from the "slot" that is passed as an
835  * argument to the API.
836  *
837  * If id is EDMA_CONT_PARAMS_FIXED_NOT_EXACT then the API initially
838  * starts looking for a set of contiguous parameter RAM slots from the "slot"
839  * that is passed as an argument to the API. On failure the API will try to
840  * find a set of contiguous Parameter RAM slots from the remaining Parameter
841  * RAM slots
842  */
843 int edma_alloc_cont_slots(unsigned ctlr, unsigned int id, int slot, int count)
844 {
845 	/*
846 	 * The start slot requested should be no less than
847 	 * the number of channels and less than the total number
848 	 * of slots
849 	 */
850 	if ((id != EDMA_CONT_PARAMS_ANY) &&
851 		(slot < edma_cc[ctlr]->num_channels ||
852 		slot >= edma_cc[ctlr]->num_slots))
853 		return -EINVAL;
854 
855 	/*
856 	 * The number of parameter RAM slots requested cannot be less than 1
857 	 * and cannot be more than the number of slots minus the number of
858 	 * channels
859 	 */
860 	if (count < 1 || count >
861 		(edma_cc[ctlr]->num_slots - edma_cc[ctlr]->num_channels))
862 		return -EINVAL;
863 
864 	switch (id) {
865 	case EDMA_CONT_PARAMS_ANY:
866 		return reserve_contiguous_slots(ctlr, id, count,
867 						 edma_cc[ctlr]->num_channels);
868 	case EDMA_CONT_PARAMS_FIXED_EXACT:
869 	case EDMA_CONT_PARAMS_FIXED_NOT_EXACT:
870 		return reserve_contiguous_slots(ctlr, id, count, slot);
871 	default:
872 		return -EINVAL;
873 	}
874 
875 }
876 EXPORT_SYMBOL(edma_alloc_cont_slots);
877 
878 /**
879  * edma_free_cont_slots - deallocate DMA parameter RAM slots
880  * @slot: first parameter RAM of a set of parameter RAM slots to be freed
881  * @count: the number of contiguous parameter RAM slots to be freed
882  *
883  * This deallocates the parameter RAM slots allocated by
884  * edma_alloc_cont_slots.
885  * Callers/applications need to keep track of sets of contiguous
886  * parameter RAM slots that have been allocated using the edma_alloc_cont_slots
887  * API.
888  * Callers are responsible for ensuring the slots are inactive, and will
889  * not be activated.
890  */
891 int edma_free_cont_slots(unsigned slot, int count)
892 {
893 	unsigned ctlr, slot_to_free;
894 	int i;
895 
896 	ctlr = EDMA_CTLR(slot);
897 	slot = EDMA_CHAN_SLOT(slot);
898 
899 	if (slot < edma_cc[ctlr]->num_channels ||
900 		slot >= edma_cc[ctlr]->num_slots ||
901 		count < 1)
902 		return -EINVAL;
903 
904 	for (i = slot; i < slot + count; ++i) {
905 		ctlr = EDMA_CTLR(i);
906 		slot_to_free = EDMA_CHAN_SLOT(i);
907 
908 		memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot_to_free),
909 			&dummy_paramset, PARM_SIZE);
910 		clear_bit(slot_to_free, edma_cc[ctlr]->edma_inuse);
911 	}
912 
913 	return 0;
914 }
915 EXPORT_SYMBOL(edma_free_cont_slots);
916 
917 /*-----------------------------------------------------------------------*/
918 
919 /* Parameter RAM operations (i) -- read/write partial slots */
920 
921 /**
922  * edma_set_src - set initial DMA source address in parameter RAM slot
923  * @slot: parameter RAM slot being configured
924  * @src_port: physical address of source (memory, controller FIFO, etc)
925  * @mode: INCR, except in very rare cases
926  * @width: ignored unless @mode is FIFO, else specifies the
927  *	width to use when addressing the fifo (e.g. W8BIT, W32BIT)
928  *
929  * Note that the source address is modified during the DMA transfer
930  * according to edma_set_src_index().
931  */
932 void edma_set_src(unsigned slot, dma_addr_t src_port,
933 				enum address_mode mode, enum fifo_width width)
934 {
935 	unsigned ctlr;
936 
937 	ctlr = EDMA_CTLR(slot);
938 	slot = EDMA_CHAN_SLOT(slot);
939 
940 	if (slot < edma_cc[ctlr]->num_slots) {
941 		unsigned int i = edma_parm_read(ctlr, PARM_OPT, slot);
942 
943 		if (mode) {
944 			/* set SAM and program FWID */
945 			i = (i & ~(EDMA_FWID)) | (SAM | ((width & 0x7) << 8));
946 		} else {
947 			/* clear SAM */
948 			i &= ~SAM;
949 		}
950 		edma_parm_write(ctlr, PARM_OPT, slot, i);
951 
952 		/* set the source port address
953 		   in source register of param structure */
954 		edma_parm_write(ctlr, PARM_SRC, slot, src_port);
955 	}
956 }
957 EXPORT_SYMBOL(edma_set_src);
958 
959 /**
960  * edma_set_dest - set initial DMA destination address in parameter RAM slot
961  * @slot: parameter RAM slot being configured
962  * @dest_port: physical address of destination (memory, controller FIFO, etc)
963  * @mode: INCR, except in very rare cases
964  * @width: ignored unless @mode is FIFO, else specifies the
965  *	width to use when addressing the fifo (e.g. W8BIT, W32BIT)
966  *
967  * Note that the destination address is modified during the DMA transfer
968  * according to edma_set_dest_index().
969  */
970 void edma_set_dest(unsigned slot, dma_addr_t dest_port,
971 				 enum address_mode mode, enum fifo_width width)
972 {
973 	unsigned ctlr;
974 
975 	ctlr = EDMA_CTLR(slot);
976 	slot = EDMA_CHAN_SLOT(slot);
977 
978 	if (slot < edma_cc[ctlr]->num_slots) {
979 		unsigned int i = edma_parm_read(ctlr, PARM_OPT, slot);
980 
981 		if (mode) {
982 			/* set DAM and program FWID */
983 			i = (i & ~(EDMA_FWID)) | (DAM | ((width & 0x7) << 8));
984 		} else {
985 			/* clear DAM */
986 			i &= ~DAM;
987 		}
988 		edma_parm_write(ctlr, PARM_OPT, slot, i);
989 		/* set the destination port address
990 		   in dest register of param structure */
991 		edma_parm_write(ctlr, PARM_DST, slot, dest_port);
992 	}
993 }
994 EXPORT_SYMBOL(edma_set_dest);
995 
996 /**
997  * edma_get_position - returns the current transfer point
998  * @slot: parameter RAM slot being examined
999  * @dst:  true selects the dest position, false the source
1000  *
1001  * Returns the position of the current active slot
1002  */
1003 dma_addr_t edma_get_position(unsigned slot, bool dst)
1004 {
1005 	u32 offs, ctlr = EDMA_CTLR(slot);
1006 
1007 	slot = EDMA_CHAN_SLOT(slot);
1008 
1009 	offs = PARM_OFFSET(slot);
1010 	offs += dst ? PARM_DST : PARM_SRC;
1011 
1012 	return edma_read(ctlr, offs);
1013 }
1014 
1015 /**
1016  * edma_set_src_index - configure DMA source address indexing
1017  * @slot: parameter RAM slot being configured
1018  * @src_bidx: byte offset between source arrays in a frame
1019  * @src_cidx: byte offset between source frames in a block
1020  *
1021  * Offsets are specified to support either contiguous or discontiguous
1022  * memory transfers, or repeated access to a hardware register, as needed.
1023  * When accessing hardware registers, both offsets are normally zero.
1024  */
1025 void edma_set_src_index(unsigned slot, s16 src_bidx, s16 src_cidx)
1026 {
1027 	unsigned ctlr;
1028 
1029 	ctlr = EDMA_CTLR(slot);
1030 	slot = EDMA_CHAN_SLOT(slot);
1031 
1032 	if (slot < edma_cc[ctlr]->num_slots) {
1033 		edma_parm_modify(ctlr, PARM_SRC_DST_BIDX, slot,
1034 				0xffff0000, src_bidx);
1035 		edma_parm_modify(ctlr, PARM_SRC_DST_CIDX, slot,
1036 				0xffff0000, src_cidx);
1037 	}
1038 }
1039 EXPORT_SYMBOL(edma_set_src_index);
1040 
1041 /**
1042  * edma_set_dest_index - configure DMA destination address indexing
1043  * @slot: parameter RAM slot being configured
1044  * @dest_bidx: byte offset between destination arrays in a frame
1045  * @dest_cidx: byte offset between destination frames in a block
1046  *
1047  * Offsets are specified to support either contiguous or discontiguous
1048  * memory transfers, or repeated access to a hardware register, as needed.
1049  * When accessing hardware registers, both offsets are normally zero.
1050  */
1051 void edma_set_dest_index(unsigned slot, s16 dest_bidx, s16 dest_cidx)
1052 {
1053 	unsigned ctlr;
1054 
1055 	ctlr = EDMA_CTLR(slot);
1056 	slot = EDMA_CHAN_SLOT(slot);
1057 
1058 	if (slot < edma_cc[ctlr]->num_slots) {
1059 		edma_parm_modify(ctlr, PARM_SRC_DST_BIDX, slot,
1060 				0x0000ffff, dest_bidx << 16);
1061 		edma_parm_modify(ctlr, PARM_SRC_DST_CIDX, slot,
1062 				0x0000ffff, dest_cidx << 16);
1063 	}
1064 }
1065 EXPORT_SYMBOL(edma_set_dest_index);
1066 
1067 /**
1068  * edma_set_transfer_params - configure DMA transfer parameters
1069  * @slot: parameter RAM slot being configured
1070  * @acnt: how many bytes per array (at least one)
1071  * @bcnt: how many arrays per frame (at least one)
1072  * @ccnt: how many frames per block (at least one)
1073  * @bcnt_rld: used only for A-Synchronized transfers; this specifies
1074  *	the value to reload into bcnt when it decrements to zero
1075  * @sync_mode: ASYNC or ABSYNC
1076  *
1077  * See the EDMA3 documentation to understand how to configure and link
1078  * transfers using the fields in PaRAM slots.  If you are not doing it
1079  * all at once with edma_write_slot(), you will use this routine
1080  * plus two calls each for source and destination, setting the initial
1081  * address and saying how to index that address.
1082  *
1083  * An example of an A-Synchronized transfer is a serial link using a
1084  * single word shift register.  In that case, @acnt would be equal to
1085  * that word size; the serial controller issues a DMA synchronization
1086  * event to transfer each word, and memory access by the DMA transfer
1087  * controller will be word-at-a-time.
1088  *
1089  * An example of an AB-Synchronized transfer is a device using a FIFO.
1090  * In that case, @acnt equals the FIFO width and @bcnt equals its depth.
1091  * The controller with the FIFO issues DMA synchronization events when
1092  * the FIFO threshold is reached, and the DMA transfer controller will
1093  * transfer one frame to (or from) the FIFO.  It will probably use
1094  * efficient burst modes to access memory.
1095  */
1096 void edma_set_transfer_params(unsigned slot,
1097 		u16 acnt, u16 bcnt, u16 ccnt,
1098 		u16 bcnt_rld, enum sync_dimension sync_mode)
1099 {
1100 	unsigned ctlr;
1101 
1102 	ctlr = EDMA_CTLR(slot);
1103 	slot = EDMA_CHAN_SLOT(slot);
1104 
1105 	if (slot < edma_cc[ctlr]->num_slots) {
1106 		edma_parm_modify(ctlr, PARM_LINK_BCNTRLD, slot,
1107 				0x0000ffff, bcnt_rld << 16);
1108 		if (sync_mode == ASYNC)
1109 			edma_parm_and(ctlr, PARM_OPT, slot, ~SYNCDIM);
1110 		else
1111 			edma_parm_or(ctlr, PARM_OPT, slot, SYNCDIM);
1112 		/* Set the acount, bcount, ccount registers */
1113 		edma_parm_write(ctlr, PARM_A_B_CNT, slot, (bcnt << 16) | acnt);
1114 		edma_parm_write(ctlr, PARM_CCNT, slot, ccnt);
1115 	}
1116 }
1117 EXPORT_SYMBOL(edma_set_transfer_params);
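
/*
 * Illustrative sketch of the partial-slot helpers above, assuming a slot
 * already allocated as "slot" and hypothetical physical addresses
 * "buf_phys" (memory) and "fifo_phys" (a device FIFO): an A-synchronized
 * transfer of 256 one-byte samples could be set up as
 *
 *	edma_set_src(slot, buf_phys, INCR, W8BIT);
 *	edma_set_dest(slot, fifo_phys, INCR, W8BIT);
 *	edma_set_src_index(slot, 1, 0);
 *	edma_set_dest_index(slot, 0, 0);
 *	edma_set_transfer_params(slot, 1, 256, 1, 0, ASYNC);
 *
 * i.e. acnt = 1 byte per array, bcnt = 256 arrays, ccnt = 1 frame, with
 * the source advancing one byte per event and the destination fixed.
 */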
1118 
1119 /**
1120  * edma_link - link one parameter RAM slot to another
1121  * @from: parameter RAM slot originating the link
1122  * @to: parameter RAM slot which is the link target
1123  *
1124  * The originating slot should not be part of any active DMA transfer.
1125  */
1126 void edma_link(unsigned from, unsigned to)
1127 {
1128 	unsigned ctlr_from, ctlr_to;
1129 
1130 	ctlr_from = EDMA_CTLR(from);
1131 	from = EDMA_CHAN_SLOT(from);
1132 	ctlr_to = EDMA_CTLR(to);
1133 	to = EDMA_CHAN_SLOT(to);
1134 
1135 	if (from >= edma_cc[ctlr_from]->num_slots)
1136 		return;
1137 	if (to >= edma_cc[ctlr_to]->num_slots)
1138 		return;
1139 	edma_parm_modify(ctlr_from, PARM_LINK_BCNTRLD, from, 0xffff0000,
1140 				PARM_OFFSET(to));
1141 }
1142 EXPORT_SYMBOL(edma_link);
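
/*
 * Illustrative sketch: a "ping-pong" buffer scheme can be built by
 * linking two separately allocated slots to each other and to the
 * channel's own slot, so the hardware reloads the next half-buffer
 * automatically.  The names below are hypothetical.
 *
 *	ping = edma_alloc_slot(ctlr, EDMA_SLOT_ANY);
 *	pong = edma_alloc_slot(ctlr, EDMA_SLOT_ANY);
 *	edma_write_slot(ping, &ping_param);
 *	edma_write_slot(pong, &pong_param);
 *	edma_link(channel, ping);
 *	edma_link(ping, pong);
 *	edma_link(pong, ping);
 */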
1143 
1144 /**
1145  * edma_unlink - cut link from one parameter RAM slot
1146  * @from: parameter RAM slot originating the link
1147  *
1148  * The originating slot should not be part of any active DMA transfer.
1149  * Its link is set to 0xffff.
1150  */
1151 void edma_unlink(unsigned from)
1152 {
1153 	unsigned ctlr;
1154 
1155 	ctlr = EDMA_CTLR(from);
1156 	from = EDMA_CHAN_SLOT(from);
1157 
1158 	if (from >= edma_cc[ctlr]->num_slots)
1159 		return;
1160 	edma_parm_or(ctlr, PARM_LINK_BCNTRLD, from, 0xffff);
1161 }
1162 EXPORT_SYMBOL(edma_unlink);
1163 
1164 /*-----------------------------------------------------------------------*/
1165 
1166 /* Parameter RAM operations (ii) -- read/write whole parameter sets */
1167 
1168 /**
1169  * edma_write_slot - write parameter RAM data for slot
1170  * @slot: number of parameter RAM slot being modified
1171  * @param: data to be written into parameter RAM slot
1172  *
1173  * Use this to assign all parameters of a transfer at once.  This
1174  * allows more efficient setup of transfers than issuing multiple
1175  * calls to set up those parameters in small pieces, and provides
1176  * complete control over all transfer options.
1177  */
1178 void edma_write_slot(unsigned slot, const struct edmacc_param *param)
1179 {
1180 	unsigned ctlr;
1181 
1182 	ctlr = EDMA_CTLR(slot);
1183 	slot = EDMA_CHAN_SLOT(slot);
1184 
1185 	if (slot >= edma_cc[ctlr]->num_slots)
1186 		return;
1187 	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot), param,
1188 			PARM_SIZE);
1189 }
1190 EXPORT_SYMBOL(edma_write_slot);
1191 
1192 /**
1193  * edma_read_slot - read parameter RAM data from slot
1194  * @slot: number of parameter RAM slot being copied
1195  * @param: where to store copy of parameter RAM data
1196  *
1197  * Use this to read data from a parameter RAM slot, perhaps to
1198  * save them as a template for later reuse.
1199  */
1200 void edma_read_slot(unsigned slot, struct edmacc_param *param)
1201 {
1202 	unsigned ctlr;
1203 
1204 	ctlr = EDMA_CTLR(slot);
1205 	slot = EDMA_CHAN_SLOT(slot);
1206 
1207 	if (slot >= edma_cc[ctlr]->num_slots)
1208 		return;
1209 	memcpy_fromio(param, edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
1210 			PARM_SIZE);
1211 }
1212 EXPORT_SYMBOL(edma_read_slot);
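
/*
 * Illustrative sketch: edma_read_slot()/edma_write_slot() are handy for
 * treating a programmed slot as a template.  "slot" and "new_ccnt" are
 * hypothetical.
 *
 *	struct edmacc_param tmpl;
 *
 *	edma_read_slot(slot, &tmpl);
 *	tmpl.ccnt = new_ccnt;
 *	edma_write_slot(slot, &tmpl);
 */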
1213 
1214 /*-----------------------------------------------------------------------*/
1215 
1216 /* Various EDMA channel control operations */
1217 
1218 /**
1219  * edma_pause - pause dma on a channel
1220  * @channel: on which edma_start() has been called
1221  *
1222  * This temporarily disables EDMA hardware events on the specified channel,
1223  * preventing them from triggering new transfers on its behalf
1224  */
1225 void edma_pause(unsigned channel)
1226 {
1227 	unsigned ctlr;
1228 
1229 	ctlr = EDMA_CTLR(channel);
1230 	channel = EDMA_CHAN_SLOT(channel);
1231 
1232 	if (channel < edma_cc[ctlr]->num_channels) {
1233 		unsigned int mask = BIT(channel & 0x1f);
1234 
1235 		edma_shadow0_write_array(ctlr, SH_EECR, channel >> 5, mask);
1236 	}
1237 }
1238 EXPORT_SYMBOL(edma_pause);
1239 
1240 /**
1241  * edma_resume - resumes dma on a paused channel
1242  * @channel: on which edma_pause() has been called
1243  *
1244  * This re-enables EDMA hardware events on the specified channel.
1245  */
1246 void edma_resume(unsigned channel)
1247 {
1248 	unsigned ctlr;
1249 
1250 	ctlr = EDMA_CTLR(channel);
1251 	channel = EDMA_CHAN_SLOT(channel);
1252 
1253 	if (channel < edma_cc[ctlr]->num_channels) {
1254 		unsigned int mask = BIT(channel & 0x1f);
1255 
1256 		edma_shadow0_write_array(ctlr, SH_EESR, channel >> 5, mask);
1257 	}
1258 }
1259 EXPORT_SYMBOL(edma_resume);
1260 
1261 int edma_trigger_channel(unsigned channel)
1262 {
1263 	unsigned ctlr;
1264 	unsigned int mask;
1265 
1266 	ctlr = EDMA_CTLR(channel);
1267 	channel = EDMA_CHAN_SLOT(channel);
1268 	mask = BIT(channel & 0x1f);
1269 
1270 	edma_shadow0_write_array(ctlr, SH_ESR, (channel >> 5), mask);
1271 
1272 	pr_debug("EDMA: ESR%d %08x\n", (channel >> 5),
1273 		 edma_shadow0_read_array(ctlr, SH_ESR, (channel >> 5)));
1274 	return 0;
1275 }
1276 EXPORT_SYMBOL(edma_trigger_channel);
1277 
1278 /**
1279  * edma_start - start dma on a channel
1280  * @channel: channel being activated
1281  *
1282  * Channels with event associations will be triggered by their hardware
1283  * events, and channels without such associations will be triggered by
1284  * software.  (At this writing there is no interface for using software
1285  * triggers except with channels that don't support hardware triggers.)
1286  *
1287  * Returns zero on success, else negative errno.
1288  */
1289 int edma_start(unsigned channel)
1290 {
1291 	unsigned ctlr;
1292 
1293 	ctlr = EDMA_CTLR(channel);
1294 	channel = EDMA_CHAN_SLOT(channel);
1295 
1296 	if (channel < edma_cc[ctlr]->num_channels) {
1297 		int j = channel >> 5;
1298 		unsigned int mask = BIT(channel & 0x1f);
1299 
1300 		/* EDMA channels without event association */
1301 		if (test_bit(channel, edma_cc[ctlr]->edma_unused)) {
1302 			pr_debug("EDMA: ESR%d %08x\n", j,
1303 				edma_shadow0_read_array(ctlr, SH_ESR, j));
1304 			edma_shadow0_write_array(ctlr, SH_ESR, j, mask);
1305 			return 0;
1306 		}
1307 
1308 		/* EDMA channel with event association */
1309 		pr_debug("EDMA: ER%d %08x\n", j,
1310 			edma_shadow0_read_array(ctlr, SH_ER, j));
1311 		/* Clear any pending event or error */
1312 		edma_write_array(ctlr, EDMA_ECR, j, mask);
1313 		edma_write_array(ctlr, EDMA_EMCR, j, mask);
1314 		/* Clear any SER */
1315 		edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
1316 		edma_shadow0_write_array(ctlr, SH_EESR, j, mask);
1317 		pr_debug("EDMA: EER%d %08x\n", j,
1318 			edma_shadow0_read_array(ctlr, SH_EER, j));
1319 		return 0;
1320 	}
1321 
1322 	return -EINVAL;
1323 }
1324 EXPORT_SYMBOL(edma_start);
1325 
1326 /**
1327  * edma_stop - stops dma on the channel passed
1328  * @channel: channel being deactivated
1329  *
1330  * Any active transfer on @channel is paused and
1331  * all pending hardware events are cleared.  The current transfer
1332  * may not be resumed, and the channel's Parameter RAM should be
1333  * reinitialized before being reused.
1334  */
1335 void edma_stop(unsigned channel)
1336 {
1337 	unsigned ctlr;
1338 
1339 	ctlr = EDMA_CTLR(channel);
1340 	channel = EDMA_CHAN_SLOT(channel);
1341 
1342 	if (channel < edma_cc[ctlr]->num_channels) {
1343 		int j = channel >> 5;
1344 		unsigned int mask = BIT(channel & 0x1f);
1345 
1346 		edma_shadow0_write_array(ctlr, SH_EECR, j, mask);
1347 		edma_shadow0_write_array(ctlr, SH_ECR, j, mask);
1348 		edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
1349 		edma_write_array(ctlr, EDMA_EMCR, j, mask);
1350 
1351 		pr_debug("EDMA: EER%d %08x\n", j,
1352 				edma_shadow0_read_array(ctlr, SH_EER, j));
1353 
1354 		/* REVISIT:  consider guarding against inappropriate event
1355 		 * chaining by overwriting with dummy_paramset.
1356 		 */
1357 	}
1358 }
1359 EXPORT_SYMBOL(edma_stop);
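
/*
 * Illustrative teardown sketch: as the kernel-doc above notes, callers
 * must quiesce a channel before releasing it.  "extra_slot" stands for
 * any separately allocated, linked slot and is hypothetical.
 *
 *	edma_stop(channel);
 *	edma_free_slot(extra_slot);
 *	edma_free_channel(channel);
 */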
1360 
1361 /******************************************************************************
1362  *
1363  * It cleans the PaRAM entry and brings EDMA back to its initial state if
1364  * media has been removed before EDMA has finished. It is useful for removable media.
1365  * Arguments:
1366  *      channel - channel number
1367  *
1368  * Return: zero on success, or corresponding error no on failure
1369  *
1370  * FIXME this should not be needed ... edma_stop() should suffice.
1371  *
1372  *****************************************************************************/
1373 
1374 void edma_clean_channel(unsigned channel)
1375 {
1376 	unsigned ctlr;
1377 
1378 	ctlr = EDMA_CTLR(channel);
1379 	channel = EDMA_CHAN_SLOT(channel);
1380 
1381 	if (channel < edma_cc[ctlr]->num_channels) {
1382 		int j = (channel >> 5);
1383 		unsigned int mask = BIT(channel & 0x1f);
1384 
1385 		pr_debug("EDMA: EMR%d %08x\n", j,
1386 				edma_read_array(ctlr, EDMA_EMR, j));
1387 		edma_shadow0_write_array(ctlr, SH_ECR, j, mask);
1388 		/* Clear the corresponding EMR bits */
1389 		edma_write_array(ctlr, EDMA_EMCR, j, mask);
1390 		/* Clear any SER */
1391 		edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
1392 		edma_write(ctlr, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0));
1393 	}
1394 }
1395 EXPORT_SYMBOL(edma_clean_channel);
1396 
1397 /*
1398  * edma_clear_event - clear an outstanding event on the DMA channel
1399  * Arguments:
1400  *	channel - channel number
1401  */
1402 void edma_clear_event(unsigned channel)
1403 {
1404 	unsigned ctlr;
1405 
1406 	ctlr = EDMA_CTLR(channel);
1407 	channel = EDMA_CHAN_SLOT(channel);
1408 
1409 	if (channel >= edma_cc[ctlr]->num_channels)
1410 		return;
1411 	if (channel < 32)
1412 		edma_write(ctlr, EDMA_ECR, BIT(channel));
1413 	else
1414 		edma_write(ctlr, EDMA_ECRH, BIT(channel - 32));
1415 }
1416 EXPORT_SYMBOL(edma_clear_event);
1417 
1418 /*
1419  * edma_assign_channel_eventq - move given channel to desired eventq
1420  * Arguments:
1421  *	channel - channel number
1422  *	eventq_no - queue to move the channel
1423  *
1424  * Can be used to move a channel to a selected event queue.
1425  */
1426 void edma_assign_channel_eventq(unsigned channel, enum dma_event_q eventq_no)
1427 {
1428 	unsigned ctlr;
1429 
1430 	ctlr = EDMA_CTLR(channel);
1431 	channel = EDMA_CHAN_SLOT(channel);
1432 
1433 	if (channel >= edma_cc[ctlr]->num_channels)
1434 		return;
1435 
1436 	/* default to low priority queue */
1437 	if (eventq_no == EVENTQ_DEFAULT)
1438 		eventq_no = edma_cc[ctlr]->default_queue;
1439 	if (eventq_no >= edma_cc[ctlr]->num_tc)
1440 		return;
1441 
1442 	map_dmach_queue(ctlr, channel, eventq_no);
1443 }
1444 EXPORT_SYMBOL(edma_assign_channel_eventq);
1445 
1446 static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata,
1447 			      struct edma *edma_cc, int cc_id)
1448 {
1449 	int i;
1450 	u32 value, cccfg;
1451 	s8 (*queue_priority_map)[2];
1452 
1453 	/* Decode the eDMA3 configuration from CCCFG register */
1454 	cccfg = edma_read(cc_id, EDMA_CCCFG);
1455 
1456 	value = GET_NUM_REGN(cccfg);
1457 	edma_cc->num_region = BIT(value);
1458 
1459 	value = GET_NUM_DMACH(cccfg);
1460 	edma_cc->num_channels = BIT(value + 1);
1461 
1462 	value = GET_NUM_PAENTRY(cccfg);
1463 	edma_cc->num_slots = BIT(value + 4);
1464 
1465 	value = GET_NUM_EVQUE(cccfg);
1466 	edma_cc->num_tc = value + 1;
1467 
1468 	dev_dbg(dev, "eDMA3 CC%d HW configuration (cccfg: 0x%08x):\n", cc_id,
1469 		cccfg);
1470 	dev_dbg(dev, "num_region: %u\n", edma_cc->num_region);
1471 	dev_dbg(dev, "num_channel: %u\n", edma_cc->num_channels);
1472 	dev_dbg(dev, "num_slot: %u\n", edma_cc->num_slots);
1473 	dev_dbg(dev, "num_tc: %u\n", edma_cc->num_tc);
1474 
1475 	/* Nothing needs to be done if the queue priority mapping is provided */
1476 	if (pdata->queue_priority_mapping)
1477 		return 0;
1478 
1479 	/*
1480 	 * Configure TC/queue priority as follows:
1481 	 * Q0 - priority 0
1482 	 * Q1 - priority 1
1483 	 * Q2 - priority 2
1484 	 * ...
1485 	 * The meaning of priority numbers: 0 highest priority, 7 lowest
1486 	 * priority. So Q0 is the highest priority queue and the last queue has
1487 	 * the lowest priority.
1488 	 */
1489 	queue_priority_map = devm_kzalloc(dev,
1490 					  (edma_cc->num_tc + 1) * sizeof(s8),
1491 					  GFP_KERNEL);
1492 	if (!queue_priority_map)
1493 		return -ENOMEM;
1494 
1495 	for (i = 0; i < edma_cc->num_tc; i++) {
1496 		queue_priority_map[i][0] = i;
1497 		queue_priority_map[i][1] = i;
1498 	}
1499 	queue_priority_map[i][0] = -1;
1500 	queue_priority_map[i][1] = -1;
1501 
1502 	pdata->queue_priority_mapping = queue_priority_map;
1503 	/* Default queue has the lowest priority */
1504 	pdata->default_queue = i - 1;
1505 
1506 	return 0;
1507 }
1508 
1509 #if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_DMADEVICES)
1510 
1511 static int edma_xbar_event_map(struct device *dev, struct device_node *node,
1512 			       struct edma_soc_info *pdata, size_t sz)
1513 {
1514 	const char pname[] = "ti,edma-xbar-event-map";
1515 	struct resource res;
1516 	void __iomem *xbar;
1517 	s16 (*xbar_chans)[2];
1518 	size_t nelm = sz / sizeof(s16);
1519 	u32 shift, offset, mux;
1520 	int ret, i;
1521 
1522 	xbar_chans = devm_kzalloc(dev, (nelm + 2) * sizeof(s16), GFP_KERNEL);
1523 	if (!xbar_chans)
1524 		return -ENOMEM;
1525 
1526 	ret = of_address_to_resource(node, 1, &res);
1527 	if (ret)
1528 		return -ENOMEM;
1529 
1530 	xbar = devm_ioremap(dev, res.start, resource_size(&res));
1531 	if (!xbar)
1532 		return -ENOMEM;
1533 
1534 	ret = of_property_read_u16_array(node, pname, (u16 *)xbar_chans, nelm);
1535 	if (ret)
1536 		return -EIO;
1537 
1538 	/* Invalidate last entry for the other user of this mess */
1539 	nelm >>= 1;
1540 	xbar_chans[nelm][0] = xbar_chans[nelm][1] = -1;
1541 
1542 	for (i = 0; i < nelm; i++) {
1543 		shift = (xbar_chans[i][1] & 0x03) << 3;
1544 		offset = xbar_chans[i][1] & 0xfffffffc;
1545 		mux = readl(xbar + offset);
1546 		mux &= ~(0xff << shift);
1547 		mux |= xbar_chans[i][0] << shift;
1548 		writel(mux, (xbar + offset));
1549 	}
1550 
1551 	pdata->xbar_chans = (const s16 (*)[2]) xbar_chans;
1552 	return 0;
1553 }
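
/*
 * For reference, the property parsed above is a flat list of
 * (DMA channel, crossbar event) pairs; a device tree fragment might look
 * roughly like this (values hypothetical):
 *
 *	ti,edma-xbar-event-map = /bits/ 16 <1 12
 *					    2 13>;
 */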
1554 
1555 static int edma_of_parse_dt(struct device *dev,
1556 			    struct device_node *node,
1557 			    struct edma_soc_info *pdata)
1558 {
1559 	int ret = 0;
1560 	struct property *prop;
1561 	size_t sz;
1562 	struct edma_rsv_info *rsv_info;
1563 
1564 	rsv_info = devm_kzalloc(dev, sizeof(struct edma_rsv_info), GFP_KERNEL);
1565 	if (!rsv_info)
1566 		return -ENOMEM;
1567 	pdata->rsv = rsv_info;
1568 
1569 	prop = of_find_property(node, "ti,edma-xbar-event-map", &sz);
1570 	if (prop)
1571 		ret = edma_xbar_event_map(dev, node, pdata, sz);
1572 
1573 	return ret;
1574 }
1575 
1576 static struct of_dma_filter_info edma_filter_info = {
1577 	.filter_fn = edma_filter_fn,
1578 };
1579 
1580 static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
1581 						      struct device_node *node)
1582 {
1583 	struct edma_soc_info *info;
1584 	int ret;
1585 
1586 	info = devm_kzalloc(dev, sizeof(struct edma_soc_info), GFP_KERNEL);
1587 	if (!info)
1588 		return ERR_PTR(-ENOMEM);
1589 
1590 	ret = edma_of_parse_dt(dev, node, info);
1591 	if (ret)
1592 		return ERR_PTR(ret);
1593 
1594 	dma_cap_set(DMA_SLAVE, edma_filter_info.dma_cap);
1595 	dma_cap_set(DMA_CYCLIC, edma_filter_info.dma_cap);
1596 	of_dma_controller_register(dev->of_node, of_dma_simple_xlate,
1597 				   &edma_filter_info);
1598 
1599 	return info;
1600 }
1601 #else
1602 static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
1603 						      struct device_node *node)
1604 {
1605 	return ERR_PTR(-ENOSYS);
1606 }
1607 #endif
1608 
1609 static int edma_probe(struct platform_device *pdev)
1610 {
1611 	struct edma_soc_info	**info = pdev->dev.platform_data;
1612 	struct edma_soc_info    *ninfo[EDMA_MAX_CC] = {NULL};
1613 	s8		(*queue_priority_mapping)[2];
1614 	int			i, j, off, ln, found = 0;
1615 	int			status = -1;
1616 	const s16		(*rsv_chans)[2];
1617 	const s16		(*rsv_slots)[2];
1618 	const s16		(*xbar_chans)[2];
1619 	int			irq[EDMA_MAX_CC] = {0, 0};
1620 	int			err_irq[EDMA_MAX_CC] = {0, 0};
1621 	struct resource		*r[EDMA_MAX_CC] = {NULL};
1622 	struct resource		res[EDMA_MAX_CC];
1623 	char			res_name[10];
1624 	struct device_node	*node = pdev->dev.of_node;
1625 	struct device		*dev = &pdev->dev;
1626 	int			ret;
1627 	struct platform_device_info edma_dev_info = {
1628 		.name = "edma-dma-engine",
1629 		.dma_mask = DMA_BIT_MASK(32),
1630 		.parent = &pdev->dev,
1631 	};
1632 
1633 	if (node) {
1634 		/* Check if this is a second instance registered */
1635 		if (arch_num_cc) {
1636 			dev_err(dev, "only one EDMA instance is supported via DT\n");
1637 			return -ENODEV;
1638 		}
1639 
1640 		ninfo[0] = edma_setup_info_from_dt(dev, node);
1641 		if (IS_ERR(ninfo[0])) {
1642 			dev_err(dev, "failed to get DT data\n");
1643 			return PTR_ERR(ninfo[0]);
1644 		}
1645 
1646 		info = ninfo;
1647 	}
1648 
1649 	if (!info)
1650 		return -ENODEV;
1651 
1652 	pm_runtime_enable(dev);
1653 	ret = pm_runtime_get_sync(dev);
1654 	if (ret < 0) {
1655 		dev_err(dev, "pm_runtime_get_sync() failed\n");
1656 		return ret;
1657 	}
1658 
1659 	for (j = 0; j < EDMA_MAX_CC; j++) {
1660 		if (!info[j]) {
1661 			if (!found)
1662 				return -ENODEV;
1663 			break;
1664 		}
1665 		if (node) {
1666 			ret = of_address_to_resource(node, j, &res[j]);
1667 			if (!ret)
1668 				r[j] = &res[j];
1669 		} else {
1670 			sprintf(res_name, "edma_cc%d", j);
1671 			r[j] = platform_get_resource_byname(pdev,
1672 						IORESOURCE_MEM,
1673 						res_name);
1674 		}
1675 		if (!r[j]) {
1676 			if (found)
1677 				break;
1678 			else
1679 				return -ENODEV;
1680 		} else {
1681 			found = 1;
1682 		}
1683 
1684 		edmacc_regs_base[j] = devm_ioremap_resource(&pdev->dev, r[j]);
1685 		if (IS_ERR(edmacc_regs_base[j]))
1686 			return PTR_ERR(edmacc_regs_base[j]);
1687 
1688 		edma_cc[j] = devm_kzalloc(&pdev->dev, sizeof(struct edma),
1689 					  GFP_KERNEL);
1690 		if (!edma_cc[j])
1691 			return -ENOMEM;
1692 
1693 		/* Get eDMA3 configuration from IP */
1694 		ret = edma_setup_from_hw(dev, info[j], edma_cc[j], j);
1695 		if (ret)
1696 			return ret;
1697 
1698 		edma_cc[j]->default_queue = info[j]->default_queue;
1699 
1700 		dev_dbg(&pdev->dev, "DMA REG BASE ADDR=%p\n",
1701 			edmacc_regs_base[j]);
1702 
1703 		for (i = 0; i < edma_cc[j]->num_slots; i++)
1704 			memcpy_toio(edmacc_regs_base[j] + PARM_OFFSET(i),
1705 					&dummy_paramset, PARM_SIZE);
1706 
1707 		/* Mark all channels as unused */
1708 		memset(edma_cc[j]->edma_unused, 0xff,
1709 			sizeof(edma_cc[j]->edma_unused));
1710 
1711 		if (info[j]->rsv) {
1712 
1713 			/* Clear the reserved channels in unused list */
1714 			rsv_chans = info[j]->rsv->rsv_chans;
1715 			if (rsv_chans) {
1716 				for (i = 0; rsv_chans[i][0] != -1; i++) {
1717 					off = rsv_chans[i][0];
1718 					ln = rsv_chans[i][1];
1719 					clear_bits(off, ln,
1720 						  edma_cc[j]->edma_unused);
1721 				}
1722 			}
1723 
1724 			/* Set the reserved slots in inuse list */
1725 			rsv_slots = info[j]->rsv->rsv_slots;
1726 			if (rsv_slots) {
1727 				for (i = 0; rsv_slots[i][0] != -1; i++) {
1728 					off = rsv_slots[i][0];
1729 					ln = rsv_slots[i][1];
1730 					set_bits(off, ln,
1731 						edma_cc[j]->edma_inuse);
1732 				}
1733 			}
1734 		}
1735 
1736 		/* Clear the xbar mapped channels in unused list */
1737 		xbar_chans = info[j]->xbar_chans;
1738 		if (xbar_chans) {
1739 			for (i = 0; xbar_chans[i][1] != -1; i++) {
1740 				off = xbar_chans[i][1];
1741 				clear_bits(off, 1,
1742 					   edma_cc[j]->edma_unused);
1743 			}
1744 		}
1745 
1746 		if (node) {
1747 			irq[j] = irq_of_parse_and_map(node, 0);
1748 			err_irq[j] = irq_of_parse_and_map(node, 2);
1749 		} else {
1750 			char irq_name[10];
1751 
1752 			sprintf(irq_name, "edma%d", j);
1753 			irq[j] = platform_get_irq_byname(pdev, irq_name);
1754 
1755 			sprintf(irq_name, "edma%d_err", j);
1756 			err_irq[j] = platform_get_irq_byname(pdev, irq_name);
1757 		}
1758 		edma_cc[j]->irq_res_start = irq[j];
1759 		edma_cc[j]->irq_res_end = err_irq[j];
1760 
1761 		status = devm_request_irq(dev, irq[j], dma_irq_handler, 0,
1762 					  "edma", dev);
1763 		if (status < 0) {
1764 			dev_dbg(&pdev->dev,
1765 				"devm_request_irq %d failed --> %d\n",
1766 				irq[j], status);
1767 			return status;
1768 		}
1769 
1770 		status = devm_request_irq(dev, err_irq[j], dma_ccerr_handler, 0,
1771 					  "edma_error", dev);
1772 		if (status < 0) {
1773 			dev_dbg(&pdev->dev,
1774 				"devm_request_irq %d failed --> %d\n",
1775 				err_irq[j], status);
1776 			return status;
1777 		}
1778 
1779 		for (i = 0; i < edma_cc[j]->num_channels; i++)
1780 			map_dmach_queue(j, i, info[j]->default_queue);
1781 
1782 		queue_priority_mapping = info[j]->queue_priority_mapping;
1783 
1784 		/* Event queue priority mapping */
1785 		for (i = 0; queue_priority_mapping[i][0] != -1; i++)
1786 			assign_priority_to_queue(j,
1787 						queue_priority_mapping[i][0],
1788 						queue_priority_mapping[i][1]);
1789 
1790 		/* Map the channel to param entry if channel mapping logic
1791 		 * exists
1792 		 */
1793 		if (edma_read(j, EDMA_CCCFG) & CHMAP_EXIST)
1794 			map_dmach_param(j);
1795 
1796 		for (i = 0; i < edma_cc[j]->num_region; i++) {
1797 			edma_write_array2(j, EDMA_DRAE, i, 0, 0x0);
1798 			edma_write_array2(j, EDMA_DRAE, i, 1, 0x0);
1799 			edma_write_array(j, EDMA_QRAE, i, 0x0);
1800 		}
1801 		arch_num_cc++;
1802 
1803 		edma_dev_info.id = j;
1804 		platform_device_register_full(&edma_dev_info);
1805 	}
1806 
1807 	return 0;
1808 }
1809 
1810 static struct platform_driver edma_driver = {
1811 	.driver = {
1812 		.name	= "edma",
1813 		.of_match_table = edma_of_ids,
1814 	},
1815 	.probe = edma_probe,
1816 };
1817 
1818 static int __init edma_init(void)
1819 {
1820 	return platform_driver_probe(&edma_driver, edma_probe);
1821 }
1822 arch_initcall(edma_init);
1823 
1824