• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * linux/arch/arm/plat-omap/dma.c
3  *
4  * Copyright (C) 2003 - 2008 Nokia Corporation
5  * Author: Juha Yrjölä <juha.yrjola@nokia.com>
6  * DMA channel linking for 1610 by Samuel Ortiz <samuel.ortiz@nokia.com>
 * Graphics DMA and LCD DMA graphics transformations
8  * by Imre Deak <imre.deak@nokia.com>
9  * OMAP2/3 support Copyright (C) 2004-2007 Texas Instruments, Inc.
10  * Merged to support both OMAP1 and OMAP2 by Tony Lindgren <tony@atomide.com>
11  * Some functions based on earlier dma-omap.c Copyright (C) 2001 RidgeRun, Inc.
12  *
13  * Copyright (C) 2009 Texas Instruments
14  * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
15  *
16  * Support functions for the OMAP internal DMA channels.
17  *
18  * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
19  * Converted DMA library into DMA platform driver.
20  *	- G, Manjunath Kondaiah <manjugk@ti.com>
21  *
22  * This program is free software; you can redistribute it and/or modify
23  * it under the terms of the GNU General Public License version 2 as
24  * published by the Free Software Foundation.
25  *
26  */
27 
28 #include <linux/module.h>
29 #include <linux/init.h>
30 #include <linux/sched.h>
31 #include <linux/spinlock.h>
32 #include <linux/errno.h>
33 #include <linux/interrupt.h>
34 #include <linux/irq.h>
35 #include <linux/io.h>
36 #include <linux/slab.h>
37 #include <linux/delay.h>
38 
39 #include <linux/omap-dma.h>
40 
41 #ifdef CONFIG_ARCH_OMAP1
42 #include <mach/soc.h>
43 #endif
44 
45 /*
46  * MAX_LOGICAL_DMA_CH_COUNT: the maximum number of logical DMA
47  * channels that an instance of the SDMA IP block can support.  Used
48  * to size arrays.  (The actual maximum on a particular SoC may be less
49  * than this -- for example, OMAP1 SDMA instances only support 17 logical
50  * DMA channels.)
51  */
52 #define MAX_LOGICAL_DMA_CH_COUNT		32
53 
54 #undef DEBUG
55 
56 #ifndef CONFIG_ARCH_OMAP1
57 enum { DMA_CH_ALLOC_DONE, DMA_CH_PARAMS_SET_DONE, DMA_CH_STARTED,
58 	DMA_CH_QUEUED, DMA_CH_NOTSTARTED, DMA_CH_PAUSED, DMA_CH_LINK_ENABLED
59 };
60 
61 enum { DMA_CHAIN_STARTED, DMA_CHAIN_NOTSTARTED };
62 #endif
63 
64 #define OMAP_DMA_ACTIVE			0x01
65 #define OMAP2_DMA_CSR_CLEAR_MASK	0xffffffff
66 
67 #define OMAP_FUNC_MUX_ARM_BASE		(0xfffe1000 + 0xec)
68 
69 static struct omap_system_dma_plat_info *p;
70 static struct omap_dma_dev_attr *d;
71 static void omap_clear_dma(int lch);
72 static int omap_dma_set_prio_lch(int lch, unsigned char read_prio,
73 				 unsigned char write_prio);
74 static int enable_1510_mode;
75 static u32 errata;
76 
77 static struct omap_dma_global_context_registers {
78 	u32 dma_irqenable_l0;
79 	u32 dma_irqenable_l1;
80 	u32 dma_ocp_sysconfig;
81 	u32 dma_gcr;
82 } omap_dma_global_context;
83 
/*
 * Book-keeping for a chain of linked logical DMA channels (OMAP2+
 * chaining API).  dma_linked_lch below is indexed by chain id.
 */
struct dma_link_info {
	int *linked_dmach_q;	/* logical channels belonging to this chain */
	int no_of_lchs_linked;	/* number of entries in linked_dmach_q */

	int q_count;		/* transfers currently queued on the chain */
	int q_tail;		/* circular-queue tail index */
	int q_head;		/* circular-queue head index */

	int chain_state;	/* DMA_CHAIN_STARTED / DMA_CHAIN_NOTSTARTED */
	int chain_mode;		/* chain mode flag; presumably static vs dynamic chaining — confirm against the chaining API */

};

/* Per-chain state array; NULL until the chaining API allocates it. */
static struct dma_link_info *dma_linked_lch;
98 
99 #ifndef CONFIG_ARCH_OMAP1
100 
/* Chain handling macros (OMAP2+ channel chaining, indexed by chain id) */

/* Reset a chain's circular transfer queue to the empty state. */
#define OMAP_DMA_CHAIN_QINIT(chain_id)					\
	do {								\
		dma_linked_lch[chain_id].q_head =			\
		dma_linked_lch[chain_id].q_tail =			\
		dma_linked_lch[chain_id].q_count = 0;			\
	} while (0)
/* True when every linked channel already has a queued transfer. */
#define OMAP_DMA_CHAIN_QFULL(chain_id)					\
		(dma_linked_lch[chain_id].no_of_lchs_linked ==		\
		dma_linked_lch[chain_id].q_count)
/*
 * NOTE(review): this macro is broken — the comparison is wrapped in a
 * do { } while (0) (and lacks a terminating ';' inside), so its result
 * is unusable as a predicate and the expansion would not even compile.
 * It appears to be unused; if needed it should mirror the expression
 * form of OMAP_DMA_CHAIN_QFULL above.
 */
#define OMAP_DMA_CHAIN_QLAST(chain_id)					\
	do {								\
		((dma_linked_lch[chain_id].no_of_lchs_linked-1) ==	\
		dma_linked_lch[chain_id].q_count)			\
	} while (0)
/* True when the chain has no queued transfers. */
#define OMAP_DMA_CHAIN_QEMPTY(chain_id)					\
		(0 == dma_linked_lch[chain_id].q_count)
/* Advance a queue index, wrapping at the number of linked channels. */
#define __OMAP_DMA_CHAIN_INCQ(end)					\
	((end) = ((end)+1) % dma_linked_lch[chain_id].no_of_lchs_linked)
/* Pop: advance the head index and drop the queued-transfer count. */
#define OMAP_DMA_CHAIN_INCQHEAD(chain_id)				\
	do {								\
		__OMAP_DMA_CHAIN_INCQ(dma_linked_lch[chain_id].q_head);	\
		dma_linked_lch[chain_id].q_count--;			\
	} while (0)

/* Push: advance the tail index and bump the queued-transfer count. */
#define OMAP_DMA_CHAIN_INCQTAIL(chain_id)				\
	do {								\
		__OMAP_DMA_CHAIN_INCQ(dma_linked_lch[chain_id].q_tail);	\
		dma_linked_lch[chain_id].q_count++; \
	} while (0)
131 #endif
132 
133 static int dma_lch_count;
134 static int dma_chan_count;
135 static int omap_dma_reserve_channels;
136 
137 static spinlock_t dma_chan_lock;
138 static struct omap_dma_lch *dma_chan;
139 
140 static inline void disable_lnk(int lch);
141 static void omap_disable_channel_irq(int lch);
142 static inline void omap_enable_channel_irq(int lch);
143 
144 #define REVISIT_24XX()		printk(KERN_ERR "FIXME: no %s on 24xx\n", \
145 						__func__);
146 
147 #ifdef CONFIG_ARCH_OMAP15XX
/*
 * Returns 1 if the DMA module is in OMAP1510-compatible mode, 0 otherwise.
 * Simply reflects the file-scope enable_1510_mode flag (set elsewhere in
 * this driver, outside this view).
 */
static int omap_dma_in_1510_mode(void)
{
	return enable_1510_mode;
}
153 #else
154 #define omap_dma_in_1510_mode()		0
155 #endif
156 
157 #ifdef CONFIG_ARCH_OMAP1
set_gdma_dev(int req,int dev)158 static inline void set_gdma_dev(int req, int dev)
159 {
160 	u32 reg = OMAP_FUNC_MUX_ARM_BASE + ((req - 1) / 5) * 4;
161 	int shift = ((req - 1) % 5) * 6;
162 	u32 l;
163 
164 	l = omap_readl(reg);
165 	l &= ~(0x3f << shift);
166 	l |= (dev - 1) << shift;
167 	omap_writel(l, reg);
168 }
169 #else
170 #define set_gdma_dev(req, dev)	do {} while (0)
171 #define omap_readl(reg)		0
172 #define omap_writel(val, reg)	do {} while (0)
173 #endif
174 
175 #ifdef CONFIG_ARCH_OMAP1
/*
 * OMAP1: set the traffic-controller priority for the given destination
 * port.  @lch is unused here — priority lives in a per-port TC register,
 * not in the channel.  An unknown @dst_port hits BUG().
 */
void omap_set_dma_priority(int lch, int dst_port, int priority)
{
	unsigned long reg;
	u32 l;

	if (dma_omap1()) {
		switch (dst_port) {
		case OMAP_DMA_PORT_OCP_T1:	/* FFFECC00 */
			reg = OMAP_TC_OCPT1_PRIOR;
			break;
		case OMAP_DMA_PORT_OCP_T2:	/* FFFECCD0 */
			reg = OMAP_TC_OCPT2_PRIOR;
			break;
		case OMAP_DMA_PORT_EMIFF:	/* FFFECC08 */
			reg = OMAP_TC_EMIFF_PRIOR;
			break;
		case OMAP_DMA_PORT_EMIFS:	/* FFFECC04 */
			reg = OMAP_TC_EMIFS_PRIOR;
			break;
		default:
			BUG();
			return;
		}
		l = omap_readl(reg);
		l &= ~(0xf << 8);		/* priority field: bits 11:8 */
		l |= (priority & 0xf) << 8;
		omap_writel(l, reg);
	}
}
205 #endif
206 
207 #ifdef CONFIG_ARCH_OMAP2PLUS
omap_set_dma_priority(int lch,int dst_port,int priority)208 void omap_set_dma_priority(int lch, int dst_port, int priority)
209 {
210 	u32 ccr;
211 
212 	ccr = p->dma_read(CCR, lch);
213 	if (priority)
214 		ccr |= (1 << 6);
215 	else
216 		ccr &= ~(1 << 6);
217 	p->dma_write(ccr, CCR, lch);
218 }
219 #endif
220 EXPORT_SYMBOL(omap_set_dma_priority);
221 
/*
 * Program the transfer geometry and synchronization of logical channel
 * @lch: element width @data_type (CSDP bits 1:0), @elem_count elements
 * per frame (CEN), @frame_count frames (CFN), frame/block sync mode,
 * and the hardware request line @dma_trigger.  On OMAP2+,
 * @src_or_dst_synch selects source vs destination synchronization
 * (with an optional prefetch mode).
 */
void omap_set_dma_transfer_params(int lch, int data_type, int elem_count,
				  int frame_count, int sync_mode,
				  int dma_trigger, int src_or_dst_synch)
{
	u32 l;

	l = p->dma_read(CSDP, lch);
	l &= ~0x03;
	l |= data_type;		/* element width lives in CSDP bits 1:0 */
	p->dma_write(l, CSDP, lch);

	if (dma_omap1()) {
		u16 ccr;

		/* OMAP1: frame sync is CCR bit 5, block sync is CCR2 bit 2 */
		ccr = p->dma_read(CCR, lch);
		ccr &= ~(1 << 5);
		if (sync_mode == OMAP_DMA_SYNC_FRAME)
			ccr |= 1 << 5;
		p->dma_write(ccr, CCR, lch);

		ccr = p->dma_read(CCR2, lch);
		ccr &= ~(1 << 2);
		if (sync_mode == OMAP_DMA_SYNC_BLOCK)
			ccr |= 1 << 2;
		p->dma_write(ccr, CCR2, lch);
	}

	if (dma_omap2plus() && dma_trigger) {
		u32 val;

		val = p->dma_read(CCR, lch);

		/* DMA_SYNCHRO_CONTROL_UPPER depends on the channel number */
		val &= ~((1 << 23) | (3 << 19) | 0x1f);
		/* request-line bits >= 5 land at bits 20:19 (shift by 14) */
		val |= (dma_trigger & ~0x1f) << 14;
		val |= dma_trigger & 0x1f;	/* low 5 bits of request line */

		if (sync_mode & OMAP_DMA_SYNC_FRAME)
			val |= 1 << 5;
		else
			val &= ~(1 << 5);

		if (sync_mode & OMAP_DMA_SYNC_BLOCK)
			val |= 1 << 18;
		else
			val &= ~(1 << 18);

		if (src_or_dst_synch == OMAP_DMA_DST_SYNC_PREFETCH) {
			val &= ~(1 << 24);	/* dest synch */
			val |= (1 << 23);	/* Prefetch */
		} else if (src_or_dst_synch) {
			val |= 1 << 24;		/* source synch */
		} else {
			val &= ~(1 << 24);	/* dest synch */
		}
		p->dma_write(val, CCR, lch);
	}

	p->dma_write(elem_count, CEN, lch);
	p->dma_write(frame_count, CFN, lch);
}
283 EXPORT_SYMBOL(omap_set_dma_transfer_params);
284 
omap_set_dma_write_mode(int lch,enum omap_dma_write_mode mode)285 void omap_set_dma_write_mode(int lch, enum omap_dma_write_mode mode)
286 {
287 	if (dma_omap2plus()) {
288 		u32 csdp;
289 
290 		csdp = p->dma_read(CSDP, lch);
291 		csdp &= ~(0x3 << 16);
292 		csdp |= (mode << 16);
293 		p->dma_write(csdp, CSDP, lch);
294 	}
295 }
296 EXPORT_SYMBOL(omap_set_dma_write_mode);
297 
omap_set_dma_channel_mode(int lch,enum omap_dma_channel_mode mode)298 void omap_set_dma_channel_mode(int lch, enum omap_dma_channel_mode mode)
299 {
300 	if (dma_omap1() && !dma_omap15xx()) {
301 		u32 l;
302 
303 		l = p->dma_read(LCH_CTRL, lch);
304 		l &= ~0x7;
305 		l |= mode;
306 		p->dma_write(l, LCH_CTRL, lch);
307 	}
308 }
309 EXPORT_SYMBOL(omap_set_dma_channel_mode);
310 
311 /* Note that src_port is only for omap1 */
omap_set_dma_src_params(int lch,int src_port,int src_amode,unsigned long src_start,int src_ei,int src_fi)312 void omap_set_dma_src_params(int lch, int src_port, int src_amode,
313 			     unsigned long src_start,
314 			     int src_ei, int src_fi)
315 {
316 	u32 l;
317 
318 	if (dma_omap1()) {
319 		u16 w;
320 
321 		w = p->dma_read(CSDP, lch);
322 		w &= ~(0x1f << 2);
323 		w |= src_port << 2;
324 		p->dma_write(w, CSDP, lch);
325 	}
326 
327 	l = p->dma_read(CCR, lch);
328 	l &= ~(0x03 << 12);
329 	l |= src_amode << 12;
330 	p->dma_write(l, CCR, lch);
331 
332 	p->dma_write(src_start, CSSA, lch);
333 
334 	p->dma_write(src_ei, CSEI, lch);
335 	p->dma_write(src_fi, CSFI, lch);
336 }
337 EXPORT_SYMBOL(omap_set_dma_src_params);
338 
/*
 * Convenience wrapper: program transfer geometry plus source and
 * destination parameters for @lch from a single
 * struct omap_dma_channel_params, and the read/write priorities when
 * either one is non-zero.
 */
void omap_set_dma_params(int lch, struct omap_dma_channel_params *params)
{
	omap_set_dma_transfer_params(lch, params->data_type,
				     params->elem_count, params->frame_count,
				     params->sync_mode, params->trigger,
				     params->src_or_dst_synch);
	omap_set_dma_src_params(lch, params->src_port,
				params->src_amode, params->src_start,
				params->src_ei, params->src_fi);

	omap_set_dma_dest_params(lch, params->dst_port,
				 params->dst_amode, params->dst_start,
				 params->dst_ei, params->dst_fi);
	if (params->read_prio || params->write_prio)
		omap_dma_set_prio_lch(lch, params->read_prio,
				      params->write_prio);
}
356 EXPORT_SYMBOL(omap_set_dma_params);
357 
omap_set_dma_src_data_pack(int lch,int enable)358 void omap_set_dma_src_data_pack(int lch, int enable)
359 {
360 	u32 l;
361 
362 	l = p->dma_read(CSDP, lch);
363 	l &= ~(1 << 6);
364 	if (enable)
365 		l |= (1 << 6);
366 	p->dma_write(l, CSDP, lch);
367 }
368 EXPORT_SYMBOL(omap_set_dma_src_data_pack);
369 
/*
 * Set the source-side burst size (CSDP bits 8:7) for channel @lch.
 * The burst encodings differ between OMAP1 and OMAP2+; combinations a
 * given SoC cannot do fall through to BUG().
 */
void omap_set_dma_src_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
{
	unsigned int burst = 0;
	u32 l;

	l = p->dma_read(CSDP, lch);
	l &= ~(0x03 << 7);

	switch (burst_mode) {
	case OMAP_DMA_DATA_BURST_DIS:
		break;
	case OMAP_DMA_DATA_BURST_4:
		if (dma_omap2plus())
			burst = 0x1;
		else
			burst = 0x2;
		break;
	case OMAP_DMA_DATA_BURST_8:
		if (dma_omap2plus()) {
			burst = 0x2;
			break;
		}
		/*
		 * not supported by current hardware on OMAP1
		 * w |= (0x03 << 7);
		 * fall through
		 */
	case OMAP_DMA_DATA_BURST_16:
		if (dma_omap2plus()) {
			burst = 0x3;
			break;
		}
		/*
		 * OMAP1 don't support burst 16
		 * fall through
		 */
	default:
		BUG();
	}

	l |= (burst << 7);
	p->dma_write(l, CSDP, lch);
}
413 EXPORT_SYMBOL(omap_set_dma_src_burst_mode);
414 
415 /* Note that dest_port is only for OMAP1 */
omap_set_dma_dest_params(int lch,int dest_port,int dest_amode,unsigned long dest_start,int dst_ei,int dst_fi)416 void omap_set_dma_dest_params(int lch, int dest_port, int dest_amode,
417 			      unsigned long dest_start,
418 			      int dst_ei, int dst_fi)
419 {
420 	u32 l;
421 
422 	if (dma_omap1()) {
423 		l = p->dma_read(CSDP, lch);
424 		l &= ~(0x1f << 9);
425 		l |= dest_port << 9;
426 		p->dma_write(l, CSDP, lch);
427 	}
428 
429 	l = p->dma_read(CCR, lch);
430 	l &= ~(0x03 << 14);
431 	l |= dest_amode << 14;
432 	p->dma_write(l, CCR, lch);
433 
434 	p->dma_write(dest_start, CDSA, lch);
435 
436 	p->dma_write(dst_ei, CDEI, lch);
437 	p->dma_write(dst_fi, CDFI, lch);
438 }
439 EXPORT_SYMBOL(omap_set_dma_dest_params);
440 
omap_set_dma_dest_data_pack(int lch,int enable)441 void omap_set_dma_dest_data_pack(int lch, int enable)
442 {
443 	u32 l;
444 
445 	l = p->dma_read(CSDP, lch);
446 	l &= ~(1 << 13);
447 	if (enable)
448 		l |= 1 << 13;
449 	p->dma_write(l, CSDP, lch);
450 }
451 EXPORT_SYMBOL(omap_set_dma_dest_data_pack);
452 
/*
 * Set the destination-side burst size (CSDP bits 15:14) for channel
 * @lch.  Encodings differ between OMAP1 and OMAP2+; burst-16 on OMAP1
 * falls through to BUG().
 */
void omap_set_dma_dest_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
{
	unsigned int burst = 0;
	u32 l;

	l = p->dma_read(CSDP, lch);
	l &= ~(0x03 << 14);

	switch (burst_mode) {
	case OMAP_DMA_DATA_BURST_DIS:
		break;
	case OMAP_DMA_DATA_BURST_4:
		if (dma_omap2plus())
			burst = 0x1;
		else
			burst = 0x2;
		break;
	case OMAP_DMA_DATA_BURST_8:
		if (dma_omap2plus())
			burst = 0x2;
		else
			burst = 0x3;
		break;
	case OMAP_DMA_DATA_BURST_16:
		if (dma_omap2plus()) {
			burst = 0x3;
			break;
		}
		/*
		 * OMAP1 don't support burst 16
		 * fall through
		 */
	default:
		printk(KERN_ERR "Invalid DMA burst mode\n");
		BUG();
		return;
	}
	l |= (burst << 14);
	p->dma_write(l, CSDP, lch);
}
493 EXPORT_SYMBOL(omap_set_dma_dest_burst_mode);
494 
/*
 * Acknowledge any stale channel status, then enable the per-channel
 * interrupt sources recorded in dma_chan[lch].enabled_irqs.
 */
static inline void omap_enable_channel_irq(int lch)
{
	/* Clear CSR (read-to-clear on OMAP1, write-ones on OMAP2+) */
	if (dma_omap1())
		p->dma_read(CSR, lch);
	else
		p->dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, lch);

	/* Enable some nice interrupts. */
	p->dma_write(dma_chan[lch].enabled_irqs, CICR, lch);
}
506 
/*
 * Mask all interrupt sources of channel @lch (CICR = 0) and then clear
 * any status already latched in CSR.
 */
static inline void omap_disable_channel_irq(int lch)
{
	/* disable channel interrupts */
	p->dma_write(0, CICR, lch);
	/* Clear CSR (read-to-clear on OMAP1, write-ones on OMAP2+) */
	if (dma_omap1())
		p->dma_read(CSR, lch);
	else
		p->dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, lch);
}
517 
/*
 * Record @bits as enabled IRQ sources for @lch; they are written to the
 * hardware the next time omap_enable_channel_irq() runs.
 */
void omap_enable_dma_irq(int lch, u16 bits)
{
	struct omap_dma_lch *chan = &dma_chan[lch];

	chan->enabled_irqs |= bits;
}
522 EXPORT_SYMBOL(omap_enable_dma_irq);
523 
/*
 * Remove @bits from the recorded IRQ sources for @lch; takes effect the
 * next time omap_enable_channel_irq() writes CICR.
 */
void omap_disable_dma_irq(int lch, u16 bits)
{
	struct omap_dma_lch *chan = &dma_chan[lch];

	chan->enabled_irqs &= ~bits;
}
528 EXPORT_SYMBOL(omap_disable_dma_irq);
529 
/*
 * Program the hardware link register (CLNK_CTRL) of @lch so the channel
 * recorded in dma_chan[lch].next_lch (or next_linked_ch for OMAP2+
 * chains) is started when @lch completes.
 */
static inline void enable_lnk(int lch)
{
	u32 l;

	l = p->dma_read(CLNK_CTRL, lch);

	if (dma_omap1())
		l &= ~(1 << 14);	/* clear the OMAP1 STOP_LNK bit */

	/* Set the ENABLE_LNK bits */
	if (dma_chan[lch].next_lch != -1)
		l = dma_chan[lch].next_lch | (1 << 15);

#ifndef CONFIG_ARCH_OMAP1
	/* A chained channel overrides a plain software link on OMAP2+ */
	if (dma_omap2plus())
		if (dma_chan[lch].next_linked_ch != -1)
			l = dma_chan[lch].next_linked_ch | (1 << 15);
#endif

	p->dma_write(l, CLNK_CTRL, lch);
}
551 
/*
 * Break the hardware link out of @lch: OMAP1 sets STOP_LNK (bit 14),
 * OMAP2+ clears ENABLE_LNK (bit 15).  The channel's interrupts are
 * masked and the channel is marked inactive.
 */
static inline void disable_lnk(int lch)
{
	u32 l;

	l = p->dma_read(CLNK_CTRL, lch);

	/* Disable interrupts */
	omap_disable_channel_irq(lch);

	if (dma_omap1()) {
		/* Set the STOP_LNK bit */
		l |= 1 << 14;
	}

	if (dma_omap2plus()) {
		/* Clear the ENABLE_LNK bit */
		l &= ~(1 << 15);
	}

	p->dma_write(l, CLNK_CTRL, lch);
	dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
}
574 
omap2_enable_irq_lch(int lch)575 static inline void omap2_enable_irq_lch(int lch)
576 {
577 	u32 val;
578 	unsigned long flags;
579 
580 	if (dma_omap1())
581 		return;
582 
583 	spin_lock_irqsave(&dma_chan_lock, flags);
584 	/* clear IRQ STATUS */
585 	p->dma_write(1 << lch, IRQSTATUS_L0, lch);
586 	/* Enable interrupt */
587 	val = p->dma_read(IRQENABLE_L0, lch);
588 	val |= 1 << lch;
589 	p->dma_write(val, IRQENABLE_L0, lch);
590 	spin_unlock_irqrestore(&dma_chan_lock, flags);
591 }
592 
omap2_disable_irq_lch(int lch)593 static inline void omap2_disable_irq_lch(int lch)
594 {
595 	u32 val;
596 	unsigned long flags;
597 
598 	if (dma_omap1())
599 		return;
600 
601 	spin_lock_irqsave(&dma_chan_lock, flags);
602 	/* Disable interrupt */
603 	val = p->dma_read(IRQENABLE_L0, lch);
604 	val &= ~(1 << lch);
605 	p->dma_write(val, IRQENABLE_L0, lch);
606 	/* clear IRQ STATUS */
607 	p->dma_write(1 << lch, IRQSTATUS_L0, lch);
608 	spin_unlock_irqrestore(&dma_chan_lock, flags);
609 }
610 
/*
 * Allocate a free logical DMA channel.
 *
 * @dev_id:     hardware sync/request line to bind (0 skips sync setup)
 * @dev_name:   identifying string kept for diagnostics
 * @callback:   invoked from the DMA interrupt handler with channel status
 * @data:       opaque cookie handed back to @callback
 * @dma_ch_out: on success, receives the allocated channel number
 *
 * Returns 0 on success or -EBUSY when no channel is free.  This is the
 * deprecated platform DMA API; new users should go through DMA engine.
 */
int omap_request_dma(int dev_id, const char *dev_name,
		     void (*callback)(int lch, u16 ch_status, void *data),
		     void *data, int *dma_ch_out)
{
	int ch, free_ch = -1;
	unsigned long flags;
	struct omap_dma_lch *chan;

	WARN(strcmp(dev_name, "DMA engine"), "Using deprecated platform DMA API - please update to DMA engine");

	spin_lock_irqsave(&dma_chan_lock, flags);
	for (ch = 0; ch < dma_chan_count; ch++) {
		if (free_ch == -1 && dma_chan[ch].dev_id == -1) {
			free_ch = ch;
			/* Exit after first free channel found */
			break;
		}
	}
	if (free_ch == -1) {
		spin_unlock_irqrestore(&dma_chan_lock, flags);
		return -EBUSY;
	}
	chan = dma_chan + free_ch;
	/* Claim the channel (dev_id != -1) before dropping the lock */
	chan->dev_id = dev_id;

	if (p->clear_lch_regs)
		p->clear_lch_regs(free_ch);

	if (dma_omap2plus())
		omap_clear_dma(free_ch);

	spin_unlock_irqrestore(&dma_chan_lock, flags);

	chan->dev_name = dev_name;
	chan->callback = callback;
	chan->data = data;
	chan->flags = 0;

#ifndef CONFIG_ARCH_OMAP1
	if (dma_omap2plus()) {
		chan->chain_id = -1;
		chan->next_linked_ch = -1;
	}
#endif

	/* Default IRQ set; SoC-specific error IRQs are added below */
	chan->enabled_irqs = OMAP_DMA_DROP_IRQ | OMAP_DMA_BLOCK_IRQ;

	if (dma_omap1())
		chan->enabled_irqs |= OMAP1_DMA_TOUT_IRQ;
	else if (dma_omap2plus())
		chan->enabled_irqs |= OMAP2_DMA_MISALIGNED_ERR_IRQ |
			OMAP2_DMA_TRANS_ERR_IRQ;

	if (dma_omap16xx()) {
		/* If the sync device is set, configure it dynamically. */
		if (dev_id != 0) {
			set_gdma_dev(free_ch + 1, dev_id);
			dev_id = free_ch + 1;
		}
		/*
		 * Disable the 1510 compatibility mode and set the sync device
		 * id.
		 */
		p->dma_write(dev_id | (1 << 10), CCR, free_ch);
	} else if (dma_omap1()) {
		p->dma_write(dev_id, CCR, free_ch);
	}

	if (dma_omap2plus()) {
		omap_enable_channel_irq(free_ch);
		omap2_enable_irq_lch(free_ch);
	}

	*dma_ch_out = free_ch;

	return 0;
}
688 EXPORT_SYMBOL(omap_request_dma);
689 
/*
 * Release a channel obtained from omap_request_dma(): mask its
 * interrupts, stop any transfer (CCR = 0), clear the channel registers
 * on OMAP2+ and return it to the free pool (dev_id = -1).
 */
void omap_free_dma(int lch)
{
	unsigned long flags;

	if (dma_chan[lch].dev_id == -1) {
		pr_err("omap_dma: trying to free unallocated DMA channel %d\n",
		       lch);
		return;
	}

	/* Disable interrupt for logical channel */
	if (dma_omap2plus())
		omap2_disable_irq_lch(lch);

	/* Disable all DMA interrupts for the channel. */
	omap_disable_channel_irq(lch);

	/* Make sure the DMA transfer is stopped. */
	p->dma_write(0, CCR, lch);

	/* Clear registers */
	if (dma_omap2plus())
		omap_clear_dma(lch);

	/* Return the channel to the free pool under the allocation lock */
	spin_lock_irqsave(&dma_chan_lock, flags);
	dma_chan[lch].dev_id = -1;
	dma_chan[lch].next_lch = -1;
	dma_chan[lch].callback = NULL;
	spin_unlock_irqrestore(&dma_chan_lock, flags);
}
720 EXPORT_SYMBOL(omap_free_dma);
721 
722 /**
723  * @brief omap_dma_set_global_params : Set global priority settings for dma
724  *
725  * @param arb_rate
726  * @param max_fifo_depth
727  * @param tparams - Number of threads to reserve : DMA_THREAD_RESERVE_NORM
728  * 						   DMA_THREAD_RESERVE_ONET
729  * 						   DMA_THREAD_RESERVE_TWOT
730  * 						   DMA_THREAD_RESERVE_THREET
731  */
732 void
omap_dma_set_global_params(int arb_rate,int max_fifo_depth,int tparams)733 omap_dma_set_global_params(int arb_rate, int max_fifo_depth, int tparams)
734 {
735 	u32 reg;
736 
737 	if (dma_omap1()) {
738 		printk(KERN_ERR "FIXME: no %s on 15xx/16xx\n", __func__);
739 		return;
740 	}
741 
742 	if (max_fifo_depth == 0)
743 		max_fifo_depth = 1;
744 	if (arb_rate == 0)
745 		arb_rate = 1;
746 
747 	reg = 0xff & max_fifo_depth;
748 	reg |= (0x3 & tparams) << 12;
749 	reg |= (arb_rate & 0xff) << 16;
750 
751 	p->dma_write(reg, GCR, 0);
752 }
753 EXPORT_SYMBOL(omap_dma_set_global_params);
754 
755 /**
756  * @brief omap_dma_set_prio_lch : Set channel wise priority settings
757  *
758  * @param lch
759  * @param read_prio - Read priority
760  * @param write_prio - Write priority
761  * Both of the above can be set with one of the following values :
762  * 	DMA_CH_PRIO_HIGH/DMA_CH_PRIO_LOW
763  */
764 static int
omap_dma_set_prio_lch(int lch,unsigned char read_prio,unsigned char write_prio)765 omap_dma_set_prio_lch(int lch, unsigned char read_prio,
766 		      unsigned char write_prio)
767 {
768 	u32 l;
769 
770 	if (unlikely((lch < 0 || lch >= dma_lch_count))) {
771 		printk(KERN_ERR "Invalid channel id\n");
772 		return -EINVAL;
773 	}
774 	l = p->dma_read(CCR, lch);
775 	l &= ~((1 << 6) | (1 << 26));
776 	if (d->dev_caps & IS_RW_PRIORITY)
777 		l |= ((read_prio & 0x1) << 6) | ((write_prio & 0x1) << 26);
778 	else
779 		l |= ((read_prio & 0x1) << 6);
780 
781 	p->dma_write(l, CCR, lch);
782 
783 	return 0;
784 }
785 
786 
787 /*
788  * Clears any DMA state so the DMA engine is ready to restart with new buffers
789  * through omap_start_dma(). Any buffers in flight are discarded.
790  */
static void omap_clear_dma(int lch)
{
	unsigned long flags;

	/* Block interrupts so the handler cannot see half-cleared state */
	local_irq_save(flags);
	p->clear_dma(lch);	/* platform-specific channel register reset */
	local_irq_restore(flags);
}
799 
/*
 * Start a transfer on logical channel @lch.  If software linking was set
 * up via omap_dma_link_lch(), the whole (possibly circular) link chain
 * is programmed and its channel IRQs enabled before @lch itself is
 * enabled via CCR_EN.
 */
void omap_start_dma(int lch)
{
	u32 l;

	/*
	 * The CPC/CDAC register needs to be initialized to zero
	 * before starting dma transfer.
	 */
	if (dma_omap15xx())
		p->dma_write(0, CPC, lch);
	else
		p->dma_write(0, CDAC, lch);

	if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
		int next_lch, cur_lch;
		char dma_chan_link_map[MAX_LOGICAL_DMA_CH_COUNT];

		/* Set the link register of the first channel */
		enable_lnk(lch);

		/* Visited map guards against circular link chains */
		memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map));
		dma_chan_link_map[lch] = 1;

		cur_lch = dma_chan[lch].next_lch;
		do {
			next_lch = dma_chan[cur_lch].next_lch;

			/* The loop case: we've been here already */
			if (dma_chan_link_map[cur_lch])
				break;
			/* Mark the current channel */
			dma_chan_link_map[cur_lch] = 1;

			enable_lnk(cur_lch);
			omap_enable_channel_irq(cur_lch);

			cur_lch = next_lch;
		} while (next_lch != -1);
	} else if (IS_DMA_ERRATA(DMA_ERRATA_PARALLEL_CHANNELS))
		p->dma_write(lch, CLNK_CTRL, lch);

	omap_enable_channel_irq(lch);

	l = p->dma_read(CCR, lch);

	if (IS_DMA_ERRATA(DMA_ERRATA_IFRAME_BUFFERING))
			l |= OMAP_DMA_CCR_BUFFERING_DISABLE;
	l |= OMAP_DMA_CCR_EN;

	/*
	 * As dma_write() uses IO accessors which are weakly ordered, there
	 * is no guarantee that data in coherent DMA memory will be visible
	 * to the DMA device.  Add a memory barrier here to ensure that any
	 * such data is visible prior to enabling DMA.
	 */
	mb();
	p->dma_write(l, CCR, lch);

	dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
}
860 EXPORT_SYMBOL(omap_start_dma);
861 
/*
 * Stop channel @lch.  On SoCs with erratum i541, a channel using
 * source/dest synchronization must first be drained with the DMA block
 * in no-standby mode; otherwise CCR_EN is simply cleared.  Any software
 * link chain is then unlinked and the channel marked inactive.
 */
void omap_stop_dma(int lch)
{
	u32 l;

	/* Disable all interrupts on the channel */
	omap_disable_channel_irq(lch);

	l = p->dma_read(CCR, lch);
	if (IS_DMA_ERRATA(DMA_ERRATA_i541) &&
			(l & OMAP_DMA_CCR_SEL_SRC_DST_SYNC)) {
		int i = 0;
		u32 sys_cf;

		/* Configure No-Standby */
		l = p->dma_read(OCP_SYSCONFIG, lch);
		sys_cf = l;	/* remember original OCP_SYSCONFIG */
		l &= ~DMA_SYSCONFIG_MIDLEMODE_MASK;
		l |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
		p->dma_write(l , OCP_SYSCONFIG, 0);

		l = p->dma_read(CCR, lch);
		l &= ~OMAP_DMA_CCR_EN;
		p->dma_write(l, CCR, lch);

		/* Wait for sDMA FIFO drain */
		l = p->dma_read(CCR, lch);
		while (i < 100 && (l & (OMAP_DMA_CCR_RD_ACTIVE |
					OMAP_DMA_CCR_WR_ACTIVE))) {
			udelay(5);
			i++;
			l = p->dma_read(CCR, lch);
		}
		if (i >= 100)
			pr_err("DMA drain did not complete on lch %d\n", lch);
		/* Restore OCP_SYSCONFIG */
		p->dma_write(sys_cf, OCP_SYSCONFIG, lch);
	} else {
		l &= ~OMAP_DMA_CCR_EN;
		p->dma_write(l, CCR, lch);
	}

	/*
	 * Ensure that data transferred by DMA is visible to any access
	 * after DMA has been disabled.  This is important for coherent
	 * DMA regions.
	 */
	mb();

	if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
		int next_lch, cur_lch = lch;
		char dma_chan_link_map[MAX_LOGICAL_DMA_CH_COUNT];

		/* Walk the link chain, guarding against circular links */
		memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map));
		do {
			/* The loop case: we've been here already */
			if (dma_chan_link_map[cur_lch])
				break;
			/* Mark the current channel */
			dma_chan_link_map[cur_lch] = 1;

			disable_lnk(cur_lch);

			next_lch = dma_chan[cur_lch].next_lch;
			cur_lch = next_lch;
		} while (next_lch != -1);
	}

	dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
}
931 EXPORT_SYMBOL(omap_stop_dma);
932 
933 /*
934  * Allows changing the DMA callback function or data. This may be needed if
935  * the driver shares a single DMA channel for multiple dma triggers.
936  */
/*
 * Replace the completion callback and cookie of an already-allocated
 * channel, under dma_chan_lock so the IRQ handler never sees a callback
 * paired with the wrong cookie.
 *
 * Returns 0 on success, -ENODEV for a negative channel number, or
 * -EINVAL when the channel is not currently allocated.
 */
int omap_set_dma_callback(int lch,
			  void (*callback)(int lch, u16 ch_status, void *data),
			  void *data)
{
	unsigned long flags;

	if (lch < 0)
		return -ENODEV;

	spin_lock_irqsave(&dma_chan_lock, flags);
	if (dma_chan[lch].dev_id == -1) {
		/*
		 * Fix garbled message ("DMA callback for not set for free
		 * channel") and use pr_err() like the rest of this file.
		 */
		pr_err("omap_dma: DMA callback not set for free channel\n");
		spin_unlock_irqrestore(&dma_chan_lock, flags);
		return -EINVAL;
	}
	dma_chan[lch].callback = callback;
	dma_chan[lch].data = data;
	spin_unlock_irqrestore(&dma_chan_lock, flags);

	return 0;
}
958 EXPORT_SYMBOL(omap_set_dma_callback);
959 
960 /*
961  * Returns current physical source address for the given DMA channel.
962  * If the channel is running the caller must disable interrupts prior calling
963  * this function and process the returned value before re-enabling interrupt to
964  * prevent races with the interrupt handler. Note that in continuous mode there
965  * is a chance for CSSA_L register overflow between the two reads resulting
966  * in incorrect return value.
967  */
dma_addr_t omap_get_dma_src_pos(int lch)
{
	dma_addr_t offset = 0;

	/* OMAP1510 only exposes transfer progress through CPC */
	if (dma_omap15xx())
		offset = p->dma_read(CPC, lch);
	else
		offset = p->dma_read(CSAC, lch);

	/* Erratum 3.3: CSAC can read 0 right after channel disable; retry */
	if (IS_DMA_ERRATA(DMA_ERRATA_3_3) && offset == 0)
		offset = p->dma_read(CSAC, lch);

	if (!dma_omap15xx()) {
		/*
		 * CDAC == 0 indicates that the DMA transfer on the channel has
		 * not been started (no data has been transferred so far).
		 * Return the programmed source start address in this case.
		 */
		if (likely(p->dma_read(CDAC, lch)))
			offset = p->dma_read(CSAC, lch);
		else
			offset = p->dma_read(CSSA, lch);
	}

	/* OMAP1: progress registers carry only the low 16 bits; merge the
	 * static high half from the programmed start address (CSSA). */
	if (dma_omap1())
		offset |= (p->dma_read(CSSA, lch) & 0xFFFF0000);

	return offset;
}
997 EXPORT_SYMBOL(omap_get_dma_src_pos);
998 
999 /*
1000  * Returns current physical destination address for the given DMA channel.
1001  * If the channel is running the caller must disable interrupts prior calling
1002  * this function and process the returned value before re-enabling interrupt to
1003  * prevent races with the interrupt handler. Note that in continuous mode there
1004  * is a chance for CDSA_L register overflow between the two reads resulting
1005  * in incorrect return value.
1006  */
dma_addr_t omap_get_dma_dst_pos(int lch)
{
	dma_addr_t offset = 0;

	/* OMAP1510 only exposes transfer progress through CPC */
	if (dma_omap15xx())
		offset = p->dma_read(CPC, lch);
	else
		offset = p->dma_read(CDAC, lch);

	/*
	 * omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
	 * read before the DMA controller finished disabling the channel.
	 */
	if (!dma_omap15xx() && offset == 0) {
		offset = p->dma_read(CDAC, lch);
		/*
		 * CDAC == 0 indicates that the DMA transfer on the channel has
		 * not been started (no data has been transferred so far).
		 * Return the programmed destination start address in this case.
		 */
		if (unlikely(!offset))
			offset = p->dma_read(CDSA, lch);
	}

	/* OMAP1: progress registers carry only the low 16 bits; merge the
	 * static high half from the programmed start address (CDSA). */
	if (dma_omap1())
		offset |= (p->dma_read(CDSA, lch) & 0xFFFF0000);

	return offset;
}
1036 EXPORT_SYMBOL(omap_get_dma_dst_pos);
1037 
omap_get_dma_active_status(int lch)1038 int omap_get_dma_active_status(int lch)
1039 {
1040 	return (p->dma_read(CCR, lch) & OMAP_DMA_CCR_EN) != 0;
1041 }
1042 EXPORT_SYMBOL(omap_get_dma_active_status);
1043 
omap_dma_running(void)1044 int omap_dma_running(void)
1045 {
1046 	int lch;
1047 
1048 	if (dma_omap1())
1049 		if (omap_lcd_dma_running())
1050 			return 1;
1051 
1052 	for (lch = 0; lch < dma_chan_count; lch++)
1053 		if (p->dma_read(CCR, lch) & OMAP_DMA_CCR_EN)
1054 			return 1;
1055 
1056 	return 0;
1057 }
1058 
1059 /*
1060  * lch_queue DMA will start right after lch_head one is finished.
1061  * For this DMA link to start, you still need to start (see omap_start_dma)
1062  * the first one. That will fire up the entire queue.
1063  */
void omap_dma_link_lch(int lch_head, int lch_queue)
{
	if (omap_dma_in_1510_mode()) {
		if (lch_head == lch_queue) {
			/* 1510 supports only self-linking: set CCR bits 9:8
			 * (presumably auto-init/repeat — confirm on TRM) */
			p->dma_write(p->dma_read(CCR, lch_head) | (3 << 8),
								CCR, lch_head);
			return;
		}
		printk(KERN_ERR "DMA linking is not supported in 1510 mode\n");
		BUG();
		return;
	}

	if ((dma_chan[lch_head].dev_id == -1) ||
	    (dma_chan[lch_queue].dev_id == -1)) {
		pr_err("omap_dma: trying to link non requested channels\n");
		dump_stack();
	}

	/* The hardware link itself is programmed later by enable_lnk() */
	dma_chan[lch_head].next_lch = lch_queue;
}
1085 EXPORT_SYMBOL(omap_dma_link_lch);
1086 
1087 /*----------------------------------------------------------------------------*/
1088 
1089 #ifdef CONFIG_ARCH_OMAP1
1090 
/*
 * Service one OMAP1 channel interrupt.  In 1510 mode channels 6+ have
 * no IRQ line of their own: their status arrives in the upper bits of
 * channels 0..2 and is parked in saved_csr here for a later pass.
 * Returns 1 if an event was handled, 0 otherwise.
 */
static int omap1_dma_handle_ch(int ch)
{
	u32 csr;

	if (enable_1510_mode && ch >= 6) {
		/* Status was parked earlier by the ch - 6 handler */
		csr = dma_chan[ch].saved_csr;
		dma_chan[ch].saved_csr = 0;
	} else
		csr = p->dma_read(CSR, ch);
	if (enable_1510_mode && ch <= 2 && (csr >> 7) != 0) {
		/* Park the shadow channel's status bits (above bit 6) */
		dma_chan[ch + 6].saved_csr = csr >> 7;
		csr &= 0x7f;
	}
	if ((csr & 0x3f) == 0)
		return 0;
	if (unlikely(dma_chan[ch].dev_id == -1)) {
		pr_warn("Spurious interrupt from DMA channel %d (CSR %04x)\n",
			ch, csr);
		return 0;
	}
	if (unlikely(csr & OMAP1_DMA_TOUT_IRQ))
		pr_warn("DMA timeout with device %d\n", dma_chan[ch].dev_id);
	if (unlikely(csr & OMAP_DMA_DROP_IRQ))
		pr_warn("DMA synchronization event drop occurred with device %d\n",
			dma_chan[ch].dev_id);
	if (likely(csr & OMAP_DMA_BLOCK_IRQ))
		dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE;
	if (likely(dma_chan[ch].callback != NULL))
		dma_chan[ch].callback(ch, csr, dma_chan[ch].data);

	return 1;
}
1123 
omap1_dma_irq_handler(int irq,void * dev_id)1124 static irqreturn_t omap1_dma_irq_handler(int irq, void *dev_id)
1125 {
1126 	int ch = ((int) dev_id) - 1;
1127 	int handled = 0;
1128 
1129 	for (;;) {
1130 		int handled_now = 0;
1131 
1132 		handled_now += omap1_dma_handle_ch(ch);
1133 		if (enable_1510_mode && dma_chan[ch + 6].saved_csr)
1134 			handled_now += omap1_dma_handle_ch(ch + 6);
1135 		if (!handled_now)
1136 			break;
1137 		handled += handled_now;
1138 	}
1139 
1140 	return handled ? IRQ_HANDLED : IRQ_NONE;
1141 }
1142 
1143 #else
1144 #define omap1_dma_irq_handler	NULL
1145 #endif
1146 
1147 #ifdef CONFIG_ARCH_OMAP2PLUS
1148 
omap2_dma_handle_ch(int ch)1149 static int omap2_dma_handle_ch(int ch)
1150 {
1151 	u32 status = p->dma_read(CSR, ch);
1152 
1153 	if (!status) {
1154 		if (printk_ratelimit())
1155 			pr_warn("Spurious DMA IRQ for lch %d\n", ch);
1156 		p->dma_write(1 << ch, IRQSTATUS_L0, ch);
1157 		return 0;
1158 	}
1159 	if (unlikely(dma_chan[ch].dev_id == -1)) {
1160 		if (printk_ratelimit())
1161 			pr_warn("IRQ %04x for non-allocated DMA channel %d\n",
1162 				status, ch);
1163 		return 0;
1164 	}
1165 	if (unlikely(status & OMAP_DMA_DROP_IRQ))
1166 		pr_info("DMA synchronization event drop occurred with device %d\n",
1167 			dma_chan[ch].dev_id);
1168 	if (unlikely(status & OMAP2_DMA_TRANS_ERR_IRQ)) {
1169 		printk(KERN_INFO "DMA transaction error with device %d\n",
1170 		       dma_chan[ch].dev_id);
1171 		if (IS_DMA_ERRATA(DMA_ERRATA_i378)) {
1172 			u32 ccr;
1173 
1174 			ccr = p->dma_read(CCR, ch);
1175 			ccr &= ~OMAP_DMA_CCR_EN;
1176 			p->dma_write(ccr, CCR, ch);
1177 			dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE;
1178 		}
1179 	}
1180 	if (unlikely(status & OMAP2_DMA_SECURE_ERR_IRQ))
1181 		printk(KERN_INFO "DMA secure error with device %d\n",
1182 		       dma_chan[ch].dev_id);
1183 	if (unlikely(status & OMAP2_DMA_MISALIGNED_ERR_IRQ))
1184 		printk(KERN_INFO "DMA misaligned error with device %d\n",
1185 		       dma_chan[ch].dev_id);
1186 
1187 	p->dma_write(status, CSR, ch);
1188 	p->dma_write(1 << ch, IRQSTATUS_L0, ch);
1189 	/* read back the register to flush the write */
1190 	p->dma_read(IRQSTATUS_L0, ch);
1191 
1192 	/* If the ch is not chained then chain_id will be -1 */
1193 	if (dma_chan[ch].chain_id != -1) {
1194 		int chain_id = dma_chan[ch].chain_id;
1195 		dma_chan[ch].state = DMA_CH_NOTSTARTED;
1196 		if (p->dma_read(CLNK_CTRL, ch) & (1 << 15))
1197 			dma_chan[dma_chan[ch].next_linked_ch].state =
1198 							DMA_CH_STARTED;
1199 		if (dma_linked_lch[chain_id].chain_mode ==
1200 						OMAP_DMA_DYNAMIC_CHAIN)
1201 			disable_lnk(ch);
1202 
1203 		if (!OMAP_DMA_CHAIN_QEMPTY(chain_id))
1204 			OMAP_DMA_CHAIN_INCQHEAD(chain_id);
1205 
1206 		status = p->dma_read(CSR, ch);
1207 		p->dma_write(status, CSR, ch);
1208 	}
1209 
1210 	if (likely(dma_chan[ch].callback != NULL))
1211 		dma_chan[ch].callback(ch, status, dma_chan[ch].data);
1212 
1213 	return 0;
1214 }
1215 
1216 /* STATUS register count is from 1-32 while our is 0-31 */
omap2_dma_irq_handler(int irq,void * dev_id)1217 static irqreturn_t omap2_dma_irq_handler(int irq, void *dev_id)
1218 {
1219 	u32 val, enable_reg;
1220 	int i;
1221 
1222 	val = p->dma_read(IRQSTATUS_L0, 0);
1223 	if (val == 0) {
1224 		if (printk_ratelimit())
1225 			printk(KERN_WARNING "Spurious DMA IRQ\n");
1226 		return IRQ_HANDLED;
1227 	}
1228 	enable_reg = p->dma_read(IRQENABLE_L0, 0);
1229 	val &= enable_reg; /* Dispatch only relevant interrupts */
1230 	for (i = 0; i < dma_lch_count && val != 0; i++) {
1231 		if (val & 1)
1232 			omap2_dma_handle_ch(i);
1233 		val >>= 1;
1234 	}
1235 
1236 	return IRQ_HANDLED;
1237 }
1238 
/* IRQ descriptor for the shared OMAP2+ system DMA interrupt line,
 * installed with setup_irq() in omap_system_dma_probe(). */
static struct irqaction omap24xx_dma_irq = {
	.name = "DMA",
	.handler = omap2_dma_irq_handler,
};

#else
/* Non-OMAP2+ build: empty placeholder so probe()/remove() still link. */
static struct irqaction omap24xx_dma_irq;
#endif
1247 
1248 /*----------------------------------------------------------------------------*/
1249 
1250 /*
1251  * Note that we are currently using only IRQENABLE_L0 and L1.
1252  * As the DSP may be using IRQENABLE_L2 and L3, let's not
1253  * touch those for now.
1254  */
/*
 * omap_dma_global_context_save - snapshot the global SDMA registers
 *
 * Saves the L0 and L1 IRQ enable masks, OCP_SYSCONFIG and GCR into
 * omap_dma_global_context so omap_dma_global_context_restore() can
 * write them back later.
 */
void omap_dma_global_context_save(void)
{
	omap_dma_global_context.dma_irqenable_l0 =
		p->dma_read(IRQENABLE_L0, 0);
	omap_dma_global_context.dma_irqenable_l1 =
		p->dma_read(IRQENABLE_L1, 0);
	omap_dma_global_context.dma_ocp_sysconfig =
		p->dma_read(OCP_SYSCONFIG, 0);
	omap_dma_global_context.dma_gcr = p->dma_read(GCR, 0);
}
1265 
/*
 * omap_dma_global_context_restore - write back state saved by
 * omap_dma_global_context_save()
 *
 * Restores GCR, OCP_SYSCONFIG and both IRQ enable masks (in that
 * order), then clears every channel still owned by a client so no
 * stale channel programming survives.
 */
void omap_dma_global_context_restore(void)
{
	int ch;

	p->dma_write(omap_dma_global_context.dma_gcr, GCR, 0);
	p->dma_write(omap_dma_global_context.dma_ocp_sysconfig,
		OCP_SYSCONFIG, 0);
	p->dma_write(omap_dma_global_context.dma_irqenable_l0,
		IRQENABLE_L0, 0);
	p->dma_write(omap_dma_global_context.dma_irqenable_l1,
		IRQENABLE_L1, 0);

	/* Workaround: ack the status bits of channels 0 and 1.
	 * NOTE(review): the precise rationale lives with the
	 * DMA_ROMCODE_BUG errata definition — confirm there. */
	if (IS_DMA_ERRATA(DMA_ROMCODE_BUG))
		p->dma_write(0x3 , IRQSTATUS_L0, 0);

	for (ch = 0; ch < dma_chan_count; ch++)
		if (dma_chan[ch].dev_id != -1)
			omap_clear_dma(ch);
}
1285 
/*
 * omap_get_plat_info - expose the system DMA platform data
 *
 * Returns the omap_system_dma_plat_info pointer cached by
 * omap_system_dma_probe(), so other kernel code can share the
 * register accessors and DMA attributes.  May be NULL before probe.
 */
struct omap_system_dma_plat_info *omap_get_plat_info(void)
{
	return p;
}
EXPORT_SYMBOL_GPL(omap_get_plat_info);
1291 
omap_system_dma_probe(struct platform_device * pdev)1292 static int omap_system_dma_probe(struct platform_device *pdev)
1293 {
1294 	int ch, ret = 0;
1295 	int dma_irq;
1296 	char irq_name[4];
1297 	int irq_rel;
1298 
1299 	p = pdev->dev.platform_data;
1300 	if (!p) {
1301 		dev_err(&pdev->dev,
1302 			"%s: System DMA initialized without platform data\n",
1303 			__func__);
1304 		return -EINVAL;
1305 	}
1306 
1307 	d			= p->dma_attr;
1308 	errata			= p->errata;
1309 
1310 	if ((d->dev_caps & RESERVE_CHANNEL) && omap_dma_reserve_channels
1311 			&& (omap_dma_reserve_channels < d->lch_count))
1312 		d->lch_count	= omap_dma_reserve_channels;
1313 
1314 	dma_lch_count		= d->lch_count;
1315 	dma_chan_count		= dma_lch_count;
1316 	enable_1510_mode	= d->dev_caps & ENABLE_1510_MODE;
1317 
1318 	dma_chan = devm_kcalloc(&pdev->dev, dma_lch_count,
1319 				sizeof(struct omap_dma_lch), GFP_KERNEL);
1320 	if (!dma_chan) {
1321 		dev_err(&pdev->dev, "%s: kzalloc fail\n", __func__);
1322 		return -ENOMEM;
1323 	}
1324 
1325 
1326 	if (dma_omap2plus()) {
1327 		dma_linked_lch = kzalloc(sizeof(struct dma_link_info) *
1328 						dma_lch_count, GFP_KERNEL);
1329 		if (!dma_linked_lch) {
1330 			ret = -ENOMEM;
1331 			goto exit_dma_lch_fail;
1332 		}
1333 	}
1334 
1335 	spin_lock_init(&dma_chan_lock);
1336 	for (ch = 0; ch < dma_chan_count; ch++) {
1337 		omap_clear_dma(ch);
1338 		if (dma_omap2plus())
1339 			omap2_disable_irq_lch(ch);
1340 
1341 		dma_chan[ch].dev_id = -1;
1342 		dma_chan[ch].next_lch = -1;
1343 
1344 		if (ch >= 6 && enable_1510_mode)
1345 			continue;
1346 
1347 		if (dma_omap1()) {
1348 			/*
1349 			 * request_irq() doesn't like dev_id (ie. ch) being
1350 			 * zero, so we have to kludge around this.
1351 			 */
1352 			sprintf(&irq_name[0], "%d", ch);
1353 			dma_irq = platform_get_irq_byname(pdev, irq_name);
1354 
1355 			if (dma_irq < 0) {
1356 				ret = dma_irq;
1357 				goto exit_dma_irq_fail;
1358 			}
1359 
1360 			/* INT_DMA_LCD is handled in lcd_dma.c */
1361 			if (dma_irq == INT_DMA_LCD)
1362 				continue;
1363 
1364 			ret = request_irq(dma_irq,
1365 					omap1_dma_irq_handler, 0, "DMA",
1366 					(void *) (ch + 1));
1367 			if (ret != 0)
1368 				goto exit_dma_irq_fail;
1369 		}
1370 	}
1371 
1372 	if (d->dev_caps & IS_RW_PRIORITY)
1373 		omap_dma_set_global_params(DMA_DEFAULT_ARB_RATE,
1374 				DMA_DEFAULT_FIFO_DEPTH, 0);
1375 
1376 	if (dma_omap2plus() && !(d->dev_caps & DMA_ENGINE_HANDLE_IRQ)) {
1377 		strcpy(irq_name, "0");
1378 		dma_irq = platform_get_irq_byname(pdev, irq_name);
1379 		if (dma_irq < 0) {
1380 			dev_err(&pdev->dev, "failed: request IRQ %d", dma_irq);
1381 			ret = dma_irq;
1382 			goto exit_dma_lch_fail;
1383 		}
1384 		ret = setup_irq(dma_irq, &omap24xx_dma_irq);
1385 		if (ret) {
1386 			dev_err(&pdev->dev, "set_up failed for IRQ %d for DMA (error %d)\n",
1387 				dma_irq, ret);
1388 			goto exit_dma_lch_fail;
1389 		}
1390 	}
1391 
1392 	/* reserve dma channels 0 and 1 in high security devices on 34xx */
1393 	if (d->dev_caps & HS_CHANNELS_RESERVED) {
1394 		pr_info("Reserving DMA channels 0 and 1 for HS ROM code\n");
1395 		dma_chan[0].dev_id = 0;
1396 		dma_chan[1].dev_id = 1;
1397 	}
1398 	p->show_dma_caps();
1399 	return 0;
1400 
1401 exit_dma_irq_fail:
1402 	dev_err(&pdev->dev, "unable to request IRQ %d for DMA (error %d)\n",
1403 		dma_irq, ret);
1404 	for (irq_rel = 0; irq_rel < ch;	irq_rel++) {
1405 		dma_irq = platform_get_irq(pdev, irq_rel);
1406 		free_irq(dma_irq, (void *)(irq_rel + 1));
1407 	}
1408 
1409 exit_dma_lch_fail:
1410 	return ret;
1411 }
1412 
omap_system_dma_remove(struct platform_device * pdev)1413 static int omap_system_dma_remove(struct platform_device *pdev)
1414 {
1415 	int dma_irq;
1416 
1417 	if (dma_omap2plus()) {
1418 		char irq_name[4];
1419 		strcpy(irq_name, "0");
1420 		dma_irq = platform_get_irq_byname(pdev, irq_name);
1421 		if (dma_irq >= 0)
1422 			remove_irq(dma_irq, &omap24xx_dma_irq);
1423 	} else {
1424 		int irq_rel = 0;
1425 		for ( ; irq_rel < dma_chan_count; irq_rel++) {
1426 			dma_irq = platform_get_irq(pdev, irq_rel);
1427 			free_irq(dma_irq, (void *)(irq_rel + 1));
1428 		}
1429 	}
1430 	return 0;
1431 }
1432 
/* Platform driver glue, bound by name to the "omap_dma_system" device. */
static struct platform_driver omap_system_dma_driver = {
	.probe		= omap_system_dma_probe,
	.remove		= omap_system_dma_remove,
	.driver		= {
		.name	= "omap_dma_system"
	},
};
1440 
/* Register the driver at arch_initcall time (earlier than module_init). */
static int __init omap_system_dma_init(void)
{
	return platform_driver_register(&omap_system_dma_driver);
}
arch_initcall(omap_system_dma_init);
1446 
/* Unregister the platform driver.
 * NOTE(review): no module_exit() hookup is visible in this chunk —
 * confirm whether this exit path is ever invoked. */
static void __exit omap_system_dma_exit(void)
{
	platform_driver_unregister(&omap_system_dma_driver);
}
1451 
/* Module metadata; DRIVER_NAME is defined outside this chunk. */
MODULE_DESCRIPTION("OMAP SYSTEM DMA DRIVER");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_AUTHOR("Texas Instruments Inc");
1456 
1457 /*
1458  * Reserve the omap SDMA channels using cmdline bootarg
1459  * "omap_dma_reserve_ch=". The valid range is 1 to 32
1460  */
omap_dma_cmdline_reserve_ch(char * str)1461 static int __init omap_dma_cmdline_reserve_ch(char *str)
1462 {
1463 	if (get_option(&str, &omap_dma_reserve_channels) != 1)
1464 		omap_dma_reserve_channels = 0;
1465 	return 1;
1466 }
1467 
1468 __setup("omap_dma_reserve_ch=", omap_dma_cmdline_reserve_ch);
1469 
1470 
1471