/*
 * Copyright (C) 2005-2006 by Texas Instruments
 *
 * This file implements a DMA interface using TI's CPPI DMA.
 * For now it's DaVinci-only, but CPPI isn't specific to DaVinci or USB.
 * The TUSB6020, using VLYNQ, has CPPI that looks much like DaVinci.
 */

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/usb.h>

#include "musb_core.h"
#include "musb_debug.h"
#include "cppi_dma.h"
#include "davinci.h"


/* CPPI DMA status 7-mar-2006:
 *
 * - See musb_{host,gadget}.c for more info
 *
 * - Correct RX DMA generally forces the engine into irq-per-packet mode,
 *   which can easily saturate the CPU under non-mass-storage loads.
 *
 * NOTES 24-aug-2006 (2.6.18-rc4):
 *
 * - peripheral RXDMA wedged in a test with packets of length 512/512/1.
 *   evidently after the 1 byte packet was received and acked, the queue
 *   of BDs got garbaged so it wouldn't empty the fifo.  (rxcsr 0x2003,
 *   and RX DMA0: 4 left, 80000000 8feff880, 8feff860 8feff860; 8f321401
 *   004001ff 00000001 .. 8feff860)  Host was just getting NAKed on tx
 *   of its next (512 byte) packet.  IRQ issues?
 *
 * REVISIT:  the "transfer DMA" glue between CPPI and USB fifos will
 * evidently also directly update the RX and TX CSRs ... so audit all
 * host and peripheral side DMA code to avoid CSR access after DMA has
 * been started.
 */

/* REVISIT now we can avoid preallocating these descriptors; or
 * more simply, switch to a global freelist not per-channel ones.
 * Note: at full speed, 64 descriptors == 4K bulk data.
 */
#define NUM_TXCHAN_BD       64
#define NUM_RXCHAN_BD       64
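
/* Illustrative sizing note (added commentary, not from the original
 * author): at full speed, bulk maxpacket is 64 bytes, so 64 one-packet
 * descriptors cover 64 * 64 = 4096 bytes, matching the "64 descriptors
 * == 4K bulk data" remark above.  At high speed (512 byte maxpacket),
 * the same 64 BDs would cover up to 32 KB per segment.
 */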

static inline void cpu_drain_writebuffer(void)
{
	wmb();
#ifdef	CONFIG_CPU_ARM926T
	/* REVISIT this "should not be needed",
	 * but lack of it sure seemed to hurt ...
	 */
	asm("mcr p15, 0, r0, c7, c10, 4 @ drain write buffer\n");
#endif
}

static inline struct cppi_descriptor *cppi_bd_alloc(struct cppi_channel *c)
{
	struct cppi_descriptor	*bd = c->freelist;

	if (bd)
		c->freelist = bd->next;
	return bd;
}

static inline void
cppi_bd_free(struct cppi_channel *c, struct cppi_descriptor *bd)
{
	if (!bd)
		return;
	bd->next = c->freelist;
	c->freelist = bd;
}
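
/* Illustrative note (added commentary, not original): the per-channel
 * freelist is a simple LIFO singly linked stack, so a BD freed by
 * cppi_bd_free() is the first one handed back by cppi_bd_alloc().
 * A hypothetical sequence:
 *
 *	bd = cppi_bd_alloc(c);		// pops c->freelist
 *	... fill in bd->hw_* fields ...
 *	cppi_bd_free(c, bd);		// pushes bd back onto c->freelist
 *
 * No locking is done here; callers are expected to hold the controller
 * lock (see the "Context: controller irqlocked" notes below).
 */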

/*
 *  Start DMA controller
 *
 *  Initialize the DMA controller as necessary.
 */

/* zero out entire rx state RAM entry for the channel */
static void cppi_reset_rx(struct cppi_rx_stateram __iomem *rx)
{
	musb_writel(&rx->rx_skipbytes, 0, 0);
	musb_writel(&rx->rx_head, 0, 0);
	musb_writel(&rx->rx_sop, 0, 0);
	musb_writel(&rx->rx_current, 0, 0);
	musb_writel(&rx->rx_buf_current, 0, 0);
	musb_writel(&rx->rx_len_len, 0, 0);
	musb_writel(&rx->rx_cnt_cnt, 0, 0);
}

/* zero out entire tx state RAM entry for the channel */
static void cppi_reset_tx(struct cppi_tx_stateram __iomem *tx, u32 ptr)
{
	musb_writel(&tx->tx_head, 0, 0);
	musb_writel(&tx->tx_buf, 0, 0);
	musb_writel(&tx->tx_current, 0, 0);
	musb_writel(&tx->tx_buf_current, 0, 0);
	musb_writel(&tx->tx_info, 0, 0);
	musb_writel(&tx->tx_rem_len, 0, 0);
	/* musb_writel(&tx->tx_dummy, 0, 0); */
	musb_writel(&tx->tx_complete, 0, ptr);
}

static void cppi_pool_init(struct cppi *cppi, struct cppi_channel *c)
{
	int	j;

	/* initialize channel fields */
	c->head = NULL;
	c->tail = NULL;
	c->last_processed = NULL;
	c->channel.status = MUSB_DMA_STATUS_UNKNOWN;
	c->controller = cppi;
	c->is_rndis = 0;
	c->freelist = NULL;

	/* build the BD Free list for the channel */
	for (j = 0; j < NUM_TXCHAN_BD + 1; j++) {
		struct cppi_descriptor	*bd;
		dma_addr_t		dma;

		bd = dma_pool_alloc(cppi->pool, GFP_KERNEL, &dma);
		bd->dma = dma;
		cppi_bd_free(c, bd);
	}
}

static int cppi_channel_abort(struct dma_channel *);

static void cppi_pool_free(struct cppi_channel *c)
{
	struct cppi		*cppi = c->controller;
	struct cppi_descriptor	*bd;

	(void) cppi_channel_abort(&c->channel);
	c->channel.status = MUSB_DMA_STATUS_UNKNOWN;
	c->controller = NULL;

	/* free all its bds */
	bd = c->last_processed;
	do {
		if (bd)
			dma_pool_free(cppi->pool, bd, bd->dma);
		bd = cppi_bd_alloc(c);
	} while (bd);
	c->last_processed = NULL;
}

static void cppi_controller_start(struct cppi *controller)
{
	void __iomem	*tibase;
	int		i;

	/* do whatever is necessary to start controller */
	for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
		controller->tx[i].transmit = true;
		controller->tx[i].index = i;
	}
	for (i = 0; i < ARRAY_SIZE(controller->rx); i++) {
		controller->rx[i].transmit = false;
		controller->rx[i].index = i;
	}

	/* setup BD list on a per channel basis */
	for (i = 0; i < ARRAY_SIZE(controller->tx); i++)
		cppi_pool_init(controller, controller->tx + i);
	for (i = 0; i < ARRAY_SIZE(controller->rx); i++)
		cppi_pool_init(controller, controller->rx + i);

	tibase = controller->tibase;
	INIT_LIST_HEAD(&controller->tx_complete);

	/* initialise tx/rx channel head pointers to zero */
	for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
		struct cppi_channel	*tx_ch = controller->tx + i;
		struct cppi_tx_stateram __iomem *tx;

		INIT_LIST_HEAD(&tx_ch->tx_complete);

		tx = tibase + DAVINCI_TXCPPI_STATERAM_OFFSET(i);
		tx_ch->state_ram = tx;
		cppi_reset_tx(tx, 0);
	}
	for (i = 0; i < ARRAY_SIZE(controller->rx); i++) {
		struct cppi_channel	*rx_ch = controller->rx + i;
		struct cppi_rx_stateram __iomem *rx;

		INIT_LIST_HEAD(&rx_ch->tx_complete);

		rx = tibase + DAVINCI_RXCPPI_STATERAM_OFFSET(i);
		rx_ch->state_ram = rx;
		cppi_reset_rx(rx);
	}

	/* enable individual cppi channels */
	musb_writel(tibase, DAVINCI_TXCPPI_INTENAB_REG,
			DAVINCI_DMA_ALL_CHANNELS_ENABLE);
	musb_writel(tibase, DAVINCI_RXCPPI_INTENAB_REG,
			DAVINCI_DMA_ALL_CHANNELS_ENABLE);

	/* enable tx/rx CPPI control */
	musb_writel(tibase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_ENABLE);
	musb_writel(tibase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_ENABLE);

	/* disable RNDIS mode, also host rx RNDIS autorequest */
	musb_writel(tibase, DAVINCI_RNDIS_REG, 0);
	musb_writel(tibase, DAVINCI_AUTOREQ_REG, 0);
}

/*
 *  Stop DMA controller
 *
 *  De-init the DMA controller as necessary.
 */

static void cppi_controller_stop(struct cppi *controller)
{
	void __iomem		*tibase;
	int			i;
	struct musb		*musb;

	musb = controller->musb;

	tibase = controller->tibase;
	/* disable individual channel interrupts */
	musb_writel(tibase, DAVINCI_TXCPPI_INTCLR_REG,
			DAVINCI_DMA_ALL_CHANNELS_ENABLE);
	musb_writel(tibase, DAVINCI_RXCPPI_INTCLR_REG,
			DAVINCI_DMA_ALL_CHANNELS_ENABLE);

	musb_dbg(musb, "Tearing down RX and TX Channels");
	for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
		/* FIXME restructure of txdma to use bds like rxdma */
		controller->tx[i].last_processed = NULL;
		cppi_pool_free(controller->tx + i);
	}
	for (i = 0; i < ARRAY_SIZE(controller->rx); i++)
		cppi_pool_free(controller->rx + i);

	/* In the TX case proper teardown is supported: we disable TX/RX
	 * CPPI only after the TX channels are cleaned up, since TX CPPI
	 * cannot be disabled until TX teardown is complete.
	 */
	/* disable tx/rx cppi */
	musb_writel(tibase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE);
	musb_writel(tibase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE);
}

/* While dma channel is allocated, we only want the core irqs active
 * for fault reports, otherwise we'd get irqs that we don't care about.
 * Except for TX irqs, where dma done != fifo empty and reusable ...
 *
 * NOTE: docs don't say either way, but irq masking **enables** irqs.
 *
 * REVISIT same issue applies to pure PIO usage too, and non-cppi dma...
 */
static inline void core_rxirq_disable(void __iomem *tibase, unsigned epnum)
{
	musb_writel(tibase, DAVINCI_USB_INT_MASK_CLR_REG, 1 << (epnum + 8));
}

static inline void core_rxirq_enable(void __iomem *tibase, unsigned epnum)
{
	musb_writel(tibase, DAVINCI_USB_INT_MASK_SET_REG, 1 << (epnum + 8));
}
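
/* Illustrative example (added commentary): the DaVinci USB interrupt
 * mask registers put RX endpoint bits in the second byte, so RX ep1
 * maps to bit 9 (1 << (1 + 8) == 0x0200), RX ep2 to bit 10, and so on.
 * Writing that bit to INT_MASK_SET_REG unmasks (enables) the irq, and
 * writing it to INT_MASK_CLR_REG masks it again.
 */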


/*
 * Allocate a CPPI Channel for DMA.  With CPPI, channels are bound to
 * each transfer direction of a non-control endpoint, so allocating
 * (and deallocating) is mostly a way to notice bad housekeeping on
 * the software side.  We assume the irqs are always active.
 */
static struct dma_channel *
cppi_channel_allocate(struct dma_controller *c,
		struct musb_hw_ep *ep, u8 transmit)
{
	struct cppi		*controller;
	u8			index;
	struct cppi_channel	*cppi_ch;
	void __iomem		*tibase;
	struct musb		*musb;

	controller = container_of(c, struct cppi, controller);
	tibase = controller->tibase;
	musb = controller->musb;

	/* ep0 doesn't use DMA; remember cppi indices are 0..N-1 */
	index = ep->epnum - 1;

	/* return the corresponding CPPI Channel Handle, and
	 * probably disable the non-CPPI irq until we need it.
	 */
	if (transmit) {
		if (index >= ARRAY_SIZE(controller->tx)) {
			musb_dbg(musb, "no %cX%d CPPI channel", 'T', index);
			return NULL;
		}
		cppi_ch = controller->tx + index;
	} else {
		if (index >= ARRAY_SIZE(controller->rx)) {
			musb_dbg(musb, "no %cX%d CPPI channel", 'R', index);
			return NULL;
		}
		cppi_ch = controller->rx + index;
		core_rxirq_disable(tibase, ep->epnum);
	}

	/* REVISIT make this an error later once the same driver code works
	 * with the other DMA engine too
	 */
	if (cppi_ch->hw_ep)
		musb_dbg(musb, "re-allocating DMA%d %cX channel %p",
				index, transmit ? 'T' : 'R', cppi_ch);
	cppi_ch->hw_ep = ep;
	cppi_ch->channel.status = MUSB_DMA_STATUS_FREE;
	cppi_ch->channel.max_len = 0x7fffffff;

	musb_dbg(musb, "Allocate CPPI%d %cX", index, transmit ? 'T' : 'R');
	return &cppi_ch->channel;
}
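
/* Illustrative mapping (added commentary): since ep0 never uses DMA,
 * hw_ep 1 maps to CPPI channel 0, hw_ep 2 to channel 1, and so on;
 * the TX and RX directions of the same endpoint get independent
 * channels from the tx[] and rx[] arrays.
 */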

/* Release a CPPI Channel.  */
static void cppi_channel_release(struct dma_channel *channel)
{
	struct cppi_channel	*c;
	void __iomem		*tibase;

	/* REVISIT:  for paranoia, check state and abort if needed... */

	c = container_of(channel, struct cppi_channel, channel);
	tibase = c->controller->tibase;
	if (!c->hw_ep)
		musb_dbg(c->controller->musb,
			"releasing idle DMA channel %p", c);
	else if (!c->transmit)
		core_rxirq_enable(tibase, c->index + 1);

	/* for now, leave its cppi IRQ enabled (we won't trigger it) */
	c->hw_ep = NULL;
	channel->status = MUSB_DMA_STATUS_UNKNOWN;
}

/* Context: controller irqlocked */
static void
cppi_dump_rx(int level, struct cppi_channel *c, const char *tag)
{
	void __iomem			*base = c->controller->mregs;
	struct cppi_rx_stateram __iomem	*rx = c->state_ram;

	musb_ep_select(base, c->index + 1);

	musb_dbg(c->controller->musb,
		"RX DMA%d%s: %d left, csr %04x, "
		"%08x H%08x S%08x C%08x, "
		"B%08x L%08x %08x .. %08x",
		c->index, tag,
		musb_readl(c->controller->tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + 4 * c->index),
		musb_readw(c->hw_ep->regs, MUSB_RXCSR),

		musb_readl(&rx->rx_skipbytes, 0),
		musb_readl(&rx->rx_head, 0),
		musb_readl(&rx->rx_sop, 0),
		musb_readl(&rx->rx_current, 0),

		musb_readl(&rx->rx_buf_current, 0),
		musb_readl(&rx->rx_len_len, 0),
		musb_readl(&rx->rx_cnt_cnt, 0),
		musb_readl(&rx->rx_complete, 0)
		);
}

/* Context: controller irqlocked */
static void
cppi_dump_tx(int level, struct cppi_channel *c, const char *tag)
{
	void __iomem			*base = c->controller->mregs;
	struct cppi_tx_stateram __iomem	*tx = c->state_ram;

	musb_ep_select(base, c->index + 1);

	musb_dbg(c->controller->musb,
		"TX DMA%d%s: csr %04x, "
		"H%08x S%08x C%08x %08x, "
		"F%08x L%08x .. %08x",
		c->index, tag,
		musb_readw(c->hw_ep->regs, MUSB_TXCSR),

		musb_readl(&tx->tx_head, 0),
		musb_readl(&tx->tx_buf, 0),
		musb_readl(&tx->tx_current, 0),
		musb_readl(&tx->tx_buf_current, 0),

		musb_readl(&tx->tx_info, 0),
		musb_readl(&tx->tx_rem_len, 0),
		/* dummy/unused word 6 */
		musb_readl(&tx->tx_complete, 0)
		);
}

/* Context: controller irqlocked */
static inline void
cppi_rndis_update(struct cppi_channel *c, int is_rx,
		void __iomem *tibase, int is_rndis)
{
	/* we may need to change the rndis flag for this cppi channel */
	if (c->is_rndis != is_rndis) {
		u32	value = musb_readl(tibase, DAVINCI_RNDIS_REG);
		u32	temp = 1 << (c->index);

		if (is_rx)
			temp <<= 16;
		if (is_rndis)
			value |= temp;
		else
			value &= ~temp;
		musb_writel(tibase, DAVINCI_RNDIS_REG, value);
		c->is_rndis = is_rndis;
	}
}

static void cppi_dump_rxbd(const char *tag, struct cppi_descriptor *bd)
{
	pr_debug("RXBD/%s %08x: "
			"nxt %08x buf %08x off.blen %08x opt.plen %08x\n",
			tag, bd->dma,
			bd->hw_next, bd->hw_bufp, bd->hw_off_len,
			bd->hw_options);
}

static void cppi_dump_rxq(int level, const char *tag, struct cppi_channel *rx)
{
	struct cppi_descriptor	*bd;

	cppi_dump_rx(level, rx, tag);
	if (rx->last_processed)
		cppi_dump_rxbd("last", rx->last_processed);
	for (bd = rx->head; bd; bd = bd->next)
		cppi_dump_rxbd("active", bd);
}


/* NOTE:  DaVinci autoreq is ignored except for host side "RNDIS" mode RX;
 * so we won't ever use it (see "CPPI RX Woes" below).
 */
static inline int cppi_autoreq_update(struct cppi_channel *rx,
		void __iomem *tibase, int onepacket, unsigned n_bds)
{
	u32	val;

#ifdef	RNDIS_RX_IS_USABLE
	u32	tmp;
	/* assert(is_host_active(musb)) */

	/* start from "AutoReq never" */
	tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
	val = tmp & ~((0x3) << (rx->index * 2));

	/* HCD arranged reqpkt for packet #1.  we arrange int
	 * for all but the last one, maybe in two segments.
	 */
	if (!onepacket) {
#if 0
		/* use two segments, autoreq "all" then the last "never" */
		val |= ((0x3) << (rx->index * 2));
		n_bds--;
#else
		/* one segment, autoreq "all-but-last" */
		val |= ((0x1) << (rx->index * 2));
#endif
	}

	if (val != tmp) {
		int n = 100;

		/* make sure that autoreq is updated before continuing */
		musb_writel(tibase, DAVINCI_AUTOREQ_REG, val);
		do {
			tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
			if (tmp == val)
				break;
			cpu_relax();
		} while (n-- > 0);
	}
#endif

	/* REQPKT is turned off after each segment */
	if (n_bds && rx->channel.actual_len) {
		void __iomem	*regs = rx->hw_ep->regs;

		val = musb_readw(regs, MUSB_RXCSR);
		if (!(val & MUSB_RXCSR_H_REQPKT)) {
			val |= MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_H_WZC_BITS;
			musb_writew(regs, MUSB_RXCSR, val);
			/* flush writebuffer */
			val = musb_readw(regs, MUSB_RXCSR);
		}
	}
	return n_bds;
}
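
/* Illustrative encoding note (added commentary): DAVINCI_AUTOREQ_REG
 * holds a 2-bit field per RX channel at bit position (index * 2); as
 * used above, 0x0 means "AutoReq never", 0x1 means "all but the last
 * packet", and 0x3 means "all packets".  So for RX channel 2, clearing
 * bits 5:4 and OR-ing in (0x1 << 4) would request all-but-last.
 */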


/* Buffer enqueuing Logic:
 *
 *  - RX builds new queues each time, to help handle routine "early
 *    termination" cases (faults, including errors and short reads)
 *    more correctly.
 *
 *  - for now, TX reuses the same queue of BDs every time
 *
 * REVISIT long term, we want a normal dynamic model.
 * ... the goal will be to append to the
 * existing queue, processing completed "dma buffers" (segments) on the fly.
 *
 * Otherwise we force an IRQ latency between requests, which slows us a lot
 * (especially in "transparent" dma).  Unfortunately that model seems to be
 * inherent in the DMA model from the Mentor code, except in the rare case
 * of transfers big enough (~128+ KB) that we could append "middle" segments
 * in the TX paths.  (RX can't do this, see below.)
 *
 * That's true even in the CPPI-friendly iso case, where most urbs have
 * several small segments provided in a group and where the "packet at a time"
 * "transparent" DMA model is always correct, even on the RX side.
 */

/*
 * CPPI TX:
 * ========
 * TX is a lot more reasonable than RX; it doesn't need to run in
 * irq-per-packet mode very often.  RNDIS mode seems to behave well too
 * (except how it handles the exactly-N-packets case).  Building a
 * txdma queue with multiple requests (urb or usb_request) looks
 * like it would work ... but fault handling would need much testing.
 *
 * The main issue with TX mode RNDIS relates to transfer lengths that
 * are an exact multiple of the packet length.  It appears that there's
 * a hiccup in that case (maybe the DMA completes before the ZLP gets
 * written?) boiling down to not being able to rely on CPPI writing any
 * terminating zero length packet before the next transfer is written.
 * So that's punted to PIO; better yet, gadget drivers can avoid it.
 *
 * Plus, there's allegedly an undocumented constraint that rndis transfer
 * length be a multiple of 64 bytes ... but the chip doesn't act that
 * way, and we really don't _want_ that behavior anyway.
 *
 * On TX, "transparent" mode works ... although experiments have shown
 * problems trying to use the SOP/EOP bits in different USB packets.
 *
 * REVISIT try to handle terminating zero length packets using CPPI
 * instead of doing it by PIO after an IRQ.  (Meanwhile, make Ethernet
 * links avoid that issue by forcing them to avoid zlps.)
 */
static void
cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx)
{
	unsigned		maxpacket = tx->maxpacket;
	dma_addr_t		addr = tx->buf_dma + tx->offset;
	size_t			length = tx->buf_len - tx->offset;
	struct cppi_descriptor	*bd;
	unsigned		n_bds;
	unsigned		i;
	struct cppi_tx_stateram	__iomem *tx_ram = tx->state_ram;
	int			rndis;

	/* TX can use the CPPI "rndis" mode, where we can probably fit this
	 * transfer in one BD and one IRQ.  The only time we would NOT want
	 * to use it is when hardware constraints prevent it, or if we'd
	 * trigger the "send a ZLP?" confusion.
	 */
	rndis = (maxpacket & 0x3f) == 0
		&& length > maxpacket
		&& length < 0xffff
		&& (length % maxpacket) != 0;

	if (rndis) {
		maxpacket = length;
		n_bds = 1;
	} else {
		n_bds = length / maxpacket;
		if (!length || (length % maxpacket))
			n_bds++;
		n_bds = min(n_bds, (unsigned) NUM_TXCHAN_BD);
		length = min(n_bds * maxpacket, length);
	}
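
	/* Illustrative arithmetic (added commentary, not in the original):
	 * with maxpacket 512 and length 1200, the test above picks rndis
	 * mode (512 is a multiple of 64; 1200 > 512; 1200 < 0xffff;
	 * 1200 % 512 != 0), so the whole transfer fits in one BD.  With
	 * length 1024, an exact multiple, rndis is skipped to dodge the
	 * ZLP hiccup and the transparent path builds n_bds = 2 instead.
	 */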

	musb_dbg(musb, "TX DMA%d, pktSz %d %s bds %d dma 0x%llx len %u",
			tx->index,
			maxpacket,
			rndis ? "rndis" : "transparent",
			n_bds,
			(unsigned long long)addr, length);

	cppi_rndis_update(tx, 0, musb->ctrl_base, rndis);

	/* assuming here that channel_program is called during
	 * transfer initiation ... current code maintains state
	 * for one outstanding request only (no queues, not even
	 * the implicit ones of an iso urb).
	 */

	bd = tx->freelist;
	tx->head = bd;
	tx->last_processed = NULL;

	/* FIXME use BD pool like RX side does, and just queue
	 * the minimum number for this request.
	 */

	/* Prepare queue of BDs first, then hand it to hardware.
	 * All BDs except maybe the last should be of full packet
	 * size; for RNDIS there _is_ only that last packet.
	 */
	for (i = 0; i < n_bds; ) {
		if (++i < n_bds && bd->next)
			bd->hw_next = bd->next->dma;
		else
			bd->hw_next = 0;

		bd->hw_bufp = tx->buf_dma + tx->offset;

		/* FIXME set EOP only on the last packet,
		 * SOP only on the first ... avoid IRQs
		 */
		if ((tx->offset + maxpacket) <= tx->buf_len) {
			tx->offset += maxpacket;
			bd->hw_off_len = maxpacket;
			bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET
				| CPPI_OWN_SET | maxpacket;
		} else {
			/* only this one may be a partial USB Packet */
			u32		partial_len;

			partial_len = tx->buf_len - tx->offset;
			tx->offset = tx->buf_len;
			bd->hw_off_len = partial_len;

			bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET
				| CPPI_OWN_SET | partial_len;
			if (partial_len == 0)
				bd->hw_options |= CPPI_ZERO_SET;
		}

		musb_dbg(musb, "TXBD %p: nxt %08x buf %08x len %04x opt %08x",
				bd, bd->hw_next, bd->hw_bufp,
				bd->hw_off_len, bd->hw_options);

		/* update the last BD enqueued to the list */
		tx->tail = bd;
		bd = bd->next;
	}

	/* BDs live in DMA-coherent memory, but writes might be pending */
	cpu_drain_writebuffer();

	/* Write to the HeadPtr in state RAM to trigger */
	musb_writel(&tx_ram->tx_head, 0, (u32)tx->freelist->dma);

	cppi_dump_tx(5, tx, "/S");
}

/*
 * CPPI RX Woes:
 * =============
 * Consider a 1KB bulk RX buffer in two scenarios:  (a) it's fed two 300 byte
 * packets back-to-back, and (b) it's fed two 512 byte packets back-to-back.
 * (Full speed transfers have similar scenarios.)
 *
 * The correct behavior for Linux is that (a) fills the buffer with 300 bytes,
 * and the next packet goes into a buffer that's queued later; while (b) fills
 * the buffer with 1024 bytes.  How to do that with CPPI?
 *
 * - RX queues in "rndis" mode -- one single BD -- handle (a) correctly, but
 *   (b) loses **BADLY** because nothing (!) happens when that second packet
 *   fills the buffer, much less when a third one arrives.  (Which makes this
 *   not a "true" RNDIS mode.  In the RNDIS protocol short-packet termination
 *   is optional, and it's fine if peripherals -- not hosts! -- pad messages
 *   out to end-of-buffer.  Standard PCI host controller DMA descriptors
 *   implement that mode by default ... which is no accident.)
 *
 * - RX queues in "transparent" mode -- two BDs with 512 bytes each -- have
 *   converse problems:  (b) is handled right, but (a) loses badly.  CPPI RX
 *   ignores SOP/EOP markings and processes both of those BDs; so both packets
 *   are loaded into the buffer (with a 212 byte gap between them), and the next
 *   buffer queued will NOT get its 300 bytes of data. (It seems like SOP/EOP
 *   are intended as outputs for RX queues, not inputs...)
 *
 * - A variant of "transparent" mode -- one BD at a time -- is the only way to
 *   reliably make both cases work, with software handling both cases correctly
 *   and at the significant penalty of needing an IRQ per packet.  (The lack of
 *   I/O overlap can be slightly ameliorated by enabling double buffering.)
 *
 * So how to get rid of IRQ-per-packet?  The transparent multi-BD case could
 * be used in special cases like mass storage, which sets URB_SHORT_NOT_OK
 * (or maybe its peripheral side counterpart) to flag (a) scenarios as errors
 * with guaranteed driver level fault recovery and scrubbing out what's left
 * of that garbaged datastream.
 *
 * But there seems to be no way to identify the cases where CPPI RNDIS mode
 * is appropriate -- which do NOT include RNDIS host drivers, but do include
 * the CDC Ethernet driver! -- and the documentation is incomplete/wrong.
 * So we can't _ever_ use RX RNDIS mode ... except by using a heuristic
 * that applies best on the peripheral side (and which could fail rudely).
 *
 * Leaving only "transparent" mode; we avoid multi-bd modes in almost all
 * cases other than mass storage class.  Otherwise we're correct but slow,
 * since CPPI penalizes our need for a "true RNDIS" default mode.
 */


/* Heuristic, intended to kick in for ethernet/rndis peripheral ONLY
 *
 * IFF
 *  (a)	peripheral mode ... since rndis peripherals could pad their
 *	writes to hosts, causing i/o failure; or we'd have to cope with
 *	a largely unknowable variety of host side protocol variants
 *  (b)	and short reads are NOT errors ... since full reads would
 *	cause those same i/o failures
 *  (c)	and read length is
 *	- less than 64KB (max per cppi descriptor)
 *	- not a multiple of 4096 (g_zero default, full reads typical)
 *	- N (>1) packets long, ditto (full reads not EXPECTED)
 * THEN
 *   try rx rndis mode
 *
 * Cost of heuristic failing:  RXDMA wedges at the end of transfers that
 * fill out the whole buffer.  Buggy host side usb network drivers could
 * trigger that, but "in the field" such bugs seem to be all but unknown.
 *
 * So this module parameter lets the heuristic be disabled.  When using
 * gadgetfs, you will probably need to disable it.
 */
static bool cppi_rx_rndis = true;

module_param(cppi_rx_rndis, bool, 0);
MODULE_PARM_DESC(cppi_rx_rndis, "enable/disable RX RNDIS heuristic");
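
/* Illustrative check (added commentary): with maxpacket 512, a 2048
 * byte read qualifies for the heuristic below (2048 > 512; under 64KB;
 * 2048 & 0x0fff is 0x800, so not a multiple of 4096; and 2048 is a
 * whole number of packets), while a 4096 byte read fails the 4096
 * multiple test and a 300 byte read fails the N > 1 packets test.
 */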


/**
 * cppi_next_rx_segment - dma read for the next chunk of a buffer
 * @musb: the controller
 * @rx: dma channel
 * @onepacket: true unless caller treats short reads as errors, and
 *	performs fault recovery above usbcore.
 * Context: controller irqlocked
 *
 * See above notes about why we can't use multi-BD RX queues except in
 * rare cases (mass storage class), and can never use the hardware "rndis"
 * mode (since it's not a "true" RNDIS mode) with complete safety.
 *
 * It's ESSENTIAL that callers specify "onepacket" mode unless they kick in
 * code to recover from corrupted datastreams after each short transfer.
 */
static void
cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket)
{
	unsigned		maxpacket = rx->maxpacket;
	dma_addr_t		addr = rx->buf_dma + rx->offset;
	size_t			length = rx->buf_len - rx->offset;
	struct cppi_descriptor	*bd, *tail;
	unsigned		n_bds;
	unsigned		i;
	void __iomem		*tibase = musb->ctrl_base;
	int			is_rndis = 0;
	struct cppi_rx_stateram	__iomem *rx_ram = rx->state_ram;
	struct cppi_descriptor	*d;

	if (onepacket) {
		/* almost every USB driver, host or peripheral side */
		n_bds = 1;

		/* maybe apply the heuristic above */
		if (cppi_rx_rndis
				&& is_peripheral_active(musb)
				&& length > maxpacket
				&& (length & ~0xffff) == 0
				&& (length & 0x0fff) != 0
				&& (length & (maxpacket - 1)) == 0) {
			maxpacket = length;
			is_rndis = 1;
		}
	} else {
		/* virtually nothing except mass storage class */
		if (length > 0xffff) {
			n_bds = 0xffff / maxpacket;
			length = n_bds * maxpacket;
		} else {
			n_bds = length / maxpacket;
			if (length % maxpacket)
				n_bds++;
		}
		if (n_bds == 1)
			onepacket = 1;
		else
			n_bds = min(n_bds, (unsigned) NUM_RXCHAN_BD);
	}

	/* In host mode, autorequest logic can generate some IN tokens; it's
	 * tricky since we can't leave REQPKT set in RXCSR after the transfer
	 * finishes. So:  multipacket transfers involve two or more segments.
	 * And always at least two IRQs ... RNDIS mode is not an option.
	 */
	if (is_host_active(musb))
		n_bds = cppi_autoreq_update(rx, tibase, onepacket, n_bds);

	cppi_rndis_update(rx, 1, musb->ctrl_base, is_rndis);

	length = min(n_bds * maxpacket, length);

	musb_dbg(musb, "RX DMA%d seg, maxp %d %s bds %d (cnt %d) "
			"dma 0x%llx len %u %u/%u",
			rx->index, maxpacket,
			onepacket
				? (is_rndis ? "rndis" : "onepacket")
				: "multipacket",
			n_bds,
			musb_readl(tibase,
				DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
					& 0xffff,
			(unsigned long long)addr, length,
			rx->channel.actual_len, rx->buf_len);

	/* only queue one segment at a time, since the hardware prevents
	 * correct queue shutdown after unexpected short packets
	 */
	bd = cppi_bd_alloc(rx);
	rx->head = bd;

	/* Build BDs for all packets in this segment */
	for (i = 0, tail = NULL; bd && i < n_bds; i++, tail = bd) {
		u32	bd_len;

		if (i) {
			bd = cppi_bd_alloc(rx);
			if (!bd)
				break;
			tail->next = bd;
			tail->hw_next = bd->dma;
		}
		bd->hw_next = 0;

		/* all but the last packet will be maxpacket size */
		if (maxpacket < length)
			bd_len = maxpacket;
		else
			bd_len = length;

		bd->hw_bufp = addr;
		addr += bd_len;
		rx->offset += bd_len;

		bd->hw_off_len = (0 /*offset*/ << 16) + bd_len;
		bd->buflen = bd_len;

		bd->hw_options = CPPI_OWN_SET | (i == 0 ? length : 0);
		length -= bd_len;
	}

	/* we always expect at least one reusable BD! */
	if (!tail) {
		WARNING("rx dma%d -- no BDs? need %d\n", rx->index, n_bds);
		return;
	} else if (i < n_bds)
		WARNING("rx dma%d -- only %d of %d BDs\n", rx->index, i, n_bds);

	tail->next = NULL;
	tail->hw_next = 0;

	bd = rx->head;
	rx->tail = tail;

	/* short reads and other faults should terminate this entire
	 * dma segment.  we want one "dma packet" per dma segment, not
	 * one per USB packet, terminating the whole queue at once...
	 * NOTE that current hardware seems to ignore SOP and EOP.
	 */
	bd->hw_options |= CPPI_SOP_SET;
	tail->hw_options |= CPPI_EOP_SET;

	for (d = rx->head; d; d = d->next)
		cppi_dump_rxbd("S", d);

	/* in case the preceding transfer left some state... */
	tail = rx->last_processed;
	if (tail) {
		tail->next = bd;
		tail->hw_next = bd->dma;
	}

	core_rxirq_enable(tibase, rx->index + 1);

	/* BDs live in DMA-coherent memory, but writes might be pending */
	cpu_drain_writebuffer();

	/* REVISIT specs say to write this AFTER the BUFCNT register
	 * below ... but that loses badly.
	 */
	musb_writel(&rx_ram->rx_head, 0, bd->dma);

	/* bufferCount must be at least 3, and zeroes on completion
	 * unless it underflows below zero, or stops at two, or keeps
	 * growing ... grr.
	 */
	i = musb_readl(tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
			& 0xffff;

	if (!i)
		musb_writel(tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
			n_bds + 2);
	else if (n_bds > (i - 3))
		musb_writel(tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
			n_bds - (i - 3));

	i = musb_readl(tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
			& 0xffff;
	if (i < (2 + n_bds)) {
		musb_dbg(musb, "bufcnt%d underrun - %d (for %d)",
					rx->index, i, n_bds);
		musb_writel(tibase,
			DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
			n_bds + 2);
	}

	cppi_dump_rx(4, rx, "/S");
}

/**
 * cppi_channel_program - program channel for data transfer
 * @ch: the channel
 * @maxpacket: max packet size
 * @mode: For RX, 1 unless the usb protocol driver promised to treat
 *	all short reads as errors and kick in high level fault recovery.
 *	For TX, ignored because of RNDIS mode races/glitches.
 * @dma_addr: dma address of buffer
 * @len: length of buffer
 * Context: controller irqlocked
 */
static int cppi_channel_program(struct dma_channel *ch,
		u16 maxpacket, u8 mode,
		dma_addr_t dma_addr, u32 len)
{
	struct cppi_channel	*cppi_ch;
	struct cppi		*controller;
	struct musb		*musb;

	cppi_ch = container_of(ch, struct cppi_channel, channel);
	controller = cppi_ch->controller;
	musb = controller->musb;

	switch (ch->status) {
	case MUSB_DMA_STATUS_BUS_ABORT:
	case MUSB_DMA_STATUS_CORE_ABORT:
		/* fault irq handler should have handled cleanup */
		WARNING("%cX DMA%d not cleaned up after abort!\n",
				cppi_ch->transmit ? 'T' : 'R',
				cppi_ch->index);
		/* WARN_ON(1); */
		break;
	case MUSB_DMA_STATUS_BUSY:
		WARNING("program active channel?  %cX DMA%d\n",
				cppi_ch->transmit ? 'T' : 'R',
				cppi_ch->index);
		/* WARN_ON(1); */
		break;
	case MUSB_DMA_STATUS_UNKNOWN:
		musb_dbg(musb, "%cX DMA%d not allocated!",
				cppi_ch->transmit ? 'T' : 'R',
				cppi_ch->index);
		/* FALLTHROUGH */
	case MUSB_DMA_STATUS_FREE:
		break;
	}

	ch->status = MUSB_DMA_STATUS_BUSY;

	/* set transfer parameters, then queue up its first segment */
	cppi_ch->buf_dma = dma_addr;
	cppi_ch->offset = 0;
	cppi_ch->maxpacket = maxpacket;
	cppi_ch->buf_len = len;
	cppi_ch->channel.actual_len = 0;

	/* TX channel? or RX? */
	if (cppi_ch->transmit)
		cppi_next_tx_segment(musb, cppi_ch);
	else
		cppi_next_rx_segment(musb, cppi_ch, mode);

	return true;
}
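
/* Hypothetical usage sketch (added commentary, not part of the driver):
 * musb core code drives this API roughly as
 *
 *	channel = c->channel_alloc(c, hw_ep, is_tx);
 *	c->channel_program(channel, maxpacket, mode, dma_addr, length);
 *	... the dma irq arrives; cppi_interrupt() eventually calls
 *	... musb_dma_completion() for the endpoint
 *	c->channel_release(channel);
 *
 * with everything between alloc and release happening under the
 * controller lock.
 */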

static bool cppi_rx_scan(struct cppi *cppi, unsigned ch)
{
	struct cppi_channel		*rx = &cppi->rx[ch];
	struct cppi_rx_stateram __iomem	*state = rx->state_ram;
	struct cppi_descriptor		*bd;
	struct cppi_descriptor		*last = rx->last_processed;
	bool				completed = false;
	bool				acked = false;
	int				i;
	dma_addr_t			safe2ack;
	void __iomem			*regs = rx->hw_ep->regs;
	struct musb			*musb = cppi->musb;

	cppi_dump_rx(6, rx, "/K");

	bd = last ? last->next : rx->head;
	if (!bd)
		return false;

	/* run through all completed BDs */
	for (i = 0, safe2ack = musb_readl(&state->rx_complete, 0);
			(safe2ack || completed) && bd && i < NUM_RXCHAN_BD;
			i++, bd = bd->next) {
		u16	len;

		/* catch latest BD writes from CPPI */
		rmb();
		if (!completed && (bd->hw_options & CPPI_OWN_SET))
			break;

		musb_dbg(musb, "C/RXBD %llx: nxt %08x buf %08x "
			"off.len %08x opt.len %08x (%d)",
			(unsigned long long)bd->dma, bd->hw_next, bd->hw_bufp,
			bd->hw_off_len, bd->hw_options,
			rx->channel.actual_len);

		/* actual packet received length */
		if ((bd->hw_options & CPPI_SOP_SET) && !completed)
			len = bd->hw_off_len & CPPI_RECV_PKTLEN_MASK;
		else
			len = 0;

		if (bd->hw_options & CPPI_EOQ_MASK)
			completed = true;

		if (!completed && len < bd->buflen) {
			/* NOTE:  when we get a short packet, RXCSR_H_REQPKT
			 * must have been cleared, and no more DMA packets
			 * may be active in the queue... TI docs didn't say,
			 * but CPPI ignores those BDs even though OWN is
			 * still set.
			 */
			completed = true;
			musb_dbg(musb, "rx short %d/%d (%d)",
					len, bd->buflen,
					rx->channel.actual_len);
		}

		/* If we got here, we expect to ack at least one BD; meanwhile
		 * CPPI may be completing other BDs while we scan this list...
		 *
		 * RACE: we can notice OWN cleared before CPPI raises the
		 * matching irq by writing that BD as the completion pointer.
		 * In such cases, stop scanning and wait for the irq, avoiding
		 * lost acks and states where BD ownership is unclear.
		 */
		if (bd->dma == safe2ack) {
			musb_writel(&state->rx_complete, 0, safe2ack);
			safe2ack = musb_readl(&state->rx_complete, 0);
			acked = true;
			if (bd->dma == safe2ack)
				safe2ack = 0;
		}

		rx->channel.actual_len += len;

		cppi_bd_free(rx, last);
		last = bd;

		/* stop scanning on end-of-segment */
		if (bd->hw_next == 0)
			completed = true;
	}
	rx->last_processed = last;

	/* dma abort, lost ack, or ... */
	if (!acked && last) {
		int	csr;

		if (safe2ack == 0 || safe2ack == rx->last_processed->dma)
			musb_writel(&state->rx_complete, 0, safe2ack);
		if (safe2ack == 0) {
			cppi_bd_free(rx, last);
			rx->last_processed = NULL;

			/* if we land here on the host side, H_REQPKT will
			 * be clear and we need to restart the queue...
			 */
			WARN_ON(rx->head);
		}
		musb_ep_select(cppi->mregs, rx->index + 1);
		csr = musb_readw(regs, MUSB_RXCSR);
		if (csr & MUSB_RXCSR_DMAENAB) {
			musb_dbg(musb, "list%d %p/%p, last %llx%s, csr %04x",
				rx->index,
				rx->head, rx->tail,
				rx->last_processed
					? (unsigned long long)
						rx->last_processed->dma
					: 0,
				completed ? ", completed" : "",
				csr);
			cppi_dump_rxq(4, "/what?", rx);
		}
	}
	if (!completed) {
		int	csr;

		rx->head = bd;

		/* REVISIT seems like "autoreq all but EOP" doesn't...
		 * setting it here "should" be racey, but seems to work
		 */
		csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR);
		if (is_host_active(cppi->musb)
				&& bd
				&& !(csr & MUSB_RXCSR_H_REQPKT)) {
			csr |= MUSB_RXCSR_H_REQPKT;
			musb_writew(regs, MUSB_RXCSR,
					MUSB_RXCSR_H_WZC_BITS | csr);
			csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR);
		}
	} else {
		rx->head = NULL;
		rx->tail = NULL;
	}

	cppi_dump_rx(6, rx, completed ? "/completed" : "/cleaned");
	return completed;
}
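
/* Illustrative note (added commentary): the "safe2ack" dance above
 * implements CPPI's completion-pointer handshake as this driver
 * understands it; writing a BD's dma address to rx_complete
 * acknowledges completed BDs up to and including that one, and
 * reading the register back shows how far the hardware has since
 * advanced.
 */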

irqreturn_t cppi_interrupt(int irq, void *dev_id)
{
	struct musb		*musb = dev_id;
	struct cppi		*cppi;
	void __iomem		*tibase;
	struct musb_hw_ep	*hw_ep = NULL;
	u32			rx, tx;
	int			i, index;
	unsigned long		uninitialized_var(flags);

	cppi = container_of(musb->dma_controller, struct cppi, controller);
	if (cppi->irq)
		spin_lock_irqsave(&musb->lock, flags);

	tibase = musb->ctrl_base;

	tx = musb_readl(tibase, DAVINCI_TXCPPI_MASKED_REG);
	rx = musb_readl(tibase, DAVINCI_RXCPPI_MASKED_REG);

	if (!tx && !rx) {
		if (cppi->irq)
			spin_unlock_irqrestore(&musb->lock, flags);
		return IRQ_NONE;
	}

	musb_dbg(musb, "CPPI IRQ Tx%x Rx%x", tx, rx);

	/* process TX channels */
	for (index = 0; tx; tx = tx >> 1, index++) {
		struct cppi_channel		*tx_ch;
		struct cppi_tx_stateram __iomem	*tx_ram;
		bool				completed = false;
		struct cppi_descriptor		*bd;

		if (!(tx & 1))
			continue;

		tx_ch = cppi->tx + index;
		tx_ram = tx_ch->state_ram;

		/* FIXME  need a cppi_tx_scan() routine, which
		 * can also be called from abort code
		 */

		cppi_dump_tx(5, tx_ch, "/E");

		bd = tx_ch->head;

		/*
		 * If head is null, an abort interrupt may need to be
		 * acknowledged.
		 */
		if (NULL == bd) {
			musb_dbg(musb, "null BD");
			musb_writel(&tx_ram->tx_complete, 0, 0);
			continue;
		}

		/* run through all completed BDs */
		for (i = 0; !completed && bd && i < NUM_TXCHAN_BD;
				i++, bd = bd->next) {
			u16	len;

			/* catch latest BD writes from CPPI */
			rmb();
			if (bd->hw_options & CPPI_OWN_SET)
				break;

			musb_dbg(musb, "C/TXBD %p n %x b %x off %x opt %x",
					bd, bd->hw_next, bd->hw_bufp,
					bd->hw_off_len, bd->hw_options);

			len = bd->hw_off_len & CPPI_BUFFER_LEN_MASK;
			tx_ch->channel.actual_len += len;

			tx_ch->last_processed = bd;

			/* write completion register to acknowledge
			 * processing of completed BDs, and possibly
			 * release the IRQ; EOQ might not be set ...
			 *
			 * REVISIT use the same ack strategy as rx
			 *
			 * REVISIT have observed bit 18 set; huh??
			 */
			/* if ((bd->hw_options & CPPI_EOQ_MASK)) */
				musb_writel(&tx_ram->tx_complete, 0, bd->dma);

			/* stop scanning on end-of-segment */
			if (bd->hw_next == 0)
				completed = true;
		}

		/* on end of segment, maybe go to next one */
		if (completed) {
			/* cppi_dump_tx(4, tx_ch, "/complete"); */

			/* transfer more, or report completion */
			if (tx_ch->offset >= tx_ch->buf_len) {
				tx_ch->head = NULL;
				tx_ch->tail = NULL;
				tx_ch->channel.status = MUSB_DMA_STATUS_FREE;

				hw_ep = tx_ch->hw_ep;

				musb_dma_completion(musb, index + 1, 1);

			} else {
				/* Bigger transfer than we could fit in
				 * that first batch of descriptors...
				 */
				cppi_next_tx_segment(musb, tx_ch);
			}
		} else
			tx_ch->head = bd;
	}

	/* Start processing the RX block */
	for (index = 0; rx; rx = rx >> 1, index++) {

		if (rx & 1) {
			struct cppi_channel		*rx_ch;

			rx_ch = cppi->rx + index;

			/* let incomplete dma segments finish */
			if (!cppi_rx_scan(cppi, index))
				continue;

			/* start another dma segment if needed */
			if (rx_ch->channel.actual_len != rx_ch->buf_len
					&& rx_ch->channel.actual_len
						== rx_ch->offset) {
				cppi_next_rx_segment(musb, rx_ch, 1);
				continue;
			}

			/* all segments completed! */
			rx_ch->channel.status = MUSB_DMA_STATUS_FREE;

			hw_ep = rx_ch->hw_ep;

			core_rxirq_disable(tibase, index + 1);
			musb_dma_completion(musb, index + 1, 0);
		}
	}

	/* write to CPPI EOI register to re-enable interrupts */
	musb_writel(tibase, DAVINCI_CPPI_EOI_REG, 0);

	if (cppi->irq)
		spin_unlock_irqrestore(&musb->lock, flags);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(cppi_interrupt);

/* Instantiate a software object representing a DMA controller. */
struct dma_controller *
cppi_dma_controller_create(struct musb *musb, void __iomem *mregs)
{
	struct cppi		*controller;
	struct device		*dev = musb->controller;
	struct platform_device	*pdev = to_platform_device(dev);
	int			irq = platform_get_irq_byname(pdev, "dma");

	controller = kzalloc(sizeof *controller, GFP_KERNEL);
	if (!controller)
		return NULL;

	controller->mregs = mregs;
	controller->tibase = mregs - DAVINCI_BASE_OFFSET;

	controller->musb = musb;
	controller->controller.channel_alloc = cppi_channel_allocate;
	controller->controller.channel_release = cppi_channel_release;
	controller->controller.channel_program = cppi_channel_program;
	controller->controller.channel_abort = cppi_channel_abort;

	/* NOTE: allocating from on-chip SRAM would give the least
	 * contention for memory access, if that ever matters here.
	 */

	/* setup BufferPool */
	controller->pool = dma_pool_create("cppi",
			controller->musb->controller,
			sizeof(struct cppi_descriptor),
			CPPI_DESCRIPTOR_ALIGN, 0);
	if (!controller->pool) {
		kfree(controller);
		return NULL;
	}

	if (irq > 0) {
		if (request_irq(irq, cppi_interrupt, 0, "cppi-dma", musb)) {
			dev_err(dev, "request_irq %d failed!\n", irq);
			musb_dma_controller_destroy(&controller->controller);
			return NULL;
		}
		controller->irq = irq;
	}

	cppi_controller_start(controller);
	return &controller->controller;
}
EXPORT_SYMBOL_GPL(cppi_dma_controller_create);
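
/* Hypothetical call site (added commentary): platform glue such as the
 * DaVinci driver would create and destroy this controller around musb
 * init and teardown, roughly
 *
 *	musb->dma_controller =
 *		cppi_dma_controller_create(musb, musb->mregs);
 *	...
 *	cppi_dma_controller_destroy(musb->dma_controller);
 *
 * where mregs points at the musb core registers, from which the
 * DaVinci control registers at tibase are located.
 */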

/*
 *  Destroy a previously-instantiated DMA controller.
 */
void cppi_dma_controller_destroy(struct dma_controller *c)
{
	struct cppi	*cppi;

	cppi = container_of(c, struct cppi, controller);

	cppi_controller_stop(cppi);

	if (cppi->irq)
		free_irq(cppi->irq, cppi->musb);

	/* assert:  caller stopped the controller first */
	dma_pool_destroy(cppi->pool);

	kfree(cppi);
}
EXPORT_SYMBOL_GPL(cppi_dma_controller_destroy);

/*
 * Context: controller irqlocked, endpoint selected
 */
static int cppi_channel_abort(struct dma_channel *channel)
{
	struct cppi_channel	*cppi_ch;
	struct cppi		*controller;
	void __iomem		*mbase;
	void __iomem		*tibase;
	void __iomem		*regs;
	u32			value;
	struct cppi_descriptor	*queue;

	cppi_ch = container_of(channel, struct cppi_channel, channel);

	controller = cppi_ch->controller;

	switch (channel->status) {
	case MUSB_DMA_STATUS_BUS_ABORT:
	case MUSB_DMA_STATUS_CORE_ABORT:
		/* from RX or TX fault irq handler */
	case MUSB_DMA_STATUS_BUSY:
		/* the hardware needs shutting down */
		regs = cppi_ch->hw_ep->regs;
		break;
	case MUSB_DMA_STATUS_UNKNOWN:
	case MUSB_DMA_STATUS_FREE:
		return 0;
	default:
		return -EINVAL;
	}

	if (!cppi_ch->transmit && cppi_ch->head)
		cppi_dump_rxq(3, "/abort", cppi_ch);

	mbase = controller->mregs;
	tibase = controller->tibase;

	queue = cppi_ch->head;
	cppi_ch->head = NULL;
	cppi_ch->tail = NULL;

	/* REVISIT should rely on caller having done this,
	 * and caller should rely on us not changing it.
	 * peripheral code is safe ... check host too.
	 */
	musb_ep_select(mbase, cppi_ch->index + 1);

	if (cppi_ch->transmit) {
		struct cppi_tx_stateram __iomem *tx_ram;
		/* REVISIT put timeouts on these controller handshakes */

		cppi_dump_tx(6, cppi_ch, " (teardown)");

		/* teardown DMA engine then usb core */
		do {
			value = musb_readl(tibase, DAVINCI_TXCPPI_TEAR_REG);
		} while (!(value & CPPI_TEAR_READY));
		musb_writel(tibase, DAVINCI_TXCPPI_TEAR_REG, cppi_ch->index);

		tx_ram = cppi_ch->state_ram;
		do {
			value = musb_readl(&tx_ram->tx_complete, 0);
		} while (0xFFFFFFFC != value);

		/* FIXME clean up the transfer state ... here?
		 * the completion routine should get called with
		 * an appropriate status code.
		 */

		value = musb_readw(regs, MUSB_TXCSR);
		value &= ~MUSB_TXCSR_DMAENAB;
		value |= MUSB_TXCSR_FLUSHFIFO;
		musb_writew(regs, MUSB_TXCSR, value);
		musb_writew(regs, MUSB_TXCSR, value);

		/*
		 * 1. Write to completion Ptr value 0x1 (bit 0 set)
		 *    (write back mode)
		 * 2. Wait for abort interrupt and then put the channel in
		 *    compare mode by writing 1 to the tx_complete register.
		 */
		cppi_reset_tx(tx_ram, 1);
		cppi_ch->head = NULL;
		musb_writel(&tx_ram->tx_complete, 0, 1);
		cppi_dump_tx(5, cppi_ch, " (done teardown)");

		/* REVISIT tx side _should_ clean up the same way
		 * as the RX side ... this does no cleanup at all!
		 */

	} else /* RX */ {
		u16			csr;

		/* NOTE: docs don't guarantee any of this works ...  we
		 * expect that if the usb core stops telling the cppi core
		 * to pull more data from it, then it'll be safe to flush
		 * current RX DMA state iff any pending fifo transfer is done.
		 */

		core_rxirq_disable(tibase, cppi_ch->index + 1);

		/* for host, ensure ReqPkt is never set again */
		if (is_host_active(cppi_ch->controller->musb)) {
			value = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
			value &= ~((0x3) << (cppi_ch->index * 2));
			musb_writel(tibase, DAVINCI_AUTOREQ_REG, value);
		}

		csr = musb_readw(regs, MUSB_RXCSR);

		/* for host, clear (just) ReqPkt at end of current packet(s) */
		if (is_host_active(cppi_ch->controller->musb)) {
			csr |= MUSB_RXCSR_H_WZC_BITS;
			csr &= ~MUSB_RXCSR_H_REQPKT;
		} else
			csr |= MUSB_RXCSR_P_WZC_BITS;

		/* clear dma enable */
		csr &= ~(MUSB_RXCSR_DMAENAB);
		musb_writew(regs, MUSB_RXCSR, csr);
		csr = musb_readw(regs, MUSB_RXCSR);

		/* Quiesce: wait for current dma to finish (if not cleanup).
		 * We can't use bit zero of stateram->rx_sop, since that
		 * refers to an entire "DMA packet" not just emptying the
		 * current fifo.  Most segments need multiple usb packets.
		 */
		if (channel->status == MUSB_DMA_STATUS_BUSY)
			udelay(50);

		/* scan the current list, reporting any data that was
		 * transferred and acking any IRQ
		 */
		cppi_rx_scan(controller, cppi_ch->index);

		/* clobber the existing state once it's idle
		 *
		 * NOTE:  arguably, we should also wait for all the other
		 * RX channels to quiesce (how??) and then temporarily
		 * disable RXCPPI_CTRL_REG ... but it seems that we can
		 * rely on the controller restarting from state ram, with
		 * only RXCPPI_BUFCNT state being bogus.  BUFCNT will
		 * correct itself after the next DMA transfer though.
		 *
		 * REVISIT does using rndis mode change that?
		 */
		cppi_reset_rx(cppi_ch->state_ram);

		/* next DMA request _should_ load cppi head ptr */

		/* ... we don't "free" that list, only mutate it in place.  */
		cppi_dump_rx(5, cppi_ch, " (done abort)");

		/* clean up previously pending bds */
		cppi_bd_free(cppi_ch, cppi_ch->last_processed);
		cppi_ch->last_processed = NULL;

		while (queue) {
			struct cppi_descriptor	*tmp = queue->next;

			cppi_bd_free(cppi_ch, queue);
			queue = tmp;
		}
	}

	channel->status = MUSB_DMA_STATUS_FREE;
	cppi_ch->buf_dma = 0;
	cppi_ch->offset = 0;
	cppi_ch->buf_len = 0;
	cppi_ch->maxpacket = 0;
	return 0;
}

/* TBD Queries:
 *
 * Power Management ... probably turn off cppi during suspend, restart;
 * check state ram?  Clocking is presumably shared with usb core.
 */