1 /*
2  * Renesas USB driver
3  *
4  * Copyright (C) 2011 Renesas Solutions Corp.
5  * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
10  * GNU General Public License for more details.
11  *
12  * You should have received a copy of the GNU General Public License
13  * along with this program; if not, write to the Free Software
14  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
15  *
16  */
17 #include <linux/delay.h>
18 #include <linux/io.h>
19 #include <linux/scatterlist.h>
20 #include "common.h"
21 #include "pipe.h"
22 
23 #define usbhsf_get_cfifo(p)	(&((p)->fifo_info.cfifo))
24 #define usbhsf_get_d0fifo(p)	(&((p)->fifo_info.d0fifo))
25 #define usbhsf_get_d1fifo(p)	(&((p)->fifo_info.d1fifo))
26 #define usbhsf_is_cfifo(p, f)	(usbhsf_get_cfifo(p) == f)
27 
28 #define usbhsf_fifo_is_busy(f)	((f)->pipe) /* see usbhs_pipe_select_fifo */
29 
30 /*
31  *		packet initialize
32  */
33 void usbhs_pkt_init(struct usbhs_pkt *pkt)
34 {
35 	INIT_LIST_HEAD(&pkt->node);
36 }
37 
38 /*
39  *		packet control function
40  */
41 static int usbhsf_null_handle(struct usbhs_pkt *pkt, int *is_done)
42 {
43 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pkt->pipe);
44 	struct device *dev = usbhs_priv_to_dev(priv);
45 
46 	dev_err(dev, "null handler\n");
47 
48 	return -EINVAL;
49 }
50 
51 static struct usbhs_pkt_handle usbhsf_null_handler = {
52 	.prepare = usbhsf_null_handle,
53 	.try_run = usbhsf_null_handle,
54 };
55 
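/*
 * Queue a packet on a pipe. The caller provides the completion callback,
 * the buffer and length, whether a trailing zero-length packet is
 * requested, and the starting data toggle (-1 keeps the current one).
 *
 * A caller would typically do something like the following sketch
 * (my_done, buf and len are hypothetical caller-side names):
 *
 *	usbhs_pkt_push(pipe, pkt, my_done, buf, len, 0, -1);
 *	usbhs_pkt_start(pipe);
 */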
56 void usbhs_pkt_push(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt,
57 		    void (*done)(struct usbhs_priv *priv,
58 				 struct usbhs_pkt *pkt),
59 		    void *buf, int len, int zero, int sequence)
60 {
61 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
62 	struct device *dev = usbhs_priv_to_dev(priv);
63 	unsigned long flags;
64 
65 	if (!done) {
66 		dev_err(dev, "no done function\n");
67 		return;
68 	}
69 
70 	/********************  spin lock ********************/
71 	usbhs_lock(priv, flags);
72 
73 	if (!pipe->handler) {
74 		dev_err(dev, "no handler function\n");
75 		pipe->handler = &usbhsf_null_handler;
76 	}
77 
78 	list_move_tail(&pkt->node, &pipe->list);
79 
80 	/*
81 	 * Each pkt must hold its own handler, because the handler
82 	 * might change depending on the situation
83 	 * (e.g. dma handler -> pio handler).
84 	 */
85 	pkt->pipe	= pipe;
86 	pkt->buf	= buf;
87 	pkt->handler	= pipe->handler;
88 	pkt->length	= len;
89 	pkt->zero	= zero;
90 	pkt->actual	= 0;
91 	pkt->done	= done;
92 	pkt->sequence	= sequence;
93 
94 	usbhs_unlock(priv, flags);
95 	/********************  spin unlock ******************/
96 }
97 
98 static void __usbhsf_pkt_del(struct usbhs_pkt *pkt)
99 {
100 	list_del_init(&pkt->node);
101 }
102 
103 static struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe)
104 {
105 	if (list_empty(&pipe->list))
106 		return NULL;
107 
108 	return list_first_entry(&pipe->list, struct usbhs_pkt, node);
109 }
110 
111 static void usbhsf_fifo_clear(struct usbhs_pipe *pipe,
112 			      struct usbhs_fifo *fifo);
113 static void usbhsf_fifo_unselect(struct usbhs_pipe *pipe,
114 				 struct usbhs_fifo *fifo);
115 static struct dma_chan *usbhsf_dma_chan_get(struct usbhs_fifo *fifo,
116 					    struct usbhs_pkt *pkt);
117 #define usbhsf_dma_map(p)	__usbhsf_dma_map_ctrl(p, 1)
118 #define usbhsf_dma_unmap(p)	__usbhsf_dma_map_ctrl(p, 0)
119 static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map);
120 struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt)
121 {
122 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
123 	struct usbhs_fifo *fifo = usbhs_pipe_to_fifo(pipe);
124 	unsigned long flags;
125 
126 	/********************  spin lock ********************/
127 	usbhs_lock(priv, flags);
128 
129 	usbhs_pipe_disable(pipe);
130 
131 	if (!pkt)
132 		pkt = __usbhsf_pkt_get(pipe);
133 
134 	if (pkt) {
135 		struct dma_chan *chan = NULL;
136 
137 		if (fifo)
138 			chan = usbhsf_dma_chan_get(fifo, pkt);
139 		if (chan) {
140 			dmaengine_terminate_all(chan);
141 			usbhsf_fifo_clear(pipe, fifo);
142 			usbhsf_dma_unmap(pkt);
143 		}
144 
145 		__usbhsf_pkt_del(pkt);
146 	}
147 
148 	if (fifo)
149 		usbhsf_fifo_unselect(pipe, fifo);
150 
151 	usbhs_unlock(priv, flags);
152 	/********************  spin unlock ******************/
153 
154 	return pkt;
155 }
156 
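/* dispatch types passed to usbhsf_pkt_handler() below */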
157 enum {
158 	USBHSF_PKT_PREPARE,
159 	USBHSF_PKT_TRY_RUN,
160 	USBHSF_PKT_DMA_DONE,
161 };
162 
163 static int usbhsf_pkt_handler(struct usbhs_pipe *pipe, int type)
164 {
165 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
166 	struct usbhs_pkt *pkt;
167 	struct device *dev = usbhs_priv_to_dev(priv);
168 	int (*func)(struct usbhs_pkt *pkt, int *is_done);
169 	unsigned long flags;
170 	int ret = 0;
171 	int is_done = 0;
172 
173 	/********************  spin lock ********************/
174 	usbhs_lock(priv, flags);
175 
176 	pkt = __usbhsf_pkt_get(pipe);
177 	if (!pkt)
178 		goto __usbhs_pkt_handler_end;
179 
180 	switch (type) {
181 	case USBHSF_PKT_PREPARE:
182 		func = pkt->handler->prepare;
183 		break;
184 	case USBHSF_PKT_TRY_RUN:
185 		func = pkt->handler->try_run;
186 		break;
187 	case USBHSF_PKT_DMA_DONE:
188 		func = pkt->handler->dma_done;
189 		break;
190 	default:
191 		dev_err(dev, "unknown pkt handler\n");
192 		goto __usbhs_pkt_handler_end;
193 	}
194 
195 	if (likely(func))
196 		ret = func(pkt, &is_done);
197 
198 	if (is_done)
199 		__usbhsf_pkt_del(pkt);
200 
201 __usbhs_pkt_handler_end:
202 	usbhs_unlock(priv, flags);
203 	/********************  spin unlock ******************/
204 
205 	if (is_done) {
206 		pkt->done(priv, pkt);
207 		usbhs_pkt_start(pipe);
208 	}
209 
210 	return ret;
211 }
212 
213 void usbhs_pkt_start(struct usbhs_pipe *pipe)
214 {
215 	usbhsf_pkt_handler(pipe, USBHSF_PKT_PREPARE);
216 }
217 
218 /*
219  *		irq enable/disable function
220  */
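/*
 * Set or clear this pipe's bit in the current mod's BEMP/BRDY interrupt
 * mask, then have usbhs_irq_callback_update() re-program the hardware.
 */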
221 #define usbhsf_irq_empty_ctrl(p, e) usbhsf_irq_callback_ctrl(p, irq_bempsts, e)
222 #define usbhsf_irq_ready_ctrl(p, e) usbhsf_irq_callback_ctrl(p, irq_brdysts, e)
223 #define usbhsf_irq_callback_ctrl(pipe, status, enable)			\
224 	({								\
225 		struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);	\
226 		struct usbhs_mod *mod = usbhs_mod_get_current(priv);	\
227 		u16 status = (1 << usbhs_pipe_number(pipe));		\
228 		if (!mod)						\
229 			return;						\
230 		if (enable)						\
231 			mod->status |= status;				\
232 		else							\
233 			mod->status &= ~status;				\
234 		usbhs_irq_callback_update(priv, mod);			\
235 	})
236 
237 static void usbhsf_tx_irq_ctrl(struct usbhs_pipe *pipe, int enable)
238 {
239 	/*
240 	 * A DCP pipe can NOT use the "ready interrupt" for "send";
241 	 * it should use the "empty" interrupt.
242 	 * see
243 	 *   "Operation" - "Interrupt Function" - "BRDY Interrupt"
244 	 *
245 	 * On the other hand, a normal pipe can use the "ready interrupt"
246 	 * for "send", whether it is single or double buffered.
247 	 */
248 	if (usbhs_pipe_is_dcp(pipe))
249 		usbhsf_irq_empty_ctrl(pipe, enable);
250 	else
251 		usbhsf_irq_ready_ctrl(pipe, enable);
252 }
253 
254 static void usbhsf_rx_irq_ctrl(struct usbhs_pipe *pipe, int enable)
255 {
256 	usbhsf_irq_ready_ctrl(pipe, enable);
257 }
258 
259 /*
260  *		FIFO ctrl
261  */
262 static void usbhsf_send_terminator(struct usbhs_pipe *pipe,
263 				   struct usbhs_fifo *fifo)
264 {
265 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
266 
267 	usbhs_bset(priv, fifo->ctr, BVAL, BVAL);
268 }
269 
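/*
 * Poll FRDY until the FIFO port becomes accessible; returns 0 on
 * success or -EBUSY after roughly 10ms of polling.
 */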
270 static int usbhsf_fifo_barrier(struct usbhs_priv *priv,
271 			       struct usbhs_fifo *fifo)
272 {
273 	int timeout = 1024;
274 
275 	do {
276 		/* The FIFO port is accessible */
277 		if (usbhs_read(priv, fifo->ctr) & FRDY)
278 			return 0;
279 
280 		udelay(10);
281 	} while (timeout--);
282 
283 	return -EBUSY;
284 }
285 
286 static void usbhsf_fifo_clear(struct usbhs_pipe *pipe,
287 			      struct usbhs_fifo *fifo)
288 {
289 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
290 	int ret = 0;
291 
292 	if (!usbhs_pipe_is_dcp(pipe)) {
293 		/*
294 		 * This driver checks the pipe condition first to avoid -EBUSY
295 		 * from usbhsf_fifo_barrier() with about 10 msec delay in
296 		 * the interrupt handler if the pipe is RX direction and empty.
297 		 */
298 		if (usbhs_pipe_is_dir_in(pipe))
299 			ret = usbhs_pipe_is_accessible(pipe);
300 		if (!ret)
301 			ret = usbhsf_fifo_barrier(priv, fifo);
302 	}
303 
304 	/*
305 	 * if non-DCP pipe, this driver should set BCLR when
306 	 * usbhsf_fifo_barrier() returns 0.
307 	 */
308 	if (!ret)
309 		usbhs_write(priv, fifo->ctr, BCLR);
310 }
311 
312 static int usbhsf_fifo_rcv_len(struct usbhs_priv *priv,
313 			       struct usbhs_fifo *fifo)
314 {
315 	return usbhs_read(priv, fifo->ctr) & DTLN_MASK;
316 }
317 
318 static void usbhsf_fifo_unselect(struct usbhs_pipe *pipe,
319 				 struct usbhs_fifo *fifo)
320 {
321 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
322 
323 	usbhs_pipe_select_fifo(pipe, NULL);
324 	usbhs_write(priv, fifo->sel, 0);
325 }
326 
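/*
 * Attach a FIFO port to this pipe: write CURPIPE (plus ISEL for DCP)
 * into the FIFO select register and poll until the hardware has
 * actually taken the new selection.
 */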
327 static int usbhsf_fifo_select(struct usbhs_pipe *pipe,
328 			      struct usbhs_fifo *fifo,
329 			      int write)
330 {
331 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
332 	struct device *dev = usbhs_priv_to_dev(priv);
333 	int timeout = 1024;
334 	u16 mask = ((1 << 5) | 0xF);		/* mask of ISEL | CURPIPE */
335 	u16 base = usbhs_pipe_number(pipe);	/* CURPIPE */
336 
337 	if (usbhs_pipe_is_busy(pipe) ||
338 	    usbhsf_fifo_is_busy(fifo))
339 		return -EBUSY;
340 
341 	if (usbhs_pipe_is_dcp(pipe)) {
342 		base |= (1 == write) << 5;	/* ISEL */
343 
344 		if (usbhs_mod_is_host(priv))
345 			usbhs_dcp_dir_for_host(pipe, write);
346 	}
347 
348 	/* "base" will be used below  */
349 	if (usbhs_get_dparam(priv, has_sudmac) && !usbhsf_is_cfifo(priv, fifo))
350 		usbhs_write(priv, fifo->sel, base);
351 	else
352 		usbhs_write(priv, fifo->sel, base | MBW_32);
353 
354 	/* check ISEL and CURPIPE value */
355 	while (timeout--) {
356 		if (base == (mask & usbhs_read(priv, fifo->sel))) {
357 			usbhs_pipe_select_fifo(pipe, fifo);
358 			return 0;
359 		}
360 		udelay(10);
361 	}
362 
363 	dev_err(dev, "fifo select error\n");
364 
365 	return -EIO;
366 }
367 
368 /*
369  *		DCP status stage
370  */
371 static int usbhs_dcp_dir_switch_to_write(struct usbhs_pkt *pkt, int *is_done)
372 {
373 	struct usbhs_pipe *pipe = pkt->pipe;
374 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
375 	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
376 	struct device *dev = usbhs_priv_to_dev(priv);
377 	int ret;
378 
379 	usbhs_pipe_disable(pipe);
380 
381 	ret = usbhsf_fifo_select(pipe, fifo, 1);
382 	if (ret < 0) {
383 		dev_err(dev, "%s() failed\n", __func__);
384 		return ret;
385 	}
386 
387 	usbhs_pipe_sequence_data1(pipe); /* DATA1 */
388 
389 	usbhsf_fifo_clear(pipe, fifo);
390 	usbhsf_send_terminator(pipe, fifo);
391 
392 	usbhsf_fifo_unselect(pipe, fifo);
393 
394 	usbhsf_tx_irq_ctrl(pipe, 1);
395 	usbhs_pipe_enable(pipe);
396 
397 	return ret;
398 }
399 
400 static int usbhs_dcp_dir_switch_to_read(struct usbhs_pkt *pkt, int *is_done)
401 {
402 	struct usbhs_pipe *pipe = pkt->pipe;
403 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
404 	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
405 	struct device *dev = usbhs_priv_to_dev(priv);
406 	int ret;
407 
408 	usbhs_pipe_disable(pipe);
409 
410 	ret = usbhsf_fifo_select(pipe, fifo, 0);
411 	if (ret < 0) {
412 		dev_err(dev, "%s() failed\n", __func__);
413 		return ret;
414 	}
415 
416 	usbhs_pipe_sequence_data1(pipe); /* DATA1 */
417 	usbhsf_fifo_clear(pipe, fifo);
418 
419 	usbhsf_fifo_unselect(pipe, fifo);
420 
421 	usbhsf_rx_irq_ctrl(pipe, 1);
422 	usbhs_pipe_enable(pipe);
423 
424 	return ret;
425 
426 }
427 
428 static int usbhs_dcp_dir_switch_done(struct usbhs_pkt *pkt, int *is_done)
429 {
430 	struct usbhs_pipe *pipe = pkt->pipe;
431 
432 	if (pkt->handler == &usbhs_dcp_status_stage_in_handler)
433 		usbhsf_tx_irq_ctrl(pipe, 0);
434 	else
435 		usbhsf_rx_irq_ctrl(pipe, 0);
436 
437 	pkt->actual = pkt->length;
438 	*is_done = 1;
439 
440 	return 0;
441 }
442 
443 struct usbhs_pkt_handle usbhs_dcp_status_stage_in_handler = {
444 	.prepare = usbhs_dcp_dir_switch_to_write,
445 	.try_run = usbhs_dcp_dir_switch_done,
446 };
447 
448 struct usbhs_pkt_handle usbhs_dcp_status_stage_out_handler = {
449 	.prepare = usbhs_dcp_dir_switch_to_read,
450 	.try_run = usbhs_dcp_dir_switch_done,
451 };
452 
453 /*
454  *		DCP data stage (push)
455  */
456 static int usbhsf_dcp_data_stage_try_push(struct usbhs_pkt *pkt, int *is_done)
457 {
458 	struct usbhs_pipe *pipe = pkt->pipe;
459 
460 	usbhs_pipe_sequence_data1(pipe); /* DATA1 */
461 
462 	/*
463 	 * change handler to PIO push
464 	 */
465 	pkt->handler = &usbhs_fifo_pio_push_handler;
466 
467 	return pkt->handler->prepare(pkt, is_done);
468 }
469 
470 struct usbhs_pkt_handle usbhs_dcp_data_stage_out_handler = {
471 	.prepare = usbhsf_dcp_data_stage_try_push,
472 };
473 
474 /*
475  *		DCP data stage (pop)
476  */
477 static int usbhsf_dcp_data_stage_prepare_pop(struct usbhs_pkt *pkt,
478 					     int *is_done)
479 {
480 	struct usbhs_pipe *pipe = pkt->pipe;
481 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
482 	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv);
483 
484 	if (usbhs_pipe_is_busy(pipe))
485 		return 0;
486 
487 	/*
488 	 * prepare pop for DCP should
489 	 *  - change DCP direction,
490 	 *  - clear fifo
491 	 *  - DATA1
492 	 */
493 	usbhs_pipe_disable(pipe);
494 
495 	usbhs_pipe_sequence_data1(pipe); /* DATA1 */
496 
497 	usbhsf_fifo_select(pipe, fifo, 0);
498 	usbhsf_fifo_clear(pipe, fifo);
499 	usbhsf_fifo_unselect(pipe, fifo);
500 
501 	/*
502 	 * change handler to PIO pop
503 	 */
504 	pkt->handler = &usbhs_fifo_pio_pop_handler;
505 
506 	return pkt->handler->prepare(pkt, is_done);
507 }
508 
509 struct usbhs_pkt_handle usbhs_dcp_data_stage_in_handler = {
510 	.prepare = usbhsf_dcp_data_stage_prepare_pop,
511 };
512 
513 /*
514  *		PIO push handler
515  */
516 static int usbhsf_pio_try_push(struct usbhs_pkt *pkt, int *is_done)
517 {
518 	struct usbhs_pipe *pipe = pkt->pipe;
519 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
520 	struct device *dev = usbhs_priv_to_dev(priv);
521 	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
522 	void __iomem *addr = priv->base + fifo->port;
523 	u8 *buf;
524 	int maxp = usbhs_pipe_get_maxpacket(pipe);
525 	int total_len;
526 	int i, ret, len;
527 	int is_short;
528 
529 	usbhs_pipe_data_sequence(pipe, pkt->sequence);
530 	pkt->sequence = -1; /* -1 sequence will be ignored */
531 
532 	usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->length);
533 
534 	ret = usbhsf_fifo_select(pipe, fifo, 1);
535 	if (ret < 0)
536 		return 0;
537 
538 	ret = usbhs_pipe_is_accessible(pipe);
539 	if (ret < 0) {
540 		/* inaccessible pipe is not an error */
541 		ret = 0;
542 		goto usbhs_fifo_write_busy;
543 	}
544 
545 	ret = usbhsf_fifo_barrier(priv, fifo);
546 	if (ret < 0)
547 		goto usbhs_fifo_write_busy;
548 
549 	buf		= pkt->buf    + pkt->actual;
550 	len		= pkt->length - pkt->actual;
551 	len		= min(len, maxp);
552 	total_len	= len;
553 	is_short	= total_len < maxp;
554 
555 	/*
556 	 * FIXME
557 	 *
558 	 * 32-bit access only
559 	 */
560 	if (len >= 4 && !((unsigned long)buf & 0x03)) {
561 		iowrite32_rep(addr, buf, len / 4);
562 		len %= 4;
563 		buf += total_len - len;
564 	}
565 
566 	/* the rest operation */
567 	for (i = 0; i < len; i++)
568 		iowrite8(buf[i], addr + (0x03 - (i & 0x03)));
569 
570 	/*
571 	 * variable update
572 	 */
573 	pkt->actual += total_len;
574 
575 	if (pkt->actual < pkt->length)
576 		*is_done = 0;		/* there is remaining data */
577 	else if (is_short)
578 		*is_done = 1;		/* short packet */
579 	else
580 		*is_done = !pkt->zero;	/* send zero packet ? */
581 
582 	/*
583 	 * pipe/irq handling
584 	 */
585 	if (is_short)
586 		usbhsf_send_terminator(pipe, fifo);
587 
588 	usbhsf_tx_irq_ctrl(pipe, !*is_done);
589 	usbhs_pipe_running(pipe, !*is_done);
590 	usbhs_pipe_enable(pipe);
591 
592 	dev_dbg(dev, "  send %d (%d/ %d/ %d/ %d)\n",
593 		usbhs_pipe_number(pipe),
594 		pkt->length, pkt->actual, *is_done, pkt->zero);
595 
596 	/*
597 	 * Transmission end
598 	 */
599 	if (*is_done) {
600 		if (usbhs_pipe_is_dcp(pipe))
601 			usbhs_dcp_control_transfer_done(pipe);
602 	}
603 
604 	usbhsf_fifo_unselect(pipe, fifo);
605 
606 	return 0;
607 
608 usbhs_fifo_write_busy:
609 	usbhsf_fifo_unselect(pipe, fifo);
610 
611 	/*
612 	 * pipe is busy.
613 	 * retry in interrupt
614 	 */
615 	usbhsf_tx_irq_ctrl(pipe, 1);
616 	usbhs_pipe_running(pipe, 1);
617 
618 	return ret;
619 }
620 
621 static int usbhsf_pio_prepare_push(struct usbhs_pkt *pkt, int *is_done)
622 {
623 	if (usbhs_pipe_is_running(pkt->pipe))
624 		return 0;
625 
626 	return usbhsf_pio_try_push(pkt, is_done);
627 }
628 
629 struct usbhs_pkt_handle usbhs_fifo_pio_push_handler = {
630 	.prepare = usbhsf_pio_prepare_push,
631 	.try_run = usbhsf_pio_try_push,
632 };
633 
634 /*
635  *		PIO pop handler
636  */
637 static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done)
638 {
639 	struct usbhs_pipe *pipe = pkt->pipe;
640 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
641 	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv);
642 
643 	if (usbhs_pipe_is_busy(pipe))
644 		return 0;
645 
646 	if (usbhs_pipe_is_running(pipe))
647 		return 0;
648 
649 	/*
650 	 * enable the pipe to prepare for packet receive
651 	 */
652 	usbhs_pipe_data_sequence(pipe, pkt->sequence);
653 	pkt->sequence = -1; /* -1 sequence will be ignored */
654 
655 	if (usbhs_pipe_is_dcp(pipe))
656 		usbhsf_fifo_clear(pipe, fifo);
657 
658 	usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->length);
659 	usbhs_pipe_enable(pipe);
660 	usbhs_pipe_running(pipe, 1);
661 	usbhsf_rx_irq_ctrl(pipe, 1);
662 
663 	return 0;
664 }
665 
666 static int usbhsf_pio_try_pop(struct usbhs_pkt *pkt, int *is_done)
667 {
668 	struct usbhs_pipe *pipe = pkt->pipe;
669 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
670 	struct device *dev = usbhs_priv_to_dev(priv);
671 	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
672 	void __iomem *addr = priv->base + fifo->port;
673 	u8 *buf;
674 	u32 data = 0;
675 	int maxp = usbhs_pipe_get_maxpacket(pipe);
676 	int rcv_len, len;
677 	int i, ret;
678 	int total_len = 0;
679 
680 	ret = usbhsf_fifo_select(pipe, fifo, 0);
681 	if (ret < 0)
682 		return 0;
683 
684 	ret = usbhsf_fifo_barrier(priv, fifo);
685 	if (ret < 0)
686 		goto usbhs_fifo_read_busy;
687 
688 	rcv_len = usbhsf_fifo_rcv_len(priv, fifo);
689 
690 	buf		= pkt->buf    + pkt->actual;
691 	len		= pkt->length - pkt->actual;
692 	len		= min(len, rcv_len);
693 	total_len	= len;
694 
695 	/*
696 	 * Update the actual length first; it decides whether to disable the pipe.
697 	 * If this pipe keeps BUF status and all data has been popped,
698 	 * the next interrupt/token will be issued again.
699 	 */
700 	pkt->actual += total_len;
701 
702 	if ((pkt->actual == pkt->length) ||	/* receive all data */
703 	    (total_len < maxp)) {		/* short packet */
704 		*is_done = 1;
705 		usbhsf_rx_irq_ctrl(pipe, 0);
706 		usbhs_pipe_running(pipe, 0);
707 		/*
708 		 * If function mode, since this controller is possible to enter
709 		 * Control Write status stage at this timing, this driver
710 		 * should not disable the pipe. If such a case happens, this
711 		 * controller is not able to complete the status stage.
712 		 */
713 		if (!usbhs_mod_is_host(priv) && !usbhs_pipe_is_dcp(pipe))
714 			usbhs_pipe_disable(pipe);	/* disable pipe first */
715 	}
716 
717 	/*
718 	 * Buffer clear if Zero-Length packet
719 	 *
720 	 * see
721 	 * "Operation" - "FIFO Buffer Memory" - "FIFO Port Function"
722 	 */
723 	if (0 == rcv_len) {
724 		pkt->zero = 1;
725 		usbhsf_fifo_clear(pipe, fifo);
726 		goto usbhs_fifo_read_end;
727 	}
728 
729 	/*
730 	 * FIXME
731 	 *
732 	 * 32-bit access only
733 	 */
734 	if (len >= 4 && !((unsigned long)buf & 0x03)) {
735 		ioread32_rep(addr, buf, len / 4);
736 		len %= 4;
737 		buf += total_len - len;
738 	}
739 
740 	/* the rest operation */
741 	for (i = 0; i < len; i++) {
742 		if (!(i & 0x03))
743 			data = ioread32(addr);
744 
745 		buf[i] = (data >> ((i & 0x03) * 8)) & 0xff;
746 	}
747 
748 usbhs_fifo_read_end:
749 	dev_dbg(dev, "  recv %d (%d/ %d/ %d/ %d)\n",
750 		usbhs_pipe_number(pipe),
751 		pkt->length, pkt->actual, *is_done, pkt->zero);
752 
753 	/*
754 	 * Transmission end
755 	 */
756 	if (*is_done) {
757 		if (usbhs_pipe_is_dcp(pipe))
758 			usbhs_dcp_control_transfer_done(pipe);
759 	}
760 
761 usbhs_fifo_read_busy:
762 	usbhsf_fifo_unselect(pipe, fifo);
763 
764 	return ret;
765 }
766 
767 struct usbhs_pkt_handle usbhs_fifo_pio_pop_handler = {
768 	.prepare = usbhsf_prepare_pop,
769 	.try_run = usbhsf_pio_try_pop,
770 };
771 
772 /*
773  *		DCP ctrl stage handler
774  */
775 static int usbhsf_ctrl_stage_end(struct usbhs_pkt *pkt, int *is_done)
776 {
777 	usbhs_dcp_control_transfer_done(pkt->pipe);
778 
779 	*is_done = 1;
780 
781 	return 0;
782 }
783 
784 struct usbhs_pkt_handle usbhs_ctrl_stage_end_handler = {
785 	.prepare = usbhsf_ctrl_stage_end,
786 	.try_run = usbhsf_ctrl_stage_end,
787 };
788 
789 /*
790  *		DMA fifo functions
791  */
792 static struct dma_chan *usbhsf_dma_chan_get(struct usbhs_fifo *fifo,
793 					    struct usbhs_pkt *pkt)
794 {
795 	if (&usbhs_fifo_dma_push_handler == pkt->handler)
796 		return fifo->tx_chan;
797 
798 	if (&usbhs_fifo_dma_pop_handler == pkt->handler)
799 		return fifo->rx_chan;
800 
801 	return NULL;
802 }
803 
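/* pick a non-busy D-FIFO whose DMA channel matches this packet's handler */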
804 static struct usbhs_fifo *usbhsf_get_dma_fifo(struct usbhs_priv *priv,
805 					      struct usbhs_pkt *pkt)
806 {
807 	struct usbhs_fifo *fifo;
808 
809 	/* DMA :: D0FIFO */
810 	fifo = usbhsf_get_d0fifo(priv);
811 	if (usbhsf_dma_chan_get(fifo, pkt) &&
812 	    !usbhsf_fifo_is_busy(fifo))
813 		return fifo;
814 
815 	/* DMA :: D1FIFO */
816 	fifo = usbhsf_get_d1fifo(priv);
817 	if (usbhsf_dma_chan_get(fifo, pkt) &&
818 	    !usbhsf_fifo_is_busy(fifo))
819 		return fifo;
820 
821 	return NULL;
822 }
823 
824 #define usbhsf_dma_start(p, f)	__usbhsf_dma_ctrl(p, f, DREQE)
825 #define usbhsf_dma_stop(p, f)	__usbhsf_dma_ctrl(p, f, 0)
826 static void __usbhsf_dma_ctrl(struct usbhs_pipe *pipe,
827 			      struct usbhs_fifo *fifo,
828 			      u16 dreqe)
829 {
830 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
831 
832 	usbhs_bset(priv, fifo->sel, DREQE, dreqe);
833 }
834 
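/* map/unmap the packet buffer for DMA via the mod-specific dma_map_ctrl callback */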
835 static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map)
836 {
837 	struct usbhs_pipe *pipe = pkt->pipe;
838 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
839 	struct usbhs_pipe_info *info = usbhs_priv_to_pipeinfo(priv);
840 
841 	return info->dma_map_ctrl(pkt, map);
842 }
843 
844 static void usbhsf_dma_complete(void *arg);
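/*
 * Workqueue handler: build and submit the dmaengine descriptor for the
 * packet, then enable DREQ on the selected FIFO and start the pipe.
 */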
845 static void xfer_work(struct work_struct *work)
846 {
847 	struct usbhs_pkt *pkt = container_of(work, struct usbhs_pkt, work);
848 	struct usbhs_pipe *pipe = pkt->pipe;
849 	struct usbhs_fifo *fifo;
850 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
851 	struct dma_async_tx_descriptor *desc;
852 	struct dma_chan *chan;
853 	struct device *dev = usbhs_priv_to_dev(priv);
854 	enum dma_transfer_direction dir;
855 	unsigned long flags;
856 
857 	usbhs_lock(priv, flags);
858 	fifo = usbhs_pipe_to_fifo(pipe);
859 	if (!fifo)
860 		goto xfer_work_end;
861 
862 	chan = usbhsf_dma_chan_get(fifo, pkt);
863 	dir = usbhs_pipe_is_dir_in(pipe) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
864 
865 	desc = dmaengine_prep_slave_single(chan, pkt->dma + pkt->actual,
866 					pkt->trans, dir,
867 					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
868 	if (!desc)
869 		goto xfer_work_end;
870 
871 	desc->callback		= usbhsf_dma_complete;
872 	desc->callback_param	= pipe;
873 
874 	if (dmaengine_submit(desc) < 0) {
875 		dev_err(dev, "Failed to submit dma descriptor\n");
876 		goto xfer_work_end;
877 	}
878 
879 	dev_dbg(dev, "  %s %d (%d/ %d)\n",
880 		fifo->name, usbhs_pipe_number(pipe), pkt->length, pkt->zero);
881 
882 	usbhs_pipe_running(pipe, 1);
883 	usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->trans);
884 	dma_async_issue_pending(chan);
885 	usbhsf_dma_start(pipe, fifo);
886 	usbhs_pipe_enable(pipe);
887 
888 xfer_work_end:
889 	usbhs_unlock(priv, flags);
890 }
891 
892 /*
893  *		DMA push handler
894  */
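/*
 * Hand a push transfer to the DMAC when possible; fall back to PIO for
 * short (< pio_dma_border), unaligned or DCP transfers, or when no
 * DMA-capable FIFO is free.
 */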
895 static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
896 {
897 	struct usbhs_pipe *pipe = pkt->pipe;
898 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
899 	struct usbhs_fifo *fifo;
900 	int len = pkt->length - pkt->actual;
901 	int ret;
902 
903 	if (usbhs_pipe_is_busy(pipe))
904 		return 0;
905 
906 	/* use PIO if packet is less than pio_dma_border or pipe is DCP */
907 	if ((len < usbhs_get_dparam(priv, pio_dma_border)) ||
908 	    usbhs_pipe_is_dcp(pipe))
909 		goto usbhsf_pio_prepare_push;
910 
911 	if (len & 0x7) /* 8byte alignment */
912 		goto usbhsf_pio_prepare_push;
913 
914 	if ((uintptr_t)(pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */
915 		goto usbhsf_pio_prepare_push;
916 
917 	/* return at this time if the pipe is running */
918 	if (usbhs_pipe_is_running(pipe))
919 		return 0;
920 
921 	/* get enable DMA fifo */
922 	fifo = usbhsf_get_dma_fifo(priv, pkt);
923 	if (!fifo)
924 		goto usbhsf_pio_prepare_push;
925 
926 	if (usbhsf_dma_map(pkt) < 0)
927 		goto usbhsf_pio_prepare_push;
928 
929 	ret = usbhsf_fifo_select(pipe, fifo, 0);
930 	if (ret < 0)
931 		goto usbhsf_pio_prepare_push_unmap;
932 
933 	pkt->trans = len;
934 
935 	usbhsf_tx_irq_ctrl(pipe, 0);
936 	INIT_WORK(&pkt->work, xfer_work);
937 	schedule_work(&pkt->work);
938 
939 	return 0;
940 
941 usbhsf_pio_prepare_push_unmap:
942 	usbhsf_dma_unmap(pkt);
943 usbhsf_pio_prepare_push:
944 	/*
945 	 * change handler to PIO
946 	 */
947 	pkt->handler = &usbhs_fifo_pio_push_handler;
948 
949 	return pkt->handler->prepare(pkt, is_done);
950 }
951 
952 static int usbhsf_dma_push_done(struct usbhs_pkt *pkt, int *is_done)
953 {
954 	struct usbhs_pipe *pipe = pkt->pipe;
955 	int is_short = pkt->trans % usbhs_pipe_get_maxpacket(pipe);
956 
957 	pkt->actual += pkt->trans;
958 
959 	if (pkt->actual < pkt->length)
960 		*is_done = 0;		/* there is remaining data */
961 	else if (is_short)
962 		*is_done = 1;		/* short packet */
963 	else
964 		*is_done = !pkt->zero;	/* send zero packet? */
965 
966 	usbhs_pipe_running(pipe, !*is_done);
967 
968 	usbhsf_dma_stop(pipe, pipe->fifo);
969 	usbhsf_dma_unmap(pkt);
970 	usbhsf_fifo_unselect(pipe, pipe->fifo);
971 
972 	if (!*is_done) {
973 		/* change handler to PIO */
974 		pkt->handler = &usbhs_fifo_pio_push_handler;
975 		return pkt->handler->try_run(pkt, is_done);
976 	}
977 
978 	return 0;
979 }
980 
981 struct usbhs_pkt_handle usbhs_fifo_dma_push_handler = {
982 	.prepare	= usbhsf_dma_prepare_push,
983 	.dma_done	= usbhsf_dma_push_done,
984 };
985 
986 /*
987  *		DMA pop handler
988  */
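/*
 * Drain a pop transfer via the DMAC when possible; fall back to PIO for
 * the DCP pipe, short or unaligned data, or when no DMA FIFO is free.
 */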
989 static int usbhsf_dma_try_pop(struct usbhs_pkt *pkt, int *is_done)
990 {
991 	struct usbhs_pipe *pipe = pkt->pipe;
992 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
993 	struct usbhs_fifo *fifo;
994 	int len, ret;
995 
996 	if (usbhs_pipe_is_busy(pipe))
997 		return 0;
998 
999 	if (usbhs_pipe_is_dcp(pipe))
1000 		goto usbhsf_pio_prepare_pop;
1001 
1002 	/* get enable DMA fifo */
1003 	fifo = usbhsf_get_dma_fifo(priv, pkt);
1004 	if (!fifo)
1005 		goto usbhsf_pio_prepare_pop;
1006 
1007 	if ((uintptr_t)(pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */
1008 		goto usbhsf_pio_prepare_pop;
1009 
1010 	ret = usbhsf_fifo_select(pipe, fifo, 0);
1011 	if (ret < 0)
1012 		goto usbhsf_pio_prepare_pop;
1013 
1014 	/* use PIO if packet is less than pio_dma_border */
1015 	len = usbhsf_fifo_rcv_len(priv, fifo);
1016 	len = min(pkt->length - pkt->actual, len);
1017 	if (len & 0x7) /* 8byte alignment */
1018 		goto usbhsf_pio_prepare_pop_unselect;
1019 
1020 	if (len < usbhs_get_dparam(priv, pio_dma_border))
1021 		goto usbhsf_pio_prepare_pop_unselect;
1022 
1023 	ret = usbhsf_fifo_barrier(priv, fifo);
1024 	if (ret < 0)
1025 		goto usbhsf_pio_prepare_pop_unselect;
1026 
1027 	if (usbhsf_dma_map(pkt) < 0)
1028 		goto usbhsf_pio_prepare_pop_unselect;
1029 
1030 	/* DMA */
1031 
1032 	/*
1033 	 * usbhs_fifo_dma_pop_handler :: prepare enabled the irq
1034 	 * so that we could get here, but it is no longer needed
1035 	 * for DMA. Disable it.
1036 	 */
1037 	usbhsf_rx_irq_ctrl(pipe, 0);
1038 
1039 	pkt->trans = len;
1040 
1041 	INIT_WORK(&pkt->work, xfer_work);
1042 	schedule_work(&pkt->work);
1043 
1044 	return 0;
1045 
1046 usbhsf_pio_prepare_pop_unselect:
1047 	usbhsf_fifo_unselect(pipe, fifo);
1048 usbhsf_pio_prepare_pop:
1049 
1050 	/*
1051 	 * change handler to PIO
1052 	 */
1053 	pkt->handler = &usbhs_fifo_pio_pop_handler;
1054 
1055 	return pkt->handler->try_run(pkt, is_done);
1056 }
1057 
1058 static int usbhsf_dma_pop_done(struct usbhs_pkt *pkt, int *is_done)
1059 {
1060 	struct usbhs_pipe *pipe = pkt->pipe;
1061 	int maxp = usbhs_pipe_get_maxpacket(pipe);
1062 
1063 	usbhsf_dma_stop(pipe, pipe->fifo);
1064 	usbhsf_dma_unmap(pkt);
1065 	usbhsf_fifo_unselect(pipe, pipe->fifo);
1066 
1067 	pkt->actual += pkt->trans;
1068 
1069 	if ((pkt->actual == pkt->length) ||	/* receive all data */
1070 	    (pkt->trans < maxp)) {		/* short packet */
1071 		*is_done = 1;
1072 		usbhs_pipe_running(pipe, 0);
1073 	} else {
1074 		/* re-enable */
1075 		usbhs_pipe_running(pipe, 0);
1076 		usbhsf_prepare_pop(pkt, is_done);
1077 	}
1078 
1079 	return 0;
1080 }
1081 
1082 struct usbhs_pkt_handle usbhs_fifo_dma_pop_handler = {
1083 	.prepare	= usbhsf_prepare_pop,
1084 	.try_run	= usbhsf_dma_try_pop,
1085 	.dma_done	= usbhsf_dma_pop_done
1086 };
1087 
1088 /*
1089  *		DMA setting
1090  */
1091 static bool usbhsf_dma_filter(struct dma_chan *chan, void *param)
1092 {
1093 	struct sh_dmae_slave *slave = param;
1094 
1095 	/*
1096 	 * FIXME
1097 	 *
1098 	 * usbhs doesn't recognize id = 0 as valid DMA
1099 	 */
1100 	if (0 == slave->shdma_slave.slave_id)
1101 		return false;
1102 
1103 	chan->private = slave;
1104 
1105 	return true;
1106 }
1107 
1108 static void usbhsf_dma_quit(struct usbhs_priv *priv, struct usbhs_fifo *fifo)
1109 {
1110 	if (fifo->tx_chan)
1111 		dma_release_channel(fifo->tx_chan);
1112 	if (fifo->rx_chan)
1113 		dma_release_channel(fifo->rx_chan);
1114 
1115 	fifo->tx_chan = NULL;
1116 	fifo->rx_chan = NULL;
1117 }
1118 
1119 static void usbhsf_dma_init(struct usbhs_priv *priv,
1120 			    struct usbhs_fifo *fifo)
1121 {
1122 	struct device *dev = usbhs_priv_to_dev(priv);
1123 	dma_cap_mask_t mask;
1124 
1125 	dma_cap_zero(mask);
1126 	dma_cap_set(DMA_SLAVE, mask);
1127 	fifo->tx_chan = dma_request_channel(mask, usbhsf_dma_filter,
1128 					    &fifo->tx_slave);
1129 
1130 	dma_cap_zero(mask);
1131 	dma_cap_set(DMA_SLAVE, mask);
1132 	fifo->rx_chan = dma_request_channel(mask, usbhsf_dma_filter,
1133 					    &fifo->rx_slave);
1134 
1135 	if (fifo->tx_chan || fifo->rx_chan)
1136 		dev_dbg(dev, "enable DMAEngine (%s%s%s)\n",
1137 			 fifo->name,
1138 			 fifo->tx_chan ? "[TX]" : "    ",
1139 			 fifo->rx_chan ? "[RX]" : "    ");
1140 }
1141 
1142 /*
1143  *		irq functions
1144  */
1145 static int usbhsf_irq_empty(struct usbhs_priv *priv,
1146 			    struct usbhs_irq_state *irq_state)
1147 {
1148 	struct usbhs_pipe *pipe;
1149 	struct device *dev = usbhs_priv_to_dev(priv);
1150 	int i, ret;
1151 
1152 	if (!irq_state->bempsts) {
1153 		dev_err(dev, "debug %s !!\n", __func__);
1154 		return -EIO;
1155 	}
1156 
1157 	dev_dbg(dev, "irq empty [0x%04x]\n", irq_state->bempsts);
1158 
1159 	/*
1160 	 * search interrupted "pipe"
1161 	 * not "uep".
1162 	 */
1163 	usbhs_for_each_pipe_with_dcp(pipe, priv, i) {
1164 		if (!(irq_state->bempsts & (1 << i)))
1165 			continue;
1166 
1167 		ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_TRY_RUN);
1168 		if (ret < 0)
1169 			dev_err(dev, "irq_empty run_error %d : %d\n", i, ret);
1170 	}
1171 
1172 	return 0;
1173 }
1174 
1175 static int usbhsf_irq_ready(struct usbhs_priv *priv,
1176 			    struct usbhs_irq_state *irq_state)
1177 {
1178 	struct usbhs_pipe *pipe;
1179 	struct device *dev = usbhs_priv_to_dev(priv);
1180 	int i, ret;
1181 
1182 	if (!irq_state->brdysts) {
1183 		dev_err(dev, "debug %s !!\n", __func__);
1184 		return -EIO;
1185 	}
1186 
1187 	dev_dbg(dev, "irq ready [0x%04x]\n", irq_state->brdysts);
1188 
1189 	/*
1190 	 * search interrupted "pipe"
1191 	 * not "uep".
1192 	 */
1193 	usbhs_for_each_pipe_with_dcp(pipe, priv, i) {
1194 		if (!(irq_state->brdysts & (1 << i)))
1195 			continue;
1196 
1197 		ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_TRY_RUN);
1198 		if (ret < 0)
1199 			dev_err(dev, "irq_ready run_error %d : %d\n", i, ret);
1200 	}
1201 
1202 	return 0;
1203 }
1204 
1205 static void usbhsf_dma_complete(void *arg)
1206 {
1207 	struct usbhs_pipe *pipe = arg;
1208 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
1209 	struct device *dev = usbhs_priv_to_dev(priv);
1210 	int ret;
1211 
1212 	ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_DMA_DONE);
1213 	if (ret < 0)
1214 		dev_err(dev, "dma_complete run_error %d : %d\n",
1215 			usbhs_pipe_number(pipe), ret);
1216 }
1217 
1218 /*
1219  *		fifo init
1220  */
1221 void usbhs_fifo_init(struct usbhs_priv *priv)
1222 {
1223 	struct usbhs_mod *mod = usbhs_mod_get_current(priv);
1224 	struct usbhs_fifo *cfifo = usbhsf_get_cfifo(priv);
1225 	struct usbhs_fifo *d0fifo = usbhsf_get_d0fifo(priv);
1226 	struct usbhs_fifo *d1fifo = usbhsf_get_d1fifo(priv);
1227 
1228 	mod->irq_empty		= usbhsf_irq_empty;
1229 	mod->irq_ready		= usbhsf_irq_ready;
1230 	mod->irq_bempsts	= 0;
1231 	mod->irq_brdysts	= 0;
1232 
1233 	cfifo->pipe	= NULL;
1234 	d0fifo->pipe	= NULL;
1235 	d1fifo->pipe	= NULL;
1236 }
1237 
1238 void usbhs_fifo_quit(struct usbhs_priv *priv)
1239 {
1240 	struct usbhs_mod *mod = usbhs_mod_get_current(priv);
1241 
1242 	mod->irq_empty		= NULL;
1243 	mod->irq_ready		= NULL;
1244 	mod->irq_bempsts	= 0;
1245 	mod->irq_brdysts	= 0;
1246 }
1247 
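/*
 * Record the register layout (port/sel/ctr) of CFIFO, D0FIFO and D1FIFO
 * and request DMA channels for the D-FIFOs from the platform slave IDs.
 */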
1248 int usbhs_fifo_probe(struct usbhs_priv *priv)
1249 {
1250 	struct usbhs_fifo *fifo;
1251 
1252 	/* CFIFO */
1253 	fifo = usbhsf_get_cfifo(priv);
1254 	fifo->name	= "CFIFO";
1255 	fifo->port	= CFIFO;
1256 	fifo->sel	= CFIFOSEL;
1257 	fifo->ctr	= CFIFOCTR;
1258 
1259 	/* D0FIFO */
1260 	fifo = usbhsf_get_d0fifo(priv);
1261 	fifo->name	= "D0FIFO";
1262 	fifo->port	= D0FIFO;
1263 	fifo->sel	= D0FIFOSEL;
1264 	fifo->ctr	= D0FIFOCTR;
1265 	fifo->tx_slave.shdma_slave.slave_id	= usbhs_get_dparam(priv, d0_tx_id);
1266 	fifo->rx_slave.shdma_slave.slave_id	= usbhs_get_dparam(priv, d0_rx_id);
1267 	usbhsf_dma_init(priv, fifo);
1268 
1269 	/* D1FIFO */
1270 	fifo = usbhsf_get_d1fifo(priv);
1271 	fifo->name	= "D1FIFO";
1272 	fifo->port	= D1FIFO;
1273 	fifo->sel	= D1FIFOSEL;
1274 	fifo->ctr	= D1FIFOCTR;
1275 	fifo->tx_slave.shdma_slave.slave_id	= usbhs_get_dparam(priv, d1_tx_id);
1276 	fifo->rx_slave.shdma_slave.slave_id	= usbhs_get_dparam(priv, d1_rx_id);
1277 	usbhsf_dma_init(priv, fifo);
1278 
1279 	return 0;
1280 }
1281 
1282 void usbhs_fifo_remove(struct usbhs_priv *priv)
1283 {
1284 	usbhsf_dma_quit(priv, usbhsf_get_d0fifo(priv));
1285 	usbhsf_dma_quit(priv, usbhsf_get_d1fifo(priv));
1286 }
1287