// SPDX-License-Identifier: GPL-1.0+
/*
 * Renesas USB driver
 *
 * Copyright (C) 2011 Renesas Solutions Corp.
 * Copyright (C) 2019 Renesas Electronics Corporation
 * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
 */
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/scatterlist.h>
#include "common.h"
#include "pipe.h"

#define usbhsf_get_cfifo(p)	(&((p)->fifo_info.cfifo))

#define usbhsf_fifo_is_busy(f)	((f)->pipe) /* see usbhs_pipe_select_fifo */

/*
 * packet initialize
 */
void usbhs_pkt_init(struct usbhs_pkt *pkt)
{
	INIT_LIST_HEAD(&pkt->node);
}

/*
 * packet control function
 */
static int usbhsf_null_handle(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pkt->pipe);
	struct device *dev = usbhs_priv_to_dev(priv);

	dev_err(dev, "null handler\n");

	return -EINVAL;
}

static const struct usbhs_pkt_handle usbhsf_null_handler = {
	.prepare = usbhsf_null_handle,
	.try_run = usbhsf_null_handle,
};

void usbhs_pkt_push(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt,
		    void (*done)(struct usbhs_priv *priv,
				 struct usbhs_pkt *pkt),
		    void *buf, int len, int zero, int sequence)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct device *dev = usbhs_priv_to_dev(priv);
	unsigned long flags;

	if (!done) {
		dev_err(dev, "no done function\n");
		return;
	}

	/******************** spin lock ********************/
	usbhs_lock(priv, flags);

	if (!pipe->handler) {
		dev_err(dev, "no handler function\n");
		pipe->handler = &usbhsf_null_handler;
	}

	list_move_tail(&pkt->node, &pipe->list);

	/*
	 * Each pkt must hold its own handler, because the handler may be
	 * changed depending on the situation (e.g. dma handler -> pio handler).
	 */
	pkt->pipe = pipe;
	pkt->buf = buf;
	pkt->handler = pipe->handler;
	pkt->length = len;
	pkt->zero = zero;
	pkt->actual = 0;
	pkt->done = done;
	pkt->sequence = sequence;

	usbhs_unlock(priv, flags);
	/******************** spin unlock ******************/
}
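/*
 * Typical usage from a mod driver (illustrative sketch only; "my_done" is a
 * hypothetical completion callback, not part of this file):
 *
 *	usbhs_pkt_init(pkt);
 *	usbhs_pkt_push(pipe, pkt, my_done, buf, len, 0, -1);
 *	usbhs_pkt_start(pipe);
 *
 * A sequence of -1 is ignored (see usbhsf_pio_try_push() below).
 */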

static void __usbhsf_pkt_del(struct usbhs_pkt *pkt)
{
	list_del_init(&pkt->node);
}

struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe)
{
	return list_first_entry_or_null(&pipe->list, struct usbhs_pkt, node);
}

static void usbhsf_fifo_unselect(struct usbhs_pipe *pipe,
				 struct usbhs_fifo *fifo);
static struct dma_chan *usbhsf_dma_chan_get(struct usbhs_fifo *fifo,
					    struct usbhs_pkt *pkt);
#define usbhsf_dma_map(p)	__usbhsf_dma_map_ctrl(p, 1)
#define usbhsf_dma_unmap(p)	__usbhsf_dma_map_ctrl(p, 0)
static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map);
static void usbhsf_tx_irq_ctrl(struct usbhs_pipe *pipe, int enable);
static void usbhsf_rx_irq_ctrl(struct usbhs_pipe *pipe, int enable);
struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_fifo *fifo = usbhs_pipe_to_fifo(pipe);
	unsigned long flags;

	/******************** spin lock ********************/
	usbhs_lock(priv, flags);

	usbhs_pipe_disable(pipe);

	if (!pkt)
		pkt = __usbhsf_pkt_get(pipe);

	if (pkt) {
		struct dma_chan *chan = NULL;

		if (fifo)
			chan = usbhsf_dma_chan_get(fifo, pkt);
		if (chan) {
			dmaengine_terminate_all(chan);
			usbhsf_dma_unmap(pkt);
		} else {
			if (usbhs_pipe_is_dir_in(pipe))
				usbhsf_rx_irq_ctrl(pipe, 0);
			else
				usbhsf_tx_irq_ctrl(pipe, 0);
		}

		usbhs_pipe_clear_without_sequence(pipe, 0, 0);
		usbhs_pipe_running(pipe, 0);

		__usbhsf_pkt_del(pkt);
	}

	if (fifo)
		usbhsf_fifo_unselect(pipe, fifo);

	usbhs_unlock(priv, flags);
	/******************** spin unlock ******************/

	return pkt;
}

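/*
 * A packet handler is driven in (up to) three phases:
 * - PREPARE is run from usbhs_pkt_start(),
 * - TRY_RUN is run from the BRDY/BEMP interrupt handlers below,
 * - DMA_DONE is run from the dmaengine completion callback.
 */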
enum {
	USBHSF_PKT_PREPARE,
	USBHSF_PKT_TRY_RUN,
	USBHSF_PKT_DMA_DONE,
};

static int usbhsf_pkt_handler(struct usbhs_pipe *pipe, int type)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_pkt *pkt;
	struct device *dev = usbhs_priv_to_dev(priv);
	int (*func)(struct usbhs_pkt *pkt, int *is_done);
	unsigned long flags;
	int ret = 0;
	int is_done = 0;

	/******************** spin lock ********************/
	usbhs_lock(priv, flags);

	pkt = __usbhsf_pkt_get(pipe);
	if (!pkt)
		goto __usbhs_pkt_handler_end;

	switch (type) {
	case USBHSF_PKT_PREPARE:
		func = pkt->handler->prepare;
		break;
	case USBHSF_PKT_TRY_RUN:
		func = pkt->handler->try_run;
		break;
	case USBHSF_PKT_DMA_DONE:
		func = pkt->handler->dma_done;
		break;
	default:
		dev_err(dev, "unknown pkt handler\n");
		goto __usbhs_pkt_handler_end;
	}

	if (likely(func))
		ret = func(pkt, &is_done);

	if (is_done)
		__usbhsf_pkt_del(pkt);

__usbhs_pkt_handler_end:
	usbhs_unlock(priv, flags);
	/******************** spin unlock ******************/

	if (is_done) {
		pkt->done(priv, pkt);
		usbhs_pkt_start(pipe);
	}

	return ret;
}

void usbhs_pkt_start(struct usbhs_pipe *pipe)
{
	usbhsf_pkt_handler(pipe, USBHSF_PKT_PREPARE);
}

/*
 * irq enable/disable function
 */
#define usbhsf_irq_empty_ctrl(p, e) usbhsf_irq_callback_ctrl(p, irq_bempsts, e)
#define usbhsf_irq_ready_ctrl(p, e) usbhsf_irq_callback_ctrl(p, irq_brdysts, e)
#define usbhsf_irq_callback_ctrl(pipe, status, enable)			\
	({								\
		struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);	\
		struct usbhs_mod *mod = usbhs_mod_get_current(priv);	\
		u16 status = (1 << usbhs_pipe_number(pipe));		\
		if (!mod)						\
			return;						\
		if (enable)						\
			mod->status |= status;				\
		else							\
			mod->status &= ~status;				\
		usbhs_irq_callback_update(priv, mod);			\
	})
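/*
 * The "status" argument names the irq mask field in struct usbhs_mod
 * (irq_bempsts or irq_brdysts): the macro sets or clears this pipe's bit in
 * that field and then has usbhs_irq_callback_update() apply the new mask.
 */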

static void usbhsf_tx_irq_ctrl(struct usbhs_pipe *pipe, int enable)
{
	/*
	 * The DCP pipe can NOT use the "ready" interrupt for "send";
	 * it should use the "empty" interrupt.
	 * See
	 *   "Operation" - "Interrupt Function" - "BRDY Interrupt"
	 *
	 * On the other hand, a normal pipe can use the "ready" interrupt
	 * for "send" whether it is single or double buffered.
	 */
	if (usbhs_pipe_is_dcp(pipe))
		usbhsf_irq_empty_ctrl(pipe, enable);
	else
		usbhsf_irq_ready_ctrl(pipe, enable);
}

static void usbhsf_rx_irq_ctrl(struct usbhs_pipe *pipe, int enable)
{
	usbhsf_irq_ready_ctrl(pipe, enable);
}

/*
 * FIFO ctrl
 */
static void usbhsf_send_terminator(struct usbhs_pipe *pipe,
				   struct usbhs_fifo *fifo)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);

	usbhs_bset(priv, fifo->ctr, BVAL, BVAL);
}

static int usbhsf_fifo_barrier(struct usbhs_priv *priv,
			       struct usbhs_fifo *fifo)
{
	/* The FIFO port is accessible */
	if (usbhs_read(priv, fifo->ctr) & FRDY)
		return 0;

	return -EBUSY;
}

static void usbhsf_fifo_clear(struct usbhs_pipe *pipe,
			      struct usbhs_fifo *fifo)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	int ret = 0;

	if (!usbhs_pipe_is_dcp(pipe)) {
		/*
		 * This driver checks the pipe condition first to avoid -EBUSY
		 * from usbhsf_fifo_barrier() if the pipe is RX direction and
		 * empty.
		 */
		if (usbhs_pipe_is_dir_in(pipe))
			ret = usbhs_pipe_is_accessible(pipe);
		if (!ret)
			ret = usbhsf_fifo_barrier(priv, fifo);
	}

	/*
	 * For a non-DCP pipe, this driver should set BCLR only when
	 * usbhsf_fifo_barrier() returns 0.
	 */
	if (!ret)
		usbhs_write(priv, fifo->ctr, BCLR);
}

static int usbhsf_fifo_rcv_len(struct usbhs_priv *priv,
			       struct usbhs_fifo *fifo)
{
	return usbhs_read(priv, fifo->ctr) & DTLN_MASK;
}

static void usbhsf_fifo_unselect(struct usbhs_pipe *pipe,
				 struct usbhs_fifo *fifo)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);

	usbhs_pipe_select_fifo(pipe, NULL);
	usbhs_write(priv, fifo->sel, 0);
}

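/*
 * Point the given FIFO port at this pipe by writing CURPIPE (and ISEL for
 * the DCP) to the FIFOSEL register, then poll until the selection takes
 * effect. Returns -EBUSY if the pipe or FIFO is already in use, and -EIO
 * if the selection never sticks.
 */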
static int usbhsf_fifo_select(struct usbhs_pipe *pipe,
			      struct usbhs_fifo *fifo,
			      int write)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct device *dev = usbhs_priv_to_dev(priv);
	int timeout = 1024;
	u16 mask = ((1 << 5) | 0xF);		/* mask of ISEL | CURPIPE */
	u16 base = usbhs_pipe_number(pipe);	/* CURPIPE */

	if (usbhs_pipe_is_busy(pipe) ||
	    usbhsf_fifo_is_busy(fifo))
		return -EBUSY;

	if (usbhs_pipe_is_dcp(pipe)) {
		base |= (1 == write) << 5;	/* ISEL */

		if (usbhs_mod_is_host(priv))
			usbhs_dcp_dir_for_host(pipe, write);
	}

	/* "base" will be used below */
	usbhs_write(priv, fifo->sel, base | MBW_32);

	/* check ISEL and CURPIPE value */
	while (timeout--) {
		if (base == (mask & usbhs_read(priv, fifo->sel))) {
			usbhs_pipe_select_fifo(pipe, fifo);
			return 0;
		}
		udelay(10);
	}

	dev_err(dev, "fifo select error\n");

	return -EIO;
}

/*
 * DCP status stage
 */
static int usbhs_dcp_dir_switch_to_write(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
	struct device *dev = usbhs_priv_to_dev(priv);
	int ret;

	usbhs_pipe_disable(pipe);

	ret = usbhsf_fifo_select(pipe, fifo, 1);
	if (ret < 0) {
		dev_err(dev, "%s() failed\n", __func__);
		return ret;
	}

	usbhs_pipe_sequence_data1(pipe); /* DATA1 */

	usbhsf_fifo_clear(pipe, fifo);
	usbhsf_send_terminator(pipe, fifo);

	usbhsf_fifo_unselect(pipe, fifo);

	usbhsf_tx_irq_ctrl(pipe, 1);
	usbhs_pipe_enable(pipe);

	return ret;
}

static int usbhs_dcp_dir_switch_to_read(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
	struct device *dev = usbhs_priv_to_dev(priv);
	int ret;

	usbhs_pipe_disable(pipe);

	ret = usbhsf_fifo_select(pipe, fifo, 0);
	if (ret < 0) {
		dev_err(dev, "%s() failed\n", __func__);
		return ret;
	}

	usbhs_pipe_sequence_data1(pipe); /* DATA1 */
	usbhsf_fifo_clear(pipe, fifo);

	usbhsf_fifo_unselect(pipe, fifo);

	usbhsf_rx_irq_ctrl(pipe, 1);
	usbhs_pipe_enable(pipe);

	return ret;
}

static int usbhs_dcp_dir_switch_done(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;

	if (pkt->handler == &usbhs_dcp_status_stage_in_handler)
		usbhsf_tx_irq_ctrl(pipe, 0);
	else
		usbhsf_rx_irq_ctrl(pipe, 0);

	pkt->actual = pkt->length;
	*is_done = 1;

	return 0;
}

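/*
 * The "in" handler transmits the zero-length status packet (empty FIFO +
 * BVAL), the "out" handler receives it; both complete through
 * usbhs_dcp_dir_switch_done() once the corresponding BEMP/BRDY interrupt
 * fires.
 */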
const struct usbhs_pkt_handle usbhs_dcp_status_stage_in_handler = {
	.prepare = usbhs_dcp_dir_switch_to_write,
	.try_run = usbhs_dcp_dir_switch_done,
};

const struct usbhs_pkt_handle usbhs_dcp_status_stage_out_handler = {
	.prepare = usbhs_dcp_dir_switch_to_read,
	.try_run = usbhs_dcp_dir_switch_done,
};

/*
 * DCP data stage (push)
 */
static int usbhsf_dcp_data_stage_try_push(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;

	usbhs_pipe_sequence_data1(pipe); /* DATA1 */

	/*
	 * change handler to PIO push
	 */
	pkt->handler = &usbhs_fifo_pio_push_handler;

	return pkt->handler->prepare(pkt, is_done);
}

const struct usbhs_pkt_handle usbhs_dcp_data_stage_out_handler = {
	.prepare = usbhsf_dcp_data_stage_try_push,
};

/*
 * DCP data stage (pop)
 */
static int usbhsf_dcp_data_stage_prepare_pop(struct usbhs_pkt *pkt,
					     int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv);

	if (usbhs_pipe_is_busy(pipe))
		return 0;

	/*
	 * prepare pop for DCP should
	 *  - change DCP direction,
	 *  - clear fifo
	 *  - DATA1
	 */
	usbhs_pipe_disable(pipe);

	usbhs_pipe_sequence_data1(pipe); /* DATA1 */

	usbhsf_fifo_select(pipe, fifo, 0);
	usbhsf_fifo_clear(pipe, fifo);
	usbhsf_fifo_unselect(pipe, fifo);

	/*
	 * change handler to PIO pop
	 */
	pkt->handler = &usbhs_fifo_pio_pop_handler;

	return pkt->handler->prepare(pkt, is_done);
}

const struct usbhs_pkt_handle usbhs_dcp_data_stage_in_handler = {
	.prepare = usbhsf_dcp_data_stage_prepare_pop,
};

/*
 * PIO push handler
 */
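/*
 * A packet longer than maxpacket is sent one max-packet chunk per call:
 * prepare/try_run writes up to maxp bytes into the CFIFO, and the BEMP/BRDY
 * interrupt re-runs try_run (via usbhsf_pkt_handler) until *is_done is set.
 */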
static int usbhsf_pio_try_push(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct device *dev = usbhs_priv_to_dev(priv);
	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
	void __iomem *addr = priv->base + fifo->port;
	u8 *buf;
	int maxp = usbhs_pipe_get_maxpacket(pipe);
	int total_len;
	int i, ret, len;
	int is_short;

	usbhs_pipe_data_sequence(pipe, pkt->sequence);
	pkt->sequence = -1; /* -1 sequence will be ignored */

	usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->length);

	ret = usbhsf_fifo_select(pipe, fifo, 1);
	if (ret < 0)
		return 0;

	ret = usbhs_pipe_is_accessible(pipe);
	if (ret < 0) {
		/* inaccessible pipe is not an error */
		ret = 0;
		goto usbhs_fifo_write_busy;
	}

	ret = usbhsf_fifo_barrier(priv, fifo);
	if (ret < 0)
		goto usbhs_fifo_write_busy;

	buf = pkt->buf + pkt->actual;
	len = pkt->length - pkt->actual;
	len = min(len, maxp);
	total_len = len;
	is_short = total_len < maxp;

	/*
	 * FIXME
	 *
	 * 32-bit access only
	 */
	if (len >= 4 && !((unsigned long)buf & 0x03)) {
		iowrite32_rep(addr, buf, len / 4);
		len %= 4;
		buf += total_len - len;
	}

	/* write the remaining bytes */
	if (usbhs_get_dparam(priv, cfifo_byte_addr)) {
		for (i = 0; i < len; i++)
			iowrite8(buf[i], addr + (i & 0x03));
	} else {
		for (i = 0; i < len; i++)
			iowrite8(buf[i], addr + (0x03 - (i & 0x03)));
	}

	/*
	 * variable update
	 */
	pkt->actual += total_len;

	if (pkt->actual < pkt->length)
		*is_done = 0;		/* there is remaining data */
	else if (is_short)
		*is_done = 1;		/* short packet */
	else
		*is_done = !pkt->zero;	/* send zero packet? */

	/*
	 * pipe/irq handling
	 */
	if (is_short)
		usbhsf_send_terminator(pipe, fifo);

	usbhsf_tx_irq_ctrl(pipe, !*is_done);
	usbhs_pipe_running(pipe, !*is_done);
	usbhs_pipe_enable(pipe);

	dev_dbg(dev, "  send %d (%d/ %d/ %d/ %d)\n",
		usbhs_pipe_number(pipe),
		pkt->length, pkt->actual, *is_done, pkt->zero);

	usbhsf_fifo_unselect(pipe, fifo);

	return 0;

usbhs_fifo_write_busy:
	usbhsf_fifo_unselect(pipe, fifo);

	/*
	 * The pipe is busy; retry from the interrupt handler.
	 */
	usbhsf_tx_irq_ctrl(pipe, 1);
	usbhs_pipe_running(pipe, 1);

	return ret;
}

static int usbhsf_pio_prepare_push(struct usbhs_pkt *pkt, int *is_done)
{
	if (usbhs_pipe_is_running(pkt->pipe))
		return 0;

	return usbhsf_pio_try_push(pkt, is_done);
}

const struct usbhs_pkt_handle usbhs_fifo_pio_push_handler = {
	.prepare = usbhsf_pio_prepare_push,
	.try_run = usbhsf_pio_try_push,
};

/*
 * PIO pop handler
 */
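/*
 * Each BRDY interrupt pops at most DTLN bytes from the CFIFO into pkt->buf;
 * the transfer is done on a short (or zero-length) packet or once
 * pkt->length bytes have been received.
 */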
static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv);

	if (usbhs_pipe_is_busy(pipe))
		return 0;

	if (usbhs_pipe_is_running(pipe))
		return 0;

	/*
	 * enable the pipe to prepare for packet reception
	 */
	usbhs_pipe_data_sequence(pipe, pkt->sequence);
	pkt->sequence = -1; /* -1 sequence will be ignored */

	if (usbhs_pipe_is_dcp(pipe))
		usbhsf_fifo_clear(pipe, fifo);

	usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->length);
	usbhs_pipe_enable(pipe);
	usbhs_pipe_running(pipe, 1);
	usbhsf_rx_irq_ctrl(pipe, 1);

	return 0;
}

static int usbhsf_pio_try_pop(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct device *dev = usbhs_priv_to_dev(priv);
	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
	void __iomem *addr = priv->base + fifo->port;
	u8 *buf;
	u32 data = 0;
	int maxp = usbhs_pipe_get_maxpacket(pipe);
	int rcv_len, len;
	int i, ret;
	int total_len = 0;

	ret = usbhsf_fifo_select(pipe, fifo, 0);
	if (ret < 0)
		return 0;

	ret = usbhsf_fifo_barrier(priv, fifo);
	if (ret < 0)
		goto usbhs_fifo_read_busy;

	rcv_len = usbhsf_fifo_rcv_len(priv, fifo);

	buf = pkt->buf + pkt->actual;
	len = pkt->length - pkt->actual;
	len = min(len, rcv_len);
	total_len = len;

	/*
	 * Update the actual length first, to decide whether to disable the
	 * pipe. If this pipe stays in BUF state after all data have been
	 * popped, the next interrupt/token would be issued again.
	 */
	pkt->actual += total_len;

	if ((pkt->actual == pkt->length) ||	/* receive all data */
	    (total_len < maxp)) {		/* short packet */
		*is_done = 1;
		usbhsf_rx_irq_ctrl(pipe, 0);
		usbhs_pipe_running(pipe, 0);
		/*
		 * In function mode the controller may enter the Control Write
		 * status stage at this point, so the driver must not disable
		 * the pipe here; otherwise the controller cannot complete the
		 * status stage.
		 */
		if (!usbhs_mod_is_host(priv) && !usbhs_pipe_is_dcp(pipe))
			usbhs_pipe_disable(pipe);	/* disable pipe first */
	}

	/*
	 * Clear the buffer if this is a zero-length packet.
	 *
	 * see
	 *  "Operation" - "FIFO Buffer Memory" - "FIFO Port Function"
	 */
	if (0 == rcv_len) {
		pkt->zero = 1;
		usbhsf_fifo_clear(pipe, fifo);
		goto usbhs_fifo_read_end;
	}

	/*
	 * FIXME
	 *
	 * 32-bit access only
	 */
	if (len >= 4 && !((unsigned long)buf & 0x03)) {
		ioread32_rep(addr, buf, len / 4);
		len %= 4;
		buf += total_len - len;
	}

	/* read the remaining bytes */
	for (i = 0; i < len; i++) {
		if (!(i & 0x03))
			data = ioread32(addr);

		buf[i] = (data >> ((i & 0x03) * 8)) & 0xff;
	}

usbhs_fifo_read_end:
	dev_dbg(dev, "  recv %d (%d/ %d/ %d/ %d)\n",
		usbhs_pipe_number(pipe),
		pkt->length, pkt->actual, *is_done, pkt->zero);

usbhs_fifo_read_busy:
	usbhsf_fifo_unselect(pipe, fifo);

	return ret;
}

const struct usbhs_pkt_handle usbhs_fifo_pio_pop_handler = {
	.prepare = usbhsf_prepare_pop,
	.try_run = usbhsf_pio_try_pop,
};

/*
 * DCP control stage handler
 */
static int usbhsf_ctrl_stage_end(struct usbhs_pkt *pkt, int *is_done)
{
	usbhs_dcp_control_transfer_done(pkt->pipe);

	*is_done = 1;

	return 0;
}

const struct usbhs_pkt_handle usbhs_ctrl_stage_end_handler = {
	.prepare = usbhsf_ctrl_stage_end,
	.try_run = usbhsf_ctrl_stage_end,
};

/*
 * DMA fifo functions
 */
static struct dma_chan *usbhsf_dma_chan_get(struct usbhs_fifo *fifo,
					    struct usbhs_pkt *pkt)
{
	if (&usbhs_fifo_dma_push_handler == pkt->handler)
		return fifo->tx_chan;

	if (&usbhs_fifo_dma_pop_handler == pkt->handler)
		return fifo->rx_chan;

	return NULL;
}

static struct usbhs_fifo *usbhsf_get_dma_fifo(struct usbhs_priv *priv,
					      struct usbhs_pkt *pkt)
{
	struct usbhs_fifo *fifo;
	int i;

	usbhs_for_each_dfifo(priv, fifo, i) {
		if (usbhsf_dma_chan_get(fifo, pkt) &&
		    !usbhsf_fifo_is_busy(fifo))
			return fifo;
	}

	return NULL;
}

#define usbhsf_dma_start(p, f)	__usbhsf_dma_ctrl(p, f, DREQE)
#define usbhsf_dma_stop(p, f)	__usbhsf_dma_ctrl(p, f, 0)
static void __usbhsf_dma_ctrl(struct usbhs_pipe *pipe,
			      struct usbhs_fifo *fifo,
			      u16 dreqe)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);

	usbhs_bset(priv, fifo->sel, DREQE, dreqe);
}

static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_pipe_info *info = usbhs_priv_to_pipeinfo(priv);
	struct usbhs_fifo *fifo = usbhs_pipe_to_fifo(pipe);
	struct dma_chan *chan = usbhsf_dma_chan_get(fifo, pkt);

	return info->dma_map_ctrl(chan->device->dev, pkt, map);
}

static void usbhsf_dma_complete(void *arg,
				const struct dmaengine_result *result);
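/*
 * Prepare and submit a slave DMA descriptor for the packet and kick the
 * transfer. Called with the usbhs lock held, either directly from the
 * prepare handlers or via xfer_work().
 */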
static void usbhsf_dma_xfer_preparing(struct usbhs_pkt *pkt)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_fifo *fifo;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	struct device *dev = usbhs_priv_to_dev(priv);
	enum dma_transfer_direction dir;
	dma_cookie_t cookie;

	fifo = usbhs_pipe_to_fifo(pipe);
	if (!fifo)
		return;

	chan = usbhsf_dma_chan_get(fifo, pkt);
	dir = usbhs_pipe_is_dir_in(pipe) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;

	desc = dmaengine_prep_slave_single(chan, pkt->dma + pkt->actual,
					   pkt->trans, dir,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return;

	desc->callback_result = usbhsf_dma_complete;
	desc->callback_param = pkt;

	cookie = dmaengine_submit(desc);
	if (cookie < 0) {
		dev_err(dev, "Failed to submit dma descriptor\n");
		return;
	}

	dev_dbg(dev, "  %s %d (%d/ %d)\n",
		fifo->name, usbhs_pipe_number(pipe), pkt->length, pkt->zero);

	usbhs_pipe_running(pipe, 1);
	usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->trans);
	dma_async_issue_pending(chan);
	usbhsf_dma_start(pipe, fifo);
	usbhs_pipe_enable(pipe);
}

static void xfer_work(struct work_struct *work)
{
	struct usbhs_pkt *pkt = container_of(work, struct usbhs_pkt, work);
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	unsigned long flags;

	usbhs_lock(priv, flags);
	usbhsf_dma_xfer_preparing(pkt);
	usbhs_unlock(priv, flags);
}

/*
 * DMA push handler
 */
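/*
 * Fall back to PIO when DMA cannot be used: the packet is shorter than
 * pio_dma_border, the pipe is isochronous, the buffer or (without a
 * USB-DMAC) the length is not suitably aligned, no free DFIFO has a DMA
 * channel, or FIFO selection / DMA mapping fails.
 */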
static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_fifo *fifo;
	int len = pkt->length - pkt->actual;
	int ret;
	uintptr_t align_mask;

	if (usbhs_pipe_is_busy(pipe))
		return 0;

	/* use PIO if packet is less than pio_dma_border or pipe is ISOC */
	if ((len < usbhs_get_dparam(priv, pio_dma_border)) ||
	    usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_ISOC))
		goto usbhsf_pio_prepare_push;

	/* check data length if this driver doesn't use a USB-DMAC */
	if (!usbhs_get_dparam(priv, has_usb_dmac) && len & 0x7)
		goto usbhsf_pio_prepare_push;

	/* check buffer alignment */
	align_mask = usbhs_get_dparam(priv, has_usb_dmac) ?
					USBHS_USB_DMAC_XFER_SIZE - 1 : 0x7;
	if ((uintptr_t)(pkt->buf + pkt->actual) & align_mask)
		goto usbhsf_pio_prepare_push;

	/* return at this time if the pipe is running */
	if (usbhs_pipe_is_running(pipe))
		return 0;

	/* get an available DMA fifo */
	fifo = usbhsf_get_dma_fifo(priv, pkt);
	if (!fifo)
		goto usbhsf_pio_prepare_push;

	ret = usbhsf_fifo_select(pipe, fifo, 0);
	if (ret < 0)
		goto usbhsf_pio_prepare_push;

	if (usbhsf_dma_map(pkt) < 0)
		goto usbhsf_pio_prepare_push_unselect;

	pkt->trans = len;

	usbhsf_tx_irq_ctrl(pipe, 0);
	/* FIXME: Workaround so this can be called in atomic context with USB-DMAC */
	if (usbhs_get_dparam(priv, has_usb_dmac)) {
		usbhsf_dma_xfer_preparing(pkt);
	} else {
		INIT_WORK(&pkt->work, xfer_work);
		schedule_work(&pkt->work);
	}

	return 0;

usbhsf_pio_prepare_push_unselect:
	usbhsf_fifo_unselect(pipe, fifo);
usbhsf_pio_prepare_push:
	/*
	 * change handler to PIO
	 */
	pkt->handler = &usbhs_fifo_pio_push_handler;

	return pkt->handler->prepare(pkt, is_done);
}

static int usbhsf_dma_push_done(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	int is_short = pkt->trans % usbhs_pipe_get_maxpacket(pipe);

	pkt->actual += pkt->trans;

	if (pkt->actual < pkt->length)
		*is_done = 0;		/* there is remaining data */
	else if (is_short)
		*is_done = 1;		/* short packet */
	else
		*is_done = !pkt->zero;	/* send zero packet? */

	usbhs_pipe_running(pipe, !*is_done);

	usbhsf_dma_stop(pipe, pipe->fifo);
	usbhsf_dma_unmap(pkt);
	usbhsf_fifo_unselect(pipe, pipe->fifo);

	if (!*is_done) {
		/* change handler to PIO */
		pkt->handler = &usbhs_fifo_pio_push_handler;
		return pkt->handler->try_run(pkt, is_done);
	}

	return 0;
}

const struct usbhs_pkt_handle usbhs_fifo_dma_push_handler = {
	.prepare = usbhsf_dma_prepare_push,
	.dma_done = usbhsf_dma_push_done,
};

/*
 * DMA pop handler
 */

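/*
 * Two strategies, selected in usbhsf_dma_prepare_pop(): with a USB-DMAC the
 * whole transfer is handed to DMA up front; without one, the first BRDY
 * interrupt is taken PIO-style and try_run decides between DMA and PIO
 * based on the amount of received data.
 */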
static int usbhsf_dma_prepare_pop_with_rx_irq(struct usbhs_pkt *pkt,
					      int *is_done)
{
	return usbhsf_prepare_pop(pkt, is_done);
}

static int usbhsf_dma_prepare_pop_with_usb_dmac(struct usbhs_pkt *pkt,
						int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_fifo *fifo;
	int ret;

	if (usbhs_pipe_is_busy(pipe))
		return 0;

	/* use PIO if packet is less than pio_dma_border or pipe is ISOC */
	if ((pkt->length < usbhs_get_dparam(priv, pio_dma_border)) ||
	    usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_ISOC))
		goto usbhsf_pio_prepare_pop;

	fifo = usbhsf_get_dma_fifo(priv, pkt);
	if (!fifo)
		goto usbhsf_pio_prepare_pop;

	if ((uintptr_t)pkt->buf & (USBHS_USB_DMAC_XFER_SIZE - 1))
		goto usbhsf_pio_prepare_pop;

	/* return at this time if the pipe is running */
	if (usbhs_pipe_is_running(pipe))
		return 0;

	usbhs_pipe_config_change_bfre(pipe, 1);

	ret = usbhsf_fifo_select(pipe, fifo, 0);
	if (ret < 0)
		goto usbhsf_pio_prepare_pop;

	if (usbhsf_dma_map(pkt) < 0)
		goto usbhsf_pio_prepare_pop_unselect;

	/* DMA */

	/*
	 * usbhs_fifo_dma_pop_handler :: prepare enabled the rx irq to get
	 * here, but it is no longer needed for DMA; disable it.
	 */
	usbhsf_rx_irq_ctrl(pipe, 0);

	pkt->trans = pkt->length;

	usbhsf_dma_xfer_preparing(pkt);

	return 0;

usbhsf_pio_prepare_pop_unselect:
	usbhsf_fifo_unselect(pipe, fifo);
usbhsf_pio_prepare_pop:

	/*
	 * change handler to PIO
	 */
	pkt->handler = &usbhs_fifo_pio_pop_handler;
	usbhs_pipe_config_change_bfre(pipe, 0);

	return pkt->handler->prepare(pkt, is_done);
}

static int usbhsf_dma_prepare_pop(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pkt->pipe);

	if (usbhs_get_dparam(priv, has_usb_dmac))
		return usbhsf_dma_prepare_pop_with_usb_dmac(pkt, is_done);
	else
		return usbhsf_dma_prepare_pop_with_rx_irq(pkt, is_done);
}

static int usbhsf_dma_try_pop_with_rx_irq(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_fifo *fifo;
	int len, ret;

	if (usbhs_pipe_is_busy(pipe))
		return 0;

	if (usbhs_pipe_is_dcp(pipe))
		goto usbhsf_pio_prepare_pop;

	/* get an available DMA fifo */
	fifo = usbhsf_get_dma_fifo(priv, pkt);
	if (!fifo)
		goto usbhsf_pio_prepare_pop;

	if ((uintptr_t)(pkt->buf + pkt->actual) & 0x7) /* 8-byte alignment */
		goto usbhsf_pio_prepare_pop;

	ret = usbhsf_fifo_select(pipe, fifo, 0);
	if (ret < 0)
		goto usbhsf_pio_prepare_pop;

	/* use PIO if packet is less than pio_dma_border */
	len = usbhsf_fifo_rcv_len(priv, fifo);
	len = min(pkt->length - pkt->actual, len);
	if (len & 0x7) /* 8-byte alignment */
		goto usbhsf_pio_prepare_pop_unselect;

	if (len < usbhs_get_dparam(priv, pio_dma_border))
		goto usbhsf_pio_prepare_pop_unselect;

	ret = usbhsf_fifo_barrier(priv, fifo);
	if (ret < 0)
		goto usbhsf_pio_prepare_pop_unselect;

	if (usbhsf_dma_map(pkt) < 0)
		goto usbhsf_pio_prepare_pop_unselect;

	/* DMA */

	/*
	 * usbhs_fifo_dma_pop_handler :: prepare enabled the rx irq to get
	 * here, but it is no longer needed for DMA; disable it.
	 */
	usbhsf_rx_irq_ctrl(pipe, 0);

	pkt->trans = len;

	INIT_WORK(&pkt->work, xfer_work);
	schedule_work(&pkt->work);

	return 0;

usbhsf_pio_prepare_pop_unselect:
	usbhsf_fifo_unselect(pipe, fifo);
usbhsf_pio_prepare_pop:

	/*
	 * change handler to PIO
	 */
	pkt->handler = &usbhs_fifo_pio_pop_handler;

	return pkt->handler->try_run(pkt, is_done);
}

static int usbhsf_dma_try_pop(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pkt->pipe);

	BUG_ON(usbhs_get_dparam(priv, has_usb_dmac));

	return usbhsf_dma_try_pop_with_rx_irq(pkt, is_done);
}

static int usbhsf_dma_pop_done_with_rx_irq(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	int maxp = usbhs_pipe_get_maxpacket(pipe);

	usbhsf_dma_stop(pipe, pipe->fifo);
	usbhsf_dma_unmap(pkt);
	usbhsf_fifo_unselect(pipe, pipe->fifo);

	pkt->actual += pkt->trans;

	if ((pkt->actual == pkt->length) ||	/* receive all data */
	    (pkt->trans < maxp)) {		/* short packet */
		*is_done = 1;
		usbhs_pipe_running(pipe, 0);
	} else {
		/* re-enable */
		usbhs_pipe_running(pipe, 0);
		usbhsf_prepare_pop(pkt, is_done);
	}

	return 0;
}

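/*
 * received_size is first derived from the dmaengine residue. If DTLN is
 * non-zero, a short packet terminated the transfer and the last USB-DMAC
 * chunk over-counts it, so one USBHS_USB_DMAC_XFER_SIZE is subtracted, the
 * result is rounded down to a max-packet boundary, and the DTLN byte count
 * is added back instead.
 */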
static size_t usbhs_dma_calc_received_size(struct usbhs_pkt *pkt,
					   struct dma_chan *chan, int dtln)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	size_t received_size;
	int maxp = usbhs_pipe_get_maxpacket(pipe);

	received_size = pkt->length - pkt->dma_result->residue;

	if (dtln) {
		received_size -= USBHS_USB_DMAC_XFER_SIZE;
		received_size &= ~(maxp - 1);
		received_size += dtln;
	}

	return received_size;
}

static int usbhsf_dma_pop_done_with_usb_dmac(struct usbhs_pkt *pkt,
					     int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_fifo *fifo = usbhs_pipe_to_fifo(pipe);
	struct dma_chan *chan = usbhsf_dma_chan_get(fifo, pkt);
	int rcv_len;

	/*
	 * Since the driver disables the rx irq in DMA mode, the interrupt
	 * handler cannot clear BRDYSTS. So, this function clears it here
	 * because the driver may use PIO mode next time.
	 */
	usbhs_xxxsts_clear(priv, BRDYSTS, usbhs_pipe_number(pipe));

	rcv_len = usbhsf_fifo_rcv_len(priv, fifo);
	usbhsf_fifo_clear(pipe, fifo);
	pkt->actual = usbhs_dma_calc_received_size(pkt, chan, rcv_len);

	usbhs_pipe_running(pipe, 0);
	usbhsf_dma_stop(pipe, fifo);
	usbhsf_dma_unmap(pkt);
	usbhsf_fifo_unselect(pipe, pipe->fifo);

	/* The driver can assume the rx transaction is always "done" */
	*is_done = 1;

	return 0;
}

static int usbhsf_dma_pop_done(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pkt->pipe);

	if (usbhs_get_dparam(priv, has_usb_dmac))
		return usbhsf_dma_pop_done_with_usb_dmac(pkt, is_done);
	else
		return usbhsf_dma_pop_done_with_rx_irq(pkt, is_done);
}

const struct usbhs_pkt_handle usbhs_fifo_dma_pop_handler = {
	.prepare = usbhsf_dma_prepare_pop,
	.try_run = usbhsf_dma_try_pop,
	.dma_done = usbhsf_dma_pop_done
};

/*
 * DMA setting
 */
static bool usbhsf_dma_filter(struct dma_chan *chan, void *param)
{
	struct sh_dmae_slave *slave = param;

	/*
	 * FIXME
	 *
	 * usbhs doesn't recognize id = 0 as a valid DMA channel
	 */
	if (0 == slave->shdma_slave.slave_id)
		return false;

	chan->private = slave;

	return true;
}

static void usbhsf_dma_quit(struct usbhs_priv *priv, struct usbhs_fifo *fifo)
{
	if (fifo->tx_chan)
		dma_release_channel(fifo->tx_chan);
	if (fifo->rx_chan)
		dma_release_channel(fifo->rx_chan);

	fifo->tx_chan = NULL;
	fifo->rx_chan = NULL;
}

static void usbhsf_dma_init_pdev(struct usbhs_fifo *fifo)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	fifo->tx_chan = dma_request_channel(mask, usbhsf_dma_filter,
					    &fifo->tx_slave);

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	fifo->rx_chan = dma_request_channel(mask, usbhsf_dma_filter,
					    &fifo->rx_slave);
}

static void usbhsf_dma_init_dt(struct device *dev, struct usbhs_fifo *fifo,
			       int channel)
{
	char name[16];

	/*
	 * To avoid complex handling of the DnFIFOs, the driver uses each
	 * DnFIFO in a single direction (TX or RX, not bidirectional):
	 * odd channels for TX, even channels for RX.
	 */
	snprintf(name, sizeof(name), "ch%d", channel);
	if (channel & 1) {
		fifo->tx_chan = dma_request_chan(dev, name);
		if (IS_ERR(fifo->tx_chan))
			fifo->tx_chan = NULL;
	} else {
		fifo->rx_chan = dma_request_chan(dev, name);
		if (IS_ERR(fifo->rx_chan))
			fifo->rx_chan = NULL;
	}
}

static void usbhsf_dma_init(struct usbhs_priv *priv, struct usbhs_fifo *fifo,
			    int channel)
{
	struct device *dev = usbhs_priv_to_dev(priv);

	if (dev_of_node(dev))
		usbhsf_dma_init_dt(dev, fifo, channel);
	else
		usbhsf_dma_init_pdev(fifo);

	if (fifo->tx_chan || fifo->rx_chan)
		dev_dbg(dev, "enable DMAEngine (%s%s%s)\n",
			fifo->name,
			fifo->tx_chan ? "[TX]" : " ",
			fifo->rx_chan ? "[RX]" : " ");
}

/*
 * irq functions
 */
static int usbhsf_irq_empty(struct usbhs_priv *priv,
			    struct usbhs_irq_state *irq_state)
{
	struct usbhs_pipe *pipe;
	struct device *dev = usbhs_priv_to_dev(priv);
	int i, ret;

	if (!irq_state->bempsts) {
		dev_err(dev, "debug %s !!\n", __func__);
		return -EIO;
	}

	dev_dbg(dev, "irq empty [0x%04x]\n", irq_state->bempsts);

	/*
	 * search the interrupted "pipe", not the "uep"
	 */
	usbhs_for_each_pipe_with_dcp(pipe, priv, i) {
		if (!(irq_state->bempsts & (1 << i)))
			continue;

		ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_TRY_RUN);
		if (ret < 0)
			dev_err(dev, "irq_empty run_error %d : %d\n", i, ret);
	}

	return 0;
}

static int usbhsf_irq_ready(struct usbhs_priv *priv,
			    struct usbhs_irq_state *irq_state)
{
	struct usbhs_pipe *pipe;
	struct device *dev = usbhs_priv_to_dev(priv);
	int i, ret;

	if (!irq_state->brdysts) {
		dev_err(dev, "debug %s !!\n", __func__);
		return -EIO;
	}

	dev_dbg(dev, "irq ready [0x%04x]\n", irq_state->brdysts);

	/*
	 * search the interrupted "pipe", not the "uep"
	 */
	usbhs_for_each_pipe_with_dcp(pipe, priv, i) {
		if (!(irq_state->brdysts & (1 << i)))
			continue;

		ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_TRY_RUN);
		if (ret < 0)
			dev_err(dev, "irq_ready run_error %d : %d\n", i, ret);
	}

	return 0;
}

static void usbhsf_dma_complete(void *arg,
				const struct dmaengine_result *result)
{
	struct usbhs_pkt *pkt = arg;
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct device *dev = usbhs_priv_to_dev(priv);
	int ret;

	pkt->dma_result = result;
	ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_DMA_DONE);
	if (ret < 0)
		dev_err(dev, "dma_complete run_error %d : %d\n",
			usbhs_pipe_number(pipe), ret);
}

void usbhs_fifo_clear_dcp(struct usbhs_pipe *pipe)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */

	/* clear the DCP FIFO for transmission */
	if (usbhsf_fifo_select(pipe, fifo, 1) < 0)
		return;
	usbhsf_fifo_clear(pipe, fifo);
	usbhsf_fifo_unselect(pipe, fifo);

	/* clear the DCP FIFO for reception */
	if (usbhsf_fifo_select(pipe, fifo, 0) < 0)
		return;
	usbhsf_fifo_clear(pipe, fifo);
	usbhsf_fifo_unselect(pipe, fifo);
}

/*
 * fifo init
 */
void usbhs_fifo_init(struct usbhs_priv *priv)
{
	struct usbhs_mod *mod = usbhs_mod_get_current(priv);
	struct usbhs_fifo *cfifo = usbhsf_get_cfifo(priv);
	struct usbhs_fifo *dfifo;
	int i;

	mod->irq_empty = usbhsf_irq_empty;
	mod->irq_ready = usbhsf_irq_ready;
	mod->irq_bempsts = 0;
	mod->irq_brdysts = 0;

	cfifo->pipe = NULL;
	usbhs_for_each_dfifo(priv, dfifo, i)
		dfifo->pipe = NULL;
}

void usbhs_fifo_quit(struct usbhs_priv *priv)
{
	struct usbhs_mod *mod = usbhs_mod_get_current(priv);

	mod->irq_empty = NULL;
	mod->irq_ready = NULL;
	mod->irq_bempsts = 0;
	mod->irq_brdysts = 0;
}

#define __USBHS_DFIFO_INIT(priv, fifo, channel, fifo_port)		\
do {									\
	fifo = usbhsf_get_dnfifo(priv, channel);			\
	fifo->name = "D"#channel"FIFO";					\
	fifo->port = fifo_port;						\
	fifo->sel = D##channel##FIFOSEL;				\
	fifo->ctr = D##channel##FIFOCTR;				\
	fifo->tx_slave.shdma_slave.slave_id =				\
			usbhs_get_dparam(priv, d##channel##_tx_id);	\
	fifo->rx_slave.shdma_slave.slave_id =				\
			usbhs_get_dparam(priv, d##channel##_rx_id);	\
	usbhsf_dma_init(priv, fifo, channel);				\
} while (0)

#define USBHS_DFIFO_INIT(priv, fifo, channel)				\
	__USBHS_DFIFO_INIT(priv, fifo, channel, D##channel##FIFO)
#define USBHS_DFIFO_INIT_NO_PORT(priv, fifo, channel)			\
	__USBHS_DFIFO_INIT(priv, fifo, channel, 0)
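/*
 * For reference, USBHS_DFIFO_INIT(priv, fifo, 1) expands (roughly) to:
 *
 *	fifo = usbhsf_get_dnfifo(priv, 1);
 *	fifo->name = "D1FIFO";
 *	fifo->port = D1FIFO;
 *	fifo->sel = D1FIFOSEL;
 *	fifo->ctr = D1FIFOCTR;
 *	fifo->tx_slave.shdma_slave.slave_id = usbhs_get_dparam(priv, d1_tx_id);
 *	fifo->rx_slave.shdma_slave.slave_id = usbhs_get_dparam(priv, d1_rx_id);
 *	usbhsf_dma_init(priv, fifo, 1);
 */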

int usbhs_fifo_probe(struct usbhs_priv *priv)
{
	struct usbhs_fifo *fifo;

	/* CFIFO */
	fifo = usbhsf_get_cfifo(priv);
	fifo->name = "CFIFO";
	fifo->port = CFIFO;
	fifo->sel = CFIFOSEL;
	fifo->ctr = CFIFOCTR;

	/* DFIFO */
	USBHS_DFIFO_INIT(priv, fifo, 0);
	USBHS_DFIFO_INIT(priv, fifo, 1);
	USBHS_DFIFO_INIT_NO_PORT(priv, fifo, 2);
	USBHS_DFIFO_INIT_NO_PORT(priv, fifo, 3);

	return 0;
}

void usbhs_fifo_remove(struct usbhs_priv *priv)
{
	struct usbhs_fifo *fifo;
	int i;

	usbhs_for_each_dfifo(priv, fifo, i)
		usbhsf_dma_quit(priv, fifo);
}