/* $FreeBSD$ */
/*-
 * Copyright (c) 2013 Hans Petter Selasky. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <los_memory.h>
#include "los_vm_iomap.h"
#include "los_vm_map.h"
#include <user_copy.h>

#include "implementation/global_implementation.h"

#if USB_HAVE_BUSDMA
static void usb_pc_common_mem_cb(struct usb_page_cache *pc,
    void *vaddr, uint32_t length);
#endif

void
usb_dma_cache_invalid(void *addr, unsigned int size)
{
    UINTPTR start = (UINTPTR)addr & ~(USB_CACHE_ALIGN_SIZE - 1);
    UINTPTR end = (UINTPTR)addr + size;

    end = ALIGN(end, USB_CACHE_ALIGN_SIZE);
    DCacheInvRange(start, end);
}

void
usb_dma_cache_flush(void *addr, unsigned int size)
{
    UINTPTR start = (UINTPTR)addr & ~(USB_CACHE_ALIGN_SIZE - 1);
    UINTPTR end = (UINTPTR)addr + size;

    end = ALIGN(end, USB_CACHE_ALIGN_SIZE);
    DCacheFlushRange(start, end);
}
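
/*
 * Note: both cache helpers above widen the requested range to whole
 * cache lines before calling the D-cache maintenance primitives.
 * A small worked example, assuming USB_CACHE_ALIGN_SIZE is 64 bytes
 * (the real value is platform dependent):
 *
 *   addr  = 0x40001234, size = 0x20
 *   start = 0x40001234 & ~63        = 0x40001200
 *   end   = ALIGN(0x40001254, 64)   = 0x40001280
 *
 * so the operation covers two full cache lines.
 */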

/*------------------------------------------------------------------------*
 * usbd_get_page - lookup DMA-able memory for the given offset
 *
 * NOTE: Only call this function when the "page_cache" structure has
 * been properly initialized!
 *------------------------------------------------------------------------*/
void
usbd_get_page(struct usb_page_cache *pc, usb_frlength_t offset,
    struct usb_page_search *res)
{
#if USB_HAVE_BUSDMA
    struct usb_page *page;

    if (pc->page_start) {
        /* Case 1 - something has been loaded into DMA */

        if (pc->buffer) {

            /* Case 1a - Kernel Virtual Address */

            res->buffer = USB_ADD_BYTES(pc->buffer, offset);
        }
        offset += pc->page_offset_buf;

        /* compute destination page */

        page = pc->page_start;

        if (pc->ismultiseg) {

            page += (offset / USB_PAGE_SIZE);

            offset %= USB_PAGE_SIZE;

            res->length = USB_PAGE_SIZE - offset;
            res->physaddr = page->physaddr + offset;
        } else {
            res->length = (usb_size_t)-1;
            res->physaddr = page->physaddr + offset;
        }
        if (!pc->buffer) {

            /* Case 1b - Non Kernel Virtual Address */

            res->buffer = USB_ADD_BYTES(page->buffer, offset);
        }
        return;
    }
#endif
    /* Case 2 - Plain PIO */

    res->buffer = USB_ADD_BYTES(pc->buffer, offset);
    res->length = (usb_size_t)-1;
#if USB_HAVE_BUSDMA
    res->physaddr = 0;
#endif
}

/*------------------------------------------------------------------------*
 * usbd_copy_in - copy directly to DMA-able memory
 *------------------------------------------------------------------------*/
void
usbd_copy_in(struct usb_page_cache *cache, usb_frlength_t offset,
    const void *ptr, usb_frlength_t len)
{
    struct usb_page_search buf_res;
    int ret;

    while (len != 0) {

        usbd_get_page(cache, offset, &buf_res);

        if (buf_res.length > len) {
            buf_res.length = len;
        }
        ret = memcpy_s(buf_res.buffer, buf_res.length, ptr, buf_res.length);
        if (ret != EOK) {
            return;
        }

        offset += buf_res.length;
        len -= buf_res.length;
        ptr = USB_ADD_BYTES(ptr, buf_res.length);
    }
}
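
/*
 * Illustrative usage sketch (not part of this file): a typical caller
 * copies a request structure into the first frame buffer of a transfer
 * before submitting it, e.g.
 *
 *   struct usb_device_request req;
 *   // ... fill in req ...
 *   usbd_copy_in(xfer->frbuffers, 0, &req, sizeof(req));
 *
 * The loop above splits the copy at page boundaries, so callers do not
 * need to care whether the page cache is backed by one page or many.
 */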

/*------------------------------------------------------------------------*
 * usbd_copy_out - copy directly from DMA-able memory
 *------------------------------------------------------------------------*/
void
usbd_copy_out(struct usb_page_cache *cache, usb_frlength_t offset,
    void *ptr, usb_frlength_t len)
{
    struct usb_page_search res;

    while (len != 0) {

        usbd_get_page(cache, offset, &res);

        if (res.length > len) {
            res.length = len;
        }
        (void)memcpy_s(ptr, len, res.buffer, res.length);

        offset += res.length;
        len -= res.length;
        ptr = USB_ADD_BYTES(ptr, res.length);
    }
}

int
copyin(const void *uaddr, void *kaddr, size_t len)
{
    size_t ret = LOS_ArchCopyFromUser(kaddr, uaddr, len);
    return ret ? EFAULT : 0;
}

int
copyout(const void *kaddr, void *uaddr, size_t len)
{
    size_t ret = LOS_ArchCopyToUser(uaddr, kaddr, len);
    return ret ? EFAULT : 0;
}

/* In user mode, the src buffer comes from user space */
int
usbd_copy_from_user(void *dest, uint32_t dest_len, const void *src, uint32_t src_len)
{
    int ret;

    if (!LOS_IsUserAddressRange((vaddr_t)(UINTPTR)src, src_len)) {
        ret = memcpy_s(dest, dest_len, src, src_len);
    } else {
        ret = ((dest_len >= src_len) ? LOS_ArchCopyFromUser(dest, src, src_len) : ERANGE_AND_RESET);
    }

    return ret ? EFAULT : 0;
}

/* In user mode, the dest buffer comes from user space */
int
usbd_copy_to_user(void *dest, uint32_t dest_len, const void *src, uint32_t src_len)
{
    int ret;

    if (!LOS_IsUserAddressRange((vaddr_t)(UINTPTR)dest, dest_len)) {
        ret = memcpy_s(dest, dest_len, src, src_len);
    } else {
        ret = ((dest_len >= src_len) ? LOS_ArchCopyToUser(dest, src, src_len) : ERANGE_AND_RESET);
    }

    return ret ? EFAULT : 0;
}
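
/*
 * Illustrative note: the two helpers above accept either a kernel or a
 * user-space buffer.  If the address range is not a user range, a plain
 * memcpy_s() is used; otherwise the architecture user-copy routine is
 * called, and ERANGE_AND_RESET is returned when the destination is too
 * small.  A hypothetical ioctl handler might use them like this:
 *
 *   if (usbd_copy_from_user(&cmd, sizeof(cmd), user_ptr, sizeof(cmd)) != 0) {
 *       return (EFAULT);
 *   }
 */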

/*------------------------------------------------------------------------*
 * usbd_frame_zero - zero DMA-able memory
 *------------------------------------------------------------------------*/
void
usbd_frame_zero(struct usb_page_cache *cache, usb_frlength_t offset,
    usb_frlength_t len)
{
    struct usb_page_search res;

    while (len != 0) {

        usbd_get_page(cache, offset, &res);

        if (res.length > len) {
            res.length = len;
        }
        (void)memset_s(res.buffer, res.length, 0, res.length);

        offset += res.length;
        len -= res.length;
    }
}

#if USB_HAVE_BUSDMA
/*------------------------------------------------------------------------*
 * usb_pc_common_mem_cb - BUS-DMA callback function
 *------------------------------------------------------------------------*/
static void
usb_pc_common_mem_cb(struct usb_page_cache *pc, void *dma_handle, uint32_t length)
{
    struct usb_page *pg;
    usb_size_t rem;
    bus_size_t off;
    bus_addr_t phys = (bus_addr_t)(UINTPTR)dma_handle;

    pg = pc->page_start;
    pg->physaddr = phys & ~(USB_PAGE_SIZE - 1);
    rem = phys & (USB_PAGE_SIZE - 1);
    pc->page_offset_buf = rem;
    pc->page_offset_end += rem;
    length += rem;

    for (off = USB_PAGE_SIZE; off < length; off += USB_PAGE_SIZE) {
        pg++;
        pg->physaddr = (phys + off) & ~(USB_PAGE_SIZE - 1);
    }
}
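
/*
 * Worked example (illustrative, assuming USB_PAGE_SIZE is 4096): with
 * dma_handle = 0x20001100 and length = 0x2000, the callback records
 *
 *   pg[0].physaddr = 0x20001000   (rem = 0x100 kept in page_offset_buf)
 *   pg[1].physaddr = 0x20002000
 *   pg[2].physaddr = 0x20003000
 *
 * so usbd_get_page() can later translate any offset inside the buffer
 * to a physical address even when the mapping is not page aligned.
 */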

/*------------------------------------------------------------------------*
 * usb_pc_alloc_mem - allocate DMA'able memory
 *
 * Returns:
 * 0: Success
 * Else: Failure
 *------------------------------------------------------------------------*/
uint8_t
usb_pc_alloc_mem(struct usb_page_cache *pc, struct usb_page *pg,
    usb_size_t size, usb_size_t align)
{
    void *ptr;
    DMA_ADDR_T dma_handle;

    /* allocate zeroed memory */
    if (align < USB_CACHE_ALIGN_SIZE) {
        ptr = LOS_DmaMemAlloc(&dma_handle, size, USB_CACHE_ALIGN_SIZE, DMA_NOCACHE);
    } else {
        ptr = LOS_DmaMemAlloc(&dma_handle, size, align, DMA_NOCACHE);
    }
    if (ptr == NULL)
        goto error;

    (void)memset_s(ptr, size, 0, size);
    /* setup page cache */
    pc->buffer = (uint8_t *)ptr;
    pc->page_start = pg;
    pc->page_offset_buf = 0;
    pc->page_offset_end = size;
    pc->map = NULL;
    pc->tag = (bus_dma_tag_t)ptr;
    pc->ismultiseg = (align == 1);

    /* compute physical address */
    usb_pc_common_mem_cb(pc, (void *)(UINTPTR)dma_handle, size);

    usb_pc_cpu_flush(pc);
    return (0);

error:
    /* reset most of the page cache */
    pc->buffer = NULL;
    pc->page_start = NULL;
    pc->page_offset_buf = 0;
    pc->page_offset_end = 0;
    pc->map = NULL;
    pc->tag = NULL;
    return (1);
}
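
/*
 * Illustrative pairing (a sketch, not code used elsewhere in this file):
 * a controller driver would typically allocate a descriptor pool once
 * and release it on detach, e.g.
 *
 *   if (usb_pc_alloc_mem(pc, pg, size, 1) != 0) {
 *       return (USB_ERR_NOMEM);     // allocation failed
 *   }
 *   // ... use the buffer ...
 *   usb_pc_free_mem(pc);            // NULL safe, see below
 *
 * Passing align == 1 marks the page cache as multisegment, so
 * usbd_get_page() will translate offsets page by page.
 */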

/*------------------------------------------------------------------------*
 * usb_pc_free_mem - free DMA memory
 *
 * This function is NULL safe.
 *------------------------------------------------------------------------*/
void
usb_pc_free_mem(struct usb_page_cache *pc)
{
    if ((pc != NULL) && (pc->buffer != NULL)) {
        LOS_DmaMemFree(pc->tag);
        pc->buffer = NULL;
    }
}

/*------------------------------------------------------------------------*
 * usb_pc_load_mem - load virtual memory into DMA
 *
 * Return values:
 * 0: Success
 * Else: Error
 *------------------------------------------------------------------------*/
uint8_t
usb_pc_load_mem(struct usb_page_cache *pc, usb_size_t size, uint8_t data_sync)
{
    /* setup page cache */
    pc->page_offset_buf = 0;
    pc->page_offset_end = size;
    pc->ismultiseg = 1;

    mtx_assert(pc->tag_parent->mtx, MA_OWNED);

    if (size > 0) {
        /* compute physical address */
#if defined (LOSCFG_DRIVERS_HDF_USB_DDK_HOST) || defined (LOSCFG_DRIVERS_HDF_USB_DDK_DEVICE)
        usb_pc_common_mem_cb(pc, (void *)VMM_TO_UNCACHED_ADDR((unsigned long)pc->buffer), size);
#else
        usb_pc_common_mem_cb(pc, (void *)(UINTPTR)LOS_DmaVaddrToPaddr(pc->buffer), size);
#endif
    }
    if (data_sync == 0) {
        /*
         * Call callback so that refcount is decremented
         * properly:
         */
        pc->tag_parent->dma_error = 0;
        (pc->tag_parent->func) (pc->tag_parent);
    }
    return (0);
}

/*------------------------------------------------------------------------*
 * usb_pc_cpu_invalidate - invalidate CPU cache
 *------------------------------------------------------------------------*/
void
usb_pc_cpu_invalidate(struct usb_page_cache *pc)
{
    if (pc->page_offset_end == pc->page_offset_buf) {
        /* nothing has been loaded into this page cache! */
        return;
    }
    usb_dma_cache_invalid(pc->buffer, pc->page_offset_end - pc->page_offset_buf);
}

/*------------------------------------------------------------------------*
 * usb_pc_cpu_flush - flush CPU cache
 *------------------------------------------------------------------------*/
void
usb_pc_cpu_flush(struct usb_page_cache *pc)
{
    if (pc->page_offset_end == pc->page_offset_buf) {
        /* nothing has been loaded into this page cache! */
        return;
    }
    usb_dma_cache_flush(pc->buffer, pc->page_offset_end - pc->page_offset_buf);
}

/*------------------------------------------------------------------------*
 * usb_pc_dmamap_create - create a DMA map
 *
 * Returns:
 * 0: Success
 * Else: Failure
 *------------------------------------------------------------------------*/
uint8_t
usb_pc_dmamap_create(struct usb_page_cache *pc, usb_size_t size)
{
    return (0);    /* NOP, success */
}

/*------------------------------------------------------------------------*
 * usb_pc_dmamap_destroy
 *
 * This function is NULL safe.
 *------------------------------------------------------------------------*/
void
usb_pc_dmamap_destroy(struct usb_page_cache *pc)
{
    /* NOP */
}

/*------------------------------------------------------------------------*
 * usb_dma_tag_setup - initialise USB DMA tags
 *------------------------------------------------------------------------*/
void
usb_dma_tag_setup(struct usb_dma_parent_tag *udpt,
    struct usb_dma_tag *udt, bus_dma_tag_t dmat,
    struct mtx *mtx, usb_dma_callback_t *func,
    uint8_t ndmabits, uint8_t nudt)
{
    (void)memset_s(udpt, sizeof(*udpt), 0, sizeof(*udpt));

    /* sanity checking */
    if ((nudt == 0) ||
        (ndmabits == 0) ||
        (mtx == NULL)) {
        /* something is corrupt */
        return;
    }
    /* initialise condition variable */
    cv_init(udpt->cv, "USB DMA CV");

    /* store some information */
    udpt->mtx = mtx;
    udpt->func = func;
    udpt->tag = dmat;
    udpt->utag_first = udt;
    udpt->utag_max = nudt;
    udpt->dma_bits = ndmabits;

    while (nudt--) {
        (void)memset_s(udt, sizeof(*udt), 0, sizeof(*udt));
        udt->tag_parent = udpt;
        udt++;
    }
}

/*------------------------------------------------------------------------*
 * usb_dma_tag_unsetup - factored out code
 *------------------------------------------------------------------------*/
void
usb_dma_tag_unsetup(struct usb_dma_parent_tag *udpt)
{
    struct usb_dma_tag *udt;
    uint8_t nudt;

    udt = udpt->utag_first;
    nudt = udpt->utag_max;

    while (nudt--) {
        udt->align = 0;
        udt++;
    }

    if (udpt->utag_max) {
        /* destroy the condition variable */
        cv_destroy(udpt->cv);
    }
}

/*------------------------------------------------------------------------*
 * usb_bdma_work_loop
 *
 * This function handles loading of virtual buffers into DMA and is
 * only called when "dma_refcount" is zero.
 *------------------------------------------------------------------------*/
void
usb_bdma_work_loop(struct usb_xfer_queue *pq)
{
    struct usb_xfer_root *info;
    struct usb_xfer *xfer;
    usb_frcount_t nframes;

    xfer = pq->curr;
    info = xfer->xroot;

    mtx_assert(info->xfer_mtx, MA_OWNED);

    if (xfer->error) {
        /* some error happened */
        USB_BUS_LOCK(info->bus);
        usbd_transfer_done(xfer, USB_ERR_NORMAL_COMPLETION);
        USB_BUS_UNLOCK(info->bus);
        return;
    }
    if (!xfer->flags_int.bdma_setup) {
        struct usb_page *pg;
        usb_frlength_t frlength_0;
        uint8_t isread;

        xfer->flags_int.bdma_setup = 1;

        /* reset BUS-DMA load state */

        info->dma_error = 0;

        if (xfer->flags_int.isochronous_xfr) {
            /* only one frame buffer */
            nframes = 1;
            frlength_0 = xfer->sumlen;
        } else {
            /* can be multiple frame buffers */
            nframes = xfer->nframes;
            frlength_0 = xfer->frlengths[0];
        }

        /*
         * Set DMA direction first. This is needed to
         * select the correct cache invalidate and cache
         * flush operations.
         */
        isread = USB_GET_DATA_ISREAD(xfer);
        pg = xfer->dma_page_ptr;

        if (xfer->flags_int.control_xfr &&
            xfer->flags_int.control_hdr) {
            /* special case */
            if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
                /* The device controller writes to memory */
                xfer->frbuffers[0].isread = 1;
            } else {
                /* The host controller reads from memory */
                xfer->frbuffers[0].isread = 0;
            }
        } else {
            /* default case */
            xfer->frbuffers[0].isread = isread;
        }

        /*
         * Setup the "page_start" pointer which points to an array of
         * USB pages where information about the physical address of a
         * page will be stored. Also initialise the "isread" field of
         * the USB page caches.
         */
        xfer->frbuffers[0].page_start = pg;

        info->dma_nframes = nframes;
        info->dma_currframe = 0;
        info->dma_frlength_0 = frlength_0;

        pg += (frlength_0 / USB_PAGE_SIZE);
        pg += 2;

        while (--nframes > 0) {
            xfer->frbuffers[nframes].isread = isread;
            xfer->frbuffers[nframes].page_start = pg;

            pg += (xfer->frlengths[nframes] / USB_PAGE_SIZE);
            pg += 2;
        }
    }
    if (info->dma_error) {
        USB_BUS_LOCK(info->bus);
        usbd_transfer_done(xfer, USB_ERR_DMA_LOAD_FAILED);
        USB_BUS_UNLOCK(info->bus);
        return;
    }
    if (info->dma_currframe != info->dma_nframes) {

        if (info->dma_currframe == 0) {
            /* special case */
            (void)usb_pc_load_mem(xfer->frbuffers,
                info->dma_frlength_0, 0);
        } else {
            /* default case */
            nframes = info->dma_currframe;
            (void)usb_pc_load_mem(xfer->frbuffers + nframes,
                xfer->frlengths[nframes], 0);
        }

        /* advance frame index */
        info->dma_currframe++;

        return;
    }
    /* go ahead */
    usb_bdma_pre_sync(xfer);

    /* start loading next USB transfer, if any */
    usb_command_wrapper(pq, NULL);

    /* finally start the hardware */
    usbd_pipe_enter(xfer);
}
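
/*
 * Note on the flow above (an explanatory sketch, not new behaviour):
 * each pass through usb_bdma_work_loop() loads at most one frame via
 * usb_pc_load_mem() and then returns.  The parent tag callback ends up
 * in usb_bdma_done_event(), which re-enters the work loop through
 * usb_command_wrapper(), so a transfer with N frame buffers is loaded
 * over N passes before usb_bdma_pre_sync() and usbd_pipe_enter() run.
 */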

/*------------------------------------------------------------------------*
 * usb_bdma_done_event
 *
 * This function is called when the BUS-DMA has loaded virtual memory
 * into DMA, if any.
 *------------------------------------------------------------------------*/
void
usb_bdma_done_event(struct usb_dma_parent_tag *udpt)
{
    struct usb_xfer_root *info;

    info = USB_DMATAG_TO_XROOT(udpt);

    mtx_assert(info->xfer_mtx, MA_OWNED);

    /* copy error */
    info->dma_error = udpt->dma_error;

    /* enter workloop again */
    usb_command_wrapper(&info->dma_q,
        info->dma_q.curr);
}

static usb_frcount_t
usb_bdma_frame_num(struct usb_xfer *xfer)
{
    if (xfer->flags_int.isochronous_xfr) {
        /* only one frame buffer */
        return (1);
    } else {
        /* can be multiple frame buffers */
        return (xfer->nframes);
    }
}

/*------------------------------------------------------------------------*
 * usb_bdma_pre_sync
 *
 * This function handles the DMA synchronisation that must be done
 * before a USB transfer is started.
 *------------------------------------------------------------------------*/
void
usb_bdma_pre_sync(struct usb_xfer *xfer)
{
    struct usb_page_cache *pc;
    usb_frcount_t nframes;

    nframes = usb_bdma_frame_num(xfer);
    pc = xfer->frbuffers;

    while (nframes--) {

        if (pc->isread) {
            usb_pc_cpu_invalidate(pc);
        } else {
            usb_pc_cpu_flush(pc);
        }
        pc++;
    }
}

/*------------------------------------------------------------------------*
 * usb_bdma_post_sync
 *
 * This function handles the DMA synchronisation that must be done
 * after a USB transfer is complete.
 *------------------------------------------------------------------------*/
void
usb_bdma_post_sync(struct usb_xfer *xfer)
{
    struct usb_page_cache *pc;
    usb_frcount_t nframes;

    nframes = usb_bdma_frame_num(xfer);
    pc = xfer->frbuffers;

    while (nframes--) {
        if (pc->isread) {
            usb_pc_cpu_invalidate(pc);
        }
        pc++;
    }
}
#endif
