1 /*- 2 * Copyright (c) 2013 Hans Petter Selasky. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 1. Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. 9 * 2. Redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution. 12 * 13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 16 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 23 * SUCH DAMAGE. 24 */ 25 26 #include <los_memory.h> 27 #include "los_vm_iomap.h" 28 #include "los_vm_map.h" 29 #include <user_copy.h> 30 31 #include "implementation/global_implementation.h" 32 33 #if USB_HAVE_BUSDMA 34 static void usb_pc_common_mem_cb(struct usb_page_cache *pc, 35 void *vaddr, uint32_t length); 36 #endif 37 38 void 39 usb_dma_cache_invalid(void *addr, unsigned int size) 40 { 41 UINTPTR start = (UINTPTR)addr & ~(USB_CACHE_ALIGN_SIZE - 1); 42 UINTPTR end = (UINTPTR)addr + size; 43 44 end = ALIGN(end, USB_CACHE_ALIGN_SIZE); 45 DCacheInvRange(start, end); 46 } 47 48 void 49 usb_dma_cache_flush(void *addr, unsigned int size) 50 { 51 UINTPTR start = (UINTPTR)addr & ~(USB_CACHE_ALIGN_SIZE - 1); 52 UINTPTR end = (UINTPTR)addr + size; 53 54 end = ALIGN(end, USB_CACHE_ALIGN_SIZE); 55 DCacheFlushRange(start, end); 56 } 57 58 /*------------------------------------------------------------------------* 59 * usbd_get_page - lookup DMA-able memory for the given offset 60 * 61 * NOTE: Only call this function when the "page_cache" structure has 62 * been properly initialized ! 63 *------------------------------------------------------------------------*/ 64 void 65 usbd_get_page(struct usb_page_cache *pc, usb_frlength_t offset, 66 struct usb_page_search *res) 67 { 68 #if USB_HAVE_BUSDMA 69 struct usb_page *page; 70 if (pc->page_start) { 71 /* Case 1 - something has been loaded into DMA */ 72 73 if (pc->buffer) { 74 75 /* Case 1a - Kernel Virtual Address */ 76 77 res->buffer = USB_ADD_BYTES(pc->buffer, offset); 78 } 79 offset += pc->page_offset_buf; 80 81 /* compute destination page */ 82 83 page = pc->page_start; 84 85 if (pc->ismultiseg) { 86 87 page += (offset / USB_PAGE_SIZE); 88 89 offset %= USB_PAGE_SIZE; 90 91 res->length = USB_PAGE_SIZE - offset; 92 res->physaddr = page->physaddr + offset; 93 } else { 94 res->length = (usb_size_t)-1; 95 res->physaddr = page->physaddr + offset; 96 } 97 if (!pc->buffer) { 98 99 /* Case 1b - Non Kernel Virtual Address */ 100 101 res->buffer = USB_ADD_BYTES(page->buffer, offset); 102 } 103 return; 104 } 105 #endif 106 /* Case 2 - Plain PIO */ 107 108 res->buffer = USB_ADD_BYTES(pc->buffer, offset); 109 res->length = (usb_size_t)-1; 110 #if USB_HAVE_BUSDMA 111 res->physaddr = 0; 112 #endif 113 } 114 115 /*------------------------------------------------------------------------* 116 * usbd_copy_in - copy directly to DMA-able memory 117 *------------------------------------------------------------------------*/ 118 void 119 usbd_copy_in(struct usb_page_cache *cache, usb_frlength_t offset, 120 const void *ptr, usb_frlength_t len) 121 { 122 struct usb_page_search buf_res; 123 int ret; 124 125 while (len != 0) { 126 127 usbd_get_page(cache, offset, &buf_res); 128 129 if (buf_res.length > len) { 130 buf_res.length = len; 131 } 132 ret = memcpy_s(buf_res.buffer, buf_res.length, ptr, buf_res.length); 133 if (ret != EOK) { 134 return; 135 } 136 137 offset += buf_res.length; 138 len -= buf_res.length; 139 ptr = USB_ADD_BYTES(ptr, buf_res.length); 140 } 141 } 142 143 /*------------------------------------------------------------------------* 144 * usbd_copy_out - copy directly from DMA-able memory 145 *------------------------------------------------------------------------*/ 146 void 147 usbd_copy_out(struct usb_page_cache *cache, usb_frlength_t offset, 148 void *ptr, usb_frlength_t len) 149 { 150 struct usb_page_search res; 151 152 while (len != 0) { 153 154 usbd_get_page(cache, offset, &res); 155 156 if (res.length > len) { 157 res.length = len; 158 } 159 (void)memcpy_s(ptr, len, res.buffer, res.length); 160 161 offset += res.length; 162 len -= res.length; 163 ptr = USB_ADD_BYTES(ptr, res.length); 164 } 165 } 166 167 int 168 copyin(const void *uaddr, void *kaddr, size_t len) 169 { 170 size_t ret = LOS_ArchCopyFromUser(kaddr, uaddr, len); 171 return ret ? EFAULT : 0; 172 } 173 174 int 175 copyout(const void *kaddr, void *uaddr, size_t len) 176 { 177 size_t ret = LOS_ArchCopyToUser(uaddr, kaddr, len); 178 return ret ? EFAULT : 0; 179 } 180 181 /* In user mode, the src buffer is from user */ 182 int 183 usbd_copy_from_user(void *dest, uint32_t dest_len, const void *src, uint32_t src_len) 184 { 185 int ret; 186 187 if (!LOS_IsUserAddressRange((vaddr_t)(UINTPTR)src, src_len)) { 188 ret = memcpy_s(dest, dest_len, src, src_len); 189 } else { 190 ret = ((dest_len >= src_len) ? LOS_ArchCopyFromUser(dest, src, src_len) : ERANGE_AND_RESET); 191 } 192 193 return ret ? EFAULT : 0; 194 } 195 196 /* In user mode, the dest buffer is from user */ 197 int 198 usbd_copy_to_user(void *dest, uint32_t dest_len, const void *src, uint32_t src_len) 199 { 200 int ret; 201 202 if (!LOS_IsUserAddressRange((vaddr_t)(UINTPTR)dest, dest_len)) { 203 ret = memcpy_s(dest, dest_len, src, src_len); 204 } else { 205 ret = ((dest_len >= src_len) ? LOS_ArchCopyToUser(dest, src, src_len) : ERANGE_AND_RESET); 206 } 207 208 return ret ? EFAULT : 0; 209 } 210 211 /*------------------------------------------------------------------------* 212 * usbd_frame_zero - zero DMA-able memory 213 *------------------------------------------------------------------------*/ 214 void 215 usbd_frame_zero(struct usb_page_cache *cache, usb_frlength_t offset, 216 usb_frlength_t len) 217 { 218 struct usb_page_search res; 219 220 while (len != 0) { 221 222 usbd_get_page(cache, offset, &res); 223 224 if (res.length > len) { 225 res.length = len; 226 } 227 (void)memset_s(res.buffer, res.length, 0, res.length); 228 229 offset += res.length; 230 len -= res.length; 231 } 232 } 233 234 #if USB_HAVE_BUSDMA 235 /*------------------------------------------------------------------------* 236 * usb_pc_common_mem_cb - BUS-DMA callback function 237 *------------------------------------------------------------------------*/ 238 static void 239 usb_pc_common_mem_cb(struct usb_page_cache *pc, void *dma_handle, uint32_t length) 240 { 241 struct usb_page *pg; 242 usb_size_t rem; 243 bus_size_t off; 244 bus_addr_t phys = (bus_addr_t)(UINTPTR)dma_handle; 245 246 pg = pc->page_start; 247 pg->physaddr = phys & ~(USB_PAGE_SIZE - 1); 248 rem = phys & (USB_PAGE_SIZE - 1); 249 pc->page_offset_buf = rem; 250 pc->page_offset_end += rem; 251 length += rem; 252 253 for (off = USB_PAGE_SIZE; off < length; off += USB_PAGE_SIZE) { 254 pg++; 255 pg->physaddr = (phys + off) & ~(USB_PAGE_SIZE - 1); 256 } 257 } 258 259 /*------------------------------------------------------------------------* 260 * usb_pc_alloc_mem - allocate DMA'able memory 261 * 262 * Returns: 263 * 0: Success 264 * Else: Failure 265 *------------------------------------------------------------------------*/ 266 uint8_t 267 usb_pc_alloc_mem(struct usb_page_cache *pc, struct usb_page *pg, 268 usb_size_t size, usb_size_t align) 269 { 270 void *ptr; 271 DMA_ADDR_T dma_handle; 272 273 /* allocate zeroed memory */ 274 if (align < USB_CACHE_ALIGN_SIZE) { 275 ptr = LOS_DmaMemAlloc(&dma_handle, size, USB_CACHE_ALIGN_SIZE, DMA_NOCACHE); 276 } else { 277 ptr = LOS_DmaMemAlloc(&dma_handle, size, align, DMA_NOCACHE); 278 } 279 if (ptr == NULL) 280 goto error; 281 282 (void)memset_s(ptr, size, 0, size); 283 /* setup page cache */ 284 pc->buffer = (uint8_t *)ptr; 285 pc->page_start = pg; 286 pc->page_offset_buf = 0; 287 pc->page_offset_end = size; 288 pc->map = NULL; 289 pc->tag = (bus_dma_tag_t)ptr; 290 pc->ismultiseg = (align == 1); 291 292 /* compute physical address */ 293 usb_pc_common_mem_cb(pc, (void *)(UINTPTR)dma_handle, size); 294 295 usb_pc_cpu_flush(pc); 296 return (0); 297 298 error: 299 /* reset most of the page cache */ 300 pc->buffer = NULL; 301 pc->page_start = NULL; 302 pc->page_offset_buf = 0; 303 pc->page_offset_end = 0; 304 pc->map = NULL; 305 pc->tag = NULL; 306 return (1); 307 } 308 309 /*------------------------------------------------------------------------* 310 * usb_pc_free_mem - free DMA memory 311 * 312 * This function is NULL safe. 313 *------------------------------------------------------------------------*/ 314 void 315 usb_pc_free_mem(struct usb_page_cache *pc) 316 { 317 if ((pc != NULL) && (pc->buffer != NULL)) { 318 LOS_DmaMemFree(pc->tag); 319 pc->buffer = NULL; 320 } 321 } 322 323 /*------------------------------------------------------------------------* 324 * usb_pc_load_mem - load virtual memory into DMA 325 * 326 * Return values: 327 * 0: Success 328 * Else: Error 329 *------------------------------------------------------------------------*/ 330 uint8_t 331 usb_pc_load_mem(struct usb_page_cache *pc, usb_size_t size, uint8_t data_sync) 332 { 333 /* setup page cache */ 334 pc->page_offset_buf = 0; 335 pc->page_offset_end = size; 336 pc->ismultiseg = 1; 337 338 mtx_assert(pc->tag_parent->mtx, MA_OWNED); 339 340 if (size > 0) { 341 /* compute physical address */ 342 #if defined (LOSCFG_DRIVERS_HDF_USB_DDK_HOST) || defined (LOSCFG_DRIVERS_HDF_USB_DDK_DEVICE) 343 usb_pc_common_mem_cb(pc, (void *)VMM_TO_UNCACHED_ADDR((unsigned long)pc->buffer), size); 344 #else 345 usb_pc_common_mem_cb(pc, (void *)(UINTPTR)LOS_DmaVaddrToPaddr(pc->buffer), size); 346 #endif 347 } 348 if (data_sync == 0) { 349 /* 350 * Call callback so that refcount is decremented 351 * properly: 352 */ 353 pc->tag_parent->dma_error = 0; 354 (pc->tag_parent->func) (pc->tag_parent); 355 } 356 return (0); 357 } 358 359 /*------------------------------------------------------------------------* 360 * usb_pc_cpu_invalidate - invalidate CPU cache 361 *------------------------------------------------------------------------*/ 362 void 363 usb_pc_cpu_invalidate(struct usb_page_cache *pc) 364 { 365 if (pc->page_offset_end == pc->page_offset_buf) { 366 /* nothing has been loaded into this page cache! */ 367 return; 368 } 369 usb_dma_cache_invalid(pc->buffer, pc->page_offset_end - pc->page_offset_buf); 370 } 371 372 /*------------------------------------------------------------------------* 373 * usb_pc_cpu_flush - flush CPU cache 374 *------------------------------------------------------------------------*/ 375 void 376 usb_pc_cpu_flush(struct usb_page_cache *pc) 377 { 378 if (pc->page_offset_end == pc->page_offset_buf) { 379 /* nothing has been loaded into this page cache! */ 380 return; 381 } 382 usb_dma_cache_flush(pc->buffer, pc->page_offset_end - pc->page_offset_buf); 383 } 384 385 /*------------------------------------------------------------------------* 386 * usb_pc_dmamap_create - create a DMA map 387 * 388 * Returns: 389 * 0: Success 390 * Else: Failure 391 *------------------------------------------------------------------------*/ 392 uint8_t 393 usb_pc_dmamap_create(struct usb_page_cache *pc, usb_size_t size) 394 { 395 return (0); /* NOP, success */ 396 } 397 398 /*------------------------------------------------------------------------* 399 * usb_pc_dmamap_destroy 400 * 401 * This function is NULL safe. 402 *------------------------------------------------------------------------*/ 403 void 404 usb_pc_dmamap_destroy(struct usb_page_cache *pc) 405 { 406 /* NOP */ 407 } 408 409 /*------------------------------------------------------------------------* 410 * usb_dma_tag_setup - initialise USB DMA tags 411 *------------------------------------------------------------------------*/ 412 void 413 usb_dma_tag_setup(struct usb_dma_parent_tag *udpt, 414 struct usb_dma_tag *udt, bus_dma_tag_t dmat, 415 struct mtx *mtx, usb_dma_callback_t *func, 416 uint8_t ndmabits, uint8_t nudt) 417 { 418 (void)memset_s(udpt, sizeof(*udpt), 0, sizeof(*udpt)); 419 420 /* sanity checking */ 421 if ((nudt == 0) || 422 (ndmabits == 0) || 423 (mtx == NULL)) { 424 /* something is corrupt */ 425 return; 426 } 427 /* initialise condition variable */ 428 cv_init(udpt->cv, "USB DMA CV"); 429 430 /* store some information */ 431 udpt->mtx = mtx; 432 udpt->func = func; 433 udpt->tag = dmat; 434 udpt->utag_first = udt; 435 udpt->utag_max = nudt; 436 udpt->dma_bits = ndmabits; 437 438 while (nudt--) { 439 (void)memset_s(udt, sizeof(*udt), 0, sizeof(*udt)); 440 udt->tag_parent = udpt; 441 udt++; 442 } 443 } 444 445 /*------------------------------------------------------------------------* 446 * usb_bus_tag_unsetup - factored out code 447 *------------------------------------------------------------------------*/ 448 void 449 usb_dma_tag_unsetup(struct usb_dma_parent_tag *udpt) 450 { 451 struct usb_dma_tag *udt; 452 uint8_t nudt; 453 454 udt = udpt->utag_first; 455 nudt = udpt->utag_max; 456 457 while (nudt--) { 458 udt->align = 0; 459 udt++; 460 } 461 462 if (udpt->utag_max) { 463 /* destroy the condition variable */ 464 cv_destroy(udpt->cv); 465 } 466 } 467 468 /*------------------------------------------------------------------------* 469 * usb_bdma_work_loop 470 * 471 * This function handles loading of virtual buffers into DMA and is 472 * only called when "dma_refcount" is zero. 473 *------------------------------------------------------------------------*/ 474 void 475 usb_bdma_work_loop(struct usb_xfer_queue *pq) 476 { 477 struct usb_xfer_root *info; 478 struct usb_xfer *xfer; 479 usb_frcount_t nframes; 480 481 xfer = pq->curr; 482 info = xfer->xroot; 483 484 mtx_assert(info->xfer_mtx, MA_OWNED); 485 486 if (xfer->error) { 487 /* some error happened */ 488 USB_BUS_LOCK(info->bus); 489 usbd_transfer_done(xfer, USB_ERR_NORMAL_COMPLETION); 490 USB_BUS_UNLOCK(info->bus); 491 return; 492 } 493 if (!xfer->flags_int.bdma_setup) { 494 struct usb_page *pg; 495 usb_frlength_t frlength_0; 496 uint8_t isread; 497 498 xfer->flags_int.bdma_setup = 1; 499 500 /* reset BUS-DMA load state */ 501 502 info->dma_error = 0; 503 504 if (xfer->flags_int.isochronous_xfr) { 505 /* only one frame buffer */ 506 nframes = 1; 507 frlength_0 = xfer->sumlen; 508 } else { 509 /* can be multiple frame buffers */ 510 nframes = xfer->nframes; 511 frlength_0 = xfer->frlengths[0]; 512 } 513 514 /* 515 * Set DMA direction first. This is needed to 516 * select the correct cache invalidate and cache 517 * flush operations. 518 */ 519 isread = USB_GET_DATA_ISREAD(xfer); 520 pg = xfer->dma_page_ptr; 521 522 if (xfer->flags_int.control_xfr && 523 xfer->flags_int.control_hdr) { 524 /* special case */ 525 if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) { 526 /* The device controller writes to memory */ 527 xfer->frbuffers[0].isread = 1; 528 } else { 529 /* The host controller reads from memory */ 530 xfer->frbuffers[0].isread = 0; 531 } 532 } else { 533 /* default case */ 534 xfer->frbuffers[0].isread = isread; 535 } 536 537 /* 538 * Setup the "page_start" pointer which points to an array of 539 * USB pages where information about the physical address of a 540 * page will be stored. Also initialise the "isread" field of 541 * the USB page caches. 542 */ 543 xfer->frbuffers[0].page_start = pg; 544 545 info->dma_nframes = nframes; 546 info->dma_currframe = 0; 547 info->dma_frlength_0 = frlength_0; 548 549 pg += (frlength_0 / USB_PAGE_SIZE); 550 pg += 2; 551 552 while (--nframes > 0) { 553 xfer->frbuffers[nframes].isread = isread; 554 xfer->frbuffers[nframes].page_start = pg; 555 556 pg += (xfer->frlengths[nframes] / USB_PAGE_SIZE); 557 pg += 2; 558 } 559 560 } 561 if (info->dma_error) { 562 USB_BUS_LOCK(info->bus); 563 usbd_transfer_done(xfer, USB_ERR_DMA_LOAD_FAILED); 564 USB_BUS_UNLOCK(info->bus); 565 return; 566 } 567 if (info->dma_currframe != info->dma_nframes) { 568 569 if (info->dma_currframe == 0) { 570 /* special case */ 571 (void)usb_pc_load_mem(xfer->frbuffers, 572 info->dma_frlength_0, 0); 573 } else { 574 /* default case */ 575 nframes = info->dma_currframe; 576 (void)usb_pc_load_mem(xfer->frbuffers + nframes, 577 xfer->frlengths[nframes], 0); 578 } 579 580 /* advance frame index */ 581 info->dma_currframe++; 582 583 return; 584 } 585 /* go ahead */ 586 usb_bdma_pre_sync(xfer); 587 588 /* start loading next USB transfer, if any */ 589 usb_command_wrapper(pq, NULL); 590 591 /* finally start the hardware */ 592 usbd_pipe_enter(xfer); 593 } 594 595 /*------------------------------------------------------------------------* 596 * usb_bdma_done_event 597 * 598 * This function is called when the BUS-DMA has loaded virtual memory 599 * into DMA, if any. 600 *------------------------------------------------------------------------*/ 601 void 602 usb_bdma_done_event(struct usb_dma_parent_tag *udpt) 603 { 604 struct usb_xfer_root *info; 605 606 info = USB_DMATAG_TO_XROOT(udpt); 607 608 mtx_assert(info->xfer_mtx, MA_OWNED); 609 610 /* copy error */ 611 info->dma_error = udpt->dma_error; 612 613 /* enter workloop again */ 614 usb_command_wrapper(&info->dma_q, 615 info->dma_q.curr); 616 } 617 618 static usb_frcount_t 619 usb_bdma_frame_num(struct usb_xfer *xfer) 620 { 621 if (xfer->flags_int.isochronous_xfr) { 622 /* only one frame buffer */ 623 return (1); 624 } else { 625 /* can be multiple frame buffers */ 626 return (xfer->nframes); 627 } 628 } 629 630 /*------------------------------------------------------------------------* 631 * usb_bdma_pre_sync 632 * 633 * This function handles DMA synchronisation that must be done before 634 * an USB transfer is started. 635 *------------------------------------------------------------------------*/ 636 void 637 usb_bdma_pre_sync(struct usb_xfer *xfer) 638 { 639 struct usb_page_cache *pc; 640 usb_frcount_t nframes; 641 642 nframes = usb_bdma_frame_num(xfer); 643 pc = xfer->frbuffers; 644 645 while (nframes--) { 646 647 if (pc->isread) { 648 usb_pc_cpu_invalidate(pc); 649 } else { 650 usb_pc_cpu_flush(pc); 651 } 652 pc++; 653 } 654 } 655 656 /*------------------------------------------------------------------------* 657 * usb_bdma_post_sync 658 * 659 * This function handles DMA synchronisation that must be done after 660 * an USB transfer is complete. 661 *------------------------------------------------------------------------*/ 662 void 663 usb_bdma_post_sync(struct usb_xfer *xfer) 664 { 665 struct usb_page_cache *pc; 666 usb_frcount_t nframes; 667 668 nframes = usb_bdma_frame_num(xfer); 669 pc = xfer->frbuffers; 670 671 while (nframes--) { 672 if (pc->isread) { 673 usb_pc_cpu_invalidate(pc); 674 } 675 pc++; 676 } 677 } 678 #endif 679