• Home
  • Raw
  • Download

Lines matching references to the identifier `dmabuf` (cross-reference search results: each entry shows the original source line number, the matching line of code, and the enclosing function it occurs in)

48 	struct dma_buf *dmabuf;  in dma_buf_release()  local
53 dmabuf = file->private_data; in dma_buf_release()
55 BUG_ON(dmabuf->vmapping_counter); in dma_buf_release()
65 BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active); in dma_buf_release()
67 dmabuf->ops->release(dmabuf); in dma_buf_release()
70 list_del(&dmabuf->list_node); in dma_buf_release()
73 if (dmabuf->resv == (struct reservation_object *)&dmabuf[1]) in dma_buf_release()
74 reservation_object_fini(dmabuf->resv); in dma_buf_release()
76 module_put(dmabuf->owner); in dma_buf_release()
77 kfree(dmabuf); in dma_buf_release()
83 struct dma_buf *dmabuf; in dma_buf_mmap_internal() local
88 dmabuf = file->private_data; in dma_buf_mmap_internal()
92 dmabuf->size >> PAGE_SHIFT) in dma_buf_mmap_internal()
95 return dmabuf->ops->mmap(dmabuf, vma); in dma_buf_mmap_internal()
100 struct dma_buf *dmabuf; in dma_buf_llseek() local
106 dmabuf = file->private_data; in dma_buf_llseek()
112 base = dmabuf->size; in dma_buf_llseek()
137 struct dma_buf *dmabuf; in dma_buf_poll() local
144 dmabuf = file->private_data; in dma_buf_poll()
145 if (!dmabuf || !dmabuf->resv) in dma_buf_poll()
148 resv = dmabuf->resv; in dma_buf_poll()
150 poll_wait(file, &dmabuf->poll, poll); in dma_buf_poll()
172 struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl; in dma_buf_poll()
178 spin_lock_irq(&dmabuf->poll.lock); in dma_buf_poll()
184 spin_unlock_irq(&dmabuf->poll.lock); in dma_buf_poll()
207 struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared; in dma_buf_poll()
211 spin_lock_irq(&dmabuf->poll.lock); in dma_buf_poll()
216 spin_unlock_irq(&dmabuf->poll.lock); in dma_buf_poll()
286 struct dma_buf *dmabuf; in dma_buf_export() local
311 dmabuf = kzalloc(alloc_size, GFP_KERNEL); in dma_buf_export()
312 if (!dmabuf) { in dma_buf_export()
317 dmabuf->priv = exp_info->priv; in dma_buf_export()
318 dmabuf->ops = exp_info->ops; in dma_buf_export()
319 dmabuf->size = exp_info->size; in dma_buf_export()
320 dmabuf->exp_name = exp_info->exp_name; in dma_buf_export()
321 dmabuf->owner = exp_info->owner; in dma_buf_export()
322 init_waitqueue_head(&dmabuf->poll); in dma_buf_export()
323 dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll; in dma_buf_export()
324 dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0; in dma_buf_export()
327 resv = (struct reservation_object *)&dmabuf[1]; in dma_buf_export()
330 dmabuf->resv = resv; in dma_buf_export()
332 file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf, in dma_buf_export()
335 kfree(dmabuf); in dma_buf_export()
340 dmabuf->file = file; in dma_buf_export()
342 mutex_init(&dmabuf->lock); in dma_buf_export()
343 INIT_LIST_HEAD(&dmabuf->attachments); in dma_buf_export()
346 list_add(&dmabuf->list_node, &db_list.head); in dma_buf_export()
349 return dmabuf; in dma_buf_export()
360 int dma_buf_fd(struct dma_buf *dmabuf, int flags) in dma_buf_fd() argument
364 if (!dmabuf || !dmabuf->file) in dma_buf_fd()
371 fd_install(fd, dmabuf->file); in dma_buf_fd()
409 void dma_buf_put(struct dma_buf *dmabuf) in dma_buf_put() argument
411 if (WARN_ON(!dmabuf || !dmabuf->file)) in dma_buf_put()
414 fput(dmabuf->file); in dma_buf_put()
427 struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf, in dma_buf_attach() argument
433 if (WARN_ON(!dmabuf || !dev)) in dma_buf_attach()
441 attach->dmabuf = dmabuf; in dma_buf_attach()
443 mutex_lock(&dmabuf->lock); in dma_buf_attach()
445 if (dmabuf->ops->attach) { in dma_buf_attach()
446 ret = dmabuf->ops->attach(dmabuf, dev, attach); in dma_buf_attach()
450 list_add(&attach->node, &dmabuf->attachments); in dma_buf_attach()
452 mutex_unlock(&dmabuf->lock); in dma_buf_attach()
457 mutex_unlock(&dmabuf->lock); in dma_buf_attach()
469 void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach) in dma_buf_detach() argument
471 if (WARN_ON(!dmabuf || !attach)) in dma_buf_detach()
474 mutex_lock(&dmabuf->lock); in dma_buf_detach()
476 if (dmabuf->ops->detach) in dma_buf_detach()
477 dmabuf->ops->detach(dmabuf, attach); in dma_buf_detach()
479 mutex_unlock(&dmabuf->lock); in dma_buf_detach()
501 if (WARN_ON(!attach || !attach->dmabuf)) in dma_buf_map_attachment()
504 sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction); in dma_buf_map_attachment()
527 if (WARN_ON(!attach || !attach->dmabuf || !sg_table)) in dma_buf_unmap_attachment()
530 attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, in dma_buf_unmap_attachment()
548 int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len, in dma_buf_begin_cpu_access() argument
553 if (WARN_ON(!dmabuf)) in dma_buf_begin_cpu_access()
556 if (dmabuf->ops->begin_cpu_access) in dma_buf_begin_cpu_access()
557 ret = dmabuf->ops->begin_cpu_access(dmabuf, start, in dma_buf_begin_cpu_access()
576 void dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len, in dma_buf_end_cpu_access() argument
579 WARN_ON(!dmabuf); in dma_buf_end_cpu_access()
581 if (dmabuf->ops->end_cpu_access) in dma_buf_end_cpu_access()
582 dmabuf->ops->end_cpu_access(dmabuf, start, len, direction); in dma_buf_end_cpu_access()
595 void *dma_buf_kmap_atomic(struct dma_buf *dmabuf, unsigned long page_num) in dma_buf_kmap_atomic() argument
597 WARN_ON(!dmabuf); in dma_buf_kmap_atomic()
599 return dmabuf->ops->kmap_atomic(dmabuf, page_num); in dma_buf_kmap_atomic()
611 void dma_buf_kunmap_atomic(struct dma_buf *dmabuf, unsigned long page_num, in dma_buf_kunmap_atomic() argument
614 WARN_ON(!dmabuf); in dma_buf_kunmap_atomic()
616 if (dmabuf->ops->kunmap_atomic) in dma_buf_kunmap_atomic()
617 dmabuf->ops->kunmap_atomic(dmabuf, page_num, vaddr); in dma_buf_kunmap_atomic()
630 void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num) in dma_buf_kmap() argument
632 WARN_ON(!dmabuf); in dma_buf_kmap()
634 return dmabuf->ops->kmap(dmabuf, page_num); in dma_buf_kmap()
646 void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num, in dma_buf_kunmap() argument
649 WARN_ON(!dmabuf); in dma_buf_kunmap()
651 if (dmabuf->ops->kunmap) in dma_buf_kunmap()
652 dmabuf->ops->kunmap(dmabuf, page_num, vaddr); in dma_buf_kunmap()
671 int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma, in dma_buf_mmap() argument
677 if (WARN_ON(!dmabuf || !vma)) in dma_buf_mmap()
686 dmabuf->size >> PAGE_SHIFT) in dma_buf_mmap()
690 get_file(dmabuf->file); in dma_buf_mmap()
692 vma->vm_file = dmabuf->file; in dma_buf_mmap()
695 ret = dmabuf->ops->mmap(dmabuf, vma); in dma_buf_mmap()
699 fput(dmabuf->file); in dma_buf_mmap()
721 void *dma_buf_vmap(struct dma_buf *dmabuf) in dma_buf_vmap() argument
725 if (WARN_ON(!dmabuf)) in dma_buf_vmap()
728 if (!dmabuf->ops->vmap) in dma_buf_vmap()
731 mutex_lock(&dmabuf->lock); in dma_buf_vmap()
732 if (dmabuf->vmapping_counter) { in dma_buf_vmap()
733 dmabuf->vmapping_counter++; in dma_buf_vmap()
734 BUG_ON(!dmabuf->vmap_ptr); in dma_buf_vmap()
735 ptr = dmabuf->vmap_ptr; in dma_buf_vmap()
739 BUG_ON(dmabuf->vmap_ptr); in dma_buf_vmap()
741 ptr = dmabuf->ops->vmap(dmabuf); in dma_buf_vmap()
747 dmabuf->vmap_ptr = ptr; in dma_buf_vmap()
748 dmabuf->vmapping_counter = 1; in dma_buf_vmap()
751 mutex_unlock(&dmabuf->lock); in dma_buf_vmap()
761 void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr) in dma_buf_vunmap() argument
763 if (WARN_ON(!dmabuf)) in dma_buf_vunmap()
766 BUG_ON(!dmabuf->vmap_ptr); in dma_buf_vunmap()
767 BUG_ON(dmabuf->vmapping_counter == 0); in dma_buf_vunmap()
768 BUG_ON(dmabuf->vmap_ptr != vaddr); in dma_buf_vunmap()
770 mutex_lock(&dmabuf->lock); in dma_buf_vunmap()
771 if (--dmabuf->vmapping_counter == 0) { in dma_buf_vunmap()
772 if (dmabuf->ops->vunmap) in dma_buf_vunmap()
773 dmabuf->ops->vunmap(dmabuf, vaddr); in dma_buf_vunmap()
774 dmabuf->vmap_ptr = NULL; in dma_buf_vunmap()
776 mutex_unlock(&dmabuf->lock); in dma_buf_vunmap()