1 /*
2 * Copyright (c) 2022 Huawei Device Co., Ltd.
3 *
4 * HDF is dual licensed: you can use it either under the terms of
5 * the GPL, or the BSD license, at your option.
6 * See the LICENSE file in the root of this repository for complete details.
7 */
8
9 #include "camera_buffer_manager_adapter.h"
10
CreateFrameVec(unsigned long start,unsigned long length)11 struct frame_vector *CreateFrameVec(unsigned long start, unsigned long length)
12 {
13 int32_t ret;
14 unsigned long first;
15 unsigned long last;
16 unsigned long num;
17 struct frame_vector *vec = NULL;
18 uint32_t flags = FOLL_FORCE | FOLL_WRITE;
19
20 first = start >> PAGE_SHIFT;
21 if (start == 0 || length == 0) {
22 return ERR_PTR(-EINVAL);
23 }
24 last = (start + length - 1) >> PAGE_SHIFT;
25 num = last - first + 1;
26 vec = frame_vector_create(num);
27 if (vec == NULL) {
28 return ERR_PTR(-ENOMEM);
29 }
30
31 ret = get_vaddr_frames(start & PAGE_MASK, num, flags, vec);
32 if (ret < 0) {
33 goto OUT_DESTROY;
34 }
35
36 /* We accept only complete set of PFNs */
37 if (ret != num) {
38 ret = -EFAULT;
39 goto OUT_RELEASE;
40 }
41 return vec;
42 OUT_RELEASE:
43 put_vaddr_frames(vec);
44 OUT_DESTROY:
45 frame_vector_destroy(vec);
46 return ERR_PTR(ret);
47 }
48
/*
 * Release a frame vector obtained from CreateFrameVec(): unpin the user
 * pages first, then free the vector itself. Order matters — the vector
 * must still be valid while its frames are being put.
 */
void DestroyFrameVec(struct frame_vector *vec)
{
    put_vaddr_frames(vec);
    frame_vector_destroy(vec);
}
54
CommonVmOpen(struct vm_area_struct * vma)55 static void CommonVmOpen(struct vm_area_struct *vma)
56 {
57 if (vma == NULL) {
58 return;
59 }
60 struct VmareaHandler *handler = vma->vm_private_data;
61
62 refcount_inc(handler->refCount);
63 }
64
CommonVmClose(struct vm_area_struct * vma)65 static void CommonVmClose(struct vm_area_struct *vma)
66 {
67 if (vma == NULL) {
68 return;
69 }
70 struct VmareaHandler *handler = vma->vm_private_data;
71
72 handler->free(handler->arg);
73 }
74
/* Shared vm_operations for buffer mappings: refcount on open, release on close. */
const struct vm_operations_struct g_commonVmOps = {
    .open = CommonVmOpen,
    .close = CommonVmClose,
};
79
/* Return the shared vm_operations table used for camera buffer mappings. */
const struct vm_operations_struct *GetVmOps(void)
{
    return &g_commonVmOps;
}
84
/*
 * Initialize the kernel-side part of a buffer queue: set the DMA transfer
 * direction to device-to-memory (capture path). No-op on a NULL queue.
 */
void MemoryAdapterQueueImpInit(struct BufferQueue *queue)
{
    if (queue == NULL) {
        return;
    }
    /* Recover the enclosing implementation struct from the embedded queue. */
    struct BufferQueueImp *queueImp = CONTAINER_OF(queue, struct BufferQueueImp, queue);

    queueImp->dmaDir = DMA_FROM_DEVICE;
}
94
/*
 * Acquire the driver mutex of the queue's enclosing implementation.
 * Must be paired with MemoryAdapterDriverMutexUnLock(). No-op on NULL.
 */
void MemoryAdapterDriverMutexLock(struct BufferQueue *queue)
{
    if (queue == NULL) {
        return;
    }
    struct BufferQueueImp *queueImp = CONTAINER_OF(queue, struct BufferQueueImp, queue);

    mutex_lock(queueImp->lock);
}
104
/*
 * Release the driver mutex taken by MemoryAdapterDriverMutexLock().
 * No-op on a NULL queue.
 */
void MemoryAdapterDriverMutexUnLock(struct BufferQueue *queue)
{
    if (queue == NULL) {
        return;
    }
    struct BufferQueueImp *queueImp = CONTAINER_OF(queue, struct BufferQueueImp, queue);

    mutex_unlock(queueImp->lock);
}
114
/* True if ptr is NULL or an ERR_PTR-encoded error value. */
bool MemoryAdapterIsErrOrNullPtr(void *ptr)
{
    return IS_ERR_OR_NULL(ptr);
}
119
/* True if ptr is an ERR_PTR-encoded error value (NULL is not an error here). */
bool MemoryAdapterIsErrPtr(void *ptr)
{
    return IS_ERR(ptr);
}
124
/*
 * Decode the negative errno stored in an ERR_PTR. PTR_ERR() yields a long;
 * the narrowing to int32_t is safe for errno-range values.
 */
int32_t MemoryAdapterPtrErr(void *ptr)
{
    return PTR_ERR(ptr);
}
129
/* Round size up to the next PAGE_SIZE boundary. */
unsigned long MemoryAdapterPageAlign(unsigned long size)
{
    return PAGE_ALIGN(size);
}
134
/*
 * Look up the dma-buf behind a file descriptor and take a reference on it.
 * Returns an ERR_PTR on failure — callers should check it with
 * MemoryAdapterIsErrOrNullPtr()/MemoryAdapterIsErrPtr() before use.
 */
void *MemoryAdapterGetDmaBuffer(uint32_t fd)
{
    return (void *)dma_buf_get(fd);
}
139
MemoryAdapterPutDmaBuffer(void * dmaBuf)140 void MemoryAdapterPutDmaBuffer(void *dmaBuf)
141 {
142 dma_buf_put((struct dma_buf *)dmaBuf);
143 }
144
/*
 * Return the byte size of a dma-buf, or 0 for NULL.
 * NOTE(review): dma_buf.size is size_t; the return narrows to uint32_t,
 * which truncates for buffers >= 4 GiB — presumably out of range for
 * camera buffers, but confirm before reusing elsewhere.
 */
uint32_t MemoryAdapterDmaBufSize(void *dmaBuf)
{
    if (dmaBuf == NULL) {
        return 0;
    }
    return ((struct dma_buf *)dmaBuf)->size;
}