/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 *
 * HDF is dual licensed: you can use it either under the terms of
 * the GPL, or the BSD license, at your option.
 * See the LICENSE file in the root of this repository for complete details.
 */

#include <securec.h>
#include <hdf_log.h>
#include <osal_mem.h>
#include "sg_dma.h"

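/* Per-buffer state for the scatter-gather DMA allocator: the backing pages,
 * the scatter-gather table, the DMA direction and the reference count that
 * keeps the buffer alive while it is mapped into user space. */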
struct SgDmaBuffer {
    struct device *dev;
    void *vaddr;
    struct page **pages;
    struct frame_vector *vec;
    int32_t offset;
    enum dma_data_direction dmaDir;
    struct sg_table sgTable;
    struct sg_table *dmaSgt;
    size_t size;
    uint32_t numPages;
    refcount_t refCount;
    struct VmareaHandler handler;
    struct dma_buf_attachment *dbAttach;
};

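/* Release an MMAP buffer: drop the reference count and, on the last
 * reference, unmap the scatter-gather table, free the pages and the
 * bookkeeping structures. */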
static void SgMmapFree(void *bufPriv)
{
    if (bufPriv == NULL) {
        return;
    }
    struct SgDmaBuffer *buf = bufPriv;
    struct sg_table *sgt = &buf->sgTable;
    int32_t i = buf->numPages;

    if (refcount_dec_and_test(&buf->refCount) != 0) {
        dma_unmap_sgtable(buf->dev, sgt, buf->dmaDir, DMA_ATTR_SKIP_CPU_SYNC);
        if (buf->vaddr != NULL) {
            vm_unmap_ram(buf->vaddr, buf->numPages);
        }
        sg_free_table(buf->dmaSgt);
        while (--i >= 0) {
            __free_page(buf->pages[i]);
        }
        kvfree(buf->pages);
        put_device(buf->dev);
        OsalMemFree(buf);
    }
}

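/* Allocate buf->size bytes as a set of physically compact page blocks:
 * try the largest page order that still fits, falling back to smaller
 * orders when a higher-order allocation fails. */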
static int32_t SgAllocCompacted(struct SgDmaBuffer *buf, gfp_t gfpFlags)
{
    if (buf == NULL) {
        return -EINVAL;
    }
    uint32_t lastPage = 0;
    unsigned long size = buf->size;

    while (size > 0) {
        struct page *pages = NULL;
        int32_t order;
        int32_t i;

        order = get_order(size);
        /* Don't over allocate */
        if ((PAGE_SIZE << order) > size) {
            order--;
        }

        while (pages == NULL) {
            pages = alloc_pages(GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | gfpFlags, order);
            if (pages != NULL) {
                break;
            }
            if (order != 0) {
                order--;
                continue;
            }
            while (lastPage--) {
                __free_page(buf->pages[lastPage]);
            }
            return -ENOMEM;
        }

        split_page(pages, order);
        for (i = 0; i < (1 << order); i++) {
            buf->pages[lastPage++] = &pages[i];
        }

        size -= PAGE_SIZE << order;
    }

    return 0;
}

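/* MMAP allocation path: allocate the pages, build a scatter-gather table
 * over them and map it for DMA on the per-plane device (or the queue's
 * default device). */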
static void *SgMmapAlloc(struct BufferQueue *queue, uint32_t planeNum, unsigned long size)
{
    struct BufferQueueImp *queueImp = container_of(queue, struct BufferQueueImp, queue);
    struct SgDmaBuffer *buf = NULL;
    struct sg_table *sgt = NULL;
    int32_t ret;
    int32_t numPages;
    struct device *dev = NULL;

    if (queueImp->allocDev[planeNum] != NULL) {
        dev = queueImp->allocDev[planeNum];
    } else {
        dev = queueImp->dev;
    }

    if (dev == NULL) {
        return ERR_PTR(-EINVAL);
    }
    buf = OsalMemCalloc(sizeof(*buf));
    if (buf == NULL) {
        return ERR_PTR(-ENOMEM);
    }
    buf->vaddr = NULL;
    buf->dmaDir = queueImp->dmaDir;
    buf->offset = 0;
    buf->size = size;
    /* size is already page aligned */
    buf->numPages = size >> PAGE_SHIFT;
    buf->dmaSgt = &buf->sgTable;
    buf->pages = kvmalloc_array(buf->numPages, sizeof(struct page *), GFP_KERNEL | __GFP_ZERO);
    if (buf->pages == NULL) {
        goto FAIL_PAGES_ARRAY_ALLOC;
    }
    ret = SgAllocCompacted(buf, queueImp->gfpFlags);
    if (ret != 0) {
        goto FAIL_PAGES_ALLOC;
    }
    ret = sg_alloc_table_from_pages(buf->dmaSgt, buf->pages, buf->numPages, 0, size, GFP_KERNEL);
    if (ret != 0) {
        goto FAIL_TABLE_ALLOC;
    }
    /* Prevent the device from being released while the buffer is used */
    buf->dev = get_device(dev);
    sgt = &buf->sgTable;
    if (dma_map_sgtable(buf->dev, sgt, buf->dmaDir, DMA_ATTR_SKIP_CPU_SYNC) != 0) {
        goto FAIL_MAP;
    }
    buf->handler.refCount = &buf->refCount;
    buf->handler.free = SgMmapFree;
    buf->handler.arg = buf;
    refcount_set(&buf->refCount, 1);
    return buf;
FAIL_MAP:
    put_device(buf->dev);
    sg_free_table(buf->dmaSgt);
FAIL_TABLE_ALLOC:
    numPages = buf->numPages;
    while (numPages--) {
        __free_page(buf->pages[numPages]);
    }
FAIL_PAGES_ALLOC:
    kvfree(buf->pages);
FAIL_PAGES_ARRAY_ALLOC:
    OsalMemFree(buf);
    return ERR_PTR(-ENOMEM);
}

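/* Map the buffer's pages into a user vm_area and hook up the refcount
 * handler through the queue's vm_operations. */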
static int32_t SgMmap(void *bufPriv, void *vm)
{
    struct SgDmaBuffer *buf = bufPriv;
    struct vm_area_struct *vma = (struct vm_area_struct *)vm;
    int32_t err;

    if (buf == NULL) {
        HDF_LOGE("%s: buf is NULL!", __func__);
        return -EINVAL;
    }

    err = vm_map_pages(vma, buf->pages, buf->numPages);
    if (err != 0) {
        HDF_LOGE("%s: vm_map_pages err! err = %{public}d", __func__, err);
        return err;
    }
    vma->vm_private_data = &buf->handler;
    vma->vm_ops = GetVmOps();
    vma->vm_ops->open(vma);
    return 0;
}

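/* USERPTR allocation path: pin the user pages behind vaddr into a frame
 * vector, build a scatter-gather table over them and map it for DMA. */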
static void *SgAllocUserPtr(struct BufferQueue *queue,
    uint32_t planeNum, unsigned long vaddr, unsigned long size)
{
    struct BufferQueueImp *queueImp = container_of(queue, struct BufferQueueImp, queue);
    struct SgDmaBuffer *buf = NULL;
    struct sg_table *sgt = NULL;
    struct frame_vector *vec = NULL;
    struct device *dev = NULL;

    if (queueImp->allocDev[planeNum] != NULL) {
        dev = queueImp->allocDev[planeNum];
    } else {
        dev = queueImp->dev;
    }

    if (dev == NULL) {
        return ERR_PTR(-EINVAL);
    }
    buf = OsalMemAlloc(sizeof(*buf));
    if (buf == NULL) {
        return ERR_PTR(-ENOMEM);
    }
    buf->vaddr = NULL;
    buf->dev = dev;
    buf->dmaDir = queueImp->dmaDir;
    buf->offset = vaddr & ~PAGE_MASK;
    buf->size = size;
    buf->dmaSgt = &buf->sgTable;
    buf->dbAttach = NULL;
    vec = CreateFrameVec(vaddr, size);
    if (IS_ERR(vec)) {
        goto USERPTR_FAIL_PFNVEC;
    }
    buf->vec = vec;
    buf->pages = frame_vector_pages(vec);
    if (IS_ERR(buf->pages)) {
        goto USERPTR_FAIL_SGTABLE;
    }
    buf->numPages = frame_vector_count(vec);
    if (sg_alloc_table_from_pages(buf->dmaSgt, buf->pages, buf->numPages, buf->offset, size, 0) != 0) {
        goto USERPTR_FAIL_SGTABLE;
    }
    sgt = &buf->sgTable;
    if (dma_map_sgtable(buf->dev, sgt, buf->dmaDir, DMA_ATTR_SKIP_CPU_SYNC) != 0) {
        goto USERPTR_FAIL_MAP;
    }
    return buf;

USERPTR_FAIL_MAP:
    sg_free_table(&buf->sgTable);
USERPTR_FAIL_SGTABLE:
    DestroyFrameVec(vec);
USERPTR_FAIL_PFNVEC:
    OsalMemFree(buf);
    return ERR_PTR(-ENOMEM);
}

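/* Undo SgAllocUserPtr: unmap the DMA mapping, mark pages the device may
 * have written to as dirty, and release the pinned frame vector. */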
static void SgFreeUserPtr(void *bufPriv)
{
    if (bufPriv == NULL) {
        return;
    }
    struct SgDmaBuffer *buf = bufPriv;
    struct sg_table *sgt = &buf->sgTable;
    int32_t i = buf->numPages;

    dma_unmap_sgtable(buf->dev, sgt, buf->dmaDir, DMA_ATTR_SKIP_CPU_SYNC);
    if (buf->vaddr != NULL) {
        vm_unmap_ram(buf->vaddr, buf->numPages);
    }
    sg_free_table(buf->dmaSgt);
    if (buf->dmaDir == DMA_FROM_DEVICE || buf->dmaDir == DMA_BIDIRECTIONAL) {
        while (--i >= 0) {
            set_page_dirty_lock(buf->pages[i]);
        }
    }
    DestroyFrameVec(buf->vec);
    OsalMemFree(buf);
}

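/* Pin an attached dmabuf: fetch its scatterlist through
 * dma_buf_map_attachment() and keep it in dmaSgt. */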
static int SgMapDmaBuf(void *memPriv)
{
    struct SgDmaBuffer *buf = memPriv;
    struct sg_table *sgt = NULL;

    if (buf == NULL) {
        return -EINVAL;
    }
    if (WARN_ON(!buf->dbAttach)) {
        HDF_LOGE("%s: trying to pin a non-attached buffer", __func__);
        return -EINVAL;
    }

    if (WARN_ON(buf->dmaSgt)) {
        HDF_LOGE("%s: dmabuf buffer is already pinned", __func__);
        return 0;
    }

    /* get the associated scatterlist for this buffer */
    sgt = dma_buf_map_attachment(buf->dbAttach, buf->dmaDir);
    if (IS_ERR(sgt)) {
        HDF_LOGE("%s: Error getting dmabuf scatterlist", __func__);
        return -EINVAL;
    }

    buf->dmaSgt = sgt;
    buf->vaddr = NULL;

    return 0;
}

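/* Unpin a dmabuf: drop any kernel vmap and release the scatterlist
 * obtained in SgMapDmaBuf. */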
static void SgUnmapDmaBuf(void *memPriv)
{
    if (memPriv == NULL) {
        return;
    }
    struct SgDmaBuffer *buf = memPriv;
    struct sg_table *sgt = buf->dmaSgt;

    if (WARN_ON(!buf->dbAttach)) {
        HDF_LOGE("%s: trying to unpin a non-attached buffer", __func__);
        return;
    }

    if (WARN_ON(!sgt)) {
        HDF_LOGE("%s: dmabuf buffer is already unpinned", __func__);
        return;
    }

    if (buf->vaddr != NULL) {
        dma_buf_vunmap(buf->dbAttach->dmabuf, buf->vaddr);
        buf->vaddr = NULL;
    }
    dma_buf_unmap_attachment(buf->dbAttach, sgt, buf->dmaDir);

    buf->dmaSgt = NULL;
}

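/* Detach from the dmabuf, unpinning it first if it is still mapped. */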
static void SgDetachDmaBuf(void *memPriv)
{
    if (memPriv == NULL) {
        return;
    }

    struct SgDmaBuffer *buf = memPriv;
    /* if vb2 works correctly you should never detach a mapped buffer */
    if (WARN_ON(buf->dmaSgt)) {
        SgUnmapDmaBuf(buf);
    }

    /* detach this attachment; free with OsalMemFree to match the
     * OsalMemCalloc allocation in SgAttachDmaBuf */
    dma_buf_detach(buf->dbAttach->dmabuf, buf->dbAttach);
    OsalMemFree(buf);
}

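/* DMABUF import: validate the requested size against the dmabuf and
 * create an attachment for the allocating device. */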
static void *SgAttachDmaBuf(struct BufferQueue *queue, uint32_t planeNum, void *dmaBuf, unsigned long size)
{
    struct BufferQueueImp *queueImp = container_of(queue, struct BufferQueueImp, queue);
    struct SgDmaBuffer *buf = NULL;
    struct dma_buf_attachment *dba = NULL;
    struct device *dev = NULL;
    struct dma_buf *dbuf = (struct dma_buf *)dmaBuf;

    if (queueImp->allocDev[planeNum] != NULL) {
        dev = queueImp->allocDev[planeNum];
    } else {
        dev = queueImp->dev;
    }
    if (dev == NULL || dbuf == NULL) {
        return ERR_PTR(-EINVAL);
    }

    if (dbuf->size < size) {
        return ERR_PTR(-EFAULT);
    }

    buf = (struct SgDmaBuffer *)OsalMemCalloc(sizeof(*buf));
    if (buf == NULL) {
        return ERR_PTR(-ENOMEM);
    }

    buf->dev = dev;
    /* create attachment for the dmabuf with the user device */
    dba = dma_buf_attach(dbuf, buf->dev);
    if (IS_ERR(dba)) {
        HDF_LOGE("%s: failed to attach dmabuf", __func__);
        OsalMemFree(buf);
        return dba;
    }

    buf->dmaDir = queueImp->dmaDir;
    buf->size = size;
    buf->dbAttach = dba;

    return buf;
}

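/* The allocator cookie handed to drivers is the buffer's
 * scatter-gather table. */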
static void *SgGetCookie(void *bufPriv)
{
    struct SgDmaBuffer *buf = bufPriv;
    if (buf == NULL) {
        return ERR_PTR(-EINVAL);
    }
    return buf->dmaSgt;
}

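/* Return a kernel virtual address for the buffer, creating the mapping on
 * demand: dma_buf_vmap() for imported dmabufs, vm_map_ram() for
 * page-backed buffers. */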
static void *SgGetVaddr(void *bufPriv)
{
    struct SgDmaBuffer *buf = bufPriv;
    if (buf == NULL) {
        return ERR_PTR(-EINVAL);
    }

    if (buf->vaddr == NULL) {
        if (buf->dbAttach != NULL) {
            buf->vaddr = dma_buf_vmap(buf->dbAttach->dmabuf);
        } else {
            buf->vaddr = vm_map_ram(buf->pages, buf->numPages, -1);
        }
    }

    /* add offset in case userptr is not page-aligned */
    return buf->vaddr != NULL ? (void *)((uint8_t *)buf->vaddr + buf->offset) : NULL;
}

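/* Number of outstanding references held on an MMAP buffer. */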
static unsigned int SgNumUsers(void *bufPriv)
{
    struct SgDmaBuffer *buf = bufPriv;
    if (buf == NULL) {
        return 0;
    }

    return refcount_read(&buf->refCount);
}

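/* Cache sync before device access: make CPU writes visible to DMA. */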
static void SgPrepareMem(void *bufPriv)
{
    if (bufPriv == NULL) {
        return;
    }
    struct SgDmaBuffer *buf = bufPriv;
    struct sg_table *sgt = buf->dmaSgt;

    dma_sync_sgtable_for_device(buf->dev, sgt, buf->dmaDir);
}

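/* Cache sync after device access: make DMA writes visible to the CPU. */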
static void SgFinishMem(void *bufPriv)
{
    if (bufPriv == NULL) {
        return;
    }
    struct SgDmaBuffer *buf = bufPriv;
    struct sg_table *sgt = buf->dmaSgt;

    dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dmaDir);
}

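/* Scatter-gather DMA memory operations exposed to the buffer queue. */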
struct MemOps g_sgDmaOps = {
    .mmapAlloc      = SgMmapAlloc,
    .mmapFree       = SgMmapFree,
    .mmap           = SgMmap,
    .allocUserPtr   = SgAllocUserPtr,
    .freeUserPtr    = SgFreeUserPtr,
    .mapDmaBuf      = SgMapDmaBuf,
    .unmapDmaBuf    = SgUnmapDmaBuf,
    .attachDmaBuf   = SgAttachDmaBuf,
    .detachDmaBuf   = SgDetachDmaBuf,
    .getCookie      = SgGetCookie,
    .getVaddr       = SgGetVaddr,
    .numUsers       = SgNumUsers,
    .syncForDevice  = SgPrepareMem,
    .syncForUser    = SgFinishMem,
};

struct MemOps *GetSgDmaOps(void)
{
    return &g_sgDmaOps;
}