1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
4  * Author:Mark Yao <mark.yao@rock-chips.com>
5  */
6 
7 #include <linux/dma-buf-cache.h>
8 #include <linux/iommu.h>
9 #include <linux/vmalloc.h>
10 
11 #include <drm/drm.h>
12 #include <drm/drm_gem.h>
13 #include <drm/drm_prime.h>
14 #include <drm/drm_vma_manager.h>
15 
16 #include <linux/genalloc.h>
18 #include <linux/pagemap.h>
20 #include <linux/rockchip/rockchip_sip.h>
21 
22 #include "rockchip_drm_drv.h"
23 #include "rockchip_drm_gem.h"
24 
25 static u32 bank_bit_first = 12;
26 static u32 bank_bit_mask = 0x7;
27 
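/*
 * bank_bit_first/bank_bit_mask describe which bits of a physical address
 * select the DRAM bank.  The values below are only compile-time defaults;
 * rockchip_gem_get_ddr_info() replaces them with the layout reported by
 * firmware when that information is available.  rockchip_gem_get_pages()
 * uses these bits to bin small, non-contiguous page runs into PG_ROUND
 * lists and interleave them, presumably to spread accesses across banks.
 */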
28 struct page_info {
29     struct page *page;
30     struct list_head list;
31 };
32 
33 #define PG_ROUND 8
34 
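/*
 * Reserve an I/O virtual address range from the driver's drm_mm allocator,
 * map the object's sg table at that address and flush the IOTLB.  On
 * success rk_obj->dma_addr holds the IOVA handed to the device.
 */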
35 static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj)
36 {
37     struct drm_device *drm = rk_obj->base.dev;
38     struct rockchip_drm_private *private = drm->dev_private;
39     int prot = IOMMU_READ | IOMMU_WRITE;
40     ssize_t ret;
41 
42     mutex_lock(&private->mm_lock);
43     ret = drm_mm_insert_node_generic(&private->mm, &rk_obj->mm, rk_obj->base.size, PAGE_SIZE, 0, 0);
44     mutex_unlock(&private->mm_lock);
45 
46     if (ret < 0) {
47         DRM_ERROR("out of I/O virtual memory: %zd\n", ret);
48         return ret;
49     }
50 
51     rk_obj->dma_addr = rk_obj->mm.start;
52 
53     ret = iommu_map_sgtable(private->domain, rk_obj->dma_addr, rk_obj->sgt, prot);
54     if (ret < rk_obj->base.size) {
55         DRM_ERROR("failed to map buffer: size=%zd request_size=%zd\n", ret, rk_obj->base.size);
56         ret = -ENOMEM;
57         goto err_remove_node;
58     }
59 
60     iommu_flush_iotlb_all(private->domain);
61 
62     rk_obj->size = ret;
63 
64     return 0;
65 
66 err_remove_node:
67     mutex_lock(&private->mm_lock);
68     drm_mm_remove_node(&rk_obj->mm);
69     mutex_unlock(&private->mm_lock);
70 
71     return ret;
72 }
73 
74 static int rockchip_gem_iommu_unmap(struct rockchip_gem_object *rk_obj)
75 {
76     struct drm_device *drm = rk_obj->base.dev;
77     struct rockchip_drm_private *private = drm->dev_private;
78 
79     iommu_unmap(private->domain, rk_obj->dma_addr, rk_obj->size);
80 
81     mutex_lock(&private->mm_lock);
82 
83     drm_mm_remove_node(&rk_obj->mm);
84 
85     mutex_unlock(&private->mm_lock);
86 
87     return 0;
88 }
89 
90 static void rockchip_gem_free_list(struct list_head lists[])
91 {
92     struct page_info *info, *tmp_info;
93     int i;
94 
95     for (i = 0; i < PG_ROUND; i++) {
96         list_for_each_entry_safe(info, tmp_info, &lists[i], list)
97         {
98             list_del(&info->list);
99             kfree(info);
100         }
101     }
102 }
103 
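/*
 * Ask firmware (via a SiP SMC call) for the DRAM address-map layout and
 * cache the bank-bit parameters used when sorting pages.  If the call
 * yields nothing, the compile-time defaults above remain in effect.
 */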
104 void rockchip_gem_get_ddr_info(void)
105 {
106     struct dram_addrmap_info *ddr_map_info;
107 
108     ddr_map_info = sip_smc_get_dram_map();
109     if (ddr_map_info) {
110         bank_bit_first = ddr_map_info->bank_bit_first;
111         bank_bit_mask = ddr_map_info->bank_bit_mask;
112     }
113 }
114 
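/*
 * Allocate backing pages from shmem and reorder them before building the
 * sg table:
 *
 *  - runs of at least PG_ROUND physically contiguous pages are copied out
 *    in order, so they can still be merged into large sg entries;
 *  - shorter runs are binned by their DRAM bank bits into PG_ROUND lists
 *    and then drained round-robin, one page per list per pass.
 *
 * The round-robin drain is presumably intended to spread scattered pages
 * across DRAM banks rather than clustering them in one bank.
 */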
115 static int rockchip_gem_get_pages(struct rockchip_gem_object *rk_obj)
116 {
117     struct drm_device *drm = rk_obj->base.dev;
118     int ret, i;
119     struct scatterlist *s;
120     unsigned int cur_page;
121     struct page **pages, **dst_pages;
122     int j;
123     int n_pages;
124     unsigned long chunk_pages;
125     unsigned long remain;
126     struct list_head lists[PG_ROUND];
127     dma_addr_t phys;
128     int end = 0;
129     unsigned int bit_index;
130     unsigned int block_index[PG_ROUND] = {0};
131     struct page_info *info;
132     unsigned int maximum;
133 
134     for (i = 0; i < PG_ROUND; i++) {
135         INIT_LIST_HEAD(&lists[i]);
136     }
137 
138     pages = drm_gem_get_pages(&rk_obj->base);
139     if (IS_ERR(pages)) {
140         return PTR_ERR(pages);
141     }
142 
143     rk_obj->pages = pages;
144 
145     rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT;
146 
147     n_pages = rk_obj->num_pages;
148 
149     dst_pages = __vmalloc(sizeof(struct page *) * n_pages, GFP_KERNEL | __GFP_HIGHMEM);
150     if (!dst_pages) {
151         ret = -ENOMEM;
152         goto err_put_pages;
153     }
154 
155     DRM_DEBUG_KMS("bank_bit_first = 0x%x, bank_bit_mask = 0x%x\n", bank_bit_first, bank_bit_mask);
156 
157     cur_page = 0;
158     remain = n_pages;
159     /* look for the end of the current chunk */
160     while (remain) {
161         for (j = cur_page + 1; j < n_pages; ++j) {
162             if (page_to_pfn(pages[j]) != page_to_pfn(pages[j - 1]) + 1) {
163                 break;
164             }
165         }
166 
167         chunk_pages = j - cur_page;
168         if (chunk_pages >= PG_ROUND) {
169             for (i = 0; i < chunk_pages; i++) {
170                 dst_pages[end + i] = pages[cur_page + i];
171             }
172             end += chunk_pages;
173         } else {
174             for (i = 0; i < chunk_pages; i++) {
175                 info = kmalloc(sizeof(*info), GFP_KERNEL);
176                 if (!info) {
177                     ret = -ENOMEM;
178                     goto err_put_list;
179                 }
180 
181                 INIT_LIST_HEAD(&info->list);
182                 info->page = pages[cur_page + i];
183                 phys = page_to_phys(info->page);
184                 bit_index = ((phys >> bank_bit_first) & bank_bit_mask) % PG_ROUND;
185                 list_add_tail(&info->list, &lists[bit_index]);
186                 block_index[bit_index]++;
187             }
188         }
189 
190         cur_page = j;
191         remain -= chunk_pages;
192     }
193 
194     maximum = block_index[0];
195     for (i = 1; i < PG_ROUND; i++) {
196         maximum = max(maximum, block_index[i]);
197     }
198 
199     for (i = 0; i < maximum; i++) {
200         for (j = 0; j < PG_ROUND; j++) {
201             if (!list_empty(&lists[j])) {
202                 struct page_info *info_ex;
203 
204                 info_ex = list_first_entry(&lists[j], struct page_info, list);
205                 dst_pages[end++] = info_ex->page;
206                 list_del(&info_ex->list);
207                 kfree(info_ex);
208             }
209         }
210     }
211 
212     DRM_DEBUG_KMS("%s, %d, end = %d, n_pages = %d\n", __func__, __LINE__, end, n_pages);
213     rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->base.dev, dst_pages, rk_obj->num_pages);
214     if (IS_ERR(rk_obj->sgt)) {
215         ret = PTR_ERR(rk_obj->sgt);
216         goto err_put_list;
217     }
218 
219     rk_obj->pages = dst_pages;
220 
221     /*
222      * Fake up the SG table so that dma_sync_sg_for_device() can be used
223      * to flush the pages associated with it.
224      *
225      * Replace this by drm_clflush_sg() once it can be implemented
226      * without relying on symbols that are not exported.
227      */
228     for_each_sgtable_sg(rk_obj->sgt, s, i) sg_dma_address(s) = sg_phys(s);
229 
230     dma_sync_sgtable_for_device(drm->dev, rk_obj->sgt, DMA_TO_DEVICE);
231 
232     kvfree(pages);
233 
234     return 0;
235 
236 err_put_list:
237     rockchip_gem_free_list(lists);
238     kvfree(dst_pages);
239 err_put_pages:
240     drm_gem_put_pages(&rk_obj->base, rk_obj->pages, false, false);
241     return ret;
242 }
243 
244 static void rockchip_gem_put_pages(struct rockchip_gem_object *rk_obj)
245 {
246     sg_free_table(rk_obj->sgt);
247     kfree(rk_obj->sgt);
248     drm_gem_put_pages(&rk_obj->base, rk_obj->pages, true, true);
249 }
250 
251 static inline void *drm_calloc_large(size_t nmemb, size_t size);
252 static inline void drm_free_large(void *ptr);
253 static void rockchip_gem_free_dma(struct rockchip_gem_object *rk_obj);
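/*
 * Contiguous (CMA) allocation path: allocate a write-combined DMA buffer,
 * build an sg table for it with dma_get_sgtable_attrs(), and also expose it
 * as a struct page array so the mmap and PRIME paths can treat CMA-backed
 * and shmem-backed objects the same way.  Without alloc_kmap the buffer is
 * allocated with DMA_ATTR_NO_KERNEL_MAPPING.
 */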
254 static int rockchip_gem_alloc_dma(struct rockchip_gem_object *rk_obj, bool alloc_kmap)
255 {
256     struct drm_gem_object *obj = &rk_obj->base;
257     struct drm_device *drm = obj->dev;
258     struct sg_table *sgt;
259     int ret, i;
260     struct scatterlist *s;
261 
262     rk_obj->dma_attrs = DMA_ATTR_WRITE_COMBINE;
263 
264     if (!alloc_kmap) {
265         rk_obj->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
266     }
267 
268     rk_obj->kvaddr = dma_alloc_attrs(drm->dev, obj->size, &rk_obj->dma_handle, GFP_KERNEL, rk_obj->dma_attrs);
269     if (!rk_obj->kvaddr) {
270         DRM_ERROR("failed to allocate %zu byte dma buffer\n", obj->size);
271         return -ENOMEM;
272     }
273 
274     sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
275     if (!sgt) {
276         ret = -ENOMEM;
277         goto err_dma_free;
278     }
279 
280     ret = dma_get_sgtable_attrs(drm->dev, sgt, rk_obj->kvaddr, rk_obj->dma_handle, obj->size, rk_obj->dma_attrs);
281     if (ret) {
282         DRM_ERROR("failed to allocate sgt, %d\n", ret);
283         goto err_sgt_free;
284     }
285 
286     for_each_sg(sgt->sgl, s, sgt->nents, i) sg_dma_address(s) = sg_phys(s);
287 
288     rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT;
289 
290     rk_obj->pages = drm_calloc_large(rk_obj->num_pages, sizeof(*rk_obj->pages));
291     if (!rk_obj->pages) {
292         DRM_ERROR("failed to allocate pages.\n");
            ret = -ENOMEM;
293         goto err_sg_table_free;
294     }
295 
296     if (drm_prime_sg_to_page_addr_arrays(sgt, rk_obj->pages, NULL, rk_obj->num_pages)) {
297         DRM_ERROR("invalid sgtable.\n");
298         ret = -EINVAL;
299         goto err_page_free;
300     }
301 
302     rk_obj->sgt = sgt;
303 
304     return 0;
305 
306 err_page_free:
307     drm_free_large(rk_obj->pages);
308 err_sg_table_free:
309     sg_free_table(sgt);
310 err_sgt_free:
311     kfree(sgt);
312 err_dma_free:
313     dma_free_attrs(drm->dev, obj->size, rk_obj->kvaddr, rk_obj->dma_handle, rk_obj->dma_attrs);
314 
315     return ret;
316 }
317 
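/*
 * Local stand-ins for the old drm_calloc_large()/drm_free_large() helpers
 * that are no longer provided by the DRM core: small arrays come from
 * kcalloc(), anything larger than a page falls back to a zeroed vmalloc
 * allocation, and kvfree() covers both cases on release.
 */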
318 static inline void *drm_calloc_large(size_t nmemb, size_t size)
319 {
320     if (size != 0 && nmemb > SIZE_MAX / size) {
321         return NULL;
322     }
323 
324     if (size * nmemb <= PAGE_SIZE) {
325         return kcalloc(nmemb, size, GFP_KERNEL);
326     }
327 
328     return __vmalloc(size * nmemb, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
329 }
330 
331 static inline void drm_free_large(void *ptr)
332 {
333     kvfree(ptr);
334 }
335 
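/*
 * Secure buffers are carved out of the driver's protected-memory gen_pool
 * (set up elsewhere) instead of shmem or CMA.  The physical range is turned
 * into a page array and sg table so the rest of the GEM code can handle it,
 * but it is not given a kernel mapping at allocation time and userspace
 * mmap of it is rejected.
 */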
336 static int rockchip_gem_alloc_secure(struct rockchip_gem_object *rk_obj)
337 {
338     struct drm_gem_object *obj = &rk_obj->base;
339     struct drm_device *drm = obj->dev;
340     struct rockchip_drm_private *private = drm->dev_private;
341     unsigned long paddr;
342     struct sg_table *sgt;
343     int ret = 0, i;
344 
345     if (!private->secure_buffer_pool) {
346         DRM_ERROR("No secure buffer pool found\n");
347         return -ENOMEM;
348     }
349 
350     paddr = gen_pool_alloc(private->secure_buffer_pool, rk_obj->base.size);
351     if (!paddr) {
352         DRM_ERROR("failed to allocate secure buffer\n");
353         return -ENOMEM;
354     }
355 
356     rk_obj->dma_handle = paddr;
357     rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT;
358 
359     rk_obj->pages = drm_calloc_large(rk_obj->num_pages, sizeof(*rk_obj->pages));
360     if (!rk_obj->pages) {
361         DRM_ERROR("failed to allocate pages.\n");
362         ret = -ENOMEM;
363         goto err_buf_free;
364     }
365 
366     i = 0;
367     while (i < rk_obj->num_pages) {
368         rk_obj->pages[i] = phys_to_page(paddr);
369         paddr += PAGE_SIZE;
370         i++;
371     }
372     sgt = drm_prime_pages_to_sg(obj->dev, rk_obj->pages, rk_obj->num_pages);
373     if (IS_ERR(sgt)) {
374         ret = PTR_ERR(sgt);
375         goto err_free_pages;
376     }
377 
378     rk_obj->sgt = sgt;
379 
380     return 0;
381 
382 err_free_pages:
383     drm_free_large(rk_obj->pages);
384 err_buf_free:
385     gen_pool_free(private->secure_buffer_pool, paddr, rk_obj->base.size);
386 
387     return ret;
388 }
389 
390 static void rockchip_gem_free_secure(struct rockchip_gem_object *rk_obj)
391 {
392     struct drm_gem_object *obj = &rk_obj->base;
393     struct drm_device *drm = obj->dev;
394     struct rockchip_drm_private *private = drm->dev_private;
395 
396     drm_free_large(rk_obj->pages);
397     sg_free_table(rk_obj->sgt);
398     kfree(rk_obj->sgt);
399     gen_pool_free(private->secure_buffer_pool, rk_obj->dma_handle, rk_obj->base.size);
400 }
401 
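/*
 * Select the backing store for a new object:
 *
 *  - ROCKCHIP_BO_SECURE: protected gen_pool memory (always contiguous);
 *  - ROCKCHIP_BO_CONTIG: CMA via the DMA API (forced when no IOMMU domain
 *    is present);
 *  - otherwise: shmem pages, optionally vmap()ed write-combined.
 *
 * With an IOMMU domain the resulting sg table is then mapped to obtain a
 * device address; without one the DMA/physical handle is used directly.
 */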
402 static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj, bool alloc_kmap)
403 {
404     struct drm_gem_object *obj = &rk_obj->base;
405     struct drm_device *drm = obj->dev;
406     struct rockchip_drm_private *private = drm->dev_private;
407     int ret = 0;
408 
409     if (!private->domain) {
410         rk_obj->flags |= ROCKCHIP_BO_CONTIG;
411     }
412 
413     if (rk_obj->flags & ROCKCHIP_BO_SECURE) {
414         rk_obj->buf_type = ROCKCHIP_GEM_BUF_TYPE_SECURE;
415         rk_obj->flags |= ROCKCHIP_BO_CONTIG;
416         if (alloc_kmap) {
417             DRM_ERROR("Not allowed to allocate a secure buffer with kmap\n");
418             return -EINVAL;
419         }
420         ret = rockchip_gem_alloc_secure(rk_obj);
421         if (ret) {
422             return ret;
423         }
424     } else if (rk_obj->flags & ROCKCHIP_BO_CONTIG) {
425         rk_obj->buf_type = ROCKCHIP_GEM_BUF_TYPE_CMA;
426         ret = rockchip_gem_alloc_dma(rk_obj, alloc_kmap);
427         if (ret) {
428             return ret;
429         }
430     } else {
431         rk_obj->buf_type = ROCKCHIP_GEM_BUF_TYPE_SHMEM;
432         ret = rockchip_gem_get_pages(rk_obj);
433         if (ret < 0) {
434             return ret;
435         }
436 
437         if (alloc_kmap) {
438             rk_obj->kvaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP, pgprot_writecombine(PAGE_KERNEL));
439             if (!rk_obj->kvaddr) {
440                 DRM_ERROR("failed to vmap() buffer\n");
441                 ret = -ENOMEM;
442                 goto err_free;
443             }
444         }
445     }
446 
447     if (private->domain) {
448         ret = rockchip_gem_iommu_map(rk_obj);
449         if (ret < 0) {
450             goto err_free;
451         }
452     } else {
453         WARN_ON(!rk_obj->dma_handle);
454         rk_obj->dma_addr = rk_obj->dma_handle;
455     }
456 
457     return 0;
458 
463 err_free:
464     if (rk_obj->buf_type == ROCKCHIP_GEM_BUF_TYPE_SECURE) {
465         rockchip_gem_free_secure(rk_obj);
466     } else if (rk_obj->buf_type == ROCKCHIP_GEM_BUF_TYPE_CMA) {
467         rockchip_gem_free_dma(rk_obj);
468     } else {
469         rockchip_gem_put_pages(rk_obj);
470     }
471     return ret;
472 }
473 
474 static void rockchip_gem_free_dma(struct rockchip_gem_object *rk_obj)
475 {
476     struct drm_gem_object *obj = &rk_obj->base;
477     struct drm_device *drm = obj->dev;
478 
479     drm_free_large(rk_obj->pages);
480     sg_free_table(rk_obj->sgt);
481     kfree(rk_obj->sgt);
482     dma_free_attrs(drm->dev, obj->size, rk_obj->kvaddr, rk_obj->dma_handle, rk_obj->dma_attrs);
483 }
484 
485 static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj)
486 {
487     struct drm_device *drm = rk_obj->base.dev;
488     struct rockchip_drm_private *private = drm->dev_private;
489 
490     if (private->domain) {
491         rockchip_gem_iommu_unmap(rk_obj);
492     }
493 
494     if (rk_obj->buf_type == ROCKCHIP_GEM_BUF_TYPE_SHMEM) {
495         vunmap(rk_obj->kvaddr);
496         rockchip_gem_put_pages(rk_obj);
497     } else if (rk_obj->buf_type == ROCKCHIP_GEM_BUF_TYPE_SECURE) {
498         rockchip_gem_free_secure(rk_obj);
499     } else {
500         rockchip_gem_free_dma(rk_obj);
501     }
502 }
503 
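/*
 * Userspace mapping is split by buffer type: objects that carry a page
 * array are inserted with vm_map_pages(), CMA-only objects go through
 * dma_mmap_attrs(), and secure buffers are rejected before either path.
 */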
504 static int rockchip_drm_gem_object_mmap_iommu(struct drm_gem_object *obj, struct vm_area_struct *vma)
505 {
506     struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
507     unsigned int count = obj->size >> PAGE_SHIFT;
508     unsigned long user_count = vma_pages(vma);
509     if (user_count == 0) {
510         return -ENXIO;
511     }
512 
513     return vm_map_pages(vma, rk_obj->pages, count);
514 }
515 
516 static int rockchip_drm_gem_object_mmap_dma(struct drm_gem_object *obj, struct vm_area_struct *vma)
517 {
518     struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
519     struct drm_device *drm = obj->dev;
520 
521     return dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr, obj->size, rk_obj->dma_attrs);
522 }
523 
524 static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
525 {
526     int ret;
527     struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
528 
529     /* default is wc. */
530     if (rk_obj->flags & ROCKCHIP_BO_CACHABLE) {
531         vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
532     }
533 
534     /*
535      * We allocated a struct page table for rk_obj, so clear
536      * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
537      */
538     vma->vm_flags &= ~VM_PFNMAP;
539 
540     if (rk_obj->buf_type == ROCKCHIP_GEM_BUF_TYPE_SECURE) {
541         DRM_ERROR("Disallow mmap for secure buffer\n");
542         ret = -EINVAL;
543     } else if (rk_obj->pages) {
544         ret = rockchip_drm_gem_object_mmap_iommu(obj, vma);
545     } else {
546         ret = rockchip_drm_gem_object_mmap_dma(obj, vma);
547     }
548 
549     if (ret) {
550         drm_gem_vm_close(vma);
551     }
552 
553     return ret;
554 }
555 
556 int rockchip_gem_mmap_buf(struct drm_gem_object *obj, struct vm_area_struct *vma)
557 {
558     int ret;
559 
560     ret = drm_gem_mmap_obj(obj, obj->size, vma);
561     if (ret) {
562         return ret;
563     }
564 
565     return rockchip_drm_gem_object_mmap(obj, vma);
566 }
567 
568 /* drm driver mmap file operations */
569 int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma)
570 {
571     struct drm_gem_object *obj;
572     int ret;
573 
574     ret = drm_gem_mmap(filp, vma);
575     if (ret) {
576         return ret;
577     }
578 
579     /*
580      * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the
581      * whole buffer from the start.
582      */
583     vma->vm_pgoff = 0;
584 
585     obj = vma->vm_private_data;
586 
587     return rockchip_drm_gem_object_mmap(obj, vma);
588 }
589 
590 static void rockchip_gem_release_object(struct rockchip_gem_object *rk_obj)
591 {
592     drm_gem_object_release(&rk_obj->base);
593     kfree(rk_obj);
594 }
595 
596 static struct rockchip_gem_object *rockchip_gem_alloc_object(struct drm_device *drm, unsigned int size)
597 {
598     struct address_space *mapping;
599     struct rockchip_gem_object *rk_obj;
600     struct drm_gem_object *obj;
601 
602 #ifdef CONFIG_ARM_LPAE
603     gfp_t gfp_mask = GFP_HIGHUSER | __GFP_RECLAIMABLE | __GFP_DMA32;
604 #else
605     gfp_t gfp_mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
606 #endif
607     size = round_up(size, PAGE_SIZE);
608 
609     rk_obj = kzalloc(sizeof(*rk_obj), GFP_KERNEL);
610     if (!rk_obj) {
611         return ERR_PTR(-ENOMEM);
612     }
613 
614     obj = &rk_obj->base;
615 
616     drm_gem_object_init(drm, obj, size);
617 
618     mapping = file_inode(obj->filp)->i_mapping;
619     mapping_set_gfp_mask(mapping, gfp_mask);
620 
621     return rk_obj;
622 }
623 
624 struct rockchip_gem_object *rockchip_gem_create_object(struct drm_device *drm, unsigned int size, bool alloc_kmap,
625                                                        unsigned int flags)
626 {
627     struct rockchip_gem_object *rk_obj;
628     int ret;
629 
630     rk_obj = rockchip_gem_alloc_object(drm, size);
631     if (IS_ERR(rk_obj)) {
632         return rk_obj;
633     }
634     rk_obj->flags = flags;
635 
636     ret = rockchip_gem_alloc_buf(rk_obj, alloc_kmap);
637     if (ret) {
638         goto err_free_rk_obj;
639     }
640 
641     return rk_obj;
642 
643 err_free_rk_obj:
644     rockchip_gem_release_object(rk_obj);
645     return ERR_PTR(ret);
646 }
647 
648 /*
649  * rockchip_gem_destroy - destroy gem object
650  *
651  * dma_buf_unmap_attachment() and dma_buf_detach() are redefined when
652  * CONFIG_DMABUF_CACHE is enabled.
653  *
654  * Functionally the same as drm_prime_gem_destroy().
655  */
656 static void rockchip_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
657 {
658     struct dma_buf_attachment *attach;
659     struct dma_buf *dma_buf;
660 
661     attach = obj->import_attach;
662     if (sg) {
663         dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
664     }
665     dma_buf = attach->dmabuf;
666     dma_buf_detach(attach->dmabuf, attach);
667     /* remove the reference */
668     dma_buf_put(dma_buf);
669 }
670 
671 /*
672  * rockchip_gem_free_object - (struct drm_driver)->gem_free_object_unlocked
673  * callback function
674  */
675 void rockchip_gem_free_object(struct drm_gem_object *obj)
676 {
677     struct drm_device *drm = obj->dev;
678     struct rockchip_drm_private *private = drm->dev_private;
679     struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
680 
681     if (obj->import_attach) {
682         if (private->domain) {
683             rockchip_gem_iommu_unmap(rk_obj);
684         } else {
685             dma_unmap_sgtable(drm->dev, rk_obj->sgt, DMA_BIDIRECTIONAL, 0);
686         }
687         drm_free_large(rk_obj->pages);
688         if (IS_ENABLED(CONFIG_DMABUF_CACHE)) {
689             rockchip_gem_destroy(obj, rk_obj->sgt);
690         } else {
691             drm_prime_gem_destroy(obj, rk_obj->sgt);
692         }
693     } else {
694         rockchip_gem_free_buf(rk_obj);
695     }
696 
697     rockchip_gem_release_object(rk_obj);
698 }
699 
700 /*
701  * rockchip_gem_create_with_handle - allocate an object with the given
702  * size and create a gem handle on it
703  *
704  * returns a struct rockchip_gem_object* on success or ERR_PTR values
705  * on failure.
706  */
707 static struct rockchip_gem_object *rockchip_gem_create_with_handle(struct drm_file *file_priv, struct drm_device *drm,
708                                                                    unsigned int size, unsigned int *handle,
709                                                                    unsigned int flags)
710 {
711     struct rockchip_gem_object *rk_obj;
712     struct drm_gem_object *obj;
713     int ret;
714     bool alloc_kmap = flags & ROCKCHIP_BO_ALLOC_KMAP ? true : false;
715 
716     rk_obj = rockchip_gem_create_object(drm, size, alloc_kmap, flags);
717     if (IS_ERR(rk_obj)) {
718         return ERR_CAST(rk_obj);
719     }
720 
721     obj = &rk_obj->base;
722 
723     /*
724      * Allocate an ID in the IDR table where the object is registered;
725      * the returned handle is what userspace uses to refer to the object.
726      */
727     ret = drm_gem_handle_create(file_priv, obj, handle);
728     if (ret) {
729         goto err_handle_create;
730     }
731 
732     /* drop reference from allocate - handle holds it now. */
733     drm_gem_object_put(obj);
734 
735     return rk_obj;
736 
737 err_handle_create:
738     rockchip_gem_free_object(obj);
739 
740     return ERR_PTR(ret);
741 }
742 
743 /*
744  * rockchip_gem_dumb_create - (struct drm_driver)->dumb_create callback
745  * function
746  *
747  * This aligns the pitch and size arguments to the minimum required. wrap
748  * this into your own function if you need bigger alignment.
749  */
750 int rockchip_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev, struct drm_mode_create_dumb *args)
751 {
752     struct rockchip_gem_object *rk_obj;
753     u32 min_pitch = args->width * DIV_ROUND_UP(args->bpp, 0x8);
754 
755     /*
756      * align to 64 bytes since Mali requires it.
757      */
758     args->pitch = ALIGN(min_pitch, 0x40);
759     args->size = args->pitch * args->height;
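    /*
     * For example, a 1920x1080 XRGB8888 dumb buffer has bpp = 32, so
     * min_pitch = 1920 * 4 = 7680 bytes (already 64-byte aligned) and
     * size = 7680 * 1080 = 8294400 bytes; the allocator later rounds the
     * size up to a page multiple.
     */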
760 
761     rk_obj = rockchip_gem_create_with_handle(file_priv, dev, args->size, &args->handle, args->flags);
762 
763     return PTR_ERR_OR_ZERO(rk_obj);
764 }
765 
766 /*
767  * Allocate a sg_table for this GEM object.
768  * Note: Both the table's contents, and the sg_table itself must be freed by
769  *       the caller.
770  * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error.
771  */
772 struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj)
773 {
774     struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
775     struct drm_device *drm = obj->dev;
776     struct sg_table *sgt;
777     int ret;
778 
779     if (rk_obj->pages) {
780         return drm_prime_pages_to_sg(obj->dev, rk_obj->pages, rk_obj->num_pages);
781     }
782 
783     sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
784     if (!sgt) {
785         return ERR_PTR(-ENOMEM);
786     }
787 
788     ret = dma_get_sgtable_attrs(drm->dev, sgt, rk_obj->kvaddr, rk_obj->dma_addr, obj->size, rk_obj->dma_attrs);
789     if (ret) {
790         DRM_ERROR("failed to allocate sgt, %d\n", ret);
791         kfree(sgt);
792         return ERR_PTR(ret);
793     }
794 
795     return sgt;
796 }
797 
798 static int rockchip_gem_iommu_map_sg(struct drm_device *drm, struct dma_buf_attachment *attach, struct sg_table *sg,
799                                      struct rockchip_gem_object *rk_obj)
800 {
801     rk_obj->sgt = sg;
802     return rockchip_gem_iommu_map(rk_obj);
803 }
804 
805 static int rockchip_gem_dma_map_sg(struct drm_device *drm, struct dma_buf_attachment *attach, struct sg_table *sg,
806                                    struct rockchip_gem_object *rk_obj)
807 {
808     int err = dma_map_sgtable(drm->dev, sg, DMA_BIDIRECTIONAL, 0);
809     if (err) {
810         return err;
811     }
812 
813     if (drm_prime_get_contiguous_size(sg) < attach->dmabuf->size) {
814         DRM_ERROR("failed to map sg_table to contiguous linear address.\n");
815         dma_unmap_sgtable(drm->dev, sg, DMA_BIDIRECTIONAL, 0);
816         return -EINVAL;
817     }
818 
819     rk_obj->dma_addr = sg_dma_address(sg->sgl);
820     rk_obj->sgt = sg;
821     return 0;
822 }
823 
824 struct drm_gem_object *rockchip_gem_prime_import_sg_table(struct drm_device *drm, struct dma_buf_attachment *attach,
825                                                           struct sg_table *sg)
826 {
827     struct rockchip_drm_private *private = drm->dev_private;
828     struct rockchip_gem_object *rk_obj;
829     int ret;
830 
831     rk_obj = rockchip_gem_alloc_object(drm, attach->dmabuf->size);
832     if (IS_ERR(rk_obj)) {
833         return ERR_CAST(rk_obj);
834     }
835 
836     if (private->domain) {
837         ret = rockchip_gem_iommu_map_sg(drm, attach, sg, rk_obj);
838     } else {
839         ret = rockchip_gem_dma_map_sg(drm, attach, sg, rk_obj);
840     }
841 
842     if (ret < 0) {
843         DRM_ERROR("failed to import sg table: %d\n", ret);
844         goto err_free_rk_obj;
845     }
846 
847     rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT;
848     rk_obj->pages = drm_calloc_large(rk_obj->num_pages, sizeof(*rk_obj->pages));
849     if (!rk_obj->pages) {
850         DRM_ERROR("failed to allocate pages.\n");
851         ret = -ENOMEM;
852         goto err_free_rk_obj;
853     }
854 
855     ret = drm_prime_sg_to_page_addr_arrays(sg, rk_obj->pages, NULL, rk_obj->num_pages);
856     if (ret < 0) {
857         DRM_ERROR("invalid sgtable.\n");
858         drm_free_large(rk_obj->pages);
859         goto err_free_rk_obj;
860     }
861 
862     return &rk_obj->base;
863 
864 err_free_rk_obj:
865     rockchip_gem_release_object(rk_obj);
866     return ERR_PTR(ret);
867 }
868 
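/*
 * Kernel mapping for PRIME vmap: objects backed by a page array get a fresh
 * write-combined vmap(), while CMA objects return the kernel address from
 * dma_alloc_attrs() unless they were allocated with
 * DMA_ATTR_NO_KERNEL_MAPPING, in which case there is nothing to hand out.
 */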
869 void *rockchip_gem_prime_vmap(struct drm_gem_object *obj)
870 {
871     struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
872 
873     if (rk_obj->pages) {
874         return vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP, pgprot_writecombine(PAGE_KERNEL));
875     }
876 
877     if (rk_obj->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
878         return NULL;
879     }
880 
881     return rk_obj->kvaddr;
882 }
883 
884 void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
885 {
886     struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
887 
888     if (rk_obj->pages) {
889         vunmap(vaddr);
890         return;
891     }
892 
893     /* Nothing to do if allocated by DMA mapping API. */
894 }
895 
896 int rockchip_gem_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
897 {
898     struct drm_rockchip_gem_create *args = data;
899     struct rockchip_gem_object *rk_obj;
900 
901     rk_obj = rockchip_gem_create_with_handle(file_priv, dev, args->size, &args->handle, args->flags);
902     return PTR_ERR_OR_ZERO(rk_obj);
903 }
904 
905 int rockchip_gem_map_offset_ioctl(struct drm_device *drm, void *data, struct drm_file *file_priv)
906 {
907     struct drm_rockchip_gem_map_off *args = data;
908 
909     return drm_gem_dumb_map_offset(file_priv, drm, args->handle, &args->offset);
910 }
911 
912 int rockchip_gem_get_phys_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
913 {
914     struct drm_rockchip_gem_phys *args = data;
915     struct rockchip_gem_object *rk_obj;
916     struct drm_gem_object *obj;
917     int ret = 0;
918 
919     obj = drm_gem_object_lookup(file_priv, args->handle);
920     if (!obj) {
921         DRM_ERROR("failed to lookup gem object.\n");
922         return -EINVAL;
923     }
924     rk_obj = to_rockchip_obj(obj);
925     if (!(rk_obj->flags & ROCKCHIP_BO_CONTIG)) {
926         DRM_ERROR("Cannot get the physical address of a non-contiguous buffer.\n");
927         ret = -EINVAL;
928         goto out;
929     }
930 
931     args->phy_addr = page_to_phys(rk_obj->pages[0]);
932 
933 out:
934     drm_gem_object_put(obj);
935 
936     return ret;
937 }
938 
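/*
 * dma-buf begin/end CPU access: bracket CPU reads and writes of an exported
 * buffer with cache maintenance over the whole sg table.
 */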
939 int rockchip_gem_prime_begin_cpu_access(struct drm_gem_object *obj, enum dma_data_direction dir)
940 {
941     struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
942     struct drm_device *drm = obj->dev;
943 
944     if (!rk_obj->sgt) {
945         return 0;
946     }
947 
948     dma_sync_sg_for_cpu(drm->dev, rk_obj->sgt->sgl, rk_obj->sgt->nents, dir);
949     return 0;
950 }
951 
952 int rockchip_gem_prime_end_cpu_access(struct drm_gem_object *obj, enum dma_data_direction dir)
953 {
954     struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
955     struct drm_device *drm = obj->dev;
956 
957     if (!rk_obj->sgt) {
958         return 0;
959     }
960 
961     dma_sync_sg_for_device(drm->dev, rk_obj->sgt->sgl, rk_obj->sgt->nents, dir);
962     return 0;
963 }
964 
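/*
 * Partial cache-maintenance helper: walk the sg list, skip entries that end
 * before the requested offset, and sync only the overlapping part of each
 * entry with dma_sync_single_range_for_cpu()/_for_device() until the
 * requested length has been covered.
 */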
965 static int rockchip_gem_prime_sgl_sync_range(struct device *dev, struct scatterlist *sgl, unsigned int nents,
966                                              unsigned int offset, unsigned int length, enum dma_data_direction dir,
967                                              bool for_cpu)
968 {
969     int i;
970     struct scatterlist *sg;
971     unsigned int len = 0;
972     dma_addr_t sg_dma_addr;
973 
974     for_each_sg(sgl, sg, nents, i)
975     {
976         unsigned int sg_offset, sg_left, size = 0;
977 
978         len += sg->length;
979         if (len <= offset) {
980             continue;
981         }
982 
983         sg_dma_addr = sg_dma_address(sg);
984         sg_left = len - offset;
985         sg_offset = sg->length - sg_left;
986 
987         size = (length < sg_left) ? length : sg_left;
988         if (for_cpu) {
989             dma_sync_single_range_for_cpu(dev, sg_dma_addr, sg_offset, size, dir);
990         } else {
991             dma_sync_single_range_for_device(dev, sg_dma_addr, sg_offset, size, dir);
992         }
993 
994         offset += size;
995         length -= size;
996 
997         if (length == 0) {
998             break;
999         }
1000     }
1001 
1002     return 0;
1003 }
1004 
1005 int rockchip_gem_prime_begin_cpu_access_partial(struct drm_gem_object *obj, enum dma_data_direction dir,
1006                                                 unsigned int offset, unsigned int len)
1007 {
1008     struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
1009     struct drm_device *drm = obj->dev;
1010 
1011     if (!rk_obj->sgt) {
1012         return 0;
1013     }
1014 
1015     rockchip_gem_prime_sgl_sync_range(drm->dev, rk_obj->sgt->sgl, rk_obj->sgt->nents, offset, len, dir, true);
1016 
1017     return 0;
1018 }
1019 
1020 int rockchip_gem_prime_end_cpu_access_partial(struct drm_gem_object *obj, enum dma_data_direction dir,
1021                                               unsigned int offset, unsigned int len)
1022 {
1023     struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
1024     struct drm_device *drm = obj->dev;
1025 
1026     if (!rk_obj->sgt) {
1027         return 0;
1028     }
1029 
1030     rockchip_gem_prime_sgl_sync_range(drm->dev, rk_obj->sgt->sgl, rk_obj->sgt->nents, offset, len, dir, false);
1031 
1032     return 0;
1033 }
1034