// SPDX-License-Identifier: GPL-2.0
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <uapi/linux/dma-heap.h>

#include "heap-helpers.h"

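/*
 * Initialize the fields shared by all helper-based heaps; @free is the
 * heap's callback for returning the buffer once the dma-buf is released.
 */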
void init_heap_helper_buffer(struct heap_helper_buffer *buffer,
			     void (*free)(struct heap_helper_buffer *))
{
	buffer->priv_virt = NULL;
	mutex_init(&buffer->lock);
	buffer->vmap_cnt = 0;
	buffer->vaddr = NULL;
	buffer->pagecount = 0;
	buffer->pages = NULL;
	INIT_LIST_HEAD(&buffer->attachments);
	buffer->free = free;
}

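/*
 * Export @buffer as a dma-buf backed by heap_helper_ops; the dma-buf
 * takes ownership and frees the buffer through its release callback.
 */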
struct dma_buf *heap_helper_export_dmabuf(struct heap_helper_buffer *buffer,
					  int fd_flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.exp_name = dma_heap_get_name(buffer->heap);
	exp_info.ops = &heap_helper_ops;
	exp_info.size = buffer->size;
	exp_info.flags = fd_flags;
	exp_info.priv = buffer;

	return dma_buf_export(&exp_info);
}

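/* Map the buffer's pages into a contiguous kernel virtual range. */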
static void *dma_heap_map_kernel(struct heap_helper_buffer *buffer)
{
	void *vaddr;

	vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL);
	if (!vaddr)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}

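/*
 * Final teardown on dma-buf release: warn if a kernel mapping leaked,
 * then hand the buffer back to the owning heap's free callback.
 */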
static void dma_heap_buffer_destroy(struct heap_helper_buffer *buffer)
{
	if (buffer->vmap_cnt > 0) {
		WARN(1, "%s: buffer still mapped in the kernel\n", __func__);
		vunmap(buffer->vaddr);
	}

	buffer->free(buffer);
}

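/*
 * Take a vmap reference, creating the kernel mapping on first use.
 * Caller must hold buffer->lock.
 */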
static void *dma_heap_buffer_vmap_get(struct heap_helper_buffer *buffer)
{
	void *vaddr;

	if (buffer->vmap_cnt) {
		buffer->vmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = dma_heap_map_kernel(buffer);
	if (IS_ERR(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->vmap_cnt++;
	return vaddr;
}

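/* Drop a vmap reference; the mapping goes away on the last put. */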
static void dma_heap_buffer_vmap_put(struct heap_helper_buffer *buffer)
{
	if (!--buffer->vmap_cnt) {
		vunmap(buffer->vaddr);
		buffer->vaddr = NULL;
	}
}

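/* Per-device attachment state: each attachment carries its own sg_table. */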
struct dma_heaps_attachment {
	struct device *dev;
	struct sg_table table;
	struct list_head list;
};

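/* Build a private sg_table for the attachment and track it on the buffer. */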
static int dma_heap_attach(struct dma_buf *dmabuf,
			   struct dma_buf_attachment *attachment)
{
	struct dma_heaps_attachment *a;
	struct heap_helper_buffer *buffer = dmabuf->priv;
	int ret;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	ret = sg_alloc_table_from_pages(&a->table, buffer->pages,
					buffer->pagecount, 0,
					buffer->pagecount << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret) {
		kfree(a);
		return ret;
	}

	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->list);

	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->list, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}

static void dma_heap_detach(struct dma_buf *dmabuf,
			    struct dma_buf_attachment *attachment)
{
	struct dma_heaps_attachment *a = attachment->priv;
	struct heap_helper_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->list);
	mutex_unlock(&buffer->lock);

	sg_free_table(&a->table);
	kfree(a);
}

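/* Map the attachment's sg_table for DMA to the attached device. */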
static
struct sg_table *dma_heap_map_dma_buf(struct dma_buf_attachment *attachment,
				      enum dma_data_direction direction)
{
	struct dma_heaps_attachment *a = attachment->priv;
	struct sg_table *table = &a->table;
	int ret;

	ret = dma_map_sgtable(attachment->dev, table, direction, 0);
	if (ret)
		table = ERR_PTR(ret);
	return table;
}

static void dma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *table,
				   enum dma_data_direction direction)
{
	dma_unmap_sgtable(attachment->dev, table, direction, 0);
}

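/* Fault handler: back the userspace mapping one page at a time. */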
static vm_fault_t dma_heap_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct heap_helper_buffer *buffer = vma->vm_private_data;

	/* pgoff == pagecount is already out of range, so use >=, not > */
	if (vmf->pgoff >= buffer->pagecount)
		return VM_FAULT_SIGBUS;

	vmf->page = buffer->pages[vmf->pgoff];
	get_page(vmf->page);

	return 0;
}

static const struct vm_operations_struct dma_heap_vm_ops = {
	.fault = dma_heap_vm_fault,
};

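/* mmap is fault-driven; only shared mappings make sense for a shared buffer. */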
static int dma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct heap_helper_buffer *buffer = dmabuf->priv;

	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
		return -EINVAL;

	vma->vm_ops = &dma_heap_vm_ops;
	vma->vm_private_data = buffer;

	return 0;
}

static void dma_heap_dma_buf_release(struct dma_buf *dmabuf)
{
	struct heap_helper_buffer *buffer = dmabuf->priv;

	dma_heap_buffer_destroy(buffer);
}

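/*
 * CPU access bracketing: invalidate any kernel vmap range and sync each
 * device mapping so the CPU sees up-to-date data.
 */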
static int dma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
					     enum dma_data_direction direction)
{
	struct heap_helper_buffer *buffer = dmabuf->priv;
	struct dma_heaps_attachment *a;
	int ret = 0;

	mutex_lock(&buffer->lock);

	if (buffer->vmap_cnt)
		invalidate_kernel_vmap_range(buffer->vaddr, buffer->size);

	/*
	 * Use the sgtable helper so the sync covers orig_nents entries,
	 * matching the dma_map_sgtable() call in dma_heap_map_dma_buf().
	 */
	list_for_each_entry(a, &buffer->attachments, list)
		dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
	mutex_unlock(&buffer->lock);

	return ret;
}

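/* Flush CPU-side writes back out to the attached devices. */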
static int dma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
					   enum dma_data_direction direction)
{
	struct heap_helper_buffer *buffer = dmabuf->priv;
	struct dma_heaps_attachment *a;

	mutex_lock(&buffer->lock);

	if (buffer->vmap_cnt)
		flush_kernel_vmap_range(buffer->vaddr, buffer->size);

	/* Sync the full sgtable, as in begin_cpu_access above. */
	list_for_each_entry(a, &buffer->attachments, list)
		dma_sync_sgtable_for_device(a->dev, &a->table, direction);
	mutex_unlock(&buffer->lock);

	return 0;
}

static void *dma_heap_dma_buf_vmap(struct dma_buf *dmabuf)
{
	struct heap_helper_buffer *buffer = dmabuf->priv;
	void *vaddr;

	mutex_lock(&buffer->lock);
	vaddr = dma_heap_buffer_vmap_get(buffer);
	mutex_unlock(&buffer->lock);

	return vaddr;
}

static void dma_heap_dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
{
	struct heap_helper_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	dma_heap_buffer_vmap_put(buffer);
	mutex_unlock(&buffer->lock);
}

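/* dma-buf callbacks shared by every heap built on these helpers. */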
const struct dma_buf_ops heap_helper_ops = {
	.map_dma_buf = dma_heap_map_dma_buf,
	.unmap_dma_buf = dma_heap_unmap_dma_buf,
	.mmap = dma_heap_mmap,
	.release = dma_heap_dma_buf_release,
	.attach = dma_heap_attach,
	.detach = dma_heap_detach,
	.begin_cpu_access = dma_heap_dma_buf_begin_cpu_access,
	.end_cpu_access = dma_heap_dma_buf_end_cpu_access,
	.vmap = dma_heap_dma_buf_vmap,
	.vunmap = dma_heap_dma_buf_vunmap,
};