// SPDX-License-Identifier: GPL-2.0
/*
 * ION Memory Allocator - dmabuf interface
 *
 * Copyright (c) 2019, Google, Inc.
 */

#include <linux/device.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "ion_private.h"

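/*
 * Duplicate the buffer's scatter-gather table so that each attachment gets
 * its own copy. Only the page links and lengths are copied; the dma_address
 * fields are cleared so the copy can be DMA-mapped independently per device.
 */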
static struct sg_table *dup_sg_table(struct sg_table *table)
{
	struct sg_table *new_table;
	int ret, i;
	struct scatterlist *sg, *new_sg;

	new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
	if (!new_table)
		return ERR_PTR(-ENOMEM);

	ret = sg_alloc_table(new_table, table->nents, GFP_KERNEL);
	if (ret) {
		kfree(new_table);
		return ERR_PTR(-ENOMEM);
	}

	new_sg = new_table->sgl;
	for_each_sg(table->sgl, sg, table->nents, i) {
		memcpy(new_sg, sg, sizeof(*sg));
		new_sg->dma_address = 0;
		new_sg = sg_next(new_sg);
	}

	return new_table;
}

static void free_duped_table(struct sg_table *table)
{
	sg_free_table(table);
	kfree(table);
}

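/*
 * Per-attachment state: the duplicated sg_table handed back from
 * map_dma_buf, the attaching device, and the node linking this attachment
 * into the buffer's attachment list.
 */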
struct ion_dma_buf_attachment {
	struct device *dev;
	struct sg_table *table;
	struct list_head list;
};

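/*
 * On attach, give the new attachment its own copy of the buffer's sg_table
 * and link it into the buffer's attachment list under the buffer lock;
 * detach undoes both and frees the copy.
 */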
static int ion_dma_buf_attach(struct dma_buf *dmabuf,
			      struct dma_buf_attachment *attachment)
{
	struct ion_dma_buf_attachment *a;
	struct sg_table *table;
	struct ion_buffer *buffer = dmabuf->priv;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	table = dup_sg_table(buffer->sg_table);
	if (IS_ERR(table)) {
		kfree(a);
		return -ENOMEM;
	}

	a->table = table;
	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->list);

	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->list, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}

static void ion_dma_buf_detach(struct dma_buf *dmabuf,
			       struct dma_buf_attachment *attachment)
{
	struct ion_dma_buf_attachment *a = attachment->priv;
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->list);
	mutex_unlock(&buffer->lock);
	free_duped_table(a->table);

	kfree(a);
}

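/*
 * Unless the heap supplies its own map_dma_buf/unmap_dma_buf callbacks, map
 * the attachment's private sg_table with dma_map_sg() and unmap it again
 * with dma_unmap_sg().
 */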
static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = attachment->dmabuf->priv;
	struct ion_heap *heap = buffer->heap;
	struct ion_dma_buf_attachment *a;
	struct sg_table *table;

	if (heap->buf_ops.map_dma_buf)
		return heap->buf_ops.map_dma_buf(attachment, direction);

	a = attachment->priv;
	table = a->table;

	if (!dma_map_sg(attachment->dev, table->sgl, table->nents, direction))
		return ERR_PTR(-ENOMEM);

	return table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
	struct ion_buffer *buffer = attachment->dmabuf->priv;
	struct ion_heap *heap = buffer->heap;

	if (heap->buf_ops.unmap_dma_buf)
		return heap->buf_ops.unmap_dma_buf(attachment, table,
						   direction);

	dma_unmap_sg(attachment->dev, table->sgl, table->nents, direction);
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;
	struct ion_heap *heap = buffer->heap;

	if (heap->buf_ops.release)
		return heap->buf_ops.release(dmabuf);

	ion_free(buffer);
}

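/*
 * Default CPU access handling: take a kernel mapping of the buffer and sync
 * every attachment's sg_table for the CPU on begin_cpu_access, then drop the
 * mapping and sync back for the device on end_cpu_access.
 */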
static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	struct ion_heap *heap = buffer->heap;
	void *vaddr;
	struct ion_dma_buf_attachment *a;
	int ret;

	if (heap->buf_ops.begin_cpu_access)
		return heap->buf_ops.begin_cpu_access(dmabuf, direction);

	/*
	 * TODO: Move this elsewhere because we don't always need a vaddr
	 * FIXME: Why do we need a vaddr here?
	 */
	ret = 0;
	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto unlock;
	}

	list_for_each_entry(a, &buffer->attachments, list) {
		dma_sync_sg_for_cpu(a->dev, a->table->sgl, a->table->nents,
				    direction);
	}

unlock:
	mutex_unlock(&buffer->lock);
	return ret;
}

static int
ion_dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
				     enum dma_data_direction direction,
				     unsigned int offset, unsigned int len)
{
	struct ion_buffer *buffer = dmabuf->priv;
	struct ion_heap *heap = buffer->heap;

	/*
	 * Partial cache flush/invalidate is only allowed when the heap
	 * provides its own implementation; it may be vendor specific, so the
	 * ion core does not provide a default.
	 */
	if (!heap->buf_ops.begin_cpu_access_partial)
		return -EOPNOTSUPP;

	return heap->buf_ops.begin_cpu_access_partial(dmabuf, direction, offset,
						      len);
}

static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
				      enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	struct ion_heap *heap = buffer->heap;
	struct ion_dma_buf_attachment *a;

	if (heap->buf_ops.end_cpu_access)
		return heap->buf_ops.end_cpu_access(dmabuf, direction);

	mutex_lock(&buffer->lock);

	ion_buffer_kmap_put(buffer);
	list_for_each_entry(a, &buffer->attachments, list) {
		dma_sync_sg_for_device(a->dev, a->table->sgl, a->table->nents,
				       direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

static int ion_dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,
					      enum dma_data_direction direction,
					      unsigned int offset,
					      unsigned int len)
{
	struct ion_buffer *buffer = dmabuf->priv;
	struct ion_heap *heap = buffer->heap;

	/*
	 * Partial cache flush/invalidate is only allowed when the heap
	 * provides its own implementation; it may be vendor specific, so the
	 * ion core does not provide a default.
	 */
	if (!heap->buf_ops.end_cpu_access_partial)
		return -EOPNOTSUPP;

	return heap->buf_ops.end_cpu_access_partial(dmabuf, direction, offset,
						    len);
}

static void *ion_dma_buf_map(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;
	struct ion_heap *heap = buffer->heap;

	if (heap->buf_ops.map)
		return heap->buf_ops.map(dmabuf, offset);

	return ion_buffer_kmap_get(buffer) + offset * PAGE_SIZE;
}

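/*
 * Map the buffer into a userspace VMA. If the heap does not provide its own
 * mmap, fall back to ion_heap_map_user(), using write-combined page
 * protection for uncached buffers.
 */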
static int ion_dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	struct ion_heap *heap = buffer->heap;
	int ret;

	/* now map it to userspace */
	if (heap->buf_ops.mmap) {
		ret = heap->buf_ops.mmap(dmabuf, vma);
	} else {
		mutex_lock(&buffer->lock);
		if (!(buffer->flags & ION_FLAG_CACHED))
			vma->vm_page_prot =
				pgprot_writecombine(vma->vm_page_prot);

		ret = ion_heap_map_user(heap, buffer, vma);
		mutex_unlock(&buffer->lock);
	}

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n", __func__);

	return ret;
}

static void ion_dma_buf_unmap(struct dma_buf *dmabuf, unsigned long offset,
			      void *addr)
{
	struct ion_buffer *buffer = dmabuf->priv;
	struct ion_heap *heap = buffer->heap;

	if (!heap->buf_ops.unmap)
		return;
	heap->buf_ops.unmap(dmabuf, offset, addr);
}

static void *ion_dma_buf_vmap(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;
	struct ion_heap *heap = buffer->heap;

	if (!heap->buf_ops.vmap)
		return ERR_PTR(-EOPNOTSUPP);

	return heap->buf_ops.vmap(dmabuf);
}

static void ion_dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
{
	struct ion_buffer *buffer = dmabuf->priv;
	struct ion_heap *heap = buffer->heap;

	if (!heap->buf_ops.vunmap)
		return;

	return heap->buf_ops.vunmap(dmabuf, vaddr);
}

static int ion_dma_buf_get_flags(struct dma_buf *dmabuf, unsigned long *flags)
{
	struct ion_buffer *buffer = dmabuf->priv;
	struct ion_heap *heap = buffer->heap;

	if (!heap->buf_ops.get_flags)
		return -EOPNOTSUPP;

	return heap->buf_ops.get_flags(dmabuf, flags);
}

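/*
 * dma-buf callbacks for ION buffers. Each operation defers to the owning
 * heap's buf_ops when provided, falling back to the generic implementations
 * above otherwise.
 */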
static const struct dma_buf_ops dma_buf_ops = {
	.attach = ion_dma_buf_attach,
	.detach = ion_dma_buf_detach,
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.begin_cpu_access_partial = ion_dma_buf_begin_cpu_access_partial,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.end_cpu_access_partial = ion_dma_buf_end_cpu_access_partial,
	.mmap = ion_dma_buf_mmap,
	.map = ion_dma_buf_map,
	.unmap = ion_dma_buf_unmap,
	.vmap = ion_dma_buf_vmap,
	.vunmap = ion_dma_buf_vunmap,
	.get_flags = ion_dma_buf_get_flags,
};

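/*
 * ion_dmabuf_alloc - allocate an ion_buffer and export it as a dma-buf.
 *
 * Typical use by a caller (a minimal sketch, assuming the caller wants a
 * file descriptor to hand to userspace; dma_buf_fd() and dma_buf_put() are
 * the standard dma-buf helpers, not part of this file):
 *
 *	dmabuf = ion_dmabuf_alloc(dev, len, heap_id_mask, flags);
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 *	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
 *	if (fd < 0)
 *		dma_buf_put(dmabuf);
 */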
struct dma_buf *ion_dmabuf_alloc(struct ion_device *dev, size_t len,
				 unsigned int heap_id_mask,
				 unsigned int flags)
{
	struct ion_buffer *buffer;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct dma_buf *dmabuf;

	pr_debug("%s: len %zu heap_id_mask %u flags %x\n", __func__,
		 len, heap_id_mask, flags);

	buffer = ion_buffer_alloc(dev, len, heap_id_mask, flags);
	if (IS_ERR(buffer))
		return ERR_CAST(buffer);

	exp_info.ops = &dma_buf_ops;
	exp_info.size = buffer->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buffer;

	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf))
		ion_buffer_destroy(dev, buffer);

	return dmabuf;
}