// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
 * Copyright (c) 2019 Fuzhou Rockchip Electronics Co., Ltd
 *
 * author:
 *	Alpha Lin, alpha.lin@rock-chips.com
 *	Randy Li, randy.li@rock-chips.com
 *	Ding Wei, leo.ding@rock-chips.com
 *
 */
#include <linux/delay.h>
#include <linux/dma-buf.h>
#include <linux/dma-iommu.h>
#include <linux/iommu.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>

#ifdef CONFIG_ARM_DMA_USE_IOMMU
#include <asm/dma-iommu.h>
#endif
#include <soc/rockchip/rockchip_iommu.h>

#include "mpp_debug.h"
#include "mpp_iommu.h"

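/*
 * Look up an already-imported buffer in the session's used_list by fd.
 * The fd is resolved to its dma_buf and entries are matched on the
 * dma_buf pointer, since several fds may refer to the same dma_buf.
 */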
static struct mpp_dma_buffer *
mpp_dma_find_buffer_fd(struct mpp_dma_session *dma, int fd)
{
	struct dma_buf *dmabuf;
	struct mpp_dma_buffer *out = NULL;
	struct mpp_dma_buffer *buffer = NULL, *n;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return NULL;

	mutex_lock(&dma->list_mutex);
	list_for_each_entry_safe(buffer, n, &dma->used_list, link) {
		/*
		 * Several fds may be dup'ed from and point to the same
		 * dma_buf, so compare the dma_buf pointer instead of the fd.
		 */
		if (buffer->dmabuf == dmabuf) {
			out = buffer;
			break;
		}
	}
	mutex_unlock(&dma->list_mutex);
	dma_buf_put(dmabuf);

	return out;
}

/*
 * Release the buffer from its current list.
 * kref release callback; the caller must hold dma->list_mutex.
 */
static void mpp_dma_release_buffer(struct kref *ref)
{
	struct mpp_dma_buffer *buffer =
		container_of(ref, struct mpp_dma_buffer, ref);

	buffer->dma->buffer_count--;
	list_move_tail(&buffer->link, &buffer->dma->unused_list);

	dma_buf_unmap_attachment(buffer->attach, buffer->sgt, buffer->dir);
	dma_buf_detach(buffer->dmabuf, buffer->attach);
	dma_buf_put(buffer->dmabuf);
}

/* Remove the oldest buffer when the buffer count exceeds the limit */
static int
mpp_dma_remove_extra_buffer(struct mpp_dma_session *dma)
{
	struct mpp_dma_buffer *n;
	struct mpp_dma_buffer *oldest = NULL, *buffer = NULL;
	ktime_t oldest_time = ktime_set(0, 0);

	if (dma->buffer_count > dma->max_buffers) {
		mutex_lock(&dma->list_mutex);
		list_for_each_entry_safe(buffer, n, &dma->used_list, link) {
			if (ktime_to_ns(oldest_time) == 0 ||
			    ktime_after(oldest_time, buffer->last_used)) {
				oldest_time = buffer->last_used;
				oldest = buffer;
			}
		}
		if (oldest)
			kref_put(&oldest->ref, mpp_dma_release_buffer);
		mutex_unlock(&dma->list_mutex);
	}

	return 0;
}

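/*
 * Drop one reference on an imported buffer; the final put unmaps it and
 * returns it to the session's unused pool.
 */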
int mpp_dma_release(struct mpp_dma_session *dma,
		    struct mpp_dma_buffer *buffer)
{
	mutex_lock(&dma->list_mutex);
	kref_put(&buffer->ref, mpp_dma_release_buffer);
	mutex_unlock(&dma->list_mutex);

	return 0;
}

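/* Find the buffer imported for @fd in this session and drop one reference. */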
int mpp_dma_release_fd(struct mpp_dma_session *dma, int fd)
{
	struct device *dev = dma->dev;
	struct mpp_dma_buffer *buffer = NULL;

	buffer = mpp_dma_find_buffer_fd(dma, fd);
	if (IS_ERR_OR_NULL(buffer)) {
		dev_err(dev, "cannot find buffer for fd %d in list\n", fd);

		return -EINVAL;
	}

	mutex_lock(&dma->list_mutex);
	kref_put(&buffer->ref, mpp_dma_release_buffer);
	mutex_unlock(&dma->list_mutex);

	return 0;
}

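/*
 * Allocate a page-aligned coherent DMA buffer and record its iova.
 * Returns NULL on failure; pair with mpp_dma_free().
 */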
struct mpp_dma_buffer *
mpp_dma_alloc(struct device *dev, size_t size)
{
	size_t align_size;
	dma_addr_t iova;
	struct mpp_dma_buffer *buffer;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return NULL;

	align_size = PAGE_ALIGN(size);
	buffer->vaddr = dma_alloc_coherent(dev, align_size, &iova, GFP_KERNEL);
	if (!buffer->vaddr)
		goto fail_dma_alloc;

	buffer->size = align_size;
	buffer->iova = iova;
	buffer->dev = dev;

	return buffer;
fail_dma_alloc:
	kfree(buffer);
	return NULL;
}

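/* Free a buffer returned by mpp_dma_alloc(). */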
int mpp_dma_free(struct mpp_dma_buffer *buffer)
{
	dma_free_coherent(buffer->dev, buffer->size,
			  buffer->vaddr, buffer->iova);
	buffer->vaddr = NULL;
	buffer->iova = 0;
	buffer->size = 0;
	buffer->dev = NULL;
	kfree(buffer);

	return 0;
}

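/*
 * Import a dma-buf fd into the session and map it for device access.
 * Returns the existing buffer with an extra reference when the fd was
 * imported before; otherwise a free slot from the unused pool is
 * attached and mapped. Returns an ERR_PTR on failure, or NULL when the
 * fd itself cannot be resolved to a dma_buf.
 */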
struct mpp_dma_buffer *mpp_dma_import_fd(struct mpp_iommu_info *iommu_info,
					 struct mpp_dma_session *dma,
					 int fd)
{
	int ret = 0;
	struct sg_table *sgt;
	struct dma_buf *dmabuf;
	struct mpp_dma_buffer *buffer;
	struct dma_buf_attachment *attach;

	if (!dma) {
		mpp_err("dma session is null\n");
		return ERR_PTR(-EINVAL);
	}

	/* Remove the oldest buffer before adding a new one */
	mpp_dma_remove_extra_buffer(dma);

	/* Check whether the fd is already imported into this dma session */
	buffer = mpp_dma_find_buffer_fd(dma, fd);
	if (!IS_ERR_OR_NULL(buffer)) {
		if (kref_get_unless_zero(&buffer->ref)) {
			buffer->last_used = ktime_get();
			return buffer;
		}
		dev_dbg(dma->dev, "missing the fd %d\n", fd);
	}

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf)) {
		mpp_err("dma_buf_get fd %d failed\n", fd);
		return NULL;
	}
	/* A new DMA buffer */
	mutex_lock(&dma->list_mutex);
	buffer = list_first_entry_or_null(&dma->unused_list,
					  struct mpp_dma_buffer, link);
	if (!buffer) {
		ret = -ENOMEM;
		mutex_unlock(&dma->list_mutex);
		goto fail;
	}
	list_del_init(&buffer->link);
	mutex_unlock(&dma->list_mutex);

	buffer->dmabuf = dmabuf;
	buffer->dir = DMA_BIDIRECTIONAL;
	buffer->last_used = ktime_get();

	attach = dma_buf_attach(buffer->dmabuf, dma->dev);
	if (IS_ERR(attach)) {
		mpp_err("dma_buf_attach fd %d failed\n", fd);
		ret = PTR_ERR(attach);
		goto fail_attach;
	}

	sgt = dma_buf_map_attachment(attach, buffer->dir);
	if (IS_ERR(sgt)) {
		mpp_err("dma_buf_map_attachment fd %d failed\n", fd);
		ret = PTR_ERR(sgt);
		goto fail_map;
	}
	buffer->iova = sg_dma_address(sgt->sgl);
	buffer->size = sg_dma_len(sgt->sgl);
	buffer->attach = attach;
	buffer->sgt = sgt;
	buffer->dma = dma;

	kref_init(&buffer->ref);
	/* Take an extra reference for the user outside the buffer pool */
	kref_get(&buffer->ref);

	mutex_lock(&dma->list_mutex);
	dma->buffer_count++;
	list_add_tail(&buffer->link, &dma->used_list);
	mutex_unlock(&dma->list_mutex);

	return buffer;

fail_map:
	dma_buf_detach(buffer->dmabuf, attach);
fail_attach:
	mutex_lock(&dma->list_mutex);
	list_add_tail(&buffer->link, &dma->unused_list);
	mutex_unlock(&dma->list_mutex);
fail:
	dma_buf_put(dmabuf);
	return ERR_PTR(ret);
}

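/* Undo mpp_dma_map_kernel(): vunmap the dma-buf and end CPU access. */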
int mpp_dma_unmap_kernel(struct mpp_dma_session *dma,
			 struct mpp_dma_buffer *buffer)
{
	void *vaddr = buffer->vaddr;
	struct dma_buf *dmabuf = buffer->dmabuf;

	if (IS_ERR_OR_NULL(vaddr) ||
	    IS_ERR_OR_NULL(dmabuf))
		return -EINVAL;

	dma_buf_vunmap(dmabuf, vaddr);
	buffer->vaddr = NULL;

	dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);

	return 0;
}

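/*
 * Begin CPU access on the imported dma-buf and vmap it into the kernel
 * address space; the mapping is stored in buffer->vaddr.
 */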
int mpp_dma_map_kernel(struct mpp_dma_session *dma,
		       struct mpp_dma_buffer *buffer)
{
	int ret;
	void *vaddr;
	struct dma_buf *dmabuf = buffer->dmabuf;

	if (IS_ERR_OR_NULL(dmabuf))
		return -EINVAL;

	ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
	if (ret) {
		dev_dbg(dma->dev, "can't access the dma buffer\n");
		goto failed_access;
	}

	vaddr = dma_buf_vmap(dmabuf);
	if (!vaddr) {
		dev_dbg(dma->dev, "can't vmap the dma buffer\n");
		ret = -EIO;
		goto failed_vmap;
	}

	buffer->vaddr = vaddr;

	return 0;

failed_vmap:
	dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
failed_access:

	return ret;
}

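/* Put every buffer still on the used list, then free the session itself. */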
int mpp_dma_session_destroy(struct mpp_dma_session *dma)
{
	struct mpp_dma_buffer *n, *buffer = NULL;

	if (!dma)
		return -EINVAL;

	mutex_lock(&dma->list_mutex);
	list_for_each_entry_safe(buffer, n, &dma->used_list, link) {
		kref_put(&buffer->ref, mpp_dma_release_buffer);
	}
	mutex_unlock(&dma->list_mutex);

	kfree(dma);

	return 0;
}

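/*
 * Allocate and initialise a dma session: cap max_buffers at
 * MPP_SESSION_MAX_BUFFERS and put the preallocated buffer slots on the
 * unused list.
 */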
struct mpp_dma_session *
mpp_dma_session_create(struct device *dev, u32 max_buffers)
{
	int i;
	struct mpp_dma_session *dma = NULL;
	struct mpp_dma_buffer *buffer = NULL;

	dma = kzalloc(sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return NULL;

	mutex_init(&dma->list_mutex);
	INIT_LIST_HEAD(&dma->unused_list);
	INIT_LIST_HEAD(&dma->used_list);

	if (max_buffers > MPP_SESSION_MAX_BUFFERS) {
		mpp_debug(DEBUG_IOCTL, "session_max_buffer %d exceeds the limit %d, clamping\n",
			  max_buffers, MPP_SESSION_MAX_BUFFERS);
		dma->max_buffers = MPP_SESSION_MAX_BUFFERS;
	} else {
		dma->max_buffers = max_buffers;
	}

	for (i = 0; i < ARRAY_SIZE(dma->dma_bufs); i++) {
		buffer = &dma->dma_bufs[i];
		buffer->dma = dma;
		INIT_LIST_HEAD(&buffer->link);
		list_add_tail(&buffer->link, &dma->unused_list);
	}
	dma->dev = dev;

	return dma;
}

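/* Detach the device's iommu group from its domain; no-op when info is NULL. */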
int mpp_iommu_detach(struct mpp_iommu_info *info)
{
	if (!info)
		return 0;

	iommu_detach_group(info->domain, info->group);
	return 0;
}

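/* Re-attach the device's iommu group to its domain; no-op when info is NULL. */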
int mpp_iommu_attach(struct mpp_iommu_info *info)
{
	if (!info)
		return 0;

	return iommu_attach_group(info->domain, info->group);
}

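/*
 * Resolve the iommu attached to @dev: find the iommu platform device via
 * the "iommus" phandle, take the device's iommu group and domain, and
 * bundle them into a freshly allocated mpp_iommu_info.
 */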
struct mpp_iommu_info *
mpp_iommu_probe(struct device *dev)
{
	int ret = 0;
	struct device_node *np = NULL;
	struct platform_device *pdev = NULL;
	struct mpp_iommu_info *info = NULL;
	struct iommu_domain *domain = NULL;
	struct iommu_group *group = NULL;
#ifdef CONFIG_ARM_DMA_USE_IOMMU
	struct dma_iommu_mapping *mapping;
#endif
	np = of_parse_phandle(dev->of_node, "iommus", 0);
	if (!np || !of_device_is_available(np)) {
		mpp_err("failed to get device node\n");
		return ERR_PTR(-ENODEV);
	}

	pdev = of_find_device_by_node(np);
	of_node_put(np);
	if (!pdev) {
		mpp_err("failed to get platform device\n");
		return ERR_PTR(-ENODEV);
	}

	group = iommu_group_get(dev);
	if (!group) {
		ret = -EINVAL;
		goto err_put_pdev;
	}

	/*
	 * On 32-bit ARM the group has no default domain; the domain is
	 * stored in the dma_iommu_mapping created by the arch code, so
	 * fetch it from there before falling back to the device domain.
	 */
#ifdef CONFIG_ARM_DMA_USE_IOMMU
	if (!iommu_group_default_domain(group)) {
		mapping = to_dma_iommu_mapping(dev);
		WARN_ON(!mapping);
		domain = mapping->domain;
	}
#endif
	if (!domain) {
		domain = iommu_get_domain_for_dev(dev);
		if (!domain) {
			ret = -EINVAL;
			goto err_put_group;
		}
	}

	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
	if (!info) {
		ret = -ENOMEM;
		goto err_put_group;
	}

	init_rwsem(&info->rw_sem);
	info->dev = dev;
	info->pdev = pdev;
	info->group = group;
	info->domain = domain;

	return info;

err_put_group:
	if (group)
		iommu_group_put(group);
err_put_pdev:
	if (pdev)
		platform_device_put(pdev);

	return ERR_PTR(ret);
}

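/* Drop the group and platform-device references taken by mpp_iommu_probe(). */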
int mpp_iommu_remove(struct mpp_iommu_info *info)
{
	if (!info)
		return 0;

	iommu_group_put(info->group);
	platform_device_put(info->pdev);

	return 0;
}

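/*
 * Reset the iommu for @dev by disabling and re-enabling it through the
 * rockchip iommu helpers; skipped when info is NULL or skip_refresh is set.
 */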
int mpp_iommu_refresh(struct mpp_iommu_info *info, struct device *dev)
{
	int ret;

	if (!info || info->skip_refresh)
		return 0;

	/* disable iommu */
	ret = rockchip_iommu_disable(dev);
	if (ret)
		return ret;
	/* re-enable iommu */
	return rockchip_iommu_enable(dev);
}

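/* Flush the entire IOTLB of the domain, when one is present. */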
int mpp_iommu_flush_tlb(struct mpp_iommu_info *info)
{
	if (!info)
		return 0;

	if (info->domain && info->domain->ops)
		iommu_flush_iotlb_all(info->domain);

	return 0;
}