/* exynos_drm_dmabuf.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <drm/drmP.h>
#include <drm/exynos_drm.h>
#include <linux/dma-buf.h>

#include "exynos_drm_dmabuf.h"
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"

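/*
 * Per-attachment bookkeeping: a private copy of the backing sg_table plus
 * the DMA direction it was last mapped with, so that a repeated map request
 * in the same direction can return the cached table.
 */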
struct exynos_drm_dmabuf_attachment {
	struct sg_table sgt;
	enum dma_data_direction dir;
	bool is_mapped;
};

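/* resolve the exynos GEM object stashed in the dma-buf's private data. */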
static struct exynos_drm_gem_obj *dma_buf_to_obj(struct dma_buf *buf)
{
	return to_exynos_gem_obj(buf->priv);
}

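/*
 * attach callback: allocate the per-attachment state and mark it as
 * unmapped (DMA_NONE) until the first map_dma_buf call.
 */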
static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf,
					struct device *dev,
					struct dma_buf_attachment *attach)
{
	struct exynos_drm_dmabuf_attachment *exynos_attach;

	exynos_attach = kzalloc(sizeof(*exynos_attach), GFP_KERNEL);
	if (!exynos_attach)
		return -ENOMEM;

	exynos_attach->dir = DMA_NONE;
	attach->priv = exynos_attach;

	return 0;
}

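/*
 * detach callback: undo the cached DMA mapping (if one was made), release
 * the private sg_table and free the per-attachment state.
 */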
static void exynos_gem_detach_dma_buf(struct dma_buf *dmabuf,
					struct dma_buf_attachment *attach)
{
	struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
	struct sg_table *sgt;

	if (!exynos_attach)
		return;

	sgt = &exynos_attach->sgt;

	if (exynos_attach->dir != DMA_NONE)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
				exynos_attach->dir);

	sg_free_table(sgt);
	kfree(exynos_attach);
	attach->priv = NULL;
}

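/*
 * map_dma_buf callback: clone the GEM buffer's sg_table into the
 * per-attachment copy, map it for DMA and cache the result; a later call
 * with the same direction just returns the cached table.
 */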
static struct sg_table *
		exynos_gem_map_dma_buf(struct dma_buf_attachment *attach,
					enum dma_data_direction dir)
{
	struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
	struct exynos_drm_gem_obj *gem_obj = dma_buf_to_obj(attach->dmabuf);
	struct drm_device *dev = gem_obj->base.dev;
	struct exynos_drm_gem_buf *buf;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt = NULL;
	unsigned int i;
	int nents, ret;

	/* just return the current sgt if already requested. */
	if (exynos_attach->dir == dir && exynos_attach->is_mapped)
		return &exynos_attach->sgt;

	buf = gem_obj->buffer;
	if (!buf) {
		DRM_ERROR("buffer is null.\n");
		return ERR_PTR(-ENOMEM);
	}

	sgt = &exynos_attach->sgt;

	ret = sg_alloc_table(sgt, buf->sgt->orig_nents, GFP_KERNEL);
	if (ret) {
		DRM_ERROR("failed to alloc sgt.\n");
		return ERR_PTR(ret);
	}

	mutex_lock(&dev->struct_mutex);

	/* copy the buffer's scatterlist entries into the private table. */
	rd = buf->sgt->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	if (dir != DMA_NONE) {
		nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
		if (!nents) {
			DRM_ERROR("failed to map sg list for DMA.\n");
			sg_free_table(sgt);
			sgt = ERR_PTR(-EIO);
			goto err_unlock;
		}
	}

	exynos_attach->is_mapped = true;
	exynos_attach->dir = dir;
	attach->priv = exynos_attach;

	DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size);

err_unlock:
	mutex_unlock(&dev->struct_mutex);
	return sgt;
}

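/*
 * unmap_dma_buf callback: intentionally empty; the cached mapping is kept
 * for reuse and only torn down when the attachment is detached.
 */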
static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
						struct sg_table *sgt,
						enum dma_data_direction dir)
{
	/* Nothing to do. */
}

static void *exynos_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
						unsigned long page_num)
{
	/* TODO */

	return NULL;
}

static void exynos_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
						unsigned long page_num,
						void *addr)
{
	/* TODO */
}

static void *exynos_gem_dmabuf_kmap(struct dma_buf *dma_buf,
					unsigned long page_num)
{
	/* TODO */

	return NULL;
}

static void exynos_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
					unsigned long page_num, void *addr)
{
	/* TODO */
}

static int exynos_gem_dmabuf_mmap(struct dma_buf *dma_buf,
					struct vm_area_struct *vma)
{
	return -ENOTTY;
}

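/*
 * dma-buf operations backing every buffer exported by this driver. The
 * kmap and mmap entries are stubs for now, so importers are limited to the
 * attach/map path above.
 */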
static const struct dma_buf_ops exynos_dmabuf_ops = {
	.attach			= exynos_gem_attach_dma_buf,
	.detach			= exynos_gem_detach_dma_buf,
	.map_dma_buf		= exynos_gem_map_dma_buf,
	.unmap_dma_buf		= exynos_gem_unmap_dma_buf,
	.kmap			= exynos_gem_dmabuf_kmap,
	.kmap_atomic		= exynos_gem_dmabuf_kmap_atomic,
	.kunmap			= exynos_gem_dmabuf_kunmap,
	.kunmap_atomic		= exynos_gem_dmabuf_kunmap_atomic,
	.mmap			= exynos_gem_dmabuf_mmap,
	.release		= drm_gem_dmabuf_release,
};

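/*
 * PRIME export: wrap a GEM object in a dma-buf so other drivers can attach
 * to it. The GEM object itself is passed as the dma-buf private data and is
 * resolved again by dma_buf_to_obj() above.
 */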
struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
				struct drm_gem_object *obj, int flags)
{
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);

	return dma_buf_export(obj, &exynos_dmabuf_ops,
				exynos_gem_obj->base.size, flags, NULL);
}

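/*
 * PRIME import: turn a dma-buf into a GEM object. A buffer exported by this
 * driver on the same device is short-circuited back to its original GEM
 * object; anything else is attached, mapped bidirectionally and wrapped in
 * a freshly initialised GEM object.
 */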
struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
				struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct scatterlist *sgl;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buffer;
	int ret;

	/* is this one of our own objects? */
	if (dma_buf->ops == &exynos_dmabuf_ops) {
		struct drm_gem_object *obj;

		obj = dma_buf->priv;

		/* is it from our device? */
		if (obj->dev == drm_dev) {
			/*
			 * Importing a dmabuf exported from our own gem
			 * increases the refcount on the gem itself instead of
			 * the f_count of the dmabuf.
			 */
			drm_gem_object_reference(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(dma_buf, drm_dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_buf_detach;
	}

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		ret = -ENOMEM;
		goto err_unmap_attach;
	}

	exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
	if (!exynos_gem_obj) {
		ret = -ENOMEM;
		goto err_free_buffer;
	}

	sgl = sgt->sgl;

	buffer->size = dma_buf->size;
	buffer->dma_addr = sg_dma_address(sgl);

	if (sgt->nents == 1) {
		/* always physically contiguous memory if sgt->nents is 1. */
		exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
	} else {
		/*
		 * this case could be CONTIG or NONCONTIG type but for now
		 * it sets NONCONTIG.
		 * TODO. we have to find a way for the exporter to notify
		 * the importer of the type of its own buffer.
		 */
		exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
	}

	exynos_gem_obj->buffer = buffer;
	buffer->sgt = sgt;
	exynos_gem_obj->base.import_attach = attach;

	DRM_DEBUG_PRIME("dma_addr = %pad, size = 0x%lx\n", &buffer->dma_addr,
								buffer->size);

	return &exynos_gem_obj->base;

err_free_buffer:
	kfree(buffer);
	buffer = NULL;
err_unmap_attach:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
err_buf_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}

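/*
 * For reference, the export/import paths above are driven from userspace
 * through the generic PRIME ioctls. A minimal sketch using libdrm, with
 * hypothetical fds and handle values and error handling omitted:
 *
 *	int prime_fd;
 *	drmPrimeHandleToFD(exynos_fd, gem_handle, DRM_CLOEXEC, &prime_fd);
 *	// pass prime_fd to the importing process/driver, which then does:
 *	uint32_t handle;
 *	drmPrimeFDToHandle(importer_fd, prime_fd, &handle);
 */
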
MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
MODULE_DESCRIPTION("Samsung SoC DRM DMABUF Module");
MODULE_LICENSE("GPL");