/*
 * Copyright 2012 Red Hat Inc
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Dave Airlie <airlied@redhat.com>
 */
#include <drm/drmP.h>
#include "i915_drv.h"
#include <linux/dma-buf.h>

/* The exporter stores the GEM object in dma_buf->priv at export time. */
static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
{
	return to_intel_bo(buf->priv);
}

static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
	struct sg_table *st;
	struct scatterlist *src, *dst;
	int ret, i;

	ret = i915_mutex_lock_interruptible(obj->base.dev);
	if (ret)
		goto err;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto err_unlock;

	i915_gem_object_pin_pages(obj);

	/* Copy sg so that we make an independent mapping */
	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (st == NULL) {
		ret = -ENOMEM;
		goto err_unpin;
	}

	ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
	if (ret)
		goto err_free;

	src = obj->pages->sgl;
	dst = st->sgl;
	for (i = 0; i < obj->pages->nents; i++) {
		sg_set_page(dst, sg_page(src), src->length, 0);
		dst = sg_next(dst);
		src = sg_next(src);
	}

	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
		ret = -ENOMEM;
		goto err_free_sg;
	}

	mutex_unlock(&obj->base.dev->struct_mutex);
	return st;

err_free_sg:
	sg_free_table(st);
err_free:
	kfree(st);
err_unpin:
	i915_gem_object_unpin_pages(obj);
err_unlock:
	mutex_unlock(&obj->base.dev->struct_mutex);
err:
	return ERR_PTR(ret);
}

static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *sg,
				   enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);

	mutex_lock(&obj->base.dev->struct_mutex);

	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
	sg_free_table(sg);
	kfree(sg);

	i915_gem_object_unpin_pages(obj);

	mutex_unlock(&obj->base.dev->struct_mutex);
}

static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	struct sg_page_iter sg_iter;
	struct page **pages;
	int ret, i;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);

	if (obj->dma_buf_vmapping) {
		obj->vmapping_count++;
		goto out_unlock;
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto err;

	i915_gem_object_pin_pages(obj);

	ret = -ENOMEM;

	pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
	if (pages == NULL)
		goto err_unpin;

	i = 0;
	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
		pages[i++] = sg_page_iter_page(&sg_iter);

	obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL);
	drm_free_large(pages);

	if (!obj->dma_buf_vmapping)
		goto err_unpin;

	obj->vmapping_count = 1;
out_unlock:
	mutex_unlock(&dev->struct_mutex);
	return obj->dma_buf_vmapping;

err_unpin:
	i915_gem_object_unpin_pages(obj);
err:
	mutex_unlock(&dev->struct_mutex);
	return ERR_PTR(ret);
}

static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;

	mutex_lock(&dev->struct_mutex);
	if (--obj->vmapping_count == 0) {
		vunmap(obj->dma_buf_vmapping);
		obj->dma_buf_vmapping = NULL;

		i915_gem_object_unpin_pages(obj);
	}
	mutex_unlock(&dev->struct_mutex);
}

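/*
 * Note: in-kernel importers reach the vmap/vunmap ops above through the
 * generic dma_buf_vmap()/dma_buf_vunmap() helpers. An illustrative
 * importer-side sketch (not part of this file):
 *
 *	void *vaddr = dma_buf_vmap(dma_buf);
 *	if (vaddr) {
 *		... CPU access through vaddr ...
 *		dma_buf_vunmap(dma_buf, vaddr);
 *	}
 */
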
/*
 * The kmap interfaces are left unimplemented; CPU access to the backing
 * pages goes through the vmap path above instead.
 */
static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

/* mmap of the exported dma-buf fd is not supported by this exporter */
static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}

static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t length, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_device *dev = obj->base.dev;
	int ret;
	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ret = i915_gem_object_set_to_cpu_domain(obj, write);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

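/*
 * Kernel importers invoke the op above via dma_buf_begin_cpu_access().
 * An illustrative call (importer-side, not part of this file):
 *
 *	ret = dma_buf_begin_cpu_access(dma_buf, 0, dma_buf->size,
 *				       DMA_FROM_DEVICE);
 */
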
static const struct dma_buf_ops i915_dmabuf_ops = {
	.map_dma_buf = i915_gem_map_dma_buf,
	.unmap_dma_buf = i915_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.kmap = i915_gem_dmabuf_kmap,
	.kmap_atomic = i915_gem_dmabuf_kmap_atomic,
	.kunmap = i915_gem_dmabuf_kunmap,
	.kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
	.mmap = i915_gem_dmabuf_mmap,
	.vmap = i915_gem_dmabuf_vmap,
	.vunmap = i915_gem_dmabuf_vunmap,
	.begin_cpu_access = i915_gem_begin_cpu_access,
};

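/*
 * An importing device driver exercises the map/unmap ops above through the
 * generic dma-buf attachment API. A minimal importer-side sketch (assumes
 * the importer already holds a dma_buf reference and importer_dev is its
 * struct device; not part of this file):
 *
 *	struct dma_buf_attachment *attach;
 *	struct sg_table *sgt;
 *
 *	attach = dma_buf_attach(dma_buf, importer_dev);
 *	if (IS_ERR(attach))
 *		return PTR_ERR(attach);
 *
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *	if (IS_ERR(sgt)) {
 *		dma_buf_detach(dma_buf, attach);
 *		return PTR_ERR(sgt);
 *	}
 *
 *	... program the importer's DMA engine from sgt ...
 *
 *	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *	dma_buf_detach(dma_buf, attach);
 */
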
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
				      struct drm_gem_object *gem_obj, int flags)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);

	if (obj->ops->dmabuf_export) {
		int ret = obj->ops->dmabuf_export(obj);
		if (ret)
			return ERR_PTR(ret);
	}

	return dma_buf_export(gem_obj, &i915_dmabuf_ops, gem_obj->size, flags,
			      NULL);
}

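/*
 * i915_gem_prime_export() is wired up as the driver's gem_prime_export
 * hook in struct drm_driver and is reached from userspace via
 * DRM_IOCTL_PRIME_HANDLE_TO_FD. Sketch of the hookup (lives in
 * i915_drv.c; shown here for illustration only):
 *
 *	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 *	.gem_prime_export = i915_gem_prime_export,
 */
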
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	struct sg_table *sg;

	sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg))
		return PTR_ERR(sg);

	obj->pages = sg;
	obj->has_dma_mapping = true;
	return 0;
}

static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	dma_buf_unmap_attachment(obj->base.import_attach,
				 obj->pages, DMA_BIDIRECTIONAL);
	obj->has_dma_mapping = false;
}

static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
	.get_pages = i915_gem_object_get_pages_dmabuf,
	.put_pages = i915_gem_object_put_pages_dmabuf,
};

struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_i915_gem_object *obj;
	int ret;

	/* is this one of our own objects? */
	if (dma_buf->ops == &i915_dmabuf_ops) {
		obj = dma_buf_to_obj(dma_buf);
		/* is it from our device? */
		if (obj->base.dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem
			 * increases the refcount on the gem itself instead of
			 * the f_count of the dmabuf.
			 */
			drm_gem_object_reference(&obj->base);
			return &obj->base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL) {
		ret = -ENOMEM;
		goto fail_detach;
	}

	drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
	obj->base.import_attach = attach;

	return &obj->base;

fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
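
/*
 * Likewise, i915_gem_prime_import() is the driver's gem_prime_import hook,
 * reached via DRM_IOCTL_PRIME_FD_TO_HANDLE through
 * drm_gem_prime_fd_to_handle(). The corresponding drm_driver entries (in
 * i915_drv.c; shown here for illustration only):
 *
 *	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 *	.gem_prime_import = i915_gem_prime_import,
 */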