/*
 * Copyright 2012 Red Hat Inc
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Dave Airlie <airlied@redhat.com>
 */
#include <drm/drmP.h>
#include "i915_drv.h"
#include <linux/dma-buf.h>

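/*
 * Map the object's backing store for DMA by the importing device. The
 * object's scatterlist is duplicated so that each attachment gets an
 * independent mapping, and the backing pages are pinned for as long as
 * the mapping exists.
 */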
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
	struct sg_table *st;
	struct scatterlist *src, *dst;
	int ret, i;

	ret = i915_mutex_lock_interruptible(obj->base.dev);
	if (ret)
		return ERR_PTR(ret);

	ret = i915_gem_object_get_pages(obj);
	if (ret) {
		st = ERR_PTR(ret);
		goto out;
	}

	/* Copy sg so that we make an independent mapping */
	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (st == NULL) {
		st = ERR_PTR(-ENOMEM);
		goto out;
	}

	ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
	if (ret) {
		kfree(st);
		st = ERR_PTR(ret);
		goto out;
	}

	src = obj->pages->sgl;
	dst = st->sgl;
	for (i = 0; i < obj->pages->nents; i++) {
		sg_set_page(dst, sg_page(src), src->length, 0);
		dst = sg_next(dst);
		src = sg_next(src);
	}

	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
		sg_free_table(st);
		kfree(st);
		st = ERR_PTR(-ENOMEM);
		goto out;
	}

	i915_gem_object_pin_pages(obj);

out:
	mutex_unlock(&obj->base.dev->struct_mutex);
	return st;
}

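/*
 * Tear down a mapping created by i915_gem_map_dma_buf() and release the
 * duplicated sg table. The page pin taken at map time is dropped here.
 */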
static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *sg,
				   enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = attachment->dmabuf->priv;

	mutex_lock(&obj->base.dev->struct_mutex);

	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
	sg_free_table(sg);
	kfree(sg);

	/* Balance the pin taken in i915_gem_map_dma_buf() */
	i915_gem_object_unpin_pages(obj);

	mutex_unlock(&obj->base.dev->struct_mutex);
}

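/*
 * Called when the last reference to the dma-buf goes away: drop the GEM
 * reference that was taken on behalf of the exported fd.
 */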
static void i915_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct drm_i915_gem_object *obj = dma_buf->priv;

	if (obj->base.export_dma_buf == dma_buf) {
		/* drop the reference the export fd holds */
		obj->base.export_dma_buf = NULL;
		drm_gem_object_unreference_unlocked(&obj->base);
	}
}

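/*
 * Provide a refcounted kernel virtual mapping of the object. The first
 * caller builds a page array from the object's sg table and vmap()s it;
 * subsequent callers just bump vmapping_count and reuse the mapping.
 */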
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_i915_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->base.dev;
	struct sg_page_iter sg_iter;
	struct page **pages;
	int ret, i;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);

	if (obj->dma_buf_vmapping) {
		obj->vmapping_count++;
		goto out_unlock;
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto error;

	ret = -ENOMEM;

	pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
	if (pages == NULL)
		goto error;

	i = 0;
	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
		pages[i++] = sg_page_iter_page(&sg_iter);

	obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL);
	drm_free_large(pages);

	if (!obj->dma_buf_vmapping)
		goto error;

	obj->vmapping_count = 1;
	i915_gem_object_pin_pages(obj);
out_unlock:
	mutex_unlock(&dev->struct_mutex);
	return obj->dma_buf_vmapping;

error:
	mutex_unlock(&dev->struct_mutex);
	return ERR_PTR(ret);
}

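/*
 * Drop one vmap reference; the kernel mapping and the page pin are
 * released when the last user goes away.
 */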
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_i915_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->base.dev;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return;

	if (--obj->vmapping_count == 0) {
		vunmap(obj->dma_buf_vmapping);
		obj->dma_buf_vmapping = NULL;

		i915_gem_object_unpin_pages(obj);
	}
	mutex_unlock(&dev->struct_mutex);
}

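/*
 * Per-page kmap and userspace mmap of the dma-buf are not supported;
 * importers must use the vmap interface or DMA mappings instead.
 */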
static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	return NULL;
}

static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}

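/*
 * Move the object to the CPU domain before CPU access so that reads and
 * writes through the dma-buf are coherent with the GPU.
 */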
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t length, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->base.dev;
	int ret;
	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ret = i915_gem_object_set_to_cpu_domain(obj, write);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

static const struct dma_buf_ops i915_dmabuf_ops = {
	.map_dma_buf = i915_gem_map_dma_buf,
	.unmap_dma_buf = i915_gem_unmap_dma_buf,
	.release = i915_gem_dmabuf_release,
	.kmap = i915_gem_dmabuf_kmap,
	.kmap_atomic = i915_gem_dmabuf_kmap_atomic,
	.kunmap = i915_gem_dmabuf_kunmap,
	.kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
	.mmap = i915_gem_dmabuf_mmap,
	.vmap = i915_gem_dmabuf_vmap,
	.vunmap = i915_gem_dmabuf_vunmap,
	.begin_cpu_access = i915_gem_begin_cpu_access,
};

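/*
 * Export a GEM object as a dma-buf that other drivers can attach to.
 */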
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
				      struct drm_gem_object *gem_obj, int flags)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);

	return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, flags);
}

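/*
 * For imported objects the backing storage is owned by the exporter:
 * get_pages/put_pages simply map and unmap the import attachment.
 */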
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	struct sg_table *sg;

	sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg))
		return PTR_ERR(sg);

	obj->pages = sg;
	obj->has_dma_mapping = true;
	return 0;
}

static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	dma_buf_unmap_attachment(obj->base.import_attach,
				 obj->pages, DMA_BIDIRECTIONAL);
	obj->has_dma_mapping = false;
}

static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
	.get_pages = i915_gem_object_get_pages_dmabuf,
	.put_pages = i915_gem_object_put_pages_dmabuf,
};

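/*
 * Import a dma-buf as a GEM object. Self-imports of our own dma-bufs are
 * short-circuited to the underlying GEM object; foreign buffers get a new
 * GEM object backed by the dma-buf attachment.
 */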
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_i915_gem_object *obj;
	int ret;

	/* is this one of our own objects? */
	if (dma_buf->ops == &i915_dmabuf_ops) {
		obj = dma_buf->priv;
		/* is it from our device? */
		if (obj->base.dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem
			 * increases the refcount on the gem itself instead of
			 * the f_count of the dmabuf.
			 */
			drm_gem_object_reference(&obj->base);
			return &obj->base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL) {
		ret = -ENOMEM;
		goto fail_detach;
	}

	ret = drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
	if (ret) {
		i915_gem_object_free(obj);
		goto fail_detach;
	}

	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
	obj->base.import_attach = attach;

	return &obj->base;

fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}