/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright 2012 Red Hat Inc
 */

#include <linux/dma-buf.h>
#include <linux/highmem.h>
#include <linux/dma-resv.h>

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"

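/*
 * dma-buf (PRIME) support for i915 GEM objects: the exporter callbacks
 * below hand out i915 objects as dma-bufs, and i915_gem_prime_import()
 * wraps foreign dma-bufs in GEM objects.
 *
 * Rough importer-side flow, for orientation only (generic dma-buf API,
 * not specific to this file):
 *
 *	attach = dma_buf_attach(dma_buf, dev);
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *	...use sgt...
 *	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *	dma_buf_detach(dma_buf, attach);
 */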
static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
{
	return to_intel_bo(buf->priv);
}

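/*
 * Exporter map callback: pin the object's backing pages, duplicate its
 * scatterlist and DMA-map the copy for the importing device, so each
 * attachment gets an independent mapping.
 */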
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
	struct sg_table *st;
	struct scatterlist *src, *dst;
	int ret, i;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	/* Copy sg so that we make an independent mapping */
	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (st == NULL) {
		ret = -ENOMEM;
		goto err_unpin_pages;
	}

	ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
	if (ret)
		goto err_free;

	src = obj->mm.pages->sgl;
	dst = st->sgl;
	for (i = 0; i < obj->mm.pages->nents; i++) {
		sg_set_page(dst, sg_page(src), src->length, 0);
		dst = sg_next(dst);
		src = sg_next(src);
	}

	ret = dma_map_sgtable(attachment->dev, st, dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (ret)
		goto err_free_sg;

	return st;

err_free_sg:
	sg_free_table(st);
err_free:
	kfree(st);
err_unpin_pages:
	i915_gem_object_unpin_pages(obj);
err:
	return ERR_PTR(ret);
}

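/* Undo i915_gem_map_dma_buf(): unmap and free the copied sg_table, unpin. */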
static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *sg,
				   enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);

	dma_unmap_sgtable(attachment->dev, sg, dir, DMA_ATTR_SKIP_CPU_SYNC);
	sg_free_table(sg);
	kfree(sg);

	i915_gem_object_unpin_pages(obj);
}

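/* Exporter vmap callback: return a write-back kernel mapping of the object. */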
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

	return i915_gem_object_pin_map(obj, I915_MAP_WB);
}

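/* Flush and drop the kernel mapping taken in i915_gem_dmabuf_vmap(). */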
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

	i915_gem_object_flush_map(obj);
	i915_gem_object_unpin_map(obj);
}

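/*
 * Exporter mmap callback: delegate to the object's backing file so that
 * userspace mappings of the dma-buf hit the same pages as the object.
 */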
static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	int ret;

	if (obj->base.size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!obj->base.filp)
		return -ENODEV;

	ret = call_mmap(obj->base.filp, vma);
	if (ret)
		return ret;

	fput(vma->vm_file);
	vma->vm_file = get_file(obj->base.filp);

	return 0;
}

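/* Move the object to the CPU domain before CPU access through the dma-buf. */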
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
	int err;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		return err;

	err = i915_gem_object_lock_interruptible(obj, NULL);
	if (err)
		goto out;

	err = i915_gem_object_set_to_cpu_domain(obj, write);
	i915_gem_object_unlock(obj);

out:
	i915_gem_object_unpin_pages(obj);
	return err;
}

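/* End CPU access through the dma-buf by returning the object to the GTT domain. */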
static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	int err;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		return err;

	err = i915_gem_object_lock_interruptible(obj, NULL);
	if (err)
		goto out;

	err = i915_gem_object_set_to_gtt_domain(obj, false);
	i915_gem_object_unlock(obj);

out:
	i915_gem_object_unpin_pages(obj);
	return err;
}

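/* dma_buf_ops used for every dma-buf exported via i915_gem_prime_export(). */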
static const struct dma_buf_ops i915_dmabuf_ops = {
	.map_dma_buf = i915_gem_map_dma_buf,
	.unmap_dma_buf = i915_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.mmap = i915_gem_dmabuf_mmap,
	.vmap = i915_gem_dmabuf_vmap,
	.vunmap = i915_gem_dmabuf_vunmap,
	.begin_cpu_access = i915_gem_begin_cpu_access,
	.end_cpu_access = i915_gem_end_cpu_access,
};

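/*
 * PRIME export: wrap a GEM object in a dma-buf that shares the object's
 * reservation object.
 */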
struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &i915_dmabuf_ops;
	exp_info.size = gem_obj->size;
	exp_info.flags = flags;
	exp_info.priv = gem_obj;
	exp_info.resv = obj->base.resv;

	if (obj->ops->dmabuf_export) {
		int ret = obj->ops->dmabuf_export(obj);
		if (ret)
			return ERR_PTR(ret);
	}

	return drm_gem_dmabuf_export(gem_obj->dev, &exp_info);
}

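/*
 * get_pages backend for imported objects: map the dma-buf attachment and
 * adopt the exporter's sg_table as this object's backing store.
 */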
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;
	unsigned int sg_page_sizes;

	pages = dma_buf_map_attachment(obj->base.import_attach,
				       DMA_BIDIRECTIONAL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	sg_page_sizes = i915_sg_page_sizes(pages->sgl);

	__i915_gem_object_set_pages(obj, pages, sg_page_sizes);

	return 0;
}

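/* put_pages backend for imported objects: hand the sg_table back to the exporter. */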
static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
					     struct sg_table *pages)
{
	dma_buf_unmap_attachment(obj->base.import_attach, pages,
				 DMA_BIDIRECTIONAL);
}

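/* Object ops for GEM objects that wrap an imported dma-buf. */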
static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
	.name = "i915_gem_object_dmabuf",
	.get_pages = i915_gem_object_get_pages_dmabuf,
	.put_pages = i915_gem_object_put_pages_dmabuf,
};

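/*
 * PRIME import: self-imports of an i915 dma-buf from the same device just
 * take a reference on the underlying GEM object; anything else gets a new
 * GEM object backed by a dma-buf attachment.
 */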
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	static struct lock_class_key lock_class;
	struct dma_buf_attachment *attach;
	struct drm_i915_gem_object *obj;
	int ret;

	/* is this one of our own objects? */
	if (dma_buf->ops == &i915_dmabuf_ops) {
		obj = dma_buf_to_obj(dma_buf);
		/* is it from our device? */
		if (obj->base.dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem increases
			 * the refcount on the gem itself instead of the f_count
			 * of the dmabuf.
			 */
			return &i915_gem_object_get(obj)->base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	obj = i915_gem_object_alloc();
	if (obj == NULL) {
		ret = -ENOMEM;
		goto fail_detach;
	}

	drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops, &lock_class);
	obj->base.import_attach = attach;
	obj->base.resv = dma_buf->resv;

	/* We use GTT as shorthand for a coherent domain, one that is
	 * neither in the GPU cache nor in the CPU cache, where all
	 * writes are immediately visible in memory. (That's not strictly
	 * true, but it's close! There are internal buffers such as the
	 * write-combined buffer or a delay through the chipset for GTT
	 * writes that do require us to treat GTT as a separate cache domain.)
	 */
	obj->read_domains = I915_GEM_DOMAIN_GTT;
	obj->write_domain = 0;

	return &obj->base;

fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_dmabuf.c"
#include "selftests/i915_gem_dmabuf.c"
#endif