// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 *  Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#include <linux/dma-buf.h>
#include <linux/scatterlist.h>
#include <linux/shmem_fs.h>

#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>
#include <drm/drm_probe_helper.h>

#include <xen/balloon.h>
#include <xen/xen.h>

#include "xen_drm_front.h"
#include "xen_drm_front_gem.h"

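/* Driver-private GEM object for the Xen para-virtualized display frontend. */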
struct xen_gem_object {
	struct drm_gem_object base;

	size_t num_pages;
	struct page **pages;

	/* set for buffers allocated by the backend */
	bool be_alloc;

	/* set for imported PRIME buffers */
	struct sg_table *sgt_imported;
};

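/* Convert a base GEM object into the driver-private xen_gem_object. */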
static inline struct xen_gem_object *
to_xen_gem_obj(struct drm_gem_object *gem_obj)
{
	return container_of(gem_obj, struct xen_gem_object, base);
}

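/* Allocate the array of page pointers needed to track buf_size bytes. */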
static int gem_alloc_pages_array(struct xen_gem_object *xen_obj,
				 size_t buf_size)
{
	xen_obj->num_pages = DIV_ROUND_UP(buf_size, PAGE_SIZE);
	xen_obj->pages = kvmalloc_array(xen_obj->num_pages,
					sizeof(struct page *), GFP_KERNEL);
	return !xen_obj->pages ? -ENOMEM : 0;
}

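/* Free the page pointer array; the pages themselves are freed elsewhere. */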
static void gem_free_pages_array(struct xen_gem_object *xen_obj)
{
	kvfree(xen_obj->pages);
	xen_obj->pages = NULL;
}

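/* Allocate a xen_gem_object and initialize the underlying GEM object. */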
static struct xen_gem_object *gem_create_obj(struct drm_device *dev,
					     size_t size)
{
	struct xen_gem_object *xen_obj;
	int ret;

	xen_obj = kzalloc(sizeof(*xen_obj), GFP_KERNEL);
	if (!xen_obj)
		return ERR_PTR(-ENOMEM);

	ret = drm_gem_object_init(dev, &xen_obj->base, size);
	if (ret < 0) {
		kfree(xen_obj);
		return ERR_PTR(ret);
	}

	return xen_obj;
}

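/*
 * Create a buffer of the given size: if the backend allocates the buffer
 * (be_alloc), only reserve ballooned pages to later map the backend's
 * grant references; otherwise allocate backing pages now so they can be
 * shared with the backend.
 */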
static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct xen_gem_object *xen_obj;
	int ret;

	size = round_up(size, PAGE_SIZE);
	xen_obj = gem_create_obj(dev, size);
	if (IS_ERR(xen_obj))
		return xen_obj;

	if (drm_info->front_info->cfg.be_alloc) {
		/*
		 * The backend will allocate space for this buffer, so
		 * only allocate the array of pointers to pages.
		 */
		ret = gem_alloc_pages_array(xen_obj, size);
		if (ret < 0)
			goto fail;

		/*
		 * Allocate ballooned pages which will be used to map
		 * grant references provided by the backend.
		 */
		ret = xen_alloc_unpopulated_pages(xen_obj->num_pages,
						  xen_obj->pages);
		if (ret < 0) {
			DRM_ERROR("Cannot allocate %zu ballooned pages: %d\n",
				  xen_obj->num_pages, ret);
			gem_free_pages_array(xen_obj);
			goto fail;
		}

		xen_obj->be_alloc = true;
		return xen_obj;
	}
	/*
	 * Need to allocate backing pages now, so we can share those
	 * with the backend.
	 */
	xen_obj->num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
	xen_obj->pages = drm_gem_get_pages(&xen_obj->base);
	if (IS_ERR(xen_obj->pages)) {
		ret = PTR_ERR(xen_obj->pages);
		xen_obj->pages = NULL;
		goto fail;
	}

	return xen_obj;

fail:
	DRM_ERROR("Failed to allocate buffer with size %zu\n", size);
	return ERR_PTR(ret);
}

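/* Create a GEM object of the given size and return its base DRM object. */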
struct drm_gem_object *xen_drm_front_gem_create(struct drm_device *dev,
						size_t size)
{
	struct xen_gem_object *xen_obj;

	xen_obj = gem_create(dev, size);
	if (IS_ERR(xen_obj))
		return ERR_CAST(xen_obj);

	return &xen_obj->base;
}

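/*
 * Release a GEM object: imported PRIME buffers are destroyed via
 * drm_prime_gem_destroy(), backend-allocated buffers return their
 * ballooned pages, and locally allocated buffers put their pages back.
 */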
void xen_drm_front_gem_free_object_unlocked(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	if (xen_obj->base.import_attach) {
		drm_prime_gem_destroy(&xen_obj->base, xen_obj->sgt_imported);
		gem_free_pages_array(xen_obj);
	} else {
		if (xen_obj->pages) {
			if (xen_obj->be_alloc) {
				xen_free_unpopulated_pages(xen_obj->num_pages,
							   xen_obj->pages);
				gem_free_pages_array(xen_obj);
			} else {
				drm_gem_put_pages(&xen_obj->base,
						  xen_obj->pages, true, false);
			}
		}
	}
	drm_gem_object_release(gem_obj);
	kfree(xen_obj);
}

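/* Return the array of pages backing the GEM object (may be NULL). */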
struct page **xen_drm_front_gem_get_pages(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	return xen_obj->pages;
}

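/* Export the object's backing pages as a scatter-gather table. */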
struct sg_table *xen_drm_front_gem_get_sg_table(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	if (!xen_obj->pages)
		return ERR_PTR(-ENOMEM);

	return drm_prime_pages_to_sg(gem_obj->dev,
				     xen_obj->pages, xen_obj->num_pages);
}

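/*
 * Import a PRIME buffer: create a GEM object sized after the dma-buf,
 * extract the pages from the scatter-gather table and tell the backend
 * about the new display buffer.
 */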
struct drm_gem_object *
xen_drm_front_gem_import_sg_table(struct drm_device *dev,
				  struct dma_buf_attachment *attach,
				  struct sg_table *sgt)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct xen_gem_object *xen_obj;
	size_t size;
	int ret;

	size = attach->dmabuf->size;
	xen_obj = gem_create_obj(dev, size);
	if (IS_ERR(xen_obj))
		return ERR_CAST(xen_obj);

	ret = gem_alloc_pages_array(xen_obj, size);
	if (ret < 0)
		return ERR_PTR(ret);

	xen_obj->sgt_imported = sgt;

	ret = drm_prime_sg_to_page_addr_arrays(sgt, xen_obj->pages,
					       NULL, xen_obj->num_pages);
	if (ret < 0)
		return ERR_PTR(ret);

	ret = xen_drm_front_dbuf_create(drm_info->front_info,
					xen_drm_front_dbuf_to_cookie(&xen_obj->base),
					0, 0, 0, size, sgt->sgl->offset,
					xen_obj->pages);
	if (ret < 0)
		return ERR_PTR(ret);

	DRM_DEBUG("Imported buffer of size %zu with nents %u\n",
		  size, sgt->orig_nents);

	return &xen_obj->base;
}

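/* Map all pages of the buffer into the given userspace VMA at once. */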
static int gem_mmap_obj(struct xen_gem_object *xen_obj,
			struct vm_area_struct *vma)
{
	int ret;

	/*
	 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set
	 * the vm_pgoff (used as a fake buffer offset by DRM) to 0 as we
	 * want to map the whole buffer.
	 */
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_pgoff = 0;
	/*
	 * According to the Xen on ARM ABI (xen/include/public/arch-arm.h):
	 * all memory which is shared with other entities in the system
	 * (including the hypervisor and other guests) must reside in memory
	 * which is mapped as Normal Inner Write-Back Outer Write-Back
	 * Inner-Shareable.
	 */
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

	/*
	 * The vm_operations_struct.fault handler would normally be called
	 * on first CPU access to the VMA. For GPUs this isn't the case,
	 * because the CPU doesn't touch the memory. Insert all pages now,
	 * so both CPU and GPU are happy.
	 * FIXME: as we insert all the pages now, no .fault handler should
	 * ever be called, so don't provide one.
	 */
	ret = vm_map_pages(vma, xen_obj->pages, xen_obj->num_pages);
	if (ret < 0)
		DRM_ERROR("Failed to map pages into vma: %d\n", ret);

	return ret;
}

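/* mmap callback: let DRM set up the VMA, then insert the pages. */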
int xen_drm_front_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct xen_gem_object *xen_obj;
	struct drm_gem_object *gem_obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret < 0)
		return ret;

	gem_obj = vma->vm_private_data;
	xen_obj = to_xen_gem_obj(gem_obj);
	return gem_mmap_obj(xen_obj, vma);
}

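/* Map the buffer into kernel virtual address space for PRIME vmap. */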
void *xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	if (!xen_obj->pages)
		return NULL;

	/* Please see comment in gem_mmap_obj on mapping and attributes. */
	return vmap(xen_obj->pages, xen_obj->num_pages,
		    VM_MAP, PAGE_KERNEL);
}

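/* Undo the mapping created by xen_drm_front_gem_prime_vmap(). */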
void xen_drm_front_gem_prime_vunmap(struct drm_gem_object *gem_obj,
				    void *vaddr)
{
	vunmap(vaddr);
}

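/* PRIME mmap callback: set up the VMA for the object, then map its pages. */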
int xen_drm_front_gem_prime_mmap(struct drm_gem_object *gem_obj,
				 struct vm_area_struct *vma)
{
	struct xen_gem_object *xen_obj;
	int ret;

	ret = drm_gem_mmap_obj(gem_obj, gem_obj->size, vma);
	if (ret < 0)
		return ret;

	xen_obj = to_xen_gem_obj(gem_obj);
	return gem_mmap_obj(xen_obj, vma);
}