/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/moduleparam.h>

#include "virtgpu_drv.h"

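/*
 * "virglhack" module parameter: when set (the default), resource IDs come
 * from a monotonic counter and are never reused; see
 * virtio_gpu_resource_id_get() below.
 */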
static int virtio_gpu_virglrenderer_workaround = 1;
module_param_named(virglhack, virtio_gpu_virglrenderer_workaround, int, 0400);

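/**
 * virtio_gpu_resource_id_get() - allocate a host resource ID
 * @vgdev: virtio-gpu device
 * @resid: returned resource ID, always non-zero (ID 0 means "no
 *         resource" in the virtio-gpu protocol)
 *
 * Returns 0 on success or a negative error code if no ID is available.
 */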
int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev, uint32_t *resid)
{
	if (virtio_gpu_virglrenderer_workaround) {
		/*
		 * Hack to avoid re-using resource IDs.
		 *
		 * virglrenderer versions up to (and including) 0.7.0
		 * can't deal with that.  virglrenderer commit
		 * "f91a9dd35715 Fix unlinking resources from hash
		 * table." (Feb 2019) fixes the bug.
		 */
		static atomic_t seqno = ATOMIC_INIT(0);
		int handle = atomic_inc_return(&seqno);

		*resid = handle + 1;
	} else {
		int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);

		if (handle < 0)
			return handle;
		*resid = handle + 1;
	}
	return 0;
}

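/*
 * Release a resource ID.  This is a no-op while the virglrenderer
 * workaround is active, since IDs are deliberately never recycled then.
 */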
static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
{
	if (!virtio_gpu_virglrenderer_workaround)
		ida_free(&vgdev->resource_ida, id - 1);
}

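/**
 * virtio_gpu_cleanup_object() - release the resource ID and backing storage
 * @bo: buffer object to clean up
 *
 * Called either directly from virtio_gpu_free_object() (if the resource
 * was never created on the host) or from the unref command's completion
 * handler, once the host is done with the resource.
 */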
void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
{
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;

	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
	if (virtio_gpu_is_shmem(bo)) {
		struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);

		if (shmem->pages) {
			if (shmem->mapped) {
				dma_unmap_sgtable(vgdev->vdev->dev.parent,
						  shmem->pages, DMA_TO_DEVICE, 0);
				shmem->mapped = 0;
			}

			sg_free_table(shmem->pages);
			kfree(shmem->pages);
			shmem->pages = NULL;
			drm_gem_shmem_unpin(&bo->base.base);
		}

		drm_gem_shmem_free_object(&bo->base.base);
	} else if (virtio_gpu_is_vram(bo)) {
		struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);

		spin_lock(&vgdev->host_visible_lock);
		if (drm_mm_node_allocated(&vram->vram_node))
			drm_mm_remove_node(&vram->vram_node);

		spin_unlock(&vgdev->host_visible_lock);

		drm_gem_free_mmap_offset(&vram->base.base.base);
		drm_gem_object_release(&vram->base.base.base);
		kfree(vram);
	}
}

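/*
 * GEM free callback: if the resource exists on the host, submit an
 * unref command first; virtio_gpu_cleanup_object() then runs from that
 * command's completion handler.
 */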
static void virtio_gpu_free_object(struct drm_gem_object *obj)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;

	if (bo->created) {
		virtio_gpu_cmd_unref_resource(vgdev, bo);
		virtio_gpu_notify(vgdev);
		/* completion handler calls virtio_gpu_cleanup_object() */
		return;
	}
	virtio_gpu_cleanup_object(bo);
}

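/* GEM object vtable for shmem-backed buffer objects. */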
static const struct drm_gem_object_funcs virtio_gpu_shmem_funcs = {
	.free = virtio_gpu_free_object,
	.open = virtio_gpu_gem_object_open,
	.close = virtio_gpu_gem_object_close,
	.print_info = drm_gem_shmem_object_print_info,
	.export = virtgpu_gem_prime_export,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
};

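/* A buffer object is shmem-backed iff it uses the shmem vtable above. */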
bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo)
{
	return bo->base.base.funcs == &virtio_gpu_shmem_funcs;
}

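/*
 * .gem_create_object hook: allocate the driver-private wrapper and plug
 * in the shmem-specific GEM vtable; backing pages are allocated later
 * by the shmem helpers.
 */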
struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
						size_t size)
{
	struct virtio_gpu_object_shmem *shmem;
	struct drm_gem_shmem_object *dshmem;

	shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
	if (!shmem)
		return NULL;

	dshmem = &shmem->base.base;
	dshmem->base.funcs = &virtio_gpu_shmem_funcs;
	return &dshmem->base;
}

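/*
 * Pin the backing pages, build the scatter/gather table and translate
 * it into the virtio_gpu_mem_entry array handed to the host.  With the
 * DMA API in use the entries carry DMA addresses; otherwise physical
 * addresses are used directly.
 */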
static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_object *bo,
					struct virtio_gpu_mem_entry **ents,
					unsigned int *nents)
{
	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
	struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
	struct scatterlist *sg;
	int si, ret;

	ret = drm_gem_shmem_pin(&bo->base.base);
	if (ret < 0)
		return -EINVAL;

	/*
	 * virtio_gpu uses drm_gem_shmem_get_sg_table instead of
	 * drm_gem_shmem_get_pages_sgt because virtio has its own set of
	 * dma-ops.  This is discouraged for other drivers, but should be
	 * fine since virtio_gpu doesn't support dma-buf import from other
	 * devices.
	 */
	shmem->pages = drm_gem_shmem_get_sg_table(&bo->base.base);
	if (IS_ERR(shmem->pages)) {
		drm_gem_shmem_unpin(&bo->base.base);
		ret = PTR_ERR(shmem->pages);
		shmem->pages = NULL;
		return ret;
	}

	if (use_dma_api) {
		ret = dma_map_sgtable(vgdev->vdev->dev.parent,
				      shmem->pages, DMA_TO_DEVICE, 0);
		if (ret)
			return ret;
		*nents = shmem->mapped = shmem->pages->nents;
	} else {
		*nents = shmem->pages->orig_nents;
	}

	*ents = kvmalloc_array(*nents,
			       sizeof(struct virtio_gpu_mem_entry),
			       GFP_KERNEL);
	if (!(*ents)) {
		DRM_ERROR("failed to allocate ent list\n");
		return -ENOMEM;
	}

	if (use_dma_api) {
		for_each_sgtable_dma_sg(shmem->pages, sg, si) {
			(*ents)[si].addr = cpu_to_le64(sg_dma_address(sg));
			(*ents)[si].length = cpu_to_le32(sg_dma_len(sg));
			(*ents)[si].padding = 0;
		}
	} else {
		for_each_sgtable_sg(shmem->pages, sg, si) {
			(*ents)[si].addr = cpu_to_le64(sg_phys(sg));
			(*ents)[si].length = cpu_to_le32(sg->length);
			(*ents)[si].padding = 0;
		}
	}

	return 0;
}

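/**
 * virtio_gpu_object_create() - create a shmem-backed buffer object
 * @vgdev: virtio-gpu device
 * @params: creation parameters (size, dumb/virgl/blob flags, ...)
 * @bo_ptr: returned buffer object
 * @fence: optional fence to attach to the resource-create command
 *
 * Allocates the GEM object, reserves a resource ID, sets up the guest
 * backing pages and submits the matching resource-create command (blob,
 * 3D or 2D) to the host.
 */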
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object_params *params,
			     struct virtio_gpu_object **bo_ptr,
			     struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object_array *objs = NULL;
	struct drm_gem_shmem_object *shmem_obj;
	struct virtio_gpu_object *bo;
	struct virtio_gpu_mem_entry *ents;
	unsigned int nents;
	int ret;

	*bo_ptr = NULL;

	params->size = roundup(params->size, PAGE_SIZE);
	shmem_obj = drm_gem_shmem_create(vgdev->ddev, params->size);
	if (IS_ERR(shmem_obj))
		return PTR_ERR(shmem_obj);
	bo = gem_to_virtio_gpu_obj(&shmem_obj->base);

	ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle);
	if (ret < 0)
		goto err_free_gem;

	bo->dumb = params->dumb;

	if (fence) {
		ret = -ENOMEM;
		objs = virtio_gpu_array_alloc(1);
		if (!objs)
			goto err_put_id;
		virtio_gpu_array_add_obj(objs, &bo->base.base);

		ret = virtio_gpu_array_lock_resv(objs);
		if (ret != 0)
			goto err_put_objs;
	}

	ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);
	if (ret != 0) {
		if (fence)
			virtio_gpu_array_unlock_resv(objs);
		virtio_gpu_array_put_free(objs);
		virtio_gpu_free_object(&shmem_obj->base);
		return ret;
	}

	if (params->blob) {
		if (params->blob_mem == VIRTGPU_BLOB_MEM_GUEST)
			bo->guest_blob = true;

		virtio_gpu_cmd_resource_create_blob(vgdev, bo, params,
						    ents, nents);
	} else if (params->virgl) {
		virtio_gpu_cmd_resource_create_3d(vgdev, bo, params,
						  objs, fence);
		virtio_gpu_object_attach(vgdev, bo, ents, nents);
	} else {
		virtio_gpu_cmd_create_resource(vgdev, bo, params,
					       objs, fence);
		virtio_gpu_object_attach(vgdev, bo, ents, nents);
	}

	*bo_ptr = bo;
	return 0;

err_put_objs:
	virtio_gpu_array_put_free(objs);
err_put_id:
	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
err_free_gem:
	drm_gem_shmem_free_object(&shmem_obj->base);
	return ret;
}