/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>

#include "virtgpu_drv.h"

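/* Pixel formats advertised to userspace for the primary plane. */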
static const uint32_t virtio_gpu_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_BGRX8888,
	DRM_FORMAT_BGRA8888,
	DRM_FORMAT_RGBX8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
};

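/* The cursor plane accepts only ARGB8888 in host byte order. */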
static const uint32_t virtio_gpu_cursor_formats[] = {
	DRM_FORMAT_HOST_ARGB8888,
};

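/*
 * Map a DRM fourcc to the matching VIRTIO_GPU_FORMAT_* value.  The
 * virtio formats name their components in memory byte order, while the
 * packed DRM fourccs are interpreted in host byte order here, so the
 * mapping differs between big- and little-endian builds.
 */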
uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc)
{
	uint32_t format;

	switch (drm_fourcc) {
#ifdef __BIG_ENDIAN
	case DRM_FORMAT_XRGB8888:
		format = VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM;
		break;
	case DRM_FORMAT_ARGB8888:
		format = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM;
		break;
	case DRM_FORMAT_BGRX8888:
		format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM;
		break;
	case DRM_FORMAT_BGRA8888:
		format = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM;
		break;
	case DRM_FORMAT_RGBX8888:
		format = VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM;
		break;
	case DRM_FORMAT_RGBA8888:
		format = VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM;
		break;
	case DRM_FORMAT_XBGR8888:
		format = VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM;
		break;
	case DRM_FORMAT_ABGR8888:
		format = VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM;
		break;
#else
	case DRM_FORMAT_XRGB8888:
		format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM;
		break;
	case DRM_FORMAT_ARGB8888:
		format = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM;
		break;
	case DRM_FORMAT_BGRX8888:
		format = VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM;
		break;
	case DRM_FORMAT_BGRA8888:
		format = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM;
		break;
	case DRM_FORMAT_RGBX8888:
		format = VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM;
		break;
	case DRM_FORMAT_RGBA8888:
		format = VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM;
		break;
	case DRM_FORMAT_XBGR8888:
		format = VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM;
		break;
	case DRM_FORMAT_ABGR8888:
		format = VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM;
		break;
#endif
	default:
		/*
		 * This should not happen, we handle everything listed
		 * in virtio_gpu_formats[].
		 */
		format = 0;
		break;
	}
	WARN_ON(format == 0);
	return format;
}

static void virtio_gpu_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);
	kfree(plane);
}

static const struct drm_plane_funcs virtio_gpu_plane_funcs = {
	.update_plane		= drm_atomic_helper_update_plane,
	.disable_plane		= drm_atomic_helper_disable_plane,
	.destroy		= virtio_gpu_plane_destroy,
	.reset			= drm_atomic_helper_plane_reset,
	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
};

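/* No driver-specific constraints: accept every plane state as-is. */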
static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
					 struct drm_plane_state *state)
{
	return 0;
}

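/*
 * Commit the primary plane.  For dumb buffers the pixel data is first
 * transferred to the host-side resource, then the scanout is pointed at
 * the resource and the visible rectangle is flushed.
 */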
static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
					    struct drm_plane_state *old_state)
{
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_output *output = NULL;
	struct virtio_gpu_framebuffer *vgfb;
	struct virtio_gpu_object *bo;
	uint32_t handle;

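	/*
	 * Resolve the output from whichever state has a CRTC attached;
	 * when the plane is being disabled only the old state has one.
	 */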
	if (plane->state->crtc)
		output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
	if (old_state->crtc)
		output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
	if (WARN_ON(!output))
		return;

	if (plane->state->fb && output->enabled) {
		vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
		bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
		handle = bo->hw_res_handle;
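		/*
		 * Dumb buffers are filled by guest CPU writes, so copy the
		 * scanned-out region into the host-side resource first.
		 * The src_* values are 16.16 fixed point, hence the >> 16.
		 */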
		if (bo->dumb) {
			virtio_gpu_cmd_transfer_to_host_2d
				(vgdev, bo, 0,
				 cpu_to_le32(plane->state->src_w >> 16),
				 cpu_to_le32(plane->state->src_h >> 16),
				 cpu_to_le32(plane->state->src_x >> 16),
				 cpu_to_le32(plane->state->src_y >> 16), NULL);
		}
	} else {
		handle = 0;
	}

	DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d, src %dx%d+%d+%d\n", handle,
		  plane->state->crtc_w, plane->state->crtc_h,
		  plane->state->crtc_x, plane->state->crtc_y,
		  plane->state->src_w >> 16,
		  plane->state->src_h >> 16,
		  plane->state->src_x >> 16,
		  plane->state->src_y >> 16);
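	/* A handle of 0 disables the scanout; otherwise flush the update. */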
	virtio_gpu_cmd_set_scanout(vgdev, output->index, handle,
				   plane->state->src_w >> 16,
				   plane->state->src_h >> 16,
				   plane->state->src_x >> 16,
				   plane->state->src_y >> 16);
	if (handle)
		virtio_gpu_cmd_resource_flush(vgdev, handle,
					      plane->state->src_x >> 16,
					      plane->state->src_y >> 16,
					      plane->state->src_w >> 16,
					      plane->state->src_h >> 16);
}

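/*
 * Allocate a fence during prepare_fb so the cursor image upload done in
 * virtio_gpu_cursor_plane_update() can be waited on before the host
 * starts displaying the new image.
 */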
static int virtio_gpu_cursor_prepare_fb(struct drm_plane *plane,
					struct drm_plane_state *new_state)
{
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_framebuffer *vgfb;
	struct virtio_gpu_object *bo;

	if (!new_state->fb)
		return 0;

	vgfb = to_virtio_gpu_framebuffer(new_state->fb);
	bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
	if (bo && bo->dumb && (plane->state->fb != new_state->fb)) {
		vgfb->fence = virtio_gpu_fence_alloc(vgdev);
		if (!vgfb->fence)
			return -ENOMEM;
	}

	return 0;
}

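/* Drop a fence allocated in prepare_fb that the update path never consumed. */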
static void virtio_gpu_cursor_cleanup_fb(struct drm_plane *plane,
					 struct drm_plane_state *old_state)
{
	struct virtio_gpu_framebuffer *vgfb;

	if (!plane->state->fb)
		return;

	vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
	if (vgfb->fence) {
		dma_fence_put(&vgfb->fence->f);
		vgfb->fence = NULL;
	}
}

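/*
 * A changed framebuffer means a full cursor update (new image plus
 * hotspot); if only the position changed, a lighter move command is sent.
 */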
static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
					   struct drm_plane_state *old_state)
{
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_output *output = NULL;
	struct virtio_gpu_framebuffer *vgfb;
	struct virtio_gpu_object *bo = NULL;
	uint32_t handle;
	int ret = 0;

	if (plane->state->crtc)
		output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
	if (old_state->crtc)
		output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
	if (WARN_ON(!output))
		return;

	if (plane->state->fb) {
		vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
		bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
		handle = bo->hw_res_handle;
	} else {
		handle = 0;
	}

	if (bo && bo->dumb && (plane->state->fb != old_state->fb)) {
		/* new cursor -- update & wait */
		virtio_gpu_cmd_transfer_to_host_2d
			(vgdev, bo, 0,
			 cpu_to_le32(plane->state->crtc_w),
			 cpu_to_le32(plane->state->crtc_h),
			 0, 0, vgfb->fence);
		ret = virtio_gpu_object_reserve(bo, false);
		if (!ret) {
			dma_resv_add_excl_fence(bo->tbo.base.resv,
						&vgfb->fence->f);
			dma_fence_put(&vgfb->fence->f);
			vgfb->fence = NULL;
			virtio_gpu_object_unreserve(bo);
			virtio_gpu_object_wait(bo, false);
		}
	}

	if (plane->state->fb != old_state->fb) {
		DRM_DEBUG("update, handle %d, pos +%d+%d, hot %d,%d\n", handle,
			  plane->state->crtc_x,
			  plane->state->crtc_y,
			  plane->state->fb ? plane->state->fb->hot_x : 0,
			  plane->state->fb ? plane->state->fb->hot_y : 0);
		output->cursor.hdr.type =
			cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
		output->cursor.resource_id = cpu_to_le32(handle);
		if (plane->state->fb) {
			output->cursor.hot_x =
				cpu_to_le32(plane->state->fb->hot_x);
			output->cursor.hot_y =
				cpu_to_le32(plane->state->fb->hot_y);
		} else {
			output->cursor.hot_x = cpu_to_le32(0);
			output->cursor.hot_y = cpu_to_le32(0);
		}
	} else {
		DRM_DEBUG("move +%d+%d\n",
			  plane->state->crtc_x,
			  plane->state->crtc_y);
		output->cursor.hdr.type =
			cpu_to_le32(VIRTIO_GPU_CMD_MOVE_CURSOR);
	}
	output->cursor.pos.x = cpu_to_le32(plane->state->crtc_x);
	output->cursor.pos.y = cpu_to_le32(plane->state->crtc_y);
	virtio_gpu_cursor_ping(vgdev, output);
}

static const struct drm_plane_helper_funcs virtio_gpu_primary_helper_funcs = {
	.atomic_check		= virtio_gpu_plane_atomic_check,
	.atomic_update		= virtio_gpu_primary_plane_update,
};

static const struct drm_plane_helper_funcs virtio_gpu_cursor_helper_funcs = {
	.prepare_fb		= virtio_gpu_cursor_prepare_fb,
	.cleanup_fb		= virtio_gpu_cursor_cleanup_fb,
	.atomic_check		= virtio_gpu_plane_atomic_check,
	.atomic_update		= virtio_gpu_cursor_plane_update,
};

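/*
 * Allocate and register a primary or cursor plane for the output at
 * @index; the 1 << index possible_crtcs mask ties the plane to that
 * output's CRTC.
 */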
struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
					enum drm_plane_type type,
					int index)
{
	struct drm_device *dev = vgdev->ddev;
	const struct drm_plane_helper_funcs *funcs;
	struct drm_plane *plane;
	const uint32_t *formats;
	int ret, nformats;

	plane = kzalloc(sizeof(*plane), GFP_KERNEL);
	if (!plane)
		return ERR_PTR(-ENOMEM);

	if (type == DRM_PLANE_TYPE_CURSOR) {
		formats = virtio_gpu_cursor_formats;
		nformats = ARRAY_SIZE(virtio_gpu_cursor_formats);
		funcs = &virtio_gpu_cursor_helper_funcs;
	} else {
		formats = virtio_gpu_formats;
		nformats = ARRAY_SIZE(virtio_gpu_formats);
		funcs = &virtio_gpu_primary_helper_funcs;
	}
	ret = drm_universal_plane_init(dev, plane, 1 << index,
				       &virtio_gpu_plane_funcs,
				       formats, nformats,
				       NULL, type, NULL);
	if (ret)
		goto err_plane_init;

	drm_plane_helper_add(plane, funcs);
	return plane;

err_plane_init:
	kfree(plane);
	return ERR_PTR(ret);
}