/*
 * Copyright 2017 The Chromium OS Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>
#include <xf86drm.h>

#include "drv_helpers.h"
#include "drv_priv.h"
#include "external/virgl_hw.h"
#include "external/virgl_protocol.h"
#include "external/virtgpu_drm.h"
#include "util.h"
#include "virtgpu.h"

#define PIPE_TEXTURE_2D 2

// This comes from a combination of SwiftShader's VkPhysicalDeviceLimits::maxFramebufferWidth and
// VkPhysicalDeviceLimits::maxImageDimension2D (see https://crrev.com/c/1917130).
#define ANGLE_ON_SWIFTSHADER_MAX_TEXTURE_2D_SIZE 8192

#ifndef MIN
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#endif
#define VIRGL_2D_MAX_TEXTURE_2D_SIZE                                                               \
	MIN(ANGLE_ON_SWIFTSHADER_MAX_TEXTURE_2D_SIZE, MESA_LLVMPIPE_MAX_TEXTURE_2D_SIZE)

static const uint32_t render_target_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB8888,
						  DRM_FORMAT_RGB565, DRM_FORMAT_XBGR8888,
						  DRM_FORMAT_XRGB8888 };

static const uint32_t dumb_texture_source_formats[] = {
	DRM_FORMAT_R8,		DRM_FORMAT_R16,		 DRM_FORMAT_YVU420,
	DRM_FORMAT_NV12,	DRM_FORMAT_NV21,	 DRM_FORMAT_YVU420_ANDROID,
	DRM_FORMAT_ABGR2101010, DRM_FORMAT_ABGR16161616F
};

static const uint32_t texture_source_formats[] = {
	DRM_FORMAT_NV12,	DRM_FORMAT_NV21,	 DRM_FORMAT_R8,
	DRM_FORMAT_R16,		DRM_FORMAT_RG88,	 DRM_FORMAT_YVU420_ANDROID,
	DRM_FORMAT_ABGR2101010, DRM_FORMAT_ABGR16161616F
};

extern struct virtgpu_param params[];

struct virgl_priv {
	int caps_is_v2;
	union virgl_caps caps;
	int host_gbm_enabled;
	atomic_int next_blob_id;
};

static uint32_t translate_format(uint32_t drm_fourcc)
{
	switch (drm_fourcc) {
	case DRM_FORMAT_BGR888:
	case DRM_FORMAT_RGB888:
		return VIRGL_FORMAT_R8G8B8_UNORM;
	case DRM_FORMAT_XRGB8888:
		return VIRGL_FORMAT_B8G8R8X8_UNORM;
	case DRM_FORMAT_ARGB8888:
		return VIRGL_FORMAT_B8G8R8A8_UNORM;
	case DRM_FORMAT_XBGR8888:
		return VIRGL_FORMAT_R8G8B8X8_UNORM;
	case DRM_FORMAT_ABGR8888:
		return VIRGL_FORMAT_R8G8B8A8_UNORM;
	case DRM_FORMAT_ABGR16161616F:
		return VIRGL_FORMAT_R16G16B16A16_FLOAT;
	case DRM_FORMAT_ABGR2101010:
		return VIRGL_FORMAT_R10G10B10A2_UNORM;
	case DRM_FORMAT_RGB565:
		return VIRGL_FORMAT_B5G6R5_UNORM;
	case DRM_FORMAT_R8:
		return VIRGL_FORMAT_R8_UNORM;
	case DRM_FORMAT_R16:
		return VIRGL_FORMAT_R16_UNORM;
	case DRM_FORMAT_RG88:
		return VIRGL_FORMAT_R8G8_UNORM;
	case DRM_FORMAT_NV12:
		return VIRGL_FORMAT_NV12;
	case DRM_FORMAT_NV21:
		return VIRGL_FORMAT_NV21;
	case DRM_FORMAT_P010:
		return VIRGL_FORMAT_P010;
	case DRM_FORMAT_YVU420:
	case DRM_FORMAT_YVU420_ANDROID:
		return VIRGL_FORMAT_YV12;
	default:
		drv_loge("Unhandled format:%d\n", drm_fourcc);
		return 0;
	}
}

static bool virgl_bitmask_supports_format(struct virgl_supported_format_mask *supported,
					  uint32_t drm_format)
{
	uint32_t virgl_format = translate_format(drm_format);
	if (!virgl_format)
		return false;

	uint32_t bitmask_index = virgl_format / 32;
	uint32_t bit_index = virgl_format % 32;
	return supported->bitmask[bitmask_index] & (1 << bit_index);
}
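// Illustrative example for the lookup above (the enum value is assumed, not taken from
// virgl_hw.h): if translate_format() returned a virgl format of 40, the check reads
// supported->bitmask[40 / 32] == bitmask[1] and tests bit 40 % 32 == 8, i.e.
// supported->bitmask[1] & (1 << 8).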

// The metadata generated here for emulated buffers is slightly different than the metadata
// generated by drv_bo_from_format. In order to simplify transfers in the flush and invalidate
// functions below, the emulated buffers are oversized. For example, ignoring stride alignment
// requirements to demonstrate, a 6x6 YUV420 image buffer might have the following layout from
// drv_bo_from_format:
//
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | U | U | U | U | U | U |
// | U | U | U | V | V | V |
// | V | V | V | V | V | V |
//
// where each plane immediately follows the previous plane in memory. This layout makes it
// difficult to compute the transfers needed for example when the middle 2x2 region of the
// image is locked and needs to be flushed/invalidated.
//
// Emulated multi-plane buffers instead have a layout of:
//
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | Y | Y | Y | Y | Y | Y |
// | U | U | U |   |   |   |
// | U | U | U |   |   |   |
// | U | U | U |   |   |   |
// | V | V | V |   |   |   |
// | V | V | V |   |   |   |
// | V | V | V |   |   |   |
//
// where each plane is placed as a sub-image (albeit with a very large stride) in order to
// simplify transfers into 3 sub-image transfers for the above example.
//
// Additional note: the V-plane is not placed to the right of the U-plane due to some
// observed failures in media framework code which assumes the V-plane is not
// "row-interlaced" with the U-plane.
static void virgl_get_emulated_metadata(const struct bo *bo, struct bo_metadata *metadata)
{
	uint32_t y_plane_height;
	uint32_t c_plane_height;
	uint32_t original_width = bo->meta.width;
	uint32_t original_height = bo->meta.height;

	metadata->format = DRM_FORMAT_R8;
	switch (bo->meta.format) {
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV21:
		// Bi-planar
		metadata->num_planes = 2;

		y_plane_height = original_height;
		c_plane_height = DIV_ROUND_UP(original_height, 2);

		metadata->width = original_width;
		metadata->height = y_plane_height + c_plane_height;

		// Y-plane (full resolution)
		metadata->strides[0] = metadata->width;
		metadata->offsets[0] = 0;
		metadata->sizes[0] = metadata->width * y_plane_height;

		// CbCr-plane (half resolution, interleaved, placed below Y-plane)
		metadata->strides[1] = metadata->width;
		metadata->offsets[1] = metadata->offsets[0] + metadata->sizes[0];
		metadata->sizes[1] = metadata->width * c_plane_height;

		metadata->total_size = metadata->width * metadata->height;
		break;
	case DRM_FORMAT_YVU420:
	case DRM_FORMAT_YVU420_ANDROID:
		// Tri-planar
		metadata->num_planes = 3;

		y_plane_height = original_height;
		c_plane_height = DIV_ROUND_UP(original_height, 2);

		metadata->width = ALIGN(original_width, 32);
		metadata->height = y_plane_height + (2 * c_plane_height);

		// Y-plane (full resolution)
		metadata->strides[0] = metadata->width;
		metadata->offsets[0] = 0;
		metadata->sizes[0] = metadata->width * original_height;

		// Cb-plane (half resolution, placed below Y-plane)
		metadata->strides[1] = metadata->width;
		metadata->offsets[1] = metadata->offsets[0] + metadata->sizes[0];
		metadata->sizes[1] = metadata->width * c_plane_height;

		// Cr-plane (half resolution, placed below Cb-plane)
		metadata->strides[2] = metadata->width;
		metadata->offsets[2] = metadata->offsets[1] + metadata->sizes[1];
		metadata->sizes[2] = metadata->width * c_plane_height;

		metadata->total_size = metadata->width * metadata->height;
		break;
	default:
		break;
	}
}
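// Worked example (illustrative only, ignoring any extra alignment applied by callers): a
// 640x480 DRM_FORMAT_NV12 buffer emulated as R8 gets
//   num_planes = 2, width = 640, height = 480 + 240 = 720,
//   strides = { 640, 640 }, offsets = { 0, 307200 },
//   sizes = { 307200, 153600 }, total_size = 460800.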

struct virtio_transfers_params {
	size_t xfers_needed;
	struct rectangle xfer_boxes[DRV_MAX_PLANES];
};

static void virgl_get_emulated_transfers_params(const struct bo *bo,
						const struct rectangle *transfer_box,
						struct virtio_transfers_params *xfer_params)
{
	uint32_t y_plane_height;
	uint32_t c_plane_height;
	struct bo_metadata emulated_metadata;

	if (transfer_box->x == 0 && transfer_box->y == 0 && transfer_box->width == bo->meta.width &&
	    transfer_box->height == bo->meta.height) {
		virgl_get_emulated_metadata(bo, &emulated_metadata);

		xfer_params->xfers_needed = 1;
		xfer_params->xfer_boxes[0].x = 0;
		xfer_params->xfer_boxes[0].y = 0;
		xfer_params->xfer_boxes[0].width = emulated_metadata.width;
		xfer_params->xfer_boxes[0].height = emulated_metadata.height;

		return;
	}

	switch (bo->meta.format) {
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV21:
		// Bi-planar
		xfer_params->xfers_needed = 2;

		y_plane_height = bo->meta.height;
		c_plane_height = DIV_ROUND_UP(bo->meta.height, 2);

		// Y-plane (full resolution)
		xfer_params->xfer_boxes[0].x = transfer_box->x;
		xfer_params->xfer_boxes[0].y = transfer_box->y;
		xfer_params->xfer_boxes[0].width = transfer_box->width;
		xfer_params->xfer_boxes[0].height = transfer_box->height;

		// CbCr-plane (half resolution, interleaved, placed below Y-plane)
		xfer_params->xfer_boxes[1].x = transfer_box->x;
		xfer_params->xfer_boxes[1].y = transfer_box->y + y_plane_height;
		xfer_params->xfer_boxes[1].width = transfer_box->width;
		xfer_params->xfer_boxes[1].height = DIV_ROUND_UP(transfer_box->height, 2);

		break;
	case DRM_FORMAT_YVU420:
	case DRM_FORMAT_YVU420_ANDROID:
		// Tri-planar
		xfer_params->xfers_needed = 3;

		y_plane_height = bo->meta.height;
		c_plane_height = DIV_ROUND_UP(bo->meta.height, 2);

		// Y-plane (full resolution)
		xfer_params->xfer_boxes[0].x = transfer_box->x;
		xfer_params->xfer_boxes[0].y = transfer_box->y;
		xfer_params->xfer_boxes[0].width = transfer_box->width;
		xfer_params->xfer_boxes[0].height = transfer_box->height;

		// Cb-plane (half resolution, placed below Y-plane)
		xfer_params->xfer_boxes[1].x = transfer_box->x;
		xfer_params->xfer_boxes[1].y = transfer_box->y + y_plane_height;
		xfer_params->xfer_boxes[1].width = DIV_ROUND_UP(transfer_box->width, 2);
		xfer_params->xfer_boxes[1].height = DIV_ROUND_UP(transfer_box->height, 2);

		// Cr-plane (half resolution, placed below Cb-plane)
		xfer_params->xfer_boxes[2].x = transfer_box->x;
		xfer_params->xfer_boxes[2].y = transfer_box->y + y_plane_height + c_plane_height;
		xfer_params->xfer_boxes[2].width = DIV_ROUND_UP(transfer_box->width, 2);
		xfer_params->xfer_boxes[2].height = DIV_ROUND_UP(transfer_box->height, 2);

		break;
	}
}
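// Worked example (illustrative only): locking a 2x2 region at (2, 2) of a 6x6
// DRM_FORMAT_NV12 buffer yields two transfer boxes:
//   Y-plane:    x = 2, y = 2,         width = 2, height = 2
//   CbCr-plane: x = 2, y = 2 + 6 = 8, width = 2, height = DIV_ROUND_UP(2, 2) = 1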

static bool virgl_supports_combination_natively(struct driver *drv, uint32_t drm_format,
						uint64_t use_flags)
{
	struct virgl_priv *priv = (struct virgl_priv *)drv->priv;

	if (priv->caps.max_version == 0)
		return true;

	if ((use_flags & BO_USE_RENDERING) &&
	    !virgl_bitmask_supports_format(&priv->caps.v1.render, drm_format))
		return false;

	if ((use_flags & BO_USE_TEXTURE) &&
	    !virgl_bitmask_supports_format(&priv->caps.v1.sampler, drm_format))
		return false;

	if ((use_flags & BO_USE_SCANOUT) && priv->caps_is_v2 &&
	    !virgl_bitmask_supports_format(&priv->caps.v2.scanout, drm_format))
		return false;

	return true;
}

// For virtio backends that do not support formats natively (e.g. multi-planar formats are not
// supported in virglrenderer when gbm is unavailable on the host machine), this returns whether
// the format and usage combination can be handled as a blob (byte buffer).
static bool virgl_supports_combination_through_emulation(struct driver *drv, uint32_t drm_format,
							 uint64_t use_flags)
{
	struct virgl_priv *priv = (struct virgl_priv *)drv->priv;

	// Only enable emulation on non-gbm virtio backends.
	if (priv->host_gbm_enabled)
		return false;

	if (use_flags & (BO_USE_RENDERING | BO_USE_SCANOUT))
		return false;

	if (!virgl_supports_combination_natively(drv, DRM_FORMAT_R8, use_flags))
		return false;

	return drm_format == DRM_FORMAT_NV12 || drm_format == DRM_FORMAT_NV21 ||
	       drm_format == DRM_FORMAT_YVU420 || drm_format == DRM_FORMAT_YVU420_ANDROID;
}
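// Illustrative example: on a virglrenderer host without gbm (host_gbm_enabled == 0),
// DRM_FORMAT_NV12 with BO_USE_TEXTURE can be emulated, provided DRM_FORMAT_R8 is natively
// supported for BO_USE_TEXTURE; the same request with BO_USE_SCANOUT is rejected by the
// checks above.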

// Adds the given buffer combination to the list of supported buffer combinations if the
// combination is supported by the virtio backend.
static void virgl_add_combination(struct driver *drv, uint32_t drm_format,
				  struct format_metadata *metadata, uint64_t use_flags)
{
	if (params[param_3d].value) {
		if ((use_flags & BO_USE_SCANOUT) &&
		    !virgl_supports_combination_natively(drv, drm_format, BO_USE_SCANOUT)) {
			drv_logi("Strip scanout on format: %d\n", drm_format);
			use_flags &= ~BO_USE_SCANOUT;
		}

		if (!virgl_supports_combination_natively(drv, drm_format, use_flags) &&
		    !virgl_supports_combination_through_emulation(drv, drm_format, use_flags)) {
			drv_logi("Skipping unsupported combination format:%d\n", drm_format);
			return;
		}
	}

	drv_add_combination(drv, drm_format, metadata, use_flags);
}

// Adds each given buffer combination to the list of supported buffer combinations if the
// combination is supported by the virtio backend.
static void virgl_add_combinations(struct driver *drv, const uint32_t *drm_formats,
				   uint32_t num_formats, struct format_metadata *metadata,
				   uint64_t use_flags)
{
	uint32_t i;

	for (i = 0; i < num_formats; i++)
		virgl_add_combination(drv, drm_formats[i], metadata, use_flags);
}

static int virgl_2d_dumb_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
				   uint64_t use_flags)
{
	if (bo->meta.format != DRM_FORMAT_R8) {
		width = ALIGN(width, MESA_LLVMPIPE_TILE_SIZE);
		height = ALIGN(height, MESA_LLVMPIPE_TILE_SIZE);
	}

	return drv_dumb_bo_create_ex(bo, width, height, format, use_flags, BO_QUIRK_DUMB32BPP);
}

static inline void handle_flag(uint64_t *flag, uint64_t check_flag, uint32_t *bind,
			       uint32_t virgl_bind)
{
	if ((*flag) & check_flag) {
		(*flag) &= ~check_flag;
		(*bind) |= virgl_bind;
	}
}

static uint32_t compute_virgl_bind_flags(uint64_t use_flags)
{
	/* In crosvm, VIRGL_BIND_SHARED means minigbm will allocate, not virglrenderer. */
	uint32_t bind = VIRGL_BIND_SHARED;

	handle_flag(&use_flags, BO_USE_TEXTURE, &bind, VIRGL_BIND_SAMPLER_VIEW);
	handle_flag(&use_flags, BO_USE_RENDERING, &bind, VIRGL_BIND_RENDER_TARGET);
	handle_flag(&use_flags, BO_USE_SCANOUT, &bind, VIRGL_BIND_SCANOUT);
	handle_flag(&use_flags, BO_USE_CURSOR, &bind, VIRGL_BIND_CURSOR);
	handle_flag(&use_flags, BO_USE_LINEAR, &bind, VIRGL_BIND_LINEAR);
	handle_flag(&use_flags, BO_USE_SENSOR_DIRECT_DATA, &bind, VIRGL_BIND_LINEAR);
	handle_flag(&use_flags, BO_USE_GPU_DATA_BUFFER, &bind, VIRGL_BIND_LINEAR);
	handle_flag(&use_flags, BO_USE_FRONT_RENDERING, &bind, VIRGL_BIND_LINEAR);

	if (use_flags & BO_USE_PROTECTED) {
		handle_flag(&use_flags, BO_USE_PROTECTED, &bind, VIRGL_BIND_MINIGBM_PROTECTED);
	} else {
		// Make sure we don't set both flags, since that could be mistaken for
		// protected. Give OFTEN priority over RARELY.
		if (use_flags & BO_USE_SW_READ_OFTEN) {
			handle_flag(&use_flags, BO_USE_SW_READ_OFTEN, &bind,
				    VIRGL_BIND_MINIGBM_SW_READ_OFTEN);
		} else {
			handle_flag(&use_flags, BO_USE_SW_READ_RARELY, &bind,
				    VIRGL_BIND_MINIGBM_SW_READ_RARELY);
		}
		if (use_flags & BO_USE_SW_WRITE_OFTEN) {
			handle_flag(&use_flags, BO_USE_SW_WRITE_OFTEN, &bind,
				    VIRGL_BIND_MINIGBM_SW_WRITE_OFTEN);
		} else {
			handle_flag(&use_flags, BO_USE_SW_WRITE_RARELY, &bind,
				    VIRGL_BIND_MINIGBM_SW_WRITE_RARELY);
		}
	}

	handle_flag(&use_flags, BO_USE_CAMERA_WRITE, &bind, VIRGL_BIND_MINIGBM_CAMERA_WRITE);
	handle_flag(&use_flags, BO_USE_CAMERA_READ, &bind, VIRGL_BIND_MINIGBM_CAMERA_READ);
	handle_flag(&use_flags, BO_USE_HW_VIDEO_DECODER, &bind,
		    VIRGL_BIND_MINIGBM_HW_VIDEO_DECODER);
	handle_flag(&use_flags, BO_USE_HW_VIDEO_ENCODER, &bind,
		    VIRGL_BIND_MINIGBM_HW_VIDEO_ENCODER);

	if (use_flags)
		drv_loge("Unhandled bo use flag: %llx\n", (unsigned long long)use_flags);

	return bind;
}
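// Illustrative example: use_flags = BO_USE_TEXTURE | BO_USE_SW_READ_OFTEN |
// BO_USE_SW_WRITE_RARELY maps to
//   VIRGL_BIND_SHARED | VIRGL_BIND_SAMPLER_VIEW |
//   VIRGL_BIND_MINIGBM_SW_READ_OFTEN | VIRGL_BIND_MINIGBM_SW_WRITE_RARELY,
// with every input flag consumed, so no "Unhandled bo use flag" message is logged.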

static int virgl_3d_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
			      uint64_t use_flags)
{
	int ret;
	size_t i;
	uint32_t stride;
	struct drm_virtgpu_resource_create res_create = { 0 };
	struct bo_metadata emulated_metadata;

	if (virgl_supports_combination_natively(bo->drv, format, use_flags)) {
		stride = drv_stride_from_format(format, width, 0);
		drv_bo_from_format(bo, stride, 1, height, format);
	} else {
		assert(virgl_supports_combination_through_emulation(bo->drv, format, use_flags));

		virgl_get_emulated_metadata(bo, &emulated_metadata);

		format = emulated_metadata.format;
		width = emulated_metadata.width;
		height = emulated_metadata.height;
		for (i = 0; i < emulated_metadata.num_planes; i++) {
			bo->meta.strides[i] = emulated_metadata.strides[i];
			bo->meta.offsets[i] = emulated_metadata.offsets[i];
			bo->meta.sizes[i] = emulated_metadata.sizes[i];
		}
		bo->meta.total_size = emulated_metadata.total_size;
	}

	/*
	 * Setting the target is intended to ensure this resource gets bound as a 2D
	 * texture in the host renderer's GL state. All of these resource properties are
	 * sent unchanged by the kernel to the host, which in turn sends them unchanged to
	 * virglrenderer. When virglrenderer makes a resource, it will convert the target
	 * enum to the equivalent one in GL and then bind the resource to that target.
	 */

	res_create.target = PIPE_TEXTURE_2D;
	res_create.format = translate_format(format);
	res_create.bind = compute_virgl_bind_flags(use_flags);
	res_create.width = width;
	res_create.height = height;

	/* For virgl 3D */
	res_create.depth = 1;
	res_create.array_size = 1;
	res_create.last_level = 0;
	res_create.nr_samples = 0;

	res_create.size = ALIGN(bo->meta.total_size, PAGE_SIZE); // PAGE_SIZE = 0x1000
	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, &res_create);
	if (ret) {
		drv_loge("DRM_IOCTL_VIRTGPU_RESOURCE_CREATE failed with %s\n", strerror(errno));
		return ret;
	}

	for (uint32_t plane = 0; plane < bo->meta.num_planes; plane++)
		bo->handles[plane].u32 = res_create.bo_handle;

	return 0;
}

static void *virgl_3d_bo_map(struct bo *bo, struct vma *vma, uint32_t map_flags)
{
	int ret;
	struct drm_virtgpu_map gem_map = { 0 };

	gem_map.handle = bo->handles[0].u32;
	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_MAP, &gem_map);
	if (ret) {
		drv_loge("DRM_IOCTL_VIRTGPU_MAP failed with %s\n", strerror(errno));
		return MAP_FAILED;
	}

	vma->length = bo->meta.total_size;
	return mmap(0, bo->meta.total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
		    gem_map.offset);
}

static uint32_t virgl_3d_get_max_texture_2d_size(struct driver *drv)
{
	struct virgl_priv *priv = (struct virgl_priv *)drv->priv;

	if (priv->caps.v2.max_texture_2d_size)
		return priv->caps.v2.max_texture_2d_size;

	return UINT32_MAX;
}

static int virgl_get_caps(struct driver *drv, union virgl_caps *caps, int *caps_is_v2)
{
	int ret;
	struct drm_virtgpu_get_caps cap_args = { 0 };

	*caps_is_v2 = 0;
	cap_args.addr = (unsigned long long)caps;
	if (params[param_capset_fix].value) {
		*caps_is_v2 = 1;
		cap_args.cap_set_id = 2;
		cap_args.size = sizeof(union virgl_caps);
	} else {
		cap_args.cap_set_id = 1;
		cap_args.size = sizeof(struct virgl_caps_v1);
	}

	ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &cap_args);
	if (ret) {
		drv_loge("DRM_IOCTL_VIRTGPU_GET_CAPS failed with %s\n", strerror(errno));
		*caps_is_v2 = 0;

		// Fallback to v1
		cap_args.cap_set_id = 1;
		cap_args.size = sizeof(struct virgl_caps_v1);

		ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &cap_args);
		if (ret)
			drv_loge("DRM_IOCTL_VIRTGPU_GET_CAPS failed with %s\n", strerror(errno));
	}

	return ret;
}

static void virgl_init_params_and_caps(struct driver *drv)
{
	struct virgl_priv *priv = (struct virgl_priv *)drv->priv;
	if (params[param_3d].value) {
		virgl_get_caps(drv, &priv->caps, &priv->caps_is_v2);

		// We use two criteria to determine whether host minigbm is used for swapchain
		// allocations.
		//
		// (1) Host minigbm is only available via virglrenderer, and only virglrenderer
		//     advertises capabilities.
		// (2) Only host minigbm doesn't emulate YUV formats. Checking this is a bit of a
		//     proxy, but it works.
		priv->host_gbm_enabled =
		    priv->caps.max_version > 0 &&
		    virgl_supports_combination_natively(drv, DRM_FORMAT_NV12, BO_USE_TEXTURE);
	}
}

static int virgl_init(struct driver *drv)
{
	struct virgl_priv *priv;

	priv = calloc(1, sizeof(*priv));
	if (!priv)
		return -ENOMEM;

	drv->priv = priv;

	virgl_init_params_and_caps(drv);

	if (params[param_3d].value) {
		/* This doesn't mean the host can scan out everything; it just means the host
		 * hypervisor can show it. */
		virgl_add_combinations(drv, render_target_formats,
				       ARRAY_SIZE(render_target_formats), &LINEAR_METADATA,
				       BO_USE_RENDER_MASK | BO_USE_SCANOUT);
		virgl_add_combinations(drv, texture_source_formats,
				       ARRAY_SIZE(texture_source_formats), &LINEAR_METADATA,
				       BO_USE_TEXTURE_MASK);
		/* NV12 with scanout must flow through virgl_add_combination, so that the native
		 * support is checked and scanout use_flag can be conditionally stripped. */
		virgl_add_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA,
				      BO_USE_TEXTURE_MASK | BO_USE_CAMERA_READ |
					  BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
					  BO_USE_HW_VIDEO_ENCODER | BO_USE_SCANOUT);
	} else {
		/* Virtio primary plane only allows this format. */
		virgl_add_combination(drv, DRM_FORMAT_XRGB8888, &LINEAR_METADATA,
				      BO_USE_RENDER_MASK | BO_USE_SCANOUT);
		/* Virtio cursor plane only allows this format, and Chrome cannot live without
		 * an ARGB8888 renderable format. */
		virgl_add_combination(drv, DRM_FORMAT_ARGB8888, &LINEAR_METADATA,
				      BO_USE_RENDER_MASK | BO_USE_CURSOR);
		/* Android needs more, but they cannot be bound as scanouts anymore after
		 * "drm/virtio: fix DRM_FORMAT_* handling" */
		virgl_add_combinations(drv, render_target_formats,
				       ARRAY_SIZE(render_target_formats), &LINEAR_METADATA,
				       BO_USE_RENDER_MASK);
		virgl_add_combinations(drv, dumb_texture_source_formats,
				       ARRAY_SIZE(dumb_texture_source_formats), &LINEAR_METADATA,
				       BO_USE_TEXTURE_MASK);
		drv_modify_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA,
				       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE |
					   BO_USE_HW_VIDEO_DECODER | BO_USE_HW_VIDEO_ENCODER);
	}

	/* Android CTS tests require this. */
	virgl_add_combination(drv, DRM_FORMAT_RGB888, &LINEAR_METADATA, BO_USE_SW_MASK);
	virgl_add_combination(drv, DRM_FORMAT_BGR888, &LINEAR_METADATA, BO_USE_SW_MASK);
	/* Android Camera CTS tests require this. Additionally, the scanout usage is needed for
	 * Camera preview and is expected to be conditionally stripped by virgl_add_combination
	 * when not natively supported and instead handled by HWComposer. */
	virgl_add_combination(drv, DRM_FORMAT_P010, &LINEAR_METADATA,
			      BO_USE_SCANOUT | BO_USE_TEXTURE | BO_USE_SW_MASK |
				  BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE);
	/* Android VTS sensors hal tests require BO_USE_SENSOR_DIRECT_DATA. */
	drv_modify_combination(drv, DRM_FORMAT_R8, &LINEAR_METADATA,
			       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
				   BO_USE_HW_VIDEO_ENCODER | BO_USE_SENSOR_DIRECT_DATA |
				   BO_USE_GPU_DATA_BUFFER);

	if (!priv->host_gbm_enabled) {
		drv_modify_combination(drv, DRM_FORMAT_ABGR8888, &LINEAR_METADATA,
				       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE |
					   BO_USE_HW_VIDEO_DECODER | BO_USE_HW_VIDEO_ENCODER);
		drv_modify_combination(drv, DRM_FORMAT_XBGR8888, &LINEAR_METADATA,
				       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE |
					   BO_USE_HW_VIDEO_DECODER | BO_USE_HW_VIDEO_ENCODER);
		drv_modify_combination(drv, DRM_FORMAT_NV21, &LINEAR_METADATA,
				       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE |
					   BO_USE_HW_VIDEO_DECODER | BO_USE_HW_VIDEO_ENCODER);
		drv_modify_combination(drv, DRM_FORMAT_R16, &LINEAR_METADATA,
				       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE |
					   BO_USE_HW_VIDEO_DECODER);
		drv_modify_combination(drv, DRM_FORMAT_YVU420, &LINEAR_METADATA,
				       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE |
					   BO_USE_HW_VIDEO_DECODER | BO_USE_HW_VIDEO_ENCODER);
		drv_modify_combination(drv, DRM_FORMAT_YVU420_ANDROID, &LINEAR_METADATA,
				       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE |
					   BO_USE_HW_VIDEO_DECODER | BO_USE_HW_VIDEO_ENCODER);
	}

	return drv_modify_linear_combinations(drv);
}

static void virgl_close(struct driver *drv)
{
	free(drv->priv);
	drv->priv = NULL;
}

static int virgl_bo_create_blob(struct driver *drv, struct bo *bo)
{
	int ret;
	uint32_t stride;
	uint32_t cur_blob_id;
	uint32_t cmd[VIRGL_PIPE_RES_CREATE_SIZE + 1] = { 0 };
	struct drm_virtgpu_resource_create_blob drm_rc_blob = { 0 };
	struct virgl_priv *priv = (struct virgl_priv *)drv->priv;

	uint32_t blob_flags = VIRTGPU_BLOB_FLAG_USE_SHAREABLE;
	if (bo->meta.use_flags & (BO_USE_SW_MASK | BO_USE_GPU_DATA_BUFFER))
		blob_flags |= VIRTGPU_BLOB_FLAG_USE_MAPPABLE;

	// For now, all blob use cases are cross device. When we add wider
	// support for blobs, we can revisit making this unconditional.
	blob_flags |= VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE;

	cur_blob_id = atomic_fetch_add(&priv->next_blob_id, 1);
	stride = drv_stride_from_format(bo->meta.format, bo->meta.width, 0);
	drv_bo_from_format(bo, stride, 1, bo->meta.height, bo->meta.format);
	bo->meta.total_size = ALIGN(bo->meta.total_size, PAGE_SIZE);
	bo->meta.tiling = blob_flags;

	cmd[0] = VIRGL_CMD0(VIRGL_CCMD_PIPE_RESOURCE_CREATE, 0, VIRGL_PIPE_RES_CREATE_SIZE);
	cmd[VIRGL_PIPE_RES_CREATE_TARGET] = PIPE_TEXTURE_2D;
	cmd[VIRGL_PIPE_RES_CREATE_WIDTH] = bo->meta.width;
	cmd[VIRGL_PIPE_RES_CREATE_HEIGHT] = bo->meta.height;
	cmd[VIRGL_PIPE_RES_CREATE_FORMAT] = translate_format(bo->meta.format);
	cmd[VIRGL_PIPE_RES_CREATE_BIND] = compute_virgl_bind_flags(bo->meta.use_flags);
	cmd[VIRGL_PIPE_RES_CREATE_DEPTH] = 1;
	cmd[VIRGL_PIPE_RES_CREATE_BLOB_ID] = cur_blob_id;

	drm_rc_blob.cmd = (uint64_t)&cmd;
	drm_rc_blob.cmd_size = 4 * (VIRGL_PIPE_RES_CREATE_SIZE + 1);
	drm_rc_blob.size = bo->meta.total_size;
	drm_rc_blob.blob_mem = VIRTGPU_BLOB_MEM_HOST3D;
	drm_rc_blob.blob_flags = blob_flags;
	drm_rc_blob.blob_id = cur_blob_id;

	ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &drm_rc_blob);
	if (ret < 0) {
		drv_loge("DRM_VIRTGPU_RESOURCE_CREATE_BLOB failed with %s\n", strerror(errno));
		return -errno;
	}

	for (uint32_t plane = 0; plane < bo->meta.num_planes; plane++)
		bo->handles[plane].u32 = drm_rc_blob.bo_handle;

	return 0;
}

static bool should_use_blob(struct driver *drv, uint32_t format, uint64_t use_flags)
{
	struct virgl_priv *priv = (struct virgl_priv *)drv->priv;

	// TODO(gurchetansingh): remove once all minigbm users are blob-safe
#ifndef VIRTIO_GPU_NEXT
	return false;
#endif

	// Only use blob when host gbm is available
	if (!priv->host_gbm_enabled)
		return false;

	// Use regular resources if only the GPU needs efficient access. Blob resource is a better
	// fit for BO_USE_GPU_DATA_BUFFER which is mapped to VIRGL_BIND_LINEAR.
	if (!(use_flags & (BO_USE_SW_READ_OFTEN | BO_USE_SW_WRITE_OFTEN | BO_USE_LINEAR |
			   BO_USE_NON_GPU_HW | BO_USE_GPU_DATA_BUFFER)))
		return false;

	switch (format) {
	case DRM_FORMAT_R8:
		// Formats with strictly defined strides are supported
		return true;
	case DRM_FORMAT_YVU420_ANDROID:
	case DRM_FORMAT_NV12:
		// Knowing buffer metadata at buffer creation isn't yet supported, so buffers
		// can't be properly mapped into the guest.
		return (use_flags & BO_USE_SW_MASK) == 0;
	default:
		return false;
	}
}
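// Illustrative example (assuming a build with VIRTIO_GPU_NEXT and host gbm enabled): a
// DRM_FORMAT_R8 buffer with BO_USE_GPU_DATA_BUFFER takes the blob path, while
// DRM_FORMAT_NV12 with BO_USE_SW_READ_OFTEN falls back to a regular 3D resource, since its
// guest mapping layout isn't known at creation time.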

static int virgl_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
			   uint64_t use_flags)
{
	if (params[param_resource_blob].value && params[param_host_visible].value &&
	    should_use_blob(bo->drv, format, use_flags))
		return virgl_bo_create_blob(bo->drv, bo);

	if (params[param_3d].value)
		return virgl_3d_bo_create(bo, width, height, format, use_flags);
	else
		return virgl_2d_dumb_bo_create(bo, width, height, format, use_flags);
}

static int virgl_bo_create_with_modifiers(struct bo *bo, uint32_t width, uint32_t height,
					  uint32_t format, const uint64_t *modifiers,
					  uint32_t count)
{
	uint64_t use_flags = 0;

	for (uint32_t i = 0; i < count; i++) {
		if (modifiers[i] == DRM_FORMAT_MOD_LINEAR) {
			return virgl_bo_create(bo, width, height, format, use_flags);
		}
	}

	return -EINVAL;
}

static int virgl_bo_destroy(struct bo *bo)
{
	if (params[param_3d].value)
		return drv_gem_bo_destroy(bo);
	else
		return drv_dumb_bo_destroy(bo);
}

static void *virgl_bo_map(struct bo *bo, struct vma *vma, uint32_t map_flags)
{
	if (params[param_3d].value)
		return virgl_3d_bo_map(bo, vma, map_flags);
	else
		return drv_dumb_bo_map(bo, vma, map_flags);
}

static bool is_arc_screen_capture_bo(struct bo *bo)
{
	struct drm_prime_handle prime_handle = {};
	int ret, fd;
	char tmp[256];

	if (bo->meta.num_planes != 1 ||
	    (bo->meta.format != DRM_FORMAT_ABGR8888 && bo->meta.format != DRM_FORMAT_ARGB8888 &&
	     bo->meta.format != DRM_FORMAT_XRGB8888 && bo->meta.format != DRM_FORMAT_XBGR8888))
		return false;
	prime_handle.handle = bo->handles[0].u32;
	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &prime_handle);
	if (ret < 0)
		return false;
	snprintf(tmp, sizeof(tmp), "/proc/self/fdinfo/%d", prime_handle.fd);
	fd = open(tmp, O_RDONLY);
	if (fd < 0) {
		close(prime_handle.fd);
		return false;
	}
	ret = read(fd, tmp, sizeof(tmp) - 1);
	close(prime_handle.fd);
	close(fd);
	if (ret < 0)
		return false;
	tmp[ret] = 0;

	return strstr(tmp, "ARC-SCREEN-CAP");
}

static int virgl_bo_invalidate(struct bo *bo, struct mapping *mapping)
{
	int ret;
	size_t i;
	struct drm_virtgpu_3d_transfer_from_host xfer = { 0 };
	struct drm_virtgpu_3d_wait waitcmd = { 0 };
	struct virtio_transfers_params xfer_params;
	struct virgl_priv *priv = (struct virgl_priv *)bo->drv->priv;
	uint64_t host_write_flags;

	if (!params[param_3d].value)
		return 0;

	// Invalidate is only necessary if the host writes to the buffer. The encoder and
	// decoder flags don't differentiate between input and output buffers, but we can
	// use the format to determine whether this buffer could be encoder/decoder output.
	host_write_flags = BO_USE_RENDERING | BO_USE_CAMERA_WRITE | BO_USE_GPU_DATA_BUFFER;
	if (bo->meta.format == DRM_FORMAT_R8)
		host_write_flags |= BO_USE_HW_VIDEO_ENCODER;
	else
		host_write_flags |= BO_USE_HW_VIDEO_DECODER;

	// TODO(b/267892346): Revert this workaround after migrating to virtgpu_cross_domain
	// backend since it's a special arc only behavior.
	if (!(bo->meta.use_flags & (BO_USE_ARC_SCREEN_CAP_PROBED | BO_USE_RENDERING))) {
		bo->meta.use_flags |= BO_USE_ARC_SCREEN_CAP_PROBED;
		if (is_arc_screen_capture_bo(bo)) {
			bo->meta.use_flags |= BO_USE_RENDERING;
		}
	}

	if ((bo->meta.use_flags & host_write_flags) == 0)
		return 0;

	if (params[param_resource_blob].value && (bo->meta.tiling & VIRTGPU_BLOB_FLAG_USE_MAPPABLE))
		return 0;

	xfer.bo_handle = mapping->vma->handle;

	if (mapping->rect.x || mapping->rect.y) {
		/*
		 * virglrenderer uses the box parameters and assumes that offset == 0 for planar
		 * images
		 */
		if (bo->meta.num_planes == 1) {
			xfer.offset =
			    (bo->meta.strides[0] * mapping->rect.y) +
			    drv_bytes_per_pixel_from_format(bo->meta.format, 0) * mapping->rect.x;
		}
	}

	if ((bo->meta.use_flags & BO_USE_RENDERING) == 0) {
		// Unfortunately, the kernel doesn't actually pass the guest layer_stride
		// and guest stride to the host (compare virgl.h and virtgpu_drm.h).
		// For gbm based resources, we can work around this by using the level field
		// to pass the stride to virglrenderer's gbm transfer code. However, we need
		// to avoid doing this for resources which don't rely on that transfer code,
		// which is resources with the BO_USE_RENDERING flag set.
		// TODO(b/145993887): Send also stride when the patches are landed
		if (priv->host_gbm_enabled)
			xfer.level = bo->meta.strides[0];
	}

	if (virgl_supports_combination_natively(bo->drv, bo->meta.format, bo->meta.use_flags)) {
		xfer_params.xfers_needed = 1;
		xfer_params.xfer_boxes[0] = mapping->rect;
	} else {
		assert(virgl_supports_combination_through_emulation(bo->drv, bo->meta.format,
								    bo->meta.use_flags));

		virgl_get_emulated_transfers_params(bo, &mapping->rect, &xfer_params);
	}

	for (i = 0; i < xfer_params.xfers_needed; i++) {
		xfer.box.x = xfer_params.xfer_boxes[i].x;
		xfer.box.y = xfer_params.xfer_boxes[i].y;
		xfer.box.w = xfer_params.xfer_boxes[i].width;
		xfer.box.h = xfer_params.xfer_boxes[i].height;
		xfer.box.d = 1;

		ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST, &xfer);
		if (ret) {
			drv_loge("DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST failed with %s\n",
				 strerror(errno));
			return -errno;
		}
	}

	// The transfer needs to complete before invalidate returns so that any host changes
	// are visible and to ensure the host doesn't overwrite subsequent guest changes.
	// TODO(b/136733358): Support returning fences from transfers
	waitcmd.handle = mapping->vma->handle;
	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
	if (ret) {
		drv_loge("DRM_IOCTL_VIRTGPU_WAIT failed with %s\n", strerror(errno));
		return -errno;
	}

	return 0;
}

static int virgl_bo_flush(struct bo *bo, struct mapping *mapping)
{
	int ret;
	size_t i;
	struct drm_virtgpu_3d_transfer_to_host xfer = { 0 };
	struct drm_virtgpu_3d_wait waitcmd = { 0 };
	struct virtio_transfers_params xfer_params;
	struct virgl_priv *priv = (struct virgl_priv *)bo->drv->priv;

	if (!params[param_3d].value)
		return 0;

	if (!(mapping->vma->map_flags & BO_MAP_WRITE))
		return 0;

	if (params[param_resource_blob].value && (bo->meta.tiling & VIRTGPU_BLOB_FLAG_USE_MAPPABLE))
		return 0;

	xfer.bo_handle = mapping->vma->handle;

	if (mapping->rect.x || mapping->rect.y) {
		/*
		 * virglrenderer uses the box parameters and assumes that offset == 0 for planar
		 * images
		 */
		if (bo->meta.num_planes == 1) {
			xfer.offset =
			    (bo->meta.strides[0] * mapping->rect.y) +
			    drv_bytes_per_pixel_from_format(bo->meta.format, 0) * mapping->rect.x;
		}
	}

	// Unfortunately, the kernel doesn't actually pass the guest layer_stride and
	// guest stride to the host (compare virgl.h and virtgpu_drm.h). We can use
	// the level to work around this.
	if (priv->host_gbm_enabled)
		xfer.level = bo->meta.strides[0];

	if (virgl_supports_combination_natively(bo->drv, bo->meta.format, bo->meta.use_flags)) {
		xfer_params.xfers_needed = 1;
		xfer_params.xfer_boxes[0] = mapping->rect;
	} else {
		assert(virgl_supports_combination_through_emulation(bo->drv, bo->meta.format,
								    bo->meta.use_flags));

		virgl_get_emulated_transfers_params(bo, &mapping->rect, &xfer_params);
	}

	for (i = 0; i < xfer_params.xfers_needed; i++) {
		xfer.box.x = xfer_params.xfer_boxes[i].x;
		xfer.box.y = xfer_params.xfer_boxes[i].y;
		xfer.box.w = xfer_params.xfer_boxes[i].width;
		xfer.box.h = xfer_params.xfer_boxes[i].height;
		xfer.box.d = 1;

		ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST, &xfer);
		if (ret) {
			drv_loge("DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST failed with %s\n",
				 strerror(errno));
			return -errno;
		}
	}

	// If the buffer is only accessed by the host GPU, then the flush is ordered
	// with subsequent commands. However, if other host hardware can access the
	// buffer, we need to wait for the transfer to complete for consistency.
	// TODO(b/136733358): Support returning fences from transfers
	if (bo->meta.use_flags & BO_USE_NON_GPU_HW) {
		waitcmd.handle = mapping->vma->handle;

		ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
		if (ret) {
			drv_loge("DRM_IOCTL_VIRTGPU_WAIT failed with %s\n", strerror(errno));
			return -errno;
		}
	}

	return 0;
}

static void virgl_3d_resolve_format_and_use_flags(struct driver *drv, uint32_t format,
						  uint64_t use_flags, uint32_t *out_format,
						  uint64_t *out_use_flags)
{
	*out_format = format;
	*out_use_flags = use_flags;

	/* resolve flexible format into explicit format */
	switch (format) {
	case DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED:
		/* Camera subsystem requires NV12. */
		if (use_flags & (BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE)) {
			*out_format = DRM_FORMAT_NV12;
		} else {
			/* HACK: See b/28671744 and b/264408280 */
			*out_format = DRM_FORMAT_XBGR8888;
			*out_use_flags &= ~BO_USE_HW_VIDEO_ENCODER;
			*out_use_flags |= BO_USE_LINEAR;
		}
		break;
	case DRM_FORMAT_FLEX_YCbCr_420_888:
		/* All of our host drivers prefer NV12 as their flexible media format.
		 * If that changes, this will need to be modified. */
		*out_format = DRM_FORMAT_NV12;
		break;
	default:
		break;
	}

	/* resolve explicit format */
	switch (*out_format) {
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_XRGB8888:
		/* These are the scanout capable formats to the guest. Strip scanout use_flag if the
		 * host does not natively support scanout on the requested format. */
		if ((*out_use_flags & BO_USE_SCANOUT) &&
		    !virgl_supports_combination_natively(drv, *out_format, BO_USE_SCANOUT))
			*out_use_flags &= ~BO_USE_SCANOUT;
		break;
	case DRM_FORMAT_YVU420_ANDROID:
		*out_use_flags &= ~BO_USE_SCANOUT;
		/* HACK: See b/172389166. Also see gbm_bo_create. */
		*out_use_flags |= BO_USE_LINEAR;
		break;
	default:
		break;
	}
}

static void virgl_2d_resolve_format_and_use_flags(uint32_t format, uint64_t use_flags,
						  uint32_t *out_format, uint64_t *out_use_flags)
{
	*out_format = format;
	*out_use_flags = use_flags;

	/* HACK: See crrev/c/1849773 */
	if (format != DRM_FORMAT_XRGB8888)
		*out_use_flags &= ~BO_USE_SCANOUT;

	switch (format) {
	case DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED:
		/* Camera subsystem requires NV12. */
		if (use_flags & (BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE)) {
			*out_format = DRM_FORMAT_NV12;
		} else {
			/* HACK: See b/28671744 */
			*out_format = DRM_FORMAT_XBGR8888;
			*out_use_flags &= ~BO_USE_HW_VIDEO_ENCODER;
		}
		break;
	case DRM_FORMAT_FLEX_YCbCr_420_888:
		*out_format = DRM_FORMAT_YVU420_ANDROID;
		/* fallthrough */
	case DRM_FORMAT_YVU420_ANDROID:
		*out_use_flags &= ~BO_USE_SCANOUT;
		/* HACK: See b/172389166. Also see gbm_bo_create. */
		*out_use_flags |= BO_USE_LINEAR;
		break;
	default:
		break;
	}
}

static void virgl_resolve_format_and_use_flags(struct driver *drv, uint32_t format,
					       uint64_t use_flags, uint32_t *out_format,
					       uint64_t *out_use_flags)
{
	if (params[param_3d].value) {
		return virgl_3d_resolve_format_and_use_flags(drv, format, use_flags, out_format,
							     out_use_flags);
	} else {
		return virgl_2d_resolve_format_and_use_flags(format, use_flags, out_format,
							     out_use_flags);
	}
}

static int virgl_resource_info(struct bo *bo, uint32_t strides[DRV_MAX_PLANES],
			       uint32_t offsets[DRV_MAX_PLANES], uint64_t *format_modifier)
{
	int ret;
	struct drm_virtgpu_resource_info_cros res_info = { 0 };

	if (!params[param_3d].value)
		return 0;

	res_info.bo_handle = bo->handles[0].u32;
	res_info.type = VIRTGPU_RESOURCE_INFO_TYPE_EXTENDED;
	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_RESOURCE_INFO_CROS, &res_info);
	if (ret) {
		drv_loge("DRM_IOCTL_VIRTGPU_RESOURCE_INFO failed with %s\n", strerror(errno));
		return ret;
	}

	for (uint32_t plane = 0; plane < DRV_MAX_PLANES; plane++) {
		/*
		 * Currently, kernel v4.14 (Betty) doesn't have the extended resource info
		 * ioctl.
		 */
		if (!res_info.strides[plane])
			break;

		strides[plane] = res_info.strides[plane];
		offsets[plane] = res_info.offsets[plane];
	}
	*format_modifier = res_info.format_modifier;

	return 0;
}

static uint32_t virgl_get_max_texture_2d_size(struct driver *drv)
{
	if (params[param_3d].value)
		return virgl_3d_get_max_texture_2d_size(drv);
	else
		return VIRGL_2D_MAX_TEXTURE_2D_SIZE;
}

const struct backend virtgpu_virgl = { .name = "virtgpu_virgl",
				       .init = virgl_init,
				       .close = virgl_close,
				       .bo_create = virgl_bo_create,
				       .bo_create_with_modifiers = virgl_bo_create_with_modifiers,
				       .bo_destroy = virgl_bo_destroy,
				       .bo_import = drv_prime_bo_import,
				       .bo_map = virgl_bo_map,
				       .bo_unmap = drv_bo_munmap,
				       .bo_invalidate = virgl_bo_invalidate,
				       .bo_flush = virgl_bo_flush,
				       .resolve_format_and_use_flags =
					   virgl_resolve_format_and_use_flags,
				       .resource_info = virgl_resource_info,
				       .get_max_texture_2d_size = virgl_get_max_texture_2d_size };