/*
 * Copyright 2021 The Chromium OS Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include <errno.h>
#include <pthread.h>
#include <string.h>
#include <sys/mman.h>
#include <xf86drm.h>

#include "drv_helpers.h"
#include "drv_priv.h"
#include "external/virtgpu_cross_domain_protocol.h"
#include "external/virtgpu_drm.h"
#include "util.h"
#include "virtgpu.h"

#define CAPSET_CROSS_FAKE 30

static const uint32_t scanout_render_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB8888,
						   DRM_FORMAT_RGB565, DRM_FORMAT_XBGR8888,
						   DRM_FORMAT_XRGB8888 };

static const uint32_t texture_only_formats[] = {
	DRM_FORMAT_R8,		 DRM_FORMAT_NV12,	    DRM_FORMAT_P010,
	DRM_FORMAT_YVU420,	 DRM_FORMAT_YVU420_ANDROID, DRM_FORMAT_ABGR2101010,
	DRM_FORMAT_ARGB2101010,	 DRM_FORMAT_XBGR2101010,    DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_ABGR16161616F
};

extern struct virtgpu_param params[];

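/*
 * Per-driver state: a page-sized ring shared with the host for metadata query
 * responses (ring_handle / ring_addr), a cache of completed metadata queries,
 * and a lock that keeps each metadata query paired with its buffer allocation
 * (see cross_domain_bo_create()).
 */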
struct cross_domain_private {
	uint32_t ring_handle;
	void *ring_addr;
	struct drv_array *metadata_cache;
	pthread_mutex_t bo_create_lock;
	bool mt8183_camera_quirk_;
};

static void cross_domain_release_private(struct driver *drv)
{
	int ret;
	struct cross_domain_private *priv = drv->priv;
	struct drm_gem_close gem_close = { 0 };

	if (priv->ring_addr != MAP_FAILED)
		munmap(priv->ring_addr, PAGE_SIZE);

	if (priv->ring_handle) {
		gem_close.handle = priv->ring_handle;

		ret = drmIoctl(drv->fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
		if (ret) {
			drv_loge("DRM_IOCTL_GEM_CLOSE failed (handle=%x) error %d\n",
				 priv->ring_handle, ret);
		}
	}

	if (priv->metadata_cache)
		drv_array_destroy(priv->metadata_cache);

	pthread_mutex_destroy(&priv->bo_create_lock);

	free(priv);
}

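/* Advertise the (linear-only) format and use-flag combinations this backend supports. */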
static void add_combinations(struct driver *drv)
{
	struct format_metadata metadata;

	// Linear metadata always supported.
	metadata.tiling = 0;
	metadata.priority = 1;
	metadata.modifier = DRM_FORMAT_MOD_LINEAR;

	drv_add_combinations(drv, scanout_render_formats, ARRAY_SIZE(scanout_render_formats),
			     &metadata, BO_USE_RENDER_MASK | BO_USE_SCANOUT);

	drv_add_combinations(drv, texture_only_formats, ARRAY_SIZE(texture_only_formats), &metadata,
			     BO_USE_TEXTURE_MASK);

	/* Android CTS tests require this. */
	drv_add_combination(drv, DRM_FORMAT_BGR888, &metadata, BO_USE_SW_MASK);

	drv_modify_combination(drv, DRM_FORMAT_YVU420, &metadata, BO_USE_HW_VIDEO_ENCODER);
	drv_modify_combination(drv, DRM_FORMAT_NV12, &metadata,
			       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
				   BO_USE_SCANOUT | BO_USE_HW_VIDEO_ENCODER);

	/*
	 * The R8 format is used for Android's HAL_PIXEL_FORMAT_BLOB: JPEG snapshots from
	 * the camera, input/output for hardware decoders/encoders and sensors, and AHBs
	 * used as SSBOs/UBOs.
	 */
	drv_modify_combination(drv, DRM_FORMAT_R8, &metadata,
			       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
				   BO_USE_HW_VIDEO_ENCODER | BO_USE_SENSOR_DIRECT_DATA |
				   BO_USE_GPU_DATA_BUFFER);

	drv_modify_linear_combinations(drv);
}

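/*
 * Serialize a cross-domain command to the host via EXECBUFFER.  When @wait is set,
 * the shared ring is attached to the submission so DRM_IOCTL_VIRTGPU_WAIT can block
 * until the host has processed the command and written its response into the ring.
 */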
static int cross_domain_submit_cmd(struct driver *drv, uint32_t *cmd, uint32_t cmd_size, bool wait)
{
	int ret;
	struct drm_virtgpu_3d_wait wait_3d = { 0 };
	struct drm_virtgpu_execbuffer exec = { 0 };
	struct cross_domain_private *priv = drv->priv;

	exec.flags = VIRTGPU_EXECBUF_RING_IDX;
	exec.command = (uint64_t)&cmd[0];
	exec.size = cmd_size;
	if (wait) {
		exec.bo_handles = (uint64_t)&priv->ring_handle;
		exec.num_bo_handles = 1;
	}

	ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &exec);
	if (ret < 0) {
		drv_loge("DRM_IOCTL_VIRTGPU_EXECBUFFER failed with %s\n", strerror(errno));
		return -EINVAL;
	}

	ret = -EAGAIN;
	while (ret == -EAGAIN) {
		wait_3d.handle = priv->ring_handle;
		ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_WAIT, &wait_3d);
	}

	if (ret < 0) {
		drv_loge("DRM_IOCTL_VIRTGPU_WAIT failed with %s\n", strerror(errno));
		return ret;
	}

	return 0;
}

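/* Two metadata entries match when geometry, format and use flags all agree. */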
static bool metadata_equal(struct bo_metadata *current, struct bo_metadata *cached)
{
	if ((current->width == cached->width) && (current->height == cached->height) &&
	    (current->format == cached->format) && (current->use_flags == cached->use_flags))
		return true;
	return false;
}

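/*
 * Ask the host for the image layout (strides, offsets, format modifier, total size
 * and a blob_id for the allocation).  Results are cached so that identical queries
 * skip the host round trip.
 */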
static int cross_domain_metadata_query(struct driver *drv, struct bo_metadata *metadata)
{
	int ret = 0;
	struct bo_metadata *cached_data = NULL;
	struct cross_domain_private *priv = drv->priv;
	struct CrossDomainGetImageRequirements cmd_get_reqs;
	uint32_t *addr = (uint32_t *)priv->ring_addr;
	uint32_t plane;

	memset(&cmd_get_reqs, 0, sizeof(cmd_get_reqs));
	for (uint32_t i = 0; i < drv_array_size(priv->metadata_cache); i++) {
		cached_data = (struct bo_metadata *)drv_array_at_idx(priv->metadata_cache, i);
		if (!metadata_equal(metadata, cached_data))
			continue;

		memcpy(metadata, cached_data, sizeof(*cached_data));
		return 0;
	}

	cmd_get_reqs.hdr.cmd = CROSS_DOMAIN_CMD_GET_IMAGE_REQUIREMENTS;
	cmd_get_reqs.hdr.cmd_size = sizeof(struct CrossDomainGetImageRequirements);

	cmd_get_reqs.width = metadata->width;
	cmd_get_reqs.height = metadata->height;
	cmd_get_reqs.drm_format = metadata->format;
	cmd_get_reqs.flags = metadata->use_flags;

	// HACK(b/360937659): see also b/172389166 for history.
	// The host minigbm has a hack that recognizes DRM_FORMAT_YVU420 + BO_USE_LINEAR and
	// internally maps the format back to DRM_FORMAT_YVU420_ANDROID so the appropriate
	// layout rules are used.
	if (cmd_get_reqs.drm_format == DRM_FORMAT_YVU420_ANDROID) {
		cmd_get_reqs.drm_format = DRM_FORMAT_YVU420;
		cmd_get_reqs.flags |= BO_USE_LINEAR;
	}

	/*
	 * It is possible to avoid blocking other bo_create() calls by unlocking before
	 * cross_domain_submit_cmd() and re-locking afterwards.  However, that would require
	 * another scan of the metadata cache before drv_array_append() in case two bo_create()
	 * calls do the same metadata query.  Until cross_domain functionality is more widely
	 * tested, leave this optimization out for now.
	 */
	ret = cross_domain_submit_cmd(drv, (uint32_t *)&cmd_get_reqs, cmd_get_reqs.hdr.cmd_size,
				      true);
	if (ret < 0)
		return ret;

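	/*
	 * The host writes its response into the shared ring; the word offsets below
	 * must match the image requirements response layout defined in
	 * virtgpu_cross_domain_protocol.h.
	 */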
	memcpy(&metadata->strides, &addr[0], 4 * sizeof(uint32_t));
	memcpy(&metadata->offsets, &addr[4], 4 * sizeof(uint32_t));
	memcpy(&metadata->format_modifier, &addr[8], sizeof(uint64_t));
	memcpy(&metadata->total_size, &addr[10], sizeof(uint64_t));
	memcpy(&metadata->blob_id, &addr[12], sizeof(uint32_t));

	metadata->map_info = addr[13];
	metadata->memory_idx = addr[14];
	metadata->physical_device_idx = addr[15];

	for (plane = 1; plane < metadata->num_planes; plane++) {
		metadata->sizes[plane - 1] =
		    metadata->offsets[plane] - metadata->offsets[plane - 1];
	}
	metadata->sizes[plane - 1] = metadata->total_size - metadata->offsets[plane - 1];

	drv_array_append(priv->metadata_cache, metadata);
	return 0;
}

/* Fill out metadata for guest buffers, used only for CPU access: */
static void cross_domain_get_emulated_metadata(struct bo_metadata *metadata)
{
	uint32_t offset = 0;

	for (size_t i = 0; i < metadata->num_planes; i++) {
		metadata->strides[i] = drv_stride_from_format(metadata->format, metadata->width, i);
		metadata->sizes[i] = drv_size_from_format(metadata->format, metadata->strides[i],
							  metadata->height, i);
		metadata->offsets[i] = offset;
		offset += metadata->sizes[i];
	}

	metadata->total_size = offset;
}

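/*
 * Verify that virtgpu exposes the required features, create a cross-domain context
 * with a single fence ring, map the shared response ring, and register the
 * supported format combinations.
 */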
static int cross_domain_init(struct driver *drv)
{
	int ret;
	struct cross_domain_private *priv;
	struct drm_virtgpu_map map = { 0 };
	struct drm_virtgpu_get_caps args = { 0 };
	struct drm_virtgpu_context_init init = { 0 };
	struct drm_virtgpu_resource_create_blob drm_rc_blob = { 0 };
	struct drm_virtgpu_context_set_param ctx_set_params[2] = { { 0 } };

	struct CrossDomainInit cmd_init;
	struct CrossDomainCapabilities cross_domain_caps;

	memset(&cmd_init, 0, sizeof(cmd_init));
	if (!params[param_context_init].value)
		return -ENOTSUP;

	if ((params[param_supported_capset_ids].value & (1 << VIRTIO_GPU_CAPSET_CROSS_DOMAIN)) == 0)
		return -ENOTSUP;

	if (!params[param_resource_blob].value)
		return -ENOTSUP;

	// Need zero-copy memory.
	if (!params[param_host_visible].value && !params[param_create_guest_handle].value)
		return -ENOTSUP;

	priv = calloc(1, sizeof(*priv));
	if (!priv)
		return -ENOMEM;

	ret = pthread_mutex_init(&priv->bo_create_lock, NULL);
	if (ret) {
		free(priv);
		return ret;
	}

	priv->metadata_cache = drv_array_init(sizeof(struct bo_metadata));
	if (!priv->metadata_cache) {
		ret = -ENOMEM;
		goto free_private;
	}

	priv->ring_addr = MAP_FAILED;
	drv->priv = priv;

	args.cap_set_id = VIRTIO_GPU_CAPSET_CROSS_DOMAIN;
	args.size = sizeof(struct CrossDomainCapabilities);
	args.addr = (unsigned long long)&cross_domain_caps;

	ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &args);
	if (ret) {
		drv_loge("DRM_IOCTL_VIRTGPU_GET_CAPS failed with %s\n", strerror(errno));
		goto free_private;
	}

	// When 3D features are available but the host does not support external memory, fall back
	// to the virgl minigbm backend.  This typically means the guest-side minigbm resource will
	// be backed by a host OpenGL texture.
	if (!cross_domain_caps.supports_external_gpu_memory && params[param_3d].value) {
		ret = -ENOTSUP;
		goto free_private;
	}

	// Initialize the cross-domain context.  Create one fence context to wait for metadata
	// queries.
	ctx_set_params[0].param = VIRTGPU_CONTEXT_PARAM_CAPSET_ID;
	ctx_set_params[0].value = VIRTIO_GPU_CAPSET_CROSS_DOMAIN;
	ctx_set_params[1].param = VIRTGPU_CONTEXT_PARAM_NUM_RINGS;
	ctx_set_params[1].value = 1;

	init.ctx_set_params = (unsigned long long)&ctx_set_params[0];
	init.num_params = 2;
	ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_CONTEXT_INIT, &init);
	if (ret) {
		drv_loge("DRM_IOCTL_VIRTGPU_CONTEXT_INIT failed with %s\n", strerror(errno));
		goto free_private;
	}

	// Create a shared ring buffer to read metadata queries.
	drm_rc_blob.size = PAGE_SIZE;
	drm_rc_blob.blob_mem = VIRTGPU_BLOB_MEM_GUEST;
	drm_rc_blob.blob_flags = VIRTGPU_BLOB_FLAG_USE_MAPPABLE;

	ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &drm_rc_blob);
	if (ret < 0) {
		drv_loge("DRM_VIRTGPU_RESOURCE_CREATE_BLOB failed with %s\n", strerror(errno));
		goto free_private;
	}

	priv->ring_handle = drm_rc_blob.bo_handle;

	// Map the shared ring buffer.
	map.handle = priv->ring_handle;
	ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_MAP, &map);
	if (ret < 0) {
		drv_loge("DRM_IOCTL_VIRTGPU_MAP failed with %s\n", strerror(errno));
		goto free_private;
	}

	priv->ring_addr =
	    mmap(0, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, drv->fd, map.offset);

	if (priv->ring_addr == MAP_FAILED) {
		ret = -errno;
		drv_loge("mmap failed with %s\n", strerror(errno));
		goto free_private;
	}

	// Notify the host about the ring buffer.
	cmd_init.hdr.cmd = CROSS_DOMAIN_CMD_INIT;
	cmd_init.hdr.cmd_size = sizeof(struct CrossDomainInit);
	cmd_init.ring_id = drm_rc_blob.res_handle;
	ret = cross_domain_submit_cmd(drv, (uint32_t *)&cmd_init, cmd_init.hdr.cmd_size, false);
	if (ret < 0)
		goto free_private;

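	// MT8183 (kukui) camera quirk: cross_domain_resolve_format_and_use_flags() picks a
	// vendor-specific camera format on these devices.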
	const char *name = drv_get_os_option("ro.product.name");
	priv->mt8183_camera_quirk_ = name && !strcmp(name, "kukui");

	// minigbm bookkeeping.
	add_combinations(drv);
	return 0;

free_private:
	cross_domain_release_private(drv);
	return ret;
}

static void cross_domain_close(struct driver *drv)
{
	cross_domain_release_private(drv);
}

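/*
 * Allocate a blob resource.  Buffers never touched by hardware use guest memory and
 * locally computed (emulated) metadata; everything else queries the host for layout
 * requirements and creates the blob against the returned blob_id.
 */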
static int cross_domain_bo_create_locked(struct bo *bo, uint32_t width, uint32_t height,
					 uint32_t format, uint64_t use_flags)
{
	int ret;
	uint32_t blob_flags = VIRTGPU_BLOB_FLAG_USE_SHAREABLE;
	struct drm_virtgpu_resource_create_blob drm_rc_blob = { 0 };

	if (use_flags & (BO_USE_SW_MASK | BO_USE_GPU_DATA_BUFFER))
		blob_flags |= VIRTGPU_BLOB_FLAG_USE_MAPPABLE;

	if (!(use_flags & BO_USE_HW_MASK)) {
		cross_domain_get_emulated_metadata(&bo->meta);
		drm_rc_blob.blob_mem = VIRTGPU_BLOB_MEM_GUEST;
	} else {
		ret = cross_domain_metadata_query(bo->drv, &bo->meta);
		if (ret < 0) {
			drv_loge("Metadata query failed\n");
			return ret;
		}

		if (params[param_cross_device].value)
			blob_flags |= VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE;

		/*
		 * It may be possible to have host3d blobs and handles from guest memory at
		 * the same time, but the immediate use cases only need one or the other.
		 * For now, prefer guest memory: enabling it is more involved (it requires
		 * passing --udmabuf to crosvm), so a developer who enabled it likely wants
		 * to test that path.
		 */
		if (params[param_create_guest_handle].value) {
			drm_rc_blob.blob_mem = VIRTGPU_BLOB_MEM_GUEST;
			blob_flags |= VIRTGPU_BLOB_FLAG_CREATE_GUEST_HANDLE;
		} else if (params[param_host_visible].value) {
			drm_rc_blob.blob_mem = VIRTGPU_BLOB_MEM_HOST3D;
		}
		drm_rc_blob.blob_id = (uint64_t)bo->meta.blob_id;
	}

	drm_rc_blob.size = bo->meta.total_size;
	drm_rc_blob.blob_flags = blob_flags;

	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &drm_rc_blob);
	if (ret < 0) {
		drv_loge("DRM_VIRTGPU_RESOURCE_CREATE_BLOB failed with %s\n", strerror(errno));
		return -errno;
	}

	bo->handle.u32 = drm_rc_blob.bo_handle;

	return 0;
}

static int cross_domain_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
				  uint64_t use_flags)
{
	int ret = 0;
	struct cross_domain_private *priv = bo->drv->priv;

	// HACK(b/395748805): Any host GET_IMAGE_REQUIREMENTS request must be immediately followed
	// by the matching CREATE_BLOB request, as the current implementation in crosvm stashes a
	// single buffer allocation for the first to be returned by the second. We ensure the two
	// requests are made back to back by using a mutex lock, where the lock is acquired for the
	// duration of the allocation requests.
	//
	// This forces all guest allocations to be made in serial order, and allows the host buffer
	// stash to be an optimization.
	pthread_mutex_lock(&priv->bo_create_lock);

	ret = cross_domain_bo_create_locked(bo, width, height, format, use_flags);

	pthread_mutex_unlock(&priv->bo_create_lock);
	return ret;
}

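/* DRM_IOCTL_VIRTGPU_MAP returns a fake offset which is then mmap()ed on the DRM fd. */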
static void *cross_domain_bo_map(struct bo *bo, struct vma *vma, uint32_t map_flags)
{
	int ret;
	struct drm_virtgpu_map gem_map = { 0 };

	gem_map.handle = bo->handle.u32;
	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_MAP, &gem_map);
	if (ret) {
		drv_loge("DRM_IOCTL_VIRTGPU_MAP failed with %s\n", strerror(errno));
		return MAP_FAILED;
	}

	vma->length = bo->meta.total_size;
	return mmap(0, bo->meta.total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
		    gem_map.offset);
}

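/*
 * Resolve Android's flexible and implementation-defined formats to concrete DRM
 * formats based on the requested usage.
 */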
static void cross_domain_resolve_format_and_use_flags(struct driver *drv, uint32_t format,
						      uint64_t use_flags, uint32_t *out_format,
						      uint64_t *out_use_flags)
{
	struct cross_domain_private *priv = drv->priv;
	*out_format = format;
	*out_use_flags = use_flags;

	switch (format) {
	case DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED:
		if (priv->mt8183_camera_quirk_ && (use_flags & BO_USE_CAMERA_READ) &&
		    !(use_flags & BO_USE_SCANOUT)) {
			*out_format = DRM_FORMAT_MTISP_SXYZW10;
			break;
		}
		/* Common camera implementation-defined format. */
		if (use_flags & (BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE)) {
			*out_format = DRM_FORMAT_NV12;
		} else {
			/* HACK: See b/28671744 */
			*out_format = DRM_FORMAT_XBGR8888;
			*out_use_flags &= ~BO_USE_HW_VIDEO_ENCODER;
		}
		break;
	case DRM_FORMAT_FLEX_YCbCr_420_888:
		/* Common flexible video format. */
		*out_format = DRM_FORMAT_NV12;
		break;
	case DRM_FORMAT_YVU420_ANDROID:
		*out_use_flags &= ~BO_USE_SCANOUT;
		break;
	default:
		break;
	}
}

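/*
 * Backend entry points.  Import, destroy and unmap fall through to the generic
 * prime/GEM helpers.
 */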
const struct backend virtgpu_cross_domain = {
	.name = "virtgpu_cross_domain",
	.init = cross_domain_init,
	.close = cross_domain_close,
	.bo_create = cross_domain_bo_create,
	.bo_import = drv_prime_bo_import,
	.bo_destroy = drv_gem_bo_destroy,
	.bo_map = cross_domain_bo_map,
	.bo_unmap = drv_bo_munmap,
	.resolve_format_and_use_flags = cross_domain_resolve_format_and_use_flags,
};