/*
 * Copyright 2021 The Chromium OS Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include <errno.h>
#include <string.h>
#include <sys/mman.h>
#include <xf86drm.h>

#include "drv_priv.h"
#include "external/virtgpu_cross_domain_protocol.h"
#include "external/virtgpu_drm.h"
#include "helpers.h"
#include "util.h"
#include "virtgpu.h"

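/*
 * Capset IDs used with the virtio-gpu context.  CAPSET_CROSS_DOMAIN selects the cross-domain
 * context type; CAPSET_CROSS_FAKE is never reported by crosvm, and cross_domain_init() checks
 * for it so this backend stays disabled unless that check is removed for testing.
 */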
#define CAPSET_CROSS_DOMAIN 5
#define CAPSET_CROSS_FAKE 30

static const uint32_t scanout_render_formats[] = { DRM_FORMAT_ABGR2101010, DRM_FORMAT_ABGR8888,
						   DRM_FORMAT_ARGB2101010, DRM_FORMAT_ARGB8888,
						   DRM_FORMAT_RGB565,	   DRM_FORMAT_XBGR2101010,
						   DRM_FORMAT_XBGR8888,	   DRM_FORMAT_XRGB2101010,
						   DRM_FORMAT_XRGB8888 };

static const uint32_t render_formats[] = { DRM_FORMAT_ABGR16161616F };

static const uint32_t texture_only_formats[] = { DRM_FORMAT_R8, DRM_FORMAT_NV12, DRM_FORMAT_P010,
						 DRM_FORMAT_YVU420, DRM_FORMAT_YVU420_ANDROID };

extern struct virtgpu_param params[];

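/*
 * Backend-private state: the GEM handle and CPU mapping of the shared ring buffer used for
 * metadata query replies, plus a cache of previously answered queries keyed by
 * width/height/format/use_flags.
 */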
struct cross_domain_private {
	uint32_t ring_handle;
	void *ring_addr;
	struct drv_array *metadata_cache;
};

static void cross_domain_release_private(struct driver *drv)
{
	int ret;
	struct cross_domain_private *priv = drv->priv;
	struct drm_gem_close gem_close = { 0 };

	if (priv->ring_addr != MAP_FAILED)
		munmap(priv->ring_addr, PAGE_SIZE);

	if (priv->ring_handle) {
		gem_close.handle = priv->ring_handle;

		ret = drmIoctl(drv->fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
		if (ret) {
			drv_log("DRM_IOCTL_GEM_CLOSE failed (handle=%x) error %d\n",
				priv->ring_handle, ret);
		}
	}

	drv_array_destroy(priv->metadata_cache);
	free(priv);
}

static void add_combinations(struct driver *drv)
{
	struct format_metadata metadata;

	// Linear metadata always supported.
	metadata.tiling = 0;
	metadata.priority = 1;
	metadata.modifier = DRM_FORMAT_MOD_LINEAR;

	drv_add_combinations(drv, scanout_render_formats, ARRAY_SIZE(scanout_render_formats),
			     &metadata, BO_USE_RENDER_MASK | BO_USE_SCANOUT);

	drv_add_combinations(drv, render_formats, ARRAY_SIZE(render_formats), &metadata,
			     BO_USE_RENDER_MASK);

	drv_add_combinations(drv, texture_only_formats, ARRAY_SIZE(texture_only_formats), &metadata,
			     BO_USE_TEXTURE_MASK);

	/* Android CTS tests require this. */
	drv_add_combination(drv, DRM_FORMAT_BGR888, &metadata, BO_USE_SW_MASK);

	drv_modify_combination(drv, DRM_FORMAT_YVU420, &metadata, BO_USE_HW_VIDEO_ENCODER);
	drv_modify_combination(drv, DRM_FORMAT_NV12, &metadata,
			       BO_USE_HW_VIDEO_DECODER | BO_USE_SCANOUT | BO_USE_HW_VIDEO_ENCODER);

	/*
	 * The R8 format is used for Android's HAL_PIXEL_FORMAT_BLOB, for JPEG snapshots from the
	 * camera, and for input/output of the hardware decoder/encoder.
	 */
	drv_modify_combination(drv, DRM_FORMAT_R8, &metadata,
			       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
				   BO_USE_HW_VIDEO_ENCODER);

	drv_modify_linear_combinations(drv);
}

static int cross_domain_submit_cmd(struct driver *drv, uint32_t *cmd, uint32_t cmd_size, bool wait)
{
	int ret;
	struct drm_virtgpu_3d_wait wait_3d = { 0 };
	struct drm_virtgpu_execbuffer exec = { 0 };
	struct cross_domain_private *priv = drv->priv;

	exec.command = (uint64_t)&cmd[0];
	exec.size = cmd_size;
	if (wait) {
		exec.flags = VIRTGPU_EXECBUF_FENCE_CONTEXT;
		exec.bo_handles = (uint64_t)&priv->ring_handle;
		exec.num_bo_handles = 1;
	}

	ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &exec);
	if (ret < 0) {
		drv_log("DRM_IOCTL_VIRTGPU_EXECBUFFER failed with %s\n", strerror(errno));
		return -EINVAL;
	}

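	/* Wait until the ring buffer is idle again, so any response the host wrote to it can be
	 * read safely. */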
	ret = -EAGAIN;
	while (ret == -EAGAIN) {
		wait_3d.handle = priv->ring_handle;
		ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_WAIT, &wait_3d);
	}

	if (ret < 0) {
		drv_log("DRM_IOCTL_VIRTGPU_WAIT failed with %s\n", strerror(errno));
		return ret;
	}

	return 0;
}

static bool metadata_equal(struct bo_metadata *current, struct bo_metadata *cached)
{
	if ((current->width == cached->width) && (current->height == cached->height) &&
	    (current->format == cached->format) && (current->use_flags == cached->use_flags))
		return true;
	return false;
}

static int cross_domain_metadata_query(struct driver *drv, struct bo_metadata *metadata)
{
	int ret = 0;
	struct bo_metadata *cached_data = NULL;
	struct cross_domain_private *priv = drv->priv;
	struct CrossDomainGetImageRequirements cmd_get_reqs;
	uint32_t *addr = (uint32_t *)priv->ring_addr;
	uint32_t plane, remaining_size;

	memset(&cmd_get_reqs, 0, sizeof(cmd_get_reqs));
	pthread_mutex_lock(&drv->driver_lock);
	for (uint32_t i = 0; i < drv_array_size(priv->metadata_cache); i++) {
		cached_data = (struct bo_metadata *)drv_array_at_idx(priv->metadata_cache, i);
		if (!metadata_equal(metadata, cached_data))
			continue;

		memcpy(metadata, cached_data, sizeof(*cached_data));
		goto out_unlock;
	}

	cmd_get_reqs.hdr.cmd = CROSS_DOMAIN_CMD_GET_IMAGE_REQUIREMENTS;
	cmd_get_reqs.hdr.cmd_size = sizeof(struct CrossDomainGetImageRequirements);

	cmd_get_reqs.width = metadata->width;
	cmd_get_reqs.height = metadata->height;
	cmd_get_reqs.drm_format =
	    (metadata->format == DRM_FORMAT_YVU420_ANDROID) ? DRM_FORMAT_YVU420 : metadata->format;
	cmd_get_reqs.flags = metadata->use_flags;

	/*
	 * It is possible to avoid blocking other bo_create() calls by unlocking before
	 * cross_domain_submit_cmd() and re-locking afterwards.  However, that would require
	 * another scan of the metadata cache before drv_array_append in case two bo_create() calls
	 * do the same metadata query.  Until cross_domain functionality is more widely tested,
	 * leave this optimization out for now.
	 */
	ret = cross_domain_submit_cmd(drv, (uint32_t *)&cmd_get_reqs, cmd_get_reqs.hdr.cmd_size,
				      true);
	if (ret < 0)
		goto out_unlock;

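	/*
	 * The host writes its reply into the shared ring buffer.  The 32-bit offsets used below
	 * follow the image-requirements reply layout from external/virtgpu_cross_domain_protocol.h
	 * (addr[15] is not consumed here).
	 */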
	memcpy(&metadata->strides, &addr[0], 4 * sizeof(uint32_t));
	memcpy(&metadata->offsets, &addr[4], 4 * sizeof(uint32_t));
	memcpy(&metadata->format_modifier, &addr[8], sizeof(uint64_t));
	memcpy(&metadata->total_size, &addr[10], sizeof(uint64_t));
	memcpy(&metadata->blob_id, &addr[12], sizeof(uint64_t));

	metadata->map_info = addr[14];
	metadata->memory_idx = addr[16];
	metadata->physical_device_idx = addr[17];

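	/* Derive per-plane sizes from the returned offsets; the last plane receives whatever
	 * remains of the total size. */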
	remaining_size = metadata->total_size;
	for (plane = 0; plane < metadata->num_planes; plane++) {
		if (plane != 0) {
			metadata->sizes[plane - 1] = metadata->offsets[plane];
			remaining_size -= metadata->offsets[plane];
		}
	}

	metadata->sizes[plane - 1] = remaining_size;
	drv_array_append(priv->metadata_cache, metadata);

out_unlock:
	pthread_mutex_unlock(&drv->driver_lock);
	return ret;
}

static int cross_domain_init(struct driver *drv)
{
	int ret;
	struct cross_domain_private *priv;
	struct drm_virtgpu_map map = { 0 };
	struct drm_virtgpu_get_caps args = { 0 };
	struct drm_virtgpu_context_init init = { 0 };
	struct drm_virtgpu_resource_create_blob drm_rc_blob = { 0 };
	struct drm_virtgpu_context_set_param ctx_set_params[2] = { { 0 } };

	struct CrossDomainInit cmd_init;
	struct CrossDomainCapabilities cross_domain_caps;

	memset(&cmd_init, 0, sizeof(cmd_init));
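	/*
	 * The cross-domain context is only usable when the kernel and host expose context
	 * initialization, the cross-domain capset, blob resources, and a zero-copy memory path
	 * (host-visible memory or guest-created handles).  Otherwise, defer to another backend.
	 */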
	if (!params[param_context_init].value)
		return -ENOTSUP;

	if ((params[param_supported_capset_ids].value & (1 << CAPSET_CROSS_DOMAIN)) == 0)
		return -ENOTSUP;

	if (!params[param_resource_blob].value)
		return -ENOTSUP;

	/// Need zero copy memory
	if (!params[param_host_visible].value && !params[param_create_guest_handle].value)
		return -ENOTSUP;

	/*
	 * crosvm never reports the fake capset.  This is just an extra check to make sure we
	 * don't use the cross-domain context by accident.  Developers may remove this for
	 * testing purposes.
	 */
	if ((params[param_supported_capset_ids].value & (1 << CAPSET_CROSS_FAKE)) == 0)
		return -ENOTSUP;

	priv = calloc(1, sizeof(*priv));
	priv->metadata_cache = drv_array_init(sizeof(struct bo_metadata));
	priv->ring_addr = MAP_FAILED;
	drv->priv = priv;

	args.cap_set_id = CAPSET_CROSS_DOMAIN;
	args.size = sizeof(struct CrossDomainCapabilities);
	args.addr = (unsigned long long)&cross_domain_caps;

	ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &args);
	if (ret) {
		drv_log("DRM_IOCTL_VIRTGPU_GET_CAPS failed with %s\n", strerror(errno));
		goto free_private;
	}

	// When 3D features are available but the host does not support external memory, fall back
	// to the virgl minigbm backend.  This typically means the guest-side minigbm resource will
	// be backed by a host OpenGL texture.
	if (!cross_domain_caps.supports_external_gpu_memory && params[param_3d].value) {
		ret = -ENOTSUP;
		goto free_private;
	}

	// Initialize the cross-domain context.  Create one fence context to wait for metadata
	// queries.
	ctx_set_params[0].param = VIRTGPU_CONTEXT_PARAM_CAPSET_ID;
	ctx_set_params[0].value = CAPSET_CROSS_DOMAIN;
	ctx_set_params[1].param = VIRTGPU_CONTEXT_PARAM_NUM_FENCE_CONTEXTS;
	ctx_set_params[1].value = 1;

	init.ctx_set_params = (unsigned long long)&ctx_set_params[0];
	init.num_params = 2;
	ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_CONTEXT_INIT, &init);
	if (ret) {
		drv_log("DRM_IOCTL_VIRTGPU_CONTEXT_INIT failed with %s\n", strerror(errno));
		goto free_private;
	}

	// Create a shared ring buffer to read metadata queries.
	drm_rc_blob.size = PAGE_SIZE;
	drm_rc_blob.blob_mem = VIRTGPU_BLOB_MEM_GUEST;
	drm_rc_blob.blob_flags = VIRTGPU_BLOB_FLAG_USE_MAPPABLE;

	ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &drm_rc_blob);
	if (ret < 0) {
		drv_log("DRM_VIRTGPU_RESOURCE_CREATE_BLOB failed with %s\n", strerror(errno));
		goto free_private;
	}

	priv->ring_handle = drm_rc_blob.bo_handle;

	// Map the shared ring buffer.
	map.handle = priv->ring_handle;
	ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_MAP, &map);
	if (ret < 0) {
		drv_log("DRM_IOCTL_VIRTGPU_MAP failed with %s\n", strerror(errno));
		goto free_private;
	}

	priv->ring_addr =
	    mmap(0, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, drv->fd, map.offset);

	if (priv->ring_addr == MAP_FAILED) {
		drv_log("mmap failed with %s\n", strerror(errno));
		goto free_private;
	}

	// Notify the host about the ring buffer.
	cmd_init.hdr.cmd = CROSS_DOMAIN_CMD_INIT;
	cmd_init.hdr.cmd_size = sizeof(struct CrossDomainInit);
	cmd_init.ring_id = drm_rc_blob.res_handle;
	ret = cross_domain_submit_cmd(drv, (uint32_t *)&cmd_init, cmd_init.hdr.cmd_size, false);
	if (ret < 0)
		goto free_private;

	// minigbm bookkeeping
	add_combinations(drv);
	return 0;

free_private:
	cross_domain_release_private(drv);
	return ret;
}

static void cross_domain_close(struct driver *drv)
{
	cross_domain_release_private(drv);
}

static int cross_domain_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
				  uint64_t use_flags)
{
	int ret;
	uint32_t blob_flags = VIRTGPU_BLOB_FLAG_USE_SHAREABLE;
	struct drm_virtgpu_resource_create_blob drm_rc_blob = { 0 };

	ret = cross_domain_metadata_query(bo->drv, &bo->meta);
	if (ret < 0) {
		drv_log("Metadata query failed\n");
		return ret;
	}

	if (use_flags & BO_USE_SW_MASK)
		blob_flags |= VIRTGPU_BLOB_FLAG_USE_MAPPABLE;

	if (params[param_cross_device].value && (use_flags & BO_USE_NON_GPU_HW))
		blob_flags |= VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE;

	/// It may be possible to have host3d blobs and handles from guest memory at the same time.
	/// But for the immediate use cases, we will either have one or the other.  For now, just
	/// prefer guest memory: enabling it is more involved (it requires the --udmabuf flag to
	/// crosvm), so a developer who turned it on most likely wants to test it.
	if (params[param_create_guest_handle].value) {
		drm_rc_blob.blob_mem = VIRTGPU_BLOB_MEM_GUEST;
		blob_flags |= VIRTGPU_BLOB_FLAG_CREATE_GUEST_HANDLE;
	} else if (params[param_host_visible].value) {
		drm_rc_blob.blob_mem = VIRTGPU_BLOB_MEM_HOST3D;
	}

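	/* The blob_id below was returned by the metadata query; the host uses it to associate
	 * this blob with the image requirements computed there. */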
	drm_rc_blob.size = bo->meta.total_size;
	drm_rc_blob.blob_flags = blob_flags;
	drm_rc_blob.blob_id = bo->meta.blob_id;

	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &drm_rc_blob);
	if (ret < 0) {
		drv_log("DRM_VIRTGPU_RESOURCE_CREATE_BLOB failed with %s\n", strerror(errno));
		return -errno;
	}

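	// All planes are backed by the same blob, so every plane records the same GEM handle.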
	for (uint32_t plane = 0; plane < bo->meta.num_planes; plane++)
		bo->handles[plane].u32 = drm_rc_blob.bo_handle;

	return 0;
}

static void *cross_domain_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
	int ret;
	struct drm_virtgpu_map gem_map = { 0 };

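	// Every plane shares one blob resource, so the mapping always uses handles[0].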
	gem_map.handle = bo->handles[0].u32;
	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_MAP, &gem_map);
	if (ret) {
		drv_log("DRM_IOCTL_VIRTGPU_MAP failed with %s\n", strerror(errno));
		return MAP_FAILED;
	}

	vma->length = bo->meta.total_size;
	return mmap(0, bo->meta.total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
		    gem_map.offset);
}

const struct backend virtgpu_cross_domain = {
	.name = "virtgpu_cross_domain",
	.init = cross_domain_init,
	.close = cross_domain_close,
	.bo_create = cross_domain_bo_create,
	.bo_import = drv_prime_bo_import,
	.bo_destroy = drv_gem_bo_destroy,
	.bo_map = cross_domain_bo_map,
	.bo_unmap = drv_bo_munmap,
	.resolve_format = drv_resolve_format_helper,
};