1 /**************************************************************************
2 *
3 * Copyright (C) 2019 Chromium.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included
13 * in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 **************************************************************************/
24
25 #ifndef _GNU_SOURCE
26 #define _GNU_SOURCE 1
27 #endif
28
29 #include <stdio.h>
30 #include <dirent.h>
31 #include <fcntl.h>
32 #include <stdlib.h>
33 #include <string.h>
34 #include <xf86drm.h>
35 #include <unistd.h>
36
37 #include "util/u_math.h"
38 #include "util/u_memory.h"
39 #include "pipe/p_state.h"
40
41 #include "vrend_winsys.h"
42 #include "vrend_winsys_gbm.h"
43 #include "virgl_hw.h"
44 #include "vrend_debug.h"
45
/* Describes how a (possibly multi-planar) pixel format is laid out in memory,
 * per plane, for the transfer helpers below. */
struct planar_layout {
   size_t num_planes;                              /* number of planes actually used */
   int horizontal_subsampling[VIRGL_GBM_MAX_PLANES]; /* divide width by this per plane */
   int vertical_subsampling[VIRGL_GBM_MAX_PLANES];   /* divide height by this per plane */
   int bytes_per_pixel[VIRGL_GBM_MAX_PLANES];        /* bytes per (subsampled) pixel */
};
52
/* One entry of the GBM fourcc <-> virgl format translation table. */
struct format_conversion {
   uint32_t gbm_format;
   uint32_t virgl_format;
};
57
/* Single-plane, 1 byte per pixel (e.g. R8). */
static const struct planar_layout packed_1bpp_layout = {
   .num_planes = 1,
   .horizontal_subsampling = { 1 },
   .vertical_subsampling = { 1 },
   .bytes_per_pixel = { 1 }
};

/* Single-plane, 2 bytes per pixel (e.g. RGB565). */
static const struct planar_layout packed_2bpp_layout = {
   .num_planes = 1,
   .horizontal_subsampling = { 1 },
   .vertical_subsampling = { 1 },
   .bytes_per_pixel = { 2 }
};

/* Single-plane, 4 bytes per pixel (e.g. ARGB8888). */
static const struct planar_layout packed_4bpp_layout = {
   .num_planes = 1,
   .horizontal_subsampling = { 1 },
   .vertical_subsampling = { 1 },
   .bytes_per_pixel = { 4 }
};

/* Single-plane, 8 bytes per pixel (e.g. ABGR16161616F). */
static const struct planar_layout packed_8bpp_layout = {
   .num_planes = 1,
   .horizontal_subsampling = { 1 },
   .vertical_subsampling = { 1 },
   .bytes_per_pixel = { 8 }
};

/* Two-plane 4:2:0 YUV: full-res Y plane + half-res interleaved CbCr (NV12). */
static const struct planar_layout biplanar_yuv_420_layout = {
   .num_planes = 2,
   .horizontal_subsampling = { 1, 2 },
   .vertical_subsampling = { 1, 2 },
   .bytes_per_pixel = { 1, 2 }
};

/* Three-plane 4:2:0 YUV: full-res Y plane + two half-res chroma planes (YV12). */
static const struct planar_layout triplanar_yuv_420_layout = {
   .num_planes = 3,
   .horizontal_subsampling = { 1, 2, 2 },
   .vertical_subsampling = { 1, 2, 2 },
   .bytes_per_pixel = { 1, 1, 1 }
};
99
/* Bidirectional GBM fourcc <-> virgl format mapping, consumed by
 * virgl_gbm_convert_format(). Exactly one entry per supported pair. */
static const struct format_conversion conversions[] = {
   { GBM_FORMAT_RGB565, VIRGL_FORMAT_B5G6R5_UNORM },
   { GBM_FORMAT_ARGB8888, VIRGL_FORMAT_B8G8R8A8_UNORM },
   { GBM_FORMAT_XRGB8888, VIRGL_FORMAT_B8G8R8X8_UNORM },
   { GBM_FORMAT_ABGR2101010, VIRGL_FORMAT_R10G10B10A2_UNORM },
   { GBM_FORMAT_ABGR16161616F, VIRGL_FORMAT_R16G16B16A16_FLOAT },
   { GBM_FORMAT_NV12, VIRGL_FORMAT_NV12 },
   { GBM_FORMAT_ABGR8888, VIRGL_FORMAT_R8G8B8A8_UNORM},
   { GBM_FORMAT_XBGR8888, VIRGL_FORMAT_R8G8B8X8_UNORM},
   { GBM_FORMAT_R8, VIRGL_FORMAT_R8_UNORM},
   { GBM_FORMAT_YVU420, VIRGL_FORMAT_YV12},
};
112
rendernode_open(void)113 static int rendernode_open(void)
114 {
115 DIR *dir;
116 int ret, fd;
117 bool undesired_found;
118 drmVersionPtr version;
119 char *rendernode_name;
120 struct dirent *dir_ent;
121 const char *undesired[3] = { "vgem", "pvr", NULL };
122
123 dir = opendir("/dev/dri");
124 if (!dir)
125 return -1;
126
127 fd = -1;
128 while ((dir_ent = readdir(dir))) {
129 if (dir_ent->d_type != DT_CHR)
130 continue;
131
132 if (strncmp(dir_ent->d_name, "renderD", 7))
133 continue;
134
135 ret = asprintf(&rendernode_name, "/dev/dri/%s", dir_ent->d_name);
136 if (ret < 0)
137 goto out;
138
139 fd = open(rendernode_name, O_RDWR | O_CLOEXEC | O_NOCTTY | O_NONBLOCK);
140 free(rendernode_name);
141
142 if (fd < 0)
143 continue;
144
145 version = drmGetVersion(fd);
146 if (!version) {
147 close(fd);
148 fd = -1;
149 continue;
150 }
151
152 undesired_found = false;
153 for (uint32_t i = 0; i < ARRAY_SIZE(undesired); i++) {
154 if (undesired[i] && !strcmp(version->name, undesired[i]))
155 undesired_found = true;
156 }
157
158 drmFreeVersion(version);
159 if (undesired_found) {
160 close(fd);
161 fd = -1;
162 continue;
163 }
164
165 break;
166 }
167
168 out:
169 closedir(dir);
170 return fd;
171 }
172
layout_from_format(uint32_t format)173 static const struct planar_layout *layout_from_format(uint32_t format)
174 {
175 switch (format) {
176 case GBM_FORMAT_R8:
177 return &packed_1bpp_layout;
178 case GBM_FORMAT_YVU420:
179 return &triplanar_yuv_420_layout;
180 case GBM_FORMAT_NV12:
181 return &biplanar_yuv_420_layout;
182 case GBM_FORMAT_RGB565:
183 return &packed_2bpp_layout;
184 case GBM_FORMAT_ARGB8888:
185 case GBM_FORMAT_XRGB8888:
186 case GBM_FORMAT_ABGR8888:
187 case GBM_FORMAT_XBGR8888:
188 case GBM_FORMAT_ABGR2101010:
189 return &packed_4bpp_layout;
190 case GBM_FORMAT_ABGR16161616F:
191 return &packed_8bpp_layout;
192 default:
193 return NULL;
194 }
195 }
196
197 #ifdef ENABLE_MINIGBM_ALLOCATION
/*
 * Copies one plane of an image between guest memory (a scatter-gather list of
 * iovecs covering the guest resource) and a mapped host buffer, row by row,
 * honoring the possibly different guest and host plane strides.
 *
 * The guest data for each image row occupies the byte range
 * [guest_resource_offset + row * guest_plane_stride,
 *  ... + subsampled_width * planar_bytes_per_pixel) within the iovec stream;
 * the loop walks rows and iovecs in lockstep and copies each intersection.
 *
 * direction selects memcpy direction: VIRGL_TRANSFER_TO_HOST copies
 * guest -> host, anything else copies host -> guest.
 */
static void virgl_gbm_transfer_internal(uint32_t planar_bytes_per_pixel,
                                        uint32_t subsampled_width,
                                        uint32_t subsampled_height,
                                        uint32_t guest_plane_stride,
                                        uint32_t guest_resource_offset,
                                        uint32_t host_plane_stride, uint8_t *host_address,
                                        const struct iovec *iovecs, uint32_t num_iovecs,
                                        uint32_t direction)
{
   bool next_iovec, next_line;
   uint32_t current_height, current_iovec, iovec_start_offset;
   current_height = current_iovec = iovec_start_offset = 0;

   while (current_height < subsampled_height && current_iovec < num_iovecs) {
      uint32_t iovec_size = iovecs[current_iovec].iov_len;
      uint32_t iovec_end_offset = iovec_start_offset + iovec_size;

      /* Guest byte range occupied by the current image row. */
      uint32_t box_start_offset = guest_resource_offset + current_height * guest_plane_stride;
      uint32_t box_end_offset = box_start_offset + subsampled_width * planar_bytes_per_pixel;

      /* Intersection of the row's range with the current iovec's range. */
      uint32_t max_start = MAX2(iovec_start_offset, box_start_offset);
      uint32_t min_end = MIN2(iovec_end_offset, box_end_offset);

      if (max_start < min_end) {
         /* Non-empty overlap: copy it. */
         uint32_t offset_in_iovec = (max_start > iovec_start_offset) ?
                                    (max_start - iovec_start_offset) : 0;

         uint32_t copy_iovec_size = min_end - max_start;
         if (min_end >= iovec_end_offset) {
            /* The iovec ends at or before the row: advance to next iovec. */
            next_iovec = true;
            next_line = false;
         } else {
            /* The row is fully consumed within this iovec: advance the row. */
            next_iovec = false;
            next_line = true;
         }

         uint8_t *guest_start = (uint8_t*)iovecs[current_iovec].iov_base + offset_in_iovec;
         uint8_t *host_start = host_address + (current_height * host_plane_stride) +
                               (max_start - box_start_offset);

         if (direction == VIRGL_TRANSFER_TO_HOST)
            memcpy(host_start, guest_start, copy_iovec_size);
         else
            memcpy(guest_start, host_start, copy_iovec_size);
      } else {
         /* No overlap: skip whichever range lies entirely behind the other. */
         if (box_start_offset >= iovec_start_offset) {
            next_iovec = true;
            next_line = false;
         } else {
            next_iovec = false;
            next_line = true;
         }
      }

      if (next_iovec) {
         iovec_start_offset += iovec_size;
         current_iovec++;
      }

      if (next_line)
         current_height++;
   }
}
261 #endif /* ENABLE_MINIGBM_ALLOCATION */
262
/*
 * Allocates a virgl_gbm wrapper and a gbm_device for it.
 *
 * If fd is negative, a DRM device is opened internally: the minigbm default
 * device when ENABLE_MINIGBM_ALLOCATION is set, otherwise the first suitable
 * render node found by rendernode_open(). If fd is non-negative it is used
 * directly. In both cases gbm->fd ends up >= 0 and is closed later by
 * virgl_gbm_fini().
 *
 * Returns NULL on failure.
 */
struct virgl_gbm *virgl_gbm_init(int fd)
{
   struct virgl_gbm *gbm = calloc(1, sizeof(struct virgl_gbm));
   if (!gbm)
      return NULL;

   gbm->fd = -1;
   if (fd < 0) {
      /* NOTE: when minigbm allocation is enabled, rendernode_open() is only
       * the fallback — the assignment below is the body of the dangling
       * `if` inside the #ifdef. */
#ifdef ENABLE_MINIGBM_ALLOCATION
      gbm->fd = gbm_get_default_device_fd();
      if (gbm->fd < 0)
#endif
      gbm->fd = rendernode_open();
      if (gbm->fd < 0)
         goto out_error;

      gbm->device = gbm_create_device(gbm->fd);
      if (!gbm->device) {
         close(gbm->fd);
         goto out_error;
      }
   } else {
      /* Caller-supplied fd: wrap it and record it in gbm->fd. */
      gbm->device = gbm_create_device(fd);
      if (!gbm->device)
         goto out_error;
      gbm->fd = fd;
   }

   return gbm;

out_error:
   free(gbm);
   return NULL;
}
297
virgl_gbm_fini(struct virgl_gbm * gbm)298 void virgl_gbm_fini(struct virgl_gbm *gbm)
299 {
300 gbm_device_destroy(gbm->device);
301 if (gbm->fd >= 0)
302 close(gbm->fd);
303 free(gbm);
304 }
305
virgl_gbm_convert_format(uint32_t * virgl_format,uint32_t * gbm_format)306 int virgl_gbm_convert_format(uint32_t *virgl_format, uint32_t *gbm_format)
307 {
308
309 if (!virgl_format || !gbm_format)
310 return -1;
311
312 if (*virgl_format != 0 && *gbm_format != 0)
313 return -1;
314
315 for (uint32_t i = 0; i < ARRAY_SIZE(conversions); i++) {
316 if (conversions[i].gbm_format == *gbm_format ||
317 conversions[i].virgl_format == *virgl_format) {
318 *gbm_format = conversions[i].gbm_format;
319 *virgl_format = conversions[i].virgl_format;
320 return 0;
321 }
322 }
323
324 return -1;
325 }
326
327 #ifdef ENABLE_MINIGBM_ALLOCATION
virgl_gbm_transfer(struct gbm_bo * bo,uint32_t direction,const struct iovec * iovecs,uint32_t num_iovecs,const struct vrend_transfer_info * info)328 int virgl_gbm_transfer(struct gbm_bo *bo, uint32_t direction, const struct iovec *iovecs,
329 uint32_t num_iovecs, const struct vrend_transfer_info *info)
330 {
331 void *map_data;
332 uint32_t guest_plane_offset, guest_stride0, host_map_stride0;
333
334 uint32_t width = gbm_bo_get_width(bo);
335 uint32_t height = gbm_bo_get_height(bo);
336 uint32_t format = gbm_bo_get_format(bo);
337 int plane_count = gbm_bo_get_plane_count(bo);
338 const struct planar_layout *layout = layout_from_format(format);
339 if (!layout)
340 return -1;
341
342 host_map_stride0 = 0;
343 uint32_t map_flags = (direction == VIRGL_TRANSFER_TO_HOST) ? GBM_BO_TRANSFER_WRITE :
344 GBM_BO_TRANSFER_READ;
345 /* XXX remove this and map just the region when single plane and GBM honors the region */
346 if (direction == VIRGL_TRANSFER_TO_HOST &&
347 !(info->box->x == 0 && info->box->y == 0 &&
348 info->box->width == (int)width && info->box->height == (int)height))
349 map_flags |= GBM_BO_TRANSFER_READ;
350
351 void *addr = gbm_bo_map(bo, 0, 0, width, height, map_flags, &host_map_stride0, &map_data);
352 if (!addr)
353 return -1;
354
355 guest_plane_offset = info->offset;
356 guest_stride0 = 0;
357
358 /*
359 * Unfortunately, the kernel doesn't actually pass the guest layer_stride and
360 * guest stride to the host (compare virtio_gpu.h and virtgpu_drm.h). We can use
361 * the level (always zero for 2D images) to work around this.
362 */
363 if (info->stride || info->level) {
364 guest_stride0 = info->stride ? info->stride : info->level;
365 if (guest_stride0 < (uint32_t)info->box->width * layout->bytes_per_pixel[0])
366 return -1;
367 } else {
368 guest_stride0 = width * layout->bytes_per_pixel[0];
369 }
370
371 if (guest_stride0 > host_map_stride0)
372 return -1;
373
374 for (int plane = 0; plane < plane_count; plane++) {
375 uint32_t host_plane_offset = gbm_bo_get_offset(bo, plane);
376
377 uint32_t subsampled_x = info->box->x / layout->horizontal_subsampling[plane];
378 uint32_t subsampled_y = info->box->y / layout->vertical_subsampling[plane];
379 uint32_t subsampled_width = info->box->width / layout->horizontal_subsampling[plane];
380 uint32_t subsampled_height = info->box->height / layout->vertical_subsampling[plane];
381 uint32_t plane_height = height / layout->vertical_subsampling[plane];
382
383 uint32_t plane_byte_ratio = layout->bytes_per_pixel[plane] / layout->bytes_per_pixel[0];
384 uint32_t guest_plane_stride = (guest_stride0 * plane_byte_ratio)
385 / layout->horizontal_subsampling[plane];
386 uint32_t host_plane_stride = plane == 0
387 ? host_map_stride0 : gbm_bo_get_stride_for_plane(bo, plane);
388
389 uint32_t guest_resource_offset = guest_plane_offset;
390 uint32_t host_resource_offset = host_plane_offset + (subsampled_y * host_plane_stride)
391 + subsampled_x * layout->bytes_per_pixel[plane];
392
393 uint8_t *host_address = (uint8_t*)addr + host_resource_offset;
394
395 /*
396 * Here we apply another hack. info->offset does not account for
397 * info->box for planar resources and we need to make adjustments.
398 */
399 if (plane_count > 1) {
400 guest_resource_offset += (subsampled_y * guest_plane_stride)
401 + subsampled_x * layout->bytes_per_pixel[plane];
402 }
403
404 virgl_gbm_transfer_internal(layout->bytes_per_pixel[plane], subsampled_width,
405 subsampled_height, guest_plane_stride, guest_resource_offset,
406 host_plane_stride, host_address, iovecs, num_iovecs, direction);
407
408 if (info->layer_stride) {
409 guest_plane_offset += (info->layer_stride * plane_byte_ratio)
410 / (layout->horizontal_subsampling[plane] * layout->vertical_subsampling[plane]);
411 } else {
412 guest_plane_offset += plane_height * guest_plane_stride;
413 }
414 }
415
416 gbm_bo_unmap(bo, map_data);
417 return 0;
418 }
419
virgl_gbm_convert_flags(uint32_t virgl_bind_flags)420 uint32_t virgl_gbm_convert_flags(uint32_t virgl_bind_flags)
421 {
422 uint32_t flags = 0;
423 if (virgl_bind_flags & VIRGL_BIND_SAMPLER_VIEW)
424 flags |= GBM_BO_USE_TEXTURING;
425 if (virgl_bind_flags & VIRGL_BIND_RENDER_TARGET)
426 flags |= GBM_BO_USE_RENDERING;
427 if (virgl_bind_flags & VIRGL_BIND_SCANOUT)
428 flags |= GBM_BO_USE_SCANOUT;
429 if (virgl_bind_flags & VIRGL_BIND_CURSOR)
430 flags |= GBM_BO_USE_CURSOR;
431 if (virgl_bind_flags & VIRGL_BIND_LINEAR)
432 flags |= GBM_BO_USE_LINEAR;
433
434 if (virgl_bind_flags & VIRGL_BIND_MINIGBM_CAMERA_WRITE)
435 flags |= GBM_BO_USE_CAMERA_WRITE;
436 if (virgl_bind_flags & VIRGL_BIND_MINIGBM_CAMERA_READ)
437 flags |= GBM_BO_USE_CAMERA_READ;
438 if (virgl_bind_flags & VIRGL_BIND_MINIGBM_HW_VIDEO_DECODER)
439 flags |= GBM_BO_USE_HW_VIDEO_DECODER;
440 if (virgl_bind_flags & VIRGL_BIND_MINIGBM_HW_VIDEO_ENCODER)
441 flags |= GBM_BO_USE_HW_VIDEO_ENCODER;
442
443 if ((virgl_bind_flags & VIRGL_BIND_MINIGBM_PROTECTED) ==
444 (uint32_t)VIRGL_BIND_MINIGBM_PROTECTED) {
445 flags |= GBM_BO_USE_PROTECTED;
446 } else {
447 if (virgl_bind_flags & VIRGL_BIND_MINIGBM_SW_READ_OFTEN)
448 flags |= GBM_BO_USE_SW_READ_OFTEN;
449 if (virgl_bind_flags & VIRGL_BIND_MINIGBM_SW_READ_RARELY)
450 flags |= GBM_BO_USE_SW_READ_RARELY;
451 if (virgl_bind_flags & VIRGL_BIND_MINIGBM_SW_WRITE_OFTEN)
452 flags |= GBM_BO_USE_SW_WRITE_OFTEN;
453 if (virgl_bind_flags & VIRGL_BIND_MINIGBM_SW_WRITE_RARELY)
454 flags |= GBM_BO_USE_SW_WRITE_RARELY;
455 }
456
457 return flags;
458 }
459
/*
 * Fills `query` with the per-plane layout (strides/offsets), fourcc,
 * modifier and — when query->in_export_fds is set — exported dma-buf fds
 * for `bo`.
 *
 * Planes that share the same GEM handle share a single exported fd, so
 * out_num_fds may be smaller than the plane count. Returns 0 on success;
 * on failure every out_* field is reset, any already-exported fds are
 * closed, and the error from virgl_gbm_export_fd() is returned.
 */
int virgl_gbm_export_query(struct gbm_bo *bo, struct virgl_renderer_export_query *query)
{
   int ret = -1;
   uint32_t handles[VIRGL_GBM_MAX_PLANES] = { 0 };
   struct gbm_device *gbm = gbm_bo_get_device(bo);
   int num_planes = gbm_bo_get_plane_count(bo);
   if (num_planes < 0 || num_planes > VIRGL_GBM_MAX_PLANES)
      return ret;

   /* Start from a clean slate so the error path can close/reset blindly. */
   query->out_num_fds = 0;
   query->out_fourcc = 0;
   query->out_modifier = DRM_FORMAT_MOD_INVALID;
   for (int plane = 0; plane < VIRGL_GBM_MAX_PLANES; plane++) {
      query->out_fds[plane] = -1;
      query->out_strides[plane] = 0;
      query->out_offsets[plane] = 0;
   }

   for (int plane = 0; plane < num_planes; plane++) {
      uint32_t i, handle;
      query->out_strides[plane] = gbm_bo_get_stride_for_plane(bo, plane);
      query->out_offsets[plane] = gbm_bo_get_offset(bo, plane);
      handle = gbm_bo_get_handle_for_plane(bo, plane).u32;

      /* Deduplicate: a handle already seen reuses its existing fd slot. */
      for (i = 0; i < query->out_num_fds; i++) {
         if (handles[i] == handle)
            break;
      }

      if (i == query->out_num_fds) {
         if (query->in_export_fds) {
            ret = virgl_gbm_export_fd(gbm, handle, &query->out_fds[query->out_num_fds]);
            if (ret)
               goto err_close;
         }
         handles[query->out_num_fds] = handle;
         query->out_num_fds++;
      }
   }

   query->out_modifier = gbm_bo_get_modifier(bo);
   query->out_fourcc = gbm_bo_get_format(bo);
   return 0;

err_close:
   /* Undo the partial export: close fds and zero the reported layout. */
   for (int plane = 0; plane < VIRGL_GBM_MAX_PLANES; plane++) {
      if (query->out_fds[plane] >= 0) {
         close(query->out_fds[plane]);
         query->out_fds[plane] = -1;
      }

      query->out_strides[plane] = 0;
      query->out_offsets[plane] = 0;
   }

   query->out_num_fds = 0;
   return ret;
}
518 #endif
519
virgl_gbm_export_fd(struct gbm_device * gbm,uint32_t handle,int32_t * out_fd)520 int virgl_gbm_export_fd(struct gbm_device *gbm, uint32_t handle, int32_t *out_fd)
521 {
522 int ret;
523 ret = drmPrimeHandleToFD(gbm_device_get_fd(gbm), handle, DRM_CLOEXEC | DRM_RDWR, out_fd);
524 // Kernels with older DRM core versions block DRM_RDWR but give a
525 // read/write mapping anyway.
526 if (ret)
527 ret = drmPrimeHandleToFD(gbm_device_get_fd(gbm), handle, DRM_CLOEXEC, out_fd);
528
529 return ret;
530 }
531
virgl_gbm_get_plane_width(struct gbm_bo * bo,int plane)532 int virgl_gbm_get_plane_width(struct gbm_bo *bo, int plane) {
533 uint32_t format = gbm_bo_get_format(bo);
534 const struct planar_layout *layout = layout_from_format(format);
535 if (!layout)
536 return -1;
537 return gbm_bo_get_width(bo) / layout->horizontal_subsampling[plane];
538 }
539
virgl_gbm_get_plane_height(struct gbm_bo * bo,int plane)540 int virgl_gbm_get_plane_height(struct gbm_bo *bo, int plane) {
541 uint32_t format = gbm_bo_get_format(bo);
542 const struct planar_layout *layout = layout_from_format(format);
543 if (!layout)
544 return -1;
545 return gbm_bo_get_height(bo) / layout->vertical_subsampling[plane];
546 }
547
virgl_gbm_get_plane_bytes_per_pixel(struct gbm_bo * bo,int plane)548 int virgl_gbm_get_plane_bytes_per_pixel(struct gbm_bo *bo, int plane) {
549 uint32_t format = gbm_bo_get_format(bo);
550 const struct planar_layout *layout = layout_from_format(format);
551 if (!layout)
552 return -1;
553 return layout->bytes_per_pixel[plane];
554 }
555
virgl_gbm_external_allocation_preferred(uint32_t flags)556 bool virgl_gbm_external_allocation_preferred(uint32_t flags) {
557 return (flags & (VIRGL_RES_BIND_SCANOUT | VIRGL_RES_BIND_SHARED)) != 0;
558 }
559
virgl_gbm_gpu_import_required(uint32_t flags)560 bool virgl_gbm_gpu_import_required(uint32_t flags) {
561 return !virgl_gbm_external_allocation_preferred(flags) ||
562 (flags & (VIRGL_BIND_RENDER_TARGET | VIRGL_BIND_SAMPLER_VIEW)) != 0;
563 }
564