1 /**************************************************************************
2 *
3 * Copyright (C) 2019 Chromium.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included
13 * in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 **************************************************************************/
24
25 #ifndef _GNU_SOURCE
26 #define _GNU_SOURCE 1
27 #endif
28
29 #include <stdio.h>
30 #include <dirent.h>
31 #include <fcntl.h>
32 #include <stdlib.h>
33 #include <string.h>
34 #include <xf86drm.h>
35 #include <unistd.h>
36
37 #include "util/u_math.h"
38 #include "util/u_memory.h"
39 #include "pipe/p_state.h"
40
41 #include "vrend_winsys.h"
42 #include "vrend_winsys_gbm.h"
43 #include "virgl_hw.h"
44 #include "vrend_debug.h"
45
/*
 * Per-plane memory layout of a (possibly multi-planar) pixel format.
 * Indexed tables hold one entry per plane, up to VIRGL_GBM_MAX_PLANES.
 */
struct planar_layout {
   /* Number of valid entries in the arrays below. */
   size_t num_planes;
   /* Divisor applied to the image width to get each plane's width
    * (e.g. 2 for half-resolution chroma planes). */
   int horizontal_subsampling[VIRGL_GBM_MAX_PLANES];
   /* Divisor applied to the image height to get each plane's height. */
   int vertical_subsampling[VIRGL_GBM_MAX_PLANES];
   /* Bytes per (subsampled) pixel within each plane. */
   int bytes_per_pixel[VIRGL_GBM_MAX_PLANES];
};
52
/* One entry of the GBM fourcc <-> virgl format translation table. */
struct format_conversion {
   uint32_t gbm_format;
   uint32_t virgl_format;
};
57
/* Single-plane, 1 byte per pixel (e.g. R8). */
static const struct planar_layout packed_1bpp_layout = {
   .num_planes = 1,
   .horizontal_subsampling = { 1 },
   .vertical_subsampling = { 1 },
   .bytes_per_pixel = { 1 }
};

/* Single-plane, 2 bytes per pixel (e.g. RGB565). */
static const struct planar_layout packed_2bpp_layout = {
   .num_planes = 1,
   .horizontal_subsampling = { 1 },
   .vertical_subsampling = { 1 },
   .bytes_per_pixel = { 2 }
};

/* Single-plane, 4 bytes per pixel (e.g. ARGB8888 variants). */
static const struct planar_layout packed_4bpp_layout = {
   .num_planes = 1,
   .horizontal_subsampling = { 1 },
   .vertical_subsampling = { 1 },
   .bytes_per_pixel = { 4 }
};

/* Single-plane, 8 bytes per pixel (e.g. ABGR16161616F). */
static const struct planar_layout packed_8bpp_layout = {
   .num_planes = 1,
   .horizontal_subsampling = { 1 },
   .vertical_subsampling = { 1 },
   .bytes_per_pixel = { 8 }
};

/* Two planes: full-res luma, half-res interleaved chroma (NV12). */
static const struct planar_layout biplanar_yuv_420_layout = {
   .num_planes = 2,
   .horizontal_subsampling = { 1, 2 },
   .vertical_subsampling = { 1, 2 },
   .bytes_per_pixel = { 1, 2 }
};

/* Three planes: full-res luma plus two half-res chroma planes (YV12). */
static const struct planar_layout triplanar_yuv_420_layout = {
   .num_planes = 3,
   .horizontal_subsampling = { 1, 2, 2 },
   .vertical_subsampling = { 1, 2, 2 },
   .bytes_per_pixel = { 1, 1, 1 }
};
99
/*
 * GBM fourcc <-> virgl format pairs used by virgl_gbm_convert_format().
 * The lookup returns the FIRST matching entry, so the canonical mapping
 * for a gbm format must precede any aliases: ABGR8888/XBGR8888 appear
 * twice, and the later *_EMULATED entries are only reachable by a
 * virgl-format lookup.
 */
static const struct format_conversion conversions[] = {
   { GBM_FORMAT_RGB565, VIRGL_FORMAT_B5G6R5_UNORM },
   { GBM_FORMAT_ARGB8888, VIRGL_FORMAT_B8G8R8A8_UNORM },
   { GBM_FORMAT_XRGB8888, VIRGL_FORMAT_B8G8R8X8_UNORM },
   { GBM_FORMAT_ABGR16161616F, VIRGL_FORMAT_R16G16B16A16_FLOAT },
   { GBM_FORMAT_NV12, VIRGL_FORMAT_NV12 },
   { GBM_FORMAT_ABGR8888, VIRGL_FORMAT_R8G8B8A8_UNORM},
   { GBM_FORMAT_XBGR8888, VIRGL_FORMAT_R8G8B8X8_UNORM},
   { GBM_FORMAT_R8, VIRGL_FORMAT_R8_UNORM},
   { GBM_FORMAT_YVU420, VIRGL_FORMAT_YV12},
   { GBM_FORMAT_ABGR8888, VIRGL_FORMAT_B8G8R8A8_UNORM_EMULATED},
   { GBM_FORMAT_XBGR8888, VIRGL_FORMAT_B8G8R8X8_UNORM_EMULATED},
};
113
rendernode_open(void)114 static int rendernode_open(void)
115 {
116 DIR *dir;
117 int ret, fd;
118 bool undesired_found;
119 drmVersionPtr version;
120 char *rendernode_name;
121 struct dirent *dir_ent;
122 const char *undesired[3] = { "vgem", "pvr", NULL };
123
124 dir = opendir("/dev/dri");
125 if (!dir)
126 return -1;
127
128 fd = -1;
129 while ((dir_ent = readdir(dir))) {
130 if (dir_ent->d_type != DT_CHR)
131 continue;
132
133 if (strncmp(dir_ent->d_name, "renderD", 7))
134 continue;
135
136 ret = asprintf(&rendernode_name, "/dev/dri/%s", dir_ent->d_name);
137 if (ret < 0)
138 goto out;
139
140 fd = open(rendernode_name, O_RDWR | O_CLOEXEC | O_NOCTTY | O_NONBLOCK);
141 free(rendernode_name);
142
143 if (fd < 0)
144 continue;
145
146 version = drmGetVersion(fd);
147 if (!version) {
148 close(fd);
149 fd = -1;
150 continue;
151 }
152
153 undesired_found = false;
154 for (uint32_t i = 0; i < ARRAY_SIZE(undesired); i++) {
155 if (undesired[i] && !strcmp(version->name, undesired[i]))
156 undesired_found = true;
157 }
158
159 drmFreeVersion(version);
160 if (undesired_found) {
161 close(fd);
162 fd = -1;
163 continue;
164 }
165
166 break;
167 }
168
169 out:
170 closedir(dir);
171 return fd;
172 }
173
layout_from_format(uint32_t format)174 static const struct planar_layout *layout_from_format(uint32_t format)
175 {
176 switch (format) {
177 case GBM_FORMAT_R8:
178 return &packed_1bpp_layout;
179 case GBM_FORMAT_YVU420:
180 return &triplanar_yuv_420_layout;
181 case GBM_FORMAT_NV12:
182 return &biplanar_yuv_420_layout;
183 case GBM_FORMAT_RGB565:
184 return &packed_2bpp_layout;
185 case GBM_FORMAT_ARGB8888:
186 case GBM_FORMAT_XRGB8888:
187 case GBM_FORMAT_ABGR8888:
188 case GBM_FORMAT_XBGR8888:
189 return &packed_4bpp_layout;
190 case GBM_FORMAT_ABGR16161616F:
191 return &packed_8bpp_layout;
192 default:
193 return NULL;
194 }
195 }
196
197 #ifdef ENABLE_MINIGBM_ALLOCATION
/*
 * Copies one plane of image data between a guest scatter-gather iovec list
 * and a linearly-mapped host plane, in the direction given by 'direction'
 * (VIRGL_TRANSFER_TO_HOST copies guest -> host, otherwise host -> guest).
 *
 * The guest plane is assumed to start at 'guest_resource_offset' within the
 * flat address space described by the iovec list (iovec offsets start at 0),
 * with rows 'guest_plane_stride' bytes apart; only the first
 * subsampled_width * planar_bytes_per_pixel bytes of each row are copied.
 * The host side is a dense mapping at 'host_address' with rows
 * 'host_plane_stride' bytes apart.
 *
 * The loop walks scanlines and iovecs in lockstep: for each step it copies
 * the overlap of the current scanline with the current iovec, then advances
 * whichever of the two ends first.
 */
static void virgl_gbm_transfer_internal(uint32_t planar_bytes_per_pixel,
                                        uint32_t subsampled_width,
                                        uint32_t subsampled_height,
                                        uint32_t guest_plane_stride,
                                        uint32_t guest_resource_offset,
                                        uint32_t host_plane_stride, uint8_t *host_address,
                                        const struct iovec *iovecs, uint32_t num_iovecs,
                                        uint32_t direction)
{
   bool next_iovec, next_line;
   uint32_t current_height, current_iovec, iovec_start_offset;
   current_height = current_iovec = iovec_start_offset = 0;

   while (current_height < subsampled_height && current_iovec < num_iovecs) {
      uint32_t iovec_size = iovecs[current_iovec].iov_len;
      uint32_t iovec_end_offset = iovec_start_offset + iovec_size;

      /* Guest-address range occupied by the current scanline of the box. */
      uint32_t box_start_offset = guest_resource_offset + current_height * guest_plane_stride;
      uint32_t box_end_offset = box_start_offset + subsampled_width * planar_bytes_per_pixel;

      /* Intersection of that scanline with the current iovec. */
      uint32_t max_start = MAX2(iovec_start_offset, box_start_offset);
      uint32_t min_end = MIN2(iovec_end_offset, box_end_offset);

      if (max_start < min_end) {
         uint32_t offset_in_iovec = (max_start > iovec_start_offset) ?
                                    (max_start - iovec_start_offset) : 0;

         uint32_t copy_iovec_size = min_end - max_start;
         /* If the overlap ran to the end of the iovec, the iovec is
          * exhausted; otherwise the scanline ended first. */
         if (min_end >= iovec_end_offset) {
            next_iovec = true;
            next_line = false;
         } else {
            next_iovec = false;
            next_line = true;
         }

         uint8_t *guest_start = (uint8_t*)iovecs[current_iovec].iov_base + offset_in_iovec;
         uint8_t *host_start = host_address + (current_height * host_plane_stride) +
                               (max_start - box_start_offset);

         if (direction == VIRGL_TRANSFER_TO_HOST)
            memcpy(host_start, guest_start, copy_iovec_size);
         else
            memcpy(guest_start, host_start, copy_iovec_size);
      } else {
         /* No overlap: advance whichever side lies entirely before the
          * other (scanline past this iovec -> next iovec; otherwise the
          * iovec is past this scanline -> next line). */
         if (box_start_offset >= iovec_start_offset) {
            next_iovec = true;
            next_line = false;
         } else {
            next_iovec = false;
            next_line = true;
         }
      }

      if (next_iovec) {
         iovec_start_offset += iovec_size;
         current_iovec++;
      }

      if (next_line)
         current_height++;
   }
}
261 #endif /* ENABLE_MINIGBM_ALLOCATION */
262
virgl_gbm_init(int fd)263 struct virgl_gbm *virgl_gbm_init(int fd)
264 {
265 struct virgl_gbm *gbm = calloc(1, sizeof(struct virgl_gbm));
266 if (!gbm)
267 return NULL;
268
269 gbm->fd = -1;
270 if (fd < 0) {
271 gbm->fd = rendernode_open();
272 if (gbm->fd < 0)
273 goto out_error;
274
275 gbm->device = gbm_create_device(gbm->fd);
276 if (!gbm->device) {
277 close(gbm->fd);
278 goto out_error;
279 }
280 } else {
281 gbm->device = gbm_create_device(fd);
282 if (!gbm->device)
283 goto out_error;
284 }
285
286 return gbm;
287
288 out_error:
289 free(gbm);
290 return NULL;
291 }
292
/*
 * Destroys a virgl_gbm created by virgl_gbm_init(). The GBM device is
 * destroyed before the fd is closed; the fd is closed only when it was
 * opened by virgl_gbm_init() itself (gbm->fd stays -1 for a caller-owned
 * fd). 'gbm' must not be NULL.
 */
void virgl_gbm_fini(struct virgl_gbm *gbm)
{
   gbm_device_destroy(gbm->device);
   if (gbm->fd >= 0)
      close(gbm->fd);
   free(gbm);
}
300
virgl_gbm_convert_format(uint32_t * virgl_format,uint32_t * gbm_format)301 int virgl_gbm_convert_format(uint32_t *virgl_format, uint32_t *gbm_format)
302 {
303
304 if (!virgl_format || !gbm_format)
305 return -1;
306
307 if (*virgl_format != 0 && *gbm_format != 0)
308 return -1;
309
310 for (uint32_t i = 0; i < ARRAY_SIZE(conversions); i++) {
311 if (conversions[i].gbm_format == *gbm_format ||
312 conversions[i].virgl_format == *virgl_format) {
313 *gbm_format = conversions[i].gbm_format;
314 *virgl_format = conversions[i].virgl_format;
315 return 0;
316 }
317 }
318
319 return -1;
320 }
321
322 #ifdef ENABLE_MINIGBM_ALLOCATION
virgl_gbm_transfer(struct gbm_bo * bo,uint32_t direction,const struct iovec * iovecs,uint32_t num_iovecs,const struct vrend_transfer_info * info)323 int virgl_gbm_transfer(struct gbm_bo *bo, uint32_t direction, const struct iovec *iovecs,
324 uint32_t num_iovecs, const struct vrend_transfer_info *info)
325 {
326 void *map_data;
327 uint32_t guest_plane_offset, guest_stride0, host_map_stride0;
328
329 uint32_t width = gbm_bo_get_width(bo);
330 uint32_t height = gbm_bo_get_height(bo);
331 uint32_t format = gbm_bo_get_format(bo);
332 int plane_count = gbm_bo_get_plane_count(bo);
333 const struct planar_layout *layout = layout_from_format(format);
334 if (!layout)
335 return -1;
336
337 host_map_stride0 = 0;
338 uint32_t map_flags = (direction == VIRGL_TRANSFER_TO_HOST) ? GBM_BO_TRANSFER_WRITE :
339 GBM_BO_TRANSFER_READ;
340 /* XXX remove this and map just the region when single plane and GBM honors the region */
341 if (direction == VIRGL_TRANSFER_TO_HOST &&
342 !(info->box->x == 0 && info->box->y == 0 &&
343 info->box->width == width && info->box->height == height))
344 map_flags |= GBM_BO_TRANSFER_READ;
345
346 void *addr = gbm_bo_map(bo, 0, 0, width, height, map_flags, &host_map_stride0, &map_data);
347 if (!addr)
348 return -1;
349
350 guest_plane_offset = info->offset;
351 guest_stride0 = 0;
352
353 /*
354 * Unfortunately, the kernel doesn't actually pass the guest layer_stride and
355 * guest stride to the host (compare virtio_gpu.h and virtgpu_drm.h). We can use
356 * the level (always zero for 2D images) to work around this.
357 */
358 if (info->stride || info->level) {
359 guest_stride0 = info->stride ? info->stride : info->level;
360 if (guest_stride0 < (uint32_t)info->box->width * layout->bytes_per_pixel[0])
361 return -1;
362 } else {
363 guest_stride0 = width * layout->bytes_per_pixel[0];
364 }
365
366 if (guest_stride0 > host_map_stride0)
367 return -1;
368
369 for (int plane = 0; plane < plane_count; plane++) {
370 uint32_t host_plane_offset = gbm_bo_get_offset(bo, plane);
371
372 uint32_t subsampled_x = info->box->x / layout->horizontal_subsampling[plane];
373 uint32_t subsampled_y = info->box->y / layout->vertical_subsampling[plane];
374 uint32_t subsampled_width = info->box->width / layout->horizontal_subsampling[plane];
375 uint32_t subsampled_height = info->box->height / layout->vertical_subsampling[plane];
376 uint32_t plane_height = height / layout->vertical_subsampling[plane];
377
378 uint32_t plane_byte_ratio = layout->bytes_per_pixel[plane] / layout->bytes_per_pixel[0];
379 uint32_t guest_plane_stride = (guest_stride0 * plane_byte_ratio)
380 / layout->horizontal_subsampling[plane];
381 uint32_t host_plane_stride = plane == 0
382 ? host_map_stride0 : gbm_bo_get_stride_for_plane(bo, plane);
383
384 uint32_t guest_resource_offset = guest_plane_offset;
385 uint32_t host_resource_offset = host_plane_offset + (subsampled_y * host_plane_stride)
386 + subsampled_x * layout->bytes_per_pixel[plane];
387
388 uint8_t *host_address = (uint8_t*)addr + host_resource_offset;
389
390 /*
391 * Here we apply another hack. info->offset does not account for
392 * info->box for planar resources and we need to make adjustments.
393 */
394 if (plane_count > 1) {
395 guest_resource_offset += (subsampled_y * guest_plane_stride)
396 + subsampled_x * layout->bytes_per_pixel[plane];
397 }
398
399 virgl_gbm_transfer_internal(layout->bytes_per_pixel[plane], subsampled_width,
400 subsampled_height, guest_plane_stride, guest_resource_offset,
401 host_plane_stride, host_address, iovecs, num_iovecs, direction);
402
403 if (info->layer_stride) {
404 guest_plane_offset += (info->layer_stride * plane_byte_ratio)
405 / (layout->horizontal_subsampling[plane] * layout->vertical_subsampling[plane]);
406 } else {
407 guest_plane_offset += plane_height * guest_plane_stride;
408 }
409 }
410
411 gbm_bo_unmap(bo, map_data);
412 return 0;
413 }
414
virgl_gbm_convert_flags(uint32_t virgl_bind_flags)415 uint32_t virgl_gbm_convert_flags(uint32_t virgl_bind_flags)
416 {
417 uint32_t flags = 0;
418 if (virgl_bind_flags & VIRGL_BIND_SAMPLER_VIEW)
419 flags |= GBM_BO_USE_TEXTURING;
420 if (virgl_bind_flags & VIRGL_BIND_RENDER_TARGET)
421 flags |= GBM_BO_USE_RENDERING;
422 if (virgl_bind_flags & VIRGL_BIND_SCANOUT)
423 flags |= GBM_BO_USE_SCANOUT;
424 if (virgl_bind_flags & VIRGL_BIND_CURSOR)
425 flags |= GBM_BO_USE_CURSOR;
426 if (virgl_bind_flags & VIRGL_BIND_LINEAR)
427 flags |= GBM_BO_USE_LINEAR;
428
429 if (virgl_bind_flags & VIRGL_BIND_MINIGBM_CAMERA_WRITE)
430 flags |= GBM_BO_USE_CAMERA_WRITE;
431 if (virgl_bind_flags & VIRGL_BIND_MINIGBM_CAMERA_READ)
432 flags |= GBM_BO_USE_CAMERA_READ;
433 if (virgl_bind_flags & VIRGL_BIND_MINIGBM_HW_VIDEO_DECODER)
434 flags |= GBM_BO_USE_HW_VIDEO_DECODER;
435 if (virgl_bind_flags & VIRGL_BIND_MINIGBM_HW_VIDEO_ENCODER)
436 flags |= GBM_BO_USE_HW_VIDEO_ENCODER;
437
438 if ((virgl_bind_flags & VIRGL_BIND_MINIGBM_PROTECTED) == VIRGL_BIND_MINIGBM_PROTECTED) {
439 flags |= GBM_BO_USE_PROTECTED;
440 } else {
441 if (virgl_bind_flags & VIRGL_BIND_MINIGBM_SW_READ_OFTEN)
442 flags |= GBM_BO_USE_SW_READ_OFTEN;
443 if (virgl_bind_flags & VIRGL_BIND_MINIGBM_SW_READ_RARELY)
444 flags |= GBM_BO_USE_SW_READ_RARELY;
445 if (virgl_bind_flags & VIRGL_BIND_MINIGBM_SW_WRITE_OFTEN)
446 flags |= GBM_BO_USE_SW_WRITE_OFTEN;
447 if (virgl_bind_flags & VIRGL_BIND_MINIGBM_SW_WRITE_RARELY)
448 flags |= GBM_BO_USE_SW_WRITE_RARELY;
449 }
450
451 return flags;
452 }
453
/*
 * Fills 'query' with the export description of 'bo': per-plane strides and
 * offsets, the fourcc format, the modifier, and (when query->in_export_fds
 * is set) one dma-buf fd per distinct GEM handle. Planes sharing a handle
 * share an fd slot, so out_num_fds may be smaller than the plane count.
 *
 * Returns 0 on success. On failure, any fds exported so far are closed and
 * the output fields are reset before returning non-zero.
 */
int virgl_gbm_export_query(struct gbm_bo *bo, struct virgl_renderer_export_query *query)
{
   int ret = -1;
   uint32_t handles[VIRGL_GBM_MAX_PLANES] = { 0 };
   struct gbm_device *gbm = gbm_bo_get_device(bo);
   int num_planes = gbm_bo_get_plane_count(bo);
   if (num_planes < 0 || num_planes > VIRGL_GBM_MAX_PLANES)
      return ret;

   /* Initialize every output slot, including planes beyond num_planes. */
   query->out_num_fds = 0;
   query->out_fourcc = 0;
   query->out_modifier = DRM_FORMAT_MOD_INVALID;
   for (int plane = 0; plane < VIRGL_GBM_MAX_PLANES; plane++) {
      query->out_fds[plane] = -1;
      query->out_strides[plane] = 0;
      query->out_offsets[plane] = 0;
   }

   for (int plane = 0; plane < num_planes; plane++) {
      uint32_t i, handle;
      query->out_strides[plane] = gbm_bo_get_stride_for_plane(bo, plane);
      query->out_offsets[plane] = gbm_bo_get_offset(bo, plane);
      handle = gbm_bo_get_handle_for_plane(bo, plane).u32;

      /* Deduplicate: has this GEM handle been seen on an earlier plane? */
      for (i = 0; i < query->out_num_fds; i++) {
         if (handles[i] == handle)
            break;
      }

      /* New handle: optionally export an fd for it, then record it. */
      if (i == query->out_num_fds) {
         if (query->in_export_fds) {
            ret = virgl_gbm_export_fd(gbm, handle, &query->out_fds[query->out_num_fds]);
            if (ret)
               goto err_close;
         }
         handles[query->out_num_fds] = handle;
         query->out_num_fds++;
      }
   }

   query->out_modifier = gbm_bo_get_modifier(bo);
   query->out_fourcc = gbm_bo_get_format(bo);
   return 0;

err_close:
   /* Unwind: close any fds already exported and reset all outputs. */
   for (int plane = 0; plane < VIRGL_GBM_MAX_PLANES; plane++) {
      if (query->out_fds[plane] >= 0) {
         close(query->out_fds[plane]);
         query->out_fds[plane] = -1;
      }

      query->out_strides[plane] = 0;
      query->out_offsets[plane] = 0;
   }

   query->out_num_fds = 0;
   return ret;
}
512 #endif
513
virgl_gbm_export_fd(struct gbm_device * gbm,uint32_t handle,int32_t * out_fd)514 int virgl_gbm_export_fd(struct gbm_device *gbm, uint32_t handle, int32_t *out_fd)
515 {
516 int ret;
517 ret = drmPrimeHandleToFD(gbm_device_get_fd(gbm), handle, DRM_CLOEXEC | DRM_RDWR, out_fd);
518 // Kernels with older DRM core versions block DRM_RDWR but give a
519 // read/write mapping anyway.
520 if (ret)
521 ret = drmPrimeHandleToFD(gbm_device_get_fd(gbm), handle, DRM_CLOEXEC, out_fd);
522
523 return ret;
524 }
525
virgl_gbm_get_plane_width(struct gbm_bo * bo,int plane)526 int virgl_gbm_get_plane_width(struct gbm_bo *bo, int plane) {
527 uint32_t format = gbm_bo_get_format(bo);
528 const struct planar_layout *layout = layout_from_format(format);
529 if (!layout)
530 return -1;
531 return gbm_bo_get_width(bo) / layout->horizontal_subsampling[plane];
532 }
533
virgl_gbm_get_plane_height(struct gbm_bo * bo,int plane)534 int virgl_gbm_get_plane_height(struct gbm_bo *bo, int plane) {
535 uint32_t format = gbm_bo_get_format(bo);
536 const struct planar_layout *layout = layout_from_format(format);
537 if (!layout)
538 return -1;
539 return gbm_bo_get_height(bo) / layout->vertical_subsampling[plane];
540 }
541
virgl_gbm_get_plane_bytes_per_pixel(struct gbm_bo * bo,int plane)542 int virgl_gbm_get_plane_bytes_per_pixel(struct gbm_bo *bo, int plane) {
543 uint32_t format = gbm_bo_get_format(bo);
544 const struct planar_layout *layout = layout_from_format(format);
545 if (!layout)
546 return -1;
547 return layout->bytes_per_pixel[plane];
548 }
549
virgl_gbm_external_allocation_preferred(uint32_t flags)550 bool virgl_gbm_external_allocation_preferred(uint32_t flags) {
551 return (flags & (VIRGL_RES_BIND_SCANOUT | VIRGL_RES_BIND_SHARED)) != 0;
552 }
553
virgl_gbm_gpu_import_required(uint32_t flags)554 bool virgl_gbm_gpu_import_required(uint32_t flags) {
555 return !virgl_gbm_external_allocation_preferred(flags) ||
556 (flags & (VIRGL_BIND_RENDER_TARGET | VIRGL_BIND_SAMPLER_VIEW)) != 0;
557 }
558