/*
 * Copyright 2022 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cutils/log.h>
#include <fcntl.h>
#include <pthread.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>
#include <xf86drm.h>

#include <cerrno>
#include <cstring>
#include <fstream>
#include <string>

#include "LinuxVirtGpu.h"
#include "virtgpu_drm.h"
#include "virtgpu_gfxstream_protocol.h"

#define PARAM(x) \
    (struct VirtGpuParam) { x, #x, 0 }

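// Rounds n up to the next multiple of a (assumes a > 0).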
static inline uint32_t align_up(uint32_t n, uint32_t a) { return ((n + a - 1) / a) * a; }

LinuxVirtGpuDevice::LinuxVirtGpuDevice(enum VirtGpuCapset capset, int fd) : VirtGpuDevice(capset) {
    struct VirtGpuParam params[] = {
        PARAM(VIRTGPU_PARAM_3D_FEATURES),          PARAM(VIRTGPU_PARAM_CAPSET_QUERY_FIX),
        PARAM(VIRTGPU_PARAM_RESOURCE_BLOB),        PARAM(VIRTGPU_PARAM_HOST_VISIBLE),
        PARAM(VIRTGPU_PARAM_CROSS_DEVICE),         PARAM(VIRTGPU_PARAM_CONTEXT_INIT),
        PARAM(VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs), PARAM(VIRTGPU_PARAM_EXPLICIT_DEBUG_NAME),
        PARAM(VIRTGPU_PARAM_CREATE_GUEST_HANDLE),
    };

    int ret;
    struct drm_virtgpu_get_caps get_caps = {0};
    struct drm_virtgpu_context_init init = {0};
    struct drm_virtgpu_context_set_param ctx_set_params[3] = {{0}};
    const char* processName = nullptr;

    memset(&mCaps, 0, sizeof(struct VirtGpuCaps));

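    // getprogname() is Bionic/BSD-specific; on other libcs processName stays
    // null and the context is created without a debug name.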
#ifdef __ANDROID__
    processName = getprogname();
#endif

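    // Open a fresh render node when no fd is provided; otherwise dup() the
    // caller's fd so this object owns its own handle.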
    if (fd < 0) {
        mDeviceHandle = static_cast<int64_t>(drmOpenRender(128));
        if (mDeviceHandle < 0) {
            ALOGE("Failed to open rendernode: %s", strerror(errno));
            return;
        }
    } else {
        mDeviceHandle = dup(fd);
        if (mDeviceHandle < 0) {
            ALOGE("Failed to dup rendernode: %s", strerror(errno));
            return;
        }
    }

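    // Probe each virtgpu parameter; anything the kernel/host does not support
    // is logged and left at zero in mCaps.params.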
    for (uint32_t i = 0; i < kParamMax; i++) {
        struct drm_virtgpu_getparam get_param = {0};
        get_param.param = params[i].param;
        get_param.value = (uint64_t)(uintptr_t)&params[i].value;

        ret = drmIoctl(mDeviceHandle, DRM_IOCTL_VIRTGPU_GETPARAM, &get_param);
        if (ret) {
            ALOGV("virtgpu backend not enabling %s", params[i].name);
            continue;
        }

        mCaps.params[i] = params[i].value;
    }

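    // Fetch the capability set for the requested capset; the struct written by
    // the host is defined in virtgpu_gfxstream_protocol.h.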
    get_caps.cap_set_id = static_cast<uint32_t>(capset);
    switch (capset) {
        case kCapsetGfxStreamVulkan:
            get_caps.size = sizeof(struct vulkanCapset);
            get_caps.addr = (unsigned long long)&mCaps.vulkanCapset;
            break;
        case kCapsetGfxStreamMagma:
            get_caps.size = sizeof(struct magmaCapset);
            get_caps.addr = (unsigned long long)&mCaps.magmaCapset;
            break;
        case kCapsetGfxStreamGles:
            get_caps.size = sizeof(struct vulkanCapset);
            get_caps.addr = (unsigned long long)&mCaps.glesCapset;
            break;
        case kCapsetGfxStreamComposer:
            get_caps.size = sizeof(struct vulkanCapset);
            get_caps.addr = (unsigned long long)&mCaps.composerCapset;
            break;
        default:
            get_caps.size = 0;
    }

    ret = drmIoctl(mDeviceHandle, DRM_IOCTL_VIRTGPU_GET_CAPS, &get_caps);
    if (ret) {
        // Don't fail on a capability query error just yet; AEMU doesn't use
        // this API yet (b/272121235).
        ALOGE("DRM_IOCTL_VIRTGPU_GET_CAPS failed with %s", strerror(errno));
    }

    // An ASG blob is needed in some configurations, so make sure blobAlignment
    // is always defined.
    if (!mCaps.vulkanCapset.blobAlignment) {
        mCaps.vulkanCapset.blobAlignment = 4096;
    }

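    // Configure the new virtgpu context: number of fence rings, the capset to
    // attach, and (when the kernel supports it) a per-context debug name.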
    ctx_set_params[0].param = VIRTGPU_CONTEXT_PARAM_NUM_RINGS;
    ctx_set_params[0].value = 2;
    init.num_params = 1;

    if (capset != kCapsetNone) {
        ctx_set_params[init.num_params].param = VIRTGPU_CONTEXT_PARAM_CAPSET_ID;
        ctx_set_params[init.num_params].value = static_cast<uint32_t>(capset);
        init.num_params++;
    }

    if (mCaps.params[kParamExplicitDebugName] && processName) {
        ctx_set_params[init.num_params].param = VIRTGPU_CONTEXT_PARAM_DEBUG_NAME;
        ctx_set_params[init.num_params].value = reinterpret_cast<uint64_t>(processName);
        init.num_params++;
    }

    init.ctx_set_params = (unsigned long long)&ctx_set_params[0];
    ret = drmIoctl(mDeviceHandle, DRM_IOCTL_VIRTGPU_CONTEXT_INIT, &init);
    if (ret) {
        ALOGE("DRM_IOCTL_VIRTGPU_CONTEXT_INIT failed with %s, continuing without context...",
              strerror(errno));
    }
}

LinuxVirtGpuDevice::~LinuxVirtGpuDevice() { close(mDeviceHandle); }

struct VirtGpuCaps LinuxVirtGpuDevice::getCaps(void) { return mCaps; }

int64_t LinuxVirtGpuDevice::getDeviceHandle(void) { return mDeviceHandle; }

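// Creates a classic (non-blob) 3D resource. The size is computed as
// stride * height, which assumes a single-plane, linear layout.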
VirtGpuResourcePtr LinuxVirtGpuDevice::createResource(uint32_t width, uint32_t height,
                                                      uint32_t stride, uint32_t virglFormat,
                                                      uint32_t target, uint32_t bind) {
    drm_virtgpu_resource_create create = {
        .target = target,
        .format = virglFormat,
        .bind = bind,
        .width = width,
        .height = height,
        .depth = 1U,
        .array_size = 1U,
        .last_level = 0,
        .nr_samples = 0,
        .size = stride * height,
        .stride = stride,
    };

    int ret = drmIoctl(mDeviceHandle, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, &create);
    if (ret) {
        ALOGE("DRM_IOCTL_VIRTGPU_RESOURCE_CREATE failed with %s", strerror(errno));
        return nullptr;
    }

    return std::make_shared<LinuxVirtGpuResource>(
        mDeviceHandle, create.bo_handle, create.res_handle, static_cast<uint64_t>(create.size));
}

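// Creates a blob resource; blobMem and flags select guest- vs. host-backed
// memory and the allowed mappings, as defined by the virtgpu blob interface.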
VirtGpuResourcePtr LinuxVirtGpuDevice::createBlob(const struct VirtGpuCreateBlob& blobCreate) {
    int ret;
    struct drm_virtgpu_resource_create_blob create = {0};

    create.size = blobCreate.size;
    create.blob_mem = blobCreate.blobMem;
    create.blob_flags = blobCreate.flags;
    create.blob_id = blobCreate.blobId;

    ret = drmIoctl(mDeviceHandle, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &create);
    if (ret < 0) {
        ALOGE("DRM_VIRTGPU_RESOURCE_CREATE_BLOB failed with %s", strerror(errno));
        return nullptr;
    }

    return std::make_shared<LinuxVirtGpuResource>(mDeviceHandle, create.bo_handle,
                                                  create.res_handle, blobCreate.size);
}

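// Imports an external (prime/dma-buf) handle as a virtgpu resource. The
// incoming fd is closed here whether or not the import succeeds.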
VirtGpuResourcePtr LinuxVirtGpuDevice::importBlob(const struct VirtGpuExternalHandle& handle) {
    struct drm_virtgpu_resource_info info = {0};
    uint32_t blobHandle;
    int ret;

    ret = drmPrimeFDToHandle(mDeviceHandle, handle.osHandle, &blobHandle);
    close(handle.osHandle);
    if (ret) {
        ALOGE("DRM_IOCTL_PRIME_FD_TO_HANDLE failed: %s", strerror(errno));
        return nullptr;
    }

    info.bo_handle = blobHandle;
    ret = drmIoctl(mDeviceHandle, DRM_IOCTL_VIRTGPU_RESOURCE_INFO, &info);
    if (ret) {
        ALOGE("DRM_IOCTL_VIRTGPU_RESOURCE_INFO failed: %s", strerror(errno));
        return nullptr;
    }

    return std::make_shared<LinuxVirtGpuResource>(mDeviceHandle, blobHandle, info.res_handle,
                                                  static_cast<uint64_t>(info.size));
}

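// Submits a command buffer on the context created above. When kFenceOut is
// requested, the kernel hands back a sync-file fd in exec.fence_fd (this
// presumes kFenceOut maps onto the virtgpu fence-out execbuffer flag).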
int LinuxVirtGpuDevice::execBuffer(struct VirtGpuExecBuffer& execbuffer,
                                   const VirtGpuResource* blob) {
    int ret;
    struct drm_virtgpu_execbuffer exec = {0};
    uint32_t blobHandle;

    exec.flags = execbuffer.flags;
    exec.size = execbuffer.command_size;
    exec.ring_idx = execbuffer.ring_idx;
    exec.command = (uint64_t)(uintptr_t)(execbuffer.command);
    exec.fence_fd = -1;

    if (blob) {
        blobHandle = blob->getBlobHandle();
        exec.bo_handles = (uint64_t)(uintptr_t)(&blobHandle);
        exec.num_bo_handles = 1;
    }

    ret = drmIoctl(mDeviceHandle, DRM_IOCTL_VIRTGPU_EXECBUFFER, &exec);
    if (ret) {
        ALOGE("DRM_IOCTL_VIRTGPU_EXECBUFFER failed: %s", strerror(errno));
        return ret;
    }

    if (execbuffer.flags & kFenceOut) {
        execbuffer.handle.osHandle = exec.fence_fd;
        execbuffer.handle.type = kFenceHandleSyncFd;
    }

    return 0;
}

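// A minimal usage sketch (hypothetical caller code, not part of this file);
// it assumes the kBlobMemHost3d and kBlobFlagMappable enums declared alongside
// VirtGpuCreateBlob in the VirtGpu headers:
//
//   VirtGpuDevice* device = createPlatformVirtGpuDevice(kCapsetGfxStreamVulkan, -1);
//   struct VirtGpuCreateBlob blobCreate = {};
//   blobCreate.size = device->getCaps().vulkanCapset.blobAlignment;
//   blobCreate.blobMem = kBlobMemHost3d;
//   blobCreate.flags = kBlobFlagMappable;
//   blobCreate.blobId = 0;
//   VirtGpuResourcePtr blob = device->createBlob(blobCreate);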
VirtGpuDevice* createPlatformVirtGpuDevice(enum VirtGpuCapset capset, int fd) {
    return new LinuxVirtGpuDevice(capset, fd);
}