// Copyright (C) 2023 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <errno.h>
#include <string.h>

#include "../vulkan_enc/vk_util.h"
#include "HostConnection.h"
#include "ProcessPipe.h"
#include "ResourceTracker.h"
#include "VkEncoder.h"
#include "gfxstream_vk_entrypoints.h"
#include "gfxstream_vk_private.h"
#include "vk_alloc.h"
#include "vk_device.h"
#include "vk_instance.h"
#include "vk_sync_dummy.h"

static HostConnection* getConnection(void) {
    auto hostCon = HostConnection::get();
    return hostCon;
}

static gfxstream::vk::VkEncoder* getVkEncoder(HostConnection* con) { return con->vkEncoder(); }

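// Callbacks that let ResourceTracker fetch the calling thread's HostConnection and its
// VkEncoder lazily instead of holding onto either directly.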
gfxstream::vk::ResourceTracker::ThreadingCallbacks threadingCallbacks = {
    .hostConnectionGetFunc = getConnection,
    .vkEncoderGetFunc = getVkEncoder,
};

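// Per-process setup for the Vulkan encoder path: establishes the host connection, queries
// capset features, and registers the threading callbacks and sequence-number pointer with
// ResourceTracker. Invoked from the instance-level entrypoints before anything is encoded.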
VkResult SetupInstance(void) {
    uint32_t noRenderControlEnc = 0;
    HostConnection* hostCon = HostConnection::getOrCreate(kCapsetGfxStreamVulkan);
    if (!hostCon) {
        ALOGE("vulkan: Failed to get host connection\n");
        return VK_ERROR_DEVICE_LOST;
    }

    gfxstream::vk::ResourceTracker::get()->setupCaps(noRenderControlEnc);
    // Legacy goldfish path: can be removed once goldfish is no longer used guest-side.
    if (!noRenderControlEnc) {
#if GFXSTREAM_GUEST_ENABLE_GLES
        // Implicitly sets up sequence number
        ExtendedRCEncoderContext* rcEnc = hostCon->rcEncoder();
        if (!rcEnc) {
            ALOGE("vulkan: Failed to get renderControl encoder context\n");
            return VK_ERROR_DEVICE_LOST;
        }

        gfxstream::vk::ResourceTracker::get()->setupFeatures(rcEnc->featureInfo_const());
#endif
    }

    gfxstream::vk::ResourceTracker::get()->setThreadingCallbacks(threadingCallbacks);
    gfxstream::vk::ResourceTracker::get()->setSeqnoPtr(getSeqnoPtrForProcess());
    gfxstream::vk::VkEncoder* vkEnc = hostCon->vkEncoder();
    if (!vkEnc) {
        ALOGE("vulkan: Failed to get Vulkan encoder\n");
        return VK_ERROR_DEVICE_LOST;
    }

    return VK_SUCCESS;
}

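// Declares hostCon and vkEnc in the calling scope and bails out with `ret` if the Vulkan
// encoder cannot be obtained.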
#define VK_HOST_CONNECTION(ret)                                                        \
    HostConnection* hostCon = HostConnection::getOrCreate(kCapsetGfxStreamVulkan);     \
    gfxstream::vk::VkEncoder* vkEnc = hostCon->vkEncoder();                            \
    if (!vkEnc) {                                                                      \
        ALOGE("vulkan: Failed to get Vulkan encoder\n");                               \
        return ret;                                                                    \
    }

static bool instance_extension_table_initialized = false;
static struct vk_instance_extension_table gfxstream_vk_instance_extensions_supported = {0};

// Provided by Mesa components only; never encoded/decoded through gfxstream
static const char* const kMesaOnlyInstanceExtension[] = {
    VK_KHR_SURFACE_EXTENSION_NAME,
#if defined(LINUX_GUEST_BUILD)
    VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME,
#endif
    VK_EXT_DEBUG_UTILS_EXTENSION_NAME,
};

static const char* const kMesaOnlyDeviceExtensions[] = {
    VK_KHR_SWAPCHAIN_EXTENSION_NAME,
};

static bool isMesaOnlyInstanceExtension(const char* name) {
    for (auto mesaExt : kMesaOnlyInstanceExtension) {
        if (!strncmp(mesaExt, name, VK_MAX_EXTENSION_NAME_SIZE)) return true;
    }
    return false;
}

static bool isMesaOnlyDeviceExtension(const char* name) {
    for (auto mesaExt : kMesaOnlyDeviceExtensions) {
        if (!strncmp(mesaExt, name, VK_MAX_EXTENSION_NAME_SIZE)) return true;
    }
    return false;
}

// Filtered extension names for encoding
static std::vector<const char*> filteredInstanceExtensionNames(uint32_t count,
                                                               const char* const* extNames) {
    std::vector<const char*> retList;
    for (uint32_t i = 0; i < count; ++i) {
        auto extName = extNames[i];
        if (!isMesaOnlyInstanceExtension(extName)) {
            retList.push_back(extName);
        }
    }
    return retList;
}

static std::vector<const char*> filteredDeviceExtensionNames(uint32_t count,
                                                             const char* const* extNames) {
    std::vector<const char*> retList;
    for (uint32_t i = 0; i < count; ++i) {
        auto extName = extNames[i];
        if (!isMesaOnlyDeviceExtension(extName)) {
            retList.push_back(extName);
        }
    }
    return retList;
}

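// Builds the vk_device_extension_table for a physical device by merging the device
// extensions reported by the host (via the encoder) with the Mesa-only extensions
// implemented entirely guest-side (e.g. VK_KHR_swapchain).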
static void get_device_extensions(VkPhysicalDevice physDevInternal,
                                  struct vk_device_extension_table* deviceExts) {
    VkResult result = (VkResult)0;
    auto vkEnc = gfxstream::vk::ResourceTracker::getThreadLocalEncoder();
    auto resources = gfxstream::vk::ResourceTracker::get();
    uint32_t numDeviceExts = 0;
    result = resources->on_vkEnumerateDeviceExtensionProperties(vkEnc, VK_SUCCESS, physDevInternal,
                                                                NULL, &numDeviceExts, NULL);
    if (VK_SUCCESS == result) {
        std::vector<VkExtensionProperties> extProps(numDeviceExts);
        result = resources->on_vkEnumerateDeviceExtensionProperties(
            vkEnc, VK_SUCCESS, physDevInternal, NULL, &numDeviceExts, extProps.data());
        if (VK_SUCCESS == result) {
            // device extensions from gfxstream
            for (uint32_t i = 0; i < numDeviceExts; i++) {
                for (uint32_t j = 0; j < VK_DEVICE_EXTENSION_COUNT; j++) {
                    if (0 == strncmp(extProps[i].extensionName,
                                     vk_device_extensions[j].extensionName,
                                     VK_MAX_EXTENSION_NAME_SIZE)) {
                        deviceExts->extensions[j] = true;
                        break;
                    }
                }
            }
            // device extensions from Mesa
            for (uint32_t j = 0; j < VK_DEVICE_EXTENSION_COUNT; j++) {
                if (isMesaOnlyDeviceExtension(vk_device_extensions[j].extensionName)) {
                    deviceExts->extensions[j] = true;
                    break;
                }
            }
        }
    }
}

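// Wraps a gfxstream-internal VkPhysicalDevice in a Mesa vk_physical_device, wiring up the
// dispatch tables, supported extensions, dummy sync types, and WSI support.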
static VkResult gfxstream_vk_physical_device_init(
    struct gfxstream_vk_physical_device* physical_device, struct gfxstream_vk_instance* instance,
    VkPhysicalDevice internal_object) {
    struct vk_device_extension_table supported_extensions = {0};
    get_device_extensions(internal_object, &supported_extensions);

    struct vk_physical_device_dispatch_table dispatch_table;
    memset(&dispatch_table, 0, sizeof(struct vk_physical_device_dispatch_table));
    vk_physical_device_dispatch_table_from_entrypoints(
        &dispatch_table, &gfxstream_vk_physical_device_entrypoints, false);
    vk_physical_device_dispatch_table_from_entrypoints(&dispatch_table,
                                                       &wsi_physical_device_entrypoints, false);

    // Initialize the mesa object
    VkResult result = vk_physical_device_init(&physical_device->vk, &instance->vk,
                                              &supported_extensions, NULL, NULL, &dispatch_table);

    if (VK_SUCCESS == result) {
        // Set the gfxstream-internal object
        physical_device->internal_object = internal_object;
        physical_device->instance = instance;
        // Note: Must use dummy_sync for correct sync object path in WSI operations
        physical_device->sync_types[0] = &vk_sync_dummy_type;
        physical_device->sync_types[1] = NULL;
        physical_device->vk.supported_sync_types = physical_device->sync_types;

        result = gfxstream_vk_wsi_init(physical_device);
    }

    return result;
}

static void gfxstream_vk_physical_device_finish(
    struct gfxstream_vk_physical_device* physical_device) {
    gfxstream_vk_wsi_finish(physical_device);

    vk_physical_device_finish(&physical_device->vk);
}

static void gfxstream_vk_destroy_physical_device(struct vk_physical_device* physical_device) {
    gfxstream_vk_physical_device_finish((struct gfxstream_vk_physical_device*)physical_device);
    vk_free(&physical_device->instance->alloc, physical_device);
}

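// vk_instance::physical_devices.enumerate hook: enumerates the host's physical devices
// through the encoder and creates a gfxstream_vk_physical_device wrapper for each one.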
static VkResult gfxstream_vk_enumerate_devices(struct vk_instance* vk_instance) {
    VkResult result = VK_SUCCESS;
    gfxstream_vk_instance* gfxstream_instance = (gfxstream_vk_instance*)vk_instance;
    uint32_t deviceCount = 0;
    auto vkEnc = gfxstream::vk::ResourceTracker::getThreadLocalEncoder();
    auto resources = gfxstream::vk::ResourceTracker::get();
    result = resources->on_vkEnumeratePhysicalDevices(
        vkEnc, VK_SUCCESS, gfxstream_instance->internal_object, &deviceCount, NULL);
    if (VK_SUCCESS != result) return result;
    std::vector<VkPhysicalDevice> internal_list(deviceCount);
    result = resources->on_vkEnumeratePhysicalDevices(
        vkEnc, VK_SUCCESS, gfxstream_instance->internal_object, &deviceCount, internal_list.data());

    if (VK_SUCCESS == result) {
        for (uint32_t i = 0; i < deviceCount; i++) {
            struct gfxstream_vk_physical_device* gfxstream_physicalDevice =
                (struct gfxstream_vk_physical_device*)vk_zalloc(
                    &gfxstream_instance->vk.alloc, sizeof(struct gfxstream_vk_physical_device), 8,
                    VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
            if (!gfxstream_physicalDevice) {
                result = VK_ERROR_OUT_OF_HOST_MEMORY;
                break;
            }
            result = gfxstream_vk_physical_device_init(gfxstream_physicalDevice, gfxstream_instance,
                                                       internal_list[i]);
            if (VK_SUCCESS == result) {
                list_addtail(&gfxstream_physicalDevice->vk.link,
                             &gfxstream_instance->vk.physical_devices.list);
            } else {
                vk_free(&gfxstream_instance->vk.alloc, gfxstream_physicalDevice);
                break;
            }
        }
    }

    return result;
}

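// Lazily builds the process-global instance extension table on first call, merging the
// host-reported instance extensions (queried through the encoder) with the Mesa-only ones.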
static struct vk_instance_extension_table* get_instance_extensions() {
    struct vk_instance_extension_table* const retTablePtr =
        &gfxstream_vk_instance_extensions_supported;
    if (!instance_extension_table_initialized) {
        VkResult result = SetupInstance();
        if (VK_SUCCESS == result) {
            VK_HOST_CONNECTION(retTablePtr)
            auto resources = gfxstream::vk::ResourceTracker::get();
            uint32_t numInstanceExts = 0;
            result = resources->on_vkEnumerateInstanceExtensionProperties(vkEnc, VK_SUCCESS, NULL,
                                                                          &numInstanceExts, NULL);
            if (VK_SUCCESS == result) {
                std::vector<VkExtensionProperties> extProps(numInstanceExts);
                result = resources->on_vkEnumerateInstanceExtensionProperties(
                    vkEnc, VK_SUCCESS, NULL, &numInstanceExts, extProps.data());
                if (VK_SUCCESS == result) {
                    // instance extensions from gfxstream
                    for (uint32_t i = 0; i < numInstanceExts; i++) {
                        for (uint32_t j = 0; j < VK_INSTANCE_EXTENSION_COUNT; j++) {
                            if (0 == strncmp(extProps[i].extensionName,
                                             vk_instance_extensions[j].extensionName,
                                             VK_MAX_EXTENSION_NAME_SIZE)) {
                                gfxstream_vk_instance_extensions_supported.extensions[j] = true;
                                break;
                            }
                        }
                    }
                    // instance extensions from Mesa
                    for (uint32_t j = 0; j < VK_INSTANCE_EXTENSION_COUNT; j++) {
                        if (isMesaOnlyInstanceExtension(vk_instance_extensions[j].extensionName)) {
                            gfxstream_vk_instance_extensions_supported.extensions[j] = true;
                        }
                    }
                    instance_extension_table_initialized = true;
                }
            }
        }
    }
    return retTablePtr;
}

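// Creates both objects behind VkInstance: the gfxstream-internal instance (created on the
// host through the encoder, with Mesa-only extensions filtered out of the create info) and
// the Mesa vk_instance wrapper that the loader dispatches into.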
VkResult gfxstream_vk_CreateInstance(const VkInstanceCreateInfo* pCreateInfo,
                                     const VkAllocationCallbacks* pAllocator,
                                     VkInstance* pInstance) {
    AEMU_SCOPED_TRACE("vkCreateInstance");

    struct gfxstream_vk_instance* instance;

    pAllocator = pAllocator ?: vk_default_allocator();
    instance = (struct gfxstream_vk_instance*)vk_zalloc(pAllocator, sizeof(*instance), 8,
                                                        VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    if (NULL == instance) {
        return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
    }

    VkResult result = VK_SUCCESS;
    /* Encoder call */
    {
        ALOGE("calling setup instance internally");
        result = SetupInstance();
        if (VK_SUCCESS != result) {
            return vk_error(NULL, result);
        }
        uint32_t initialEnabledExtensionCount = pCreateInfo->enabledExtensionCount;
        const char* const* initialPpEnabledExtensionNames = pCreateInfo->ppEnabledExtensionNames;
        std::vector<const char*> filteredExts = filteredInstanceExtensionNames(
            pCreateInfo->enabledExtensionCount, pCreateInfo->ppEnabledExtensionNames);
        // Temporarily modify createInfo for the encoder call
        VkInstanceCreateInfo* mutableCreateInfo = (VkInstanceCreateInfo*)pCreateInfo;
        mutableCreateInfo->enabledExtensionCount = static_cast<uint32_t>(filteredExts.size());
        mutableCreateInfo->ppEnabledExtensionNames = filteredExts.data();

        VK_HOST_CONNECTION(VK_ERROR_DEVICE_LOST);
        result = vkEnc->vkCreateInstance(pCreateInfo, nullptr, &instance->internal_object,
                                         true /* do lock */);
        if (VK_SUCCESS != result) {
            return vk_error(NULL, result);
        }
        // Revert the createInfo to the user-set data
        mutableCreateInfo->enabledExtensionCount = initialEnabledExtensionCount;
        mutableCreateInfo->ppEnabledExtensionNames = initialPpEnabledExtensionNames;
    }

    struct vk_instance_dispatch_table dispatch_table;
    memset(&dispatch_table, 0, sizeof(struct vk_instance_dispatch_table));
    vk_instance_dispatch_table_from_entrypoints(&dispatch_table, &gfxstream_vk_instance_entrypoints,
                                                false);
    vk_instance_dispatch_table_from_entrypoints(&dispatch_table, &wsi_instance_entrypoints, false);

    result = vk_instance_init(&instance->vk, get_instance_extensions(), &dispatch_table,
                              pCreateInfo, pAllocator);

    if (result != VK_SUCCESS) {
        vk_free(pAllocator, instance);
        return vk_error(NULL, result);
    }

    instance->vk.physical_devices.enumerate = gfxstream_vk_enumerate_devices;
    instance->vk.physical_devices.destroy = gfxstream_vk_destroy_physical_device;
    // TODO: instance->vk.physical_devices.try_create_for_drm (?)

    *pInstance = gfxstream_vk_instance_to_handle(instance);
    return VK_SUCCESS;
}

void gfxstream_vk_DestroyInstance(VkInstance _instance, const VkAllocationCallbacks* pAllocator) {
    AEMU_SCOPED_TRACE("vkDestroyInstance");
    if (VK_NULL_HANDLE == _instance) return;

    VK_FROM_HANDLE(gfxstream_vk_instance, instance, _instance);

    VK_HOST_CONNECTION()
    vkEnc->vkDestroyInstance(instance->internal_object, pAllocator, true /* do lock */);

    vk_instance_finish(&instance->vk);
    vk_free(&instance->vk.alloc, instance);

    // Keep End2EndTests happy: the host connection is now statically linked into
    // libvulkan_ranchu.so (separate HostConnections), so tear it down explicitly here.
#if defined(END2END_TESTS)
    hostCon->exit();
    processPipeRestart();
#endif
}

VkResult gfxstream_vk_EnumerateInstanceExtensionProperties(const char* pLayerName,
                                                           uint32_t* pPropertyCount,
                                                           VkExtensionProperties* pProperties) {
    AEMU_SCOPED_TRACE("vkEnumerateInstanceExtensionProperties");
    (void)pLayerName;

    return vk_enumerate_instance_extension_properties(get_instance_extensions(), pPropertyCount,
                                                       pProperties);
}

VkResult gfxstream_vk_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                                         const char* pLayerName,
                                                         uint32_t* pPropertyCount,
                                                         VkExtensionProperties* pProperties) {
    AEMU_SCOPED_TRACE("vkEnumerateDeviceExtensionProperties");
    (void)pLayerName;
    VK_FROM_HANDLE(vk_physical_device, pdevice, physicalDevice);

    VK_OUTARRAY_MAKE_TYPED(VkExtensionProperties, out, pProperties, pPropertyCount);

    for (int i = 0; i < VK_DEVICE_EXTENSION_COUNT; i++) {
        if (!pdevice->supported_extensions.extensions[i]) continue;

        vk_outarray_append_typed(VkExtensionProperties, &out, prop) {
            *prop = vk_device_extensions[i];
        }
    }

    return vk_outarray_status(&out);
}

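// Mirrors vkCreateInstance: forwards the create info to the host with Mesa-only device
// extensions filtered out, then initializes the Mesa vk_device wrapper around the
// gfxstream-internal VkDevice handle.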
VkResult gfxstream_vk_CreateDevice(VkPhysicalDevice physicalDevice,
                                   const VkDeviceCreateInfo* pCreateInfo,
                                   const VkAllocationCallbacks* pAllocator, VkDevice* pDevice) {
    AEMU_SCOPED_TRACE("vkCreateDevice");
    VK_FROM_HANDLE(gfxstream_vk_physical_device, gfxstream_physicalDevice, physicalDevice);
    VkResult result = (VkResult)0;

    /*
     * Android's libvulkan implements VkPhysicalDeviceSwapchainMaintenance1FeaturesEXT, but
     * still passes it to the underlying driver anyway. See:
     *
     * https://android-review.googlesource.com/c/platform/hardware/google/gfxstream/+/2839438
     *
     * and associated bugs. The Mesa VK runtime also checks this struct, so we have to filter
     * it out before it reaches the runtime.
     * vk_find_struct<VkPhysicalDeviceSwapchainMaintenance1FeaturesEXT>(..) doesn't work for
     * some reason.
     */
    VkBaseInStructure* extensionCreateInfo = (VkBaseInStructure*)(pCreateInfo->pNext);
    while (extensionCreateInfo) {
        if (extensionCreateInfo->sType ==
            VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SWAPCHAIN_MAINTENANCE_1_FEATURES_EXT) {
            auto swapchainMaintenance1Features =
                reinterpret_cast<VkPhysicalDeviceSwapchainMaintenance1FeaturesEXT*>(
                    extensionCreateInfo);
            swapchainMaintenance1Features->swapchainMaintenance1 = VK_FALSE;
        }
        extensionCreateInfo = (VkBaseInStructure*)(extensionCreateInfo->pNext);
    }

    const VkAllocationCallbacks* pMesaAllocator =
        pAllocator ?: &gfxstream_physicalDevice->instance->vk.alloc;
    struct gfxstream_vk_device* gfxstream_device = (struct gfxstream_vk_device*)vk_zalloc(
        pMesaAllocator, sizeof(struct gfxstream_vk_device), 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    result = gfxstream_device ? VK_SUCCESS : VK_ERROR_OUT_OF_HOST_MEMORY;
    if (VK_SUCCESS == result) {
        uint32_t initialEnabledExtensionCount = pCreateInfo->enabledExtensionCount;
        const char* const* initialPpEnabledExtensionNames = pCreateInfo->ppEnabledExtensionNames;
        std::vector<const char*> filteredExts = filteredDeviceExtensionNames(
            pCreateInfo->enabledExtensionCount, pCreateInfo->ppEnabledExtensionNames);
        // Temporarily modify createInfo for the encoder call
        VkDeviceCreateInfo* mutableCreateInfo = (VkDeviceCreateInfo*)pCreateInfo;
        mutableCreateInfo->enabledExtensionCount = static_cast<uint32_t>(filteredExts.size());
        mutableCreateInfo->ppEnabledExtensionNames = filteredExts.data();

        auto vkEnc = gfxstream::vk::ResourceTracker::getThreadLocalEncoder();
        result = vkEnc->vkCreateDevice(gfxstream_physicalDevice->internal_object, pCreateInfo,
                                       pAllocator, &gfxstream_device->internal_object,
                                       true /* do lock */);
        // Revert the createInfo to the user-set data
        mutableCreateInfo->enabledExtensionCount = initialEnabledExtensionCount;
        mutableCreateInfo->ppEnabledExtensionNames = initialPpEnabledExtensionNames;
    }
    if (VK_SUCCESS == result) {
        struct vk_device_dispatch_table dispatch_table;
        memset(&dispatch_table, 0, sizeof(struct vk_device_dispatch_table));
        vk_device_dispatch_table_from_entrypoints(&dispatch_table, &gfxstream_vk_device_entrypoints,
                                                  false);
        vk_device_dispatch_table_from_entrypoints(&dispatch_table, &wsi_device_entrypoints, false);

        result = vk_device_init(&gfxstream_device->vk, &gfxstream_physicalDevice->vk,
                                &dispatch_table, pCreateInfo, pMesaAllocator);
    }
    if (VK_SUCCESS == result) {
        gfxstream_device->physical_device = gfxstream_physicalDevice;
        // TODO: Initialize cmd_dispatch for emulated secondary command buffer support?
        gfxstream_device->vk.command_dispatch_table = &gfxstream_device->cmd_dispatch;
        *pDevice = gfxstream_vk_device_to_handle(gfxstream_device);
    } else {
        vk_free(pMesaAllocator, gfxstream_device);
    }

    return result;
}

void gfxstream_vk_DestroyDevice(VkDevice device, const VkAllocationCallbacks* pAllocator) {
    AEMU_SCOPED_TRACE("vkDestroyDevice");
    VK_FROM_HANDLE(gfxstream_vk_device, gfxstream_device, device);
    if (VK_NULL_HANDLE == device) return;

    auto vkEnc = gfxstream::vk::ResourceTracker::getThreadLocalEncoder();
    vkEnc->vkDestroyDevice(gfxstream_device->internal_object, pAllocator, true /* do lock */);

    /* Must destroy device queues manually */
    vk_foreach_queue_safe(queue, &gfxstream_device->vk) {
        vk_queue_finish(queue);
        vk_free(&gfxstream_device->vk.alloc, queue);
    }
    vk_device_finish(&gfxstream_device->vk);
    vk_free(&gfxstream_device->vk.alloc, gfxstream_device);
}

void gfxstream_vk_GetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex,
                                 VkQueue* pQueue) {
    AEMU_SCOPED_TRACE("vkGetDeviceQueue");
    VK_FROM_HANDLE(gfxstream_vk_device, gfxstream_device, device);
    struct gfxstream_vk_queue* gfxstream_queue = (struct gfxstream_vk_queue*)vk_zalloc(
        &gfxstream_device->vk.alloc, sizeof(struct gfxstream_vk_queue), 8,
        VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
    VkResult result = gfxstream_queue ? VK_SUCCESS : VK_ERROR_OUT_OF_HOST_MEMORY;
    if (VK_SUCCESS == result) {
        VkDeviceQueueCreateInfo createInfo = {
            .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,
            .pNext = NULL,
            .flags = 0,
            .queueFamilyIndex = queueFamilyIndex,
            .queueCount = 1,
            .pQueuePriorities = NULL,
        };
        result =
            vk_queue_init(&gfxstream_queue->vk, &gfxstream_device->vk, &createInfo, queueIndex);
    }
    if (VK_SUCCESS == result) {
        auto vkEnc = gfxstream::vk::ResourceTracker::getThreadLocalEncoder();
        vkEnc->vkGetDeviceQueue(gfxstream_device->internal_object, queueFamilyIndex, queueIndex,
                                &gfxstream_queue->internal_object, true /* do lock */);

        gfxstream_queue->device = gfxstream_device;
        *pQueue = gfxstream_vk_queue_to_handle(gfxstream_queue);
    } else {
        *pQueue = VK_NULL_HANDLE;
    }
}

void gfxstream_vk_GetDeviceQueue2(VkDevice device, const VkDeviceQueueInfo2* pQueueInfo,
                                  VkQueue* pQueue) {
    AEMU_SCOPED_TRACE("vkGetDeviceQueue2");
    VK_FROM_HANDLE(gfxstream_vk_device, gfxstream_device, device);
    struct gfxstream_vk_queue* gfxstream_queue = (struct gfxstream_vk_queue*)vk_zalloc(
        &gfxstream_device->vk.alloc, sizeof(struct gfxstream_vk_queue), 8,
        VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
    VkResult result = gfxstream_queue ? VK_SUCCESS : VK_ERROR_OUT_OF_HOST_MEMORY;
    if (VK_SUCCESS == result) {
        VkDeviceQueueCreateInfo createInfo = {
            .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,
            .pNext = NULL,
            .flags = pQueueInfo->flags,
            .queueFamilyIndex = pQueueInfo->queueFamilyIndex,
            .queueCount = 1,
            .pQueuePriorities = NULL,
        };
        result = vk_queue_init(&gfxstream_queue->vk, &gfxstream_device->vk, &createInfo,
                               pQueueInfo->queueIndex);
    }
    if (VK_SUCCESS == result) {
        auto vkEnc = gfxstream::vk::ResourceTracker::getThreadLocalEncoder();
        vkEnc->vkGetDeviceQueue2(gfxstream_device->internal_object, pQueueInfo,
                                 &gfxstream_queue->internal_object, true /* do lock */);

        gfxstream_queue->device = gfxstream_device;
        *pQueue = gfxstream_vk_queue_to_handle(gfxstream_queue);
    } else {
        *pQueue = VK_NULL_HANDLE;
    }
}

/* The loader wants us to expose a second GetInstanceProcAddr function
 * to work around certain LD_PRELOAD issues seen in apps.
 */
extern "C" PUBLIC VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
vk_icdGetInstanceProcAddr(VkInstance instance, const char* pName);

extern "C" PUBLIC VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
vk_icdGetInstanceProcAddr(VkInstance instance, const char* pName) {
    return gfxstream_vk_GetInstanceProcAddr(instance, pName);
}

PFN_vkVoidFunction gfxstream_vk_GetInstanceProcAddr(VkInstance _instance, const char* pName) {
    VK_FROM_HANDLE(gfxstream_vk_instance, instance, _instance);
    return vk_instance_get_proc_addr(&instance->vk, &gfxstream_vk_instance_entrypoints, pName);
}

PFN_vkVoidFunction gfxstream_vk_GetDeviceProcAddr(VkDevice _device, const char* pName) {
    AEMU_SCOPED_TRACE("vkGetDeviceProcAddr");
    VK_FROM_HANDLE(gfxstream_vk_device, device, _device);
    return vk_device_get_proc_addr(&device->vk, pName);
}

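// Allocates the wrapper via vk_device_memory_create, unwraps any dedicated-allocation
// buffer/image handles to their gfxstream-internal objects, then forwards the allocation to
// ResourceTracker::on_vkAllocateMemory.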
VkResult gfxstream_vk_AllocateMemory(VkDevice device, const VkMemoryAllocateInfo* pAllocateInfo,
                                     const VkAllocationCallbacks* pAllocator,
                                     VkDeviceMemory* pMemory) {
    AEMU_SCOPED_TRACE("vkAllocateMemory");
    VK_FROM_HANDLE(gfxstream_vk_device, gfxstream_device, device);
    VkResult vkAllocateMemory_VkResult_return = (VkResult)0;
    struct gfxstream_vk_device_memory* gfxstream_pMemory =
        (struct gfxstream_vk_device_memory*)vk_device_memory_create(
            (vk_device*)gfxstream_device, pAllocateInfo, pAllocator,
            sizeof(struct gfxstream_vk_device_memory));
    /* VkMemoryDedicatedAllocateInfo */
    VkMemoryDedicatedAllocateInfo* dedicatedAllocInfoPtr =
        (VkMemoryDedicatedAllocateInfo*)vk_find_struct<VkMemoryDedicatedAllocateInfo>(
            pAllocateInfo);
    if (dedicatedAllocInfoPtr) {
        if (dedicatedAllocInfoPtr->buffer) {
            VK_FROM_HANDLE(gfxstream_vk_buffer, gfxstream_buffer, dedicatedAllocInfoPtr->buffer);
            dedicatedAllocInfoPtr->buffer = gfxstream_buffer->internal_object;
        }
        if (dedicatedAllocInfoPtr->image) {
            VK_FROM_HANDLE(gfxstream_vk_image, gfxstream_image, dedicatedAllocInfoPtr->image);
            dedicatedAllocInfoPtr->image = gfxstream_image->internal_object;
        }
    }
    vkAllocateMemory_VkResult_return = gfxstream_pMemory ? VK_SUCCESS : VK_ERROR_OUT_OF_HOST_MEMORY;
    if (VK_SUCCESS == vkAllocateMemory_VkResult_return) {
        auto vkEnc = gfxstream::vk::ResourceTracker::getThreadLocalEncoder();
        auto resources = gfxstream::vk::ResourceTracker::get();
        vkAllocateMemory_VkResult_return = resources->on_vkAllocateMemory(
            vkEnc, VK_SUCCESS, gfxstream_device->internal_object, pAllocateInfo, pAllocator,
            &gfxstream_pMemory->internal_object);
    }
    *pMemory = gfxstream_vk_device_memory_to_handle(gfxstream_pMemory);
    return vkAllocateMemory_VkResult_return;
}

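// Both vkCmdBeginRenderPass variants below copy VkRenderPassBeginInfo (orphaning its pNext
// chain) and rebuild it so the render pass, framebuffer, and any
// VkRenderPassAttachmentBeginInfo image views are replaced with their gfxstream-internal
// handles before the command is encoded.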
void gfxstream_vk_CmdBeginRenderPass(VkCommandBuffer commandBuffer,
                                     const VkRenderPassBeginInfo* pRenderPassBegin,
                                     VkSubpassContents contents) {
    AEMU_SCOPED_TRACE("vkCmdBeginRenderPass");
    VK_FROM_HANDLE(gfxstream_vk_command_buffer, gfxstream_commandBuffer, commandBuffer);
    {
        auto vkEnc = gfxstream::vk::ResourceTracker::getCommandBufferEncoder(
            gfxstream_commandBuffer->internal_object);
        VkRenderPassBeginInfo internal_pRenderPassBegin = vk_make_orphan_copy(*pRenderPassBegin);
        vk_struct_chain_iterator structChainIter =
            vk_make_chain_iterator(&internal_pRenderPassBegin);
        /* VkRenderPassBeginInfo::renderPass */
        VK_FROM_HANDLE(gfxstream_vk_render_pass, gfxstream_renderPass,
                       internal_pRenderPassBegin.renderPass);
        internal_pRenderPassBegin.renderPass = gfxstream_renderPass->internal_object;
        /* VkRenderPassBeginInfo::framebuffer */
        VK_FROM_HANDLE(gfxstream_vk_framebuffer, gfxstream_framebuffer,
                       internal_pRenderPassBegin.framebuffer);
        internal_pRenderPassBegin.framebuffer = gfxstream_framebuffer->internal_object;
        /* pNext = VkRenderPassAttachmentBeginInfo */
        std::vector<VkImageView> internal_pAttachments;
        VkRenderPassAttachmentBeginInfo internal_renderPassAttachmentBeginInfo;
        VkRenderPassAttachmentBeginInfo* pRenderPassAttachmentBeginInfo =
            (VkRenderPassAttachmentBeginInfo*)vk_find_struct<VkRenderPassAttachmentBeginInfo>(
                pRenderPassBegin);
        if (pRenderPassAttachmentBeginInfo) {
            internal_renderPassAttachmentBeginInfo = *pRenderPassAttachmentBeginInfo;
            /* VkRenderPassAttachmentBeginInfo::pAttachments */
            internal_pAttachments.resize(internal_renderPassAttachmentBeginInfo.attachmentCount);
            for (uint32_t i = 0; i < internal_renderPassAttachmentBeginInfo.attachmentCount; i++) {
                VK_FROM_HANDLE(gfxstream_vk_image_view, gfxstream_image_view,
                               internal_renderPassAttachmentBeginInfo.pAttachments[i]);
                internal_pAttachments[i] = gfxstream_image_view->internal_object;
            }
            internal_renderPassAttachmentBeginInfo.pAttachments = internal_pAttachments.data();
            vk_append_struct(&structChainIter, &internal_renderPassAttachmentBeginInfo);
        }
        vkEnc->vkCmdBeginRenderPass(gfxstream_commandBuffer->internal_object,
                                    &internal_pRenderPassBegin, contents, true /* do lock */);
    }
}

void gfxstream_vk_CmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer,
                                         const VkRenderPassBeginInfo* pRenderPassBegin,
                                         const VkSubpassBeginInfo* pSubpassBeginInfo) {
    AEMU_SCOPED_TRACE("vkCmdBeginRenderPass2KHR");
    VK_FROM_HANDLE(gfxstream_vk_command_buffer, gfxstream_commandBuffer, commandBuffer);
    {
        auto vkEnc = gfxstream::vk::ResourceTracker::getCommandBufferEncoder(
            gfxstream_commandBuffer->internal_object);
        VkRenderPassBeginInfo internal_pRenderPassBegin = vk_make_orphan_copy(*pRenderPassBegin);
        vk_struct_chain_iterator structChainIter =
            vk_make_chain_iterator(&internal_pRenderPassBegin);
        /* VkRenderPassBeginInfo::renderPass */
        VK_FROM_HANDLE(gfxstream_vk_render_pass, gfxstream_renderPass,
                       internal_pRenderPassBegin.renderPass);
        internal_pRenderPassBegin.renderPass = gfxstream_renderPass->internal_object;
        /* VkRenderPassBeginInfo::framebuffer */
        VK_FROM_HANDLE(gfxstream_vk_framebuffer, gfxstream_framebuffer,
                       internal_pRenderPassBegin.framebuffer);
        internal_pRenderPassBegin.framebuffer = gfxstream_framebuffer->internal_object;
        /* pNext = VkRenderPassAttachmentBeginInfo */
        std::vector<VkImageView> internal_pAttachments;
        VkRenderPassAttachmentBeginInfo internal_renderPassAttachmentBeginInfo;
        VkRenderPassAttachmentBeginInfo* pRenderPassAttachmentBeginInfo =
            (VkRenderPassAttachmentBeginInfo*)vk_find_struct<VkRenderPassAttachmentBeginInfo>(
                pRenderPassBegin);
        if (pRenderPassAttachmentBeginInfo) {
            internal_renderPassAttachmentBeginInfo = *pRenderPassAttachmentBeginInfo;
            /* VkRenderPassAttachmentBeginInfo::pAttachments */
            internal_pAttachments.resize(internal_renderPassAttachmentBeginInfo.attachmentCount);
            for (uint32_t i = 0; i < internal_renderPassAttachmentBeginInfo.attachmentCount; i++) {
                VK_FROM_HANDLE(gfxstream_vk_image_view, gfxstream_image_view,
                               internal_renderPassAttachmentBeginInfo.pAttachments[i]);
                internal_pAttachments[i] = gfxstream_image_view->internal_object;
            }
            internal_renderPassAttachmentBeginInfo.pAttachments = internal_pAttachments.data();
            vk_append_struct(&structChainIter, &internal_renderPassAttachmentBeginInfo);
        }
        vkEnc->vkCmdBeginRenderPass2KHR(gfxstream_commandBuffer->internal_object,
                                        &internal_pRenderPassBegin, pSubpassBeginInfo,
                                        true /* do lock */);
    }
}

VkResult gfxstream_vk_GetMemoryFdKHR(VkDevice device, const VkMemoryGetFdInfoKHR* pGetFdInfo,
                                     int* pFd) {
    AEMU_SCOPED_TRACE("vkGetMemoryFdKHR");
    VK_FROM_HANDLE(gfxstream_vk_device, gfxstream_device, device);
    VkResult vkGetMemoryFdKHR_VkResult_return = (VkResult)0;

    {
        auto vkEnc = gfxstream::vk::ResourceTracker::getThreadLocalEncoder();
        std::vector<VkMemoryGetFdInfoKHR> internal_pGetFdInfo(1);
        for (uint32_t i = 0; i < 1; ++i) {
            internal_pGetFdInfo[i] = pGetFdInfo[i];
            /* VkMemoryGetFdInfoKHR::memory */
            VK_FROM_HANDLE(gfxstream_vk_device_memory, gfxstream_memory,
                           internal_pGetFdInfo[i].memory);
            internal_pGetFdInfo[i].memory = gfxstream_memory->internal_object;
        }
        auto resources = gfxstream::vk::ResourceTracker::get();
        vkGetMemoryFdKHR_VkResult_return = resources->on_vkGetMemoryFdKHR(
            vkEnc, VK_SUCCESS, gfxstream_device->internal_object, internal_pGetFdInfo.data(), pFd);
    }
    return vkGetMemoryFdKHR_VkResult_return;
}

VkResult gfxstream_vk_EnumerateInstanceLayerProperties(uint32_t* pPropertyCount,
                                                       VkLayerProperties* pProperties) {
    AEMU_SCOPED_TRACE("vkEnumerateInstanceLayerProperties");
    auto result = SetupInstance();
    if (VK_SUCCESS != result) {
        return vk_error(NULL, result);
    }

    VkResult vkEnumerateInstanceLayerProperties_VkResult_return = (VkResult)0;
    {
        auto vkEnc = gfxstream::vk::ResourceTracker::getThreadLocalEncoder();
        vkEnumerateInstanceLayerProperties_VkResult_return =
            vkEnc->vkEnumerateInstanceLayerProperties(pPropertyCount, pProperties,
                                                      true /* do lock */);
    }
    return vkEnumerateInstanceLayerProperties_VkResult_return;
}

VkResult gfxstream_vk_EnumerateInstanceVersion(uint32_t* pApiVersion) {
    AEMU_SCOPED_TRACE("vkEnumerateInstanceVersion");
    auto result = SetupInstance();
    if (VK_SUCCESS != result) {
        return vk_error(NULL, result);
    }

    VkResult vkEnumerateInstanceVersion_VkResult_return = (VkResult)0;
    {
        auto vkEnc = gfxstream::vk::ResourceTracker::getThreadLocalEncoder();
        vkEnumerateInstanceVersion_VkResult_return =
            vkEnc->vkEnumerateInstanceVersion(pApiVersion, true /* do lock */);
    }
    return vkEnumerateInstanceVersion_VkResult_return;
}

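// Copies each VkComputePipelineCreateInfo and swaps shader module, pipeline layout, and base
// pipeline handles for their gfxstream-internal objects before encoding. Note that only a
// single wrapper pipeline object is allocated and returned here.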
VkResult gfxstream_vk_CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache,
                                             uint32_t createInfoCount,
                                             const VkComputePipelineCreateInfo* pCreateInfos,
                                             const VkAllocationCallbacks* pAllocator,
                                             VkPipeline* pPipelines) {
    AEMU_SCOPED_TRACE("vkCreateComputePipelines");
    VkResult vkCreateComputePipelines_VkResult_return = (VkResult)0;
    VK_FROM_HANDLE(gfxstream_vk_device, gfxstream_device, device);
    VK_FROM_HANDLE(gfxstream_vk_pipeline_cache, gfxstream_pipelineCache, pipelineCache);
    struct gfxstream_vk_pipeline* gfxstream_pPipelines = (gfxstream_vk_pipeline*)vk_object_zalloc(
        &gfxstream_device->vk, pAllocator, sizeof(gfxstream_vk_pipeline), VK_OBJECT_TYPE_PIPELINE);
    vkCreateComputePipelines_VkResult_return =
        gfxstream_pPipelines ? VK_SUCCESS : VK_ERROR_OUT_OF_HOST_MEMORY;
    if (VK_SUCCESS == vkCreateComputePipelines_VkResult_return) {
        auto vkEnc = gfxstream::vk::ResourceTracker::getThreadLocalEncoder();
        std::vector<VkComputePipelineCreateInfo> internal_pCreateInfos(createInfoCount);
        std::vector<VkPipelineShaderStageCreateInfo> internal_VkComputePipelineCreateInfo_stage(
            createInfoCount);
        for (uint32_t i = 0; i < createInfoCount; ++i) {
            internal_pCreateInfos[i] = pCreateInfos[i];
            /* VkComputePipelineCreateInfo::stage */
            {
                internal_VkComputePipelineCreateInfo_stage[i] = internal_pCreateInfos[i].stage;
                /* VkPipelineShaderStageCreateInfo::module */
                if (internal_VkComputePipelineCreateInfo_stage[i].module) {
                    VK_FROM_HANDLE(gfxstream_vk_shader_module, gfxstream_module,
                                   internal_VkComputePipelineCreateInfo_stage[i].module);
                    internal_VkComputePipelineCreateInfo_stage[i].module =
                        gfxstream_module->internal_object;
                }
                internal_pCreateInfos[i].stage = internal_VkComputePipelineCreateInfo_stage[i];
            }
            /* VkComputePipelineCreateInfo::layout */
            VK_FROM_HANDLE(gfxstream_vk_pipeline_layout, gfxstream_layout,
                           internal_pCreateInfos[i].layout);
            internal_pCreateInfos[i].layout = gfxstream_layout->internal_object;
            /* VkComputePipelineCreateInfo::basePipelineHandle */
            if (internal_pCreateInfos[i].basePipelineHandle) {
                VK_FROM_HANDLE(gfxstream_vk_pipeline, gfxstream_basePipelineHandle,
                               internal_pCreateInfos[i].basePipelineHandle);
                internal_pCreateInfos[i].basePipelineHandle =
                    gfxstream_basePipelineHandle->internal_object;
            }
        }
        vkCreateComputePipelines_VkResult_return = vkEnc->vkCreateComputePipelines(
            gfxstream_device->internal_object,
            gfxstream_pipelineCache ? gfxstream_pipelineCache->internal_object : VK_NULL_HANDLE,
            createInfoCount, internal_pCreateInfos.data(), pAllocator,
            &gfxstream_pPipelines->internal_object, true /* do lock */);
    }
    *pPipelines = gfxstream_vk_pipeline_to_handle(gfxstream_pPipelines);
    return vkCreateComputePipelines_VkResult_return;
}

struct DescriptorSetTransformStorage {
    std::vector<std::vector<VkDescriptorImageInfo>> imageInfos;
    std::vector<std::vector<VkDescriptorBufferInfo>> bufferInfos;
    std::vector<std::vector<VkBufferView>> texelBuffers;
};

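// Deep-copies an array of VkWriteDescriptorSet, replacing image view, buffer, and texel
// buffer view handles with their gfxstream-internal objects. The copied per-descriptor
// arrays live in the caller-provided DescriptorSetTransformStorage, which must outlive the
// returned vector.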
static std::vector<VkWriteDescriptorSet> transformDescriptorSetList(
    const VkWriteDescriptorSet* pDescriptorSets, uint32_t descriptorSetCount,
    DescriptorSetTransformStorage& storage) {
    std::vector<VkWriteDescriptorSet> outDescriptorSets(descriptorSetCount);
    for (uint32_t i = 0; i < descriptorSetCount; ++i) {
        const auto& srcDescriptorSet = pDescriptorSets[i];
        const uint32_t descriptorCount = srcDescriptorSet.descriptorCount;

        VkWriteDescriptorSet& outDescriptorSet = outDescriptorSets[i];
        outDescriptorSet = srcDescriptorSet;

        storage.imageInfos.push_back(std::vector<VkDescriptorImageInfo>());
        storage.imageInfos[i].resize(descriptorCount);
        for (uint32_t j = 0; j < descriptorCount; ++j) {
            const auto* srcImageInfo = srcDescriptorSet.pImageInfo;
            if (srcImageInfo) {
                storage.imageInfos[i][j] = srcImageInfo[j];
                storage.imageInfos[i][j].imageView = VK_NULL_HANDLE;
                if (vk_descriptor_type_has_image_view(srcDescriptorSet.descriptorType) &&
                    srcImageInfo[j].imageView) {
                    VK_FROM_HANDLE(gfxstream_vk_image_view, gfxstreamImageView,
                                   srcImageInfo[j].imageView);
                    storage.imageInfos[i][j].imageView = gfxstreamImageView->internal_object;
                }
            }
        }
        outDescriptorSet.pImageInfo = storage.imageInfos[i].data();

        storage.bufferInfos.push_back(std::vector<VkDescriptorBufferInfo>());
        storage.bufferInfos[i].resize(descriptorCount);
        for (uint32_t j = 0; j < descriptorCount; ++j) {
            const auto* srcBufferInfo = srcDescriptorSet.pBufferInfo;
            if (srcBufferInfo) {
                storage.bufferInfos[i][j] = srcBufferInfo[j];
                storage.bufferInfos[i][j].buffer = VK_NULL_HANDLE;
                if (vk_descriptor_type_has_descriptor_buffer(srcDescriptorSet.descriptorType) &&
                    srcBufferInfo[j].buffer) {
                    VK_FROM_HANDLE(gfxstream_vk_buffer, gfxstreamBuffer, srcBufferInfo[j].buffer);
                    storage.bufferInfos[i][j].buffer = gfxstreamBuffer->internal_object;
                }
            }
        }
        outDescriptorSet.pBufferInfo = storage.bufferInfos[i].data();

        storage.texelBuffers.push_back(std::vector<VkBufferView>());
        storage.texelBuffers[i].resize(descriptorCount);
        for (uint32_t j = 0; j < descriptorCount; ++j) {
            const auto* srcBufferView = srcDescriptorSet.pTexelBufferView;
            if (vk_descriptor_type_has_texel_buffer(srcDescriptorSet.descriptorType) &&
                srcBufferView) {
                VK_FROM_HANDLE(gfxstream_vk_buffer_view, gfxstreamBufferView, srcBufferView[j]);
                storage.texelBuffers[i][j] = gfxstreamBufferView->internal_object;
            }
        }
        outDescriptorSet.pTexelBufferView = storage.texelBuffers[i].data();
    }
    return outDescriptorSets;
}

void gfxstream_vk_UpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount,
                                       const VkWriteDescriptorSet* pDescriptorWrites,
                                       uint32_t descriptorCopyCount,
                                       const VkCopyDescriptorSet* pDescriptorCopies) {
    AEMU_SCOPED_TRACE("vkUpdateDescriptorSets");
    VK_FROM_HANDLE(gfxstream_vk_device, gfxstream_device, device);
    {
        auto vkEnc = gfxstream::vk::ResourceTracker::getThreadLocalEncoder();
        DescriptorSetTransformStorage descriptorSetTransformStorage;
        std::vector<VkWriteDescriptorSet> internal_pDescriptorWrites =
            transformDescriptorSetList(pDescriptorWrites, descriptorWriteCount,
                                       descriptorSetTransformStorage);
        auto resources = gfxstream::vk::ResourceTracker::get();
        resources->on_vkUpdateDescriptorSets(
            vkEnc, gfxstream_device->internal_object, descriptorWriteCount,
            internal_pDescriptorWrites.data(), descriptorCopyCount, pDescriptorCopies);
    }
}

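// Translates the descriptor pools, set layouts, and pending writes to gfxstream-internal
// handles, then encodes vkQueueCommitDescriptorSetUpdatesGOOGLE on the queue's encoder.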
void gfxstream_vk_QueueCommitDescriptorSetUpdatesGOOGLE(
    VkQueue queue, uint32_t descriptorPoolCount, const VkDescriptorPool* pDescriptorPools,
    uint32_t descriptorSetCount, const VkDescriptorSetLayout* pSetLayouts,
    const uint64_t* pDescriptorSetPoolIds, const uint32_t* pDescriptorSetWhichPool,
    const uint32_t* pDescriptorSetPendingAllocation,
    const uint32_t* pDescriptorWriteStartingIndices, uint32_t pendingDescriptorWriteCount,
    const VkWriteDescriptorSet* pPendingDescriptorWrites) {
    AEMU_SCOPED_TRACE("vkQueueCommitDescriptorSetUpdatesGOOGLE");
    VK_FROM_HANDLE(gfxstream_vk_queue, gfxstream_queue, queue);
    {
        auto vkEnc =
            gfxstream::vk::ResourceTracker::getQueueEncoder(gfxstream_queue->internal_object);
        std::vector<VkDescriptorPool> internal_pDescriptorPools(descriptorPoolCount);
        for (uint32_t i = 0; i < descriptorPoolCount; ++i) {
            VK_FROM_HANDLE(gfxstream_vk_descriptor_pool, gfxstream_pDescriptorPools,
                           pDescriptorPools[i]);
            internal_pDescriptorPools[i] = gfxstream_pDescriptorPools->internal_object;
        }
        std::vector<VkDescriptorSetLayout> internal_pSetLayouts(descriptorSetCount);
        for (uint32_t i = 0; i < descriptorSetCount; ++i) {
            VK_FROM_HANDLE(gfxstream_vk_descriptor_set_layout, gfxstream_pSetLayouts,
                           pSetLayouts[i]);
            internal_pSetLayouts[i] = gfxstream_pSetLayouts->internal_object;
        }
        DescriptorSetTransformStorage descriptorSetTransformStorage;
        std::vector<VkWriteDescriptorSet> internal_pPendingDescriptorWrites =
            transformDescriptorSetList(pPendingDescriptorWrites, pendingDescriptorWriteCount,
                                       descriptorSetTransformStorage);
        vkEnc->vkQueueCommitDescriptorSetUpdatesGOOGLE(
            gfxstream_queue->internal_object, descriptorPoolCount, internal_pDescriptorPools.data(),
            descriptorSetCount, internal_pSetLayouts.data(), pDescriptorSetPoolIds,
            pDescriptorSetWhichPool, pDescriptorSetPendingAllocation,
            pDescriptorWriteStartingIndices, pendingDescriptorWriteCount,
            internal_pPendingDescriptorWrites.data(), true /* do lock */);
    }
}