// Copyright (C) 2018 The Android Open Source Project
// Copyright (C) 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "ResourceTracker.h"

#include "Resources.h"
#include "CommandBufferStagingStream.h"
#include "DescriptorSetVirtualization.h"

#include "android/base/Optional.h"
#include "android/base/threads/AndroidWorkPool.h"

#include "goldfish_vk_private_defs.h"

#include "../OpenglSystemCommon/EmulatorFeatureInfo.h"
#include "../OpenglSystemCommon/HostConnection.h"
#ifdef VK_USE_PLATFORM_ANDROID_KHR

#include "../egl/goldfish_sync.h"

// Zircon handle stubs so that code shared with the Fuchsia backend
// still compiles on Android; they are never exercised at runtime.
typedef uint32_t zx_handle_t;
typedef uint64_t zx_koid_t;
#define ZX_HANDLE_INVALID ((zx_handle_t)0)
#define ZX_KOID_INVALID ((zx_koid_t)0)
void zx_handle_close(zx_handle_t) { }
void zx_event_create(int, zx_handle_t*) { }

#include "AndroidHardwareBuffer.h"

#ifndef HOST_BUILD
#include <drm/virtgpu_drm.h>
#include <xf86drm.h>
#endif

#include "VirtioGpuNext.h"

#endif // VK_USE_PLATFORM_ANDROID_KHR

#ifdef VK_USE_PLATFORM_FUCHSIA

#include <cutils/native_handle.h>
#include <fuchsia/hardware/goldfish/llcpp/fidl.h>
#include <fuchsia/sysmem/llcpp/fidl.h>
#include <lib/zx/channel.h>
#include <lib/zx/vmo.h>
#include <zircon/errors.h>
#include <zircon/process.h>
#include <zircon/rights.h>
#include <zircon/syscalls.h>
#include <zircon/syscalls/object.h>

#include "services/service_connector.h"

#ifndef FUCHSIA_NO_TRACE
#include <lib/trace/event.h>
#endif

#define GET_STATUS_SAFE(result, member) \
    ((result).ok() ? ((result).Unwrap()->member) : ZX_OK)
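
// A hedged usage sketch for GET_STATUS_SAFE (illustrative, not from this
// file): given a FIDL sync-call result with the usual ok()/Unwrap() shape,
// the macro reads a status member only when the transport call succeeded,
// and otherwise degrades to ZX_OK. |res| below is a hypothetical member name.
//
//   auto result = controlDevice->SomeCall();
//   zx_status_t status = GET_STATUS_SAFE(result, res);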

struct AHardwareBuffer;

// AHardwareBuffer stubs so that code shared with the Android backend
// still compiles on Fuchsia; they report success without doing real work.
void AHardwareBuffer_release(AHardwareBuffer*) { }

native_handle_t *AHardwareBuffer_getNativeHandle(AHardwareBuffer*) { return NULL; }

uint64_t getAndroidHardwareBufferUsageFromVkUsage(
    const VkImageCreateFlags vk_create,
    const VkImageUsageFlags vk_usage) {
    return AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE;
}

VkResult importAndroidHardwareBuffer(
    Gralloc *grallocHelper,
    const VkImportAndroidHardwareBufferInfoANDROID* info,
    struct AHardwareBuffer **importOut) {
    return VK_SUCCESS;
}

VkResult createAndroidHardwareBuffer(
    bool hasDedicatedImage,
    bool hasDedicatedBuffer,
    const VkExtent3D& imageExtent,
    uint32_t imageLayers,
    VkFormat imageFormat,
    VkImageUsageFlags imageUsage,
    VkImageCreateFlags imageCreateFlags,
    VkDeviceSize bufferSize,
    VkDeviceSize allocationInfoAllocSize,
    struct AHardwareBuffer **out) {
    return VK_SUCCESS;
}

namespace goldfish_vk {
struct HostVisibleMemoryVirtualizationInfo;
}

VkResult getAndroidHardwareBufferPropertiesANDROID(
    Gralloc *grallocHelper,
    const goldfish_vk::HostVisibleMemoryVirtualizationInfo*,
    VkDevice,
    const AHardwareBuffer*,
    VkAndroidHardwareBufferPropertiesANDROID*) { return VK_SUCCESS; }

VkResult getMemoryAndroidHardwareBufferANDROID(struct AHardwareBuffer **) { return VK_SUCCESS; }

#endif // VK_USE_PLATFORM_FUCHSIA

#include "HostVisibleMemoryVirtualization.h"
#include "Resources.h"
#include "VkEncoder.h"

#include "android/base/AlignedBuf.h"
#include "android/base/synchronization/AndroidLock.h"

#include "goldfish_address_space.h"
#include "goldfish_vk_private_defs.h"
#include "vk_format_info.h"
#include "vk_struct_id.h"
#include "vk_util.h"

#include <set>
#include <string>
#include <unordered_map>
#include <unordered_set>

#include <vndk/hardware_buffer.h>
#include <log/log.h>
#include <stdlib.h>
#include <sync/sync.h>

#ifdef VK_USE_PLATFORM_ANDROID_KHR

#include <sys/mman.h>
#include <sys/syscall.h>

#ifdef HOST_BUILD
#include "android/utils/tempfile.h"
#endif

static inline int
inline_memfd_create(const char *name, unsigned int flags) {
#ifdef HOST_BUILD
    TempFile* tmpFile = tempfile_create();
    return open(tempfile_path(tmpFile), O_RDWR);
    // TODO: Windows is not supposed to support VkSemaphoreGetFdInfoKHR
#else
    return syscall(SYS_memfd_create, name, flags);
#endif
}
#define memfd_create inline_memfd_create
#endif // VK_USE_PLATFORM_ANDROID_KHR
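
// A hedged sketch of how the wrapper above is typically used (the name and
// flag below are illustrative, not from this file): exportable payloads are
// backed by an anonymous file descriptor that can then travel through
// vkGetSemaphoreFdKHR-style paths.
//
//   int fd = memfd_create("vk-export-payload", 0 /* e.g. MFD_CLOEXEC */);
//   if (fd < 0) { /* fall back or fail the export */ }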

#define RESOURCE_TRACKER_DEBUG 0

#if RESOURCE_TRACKER_DEBUG
#undef D
#define D(fmt,...) ALOGD("%s: " fmt, __func__, ##__VA_ARGS__);
#else
#ifndef D
#define D(fmt,...)
#endif
#endif

using android::aligned_buf_alloc;
using android::aligned_buf_free;
using android::base::Optional;
using android::base::guest::AutoLock;
using android::base::guest::RecursiveLock;
using android::base::guest::Lock;
using android::base::guest::WorkPool;

namespace goldfish_vk {

#define MAKE_HANDLE_MAPPING_FOREACH(type_name, map_impl, map_to_u64_impl, map_from_u64_impl) \
    void mapHandles_##type_name(type_name* handles, size_t count) override { \
        for (size_t i = 0; i < count; ++i) { \
            map_impl; \
        } \
    } \
    void mapHandles_##type_name##_u64(const type_name* handles, uint64_t* handle_u64s, size_t count) override { \
        for (size_t i = 0; i < count; ++i) { \
            map_to_u64_impl; \
        } \
    } \
    void mapHandles_u64_##type_name(const uint64_t* handle_u64s, type_name* handles, size_t count) override { \
        for (size_t i = 0; i < count; ++i) { \
            map_from_u64_impl; \
        } \
    } \

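// A hedged sketch of what MAKE_HANDLE_MAPPING_FOREACH produces when
// instantiated, e.g. via CREATE_MAPPING_IMPL_FOR_TYPE(VkImage) below
// (approximate expansion, trimmed for readability):
//
//   void mapHandles_VkImage(VkImage* handles, size_t count) override {
//       for (size_t i = 0; i < count; ++i) {
//           handles[i] = new_from_host_VkImage(handles[i]);
//           ResourceTracker::get()->register_VkImage(handles[i]);
//       }
//   }
//   void mapHandles_VkImage_u64(const VkImage* handles,
//                               uint64_t* handle_u64s, size_t count) override { /* ... */ }
//   void mapHandles_u64_VkImage(const uint64_t* handle_u64s,
//                               VkImage* handles, size_t count) override { /* ... */ }
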
#define DEFINE_RESOURCE_TRACKING_CLASS(class_name, impl) \
class class_name : public VulkanHandleMapping { \
public: \
    virtual ~class_name() { } \
    GOLDFISH_VK_LIST_HANDLE_TYPES(impl) \
}; \

#define CREATE_MAPPING_IMPL_FOR_TYPE(type_name) \
    MAKE_HANDLE_MAPPING_FOREACH(type_name, \
        handles[i] = new_from_host_##type_name(handles[i]); ResourceTracker::get()->register_##type_name(handles[i]);, \
        handle_u64s[i] = (uint64_t)new_from_host_##type_name(handles[i]), \
        handles[i] = (type_name)new_from_host_u64_##type_name(handle_u64s[i]); ResourceTracker::get()->register_##type_name(handles[i]);)

#define UNWRAP_MAPPING_IMPL_FOR_TYPE(type_name) \
    MAKE_HANDLE_MAPPING_FOREACH(type_name, \
        handles[i] = get_host_##type_name(handles[i]), \
        handle_u64s[i] = (uint64_t)get_host_u64_##type_name(handles[i]), \
        handles[i] = (type_name)get_host_##type_name((type_name)handle_u64s[i]))

#define DESTROY_MAPPING_IMPL_FOR_TYPE(type_name) \
    MAKE_HANDLE_MAPPING_FOREACH(type_name, \
        ResourceTracker::get()->unregister_##type_name(handles[i]); delete_goldfish_##type_name(handles[i]), \
        (void)handle_u64s[i]; delete_goldfish_##type_name(handles[i]), \
        (void)handles[i]; delete_goldfish_##type_name((type_name)handle_u64s[i]))

DEFINE_RESOURCE_TRACKING_CLASS(CreateMapping, CREATE_MAPPING_IMPL_FOR_TYPE)
DEFINE_RESOURCE_TRACKING_CLASS(UnwrapMapping, UNWRAP_MAPPING_IMPL_FOR_TYPE)
DEFINE_RESOURCE_TRACKING_CLASS(DestroyMapping, DESTROY_MAPPING_IMPL_FOR_TYPE)
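
// A hedged sketch of how these mapping classes are consumed (caller code is
// illustrative, not from this file): callers obtain one of the mappings
// through the common VulkanHandleMapping interface and apply it to an array
// of handles of any listed type.
//
//   VulkanHandleMapping* mapping = /* e.g. the tracker's create mapping */;
//   VkImage images[2] = { img0, img1 };
//   mapping->mapHandles_VkImage(images, 2);  // wraps and registers each handle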

static uint32_t* sSeqnoPtr = nullptr;

// static
uint32_t ResourceTracker::streamFeatureBits = 0;
ResourceTracker::ThreadingCallbacks ResourceTracker::threadingCallbacks;

struct StagingInfo {
    Lock mLock;
    std::vector<CommandBufferStagingStream*> streams;
    std::vector<VkEncoder*> encoders;

    ~StagingInfo() {
        for (auto stream : streams) {
            delete stream;
        }

        for (auto encoder : encoders) {
            delete encoder;
        }
    }

    void pushStaging(CommandBufferStagingStream* stream, VkEncoder* encoder) {
        AutoLock lock(mLock);
        stream->reset();
        streams.push_back(stream);
        encoders.push_back(encoder);
    }

    void popStaging(CommandBufferStagingStream** streamOut, VkEncoder** encoderOut) {
        AutoLock lock(mLock);
        CommandBufferStagingStream* stream;
        VkEncoder* encoder;
        if (streams.empty()) {
            stream = new CommandBufferStagingStream;
            encoder = new VkEncoder(stream);
        } else {
            stream = streams.back();
            encoder = encoders.back();
            streams.pop_back();
            encoders.pop_back();
        }
        *streamOut = stream;
        *encoderOut = encoder;
    }
};
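
// A hedged usage sketch for the staging pool above (illustrative, not from
// this file): a command buffer checks out a stream/encoder pair, records into
// it, and returns the pair for reuse instead of reallocating each time.
//
//   CommandBufferStagingStream* stream = nullptr;
//   VkEncoder* encoder = nullptr;
//   sStaging.popStaging(&stream, &encoder);
//   // ... encode commands through |encoder| ...
//   sStaging.pushStaging(stream, encoder);  // resets the stream for reuse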

static StagingInfo sStaging;

class ResourceTracker::Impl {
public:
    Impl() = default;
    CreateMapping createMapping;
    UnwrapMapping unwrapMapping;
    DestroyMapping destroyMapping;
    DefaultHandleMapping defaultMapping;

#define HANDLE_DEFINE_TRIVIAL_INFO_STRUCT(type) \
    struct type##_Info { \
        uint32_t unused; \
    }; \

    GOLDFISH_VK_LIST_TRIVIAL_HANDLE_TYPES(HANDLE_DEFINE_TRIVIAL_INFO_STRUCT)

    struct VkInstance_Info {
        uint32_t highestApiVersion;
        std::set<std::string> enabledExtensions;
        // Fodder for vkEnumeratePhysicalDevices.
        std::vector<VkPhysicalDevice> physicalDevices;
    };

    using HostMemBlocks = std::vector<HostMemAlloc>;
    using HostMemBlockIndex = size_t;

#define INVALID_HOST_MEM_BLOCK (-1)

    struct VkDevice_Info {
        VkPhysicalDevice physdev;
        VkPhysicalDeviceProperties props;
        VkPhysicalDeviceMemoryProperties memProps;
        std::vector<HostMemBlocks> hostMemBlocks { VK_MAX_MEMORY_TYPES };
        uint32_t apiVersion;
        std::set<std::string> enabledExtensions;
        std::vector<std::pair<PFN_vkDeviceMemoryReportCallbackEXT, void *>> deviceMemoryReportCallbacks;
    };

    struct VirtioGpuHostmemResourceInfo {
        uint32_t resourceId = 0;
        int primeFd = -1;
    };

    struct VkDeviceMemory_Info {
        VkDeviceSize allocationSize = 0;
        VkDeviceSize mappedSize = 0;
        uint8_t* mappedPtr = nullptr;
        uint32_t memoryTypeIndex = 0;
        bool virtualHostVisibleBacking = false;
        bool directMapped = false;
        GoldfishAddressSpaceBlock*
            goldfishAddressSpaceBlock = nullptr;
        VirtioGpuHostmemResourceInfo resInfo;
        SubAlloc subAlloc;
        AHardwareBuffer* ahw = nullptr;
        bool imported = false;
        zx_handle_t vmoHandle = ZX_HANDLE_INVALID;
    };

    struct VkCommandBuffer_Info {
        uint32_t placeholder;
    };

    struct VkQueue_Info {
        VkDevice device;
    };

    // custom guest-side structs for images/buffers because of AHardwareBuffer :((
    struct VkImage_Info {
        VkDevice device;
        VkImageCreateInfo createInfo;
        bool external = false;
        VkExternalMemoryImageCreateInfo externalCreateInfo;
        VkDeviceMemory currentBacking = VK_NULL_HANDLE;
        VkDeviceSize currentBackingOffset = 0;
        VkDeviceSize currentBackingSize = 0;
        bool baseRequirementsKnown = false;
        VkMemoryRequirements baseRequirements;
#ifdef VK_USE_PLATFORM_FUCHSIA
        bool isSysmemBackedMemory = false;
#endif
    };

    struct VkBuffer_Info {
        VkDevice device;
        VkBufferCreateInfo createInfo;
        bool external = false;
        VkExternalMemoryBufferCreateInfo externalCreateInfo;
        VkDeviceMemory currentBacking = VK_NULL_HANDLE;
        VkDeviceSize currentBackingOffset = 0;
        VkDeviceSize currentBackingSize = 0;
        bool baseRequirementsKnown = false;
        VkMemoryRequirements baseRequirements;
#ifdef VK_USE_PLATFORM_FUCHSIA
        bool isSysmemBackedMemory = false;
#endif
    };

    struct VkSemaphore_Info {
        VkDevice device;
        zx_handle_t eventHandle = ZX_HANDLE_INVALID;
        zx_koid_t eventKoid = ZX_KOID_INVALID;
        int syncFd = -1;
    };

    struct VkDescriptorUpdateTemplate_Info {
        uint32_t templateEntryCount = 0;
        VkDescriptorUpdateTemplateEntry* templateEntries;

        uint32_t imageInfoCount = 0;
        uint32_t bufferInfoCount = 0;
        uint32_t bufferViewCount = 0;
        uint32_t* imageInfoIndices;
        uint32_t* bufferInfoIndices;
        uint32_t* bufferViewIndices;
        VkDescriptorImageInfo* imageInfos;
        VkDescriptorBufferInfo* bufferInfos;
        VkBufferView* bufferViews;
    };

    struct VkFence_Info {
        VkDevice device;
        bool external = false;
        VkExportFenceCreateInfo exportFenceCreateInfo;
#ifdef VK_USE_PLATFORM_ANDROID_KHR
        int syncFd = -1;
#endif
    };

    struct VkDescriptorPool_Info {
        uint32_t unused;
    };

    struct VkDescriptorSet_Info {
        uint32_t unused;
    };

    struct VkDescriptorSetLayout_Info {
        uint32_t unused;
    };

    struct VkCommandPool_Info {
        uint32_t unused;
    };

    struct VkSampler_Info {
        uint32_t unused;
    };

    struct VkBufferCollectionFUCHSIA_Info {
#ifdef VK_USE_PLATFORM_FUCHSIA
        android::base::Optional<
            fuchsia_sysmem::wire::BufferCollectionConstraints>
            constraints;
        android::base::Optional<VkBufferCollectionProperties2FUCHSIA>
            properties;

        // the index of corresponding createInfo for each image format
        // constraints in |constraints|.
        std::vector<uint32_t> createInfoIndex;
#endif // VK_USE_PLATFORM_FUCHSIA
    };

#define HANDLE_REGISTER_IMPL_IMPL(type) \
    std::unordered_map<type, type##_Info> info_##type; \
    void register_##type(type obj) { \
        AutoLock lock(mLock); \
        info_##type[obj] = type##_Info(); \
    } \

#define HANDLE_UNREGISTER_IMPL_IMPL(type) \
    void unregister_##type(type obj) { \
        AutoLock lock(mLock); \
        info_##type.erase(obj); \
    } \

    GOLDFISH_VK_LIST_HANDLE_TYPES(HANDLE_REGISTER_IMPL_IMPL)
    GOLDFISH_VK_LIST_TRIVIAL_HANDLE_TYPES(HANDLE_UNREGISTER_IMPL_IMPL)
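
    // A hedged sketch of one expansion of the register/unregister macros
    // above, e.g. for VkSampler (approximate, for readability):
    //
    //   std::unordered_map<VkSampler, VkSampler_Info> info_VkSampler;
    //   void register_VkSampler(VkSampler obj) {
    //       AutoLock lock(mLock);
    //       info_VkSampler[obj] = VkSampler_Info();
    //   }
    //
    // Only trivial handle types get the macro-generated unregister_*; the
    // non-trivial types get hand-written unregister_* functions below that
    // release side resources before erasing the tracking entry.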

    void unregister_VkInstance(VkInstance instance) {
        AutoLock lock(mLock);

        auto it = info_VkInstance.find(instance);
        if (it == info_VkInstance.end()) return;
        auto info = it->second;
        info_VkInstance.erase(instance);
        lock.unlock();
    }

    void unregister_VkDevice(VkDevice device) {
        AutoLock lock(mLock);

        auto it = info_VkDevice.find(device);
        if (it == info_VkDevice.end()) return;
        auto info = it->second;
        info_VkDevice.erase(device);
        lock.unlock();
    }

    void unregister_VkCommandPool(VkCommandPool pool) {
        if (!pool) return;

        clearCommandPool(pool);

        AutoLock lock(mLock);
        info_VkCommandPool.erase(pool);
    }

    void unregister_VkSampler(VkSampler sampler) {
        if (!sampler) return;

        AutoLock lock(mLock);
        info_VkSampler.erase(sampler);
    }

    void unregister_VkCommandBuffer(VkCommandBuffer commandBuffer) {
        resetCommandBufferStagingInfo(commandBuffer, true /* also reset primaries */, true /* also clear pending descriptor sets */);

        struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
        if (!cb) return;
        if (cb->lastUsedEncoder) { cb->lastUsedEncoder->decRef(); }
        eraseObjects(&cb->subObjects);
        forAllObjects(cb->poolObjects, [cb](void* commandPool) {
            struct goldfish_VkCommandPool* p = as_goldfish_VkCommandPool((VkCommandPool)commandPool);
            eraseObject(&p->subObjects, (void*)cb);
        });
        eraseObjects(&cb->poolObjects);

        if (cb->userPtr) {
            CommandBufferPendingDescriptorSets* pendingSets = (CommandBufferPendingDescriptorSets*)cb->userPtr;
            delete pendingSets;
        }

        AutoLock lock(mLock);
        info_VkCommandBuffer.erase(commandBuffer);
    }

    void unregister_VkQueue(VkQueue queue) {
        struct goldfish_VkQueue* q = as_goldfish_VkQueue(queue);
        if (!q) return;
        if (q->lastUsedEncoder) { q->lastUsedEncoder->decRef(); }

        AutoLock lock(mLock);
        info_VkQueue.erase(queue);
    }

    void unregister_VkDeviceMemory(VkDeviceMemory mem) {
        AutoLock lock(mLock);

        auto it = info_VkDeviceMemory.find(mem);
        if (it == info_VkDeviceMemory.end()) return;

        auto& memInfo = it->second;

        if (memInfo.ahw) {
            AHardwareBuffer_release(memInfo.ahw);
        }

        if (memInfo.vmoHandle != ZX_HANDLE_INVALID) {
            zx_handle_close(memInfo.vmoHandle);
        }

        if (memInfo.mappedPtr &&
            !memInfo.virtualHostVisibleBacking &&
            !memInfo.directMapped) {
            aligned_buf_free(memInfo.mappedPtr);
        }

        if (memInfo.directMapped) {
            subFreeHostMemory(&memInfo.subAlloc);
        }

        delete memInfo.goldfishAddressSpaceBlock;

        info_VkDeviceMemory.erase(mem);
    }

    void unregister_VkImage(VkImage img) {
        AutoLock lock(mLock);

        auto it = info_VkImage.find(img);
        if (it == info_VkImage.end()) return;

        auto& imageInfo = it->second;
        (void)imageInfo;

        info_VkImage.erase(img);
    }

    void unregister_VkBuffer(VkBuffer buf) {
        AutoLock lock(mLock);

        auto it = info_VkBuffer.find(buf);
        if (it == info_VkBuffer.end()) return;

        info_VkBuffer.erase(buf);
    }

    void unregister_VkSemaphore(VkSemaphore sem) {
        AutoLock lock(mLock);

        auto it = info_VkSemaphore.find(sem);
        if (it == info_VkSemaphore.end()) return;

        auto& semInfo = it->second;

        if (semInfo.eventHandle != ZX_HANDLE_INVALID) {
            zx_handle_close(semInfo.eventHandle);
        }

#ifdef VK_USE_PLATFORM_ANDROID_KHR
        if (semInfo.syncFd >= 0) {
            close(semInfo.syncFd);
        }
#endif

        info_VkSemaphore.erase(sem);
    }

    void unregister_VkDescriptorUpdateTemplate(VkDescriptorUpdateTemplate templ) {
        AutoLock lock(mLock);
        auto it = info_VkDescriptorUpdateTemplate.find(templ);
        if (it == info_VkDescriptorUpdateTemplate.end())
            return;

        auto& info = it->second;
        if (info.templateEntryCount) delete [] info.templateEntries;
        if (info.imageInfoCount) {
            delete [] info.imageInfoIndices;
            delete [] info.imageInfos;
        }
        if (info.bufferInfoCount) {
            delete [] info.bufferInfoIndices;
            delete [] info.bufferInfos;
        }
        if (info.bufferViewCount) {
            delete [] info.bufferViewIndices;
            delete [] info.bufferViews;
        }
        info_VkDescriptorUpdateTemplate.erase(it);
    }

    void unregister_VkFence(VkFence fence) {
        AutoLock lock(mLock);
        auto it = info_VkFence.find(fence);
        if (it == info_VkFence.end()) return;

        auto& fenceInfo = it->second;
        (void)fenceInfo;

#ifdef VK_USE_PLATFORM_ANDROID_KHR
        if (fenceInfo.syncFd >= 0) {
            close(fenceInfo.syncFd);
        }
#endif

        info_VkFence.erase(fence);
    }

#ifdef VK_USE_PLATFORM_FUCHSIA
    void unregister_VkBufferCollectionFUCHSIA(
        VkBufferCollectionFUCHSIA collection) {
        AutoLock lock(mLock);
        info_VkBufferCollectionFUCHSIA.erase(collection);
    }
#endif

    void unregister_VkDescriptorSet_locked(VkDescriptorSet set) {
        struct goldfish_VkDescriptorSet* ds = as_goldfish_VkDescriptorSet(set);
        delete ds->reified;
        info_VkDescriptorSet.erase(set);
    }

    void unregister_VkDescriptorSet(VkDescriptorSet set) {
        if (!set) return;

        AutoLock lock(mLock);
        unregister_VkDescriptorSet_locked(set);
    }

    void unregister_VkDescriptorSetLayout(VkDescriptorSetLayout setLayout) {
        if (!setLayout) return;

        AutoLock lock(mLock);
        delete as_goldfish_VkDescriptorSetLayout(setLayout)->layoutInfo;
        info_VkDescriptorSetLayout.erase(setLayout);
    }

    VkResult allocAndInitializeDescriptorSets(
        void* context,
        VkDevice device,
        const VkDescriptorSetAllocateInfo* ci,
        VkDescriptorSet* sets) {

        if (mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate) {
            // Using the pool IDs we collected earlier from the host
            VkResult poolAllocResult = validateAndApplyVirtualDescriptorSetAllocation(ci, sets);

            if (poolAllocResult != VK_SUCCESS) return poolAllocResult;

            for (uint32_t i = 0; i < ci->descriptorSetCount; ++i) {
                register_VkDescriptorSet(sets[i]);
                VkDescriptorSetLayout setLayout = as_goldfish_VkDescriptorSet(sets[i])->reified->setLayout;

                // Need to add a ref to the set layout in the virtual case,
                // because the set itself might not be realized on the host
                // at the same time.
                struct goldfish_VkDescriptorSetLayout* dsl = as_goldfish_VkDescriptorSetLayout(setLayout);
                ++dsl->layoutInfo->refcount;
            }
        } else {
            // Pass through and use host allocation
            VkEncoder* enc = (VkEncoder*)context;
            VkResult allocRes = enc->vkAllocateDescriptorSets(device, ci, sets, true /* do lock */);

            if (allocRes != VK_SUCCESS) return allocRes;

            for (uint32_t i = 0; i < ci->descriptorSetCount; ++i) {
                applyDescriptorSetAllocation(ci->descriptorPool, ci->pSetLayouts[i]);
                fillDescriptorSetInfoForPool(ci->descriptorPool, ci->pSetLayouts[i], sets[i]);
            }
        }

        return VK_SUCCESS;
    }

    VkDescriptorImageInfo createImmutableSamplersFilteredImageInfo(
        VkDescriptorType descType,
        VkDescriptorSet descSet,
        uint32_t binding,
        const VkDescriptorImageInfo* pImageInfo) {

        VkDescriptorImageInfo res = *pImageInfo;

        if (descType != VK_DESCRIPTOR_TYPE_SAMPLER &&
            descType != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) return res;

        bool immutableSampler = as_goldfish_VkDescriptorSet(descSet)->reified->bindingIsImmutableSampler[binding];

        if (!immutableSampler) return res;

        res.sampler = 0;

        return res;
    }

    bool descriptorBindingIsImmutableSampler(
        VkDescriptorSet dstSet,
        uint32_t dstBinding) {

        return as_goldfish_VkDescriptorSet(dstSet)->reified->bindingIsImmutableSampler[dstBinding];
    }

    VkDescriptorImageInfo
    filterNonexistentSampler(
        const VkDescriptorImageInfo& inputInfo) {

        VkSampler sampler =
            inputInfo.sampler;

        VkDescriptorImageInfo res = inputInfo;

        if (sampler) {
            auto it = info_VkSampler.find(sampler);
            bool samplerExists = it != info_VkSampler.end();
            if (!samplerExists) res.sampler = 0;
        }

        return res;
    }

    void freeDescriptorSetsIfHostAllocated(VkEncoder* enc, VkDevice device, uint32_t descriptorSetCount, const VkDescriptorSet* sets) {
        for (uint32_t i = 0; i < descriptorSetCount; ++i) {
            struct goldfish_VkDescriptorSet* ds = as_goldfish_VkDescriptorSet(sets[i]);
            if (ds->reified->allocationPending) {
                unregister_VkDescriptorSet(sets[i]);
                delete_goldfish_VkDescriptorSet(sets[i]);
            } else {
                enc->vkFreeDescriptorSets(device, ds->reified->pool, 1, &sets[i], false /* no lock */);
            }
        }
    }

    void clearDescriptorPoolAndUnregisterDescriptorSets(void* context, VkDevice device, VkDescriptorPool pool) {

        std::vector<VkDescriptorSet> toClear =
            clearDescriptorPool(pool, mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate);

        for (auto set : toClear) {
            if (mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate) {
                VkDescriptorSetLayout setLayout = as_goldfish_VkDescriptorSet(set)->reified->setLayout;
                decDescriptorSetLayoutRef(context, device, setLayout, nullptr);
            }
            unregister_VkDescriptorSet(set);
            delete_goldfish_VkDescriptorSet(set);
        }
    }

    void unregister_VkDescriptorPool(VkDescriptorPool pool) {
        if (!pool) return;

        AutoLock lock(mLock);

        struct goldfish_VkDescriptorPool* dp = as_goldfish_VkDescriptorPool(pool);
        delete dp->allocInfo;

        info_VkDescriptorPool.erase(pool);
    }

    bool descriptorPoolSupportsIndividualFreeLocked(VkDescriptorPool pool) {
        return as_goldfish_VkDescriptorPool(pool)->allocInfo->createFlags &
               VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
    }

    static constexpr uint32_t kDefaultApiVersion = VK_MAKE_VERSION(1, 1, 0);

    void setInstanceInfo(VkInstance instance,
                         uint32_t enabledExtensionCount,
                         const char* const* ppEnabledExtensionNames,
                         uint32_t apiVersion) {
        AutoLock lock(mLock);
        auto& info = info_VkInstance[instance];
        info.highestApiVersion = apiVersion;

        if (!ppEnabledExtensionNames) return;

        for (uint32_t i = 0; i < enabledExtensionCount; ++i) {
            info.enabledExtensions.insert(ppEnabledExtensionNames[i]);
        }
    }

    void setDeviceInfo(VkDevice device,
                       VkPhysicalDevice physdev,
                       VkPhysicalDeviceProperties props,
                       VkPhysicalDeviceMemoryProperties memProps,
                       uint32_t enabledExtensionCount,
                       const char* const* ppEnabledExtensionNames,
                       const void* pNext) {
        AutoLock lock(mLock);
        auto& info = info_VkDevice[device];
        info.physdev = physdev;
        info.props = props;
        info.memProps = memProps;
        initHostVisibleMemoryVirtualizationInfo(
            physdev, &memProps,
            mFeatureInfo.get(),
            &mHostVisibleMemoryVirtInfo);
        info.apiVersion = props.apiVersion;

        const VkBaseInStructure *extensionCreateInfo =
            reinterpret_cast<const VkBaseInStructure *>(pNext);
        while(extensionCreateInfo) {
            if(extensionCreateInfo->sType
                == VK_STRUCTURE_TYPE_DEVICE_DEVICE_MEMORY_REPORT_CREATE_INFO_EXT) {
                auto deviceMemoryReportCreateInfo =
                    reinterpret_cast<const VkDeviceDeviceMemoryReportCreateInfoEXT *>(
                        extensionCreateInfo);
                if(deviceMemoryReportCreateInfo->pfnUserCallback != nullptr) {
                    info.deviceMemoryReportCallbacks.emplace_back(
                        deviceMemoryReportCreateInfo->pfnUserCallback,
                        deviceMemoryReportCreateInfo->pUserData);
                }
            }
            extensionCreateInfo = extensionCreateInfo->pNext;
        }

        if (!ppEnabledExtensionNames) return;

        for (uint32_t i = 0; i < enabledExtensionCount; ++i) {
            info.enabledExtensions.insert(ppEnabledExtensionNames[i]);
        }
    }

    void emitDeviceMemoryReport(VkDevice_Info info,
                                VkDeviceMemoryReportEventTypeEXT type,
                                uint64_t memoryObjectId,
                                VkDeviceSize size,
                                VkObjectType objectType,
                                uint64_t objectHandle,
                                uint32_t heapIndex = 0) {
        if(info.deviceMemoryReportCallbacks.empty()) return;

        const VkDeviceMemoryReportCallbackDataEXT callbackData = {
            VK_STRUCTURE_TYPE_DEVICE_MEMORY_REPORT_CALLBACK_DATA_EXT,  // sType
            nullptr,                                                   // pNext
            0,                                                         // flags
            type,                                                      // type
            memoryObjectId,                                            // memoryObjectId
            size,                                                      // size
            objectType,                                                // objectType
            objectHandle,                                              // objectHandle
            heapIndex,                                                 // heapIndex
        };
        for(const auto &callback : info.deviceMemoryReportCallbacks) {
            callback.first(&callbackData, callback.second);
        }
    }

    void setDeviceMemoryInfo(VkDevice device,
                             VkDeviceMemory memory,
                             VkDeviceSize allocationSize,
                             VkDeviceSize mappedSize,
                             uint8_t* ptr,
                             uint32_t memoryTypeIndex,
                             AHardwareBuffer* ahw = nullptr,
                             bool imported = false,
                             zx_handle_t vmoHandle = ZX_HANDLE_INVALID) {
        AutoLock lock(mLock);
        auto& deviceInfo = info_VkDevice[device];
        auto& info = info_VkDeviceMemory[memory];

        info.allocationSize = allocationSize;
        info.mappedSize = mappedSize;
        info.mappedPtr = ptr;
        info.memoryTypeIndex = memoryTypeIndex;
        info.ahw = ahw;
        info.imported = imported;
        info.vmoHandle = vmoHandle;
    }

    void setImageInfo(VkImage image,
                      VkDevice device,
                      const VkImageCreateInfo *pCreateInfo) {
        AutoLock lock(mLock);
        auto& info = info_VkImage[image];

        info.device = device;
        info.createInfo = *pCreateInfo;
    }

    bool isMemoryTypeHostVisible(VkDevice device, uint32_t typeIndex) const {
        AutoLock lock(mLock);
        const auto it = info_VkDevice.find(device);

        if (it == info_VkDevice.end()) return false;

        const auto& info = it->second;
        return info.memProps.memoryTypes[typeIndex].propertyFlags &
               VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    }

    uint8_t* getMappedPointer(VkDeviceMemory memory) {
        AutoLock lock(mLock);
        const auto it = info_VkDeviceMemory.find(memory);
        if (it == info_VkDeviceMemory.end()) return nullptr;

        const auto& info = it->second;
        return info.mappedPtr;
    }

    VkDeviceSize getMappedSize(VkDeviceMemory memory) {
        AutoLock lock(mLock);
        const auto it = info_VkDeviceMemory.find(memory);
        if (it == info_VkDeviceMemory.end()) return 0;

        const auto& info = it->second;
        return info.mappedSize;
    }

    VkDeviceSize getNonCoherentExtendedSize(VkDevice device, VkDeviceSize basicSize) const {
        AutoLock lock(mLock);
        const auto it = info_VkDevice.find(device);
        if (it == info_VkDevice.end()) return basicSize;
        const auto& info = it->second;

        VkDeviceSize nonCoherentAtomSize =
            info.props.limits.nonCoherentAtomSize;
        VkDeviceSize atoms =
            (basicSize + nonCoherentAtomSize - 1) / nonCoherentAtomSize;
        return atoms * nonCoherentAtomSize;
    }
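
    // Worked example for the round-up above (values illustrative): with
    // nonCoherentAtomSize = 64 and basicSize = 100,
    // atoms = (100 + 63) / 64 = 2, so the extended size is 2 * 64 = 128.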

    bool isValidMemoryRange(const VkMappedMemoryRange& range) const {
        AutoLock lock(mLock);
        const auto it = info_VkDeviceMemory.find(range.memory);
        if (it == info_VkDeviceMemory.end()) return false;
        const auto& info = it->second;

        if (!info.mappedPtr) return false;

        VkDeviceSize offset = range.offset;
        VkDeviceSize size = range.size;

        if (size == VK_WHOLE_SIZE) {
            return offset <= info.mappedSize;
        }

        return offset + size <= info.mappedSize;
    }
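
    // A hedged usage sketch (illustrative, not from this file): a caller such
    // as a vkFlushMappedMemoryRanges handler would validate each range first.
    //
    //   VkMappedMemoryRange range = {
    //       VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE, nullptr,
    //       memory, 0 /* offset */, VK_WHOLE_SIZE,
    //   };
    //   if (!isValidMemoryRange(range)) { /* reject the flush/invalidate */ }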

    void setupFeatures(const EmulatorFeatureInfo* features) {
        if (!features || mFeatureInfo) return;
        mFeatureInfo.reset(new EmulatorFeatureInfo);
        *mFeatureInfo = *features;

        if (mFeatureInfo->hasDirectMem) {
            mGoldfishAddressSpaceBlockProvider.reset(
                new GoldfishAddressSpaceBlockProvider(
                    GoldfishAddressSpaceSubdeviceType::NoSubdevice));
        }

#ifdef VK_USE_PLATFORM_FUCHSIA
        if (mFeatureInfo->hasVulkan) {
            fidl::ClientEnd<fuchsia_hardware_goldfish::ControlDevice> channel{
                zx::channel(GetConnectToServiceFunction()("/dev/class/goldfish-control/000"))};
            if (!channel) {
                ALOGE("failed to open control device");
                abort();
            }
            mControlDevice = std::make_unique<
                fidl::WireSyncClient<fuchsia_hardware_goldfish::ControlDevice>>(
                std::move(channel));

            fidl::ClientEnd<fuchsia_sysmem::Allocator> sysmem_channel{
                zx::channel(GetConnectToServiceFunction()("/svc/fuchsia.sysmem.Allocator"))};
            if (!sysmem_channel) {
                ALOGE("failed to open sysmem connection");
            }
            mSysmemAllocator =
                std::make_unique<fidl::WireSyncClient<fuchsia_sysmem::Allocator>>(
                    std::move(sysmem_channel));
            char name[ZX_MAX_NAME_LEN] = {};
            zx_object_get_property(zx_process_self(), ZX_PROP_NAME, name, sizeof(name));
            std::string client_name(name);
            client_name += "-goldfish";
            zx_info_handle_basic_t info;
            zx_object_get_info(zx_process_self(), ZX_INFO_HANDLE_BASIC, &info, sizeof(info),
                               nullptr, nullptr);
            mSysmemAllocator->SetDebugClientInfo(fidl::StringView::FromExternal(client_name),
                                                 info.koid);
        }
#endif

        if (mFeatureInfo->hasVulkanNullOptionalStrings) {
            ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_NULL_OPTIONAL_STRINGS_BIT;
        }
        if (mFeatureInfo->hasVulkanIgnoredHandles) {
            ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT;
        }
        if (mFeatureInfo->hasVulkanShaderFloat16Int8) {
            ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_SHADER_FLOAT16_INT8_BIT;
        }
        if (mFeatureInfo->hasVulkanQueueSubmitWithCommands) {
            ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
        }
#if !defined(HOST_BUILD) && defined(VK_USE_PLATFORM_ANDROID_KHR)
        if (mFeatureInfo->hasVirtioGpuNext) {
            ALOGD("%s: has virtio-gpu-next; create hostmem rendernode\n", __func__);
            mRendernodeFd = drmOpenRender(128 /* RENDERNODE_MINOR */);
        }
#endif
    }

    void setThreadingCallbacks(const ResourceTracker::ThreadingCallbacks& callbacks) {
        ResourceTracker::threadingCallbacks = callbacks;
    }

    bool hostSupportsVulkan() const {
        if (!mFeatureInfo) return false;

        return mFeatureInfo->hasVulkan;
    }

    bool usingDirectMapping() const {
        return mHostVisibleMemoryVirtInfo.virtualizationSupported;
    }

    uint32_t getStreamFeatures() const {
        return ResourceTracker::streamFeatureBits;
    }

    bool supportsDeferredCommands() const {
        if (!mFeatureInfo) return false;
        return mFeatureInfo->hasDeferredVulkanCommands;
    }

    bool supportsAsyncQueueSubmit() const {
        if (!mFeatureInfo) return false;
        return mFeatureInfo->hasVulkanAsyncQueueSubmit;
    }

    bool supportsCreateResourcesWithRequirements() const {
        if (!mFeatureInfo) return false;
        return mFeatureInfo->hasVulkanCreateResourcesWithRequirements;
    }

    int getHostInstanceExtensionIndex(const std::string& extName) const {
        int i = 0;
        for (const auto& prop : mHostInstanceExtensions) {
            if (extName == std::string(prop.extensionName)) {
                return i;
            }
            ++i;
        }
        return -1;
    }

    int getHostDeviceExtensionIndex(const std::string& extName) const {
        int i = 0;
        for (const auto& prop : mHostDeviceExtensions) {
            if (extName == std::string(prop.extensionName)) {
                return i;
            }
            ++i;
        }
        return -1;
    }

    void deviceMemoryTransform_tohost(
        VkDeviceMemory* memory, uint32_t memoryCount,
        VkDeviceSize* offset, uint32_t offsetCount,
        VkDeviceSize* size, uint32_t sizeCount,
        uint32_t* typeIndex, uint32_t typeIndexCount,
        uint32_t* typeBits, uint32_t typeBitsCount) {

        (void)memoryCount;
        (void)offsetCount;
        (void)sizeCount;

        const auto& hostVirt =
            mHostVisibleMemoryVirtInfo;

        if (!hostVirt.virtualizationSupported) return;

        if (memory) {
            AutoLock lock(mLock);

            for (uint32_t i = 0; i < memoryCount; ++i) {
                VkDeviceMemory mem = memory[i];

                auto it = info_VkDeviceMemory.find(mem);
                if (it == info_VkDeviceMemory.end()) return;

                const auto& info = it->second;

                if (!info.directMapped) continue;

                memory[i] = info.subAlloc.baseMemory;

                if (offset) {
                    offset[i] = info.subAlloc.baseOffset + offset[i];
                }

                if (size) {
                    if (size[i] == VK_WHOLE_SIZE) {
                        size[i] = info.subAlloc.subMappedSize;
                    }
                }

                // TODO
                (void)memory;
                (void)offset;
                (void)size;
            }
        }

        for (uint32_t i = 0; i < typeIndexCount; ++i) {
            typeIndex[i] =
                hostVirt.memoryTypeIndexMappingToHost[typeIndex[i]];
        }

        for (uint32_t i = 0; i < typeBitsCount; ++i) {
            uint32_t bits = 0;
            for (uint32_t j = 0; j < VK_MAX_MEMORY_TYPES; ++j) {
                bool guestHas = typeBits[i] & (1 << j);
                uint32_t hostIndex =
                    hostVirt.memoryTypeIndexMappingToHost[j];
                bits |= guestHas ? (1 << hostIndex) : 0;
            }
            typeBits[i] = bits;
        }
    }
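
    // Worked example for the typeBits remap above (the mapping itself is
    // illustrative): if guest memory type 0 maps to host type 2 and guest
    // type 1 maps to host type 0, an incoming mask 0b011 (guest types 0
    // and 1) becomes (1 << 2) | (1 << 0) = 0b101 in host terms.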

    void deviceMemoryTransform_fromhost(
        VkDeviceMemory* memory, uint32_t memoryCount,
        VkDeviceSize* offset, uint32_t offsetCount,
        VkDeviceSize* size, uint32_t sizeCount,
        uint32_t* typeIndex, uint32_t typeIndexCount,
        uint32_t* typeBits, uint32_t typeBitsCount) {

        (void)memoryCount;
        (void)offsetCount;
        (void)sizeCount;

        const auto& hostVirt =
            mHostVisibleMemoryVirtInfo;

        if (!hostVirt.virtualizationSupported) return;

        AutoLock lock(mLock);

        for (uint32_t i = 0; i < memoryCount; ++i) {
            // TODO
            (void)memory;
            (void)offset;
            (void)size;
        }

        for (uint32_t i = 0; i < typeIndexCount; ++i) {
            typeIndex[i] =
                hostVirt.memoryTypeIndexMappingFromHost[typeIndex[i]];
        }

        for (uint32_t i = 0; i < typeBitsCount; ++i) {
            uint32_t bits = 0;
            for (uint32_t j = 0; j < VK_MAX_MEMORY_TYPES; ++j) {
                bool hostHas = typeBits[i] & (1 << j);
                uint32_t guestIndex =
                    hostVirt.memoryTypeIndexMappingFromHost[j];
                bits |= hostHas ? (1 << guestIndex) : 0;

                if (hostVirt.memoryTypeBitsShouldAdvertiseBoth[j]) {
                    bits |= hostHas ? (1 << j) : 0;
                }
            }
            typeBits[i] = bits;
        }
    }

    void transformImpl_VkExternalMemoryProperties_fromhost(
        VkExternalMemoryProperties* pProperties,
        uint32_t) {
        VkExternalMemoryHandleTypeFlags supportedHandleType = 0u;
#ifdef VK_USE_PLATFORM_FUCHSIA
        supportedHandleType |=
            VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA |
            VK_EXTERNAL_MEMORY_HANDLE_TYPE_TEMP_ZIRCON_VMO_BIT_FUCHSIA;
#endif // VK_USE_PLATFORM_FUCHSIA
#ifdef VK_USE_PLATFORM_ANDROID_KHR
        supportedHandleType |=
            VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
            VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
#endif // VK_USE_PLATFORM_ANDROID_KHR
        if (supportedHandleType) {
            pProperties->compatibleHandleTypes &= supportedHandleType;
            pProperties->exportFromImportedHandleTypes &= supportedHandleType;
        }
    }

    VkResult on_vkEnumerateInstanceExtensionProperties(
        void* context,
        VkResult,
        const char*,
        uint32_t* pPropertyCount,
        VkExtensionProperties* pProperties) {
        std::vector<const char*> allowedExtensionNames = {
            "VK_KHR_get_physical_device_properties2",
            "VK_KHR_sampler_ycbcr_conversion",
#ifdef VK_USE_PLATFORM_ANDROID_KHR
            "VK_KHR_external_semaphore_capabilities",
            "VK_KHR_external_memory_capabilities",
            "VK_KHR_external_fence_capabilities",
#endif
        };

        VkEncoder* enc = (VkEncoder*)context;

        // Only advertise a select set of extensions.
        if (mHostInstanceExtensions.empty()) {
            uint32_t hostPropCount = 0;
            enc->vkEnumerateInstanceExtensionProperties(nullptr, &hostPropCount, nullptr, true /* do lock */);
            mHostInstanceExtensions.resize(hostPropCount);

            VkResult hostRes =
                enc->vkEnumerateInstanceExtensionProperties(
                    nullptr, &hostPropCount, mHostInstanceExtensions.data(), true /* do lock */);

            if (hostRes != VK_SUCCESS) {
                return hostRes;
            }
        }

        std::vector<VkExtensionProperties> filteredExts;

        for (size_t i = 0; i < allowedExtensionNames.size(); ++i) {
            auto extIndex = getHostInstanceExtensionIndex(allowedExtensionNames[i]);
            if (extIndex != -1) {
                filteredExts.push_back(mHostInstanceExtensions[extIndex]);
            }
        }

        VkExtensionProperties anbExtProps[] = {
#ifdef VK_USE_PLATFORM_FUCHSIA
            { "VK_KHR_external_memory_capabilities", 1},
            { "VK_KHR_external_semaphore_capabilities", 1},
#endif
        };

        for (auto& anbExtProp: anbExtProps) {
            filteredExts.push_back(anbExtProp);
        }

        // Spec:
        //
        // https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/vkEnumerateInstanceExtensionProperties.html
        //
        // If pProperties is NULL, then the number of extensions properties
        // available is returned in pPropertyCount. Otherwise, pPropertyCount
        // must point to a variable set by the user to the number of elements
        // in the pProperties array, and on return the variable is overwritten
        // with the number of structures actually written to pProperties. If
        // pPropertyCount is less than the number of extension properties
        // available, at most pPropertyCount structures will be written. If
        // pPropertyCount is smaller than the number of extensions available,
        // VK_INCOMPLETE will be returned instead of VK_SUCCESS, to indicate
        // that not all the available properties were returned.
        //
        // pPropertyCount must be a valid pointer to a uint32_t value
        if (!pPropertyCount) return VK_ERROR_INITIALIZATION_FAILED;

        if (!pProperties) {
            *pPropertyCount = (uint32_t)filteredExts.size();
            return VK_SUCCESS;
        } else {
            auto actualExtensionCount = (uint32_t)filteredExts.size();
            if (*pPropertyCount > actualExtensionCount) {
                *pPropertyCount = actualExtensionCount;
            }

            for (uint32_t i = 0; i < *pPropertyCount; ++i) {
                pProperties[i] = filteredExts[i];
            }

            if (actualExtensionCount > *pPropertyCount) {
                return VK_INCOMPLETE;
            }

            return VK_SUCCESS;
        }
    }
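
    // A hedged sketch of the standard two-call pattern this implements, from
    // an application's point of view (illustrative):
    //
    //   uint32_t count = 0;
    //   vkEnumerateInstanceExtensionProperties(nullptr, &count, nullptr);
    //   std::vector<VkExtensionProperties> props(count);
    //   vkEnumerateInstanceExtensionProperties(nullptr, &count, props.data());
    //   // A VK_INCOMPLETE result means the caller's array was too small.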

    VkResult on_vkEnumerateDeviceExtensionProperties(
        void* context,
        VkResult,
        VkPhysicalDevice physdev,
        const char*,
        uint32_t* pPropertyCount,
        VkExtensionProperties* pProperties) {

        std::vector<const char*> allowedExtensionNames = {
            "VK_KHR_vulkan_memory_model",
            "VK_KHR_buffer_device_address",
            "VK_KHR_maintenance1",
            "VK_KHR_maintenance2",
            "VK_KHR_maintenance3",
            "VK_KHR_bind_memory2",
            "VK_KHR_dedicated_allocation",
            "VK_KHR_get_memory_requirements2",
            "VK_KHR_image_format_list",
            "VK_KHR_sampler_ycbcr_conversion",
            "VK_KHR_shader_float16_int8",
            "VK_KHR_timeline_semaphore",
            "VK_AMD_gpu_shader_half_float",
            "VK_NV_shader_subgroup_partitioned",
            "VK_KHR_shader_subgroup_extended_types",
            "VK_EXT_subgroup_size_control",
            "VK_KHR_pipeline_executable_properties",
#ifdef VK_USE_PLATFORM_ANDROID_KHR
            "VK_KHR_external_semaphore",
            "VK_KHR_external_semaphore_fd",
            // "VK_KHR_external_semaphore_win32", not exposed because it's translated to fd
            "VK_KHR_external_memory",
            "VK_KHR_external_fence",
            "VK_KHR_external_fence_fd",
            "VK_EXT_device_memory_report",
#endif
        };

        VkEncoder* enc = (VkEncoder*)context;

        if (mHostDeviceExtensions.empty()) {
            uint32_t hostPropCount = 0;
            enc->vkEnumerateDeviceExtensionProperties(physdev, nullptr, &hostPropCount, nullptr, true /* do lock */);
            mHostDeviceExtensions.resize(hostPropCount);

            VkResult hostRes =
                enc->vkEnumerateDeviceExtensionProperties(
                    physdev, nullptr, &hostPropCount, mHostDeviceExtensions.data(), true /* do lock */);

            if (hostRes != VK_SUCCESS) {
                return hostRes;
            }
        }

        bool hostHasWin32ExternalSemaphore =
            getHostDeviceExtensionIndex(
                "VK_KHR_external_semaphore_win32") != -1;

        bool hostHasPosixExternalSemaphore =
            getHostDeviceExtensionIndex(
                "VK_KHR_external_semaphore_fd") != -1;

        ALOGD("%s: host has ext semaphore? win32 %d posix %d\n", __func__,
              hostHasWin32ExternalSemaphore,
              hostHasPosixExternalSemaphore);

        bool hostSupportsExternalSemaphore =
            hostHasWin32ExternalSemaphore ||
            hostHasPosixExternalSemaphore;

        std::vector<VkExtensionProperties> filteredExts;

        for (size_t i = 0; i < allowedExtensionNames.size(); ++i) {
            auto extIndex = getHostDeviceExtensionIndex(allowedExtensionNames[i]);
            if (extIndex != -1) {
                filteredExts.push_back(mHostDeviceExtensions[extIndex]);
            }
        }

        VkExtensionProperties anbExtProps[] = {
#ifdef VK_USE_PLATFORM_ANDROID_KHR
            { "VK_ANDROID_native_buffer", 7 },
#endif
#ifdef VK_USE_PLATFORM_FUCHSIA
            { "VK_KHR_external_memory", 1 },
            { "VK_KHR_external_semaphore", 1 },
            { "VK_FUCHSIA_external_semaphore", 1 },
#endif
        };

        for (auto& anbExtProp: anbExtProps) {
            filteredExts.push_back(anbExtProp);
        }

#ifdef VK_USE_PLATFORM_ANDROID_KHR
        bool hostSupportsExternalFenceFd =
            getHostDeviceExtensionIndex(
                "VK_KHR_external_fence_fd") != -1;
        if (!hostSupportsExternalFenceFd) {
            filteredExts.push_back({ "VK_KHR_external_fence_fd", 1});
        }
#endif

#ifndef VK_USE_PLATFORM_FUCHSIA
        if (hostSupportsExternalSemaphore &&
            !hostHasPosixExternalSemaphore) {
            filteredExts.push_back(
                { "VK_KHR_external_semaphore_fd", 1});
        }
#endif

        bool win32ExtMemAvailable =
            getHostDeviceExtensionIndex(
                "VK_KHR_external_memory_win32") != -1;
        bool posixExtMemAvailable =
            getHostDeviceExtensionIndex(
                "VK_KHR_external_memory_fd") != -1;
        bool moltenVkExtAvailable =
            getHostDeviceExtensionIndex(
                "VK_MVK_moltenvk") != -1;

        bool hostHasExternalMemorySupport =
            win32ExtMemAvailable || posixExtMemAvailable || moltenVkExtAvailable;

        if (hostHasExternalMemorySupport) {
#ifdef VK_USE_PLATFORM_ANDROID_KHR
            filteredExts.push_back({
                "VK_ANDROID_external_memory_android_hardware_buffer", 7
            });
            filteredExts.push_back({
                "VK_EXT_queue_family_foreign", 1
            });
#endif
#ifdef VK_USE_PLATFORM_FUCHSIA
            filteredExts.push_back({
                "VK_FUCHSIA_external_memory", 1
            });
            filteredExts.push_back({
                "VK_FUCHSIA_buffer_collection", 1
            });
#endif
        }

        // Spec:
        //
        // https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/vkEnumerateDeviceExtensionProperties.html
        //
        // pPropertyCount is a pointer to an integer related to the number of
        // extension properties available or queried, and is treated in the
        // same fashion as the
        // vkEnumerateInstanceExtensionProperties::pPropertyCount parameter.
        //
        // https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/vkEnumerateInstanceExtensionProperties.html
        //
        // If pProperties is NULL, then the number of extensions properties
        // available is returned in pPropertyCount. Otherwise, pPropertyCount
        // must point to a variable set by the user to the number of elements
        // in the pProperties array, and on return the variable is overwritten
        // with the number of structures actually written to pProperties. If
        // pPropertyCount is less than the number of extension properties
        // available, at most pPropertyCount structures will be written. If
        // pPropertyCount is smaller than the number of extensions available,
        // VK_INCOMPLETE will be returned instead of VK_SUCCESS, to indicate
        // that not all the available properties were returned.
        //
        // pPropertyCount must be a valid pointer to a uint32_t value

        if (!pPropertyCount) return VK_ERROR_INITIALIZATION_FAILED;

        if (!pProperties) {
            *pPropertyCount = (uint32_t)filteredExts.size();
            return VK_SUCCESS;
        } else {
            auto actualExtensionCount = (uint32_t)filteredExts.size();
            if (*pPropertyCount > actualExtensionCount) {
                *pPropertyCount = actualExtensionCount;
            }

            for (uint32_t i = 0; i < *pPropertyCount; ++i) {
                pProperties[i] = filteredExts[i];
            }

            if (actualExtensionCount > *pPropertyCount) {
                return VK_INCOMPLETE;
            }

            return VK_SUCCESS;
        }
    }

    VkResult on_vkEnumeratePhysicalDevices(
        void* context, VkResult,
        VkInstance instance, uint32_t* pPhysicalDeviceCount,
        VkPhysicalDevice* pPhysicalDevices) {

        VkEncoder* enc = (VkEncoder*)context;

        if (!instance) return VK_ERROR_INITIALIZATION_FAILED;

        if (!pPhysicalDeviceCount) return VK_ERROR_INITIALIZATION_FAILED;

        AutoLock lock(mLock);

        // When this function is called, we actually need to do two things:
        // - Get full information about physical devices from the host,
        //   even if the guest did not ask for it
        // - Serve the guest query according to the spec:
        //
        //   https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/vkEnumeratePhysicalDevices.html

        auto it = info_VkInstance.find(instance);

        if (it == info_VkInstance.end()) return VK_ERROR_INITIALIZATION_FAILED;

        auto& info = it->second;

        // Get the full host information here if it doesn't exist already.
        if (info.physicalDevices.empty()) {
            uint32_t hostPhysicalDeviceCount = 0;

            lock.unlock();
            VkResult countRes = enc->vkEnumeratePhysicalDevices(
                instance, &hostPhysicalDeviceCount, nullptr, false /* no lock */);
            lock.lock();

            if (countRes != VK_SUCCESS) {
                ALOGE("%s: failed: could not count host physical devices. "
                      "Error %d\n", __func__, countRes);
                return countRes;
            }

            info.physicalDevices.resize(hostPhysicalDeviceCount);

            lock.unlock();
            VkResult enumRes = enc->vkEnumeratePhysicalDevices(
                instance, &hostPhysicalDeviceCount, info.physicalDevices.data(), false /* no lock */);
            lock.lock();

            if (enumRes != VK_SUCCESS) {
                ALOGE("%s: failed: could not retrieve host physical devices. "
                      "Error %d\n", __func__, enumRes);
                return enumRes;
            }
        }

        // Serve the guest query according to the spec.
        //
        // https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/vkEnumeratePhysicalDevices.html
        //
        // If pPhysicalDevices is NULL, then the number of physical devices
        // available is returned in pPhysicalDeviceCount. Otherwise,
        // pPhysicalDeviceCount must point to a variable set by the user to the
        // number of elements in the pPhysicalDevices array, and on return the
        // variable is overwritten with the number of handles actually written
        // to pPhysicalDevices. If pPhysicalDeviceCount is less than the number
        // of physical devices available, at most pPhysicalDeviceCount
        // structures will be written. If pPhysicalDeviceCount is smaller than
        // the number of physical devices available, VK_INCOMPLETE will be
        // returned instead of VK_SUCCESS, to indicate that not all the
        // available physical devices were returned.

        if (!pPhysicalDevices) {
            *pPhysicalDeviceCount = (uint32_t)info.physicalDevices.size();
            return VK_SUCCESS;
        } else {
            uint32_t actualDeviceCount = (uint32_t)info.physicalDevices.size();
            uint32_t toWrite = actualDeviceCount < *pPhysicalDeviceCount ? actualDeviceCount : *pPhysicalDeviceCount;

            for (uint32_t i = 0; i < toWrite; ++i) {
                pPhysicalDevices[i] = info.physicalDevices[i];
            }

            *pPhysicalDeviceCount = toWrite;

            if (actualDeviceCount > *pPhysicalDeviceCount) {
                return VK_INCOMPLETE;
            }

            return VK_SUCCESS;
        }
    }
1593
on_vkGetPhysicalDeviceProperties(void *,VkPhysicalDevice,VkPhysicalDeviceProperties * pProperties)1594 void on_vkGetPhysicalDeviceProperties(
1595 void*,
1596 VkPhysicalDevice,
1597 VkPhysicalDeviceProperties* pProperties) {
1598 // We have host properties at this point
1599 if (pProperties) {
1600 // We need this to ignore some cts tests when using Swiftshader Vk
1601 if (pProperties->deviceType != VK_PHYSICAL_DEVICE_TYPE_CPU) {
1602 // Otherwise, if not CPU type, mark as virtual type
1603 pProperties->deviceType = VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU;
1604 }
1605 }
1606 }
1607
on_vkGetPhysicalDeviceProperties2(void *,VkPhysicalDevice,VkPhysicalDeviceProperties2 * pProperties)1608 void on_vkGetPhysicalDeviceProperties2(
1609 void*,
1610 VkPhysicalDevice,
1611 VkPhysicalDeviceProperties2* pProperties) {
1612 if (pProperties) {
1613 // We need this to ignore some cts tests when using Swiftshader Vk
1614 if (pProperties->properties.deviceType != VK_PHYSICAL_DEVICE_TYPE_CPU) {
1615 // Otherwise, if not CPU type, mark as virtual type
1616 pProperties->properties.deviceType = VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU;
1617 }
1618
1619 VkPhysicalDeviceDeviceMemoryReportFeaturesEXT* memoryReportFeaturesEXT =
1620 vk_find_struct<VkPhysicalDeviceDeviceMemoryReportFeaturesEXT>(pProperties);
1621 if (memoryReportFeaturesEXT) {
1622 memoryReportFeaturesEXT->deviceMemoryReport = VK_TRUE;
1623 }
1624 }
1625 }
1626
1627     void on_vkGetPhysicalDeviceMemoryProperties(
1628 void*,
1629 VkPhysicalDevice physdev,
1630 VkPhysicalDeviceMemoryProperties* out) {
1631
1632 initHostVisibleMemoryVirtualizationInfo(
1633 physdev,
1634 out,
1635 mFeatureInfo.get(),
1636 &mHostVisibleMemoryVirtInfo);
1637
1638 if (mHostVisibleMemoryVirtInfo.virtualizationSupported) {
1639 *out = mHostVisibleMemoryVirtInfo.guestMemoryProperties;
1640 }
1641 }
1642
1643     void on_vkGetPhysicalDeviceMemoryProperties2(
1644 void*,
1645 VkPhysicalDevice physdev,
1646 VkPhysicalDeviceMemoryProperties2* out) {
1647
1648 initHostVisibleMemoryVirtualizationInfo(
1649 physdev,
1650 &out->memoryProperties,
1651 mFeatureInfo.get(),
1652 &mHostVisibleMemoryVirtInfo);
1653
1654 if (mHostVisibleMemoryVirtInfo.virtualizationSupported) {
1655 out->memoryProperties = mHostVisibleMemoryVirtInfo.guestMemoryProperties;
1656 }
1657 }
1658
1659     void on_vkGetDeviceQueue(void*,
1660 VkDevice device,
1661 uint32_t,
1662 uint32_t,
1663 VkQueue* pQueue) {
1664 AutoLock lock(mLock);
1665 info_VkQueue[*pQueue].device = device;
1666 }
1667
1668     void on_vkGetDeviceQueue2(void*,
1669 VkDevice device,
1670 const VkDeviceQueueInfo2*,
1671 VkQueue* pQueue) {
1672 AutoLock lock(mLock);
1673 info_VkQueue[*pQueue].device = device;
1674 }
1675
1676     VkResult on_vkCreateInstance(
1677 void* context,
1678 VkResult input_result,
1679 const VkInstanceCreateInfo* createInfo,
1680 const VkAllocationCallbacks*,
1681 VkInstance* pInstance) {
1682
1683 if (input_result != VK_SUCCESS) return input_result;
1684
1685 VkEncoder* enc = (VkEncoder*)context;
1686
1687         uint32_t apiVersion = VK_API_VERSION_1_0;  // fallback if the host query fails
1688         VkResult enumInstanceVersionRes =
1689             enc->vkEnumerateInstanceVersion(&apiVersion, false /* no lock */);
         (void)enumInstanceVersionRes;  // on failure we keep the 1.0 fallback above
1690
1691 setInstanceInfo(
1692 *pInstance,
1693 createInfo->enabledExtensionCount,
1694 createInfo->ppEnabledExtensionNames,
1695 apiVersion);
1696
1697 return input_result;
1698 }
1699
1700     VkResult on_vkCreateDevice(
1701 void* context,
1702 VkResult input_result,
1703 VkPhysicalDevice physicalDevice,
1704 const VkDeviceCreateInfo* pCreateInfo,
1705 const VkAllocationCallbacks*,
1706 VkDevice* pDevice) {
1707
1708 if (input_result != VK_SUCCESS) return input_result;
1709
1710 VkEncoder* enc = (VkEncoder*)context;
1711
1712 VkPhysicalDeviceProperties props;
1713 VkPhysicalDeviceMemoryProperties memProps;
1714 enc->vkGetPhysicalDeviceProperties(physicalDevice, &props, false /* no lock */);
1715 enc->vkGetPhysicalDeviceMemoryProperties(physicalDevice, &memProps, false /* no lock */);
1716
1717 setDeviceInfo(
1718 *pDevice, physicalDevice, props, memProps,
1719 pCreateInfo->enabledExtensionCount, pCreateInfo->ppEnabledExtensionNames,
1720 pCreateInfo->pNext);
1721
1722 return input_result;
1723 }
1724
1725     void on_vkDestroyDevice_pre(
1726 void* context,
1727 VkDevice device,
1728 const VkAllocationCallbacks*) {
1729
1730 AutoLock lock(mLock);
1731
1732 auto it = info_VkDevice.find(device);
1733 if (it == info_VkDevice.end()) return;
1734 auto info = it->second;
1735
1736 lock.unlock();
1737
1738 VkEncoder* enc = (VkEncoder*)context;
1739
1740 bool freeMemorySyncSupported =
1741 mFeatureInfo->hasVulkanFreeMemorySync;
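        // Tear down every sub-allocated host-visible block owned by this
        // device; destroyHostMemAlloc() releases the host backing,
        // synchronizing with the host when free-memory-sync is supported.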
1742 for (uint32_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i) {
1743 for (auto& block : info.hostMemBlocks[i]) {
1744 destroyHostMemAlloc(
1745 freeMemorySyncSupported,
1746 enc, device, &block);
1747 }
1748 }
1749 }
1750
1751     VkResult on_vkGetAndroidHardwareBufferPropertiesANDROID(
1752 void*, VkResult,
1753 VkDevice device,
1754 const AHardwareBuffer* buffer,
1755 VkAndroidHardwareBufferPropertiesANDROID* pProperties) {
1756 auto grallocHelper =
1757 ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->grallocHelper();
1758 return getAndroidHardwareBufferPropertiesANDROID(
1759 grallocHelper,
1760 &mHostVisibleMemoryVirtInfo,
1761 device, buffer, pProperties);
1762 }
1763
1764     VkResult on_vkGetMemoryAndroidHardwareBufferANDROID(
1765 void*, VkResult,
1766 VkDevice device,
1767 const VkMemoryGetAndroidHardwareBufferInfoANDROID *pInfo,
1768 struct AHardwareBuffer** pBuffer) {
1769
1770 if (!pInfo) return VK_ERROR_INITIALIZATION_FAILED;
1771 if (!pInfo->memory) return VK_ERROR_INITIALIZATION_FAILED;
1772
1773 AutoLock lock(mLock);
1774
1775 auto deviceIt = info_VkDevice.find(device);
1776
1777 if (deviceIt == info_VkDevice.end()) {
1778 return VK_ERROR_INITIALIZATION_FAILED;
1779 }
1780
1781 auto memoryIt = info_VkDeviceMemory.find(pInfo->memory);
1782
1783 if (memoryIt == info_VkDeviceMemory.end()) {
1784 return VK_ERROR_INITIALIZATION_FAILED;
1785 }
1786
1787 auto& info = memoryIt->second;
1788
1789 VkResult queryRes =
1790 getMemoryAndroidHardwareBufferANDROID(&info.ahw);
1791
1792 if (queryRes != VK_SUCCESS) return queryRes;
1793
1794 *pBuffer = info.ahw;
1795
1796 return queryRes;
1797 }
1798
1799 #ifdef VK_USE_PLATFORM_FUCHSIA
1800     VkResult on_vkGetMemoryZirconHandleFUCHSIA(
1801 void*, VkResult,
1802 VkDevice device,
1803 const VkMemoryGetZirconHandleInfoFUCHSIA* pInfo,
1804 uint32_t* pHandle) {
1805
1806 if (!pInfo) return VK_ERROR_INITIALIZATION_FAILED;
1807 if (!pInfo->memory) return VK_ERROR_INITIALIZATION_FAILED;
1808
1809 AutoLock lock(mLock);
1810
1811 auto deviceIt = info_VkDevice.find(device);
1812
1813 if (deviceIt == info_VkDevice.end()) {
1814 return VK_ERROR_INITIALIZATION_FAILED;
1815 }
1816
1817 auto memoryIt = info_VkDeviceMemory.find(pInfo->memory);
1818
1819 if (memoryIt == info_VkDeviceMemory.end()) {
1820 return VK_ERROR_INITIALIZATION_FAILED;
1821 }
1822
1823 auto& info = memoryIt->second;
1824
1825 if (info.vmoHandle == ZX_HANDLE_INVALID) {
1826 ALOGE("%s: memory cannot be exported", __func__);
1827 return VK_ERROR_INITIALIZATION_FAILED;
1828 }
1829
1830 *pHandle = ZX_HANDLE_INVALID;
1831 zx_handle_duplicate(info.vmoHandle, ZX_RIGHT_SAME_RIGHTS, pHandle);
1832 return VK_SUCCESS;
1833 }
1834
1835     VkResult on_vkGetMemoryZirconHandlePropertiesFUCHSIA(
1836 void*, VkResult,
1837 VkDevice device,
1838 VkExternalMemoryHandleTypeFlagBits handleType,
1839 uint32_t handle,
1840 VkMemoryZirconHandlePropertiesFUCHSIA* pProperties) {
1841 using fuchsia_hardware_goldfish::wire::kMemoryPropertyDeviceLocal;
1842 using fuchsia_hardware_goldfish::wire::kMemoryPropertyHostVisible;
1843
1844 if (handleType != VK_EXTERNAL_MEMORY_HANDLE_TYPE_TEMP_ZIRCON_VMO_BIT_FUCHSIA &&
1845 handleType != VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA) {
1846 return VK_ERROR_INITIALIZATION_FAILED;
1847 }
1848
1849 zx_info_handle_basic_t handleInfo;
1850 zx_status_t status = zx::unowned_vmo(handle)->get_info(
1851 ZX_INFO_HANDLE_BASIC, &handleInfo, sizeof(handleInfo), nullptr,
1852 nullptr);
1853 if (status != ZX_OK || handleInfo.type != ZX_OBJ_TYPE_VMO) {
1854 return VK_ERROR_INVALID_EXTERNAL_HANDLE;
1855 }
1856
1857 AutoLock lock(mLock);
1858
1859 auto deviceIt = info_VkDevice.find(device);
1860
1861 if (deviceIt == info_VkDevice.end()) {
1862 return VK_ERROR_INITIALIZATION_FAILED;
1863 }
1864
1865 auto& info = deviceIt->second;
1866
1867 zx::vmo vmo_dup;
1868 status =
1869 zx::unowned_vmo(handle)->duplicate(ZX_RIGHT_SAME_RIGHTS, &vmo_dup);
1870 if (status != ZX_OK) {
1871 ALOGE("zx_handle_duplicate() error: %d", status);
1872 return VK_ERROR_INITIALIZATION_FAILED;
1873 }
1874
1875 uint32_t memoryProperty = 0u;
1876
1877 auto result = mControlDevice->GetBufferHandleInfo(std::move(vmo_dup));
1878 if (!result.ok()) {
1879 ALOGE(
1880 "mControlDevice->GetBufferHandleInfo fatal error: epitaph: %d",
1881 result.status());
1882 return VK_ERROR_INITIALIZATION_FAILED;
1883 }
1884 if (result->result.is_response()) {
1885 memoryProperty = result->result.response().info.memory_property();
1886 } else if (result->result.err() == ZX_ERR_NOT_FOUND) {
1887             // If a VMO is allocated while ColorBuffer/Buffer is not created,
1888 // it must be a device-local buffer, since for host-visible buffers,
1889 // ColorBuffer/Buffer is created at sysmem allocation time.
1890 memoryProperty = kMemoryPropertyDeviceLocal;
1891 } else {
1892 // Importing read-only host memory into the Vulkan driver should not
1893 // work, but it is not an error to try to do so. Returning a
1894 // VkMemoryZirconHandlePropertiesFUCHSIA with no available
1895 // memoryType bits should be enough for clients. See fxbug.dev/24225
1896             // for other issues with this flow.
1897 ALOGW("GetBufferHandleInfo failed: %d", result->result.err());
1898 pProperties->memoryTypeBits = 0;
1899 return VK_SUCCESS;
1900 }
1901
1902 pProperties->memoryTypeBits = 0;
1903 for (uint32_t i = 0; i < info.memProps.memoryTypeCount; ++i) {
1904 if (((memoryProperty & kMemoryPropertyDeviceLocal) &&
1905 (info.memProps.memoryTypes[i].propertyFlags &
1906 VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)) ||
1907 ((memoryProperty & kMemoryPropertyHostVisible) &&
1908 (info.memProps.memoryTypes[i].propertyFlags &
1909 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT))) {
1910 pProperties->memoryTypeBits |= 1ull << i;
1911 }
1912 }
1913 return VK_SUCCESS;
1914 }
1915
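    // A koid (zircon kernel object ID) names the kernel object itself, so it
    // stays stable across zx_handle_duplicate(); recording it lets us
    // recognize when two imported handles refer to the same event.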
1916     zx_koid_t getEventKoid(zx_handle_t eventHandle) {
1917 if (eventHandle == ZX_HANDLE_INVALID) {
1918 return ZX_KOID_INVALID;
1919 }
1920
1921 zx_info_handle_basic_t info;
1922 zx_status_t status =
1923 zx_object_get_info(eventHandle, ZX_INFO_HANDLE_BASIC, &info,
1924 sizeof(info), nullptr, nullptr);
1925 if (status != ZX_OK) {
1926 ALOGE("Cannot get object info of handle %u: %d", eventHandle,
1927 status);
1928 return ZX_KOID_INVALID;
1929 }
1930 return info.koid;
1931 }
1932
1933     VkResult on_vkImportSemaphoreZirconHandleFUCHSIA(
1934 void*, VkResult,
1935 VkDevice device,
1936 const VkImportSemaphoreZirconHandleInfoFUCHSIA* pInfo) {
1937
1938 if (!pInfo) return VK_ERROR_INITIALIZATION_FAILED;
1939 if (!pInfo->semaphore) return VK_ERROR_INITIALIZATION_FAILED;
1940
1941 AutoLock lock(mLock);
1942
1943 auto deviceIt = info_VkDevice.find(device);
1944
1945 if (deviceIt == info_VkDevice.end()) {
1946 return VK_ERROR_INITIALIZATION_FAILED;
1947 }
1948
1949 auto semaphoreIt = info_VkSemaphore.find(pInfo->semaphore);
1950
1951 if (semaphoreIt == info_VkSemaphore.end()) {
1952 return VK_ERROR_INITIALIZATION_FAILED;
1953 }
1954
1955 auto& info = semaphoreIt->second;
1956
1957 if (info.eventHandle != ZX_HANDLE_INVALID) {
1958 zx_handle_close(info.eventHandle);
1959 }
1960 #if VK_HEADER_VERSION < 174
1961 info.eventHandle = pInfo->handle;
1962 #else // VK_HEADER_VERSION >= 174
1963 info.eventHandle = pInfo->zirconHandle;
1964 #endif // VK_HEADER_VERSION < 174
1965 if (info.eventHandle != ZX_HANDLE_INVALID) {
1966 info.eventKoid = getEventKoid(info.eventHandle);
1967 }
1968
1969 return VK_SUCCESS;
1970 }
1971
1972     VkResult on_vkGetSemaphoreZirconHandleFUCHSIA(
1973 void*, VkResult,
1974 VkDevice device,
1975 const VkSemaphoreGetZirconHandleInfoFUCHSIA* pInfo,
1976 uint32_t* pHandle) {
1977
1978 if (!pInfo) return VK_ERROR_INITIALIZATION_FAILED;
1979 if (!pInfo->semaphore) return VK_ERROR_INITIALIZATION_FAILED;
1980
1981 AutoLock lock(mLock);
1982
1983 auto deviceIt = info_VkDevice.find(device);
1984
1985 if (deviceIt == info_VkDevice.end()) {
1986 return VK_ERROR_INITIALIZATION_FAILED;
1987 }
1988
1989 auto semaphoreIt = info_VkSemaphore.find(pInfo->semaphore);
1990
1991 if (semaphoreIt == info_VkSemaphore.end()) {
1992 return VK_ERROR_INITIALIZATION_FAILED;
1993 }
1994
1995 auto& info = semaphoreIt->second;
1996
1997 if (info.eventHandle == ZX_HANDLE_INVALID) {
1998 return VK_ERROR_INITIALIZATION_FAILED;
1999 }
2000
2001 *pHandle = ZX_HANDLE_INVALID;
2002 zx_handle_duplicate(info.eventHandle, ZX_RIGHT_SAME_RIGHTS, pHandle);
2003 return VK_SUCCESS;
2004 }
2005
2006     VkResult on_vkCreateBufferCollectionFUCHSIA(
2007 void*, VkResult, VkDevice,
2008 const VkBufferCollectionCreateInfoFUCHSIA* pInfo,
2009 const VkAllocationCallbacks*,
2010 VkBufferCollectionFUCHSIA* pCollection) {
2011 fidl::ClientEnd<::fuchsia_sysmem::BufferCollectionToken> token_client;
2012
2013 if (pInfo->collectionToken) {
2014 token_client = fidl::ClientEnd<::fuchsia_sysmem::BufferCollectionToken>(
2015 zx::channel(pInfo->collectionToken));
2016 } else {
2017 auto endpoints =
2018 fidl::CreateEndpoints<::fuchsia_sysmem::BufferCollectionToken>();
2019 if (!endpoints.is_ok()) {
2020                 ALOGE("CreateEndpoints failed: %d", endpoints.status_value());
2021 return VK_ERROR_INITIALIZATION_FAILED;
2022 }
2023
2024 auto result = mSysmemAllocator->AllocateSharedCollection(
2025 std::move(endpoints->server));
2026 if (!result.ok()) {
2027 ALOGE("AllocateSharedCollection failed: %d", result.status());
2028 return VK_ERROR_INITIALIZATION_FAILED;
2029 }
2030 token_client = std::move(endpoints->client);
2031 }
2032
2033 auto endpoints = fidl::CreateEndpoints<::fuchsia_sysmem::BufferCollection>();
2034 if (!endpoints.is_ok()) {
2035             ALOGE("CreateEndpoints failed: %d", endpoints.status_value());
2036 return VK_ERROR_INITIALIZATION_FAILED;
2037 }
2038 auto [collection_client, collection_server] = std::move(endpoints.value());
2039
2040 auto result = mSysmemAllocator->BindSharedCollection(
2041 std::move(token_client), std::move(collection_server));
2042 if (!result.ok()) {
2043 ALOGE("BindSharedCollection failed: %d", result.status());
2044 return VK_ERROR_INITIALIZATION_FAILED;
2045 }
2046
2047 auto* sysmem_collection =
2048 new fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>(
2049 std::move(collection_client));
2050 *pCollection = reinterpret_cast<VkBufferCollectionFUCHSIA>(sysmem_collection);
2051
2052 register_VkBufferCollectionFUCHSIA(*pCollection);
2053 return VK_SUCCESS;
2054 }
2055
2056     void on_vkDestroyBufferCollectionFUCHSIA(
2057 void*, VkResult, VkDevice,
2058 VkBufferCollectionFUCHSIA collection,
2059 const VkAllocationCallbacks*) {
2060 auto sysmem_collection = reinterpret_cast<
2061 fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(collection);
2062 if (sysmem_collection) {
2063 sysmem_collection->Close();
2064 }
2065 delete sysmem_collection;
2066
2067 unregister_VkBufferCollectionFUCHSIA(collection);
2068 }
2069
2070 inline fuchsia_sysmem::wire::BufferCollectionConstraints
2071     defaultBufferCollectionConstraints(
2072 size_t minSizeBytes,
2073 size_t minBufferCount,
2074 size_t maxBufferCount = 0u,
2075 size_t minBufferCountForCamping = 0u,
2076 size_t minBufferCountForDedicatedSlack = 0u,
2077 size_t minBufferCountForSharedSlack = 0u) {
2078 fuchsia_sysmem::wire::BufferCollectionConstraints constraints = {};
2079 constraints.min_buffer_count = minBufferCount;
2080 if (maxBufferCount > 0) {
2081 constraints.max_buffer_count = maxBufferCount;
2082 }
2083 if (minBufferCountForCamping) {
2084 constraints.min_buffer_count_for_camping = minBufferCountForCamping;
2085 }
2086 if (minBufferCountForSharedSlack) {
2087 constraints.min_buffer_count_for_shared_slack =
2088 minBufferCountForSharedSlack;
2089 }
2090 constraints.has_buffer_memory_constraints = true;
2091 fuchsia_sysmem::wire::BufferMemoryConstraints& buffer_constraints =
2092 constraints.buffer_memory_constraints;
2093
2094 buffer_constraints.min_size_bytes = minSizeBytes;
2095 buffer_constraints.max_size_bytes = 0xffffffff;
2096 buffer_constraints.physically_contiguous_required = false;
2097 buffer_constraints.secure_required = false;
2098
2099 // No restrictions on coherency domain or Heaps.
2100 buffer_constraints.ram_domain_supported = true;
2101 buffer_constraints.cpu_domain_supported = true;
2102 buffer_constraints.inaccessible_domain_supported = true;
2103 buffer_constraints.heap_permitted_count = 2;
2104 buffer_constraints.heap_permitted[0] =
2105 fuchsia_sysmem::wire::HeapType::kGoldfishDeviceLocal;
2106 buffer_constraints.heap_permitted[1] =
2107 fuchsia_sysmem::wire::HeapType::kGoldfishHostVisible;
2108
2109 return constraints;
2110 }
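    // Illustrative call (hypothetical values): request at least one 4 MiB
    // buffer with otherwise default constraints:
    //
    //   auto constraints = defaultBufferCollectionConstraints(
    //       /* minSizeBytes */ 4 * 1024 * 1024,
    //       /* minBufferCount */ 1);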
2111
2112     uint32_t getBufferCollectionConstraintsVulkanImageUsage(
2113 const VkImageCreateInfo* pImageInfo) {
2114 uint32_t usage = 0u;
2115 VkImageUsageFlags imageUsage = pImageInfo->usage;
2116
2117 #define SetUsageBit(BIT, VALUE) \
2118 if (imageUsage & VK_IMAGE_USAGE_##BIT##_BIT) { \
2119 usage |= fuchsia_sysmem::wire::kVulkanImageUsage##VALUE; \
2120 }
2121
2122 SetUsageBit(COLOR_ATTACHMENT, ColorAttachment);
2123 SetUsageBit(TRANSFER_SRC, TransferSrc);
2124 SetUsageBit(TRANSFER_DST, TransferDst);
2125 SetUsageBit(SAMPLED, Sampled);
2126
2127 #undef SetUsageBit
2128 return usage;
2129 }
2130
2131     uint32_t getBufferCollectionConstraintsVulkanBufferUsage(
2132 const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo) {
2133 uint32_t usage = 0u;
2134 VkBufferUsageFlags bufferUsage =
2135 pBufferConstraintsInfo->pBufferCreateInfo->usage;
2136
2137 #define SetUsageBit(BIT, VALUE) \
2138 if (bufferUsage & VK_BUFFER_USAGE_##BIT##_BIT) { \
2139 usage |= fuchsia_sysmem::wire::kVulkanBufferUsage##VALUE; \
2140 }
2141
2142 SetUsageBit(TRANSFER_SRC, TransferSrc);
2143 SetUsageBit(TRANSFER_DST, TransferDst);
2144 SetUsageBit(UNIFORM_TEXEL_BUFFER, UniformTexelBuffer);
2145 SetUsageBit(STORAGE_TEXEL_BUFFER, StorageTexelBuffer);
2146 SetUsageBit(UNIFORM_BUFFER, UniformBuffer);
2147 SetUsageBit(STORAGE_BUFFER, StorageBuffer);
2148 SetUsageBit(INDEX_BUFFER, IndexBuffer);
2149 SetUsageBit(VERTEX_BUFFER, VertexBuffer);
2150 SetUsageBit(INDIRECT_BUFFER, IndirectBuffer);
2151
2152 #undef SetUsageBit
2153 return usage;
2154 }
2155
2156     static fuchsia_sysmem::wire::PixelFormatType vkFormatTypeToSysmem(
2157 VkFormat format) {
2158 switch (format) {
2159 case VK_FORMAT_B8G8R8A8_SINT:
2160 case VK_FORMAT_B8G8R8A8_UNORM:
2161 case VK_FORMAT_B8G8R8A8_SRGB:
2162 case VK_FORMAT_B8G8R8A8_SNORM:
2163 case VK_FORMAT_B8G8R8A8_SSCALED:
2164 case VK_FORMAT_B8G8R8A8_USCALED:
2165 return fuchsia_sysmem::wire::PixelFormatType::kBgra32;
2166 case VK_FORMAT_R8G8B8A8_SINT:
2167 case VK_FORMAT_R8G8B8A8_UNORM:
2168 case VK_FORMAT_R8G8B8A8_SRGB:
2169 case VK_FORMAT_R8G8B8A8_SNORM:
2170 case VK_FORMAT_R8G8B8A8_SSCALED:
2171 case VK_FORMAT_R8G8B8A8_USCALED:
2172 return fuchsia_sysmem::wire::PixelFormatType::kR8G8B8A8;
2173 case VK_FORMAT_R8_UNORM:
2174 case VK_FORMAT_R8_UINT:
2175 case VK_FORMAT_R8_USCALED:
2176 case VK_FORMAT_R8_SNORM:
2177 case VK_FORMAT_R8_SINT:
2178 case VK_FORMAT_R8_SSCALED:
2179 case VK_FORMAT_R8_SRGB:
2180 return fuchsia_sysmem::wire::PixelFormatType::kR8;
2181 case VK_FORMAT_R8G8_UNORM:
2182 case VK_FORMAT_R8G8_UINT:
2183 case VK_FORMAT_R8G8_USCALED:
2184 case VK_FORMAT_R8G8_SNORM:
2185 case VK_FORMAT_R8G8_SINT:
2186 case VK_FORMAT_R8G8_SSCALED:
2187 case VK_FORMAT_R8G8_SRGB:
2188 return fuchsia_sysmem::wire::PixelFormatType::kR8G8;
2189 default:
2190 return fuchsia_sysmem::wire::PixelFormatType::kInvalid;
2191 }
2192 }
2193
2194     static bool vkFormatMatchesSysmemFormat(
2195 VkFormat vkFormat,
2196 fuchsia_sysmem::wire::PixelFormatType sysmemFormat) {
2197 switch (vkFormat) {
2198 case VK_FORMAT_B8G8R8A8_SINT:
2199 case VK_FORMAT_B8G8R8A8_UNORM:
2200 case VK_FORMAT_B8G8R8A8_SRGB:
2201 case VK_FORMAT_B8G8R8A8_SNORM:
2202 case VK_FORMAT_B8G8R8A8_SSCALED:
2203 case VK_FORMAT_B8G8R8A8_USCALED:
2204 return sysmemFormat ==
2205 fuchsia_sysmem::wire::PixelFormatType::kBgra32;
2206 case VK_FORMAT_R8G8B8A8_SINT:
2207 case VK_FORMAT_R8G8B8A8_UNORM:
2208 case VK_FORMAT_R8G8B8A8_SRGB:
2209 case VK_FORMAT_R8G8B8A8_SNORM:
2210 case VK_FORMAT_R8G8B8A8_SSCALED:
2211 case VK_FORMAT_R8G8B8A8_USCALED:
2212 return sysmemFormat ==
2213 fuchsia_sysmem::wire::PixelFormatType::kR8G8B8A8;
2214 case VK_FORMAT_R8_UNORM:
2215 case VK_FORMAT_R8_UINT:
2216 case VK_FORMAT_R8_USCALED:
2217 case VK_FORMAT_R8_SNORM:
2218 case VK_FORMAT_R8_SINT:
2219 case VK_FORMAT_R8_SSCALED:
2220 case VK_FORMAT_R8_SRGB:
2221 return sysmemFormat ==
2222 fuchsia_sysmem::wire::PixelFormatType::kR8 ||
2223 sysmemFormat ==
2224 fuchsia_sysmem::wire::PixelFormatType::kL8;
2225 case VK_FORMAT_R8G8_UNORM:
2226 case VK_FORMAT_R8G8_UINT:
2227 case VK_FORMAT_R8G8_USCALED:
2228 case VK_FORMAT_R8G8_SNORM:
2229 case VK_FORMAT_R8G8_SINT:
2230 case VK_FORMAT_R8G8_SSCALED:
2231 case VK_FORMAT_R8G8_SRGB:
2232 return sysmemFormat ==
2233 fuchsia_sysmem::wire::PixelFormatType::kR8G8;
2234 default:
2235 return false;
2236 }
2237 }
2238
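    // Note: this inverse mapping is lossy; e.g. every kBgra32 collection maps
    // back to VK_FORMAT_B8G8R8A8_SRGB even if it was originally constrained
    // with a UNORM or SNORM VkFormat.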
2239     static VkFormat sysmemPixelFormatTypeToVk(
2240 fuchsia_sysmem::wire::PixelFormatType format) {
2241 switch (format) {
2242 case fuchsia_sysmem::wire::PixelFormatType::kBgra32:
2243 return VK_FORMAT_B8G8R8A8_SRGB;
2244 case fuchsia_sysmem::wire::PixelFormatType::kR8G8B8A8:
2245 return VK_FORMAT_R8G8B8A8_SRGB;
2246 case fuchsia_sysmem::wire::PixelFormatType::kL8:
2247 case fuchsia_sysmem::wire::PixelFormatType::kR8:
2248 return VK_FORMAT_R8_UNORM;
2249 case fuchsia_sysmem::wire::PixelFormatType::kR8G8:
2250 return VK_FORMAT_R8G8_UNORM;
2251 default:
2252 return VK_FORMAT_UNDEFINED;
2253 }
2254 }
2255
2256     VkResult setBufferCollectionConstraints(
2257 VkEncoder* enc, VkDevice device,
2258 fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>* collection,
2259 const VkImageCreateInfo* pImageInfo) {
2260 if (pImageInfo == nullptr) {
2261 ALOGE("setBufferCollectionConstraints: pImageInfo cannot be null.");
2262 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
2263 }
2264
2265 std::vector<VkImageCreateInfo> createInfos;
2266 if (pImageInfo->format == VK_FORMAT_UNDEFINED) {
2267 const auto kFormats = {
2268 VK_FORMAT_B8G8R8A8_SRGB,
2269 VK_FORMAT_R8G8B8A8_SRGB,
2270 };
2271 for (auto format : kFormats) {
2272 // shallow copy, using pNext from pImageInfo directly.
2273 auto createInfo = *pImageInfo;
2274 createInfo.format = format;
2275 createInfos.push_back(createInfo);
2276 }
2277 } else {
2278 createInfos.push_back(*pImageInfo);
2279 }
2280
2281 VkImageConstraintsInfoFUCHSIA imageConstraints;
2282 imageConstraints.sType =
2283 VK_STRUCTURE_TYPE_IMAGE_CONSTRAINTS_INFO_FUCHSIA;
2284 imageConstraints.pNext = nullptr;
2285 imageConstraints.createInfoCount = createInfos.size();
2286 imageConstraints.pCreateInfos = createInfos.data();
2287 imageConstraints.pFormatConstraints = nullptr;
2288 imageConstraints.maxBufferCount = 0;
2289 imageConstraints.minBufferCount = 1;
2290 imageConstraints.minBufferCountForCamping = 0;
2291 imageConstraints.minBufferCountForDedicatedSlack = 0;
2292 imageConstraints.minBufferCountForSharedSlack = 0;
2293 imageConstraints.flags = 0u;
2294
2295 return setBufferCollectionImageConstraints(enc, device, collection,
2296 &imageConstraints);
2297 }
2298
2299     VkResult addImageBufferCollectionConstraints(
2300 VkEncoder* enc,
2301 VkDevice device,
2302 VkPhysicalDevice physicalDevice,
2303 const VkImageCreateInfo* createInfo,
2304 const VkImageFormatConstraintsInfoFUCHSIA* formatConstraints,
2305 VkImageTiling tiling,
2306 fuchsia_sysmem::wire::BufferCollectionConstraints* constraints) {
2307 // First check if the format, tiling and usage is supported on host.
2308 VkImageFormatProperties imageFormatProperties;
2309 auto result = enc->vkGetPhysicalDeviceImageFormatProperties(
2310 physicalDevice, createInfo->format, createInfo->imageType, tiling,
2311 createInfo->usage, createInfo->flags, &imageFormatProperties,
2312 true /* do lock */);
2313 if (result != VK_SUCCESS) {
2314 ALOGW(
2315 "%s: Image format (%u) type (%u) tiling (%u) "
2316 "usage (%u) flags (%u) not supported by physical "
2317 "device",
2318 __func__, static_cast<uint32_t>(createInfo->format),
2319 static_cast<uint32_t>(createInfo->imageType),
2320 static_cast<uint32_t>(tiling),
2321 static_cast<uint32_t>(createInfo->usage),
2322 static_cast<uint32_t>(createInfo->flags));
2323 return VK_ERROR_FORMAT_NOT_SUPPORTED;
2324 }
2325
2326 // Check if format constraints contains unsupported format features.
2327 if (formatConstraints) {
2328 VkFormatProperties formatProperties;
2329 enc->vkGetPhysicalDeviceFormatProperties(
2330 physicalDevice, createInfo->format, &formatProperties,
2331 true /* do lock */);
2332
2333 auto supportedFeatures =
2334 (tiling == VK_IMAGE_TILING_LINEAR)
2335 ? formatProperties.linearTilingFeatures
2336 : formatProperties.optimalTilingFeatures;
2337 auto requiredFeatures = formatConstraints->requiredFormatFeatures;
2338 if ((~supportedFeatures) & requiredFeatures) {
2339 ALOGW(
2340 "%s: Host device support features for %s tiling: %08x, "
2341 "required features: %08x, feature bits %08x missing",
2342 __func__,
2343 tiling == VK_IMAGE_TILING_LINEAR ? "LINEAR" : "OPTIMAL",
2344 static_cast<uint32_t>(requiredFeatures),
2345 static_cast<uint32_t>(supportedFeatures),
2346 static_cast<uint32_t>((~supportedFeatures) &
2347 requiredFeatures));
2348 return VK_ERROR_FORMAT_NOT_SUPPORTED;
2349 }
2350 }
2351
2352 fuchsia_sysmem::wire::ImageFormatConstraints imageConstraints;
2353 if (formatConstraints && formatConstraints->sysmemFormat != 0) {
2354 auto pixelFormat =
2355 static_cast<fuchsia_sysmem::wire::PixelFormatType>(
2356 formatConstraints->sysmemFormat);
2357 if (createInfo->format != VK_FORMAT_UNDEFINED &&
2358 !vkFormatMatchesSysmemFormat(createInfo->format, pixelFormat)) {
2359 ALOGW("%s: VkFormat %u doesn't match sysmem pixelFormat %lu",
2360 __func__, static_cast<uint32_t>(createInfo->format),
2361 formatConstraints->sysmemFormat);
2362 return VK_ERROR_FORMAT_NOT_SUPPORTED;
2363 }
2364 imageConstraints.pixel_format.type = pixelFormat;
2365 } else {
2366 auto pixel_format = vkFormatTypeToSysmem(createInfo->format);
2367 if (pixel_format ==
2368 fuchsia_sysmem::wire::PixelFormatType::kInvalid) {
2369 ALOGW("%s: Unsupported VkFormat %u", __func__,
2370 static_cast<uint32_t>(createInfo->format));
2371 return VK_ERROR_FORMAT_NOT_SUPPORTED;
2372 }
2373 imageConstraints.pixel_format.type = pixel_format;
2374 }
2375
2376 if (!formatConstraints || formatConstraints->colorSpaceCount == 0u) {
2377 imageConstraints.color_spaces_count = 1;
2378 imageConstraints.color_space[0].type =
2379 fuchsia_sysmem::wire::ColorSpaceType::kSrgb;
2380 } else {
2381 imageConstraints.color_spaces_count =
2382 formatConstraints->colorSpaceCount;
2383 for (size_t i = 0; i < formatConstraints->colorSpaceCount; i++) {
2384                 imageConstraints.color_space[i].type =
2385 static_cast<fuchsia_sysmem::wire::ColorSpaceType>(
2386 formatConstraints->pColorSpaces[i].colorSpace);
2387 }
2388 }
2389
2390 // Get row alignment from host GPU.
2391 VkDeviceSize offset;
2392 VkDeviceSize rowPitchAlignment;
2393 enc->vkGetLinearImageLayoutGOOGLE(device, createInfo->format, &offset,
2394 &rowPitchAlignment,
2395 true /* do lock */);
2396 ALOGD(
2397 "vkGetLinearImageLayoutGOOGLE: format %d offset %lu "
2398 "rowPitchAlignment = %lu",
2399 (int)createInfo->format, offset, rowPitchAlignment);
2400
2401 imageConstraints.min_coded_width = createInfo->extent.width;
2402             imageConstraints.max_coded_width = 0xffffffff;
2403 imageConstraints.min_coded_height = createInfo->extent.height;
2404 imageConstraints.max_coded_height = 0xffffffff;
2405 // The min_bytes_per_row can be calculated by sysmem using
2406 // |min_coded_width|, |bytes_per_row_divisor| and color format.
2407 imageConstraints.min_bytes_per_row = 0;
2408 imageConstraints.max_bytes_per_row = 0xffffffff;
2409 imageConstraints.max_coded_width_times_coded_height = 0xffffffff;
2410
2411 imageConstraints.layers = 1;
2412 imageConstraints.coded_width_divisor = 1;
2413 imageConstraints.coded_height_divisor = 1;
2414 imageConstraints.bytes_per_row_divisor = rowPitchAlignment;
2415 imageConstraints.start_offset_divisor = 1;
2416 imageConstraints.display_width_divisor = 1;
2417 imageConstraints.display_height_divisor = 1;
2418 imageConstraints.pixel_format.has_format_modifier = true;
2419 imageConstraints.pixel_format.format_modifier.value =
2420 (tiling == VK_IMAGE_TILING_LINEAR)
2421 ? fuchsia_sysmem::wire::kFormatModifierLinear
2422 : fuchsia_sysmem::wire::kFormatModifierGoogleGoldfishOptimal;
2423
2424 constraints->image_format_constraints
2425 [constraints->image_format_constraints_count++] =
2426 std::move(imageConstraints);
2427 return VK_SUCCESS;
2428 }
2429
2430     VkResult setBufferCollectionImageConstraints(
2431 VkEncoder* enc,
2432 VkDevice device,
2433 fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>* collection,
2434 const VkImageConstraintsInfoFUCHSIA* pImageConstraintsInfo) {
2435 if (!pImageConstraintsInfo ||
2436 pImageConstraintsInfo->sType !=
2437 VK_STRUCTURE_TYPE_IMAGE_CONSTRAINTS_INFO_FUCHSIA) {
2438 ALOGE("%s: invalid pImageConstraintsInfo", __func__);
2439 return VK_ERROR_INITIALIZATION_FAILED;
2440 }
2441
2442 if (pImageConstraintsInfo->createInfoCount == 0) {
2443 ALOGE("%s: createInfoCount must be greater than 0", __func__);
2444 return VK_ERROR_INITIALIZATION_FAILED;
2445 }
2446
2447 fuchsia_sysmem::wire::BufferCollectionConstraints constraints =
2448 defaultBufferCollectionConstraints(
2449 /* min_size_bytes */ 0, pImageConstraintsInfo->minBufferCount,
2450 pImageConstraintsInfo->maxBufferCount,
2451 pImageConstraintsInfo->minBufferCountForCamping,
2452 pImageConstraintsInfo->minBufferCountForDedicatedSlack,
2453 pImageConstraintsInfo->minBufferCountForSharedSlack);
2454
2455 std::vector<fuchsia_sysmem::wire::ImageFormatConstraints>
2456 format_constraints;
2457
2458 VkPhysicalDevice physicalDevice;
2459 {
2460 AutoLock lock(mLock);
2461 auto deviceIt = info_VkDevice.find(device);
2462 if (deviceIt == info_VkDevice.end()) {
2463 return VK_ERROR_INITIALIZATION_FAILED;
2464 }
2465 physicalDevice = deviceIt->second.physdev;
2466 }
2467
2468 std::vector<uint32_t> createInfoIndex;
2469
2470 bool hasOptimalTiling = false;
2471 for (uint32_t i = 0; i < pImageConstraintsInfo->createInfoCount; i++) {
2472 const VkImageCreateInfo* createInfo =
2473 &pImageConstraintsInfo->pCreateInfos[i];
2474 const VkImageFormatConstraintsInfoFUCHSIA* formatConstraints =
2475 pImageConstraintsInfo->pFormatConstraints
2476 ? &pImageConstraintsInfo->pFormatConstraints[i]
2477 : nullptr;
2478
2479 // add ImageFormatConstraints for *optimal* tiling
2480 VkResult optimalResult = VK_ERROR_FORMAT_NOT_SUPPORTED;
2481 if (createInfo->tiling == VK_IMAGE_TILING_OPTIMAL) {
2482 optimalResult = addImageBufferCollectionConstraints(
2483 enc, device, physicalDevice, createInfo, formatConstraints,
2484 VK_IMAGE_TILING_OPTIMAL, &constraints);
2485 if (optimalResult == VK_SUCCESS) {
2486 createInfoIndex.push_back(i);
2487 hasOptimalTiling = true;
2488 }
2489 }
2490
2491 // Add ImageFormatConstraints for *linear* tiling
2492 VkResult linearResult = addImageBufferCollectionConstraints(
2493 enc, device, physicalDevice, createInfo, formatConstraints,
2494 VK_IMAGE_TILING_LINEAR, &constraints);
2495 if (linearResult == VK_SUCCESS) {
2496 createInfoIndex.push_back(i);
2497 }
2498
2499 // Update usage and BufferMemoryConstraints
2500 if (linearResult == VK_SUCCESS || optimalResult == VK_SUCCESS) {
2501 constraints.usage.vulkan |=
2502 getBufferCollectionConstraintsVulkanImageUsage(createInfo);
2503
2504 if (formatConstraints && formatConstraints->flags) {
2505 ALOGW(
2506 "%s: Non-zero flags (%08x) in image format "
2507 "constraints; this is currently not supported, see "
2508 "fxbug.dev/68833.",
2509 __func__, formatConstraints->flags);
2510 }
2511 }
2512 }
2513
2514 // Set buffer memory constraints based on optimal/linear tiling support
2515 // and flags.
2516 VkImageConstraintsInfoFlagsFUCHSIA flags = pImageConstraintsInfo->flags;
2517 if (flags & VK_IMAGE_CONSTRAINTS_INFO_CPU_READ_RARELY_FUCHSIA)
2518 constraints.usage.cpu |= fuchsia_sysmem::wire::kCpuUsageRead;
2519 if (flags & VK_IMAGE_CONSTRAINTS_INFO_CPU_READ_OFTEN_FUCHSIA)
2520 constraints.usage.cpu |= fuchsia_sysmem::wire::kCpuUsageReadOften;
2521 if (flags & VK_IMAGE_CONSTRAINTS_INFO_CPU_WRITE_RARELY_FUCHSIA)
2522 constraints.usage.cpu |= fuchsia_sysmem::wire::kCpuUsageWrite;
2523 if (flags & VK_IMAGE_CONSTRAINTS_INFO_CPU_WRITE_OFTEN_FUCHSIA)
2524 constraints.usage.cpu |= fuchsia_sysmem::wire::kCpuUsageWriteOften;
2525
2526 constraints.has_buffer_memory_constraints = true;
2527 auto& memory_constraints = constraints.buffer_memory_constraints;
2528 memory_constraints.cpu_domain_supported = true;
2529 memory_constraints.ram_domain_supported = true;
2530 memory_constraints.inaccessible_domain_supported =
2531 hasOptimalTiling &&
2532 !(flags & (VK_IMAGE_CONSTRAINTS_INFO_CPU_READ_RARELY_FUCHSIA |
2533 VK_IMAGE_CONSTRAINTS_INFO_CPU_READ_OFTEN_FUCHSIA |
2534 VK_IMAGE_CONSTRAINTS_INFO_CPU_WRITE_RARELY_FUCHSIA |
2535 VK_IMAGE_CONSTRAINTS_INFO_CPU_WRITE_OFTEN_FUCHSIA));
2536
2537 if (memory_constraints.inaccessible_domain_supported) {
2538 memory_constraints.heap_permitted_count = 2;
2539 memory_constraints.heap_permitted[0] =
2540 fuchsia_sysmem::wire::HeapType::kGoldfishDeviceLocal;
2541 memory_constraints.heap_permitted[1] =
2542 fuchsia_sysmem::wire::HeapType::kGoldfishHostVisible;
2543 } else {
2544 memory_constraints.heap_permitted_count = 1;
2545 memory_constraints.heap_permitted[0] =
2546 fuchsia_sysmem::wire::HeapType::kGoldfishHostVisible;
2547 }
2548
2549 if (constraints.image_format_constraints_count == 0) {
2550 ALOGE("%s: none of the specified formats is supported by device",
2551 __func__);
2552 return VK_ERROR_FORMAT_NOT_SUPPORTED;
2553 }
2554
2555 constexpr uint32_t kVulkanPriority = 5;
2556 const char kName[] = "GoldfishSysmemShared";
2557 collection->SetName(kVulkanPriority, fidl::StringView(kName));
2558
2559 auto result = collection->SetConstraints(true, constraints);
2560 if (!result.ok()) {
2561                 ALOGE("%s: SetConstraints failed: %d", __func__,
2562 result.status());
2563 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
2564 }
2565
2566 // copy constraints to info_VkBufferCollectionFUCHSIA if
2567 // |collection| is a valid VkBufferCollectionFUCHSIA handle.
2568 AutoLock lock(mLock);
2569 VkBufferCollectionFUCHSIA buffer_collection =
2570 reinterpret_cast<VkBufferCollectionFUCHSIA>(collection);
2571 if (info_VkBufferCollectionFUCHSIA.find(buffer_collection) !=
2572 info_VkBufferCollectionFUCHSIA.end()) {
2573 info_VkBufferCollectionFUCHSIA[buffer_collection].constraints =
2574 android::base::makeOptional(std::move(constraints));
2575 info_VkBufferCollectionFUCHSIA[buffer_collection].createInfoIndex =
2576 std::move(createInfoIndex);
2577 }
2578
2579 return VK_SUCCESS;
2580 }
2581
2582     VkResult setBufferCollectionBufferConstraints(
2583 fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>* collection,
2584 const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo) {
2585 if (pBufferConstraintsInfo == nullptr) {
2586 ALOGE(
2587 "setBufferCollectionBufferConstraints: "
2588 "pBufferConstraintsInfo cannot be null.");
2589 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
2590 }
2591
2592 fuchsia_sysmem::wire::BufferCollectionConstraints constraints =
2593 defaultBufferCollectionConstraints(
2594 /* min_size_bytes */ pBufferConstraintsInfo->pBufferCreateInfo->size,
2595 /* buffer_count */ pBufferConstraintsInfo->minCount);
2596 constraints.usage.vulkan =
2597 getBufferCollectionConstraintsVulkanBufferUsage(
2598 pBufferConstraintsInfo);
2599
2600 constexpr uint32_t kVulkanPriority = 5;
2601 const char kName[] = "GoldfishBufferSysmemShared";
2602 collection->SetName(kVulkanPriority, fidl::StringView(kName));
2603
2604 auto result = collection->SetConstraints(true, constraints);
2605 if (!result.ok()) {
2606             ALOGE("%s: SetConstraints failed: %d", __func__,
2607 result.status());
2608 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
2609 }
2610
2611 // copy constraints to info_VkBufferCollectionFUCHSIA if
2612 // |collection| is a valid VkBufferCollectionFUCHSIA handle.
2613 AutoLock lock(mLock);
2614 VkBufferCollectionFUCHSIA buffer_collection =
2615 reinterpret_cast<VkBufferCollectionFUCHSIA>(collection);
2616 if (info_VkBufferCollectionFUCHSIA.find(buffer_collection) !=
2617 info_VkBufferCollectionFUCHSIA.end()) {
2618 info_VkBufferCollectionFUCHSIA[buffer_collection].constraints =
2619 android::base::makeOptional(std::move(constraints));
2620 }
2621
2622 return VK_SUCCESS;
2623 }
2624
2625     VkResult on_vkSetBufferCollectionConstraintsFUCHSIA(
2626 void* context, VkResult, VkDevice device,
2627 VkBufferCollectionFUCHSIA collection,
2628 const VkImageCreateInfo* pImageInfo) {
2629 VkEncoder* enc = (VkEncoder*)context;
2630 auto sysmem_collection = reinterpret_cast<
2631 fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(collection);
2632 return setBufferCollectionConstraints(enc, device, sysmem_collection, pImageInfo);
2633 }
2634
2635     VkResult on_vkSetBufferCollectionImageConstraintsFUCHSIA(
2636 void* context,
2637 VkResult,
2638 VkDevice device,
2639 VkBufferCollectionFUCHSIA collection,
2640 const VkImageConstraintsInfoFUCHSIA* pImageConstraintsInfo) {
2641 VkEncoder* enc = (VkEncoder*)context;
2642 auto sysmem_collection = reinterpret_cast<
2643 fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(collection);
2644 return setBufferCollectionImageConstraints(
2645 enc, device, sysmem_collection, pImageConstraintsInfo);
2646 }
2647
2648     VkResult on_vkSetBufferCollectionBufferConstraintsFUCHSIA(
2649 void*,
2650 VkResult,
2651 VkDevice,
2652 VkBufferCollectionFUCHSIA collection,
2653 const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo) {
2654 auto sysmem_collection = reinterpret_cast<
2655 fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(collection);
2656 return setBufferCollectionBufferConstraints(sysmem_collection,
2657 pBufferConstraintsInfo);
2658 }
2659
2660     VkResult on_vkGetBufferCollectionPropertiesFUCHSIA(
2661 void* context,
2662 VkResult,
2663 VkDevice device,
2664 VkBufferCollectionFUCHSIA collection,
2665 VkBufferCollectionPropertiesFUCHSIA* pProperties) {
2666 VkBufferCollectionProperties2FUCHSIA properties2 = {
2667 .sType = VK_STRUCTURE_TYPE_BUFFER_COLLECTION_PROPERTIES2_FUCHSIA,
2668 .pNext = nullptr};
2669 auto result = on_vkGetBufferCollectionProperties2FUCHSIA(
2670 context, VK_SUCCESS, device, collection, &properties2);
2671 if (result != VK_SUCCESS) {
2672 return result;
2673 }
2674
2675 pProperties->count = properties2.bufferCount;
2676 pProperties->memoryTypeBits = properties2.memoryTypeBits;
2677 return VK_SUCCESS;
2678 }
2679
2680     VkResult getBufferCollectionImageCreateInfoIndexLocked(
2681 VkBufferCollectionFUCHSIA collection,
2682 fuchsia_sysmem::wire::BufferCollectionInfo2& info,
2683 uint32_t* outCreateInfoIndex) {
2684 if (!info_VkBufferCollectionFUCHSIA[collection]
2685 .constraints.hasValue()) {
2686 ALOGE("%s: constraints not set", __func__);
2687 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
2688 }
2689
2690 if (!info.settings.has_image_format_constraints) {
2691 // no image format constraints, skip getting createInfoIndex.
2692 return VK_SUCCESS;
2693 }
2694
2695 const auto& constraints =
2696 *info_VkBufferCollectionFUCHSIA[collection].constraints;
2697 const auto& createInfoIndices =
2698 info_VkBufferCollectionFUCHSIA[collection].createInfoIndex;
2699 const auto& out = info.settings.image_format_constraints;
2700 bool foundCreateInfo = false;
2701
2702 for (size_t imageFormatIndex = 0;
2703 imageFormatIndex < constraints.image_format_constraints_count;
2704 imageFormatIndex++) {
2705 const auto& in =
2706 constraints.image_format_constraints[imageFormatIndex];
2707 // These checks are sorted in order of how often they're expected to
2708 // mismatch, from most likely to least likely. They aren't always
2709 // equality comparisons, since sysmem may change some values in
2710 // compatible ways on behalf of the other participants.
2711 if ((out.pixel_format.type != in.pixel_format.type) ||
2712 (out.pixel_format.has_format_modifier !=
2713 in.pixel_format.has_format_modifier) ||
2714 (out.pixel_format.format_modifier.value !=
2715 in.pixel_format.format_modifier.value) ||
2716 (out.min_bytes_per_row < in.min_bytes_per_row) ||
2717 (out.required_max_coded_width < in.required_max_coded_width) ||
2718 (out.required_max_coded_height <
2719 in.required_max_coded_height) ||
2720 (out.bytes_per_row_divisor % in.bytes_per_row_divisor != 0)) {
2721 continue;
2722 }
2723 // Check if the out colorspaces are a subset of the in color spaces.
2724 bool all_color_spaces_found = true;
2725 for (uint32_t j = 0; j < out.color_spaces_count; j++) {
2726 bool found_matching_color_space = false;
2727 for (uint32_t k = 0; k < in.color_spaces_count; k++) {
2728 if (out.color_space[j].type == in.color_space[k].type) {
2729 found_matching_color_space = true;
2730 break;
2731 }
2732 }
2733 if (!found_matching_color_space) {
2734 all_color_spaces_found = false;
2735 break;
2736 }
2737 }
2738 if (!all_color_spaces_found) {
2739 continue;
2740 }
2741
2742 // Choose the first valid format for now.
2743 *outCreateInfoIndex = createInfoIndices[imageFormatIndex];
2744 return VK_SUCCESS;
2745 }
2746
2747 ALOGE("%s: cannot find a valid image format in constraints", __func__);
2748 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
2749 }
2750
2751     VkResult on_vkGetBufferCollectionProperties2FUCHSIA(
2752 void* context,
2753 VkResult,
2754 VkDevice device,
2755 VkBufferCollectionFUCHSIA collection,
2756 VkBufferCollectionProperties2FUCHSIA* pProperties) {
2757 VkEncoder* enc = (VkEncoder*)context;
2758 auto sysmem_collection = reinterpret_cast<
2759 fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(collection);
2760
2761 auto result = sysmem_collection->WaitForBuffersAllocated();
2762 if (!result.ok() || result.Unwrap()->status != ZX_OK) {
2763 ALOGE("Failed wait for allocation: %d %d", result.status(),
2764 GET_STATUS_SAFE(result, status));
2765 return VK_ERROR_INITIALIZATION_FAILED;
2766 }
2767 fuchsia_sysmem::wire::BufferCollectionInfo2 info =
2768 std::move(result.Unwrap()->buffer_collection_info);
2769
2770 bool is_host_visible = info.settings.buffer_settings.heap ==
2771 fuchsia_sysmem::wire::HeapType::kGoldfishHostVisible;
2772 bool is_device_local = info.settings.buffer_settings.heap ==
2773 fuchsia_sysmem::wire::HeapType::kGoldfishDeviceLocal;
2774 if (!is_host_visible && !is_device_local) {
2775             ALOGE("buffer collection uses a non-goldfish heap (type 0x%lx)",
2776 static_cast<uint64_t>(info.settings.buffer_settings.heap));
2777 return VK_ERROR_INITIALIZATION_FAILED;
2778 }
2779
2780 // memoryTypeBits
2781 // ====================================================================
2782 {
2783 AutoLock lock(mLock);
2784 auto deviceIt = info_VkDevice.find(device);
2785 if (deviceIt == info_VkDevice.end()) {
2786 return VK_ERROR_INITIALIZATION_FAILED;
2787 }
2788 auto& deviceInfo = deviceIt->second;
2789
2790 // Device local memory type supported.
2791 pProperties->memoryTypeBits = 0;
2792 for (uint32_t i = 0; i < deviceInfo.memProps.memoryTypeCount; ++i) {
2793 if ((is_device_local &&
2794 (deviceInfo.memProps.memoryTypes[i].propertyFlags &
2795 VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)) ||
2796 (is_host_visible &&
2797 (deviceInfo.memProps.memoryTypes[i].propertyFlags &
2798 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT))) {
2799 pProperties->memoryTypeBits |= 1ull << i;
2800 }
2801 }
2802 }
2803
2804 // bufferCount
2805 // ====================================================================
2806 pProperties->bufferCount = info.buffer_count;
2807
2808 auto storeProperties = [this, collection, pProperties]() -> VkResult {
2809 // store properties to storage
2810 AutoLock lock(mLock);
2811 if (info_VkBufferCollectionFUCHSIA.find(collection) ==
2812 info_VkBufferCollectionFUCHSIA.end()) {
2813 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
2814 }
2815
2816 info_VkBufferCollectionFUCHSIA[collection].properties =
2817 android::base::makeOptional(*pProperties);
2818
2819 // We only do a shallow copy so we should remove all pNext pointers.
2820 info_VkBufferCollectionFUCHSIA[collection].properties->pNext =
2821 nullptr;
2822 info_VkBufferCollectionFUCHSIA[collection]
2823 .properties->colorSpace.pNext = nullptr;
2824 return VK_SUCCESS;
2825 };
2826
2827 // The fields below only apply to buffer collections with image formats.
2828 if (!info.settings.has_image_format_constraints) {
2829 ALOGD("%s: buffer collection doesn't have image format constraints",
2830 __func__);
2831 return storeProperties();
2832 }
2833
2834 // sysmemFormat
2835 // ====================================================================
2836
2837 pProperties->sysmemFormat = static_cast<uint64_t>(
2838 info.settings.image_format_constraints.pixel_format.type);
2839
2840 // colorSpace
2841 // ====================================================================
2842 if (info.settings.image_format_constraints.color_spaces_count == 0) {
2843 ALOGE(
2844 "%s: color space missing from allocated buffer collection "
2845 "constraints",
2846 __func__);
2847 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
2848 }
2849 // Only report first colorspace for now.
2850 pProperties->colorSpace.colorSpace = static_cast<uint32_t>(
2851 info.settings.image_format_constraints.color_space[0].type);
2852
2853 // createInfoIndex
2854 // ====================================================================
2855 {
2856 AutoLock lock(mLock);
2857 auto getIndexResult = getBufferCollectionImageCreateInfoIndexLocked(
2858 collection, info, &pProperties->createInfoIndex);
2859 if (getIndexResult != VK_SUCCESS) {
2860 return getIndexResult;
2861 }
2862 }
2863
2864 // formatFeatures
2865 // ====================================================================
2866 VkPhysicalDevice physicalDevice;
2867 {
2868 AutoLock lock(mLock);
2869 auto deviceIt = info_VkDevice.find(device);
2870 if (deviceIt == info_VkDevice.end()) {
2871 return VK_ERROR_INITIALIZATION_FAILED;
2872 }
2873 physicalDevice = deviceIt->second.physdev;
2874 }
2875
2876 VkFormat vkFormat = sysmemPixelFormatTypeToVk(
2877 info.settings.image_format_constraints.pixel_format.type);
2878 VkFormatProperties formatProperties;
2879 enc->vkGetPhysicalDeviceFormatProperties(
2880 physicalDevice, vkFormat, &formatProperties, true /* do lock */);
2881 if (is_device_local) {
2882 pProperties->formatFeatures =
2883 formatProperties.optimalTilingFeatures;
2884 }
2885 if (is_host_visible) {
2886 pProperties->formatFeatures = formatProperties.linearTilingFeatures;
2887 }
2888
2889 // YCbCr properties
2890 // ====================================================================
2891 // TODO(59804): Implement this correctly when we support YUV pixel
2892 // formats in goldfish ICD.
2893 pProperties->samplerYcbcrConversionComponents.r =
2894 VK_COMPONENT_SWIZZLE_IDENTITY;
2895 pProperties->samplerYcbcrConversionComponents.g =
2896 VK_COMPONENT_SWIZZLE_IDENTITY;
2897 pProperties->samplerYcbcrConversionComponents.b =
2898 VK_COMPONENT_SWIZZLE_IDENTITY;
2899 pProperties->samplerYcbcrConversionComponents.a =
2900 VK_COMPONENT_SWIZZLE_IDENTITY;
2901 pProperties->suggestedYcbcrModel =
2902 VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY;
2903 pProperties->suggestedYcbcrRange = VK_SAMPLER_YCBCR_RANGE_ITU_FULL;
2904 pProperties->suggestedXChromaOffset = VK_CHROMA_LOCATION_MIDPOINT;
2905 pProperties->suggestedYChromaOffset = VK_CHROMA_LOCATION_MIDPOINT;
2906
2907 return storeProperties();
2908 }
2909 #endif
2910
2911     HostMemBlockIndex getOrAllocateHostMemBlockLocked(
2912 HostMemBlocks& blocks,
2913 const VkMemoryAllocateInfo* pAllocateInfo,
2914 VkEncoder* enc,
2915 VkDevice device,
2916 const VkDevice_Info& deviceInfo) {
2917
2918 HostMemBlockIndex res = 0;
2919 bool found = false;
2920
2921 while (!found) {
2922 for (HostMemBlockIndex i = 0; i < blocks.size(); ++i) {
2923 if (blocks[i].initialized &&
2924 blocks[i].initResult == VK_SUCCESS &&
2925 canSubAlloc(
2926 blocks[i].subAlloc,
2927 pAllocateInfo->allocationSize)) {
2928 res = i;
2929 found = true;
2930 return res;
2931 }
2932 }
2933
2934 blocks.push_back({});
2935
2936 auto& hostMemAlloc = blocks.back();
2937
2938 // Uninitialized block; allocate on host.
2939 static constexpr VkDeviceSize oneMb = 1048576;
2940 static constexpr VkDeviceSize kDefaultHostMemBlockSize =
2941 16 * oneMb; // 16 mb
2942 VkDeviceSize roundedUpAllocSize =
2943 oneMb * ((pAllocateInfo->allocationSize + oneMb - 1) / oneMb);
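            // e.g. a 3.5 MiB request rounds up to 4 MiB; exact multiples of
            // 1 MiB are left unchanged.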
2944
2945 VkDeviceSize virtualHeapSize = VIRTUAL_HOST_VISIBLE_HEAP_SIZE;
2946
2947 VkDeviceSize blockSizeNeeded =
2948 std::max(roundedUpAllocSize,
2949 std::min(virtualHeapSize,
2950 kDefaultHostMemBlockSize));
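            // i.e. allocate at least the 16 MiB default block (capped at the
            // virtual heap size) so later small allocations can be
            // sub-allocated from the same host block, but grow the block for
            // larger requests.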
2951
2952 VkMemoryAllocateInfo allocInfoForHost = *pAllocateInfo;
2953
2954 allocInfoForHost.allocationSize = blockSizeNeeded;
2955
2956 // TODO: Support dedicated/external host visible allocation
2957 allocInfoForHost.pNext = nullptr;
2958
2959 mLock.unlock();
2960 VkResult host_res =
2961 enc->vkAllocateMemory(
2962 device,
2963 &allocInfoForHost,
2964 nullptr,
2965 &hostMemAlloc.memory, true /* do lock */);
2966 mLock.lock();
2967
2968 if (host_res != VK_SUCCESS) {
2969 ALOGE("Could not allocate backing for virtual host visible memory: %d",
2970 host_res);
2971 hostMemAlloc.initialized = true;
2972 hostMemAlloc.initResult = host_res;
2973 return INVALID_HOST_MEM_BLOCK;
2974 }
2975
2976 auto& hostMemInfo = info_VkDeviceMemory[hostMemAlloc.memory];
2977 hostMemInfo.allocationSize = allocInfoForHost.allocationSize;
2978 VkDeviceSize nonCoherentAtomSize =
2979 deviceInfo.props.limits.nonCoherentAtomSize;
2980 hostMemInfo.mappedSize = hostMemInfo.allocationSize;
2981 hostMemInfo.memoryTypeIndex =
2982 pAllocateInfo->memoryTypeIndex;
2983 hostMemAlloc.nonCoherentAtomSize = nonCoherentAtomSize;
2984
2985 uint64_t directMappedAddr = 0;
2986
2987
2988 VkResult directMapResult = VK_SUCCESS;
2989 if (mFeatureInfo->hasDirectMem) {
2990 mLock.unlock();
2991 directMapResult =
2992 enc->vkMapMemoryIntoAddressSpaceGOOGLE(
2993 device, hostMemAlloc.memory, &directMappedAddr, true /* do lock */);
2994 mLock.lock();
2995 } else if (mFeatureInfo->hasVirtioGpuNext) {
2996 #if !defined(HOST_BUILD) && defined(VK_USE_PLATFORM_ANDROID_KHR)
2997 uint64_t hvaSizeId[3];
2998
2999 int rendernodeFdForMem = drmOpenRender(128 /* RENDERNODE_MINOR */);
3000 ALOGE("%s: render fd = %d\n", __func__, rendernodeFdForMem);
3001
3002 mLock.unlock();
3003 enc->vkGetMemoryHostAddressInfoGOOGLE(
3004 device, hostMemAlloc.memory,
3005 &hvaSizeId[0], &hvaSizeId[1], &hvaSizeId[2], true /* do lock */);
3006 ALOGD("%s: hvaOff, size: 0x%llx 0x%llx id: 0x%llx\n", __func__,
3007 (unsigned long long)hvaSizeId[0],
3008 (unsigned long long)hvaSizeId[1],
3009 (unsigned long long)hvaSizeId[2]);
3010 mLock.lock();
3011
3012 struct drm_virtgpu_resource_create_blob drm_rc_blob = { 0 };
3013 drm_rc_blob.blob_mem = VIRTGPU_BLOB_MEM_HOST;
3014 drm_rc_blob.blob_flags = VIRTGPU_BLOB_FLAG_MAPPABLE;
3015 drm_rc_blob.blob_id = hvaSizeId[2];
3016 drm_rc_blob.size = hvaSizeId[1];
3017
3018 int res = drmIoctl(
3019 rendernodeFdForMem, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &drm_rc_blob);
3020
3021 if (res) {
3022                     ALOGE("%s: Failed to resource create v2: strerror: %s errno: %d\n", __func__,
3023 strerror(errno), errno);
3024 abort();
3025 }
3026
3027 drm_virtgpu_map map_info;
3028 memset(&map_info, 0, sizeof(map_info));
3029 map_info.handle = drm_rc_blob.bo_handle;
3030
3031 res = drmIoctl(rendernodeFdForMem, DRM_IOCTL_VIRTGPU_MAP, &map_info);
3032 if (res) {
3033                 ALOGE("%s: Failed to virtgpu map: strerror: %s errno: %d\n", __func__,
3034 strerror(errno), errno);
3035 abort();
3036 }
3037
3038                 void* mappedPtr =
3039                     mmap64(0, hvaSizeId[1], PROT_WRITE, MAP_SHARED, rendernodeFdForMem, map_info.offset);
3040 
3041                 if (mappedPtr == MAP_FAILED) { // mmap reports failure via MAP_FAILED, not nullptr
3042                     ALOGE("%s: mmap of virtio gpu resource failed: %s errno: %d\n", __func__,
                          strerror(errno), errno);
3043                     abort();
3044                 }
                 directMappedAddr = (uint64_t)(uintptr_t)mappedPtr;
3045
3046 hostMemAlloc.memoryAddr = directMappedAddr;
3047 hostMemAlloc.memorySize = hvaSizeId[1];
3048
3049 // add the host's page offset
3050 directMappedAddr += (uint64_t)(uintptr_t)(hvaSizeId[0]) & (PAGE_SIZE - 1);
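                // e.g. with 4 KiB pages, a host address ending in ...1200
                // places the usable range 0x200 bytes into the mapping.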
3051 directMapResult = VK_SUCCESS;
3052
3053 hostMemAlloc.fd = rendernodeFdForMem;
3054 #endif // VK_USE_PLATFORM_ANDROID_KHR
3055 }
3056
3057 if (directMapResult != VK_SUCCESS) {
3058 hostMemAlloc.initialized = true;
3059 hostMemAlloc.initResult = directMapResult;
3060 mLock.unlock();
3061 enc->vkFreeMemory(device, hostMemAlloc.memory, nullptr, true /* do lock */);
3062 mLock.lock();
3063 return INVALID_HOST_MEM_BLOCK;
3064 }
3065
3066 hostMemInfo.mappedPtr =
3067 (uint8_t*)(uintptr_t)directMappedAddr;
3068 hostMemInfo.virtualHostVisibleBacking = true;
3069
3070 VkResult hostMemAllocRes =
3071 finishHostMemAllocInit(
3072 enc,
3073 device,
3074 pAllocateInfo->memoryTypeIndex,
3075 nonCoherentAtomSize,
3076 hostMemInfo.allocationSize,
3077 hostMemInfo.mappedSize,
3078 hostMemInfo.mappedPtr,
3079 &hostMemAlloc);
3080
3081 if (hostMemAllocRes != VK_SUCCESS) {
3082 return INVALID_HOST_MEM_BLOCK;
3083 }
3084 }
3085
3086         // unreachable, but we need to keep -Werror happy
3087 return INVALID_HOST_MEM_BLOCK;
3088 }
3089
3090     uint64_t getAHardwareBufferId(AHardwareBuffer* ahw) {
3091 uint64_t id = 0;
3092 #if defined(PLATFORM_SDK_VERSION) && PLATFORM_SDK_VERSION >= 31
3093 AHardwareBuffer_getId(ahw, &id);
3094 #else
3095 (void)ahw;
3096 #endif
3097 return id;
3098 }
3099
3100     VkResult on_vkAllocateMemory(
3101 void* context,
3102 VkResult input_result,
3103 VkDevice device,
3104 const VkMemoryAllocateInfo* pAllocateInfo,
3105 const VkAllocationCallbacks* pAllocator,
3106 VkDeviceMemory* pMemory) {
3107
3108 #define _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(result) \
3109 { \
3110 auto it = info_VkDevice.find(device); \
3111 if (it == info_VkDevice.end()) return result; \
3112 emitDeviceMemoryReport( \
3113 it->second, \
3114 VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATION_FAILED_EXT, \
3115 0, \
3116 pAllocateInfo->allocationSize, \
3117 VK_OBJECT_TYPE_DEVICE_MEMORY, \
3118 0, \
3119 pAllocateInfo->memoryTypeIndex); \
3120 return result; \
3121 }
3122
3123 #define _RETURN_SUCCESS_WITH_DEVICE_MEMORY_REPORT \
3124 { \
3125 uint64_t memoryObjectId = (uint64_t)(void*)*pMemory; \
3126 if (ahw) { \
3127 memoryObjectId = getAHardwareBufferId(ahw); \
3128 } \
3129 emitDeviceMemoryReport( \
3130 info_VkDevice[device], \
3131 isImport ? VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_IMPORT_EXT : VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATE_EXT, \
3132 memoryObjectId, \
3133 pAllocateInfo->allocationSize, \
3134 VK_OBJECT_TYPE_DEVICE_MEMORY, \
3135 (uint64_t)(void*)*pMemory, \
3136 pAllocateInfo->memoryTypeIndex); \
3137 return VK_SUCCESS; \
3138 }
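        // These are macros rather than helper functions because they
        // early-return from the enclosing function, and the success variant
        // reads the locals `ahw` and `isImport` declared further down in
        // on_vkAllocateMemory.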
3139
3140
3141 if (input_result != VK_SUCCESS) _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(input_result);
3142
3143 VkEncoder* enc = (VkEncoder*)context;
3144
3145 VkMemoryAllocateInfo finalAllocInfo = vk_make_orphan_copy(*pAllocateInfo);
3146 vk_struct_chain_iterator structChainIter = vk_make_chain_iterator(&finalAllocInfo);
3147
3148 VkMemoryDedicatedAllocateInfo dedicatedAllocInfo;
3149 VkImportColorBufferGOOGLE importCbInfo = {
3150 VK_STRUCTURE_TYPE_IMPORT_COLOR_BUFFER_GOOGLE, 0,
3151 };
3152 VkImportBufferGOOGLE importBufferInfo = {
3153 VK_STRUCTURE_TYPE_IMPORT_BUFFER_GOOGLE,
3154 0,
3155 };
3156 // VkImportPhysicalAddressGOOGLE importPhysAddrInfo = {
3157 // VK_STRUCTURE_TYPE_IMPORT_PHYSICAL_ADDRESS_GOOGLE, 0,
3158 // };
3159
3160 const VkExportMemoryAllocateInfo* exportAllocateInfoPtr =
3161 vk_find_struct<VkExportMemoryAllocateInfo>(pAllocateInfo);
3162
3163 const VkImportAndroidHardwareBufferInfoANDROID* importAhbInfoPtr =
3164 vk_find_struct<VkImportAndroidHardwareBufferInfoANDROID>(pAllocateInfo);
3165
3166 const VkImportMemoryBufferCollectionFUCHSIA* importBufferCollectionInfoPtr =
3167 vk_find_struct<VkImportMemoryBufferCollectionFUCHSIA>(pAllocateInfo);
3168
3169 const VkImportMemoryZirconHandleInfoFUCHSIA* importVmoInfoPtr =
3170 vk_find_struct<VkImportMemoryZirconHandleInfoFUCHSIA>(pAllocateInfo);
3171 if (!importVmoInfoPtr) {
3172 importVmoInfoPtr = reinterpret_cast<const VkImportMemoryZirconHandleInfoFUCHSIA*>(
3173 __vk_find_struct(const_cast<void*>(pAllocateInfo->pNext),
3174 VK_STRUCTURE_TYPE_TEMP_IMPORT_MEMORY_ZIRCON_HANDLE_INFO_FUCHSIA));
3175 }
3176
3177 const VkMemoryDedicatedAllocateInfo* dedicatedAllocInfoPtr =
3178 vk_find_struct<VkMemoryDedicatedAllocateInfo>(pAllocateInfo);
3179
3180 bool shouldPassThroughDedicatedAllocInfo =
3181 !exportAllocateInfoPtr && !importAhbInfoPtr &&
3182 !importBufferCollectionInfoPtr && !importVmoInfoPtr;
3183
3184 #ifndef VK_USE_PLATFORM_FUCHSIA
3185 shouldPassThroughDedicatedAllocInfo &=
3186 !isHostVisibleMemoryTypeIndexForGuest(
3187 &mHostVisibleMemoryVirtInfo, pAllocateInfo->memoryTypeIndex);
3188
3189 if (!exportAllocateInfoPtr &&
3190 (importAhbInfoPtr || importBufferCollectionInfoPtr ||
3191 importVmoInfoPtr) &&
3192 dedicatedAllocInfoPtr &&
3193 isHostVisibleMemoryTypeIndexForGuest(
3194 &mHostVisibleMemoryVirtInfo, pAllocateInfo->memoryTypeIndex)) {
3195 ALOGE(
3196 "FATAL: It is not yet supported to import-allocate "
3197 "external memory that is both host visible and dedicated.");
3198 abort();
3199 }
3200 #endif // VK_USE_PLATFORM_FUCHSIA
3201
3202 if (shouldPassThroughDedicatedAllocInfo &&
3203 dedicatedAllocInfoPtr) {
3204 dedicatedAllocInfo = vk_make_orphan_copy(*dedicatedAllocInfoPtr);
3205 vk_append_struct(&structChainIter, &dedicatedAllocInfo);
3206 }
3207
3208 // State needed for import/export.
3209 bool exportAhb = false;
3210 bool exportVmo = false;
3211 bool importAhb = false;
3212 bool importBufferCollection = false;
3213 bool importVmo = false;
3214 (void)exportVmo;
3215
3216 // Even if we export allocate, the underlying operation
3217 // for the host is always going to be an import operation.
3218 // This is also how Intel's implementation works,
3219 // and is generally simpler;
3220 // even in an export allocation,
3221 // we perform AHardwareBuffer allocation
3222 // on the guest side, at this layer,
3223 // and then we attach a new VkDeviceMemory
3224 // to the AHardwareBuffer on the host via an "import" operation.
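        // A minimal app-side sketch (illustrative only) of an export
        // allocation that lands on this path:
        //
        //   VkExportMemoryAllocateInfo exportInfo = {
        //       VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO, nullptr,
        //       VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID };
        //   VkMemoryAllocateInfo allocInfo = {
        //       VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, &exportInfo,
        //       memReqs.size, memoryTypeIndex };
        //   vkAllocateMemory(device, &allocInfo, nullptr, &memory);
        //
        // The guest allocates the AHardwareBuffer itself; the host only ever
        // sees an import of that buffer's color buffer handle.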
3225 AHardwareBuffer* ahw = nullptr;
3226
3227 if (exportAllocateInfoPtr) {
3228 exportAhb =
3229 exportAllocateInfoPtr->handleTypes &
3230 VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
3231 exportVmo =
3232 (exportAllocateInfoPtr->handleTypes &
3233 VK_EXTERNAL_MEMORY_HANDLE_TYPE_TEMP_ZIRCON_VMO_BIT_FUCHSIA) ||
3234 (exportAllocateInfoPtr->handleTypes &
3235 VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA);
3236 } else if (importAhbInfoPtr) {
3237 importAhb = true;
3238 } else if (importBufferCollectionInfoPtr) {
3239 importBufferCollection = true;
3240 } else if (importVmoInfoPtr) {
3241 importVmo = true;
3242 }
3243 bool isImport = importAhb || importBufferCollection || importVmo;
3244
3245 if (exportAhb) {
3246 bool hasDedicatedImage = dedicatedAllocInfoPtr &&
3247 (dedicatedAllocInfoPtr->image != VK_NULL_HANDLE);
3248 bool hasDedicatedBuffer = dedicatedAllocInfoPtr &&
3249 (dedicatedAllocInfoPtr->buffer != VK_NULL_HANDLE);
3250 VkExtent3D imageExtent = { 0, 0, 0 };
3251 uint32_t imageLayers = 0;
3252 VkFormat imageFormat = VK_FORMAT_UNDEFINED;
3253 VkImageUsageFlags imageUsage = 0;
3254 VkImageCreateFlags imageCreateFlags = 0;
3255 VkDeviceSize bufferSize = 0;
3256 VkDeviceSize allocationInfoAllocSize =
3257 finalAllocInfo.allocationSize;
3258
3259 if (hasDedicatedImage) {
3260 AutoLock lock(mLock);
3261
3262 auto it = info_VkImage.find(
3263 dedicatedAllocInfoPtr->image);
3264 if (it == info_VkImage.end()) _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(VK_ERROR_INITIALIZATION_FAILED);
3265 const auto& info = it->second;
3266 const auto& imgCi = info.createInfo;
3267
3268 imageExtent = imgCi.extent;
3269 imageLayers = imgCi.arrayLayers;
3270 imageFormat = imgCi.format;
3271 imageUsage = imgCi.usage;
3272 imageCreateFlags = imgCi.flags;
3273 }
3274
3275 if (hasDedicatedBuffer) {
3276 AutoLock lock(mLock);
3277
3278 auto it = info_VkBuffer.find(
3279 dedicatedAllocInfoPtr->buffer);
3280 if (it == info_VkBuffer.end()) _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(VK_ERROR_INITIALIZATION_FAILED);
3281 const auto& info = it->second;
3282 const auto& bufCi = info.createInfo;
3283
3284 bufferSize = bufCi.size;
3285 }
3286
3287 VkResult ahbCreateRes =
3288 createAndroidHardwareBuffer(
3289 hasDedicatedImage,
3290 hasDedicatedBuffer,
3291 imageExtent,
3292 imageLayers,
3293 imageFormat,
3294 imageUsage,
3295 imageCreateFlags,
3296 bufferSize,
3297 allocationInfoAllocSize,
3298 &ahw);
3299
3300 if (ahbCreateRes != VK_SUCCESS) {
3301 _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(ahbCreateRes);
3302 }
3303 }
3304
3305 if (importAhb) {
3306 ahw = importAhbInfoPtr->buffer;
3307 // We still need to acquire the AHardwareBuffer.
3308 importAndroidHardwareBuffer(
3309 ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->grallocHelper(),
3310 importAhbInfoPtr, nullptr);
3311 }
3312
3313 if (ahw) {
3314 ALOGD("%s: Import AHardwareBuffer", __func__);
3315 importCbInfo.colorBuffer =
3316 ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->grallocHelper()->
3317 getHostHandle(AHardwareBuffer_getNativeHandle(ahw));
3318 vk_append_struct(&structChainIter, &importCbInfo);
3319 }
3320
3321 zx_handle_t vmo_handle = ZX_HANDLE_INVALID;
3322
3323 if (importBufferCollection) {
3324
3325 #ifdef VK_USE_PLATFORM_FUCHSIA
3326 auto collection = reinterpret_cast<
3327 fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(
3328 importBufferCollectionInfoPtr->collection);
3329 auto result = collection->WaitForBuffersAllocated();
3330 if (!result.ok() || result.Unwrap()->status != ZX_OK) {
3331 ALOGE("WaitForBuffersAllocated failed: %d %d", result.status(),
3332 GET_STATUS_SAFE(result, status));
3333 _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(VK_ERROR_INITIALIZATION_FAILED);
3334 }
3335 fuchsia_sysmem::wire::BufferCollectionInfo2& info =
3336 result.Unwrap()->buffer_collection_info;
3337 uint32_t index = importBufferCollectionInfoPtr->index;
3338         if (index >= info.buffer_count) {
3339             ALOGE("Invalid buffer index: %d (buffer count: %d)", index, info.buffer_count);
3340 _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(VK_ERROR_INITIALIZATION_FAILED);
3341 }
3342 vmo_handle = info.buffers[index].vmo.release();
3343 #endif
3344
3345 }
3346
3347 if (importVmo) {
3348 vmo_handle = importVmoInfoPtr->handle;
3349 }
3350
3351 #ifdef VK_USE_PLATFORM_FUCHSIA
3352 if (exportVmo) {
3353 bool hasDedicatedImage = dedicatedAllocInfoPtr &&
3354 (dedicatedAllocInfoPtr->image != VK_NULL_HANDLE);
3355 bool hasDedicatedBuffer =
3356 dedicatedAllocInfoPtr &&
3357 (dedicatedAllocInfoPtr->buffer != VK_NULL_HANDLE);
3358
3359 if (hasDedicatedImage && hasDedicatedBuffer) {
3360 ALOGE(
3361 "Invalid VkMemoryDedicatedAllocationInfo: At least one "
3362 "of image and buffer must be VK_NULL_HANDLE.");
3363 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3364 }
3365
3366 const VkImageCreateInfo* pImageCreateInfo = nullptr;
3367
3368 VkBufferConstraintsInfoFUCHSIA bufferConstraintsInfo = {
3369             .sType =
3370                 VK_STRUCTURE_TYPE_BUFFER_CONSTRAINTS_INFO_FUCHSIA,
3371 .pNext = nullptr,
3372 .pBufferCreateInfo = nullptr,
3373 .requiredFormatFeatures = 0,
3374 .minCount = 1,
3375 };
3376 const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo =
3377 nullptr;
3378
3379 if (hasDedicatedImage) {
3380 AutoLock lock(mLock);
3381
3382 auto it = info_VkImage.find(dedicatedAllocInfoPtr->image);
3383 if (it == info_VkImage.end()) return VK_ERROR_INITIALIZATION_FAILED;
3384 const auto& imageInfo = it->second;
3385
3386 pImageCreateInfo = &imageInfo.createInfo;
3387 }
3388
3389 if (hasDedicatedBuffer) {
3390 AutoLock lock(mLock);
3391
3392 auto it = info_VkBuffer.find(dedicatedAllocInfoPtr->buffer);
3393 if (it == info_VkBuffer.end())
3394 return VK_ERROR_INITIALIZATION_FAILED;
3395 const auto& bufferInfo = it->second;
3396
3397 bufferConstraintsInfo.pBufferCreateInfo =
3398 &bufferInfo.createInfo;
3399 pBufferConstraintsInfo = &bufferConstraintsInfo;
3400 }
3401
3402 hasDedicatedImage = hasDedicatedImage &&
3403 getBufferCollectionConstraintsVulkanImageUsage(
3404 pImageCreateInfo);
3405 hasDedicatedBuffer =
3406 hasDedicatedBuffer &&
3407 getBufferCollectionConstraintsVulkanBufferUsage(
3408 pBufferConstraintsInfo);
3409
3410 if (hasDedicatedImage || hasDedicatedBuffer) {
3411 auto token_ends =
3412 fidl::CreateEndpoints<::fuchsia_sysmem::BufferCollectionToken>();
3413 if (!token_ends.is_ok()) {
3414 ALOGE("zx_channel_create failed: %d", token_ends.status_value());
3415 abort();
3416 }
3417
3418 {
3419 auto result = mSysmemAllocator->AllocateSharedCollection(
3420 std::move(token_ends->server));
3421 if (!result.ok()) {
3422 ALOGE("AllocateSharedCollection failed: %d",
3423 result.status());
3424 abort();
3425 }
3426 }
3427
3428 auto collection_ends =
3429 fidl::CreateEndpoints<::fuchsia_sysmem::BufferCollection>();
3430 if (!collection_ends.is_ok()) {
3431 ALOGE("zx_channel_create failed: %d", collection_ends.status_value());
3432 abort();
3433 }
3434
3435 {
3436 auto result = mSysmemAllocator->BindSharedCollection(
3437 std::move(token_ends->client), std::move(collection_ends->server));
3438 if (!result.ok()) {
3439 ALOGE("BindSharedCollection failed: %d",
3440 result.status());
3441 abort();
3442 }
3443 }
3444
3445 fidl::WireSyncClient<fuchsia_sysmem::BufferCollection> collection(
3446 std::move(collection_ends->client));
3447 if (hasDedicatedImage) {
3448 VkResult res = setBufferCollectionConstraints(
3449 enc, device, &collection, pImageCreateInfo);
3450 if (res == VK_ERROR_FORMAT_NOT_SUPPORTED) {
3451 ALOGE("setBufferCollectionConstraints failed: format %u is not supported",
3452 pImageCreateInfo->format);
3453 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3454 }
3455 if (res != VK_SUCCESS) {
3456 ALOGE("setBufferCollectionConstraints failed: %d", res);
3457 abort();
3458 }
3459 }
3460
3461 if (hasDedicatedBuffer) {
3462 VkResult res = setBufferCollectionBufferConstraints(
3463 &collection, pBufferConstraintsInfo);
3464 if (res != VK_SUCCESS) {
3465 ALOGE("setBufferCollectionBufferConstraints failed: %d",
3466 res);
3467 abort();
3468 }
3469 }
3470
3471 {
3472 auto result = collection.WaitForBuffersAllocated();
3473 if (result.ok() && result.Unwrap()->status == ZX_OK) {
3474 fuchsia_sysmem::wire::BufferCollectionInfo2& info =
3475 result.Unwrap()->buffer_collection_info;
3476 if (!info.buffer_count) {
3477 ALOGE(
3478 "WaitForBuffersAllocated returned "
3479 "invalid count: %d",
3480 info.buffer_count);
3481 abort();
3482 }
3483 vmo_handle = info.buffers[0].vmo.release();
3484 } else {
3485 ALOGE("WaitForBuffersAllocated failed: %d %d",
3486 result.status(), GET_STATUS_SAFE(result, status));
3487 abort();
3488 }
3489 }
3490
3491 collection.Close();
3492
3493 zx::vmo vmo_copy;
3494 zx_status_t status = zx_handle_duplicate(vmo_handle, ZX_RIGHT_SAME_RIGHTS,
3495 vmo_copy.reset_and_get_address());
3496 if (status != ZX_OK) {
3497 ALOGE("Failed to duplicate VMO: %d", status);
3498 abort();
3499 }
3500
3501 bool isHostVisible = isHostVisibleMemoryTypeIndexForGuest(
3502 &mHostVisibleMemoryVirtInfo,
3503 pAllocateInfo->memoryTypeIndex);
3504
3505             // Only device-local images need a color buffer created here;
3506             // for host-visible images, the color buffer is already created
3507             // when sysmem allocates the memory.
3508 if (!isHostVisible) {
3509 if (pImageCreateInfo) {
3510 fuchsia_hardware_goldfish::wire::
3511 ColorBufferFormatType format;
3512 switch (pImageCreateInfo->format) {
3513 case VK_FORMAT_B8G8R8A8_SINT:
3514 case VK_FORMAT_B8G8R8A8_UNORM:
3515 case VK_FORMAT_B8G8R8A8_SRGB:
3516 case VK_FORMAT_B8G8R8A8_SNORM:
3517 case VK_FORMAT_B8G8R8A8_SSCALED:
3518 case VK_FORMAT_B8G8R8A8_USCALED:
3519 format = fuchsia_hardware_goldfish::wire::
3520 ColorBufferFormatType::kBgra;
3521 break;
3522 case VK_FORMAT_R8G8B8A8_SINT:
3523 case VK_FORMAT_R8G8B8A8_UNORM:
3524 case VK_FORMAT_R8G8B8A8_SRGB:
3525 case VK_FORMAT_R8G8B8A8_SNORM:
3526 case VK_FORMAT_R8G8B8A8_SSCALED:
3527 case VK_FORMAT_R8G8B8A8_USCALED:
3528 format = fuchsia_hardware_goldfish::wire::
3529 ColorBufferFormatType::kRgba;
3530 break;
3531 case VK_FORMAT_R8_UNORM:
3532 case VK_FORMAT_R8_UINT:
3533 case VK_FORMAT_R8_USCALED:
3534 case VK_FORMAT_R8_SNORM:
3535 case VK_FORMAT_R8_SINT:
3536 case VK_FORMAT_R8_SSCALED:
3537 case VK_FORMAT_R8_SRGB:
3538 format = fuchsia_hardware_goldfish::wire::
3539 ColorBufferFormatType::kLuminance;
3540 break;
3541 case VK_FORMAT_R8G8_UNORM:
3542 case VK_FORMAT_R8G8_UINT:
3543 case VK_FORMAT_R8G8_USCALED:
3544 case VK_FORMAT_R8G8_SNORM:
3545 case VK_FORMAT_R8G8_SINT:
3546 case VK_FORMAT_R8G8_SSCALED:
3547 case VK_FORMAT_R8G8_SRGB:
3548 format = fuchsia_hardware_goldfish::wire::
3549 ColorBufferFormatType::kRg;
3550 break;
3551 default:
3552 ALOGE("Unsupported format: %d",
3553 pImageCreateInfo->format);
3554 abort();
3555 }
3556
3557 fidl::FidlAllocator allocator;
3558 fuchsia_hardware_goldfish::wire::CreateColorBuffer2Params createParams(
3559 allocator);
3560 createParams.set_width(allocator, pImageCreateInfo->extent.width)
3561 .set_height(allocator, pImageCreateInfo->extent.height)
3562 .set_format(allocator, format)
3563 .set_memory_property(allocator,
3564 fuchsia_hardware_goldfish::wire::kMemoryPropertyDeviceLocal);
3565
3566 auto result = mControlDevice->CreateColorBuffer2(
3567 std::move(vmo_copy), std::move(createParams));
3568 if (!result.ok() || result.Unwrap()->res != ZX_OK) {
3569 if (result.ok() &&
3570 result.Unwrap()->res == ZX_ERR_ALREADY_EXISTS) {
3571 ALOGD(
3572 "CreateColorBuffer: color buffer already "
3573 "exists\n");
3574 } else {
3575 ALOGE("CreateColorBuffer failed: %d:%d",
3576 result.status(),
3577 GET_STATUS_SAFE(result, res));
3578 abort();
3579 }
3580 }
3581 }
3582 }
3583
3584 if (pBufferConstraintsInfo) {
3585 fidl::FidlAllocator allocator;
3586 fuchsia_hardware_goldfish::wire::CreateBuffer2Params createParams(allocator);
3587 createParams.set_size(allocator,
3588 pBufferConstraintsInfo->pBufferCreateInfo->size)
3589 .set_memory_property(allocator,
3590 fuchsia_hardware_goldfish::wire::kMemoryPropertyDeviceLocal);
3591
3592 auto result =
3593 mControlDevice->CreateBuffer2(std::move(vmo_copy), std::move(createParams));
3594 if (!result.ok() || result.Unwrap()->result.is_err()) {
3595 ALOGE("CreateBuffer2 failed: %d:%d", result.status(),
3596 GET_STATUS_SAFE(result, result.err()));
3597 abort();
3598 }
3599 }
3600 }
3601 }
3602
3603 if (vmo_handle != ZX_HANDLE_INVALID) {
3604 zx::vmo vmo_copy;
3605 zx_status_t status = zx_handle_duplicate(vmo_handle,
3606 ZX_RIGHT_SAME_RIGHTS,
3607 vmo_copy.reset_and_get_address());
3608 if (status != ZX_OK) {
3609 ALOGE("Failed to duplicate VMO: %d", status);
3610 abort();
3611 }
3613
3614 auto result = mControlDevice->GetBufferHandle(std::move(vmo_copy));
3615 if (!result.ok() || result.Unwrap()->res != ZX_OK) {
3616 ALOGE("GetBufferHandle failed: %d:%d", result.status(),
3617 GET_STATUS_SAFE(result, res));
3618 } else {
3619 fuchsia_hardware_goldfish::wire::BufferHandleType
3620 handle_type = result.Unwrap()->type;
3621 uint32_t buffer_handle = result.Unwrap()->id;
3622
3623 if (handle_type == fuchsia_hardware_goldfish::wire::
3624 BufferHandleType::kBuffer) {
3625 importBufferInfo.buffer = buffer_handle;
3626 vk_append_struct(&structChainIter, &importBufferInfo);
3627 } else {
3628 importCbInfo.colorBuffer = buffer_handle;
3629 vk_append_struct(&structChainIter, &importCbInfo);
3630 }
3631 }
3632 }
3633 #endif
3634
3635 if (!isHostVisibleMemoryTypeIndexForGuest(
3636 &mHostVisibleMemoryVirtInfo,
3637 finalAllocInfo.memoryTypeIndex)) {
3638 input_result =
3639 enc->vkAllocateMemory(
3640 device, &finalAllocInfo, pAllocator, pMemory, true /* do lock */);
3641
3642 if (input_result != VK_SUCCESS) _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(input_result);
3643
3644 VkDeviceSize allocationSize = finalAllocInfo.allocationSize;
3645 setDeviceMemoryInfo(
3646 device, *pMemory,
3647 finalAllocInfo.allocationSize,
3648 0, nullptr,
3649 finalAllocInfo.memoryTypeIndex,
3650 ahw,
3651 isImport,
3652 vmo_handle);
3653
3654             _RETURN_SUCCESS_WITH_DEVICE_MEMORY_REPORT;
3655 }
3656
3657         // Device-local memory handling ends here. Everything below deals
3658         // with host-visible memory.
3659
3660 if (ahw) {
3661 ALOGE("%s: Host visible export/import allocation "
3662 "of Android hardware buffers is not supported.",
3663 __func__);
3664 abort();
3665 }
3666
3667 #ifdef VK_USE_PLATFORM_FUCHSIA
3668 if (vmo_handle != ZX_HANDLE_INVALID) {
3669 input_result = enc->vkAllocateMemory(device, &finalAllocInfo, pAllocator, pMemory, true /* do lock */);
3670
3671 // Get VMO handle rights, and only use allowed rights to map the
3672 // host memory.
3673 zx_info_handle_basic handle_info;
3674 zx_status_t status = zx_object_get_info(vmo_handle, ZX_INFO_HANDLE_BASIC, &handle_info,
3675 sizeof(handle_info), nullptr, nullptr);
3676 if (status != ZX_OK) {
3677 ALOGE("%s: cannot get vmo object info: vmo = %u status: %d.", __func__, vmo_handle,
3678 status);
3679 return VK_ERROR_OUT_OF_HOST_MEMORY;
3680 }
3681
3682 zx_vm_option_t vm_permission = 0u;
3683 vm_permission |= (handle_info.rights & ZX_RIGHT_READ) ? ZX_VM_PERM_READ : 0;
3684 vm_permission |= (handle_info.rights & ZX_RIGHT_WRITE) ? ZX_VM_PERM_WRITE : 0;
3685
3686             zx_vaddr_t addr;
3687 status = zx_vmar_map(zx_vmar_root_self(), vm_permission, 0, vmo_handle, 0,
3688 finalAllocInfo.allocationSize, &addr);
3689 if (status != ZX_OK) {
3690 ALOGE("%s: cannot map vmar: status %d.", __func__, status);
3691 return VK_ERROR_OUT_OF_HOST_MEMORY;
3692 }
3693
3694 D("host visible alloc (external): "
3695 "size 0x%llx host ptr %p mapped size 0x%llx",
3696               (unsigned long long)finalAllocInfo.allocationSize,
3697               (void*)(uintptr_t)addr, (unsigned long long)finalAllocInfo.allocationSize);
3698 setDeviceMemoryInfo(device, *pMemory,
3699 finalAllocInfo.allocationSize, finalAllocInfo.allocationSize,
3700 reinterpret_cast<uint8_t*>(addr), finalAllocInfo.memoryTypeIndex,
3701 /*ahw=*/nullptr, isImport, vmo_handle);
3702 return VK_SUCCESS;
3703 }
3704 #endif
3705
3706 // Host visible memory, non external
3707 bool directMappingSupported = usingDirectMapping();
3708 if (!directMappingSupported) {
3709 input_result =
3710 enc->vkAllocateMemory(
3711 device, &finalAllocInfo, pAllocator, pMemory, true /* do lock */);
3712
3713 if (input_result != VK_SUCCESS) return input_result;
3714
3715 VkDeviceSize mappedSize =
3716 getNonCoherentExtendedSize(device,
3717 finalAllocInfo.allocationSize);
3718 uint8_t* mappedPtr = (uint8_t*)aligned_buf_alloc(4096, mappedSize);
3719 D("host visible alloc (non-direct): "
3720 "size 0x%llx host ptr %p mapped size 0x%llx",
3721 (unsigned long long)finalAllocInfo.allocationSize, mappedPtr,
3722 (unsigned long long)mappedSize);
3723 setDeviceMemoryInfo(
3724 device, *pMemory,
3725 finalAllocInfo.allocationSize,
3726 mappedSize, mappedPtr,
3727 finalAllocInfo.memoryTypeIndex);
3728             _RETURN_SUCCESS_WITH_DEVICE_MEMORY_REPORT;
3729 }
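        // Note on the non-direct path above: mappedPtr is ordinary guest heap
        // memory acting as a shadow of the host allocation. on_vkMapMemory
        // below hands out mappedPtr + offset, and guest writes are presumably
        // reconciled with the host via the flush/invalidate paths (not part
        // of this excerpt).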
3730
3731 // Host visible memory with direct mapping via
3732 // VkImportPhysicalAddressGOOGLE
3733 // if (importPhysAddr) {
3734 // vkAllocateMemory(device, &finalAllocInfo, pAllocator, pMemory);
3735 // host maps the host pointer to the guest physical address
3736 // TODO: the host side page offset of the
3737 // host pointer needs to be returned somehow.
3738 // }
3739
3740 // Host visible memory with direct mapping
3741 AutoLock lock(mLock);
3742
3743 auto it = info_VkDevice.find(device);
3744 if (it == info_VkDevice.end()) _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(VK_ERROR_DEVICE_LOST);
3745 auto& deviceInfo = it->second;
3746
3747 auto& hostMemBlocksForTypeIndex =
3748 deviceInfo.hostMemBlocks[finalAllocInfo.memoryTypeIndex];
3749
3750 HostMemBlockIndex blockIndex =
3751 getOrAllocateHostMemBlockLocked(
3752 hostMemBlocksForTypeIndex,
3753 &finalAllocInfo,
3754 enc,
3755 device,
3756 deviceInfo);
3757
3758 if (blockIndex == (HostMemBlockIndex) INVALID_HOST_MEM_BLOCK) {
3759 _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(VK_ERROR_OUT_OF_HOST_MEMORY);
3760 }
3761
3762 VkDeviceMemory_Info virtualMemInfo;
3763
3764 subAllocHostMemory(
3765 &hostMemBlocksForTypeIndex[blockIndex],
3766 &finalAllocInfo,
3767 &virtualMemInfo.subAlloc);
3768
3769 virtualMemInfo.allocationSize = virtualMemInfo.subAlloc.subAllocSize;
3770 virtualMemInfo.mappedSize = virtualMemInfo.subAlloc.subMappedSize;
3771 virtualMemInfo.mappedPtr = virtualMemInfo.subAlloc.mappedPtr;
3772 virtualMemInfo.memoryTypeIndex = finalAllocInfo.memoryTypeIndex;
3773 virtualMemInfo.directMapped = true;
3774
3775 D("host visible alloc (direct, suballoc): "
3776 "size 0x%llx ptr %p mapped size 0x%llx",
3777 (unsigned long long)virtualMemInfo.allocationSize, virtualMemInfo.mappedPtr,
3778 (unsigned long long)virtualMemInfo.mappedSize);
3779
3780 info_VkDeviceMemory[
3781 virtualMemInfo.subAlloc.subMemory] = virtualMemInfo;
3782
3783 *pMemory = virtualMemInfo.subAlloc.subMemory;
3784
3785         _RETURN_SUCCESS_WITH_DEVICE_MEMORY_REPORT;
3786 }
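    // Illustrative consequence of the suballocation scheme above: two small
    // vkAllocateMemory calls (say, 4 KiB each) against the same host-visible
    // memory type may be served from one shared host memory block. Each call
    // still returns a distinct VkDeviceMemory (subAlloc.subMemory), and
    // freeing one only releases its sub-range (see subFreeHostMemory in
    // on_vkFreeMemory below).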
3787
3788     void on_vkFreeMemory(
3789 void* context,
3790 VkDevice device,
3791 VkDeviceMemory memory,
3792 const VkAllocationCallbacks* pAllocateInfo) {
3793
3794 AutoLock lock(mLock);
3795
3796 auto it = info_VkDeviceMemory.find(memory);
3797 if (it == info_VkDeviceMemory.end()) return;
3798 auto& info = it->second;
3799 uint64_t memoryObjectId = (uint64_t)(void*)memory;
3800 if (info.ahw) {
3801 memoryObjectId = getAHardwareBufferId(info.ahw);
3802 }
3803 emitDeviceMemoryReport(
3804 info_VkDevice[device],
3805 info.imported ? VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_UNIMPORT_EXT
3806 : VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_FREE_EXT,
3807 memoryObjectId,
3808 0 /* size */,
3809 VK_OBJECT_TYPE_DEVICE_MEMORY,
3810 (uint64_t)(void*)memory
3811 );
3812
3813 #ifdef VK_USE_PLATFORM_FUCHSIA
3814 if (info.vmoHandle && info.mappedPtr) {
3815 zx_status_t status = zx_vmar_unmap(
3816                 zx_vmar_root_self(), reinterpret_cast<zx_vaddr_t>(info.mappedPtr), info.mappedSize);
3817 if (status != ZX_OK) {
3818 ALOGE("%s: Cannot unmap mappedPtr: status %d", status);
3819 }
3820 info.mappedPtr = nullptr;
3821 }
3822 #endif
3823
3824 if (!info.directMapped) {
3825 lock.unlock();
3826 VkEncoder* enc = (VkEncoder*)context;
3827 enc->vkFreeMemory(device, memory, pAllocateInfo, true /* do lock */);
3828 return;
3829 }
3830
3831 subFreeHostMemory(&info.subAlloc);
3832 }
3833
3834     VkResult on_vkMapMemory(
3835 void*,
3836 VkResult host_result,
3837 VkDevice,
3838 VkDeviceMemory memory,
3839 VkDeviceSize offset,
3840 VkDeviceSize size,
3841 VkMemoryMapFlags,
3842 void** ppData) {
3843
3844 if (host_result != VK_SUCCESS) return host_result;
3845
3846 AutoLock lock(mLock);
3847
3848 auto it = info_VkDeviceMemory.find(memory);
3849 if (it == info_VkDeviceMemory.end()) return VK_ERROR_MEMORY_MAP_FAILED;
3850
3851 auto& info = it->second;
3852
3853 if (!info.mappedPtr) return VK_ERROR_MEMORY_MAP_FAILED;
3854
3855         if (size != VK_WHOLE_SIZE &&
3856             (offset + size > info.allocationSize)) {
3857 return VK_ERROR_MEMORY_MAP_FAILED;
3858 }
3859
3860 *ppData = info.mappedPtr + offset;
3861
3862 return host_result;
3863 }
3864
3865     void on_vkUnmapMemory(
3866 void*,
3867 VkDevice,
3868 VkDeviceMemory) {
3869 // no-op
3870 }
3871
3872     uint32_t transformNonExternalResourceMemoryTypeBitsForGuest(
3873 uint32_t hostBits) {
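        // Currently an identity transform: every host memory type bit is
        // passed through unchanged. It is kept, it seems, as the single hook
        // for any future guest-side remapping of non-external memory type
        // bits.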
3874 uint32_t res = 0;
3875 for (uint32_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i) {
3876 if (hostBits & (1 << i)) {
3877 res |= (1 << i);
3878 }
3879 }
3880 return res;
3881 }
3882
3883     uint32_t transformExternalResourceMemoryTypeBitsForGuest(
3884 uint32_t normalBits) {
3885 uint32_t res = 0;
3886 for (uint32_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i) {
3887 bool shouldAcceptMemoryIndex = normalBits & (1 << i);
3888 #ifndef VK_USE_PLATFORM_FUCHSIA
3889 shouldAcceptMemoryIndex &= !isHostVisibleMemoryTypeIndexForGuest(
3890 &mHostVisibleMemoryVirtInfo, i);
3891 #endif // VK_USE_PLATFORM_FUCHSIA
3892
3893 if (shouldAcceptMemoryIndex) {
3894 res |= (1 << i);
3895 }
3896 }
3897 return res;
3898 }
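    // Worked example: if normalBits == 0b0111 (host memory types 0, 1, 2)
    // and, on a non-Fuchsia build, type 1 is a host-visible type virtualized
    // for the guest, the loop clears bit 1 and returns 0b0101 -- external
    // resources are never allowed to land in a guest-virtualized
    // host-visible memory type.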
3899
3900     void transformNonExternalResourceMemoryRequirementsForGuest(
3901 VkMemoryRequirements* reqs) {
3902 reqs->memoryTypeBits =
3903 transformNonExternalResourceMemoryTypeBitsForGuest(
3904 reqs->memoryTypeBits);
3905 }
3906
3907     void transformExternalResourceMemoryRequirementsForGuest(
3908 VkMemoryRequirements* reqs) {
3909 reqs->memoryTypeBits =
3910 transformExternalResourceMemoryTypeBitsForGuest(
3911 reqs->memoryTypeBits);
3912 }
3913
3914     void transformExternalResourceMemoryDedicatedRequirementsForGuest(
3915 VkMemoryDedicatedRequirements* dedicatedReqs) {
3916 dedicatedReqs->prefersDedicatedAllocation = VK_TRUE;
3917 dedicatedReqs->requiresDedicatedAllocation = VK_TRUE;
3918 }
3919
3920     void transformImageMemoryRequirementsForGuestLocked(
3921 VkImage image,
3922 VkMemoryRequirements* reqs) {
3923
3924 auto it = info_VkImage.find(image);
3925 if (it == info_VkImage.end()) return;
3926
3927 auto& info = it->second;
3928
3929 if (!info.external ||
3930 !info.externalCreateInfo.handleTypes) {
3931 transformNonExternalResourceMemoryRequirementsForGuest(reqs);
3932 } else {
3933 transformExternalResourceMemoryRequirementsForGuest(reqs);
3934 }
3935 setMemoryRequirementsForSysmemBackedImage(image, reqs);
3936 }
3937
3938     void transformBufferMemoryRequirementsForGuestLocked(
3939 VkBuffer buffer,
3940 VkMemoryRequirements* reqs) {
3941
3942 auto it = info_VkBuffer.find(buffer);
3943 if (it == info_VkBuffer.end()) return;
3944
3945 auto& info = it->second;
3946
3947 if (!info.external ||
3948 !info.externalCreateInfo.handleTypes) {
3949 transformNonExternalResourceMemoryRequirementsForGuest(reqs);
3950 return;
3951 }
3952
3953 transformExternalResourceMemoryRequirementsForGuest(reqs);
3954 }
3955
3956     void transformImageMemoryRequirements2ForGuest(
3957 VkImage image,
3958 VkMemoryRequirements2* reqs2) {
3959
3960 AutoLock lock(mLock);
3961
3962 auto it = info_VkImage.find(image);
3963 if (it == info_VkImage.end()) return;
3964
3965 auto& info = it->second;
3966
3967 if (!info.external ||
3968 !info.externalCreateInfo.handleTypes) {
3969 transformNonExternalResourceMemoryRequirementsForGuest(
3970 &reqs2->memoryRequirements);
3971 setMemoryRequirementsForSysmemBackedImage(image, &reqs2->memoryRequirements);
3972 return;
3973 }
3974
3975 transformExternalResourceMemoryRequirementsForGuest(&reqs2->memoryRequirements);
3976
3977 setMemoryRequirementsForSysmemBackedImage(image, &reqs2->memoryRequirements);
3978
3979 VkMemoryDedicatedRequirements* dedicatedReqs =
3980 vk_find_struct<VkMemoryDedicatedRequirements>(reqs2);
3981
3982 if (!dedicatedReqs) return;
3983
3984 transformExternalResourceMemoryDedicatedRequirementsForGuest(
3985 dedicatedReqs);
3986 }
3987
3988     void transformBufferMemoryRequirements2ForGuest(
3989 VkBuffer buffer,
3990 VkMemoryRequirements2* reqs2) {
3991
3992 AutoLock lock(mLock);
3993
3994 auto it = info_VkBuffer.find(buffer);
3995 if (it == info_VkBuffer.end()) return;
3996
3997 auto& info = it->second;
3998
3999 if (!info.external ||
4000 !info.externalCreateInfo.handleTypes) {
4001 transformNonExternalResourceMemoryRequirementsForGuest(
4002 &reqs2->memoryRequirements);
4003 return;
4004 }
4005
4006 transformExternalResourceMemoryRequirementsForGuest(&reqs2->memoryRequirements);
4007
4008 VkMemoryDedicatedRequirements* dedicatedReqs =
4009 vk_find_struct<VkMemoryDedicatedRequirements>(reqs2);
4010
4011 if (!dedicatedReqs) return;
4012
4013 transformExternalResourceMemoryDedicatedRequirementsForGuest(
4014 dedicatedReqs);
4015 }
4016
4017     VkResult on_vkCreateImage(
4018 void* context, VkResult,
4019 VkDevice device, const VkImageCreateInfo *pCreateInfo,
4020 const VkAllocationCallbacks *pAllocator,
4021 VkImage *pImage) {
4022 VkEncoder* enc = (VkEncoder*)context;
4023
4024 VkImageCreateInfo localCreateInfo = vk_make_orphan_copy(*pCreateInfo);
4025 vk_struct_chain_iterator structChainIter = vk_make_chain_iterator(&localCreateInfo);
4026 VkExternalMemoryImageCreateInfo localExtImgCi;
4027
4028 const VkExternalMemoryImageCreateInfo* extImgCiPtr =
4029 vk_find_struct<VkExternalMemoryImageCreateInfo>(pCreateInfo);
4030 if (extImgCiPtr) {
4031 localExtImgCi = vk_make_orphan_copy(*extImgCiPtr);
4032 vk_append_struct(&structChainIter, &localExtImgCi);
4033 }
4034
4035 #ifdef VK_USE_PLATFORM_ANDROID_KHR
4036 VkNativeBufferANDROID localAnb;
4037 const VkNativeBufferANDROID* anbInfoPtr =
4038 vk_find_struct<VkNativeBufferANDROID>(pCreateInfo);
4039 if (anbInfoPtr) {
4040 localAnb = vk_make_orphan_copy(*anbInfoPtr);
4041 vk_append_struct(&structChainIter, &localAnb);
4042 }
4043
4044 VkExternalFormatANDROID localExtFormatAndroid;
4045 const VkExternalFormatANDROID* extFormatAndroidPtr =
4046 vk_find_struct<VkExternalFormatANDROID>(pCreateInfo);
4047 if (extFormatAndroidPtr) {
4048 localExtFormatAndroid = vk_make_orphan_copy(*extFormatAndroidPtr);
4049
4050 // Do not append external format android;
4051 // instead, replace the local image localCreateInfo format
4052 // with the corresponding Vulkan format
4053 if (extFormatAndroidPtr->externalFormat) {
4054 localCreateInfo.format =
4055 vk_format_from_android(extFormatAndroidPtr->externalFormat);
4056 if (localCreateInfo.format == VK_FORMAT_UNDEFINED)
4057 return VK_ERROR_VALIDATION_FAILED_EXT;
4058 }
4059 }
4060 #endif
4061
4062 #ifdef VK_USE_PLATFORM_FUCHSIA
4063 const VkBufferCollectionImageCreateInfoFUCHSIA* extBufferCollectionPtr =
4064 vk_find_struct<VkBufferCollectionImageCreateInfoFUCHSIA>(pCreateInfo);
4065 bool isSysmemBackedMemory = false;
4066
4067 if (extImgCiPtr &&
4068 ((extImgCiPtr->handleTypes &
4069 VK_EXTERNAL_MEMORY_HANDLE_TYPE_TEMP_ZIRCON_VMO_BIT_FUCHSIA) ||
4070 (extImgCiPtr->handleTypes &
4071 VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA))) {
4072 isSysmemBackedMemory = true;
4073 }
4074
4075 if (extBufferCollectionPtr) {
4076 auto collection = reinterpret_cast<
4077 fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(
4078 extBufferCollectionPtr->collection);
4079 uint32_t index = extBufferCollectionPtr->index;
4080 zx::vmo vmo;
4081
4082 fuchsia_sysmem::wire::BufferCollectionInfo2 info;
4083
4084 auto result = collection->WaitForBuffersAllocated();
4085 if (result.ok() && result.Unwrap()->status == ZX_OK) {
4086 info = std::move(result.Unwrap()->buffer_collection_info);
4087 if (index < info.buffer_count && info.settings.has_image_format_constraints) {
4088 vmo = std::move(info.buffers[index].vmo);
4089 }
4090 } else {
4091 ALOGE("WaitForBuffersAllocated failed: %d %d", result.status(),
4092 GET_STATUS_SAFE(result, status));
4093 }
4094
4095 if (vmo.is_valid()) {
4096 zx::vmo vmo_dup;
4097 if (zx_status_t status = vmo.duplicate(ZX_RIGHT_SAME_RIGHTS, &vmo_dup);
4098 status != ZX_OK) {
4099 ALOGE("%s: zx_vmo_duplicate failed: %d", __func__, status);
4100 abort();
4101 }
4102
4103 auto buffer_handle_result = mControlDevice->GetBufferHandle(std::move(vmo_dup));
4104 if (!buffer_handle_result.ok()) {
4105 ALOGE("%s: GetBufferHandle FIDL error: %d", __func__,
4106 buffer_handle_result.status());
4107 abort();
4108 }
4109 if (buffer_handle_result.value().res == ZX_OK) {
4110                 // Buffer handle already exists. If it is a ColorBuffer,
4111                 // this is a no-op; otherwise, return an error.
4112 if (buffer_handle_result.value().type !=
4113 fuchsia_hardware_goldfish::wire::BufferHandleType::kColorBuffer) {
4114 ALOGE("%s: BufferHandle %u is not a ColorBuffer", __func__,
4115 buffer_handle_result.value().id);
4116 return VK_ERROR_OUT_OF_HOST_MEMORY;
4117 }
4118 } else if (buffer_handle_result.value().res == ZX_ERR_NOT_FOUND) {
4119 // Buffer handle not found. Create ColorBuffer based on buffer settings.
4120 auto format =
4121 info.settings.image_format_constraints.pixel_format.type ==
4122 fuchsia_sysmem::wire::PixelFormatType::kR8G8B8A8
4123 ? fuchsia_hardware_goldfish::wire::ColorBufferFormatType::kRgba
4124 : fuchsia_hardware_goldfish::wire::ColorBufferFormatType::kBgra;
4125
4126 uint32_t memory_property =
4127 info.settings.buffer_settings.heap ==
4128 fuchsia_sysmem::wire::HeapType::kGoldfishDeviceLocal
4129 ? fuchsia_hardware_goldfish::wire::kMemoryPropertyDeviceLocal
4130 : fuchsia_hardware_goldfish::wire::kMemoryPropertyHostVisible;
4131
4132 fidl::FidlAllocator allocator;
4133 fuchsia_hardware_goldfish::wire::CreateColorBuffer2Params createParams(
4134 allocator);
4135 createParams.set_width(allocator,
4136 info.settings.image_format_constraints.min_coded_width)
4137 .set_height(allocator,
4138 info.settings.image_format_constraints.min_coded_height)
4139 .set_format(allocator, format)
4140 .set_memory_property(allocator, memory_property);
4141
4142 auto result =
4143 mControlDevice->CreateColorBuffer2(std::move(vmo), std::move(createParams));
4144 if (!result.ok() || result.Unwrap()->res != ZX_OK) {
4145 ALOGE("CreateColorBuffer failed: %d:%d", result.status(),
4146 GET_STATUS_SAFE(result, res));
4147 }
4148 }
4149
4150 if (info.settings.buffer_settings.heap ==
4151 fuchsia_sysmem::wire::HeapType::kGoldfishHostVisible) {
4152 ALOGD(
4153 "%s: Image uses host visible memory heap; set tiling "
4154 "to linear to match host ImageCreateInfo",
4155 __func__);
4156 localCreateInfo.tiling = VK_IMAGE_TILING_LINEAR;
4157 }
4158 }
4159 isSysmemBackedMemory = true;
4160 }
4161
4162 if (isSysmemBackedMemory) {
4163 localCreateInfo.flags |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
4164 }
4165 #endif
4166
4167 VkResult res;
4168 VkMemoryRequirements memReqs;
4169
4170 if (supportsCreateResourcesWithRequirements()) {
4171 res = enc->vkCreateImageWithRequirementsGOOGLE(device, &localCreateInfo, pAllocator, pImage, &memReqs, true /* do lock */);
4172 } else {
4173 res = enc->vkCreateImage(device, &localCreateInfo, pAllocator, pImage, true /* do lock */);
4174 }
4175
4176 if (res != VK_SUCCESS) return res;
4177
4178 AutoLock lock(mLock);
4179
4180 auto it = info_VkImage.find(*pImage);
4181 if (it == info_VkImage.end()) return VK_ERROR_INITIALIZATION_FAILED;
4182
4183 auto& info = it->second;
4184
4185 info.device = device;
4186 info.createInfo = *pCreateInfo;
4187 info.createInfo.pNext = nullptr;
4188
4189 if (supportsCreateResourcesWithRequirements()) {
4190 info.baseRequirementsKnown = true;
4191 }
4192
4193 if (extImgCiPtr) {
4194 info.external = true;
4195 info.externalCreateInfo = *extImgCiPtr;
4196 }
4197
4198 #ifdef VK_USE_PLATFORM_FUCHSIA
4199 if (isSysmemBackedMemory) {
4200 info.isSysmemBackedMemory = true;
4201 }
4202 #endif
4203
4204 if (info.baseRequirementsKnown) {
4205 transformImageMemoryRequirementsForGuestLocked(*pImage, &memReqs);
4206 info.baseRequirements = memReqs;
4207 }
4208 return res;
4209 }
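    // Illustrative app-side chain handled above (Android): creating an image
    // against an AHardwareBuffer external format, where `externalFormat` is
    // the value reported by vkGetAndroidHardwareBufferPropertiesANDROID:
    //
    //   VkExternalFormatANDROID extFormat = {
    //       VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID, nullptr, externalFormat };
    //   VkExternalMemoryImageCreateInfo extMemCi = {
    //       VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO, &extFormat,
    //       VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID };
    //   // chain extMemCi into VkImageCreateInfo::pNext, then vkCreateImage.
    //
    // on_vkCreateImage strips VkExternalFormatANDROID and substitutes the
    // corresponding Vulkan format before encoding the call to the host.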
4210
4211     VkResult on_vkCreateSamplerYcbcrConversion(
4212 void* context, VkResult,
4213 VkDevice device,
4214 const VkSamplerYcbcrConversionCreateInfo* pCreateInfo,
4215 const VkAllocationCallbacks* pAllocator,
4216 VkSamplerYcbcrConversion* pYcbcrConversion) {
4217
4218 VkSamplerYcbcrConversionCreateInfo localCreateInfo = vk_make_orphan_copy(*pCreateInfo);
4219
4220 #ifdef VK_USE_PLATFORM_ANDROID_KHR
4221 const VkExternalFormatANDROID* extFormatAndroidPtr =
4222 vk_find_struct<VkExternalFormatANDROID>(pCreateInfo);
4223 if (extFormatAndroidPtr) {
4224 if (extFormatAndroidPtr->externalFormat == AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM) {
4225 // We don't support external formats on host and it causes RGB565
4226 // to fail in CtsGraphicsTestCases android.graphics.cts.BasicVulkanGpuTest
4227 // when passed as an external format.
4228 // We may consider doing this for all external formats.
4229 // See b/134771579.
4230 *pYcbcrConversion = VK_YCBCR_CONVERSION_DO_NOTHING;
4231 return VK_SUCCESS;
4232 } else if (extFormatAndroidPtr->externalFormat) {
4233 localCreateInfo.format =
4234 vk_format_from_android(extFormatAndroidPtr->externalFormat);
4235 }
4236 }
4237 #endif
4238
4239 VkEncoder* enc = (VkEncoder*)context;
4240 VkResult res = enc->vkCreateSamplerYcbcrConversion(
4241 device, &localCreateInfo, pAllocator, pYcbcrConversion, true /* do lock */);
4242
4243 if (*pYcbcrConversion == VK_YCBCR_CONVERSION_DO_NOTHING) {
4244 ALOGE("FATAL: vkCreateSamplerYcbcrConversion returned a reserved value (VK_YCBCR_CONVERSION_DO_NOTHING)");
4245 abort();
4246 }
4247 return res;
4248 }
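    // Note on the VK_YCBCR_CONVERSION_DO_NOTHING sentinel: for RGB565 the
    // call never reaches the host and the sentinel is handed back to the
    // app. on_vkCreateSampler below drops the VkSamplerYcbcrConversionInfo
    // chain entry when it carries the sentinel, and the destroy hooks skip
    // the host call for it. The abort() above guards against the host ever
    // returning a real handle that collides with the sentinel value.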
4249
4250     void on_vkDestroySamplerYcbcrConversion(
4251 void* context,
4252 VkDevice device,
4253 VkSamplerYcbcrConversion ycbcrConversion,
4254 const VkAllocationCallbacks* pAllocator) {
4255 VkEncoder* enc = (VkEncoder*)context;
4256 if (ycbcrConversion != VK_YCBCR_CONVERSION_DO_NOTHING) {
4257 enc->vkDestroySamplerYcbcrConversion(device, ycbcrConversion, pAllocator, true /* do lock */);
4258 }
4259 }
4260
4261     VkResult on_vkCreateSamplerYcbcrConversionKHR(
4262 void* context, VkResult,
4263 VkDevice device,
4264 const VkSamplerYcbcrConversionCreateInfo* pCreateInfo,
4265 const VkAllocationCallbacks* pAllocator,
4266 VkSamplerYcbcrConversion* pYcbcrConversion) {
4267
4268 VkSamplerYcbcrConversionCreateInfo localCreateInfo = vk_make_orphan_copy(*pCreateInfo);
4269
4270 #ifdef VK_USE_PLATFORM_ANDROID_KHR
4271 const VkExternalFormatANDROID* extFormatAndroidPtr =
4272 vk_find_struct<VkExternalFormatANDROID>(pCreateInfo);
4273 if (extFormatAndroidPtr) {
4274 if (extFormatAndroidPtr->externalFormat == AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM) {
4275 // We don't support external formats on host and it causes RGB565
4276 // to fail in CtsGraphicsTestCases android.graphics.cts.BasicVulkanGpuTest
4277 // when passed as an external format.
4278 // We may consider doing this for all external formats.
4279 // See b/134771579.
4280 *pYcbcrConversion = VK_YCBCR_CONVERSION_DO_NOTHING;
4281 return VK_SUCCESS;
4282 } else if (extFormatAndroidPtr->externalFormat) {
4283 localCreateInfo.format =
4284 vk_format_from_android(extFormatAndroidPtr->externalFormat);
4285 }
4286 }
4287 #endif
4288
4289 VkEncoder* enc = (VkEncoder*)context;
4290 VkResult res = enc->vkCreateSamplerYcbcrConversionKHR(
4291 device, &localCreateInfo, pAllocator, pYcbcrConversion, true /* do lock */);
4292
4293 if (*pYcbcrConversion == VK_YCBCR_CONVERSION_DO_NOTHING) {
4294 ALOGE("FATAL: vkCreateSamplerYcbcrConversionKHR returned a reserved value (VK_YCBCR_CONVERSION_DO_NOTHING)");
4295 abort();
4296 }
4297 return res;
4298 }
4299
4300     void on_vkDestroySamplerYcbcrConversionKHR(
4301 void* context,
4302 VkDevice device,
4303 VkSamplerYcbcrConversion ycbcrConversion,
4304 const VkAllocationCallbacks* pAllocator) {
4305 VkEncoder* enc = (VkEncoder*)context;
4306 if (ycbcrConversion != VK_YCBCR_CONVERSION_DO_NOTHING) {
4307 enc->vkDestroySamplerYcbcrConversionKHR(device, ycbcrConversion, pAllocator, true /* do lock */);
4308 }
4309 }
4310
4311     VkResult on_vkCreateSampler(
4312 void* context, VkResult,
4313 VkDevice device,
4314 const VkSamplerCreateInfo* pCreateInfo,
4315 const VkAllocationCallbacks* pAllocator,
4316 VkSampler* pSampler) {
4317
4318 VkSamplerCreateInfo localCreateInfo = vk_make_orphan_copy(*pCreateInfo);
4319 vk_struct_chain_iterator structChainIter = vk_make_chain_iterator(&localCreateInfo);
4320
4321 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(VK_USE_PLATFORM_FUCHSIA)
4322 VkSamplerYcbcrConversionInfo localVkSamplerYcbcrConversionInfo;
4323 const VkSamplerYcbcrConversionInfo* samplerYcbcrConversionInfo =
4324 vk_find_struct<VkSamplerYcbcrConversionInfo>(pCreateInfo);
4325 if (samplerYcbcrConversionInfo) {
4326 if (samplerYcbcrConversionInfo->conversion != VK_YCBCR_CONVERSION_DO_NOTHING) {
4327 localVkSamplerYcbcrConversionInfo = vk_make_orphan_copy(*samplerYcbcrConversionInfo);
4328 vk_append_struct(&structChainIter, &localVkSamplerYcbcrConversionInfo);
4329 }
4330 }
4331 #endif
4332
4333 VkEncoder* enc = (VkEncoder*)context;
4334 return enc->vkCreateSampler(device, &localCreateInfo, pAllocator, pSampler, true /* do lock */);
4335 }
4336
4337     void on_vkGetPhysicalDeviceExternalFenceProperties(
4338 void* context,
4339 VkPhysicalDevice physicalDevice,
4340 const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo,
4341 VkExternalFenceProperties* pExternalFenceProperties) {
4342
4343 (void)context;
4344 (void)physicalDevice;
4345
4346 pExternalFenceProperties->exportFromImportedHandleTypes = 0;
4347 pExternalFenceProperties->compatibleHandleTypes = 0;
4348 pExternalFenceProperties->externalFenceFeatures = 0;
4349
4350 bool syncFd =
4351 pExternalFenceInfo->handleType &
4352 VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
4353
4354 if (!syncFd) {
4355 return;
4356 }
4357
4358 #ifdef VK_USE_PLATFORM_ANDROID_KHR
4359 pExternalFenceProperties->exportFromImportedHandleTypes =
4360 VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
4361 pExternalFenceProperties->compatibleHandleTypes =
4362 VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
4363 pExternalFenceProperties->externalFenceFeatures =
4364 VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT |
4365 VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT;
4366
4367 ALOGD("%s: asked for sync fd, set the features\n", __func__);
4368 #endif
4369 }
4370
4371     VkResult on_vkCreateFence(
4372 void* context,
4373 VkResult input_result,
4374 VkDevice device,
4375 const VkFenceCreateInfo* pCreateInfo,
4376 const VkAllocationCallbacks* pAllocator, VkFence* pFence) {
4377
4378 VkEncoder* enc = (VkEncoder*)context;
4379 VkFenceCreateInfo finalCreateInfo = *pCreateInfo;
4380
4381 const VkExportFenceCreateInfo* exportFenceInfoPtr =
4382 vk_find_struct<VkExportFenceCreateInfo>(pCreateInfo);
4383
4384 #ifdef VK_USE_PLATFORM_ANDROID_KHR
4385 bool exportSyncFd =
4386 exportFenceInfoPtr &&
4387 (exportFenceInfoPtr->handleTypes &
4388 VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT);
4389
4390 if (exportSyncFd) {
4391 ALOGV("%s: exporting sync fd, do not send pNext to host\n", __func__);
4392 finalCreateInfo.pNext = nullptr;
4393 }
4394 #endif
4395
4396 input_result = enc->vkCreateFence(
4397 device, &finalCreateInfo, pAllocator, pFence, true /* do lock */);
4398
4399 if (input_result != VK_SUCCESS) return input_result;
4400
4401 #ifdef VK_USE_PLATFORM_ANDROID_KHR
4402 if (exportSyncFd) {
4403 if (!mFeatureInfo->hasVirtioGpuNativeSync) {
4404 ALOGV("%s: ensure sync device\n", __func__);
4405 ensureSyncDeviceFd();
4406 }
4407
4408 ALOGV("%s: getting fence info\n", __func__);
4409 AutoLock lock(mLock);
4410 auto it = info_VkFence.find(*pFence);
4411
4412 if (it == info_VkFence.end())
4413 return VK_ERROR_INITIALIZATION_FAILED;
4414
4415 auto& info = it->second;
4416
4417 info.external = true;
4418 info.exportFenceCreateInfo = *exportFenceInfoPtr;
4419 ALOGV("%s: info set (fence still -1). fence: %p\n", __func__, (void*)(*pFence));
4420             // syncFd is still -1 because we expect the user to explicitly
4421             // export it via vkGetFenceFdKHR.
4422 }
4423 #endif
4424
4425 return input_result;
4426 }
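    // Minimal app-side sketch (illustrative) of the sync fd export flow set
    // up above:
    //
    //   VkExportFenceCreateInfo exportInfo = {
    //       VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO, nullptr,
    //       VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT };
    //   VkFenceCreateInfo createInfo = {
    //       VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, &exportInfo, 0 };
    //   vkCreateFence(device, &createInfo, nullptr, &fence);
    //
    // The underlying fd stays -1 until the app calls vkGetFenceFdKHR
    // (below), typically after submitting work that will signal the fence.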
4427
4428     void on_vkDestroyFence(
4429 void* context,
4430 VkDevice device,
4431 VkFence fence,
4432 const VkAllocationCallbacks* pAllocator) {
4433 VkEncoder* enc = (VkEncoder*)context;
4434 enc->vkDestroyFence(device, fence, pAllocator, true /* do lock */);
4435 }
4436
4437     VkResult on_vkResetFences(
4438 void* context,
4439 VkResult,
4440 VkDevice device,
4441 uint32_t fenceCount,
4442 const VkFence* pFences) {
4443
4444 VkEncoder* enc = (VkEncoder*)context;
4445 VkResult res = enc->vkResetFences(device, fenceCount, pFences, true /* do lock */);
4446
4447 if (res != VK_SUCCESS) return res;
4448
4449 if (!fenceCount) return res;
4450
4451 // Permanence: temporary
4452 // on fence reset, close the fence fd
4453 // and act like we need to GetFenceFdKHR/ImportFenceFdKHR again
4454 AutoLock lock(mLock);
4455 for (uint32_t i = 0; i < fenceCount; ++i) {
4456 VkFence fence = pFences[i];
4457 auto it = info_VkFence.find(fence);
            if (it == info_VkFence.end()) continue;
4458             auto& info = it->second;
4459 if (!info.external) continue;
4460
4461 #ifdef VK_USE_PLATFORM_ANDROID_KHR
4462 if (info.syncFd >= 0) {
4463 ALOGV("%s: resetting fence. make fd -1\n", __func__);
4464 goldfish_sync_signal(info.syncFd);
4465 close(info.syncFd);
4466 info.syncFd = -1;
4467 }
4468 #endif
4469 }
4470
4471 return res;
4472 }
4473
4474     VkResult on_vkImportFenceFdKHR(
4475 void* context,
4476 VkResult,
4477 VkDevice device,
4478 const VkImportFenceFdInfoKHR* pImportFenceFdInfo) {
4479
4480 (void)context;
4481 (void)device;
4482 (void)pImportFenceFdInfo;
4483
4484 // Transference: copy
4485 // meaning dup() the incoming fd
4486
4487 VkEncoder* enc = (VkEncoder*)context;
4488
4489 bool hasFence = pImportFenceFdInfo->fence != VK_NULL_HANDLE;
4490
4491 if (!hasFence) return VK_ERROR_OUT_OF_HOST_MEMORY;
4492
4493 #ifdef VK_USE_PLATFORM_ANDROID_KHR
4494
4495 bool syncFdImport =
4496 pImportFenceFdInfo->handleType & VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
4497
4498 if (!syncFdImport) {
4499 ALOGV("%s: VK_ERROR_OUT_OF_HOST_MEMORY: no sync fd import\n", __func__);
4500 return VK_ERROR_OUT_OF_HOST_MEMORY;
4501 }
4502
4503 AutoLock lock(mLock);
4504 auto it = info_VkFence.find(pImportFenceFdInfo->fence);
4505 if (it == info_VkFence.end()) {
4506 ALOGV("%s: VK_ERROR_OUT_OF_HOST_MEMORY: no fence info\n", __func__);
4507 return VK_ERROR_OUT_OF_HOST_MEMORY;
4508 }
4509
4510 auto& info = it->second;
4511
4512 if (info.syncFd >= 0) {
4513 ALOGV("%s: previous sync fd exists, close it\n", __func__);
4514 goldfish_sync_signal(info.syncFd);
4515 close(info.syncFd);
4516 }
4517
4518 if (pImportFenceFdInfo->fd < 0) {
4519 ALOGV("%s: import -1, set to -1 and exit\n", __func__);
4520 info.syncFd = -1;
4521 } else {
4522 ALOGV("%s: import actual fd, dup and close()\n", __func__);
4523 info.syncFd = dup(pImportFenceFdInfo->fd);
4524 close(pImportFenceFdInfo->fd);
4525 }
4526 return VK_SUCCESS;
4527 #else
4528 return VK_ERROR_OUT_OF_HOST_MEMORY;
4529 #endif
4530 }
4531
4532     VkResult on_vkGetFenceFdKHR(
4533 void* context,
4534 VkResult,
4535 VkDevice device,
4536 const VkFenceGetFdInfoKHR* pGetFdInfo,
4537 int* pFd) {
4538
4539 // export operation.
4540 // first check if fence is signaled
4541 // then if so, return -1
4542 // else, queue work
4543
4544 VkEncoder* enc = (VkEncoder*)context;
4545
4546 bool hasFence = pGetFdInfo->fence != VK_NULL_HANDLE;
4547
4548 if (!hasFence) {
4549 ALOGV("%s: VK_ERROR_OUT_OF_HOST_MEMORY: no fence\n", __func__);
4550 return VK_ERROR_OUT_OF_HOST_MEMORY;
4551 }
4552
4553 #ifdef VK_USE_PLATFORM_ANDROID_KHR
4554 bool syncFdExport =
4555 pGetFdInfo->handleType & VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
4556
4557 if (!syncFdExport) {
4558 ALOGV("%s: VK_ERROR_OUT_OF_HOST_MEMORY: no sync fd fence\n", __func__);
4559 return VK_ERROR_OUT_OF_HOST_MEMORY;
4560 }
4561
4562 VkResult currentFenceStatus = enc->vkGetFenceStatus(device, pGetFdInfo->fence, true /* do lock */);
4563
4564 if (VK_SUCCESS == currentFenceStatus) { // Fence already signaled
4565 ALOGV("%s: VK_SUCCESS: already signaled\n", __func__);
4566 *pFd = -1;
4567 return VK_SUCCESS;
4568 }
4569
4570 if (VK_ERROR_DEVICE_LOST == currentFenceStatus) { // Other error
4571 ALOGV("%s: VK_ERROR_DEVICE_LOST: Other error\n", __func__);
4572 *pFd = -1;
4573 return VK_ERROR_DEVICE_LOST;
4574 }
4575
4576 if (VK_NOT_READY == currentFenceStatus) { // Fence unsignaled; create fd here
4577 AutoLock lock(mLock);
4578
4579 auto it = info_VkFence.find(pGetFdInfo->fence);
4580 if (it == info_VkFence.end()) {
4581 ALOGV("%s: VK_ERROR_OUT_OF_HOST_MEMORY: no fence info\n", __func__);
4582 return VK_ERROR_OUT_OF_HOST_MEMORY;
4583 }
4584
4585 auto& info = it->second;
4586
4587 bool syncFdCreated =
4588 info.external &&
4589 (info.exportFenceCreateInfo.handleTypes &
4590 VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT);
4591
4592 if (!syncFdCreated) {
4593 ALOGV("%s: VK_ERROR_OUT_OF_HOST_MEMORY: no sync fd created\n", __func__);
4594 return VK_ERROR_OUT_OF_HOST_MEMORY;
4595 }
4596
4597 if (mFeatureInfo->hasVirtioGpuNativeSync) {
4598 #if !defined(HOST_BUILD) && defined(VK_USE_PLATFORM_ANDROID_KHR)
4599 uint64_t hostFenceHandle = get_host_u64_VkFence(pGetFdInfo->fence);
4600 uint32_t hostFenceHandleLo = (uint32_t)hostFenceHandle;
4601 uint32_t hostFenceHandleHi = (uint32_t)(hostFenceHandle >> 32);
4602
4603 uint64_t hostDeviceHandle = get_host_u64_VkDevice(device);
4604 uint32_t hostDeviceHandleLo = (uint32_t)hostDeviceHandle;
4605             uint32_t hostDeviceHandleHi = (uint32_t)(hostDeviceHandle >> 32);
4606
4607 #define VIRTIO_GPU_NATIVE_SYNC_VULKAN_CREATE_EXPORT_FD 0xa000
4608
4609 uint32_t cmdDwords[5] = {
4610 VIRTIO_GPU_NATIVE_SYNC_VULKAN_CREATE_EXPORT_FD,
4611 hostDeviceHandleLo,
4612 hostDeviceHandleHi,
4613 hostFenceHandleLo,
4614 hostFenceHandleHi,
4615 };
4616
4617 drm_virtgpu_execbuffer execbuffer = {
4618 .flags = VIRTGPU_EXECBUF_FENCE_FD_OUT,
4619 .size = 5 * sizeof(uint32_t),
4620                 .command = (uint64_t)(uintptr_t)(cmdDwords),
4621 .bo_handles = 0,
4622 .num_bo_handles = 0,
4623 .fence_fd = -1,
4624 };
4625
4626 int res = drmIoctl(mRendernodeFd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &execbuffer);
4627 if (res) {
4628 ALOGE("%s: Failed to virtgpu execbuffer: sterror: %s errno: %d\n", __func__,
4629 strerror(errno), errno);
4630 abort();
4631 }
4632
4633 *pFd = execbuffer.fence_fd;
4634 #endif
4635 } else {
4636 goldfish_sync_queue_work(
4637 mSyncDeviceFd,
4638 get_host_u64_VkFence(pGetFdInfo->fence) /* the handle */,
4639 GOLDFISH_SYNC_VULKAN_SEMAPHORE_SYNC /* thread handle (doubling as type field) */,
4640 pFd);
4641 }
4642
4643 // relinquish ownership
4644 info.syncFd = -1;
4645 ALOGV("%s: got fd: %d\n", __func__, *pFd);
4646 return VK_SUCCESS;
4647 }
4648 return VK_ERROR_DEVICE_LOST;
4649 #else
4650 return VK_ERROR_OUT_OF_HOST_MEMORY;
4651 #endif
4652 }
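    // Illustrative end-to-end use of the export path above:
    //
    //   vkQueueSubmit(queue, 1, &submitInfo, fence);
    //   VkFenceGetFdInfoKHR getFdInfo = {
    //       VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR, nullptr, fence,
    //       VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT };
    //   int fd = -1;
    //   vkGetFenceFdKHR(device, &getFdInfo, &fd);
    //   if (fd >= 0) { sync_wait(fd, -1 /* block */); close(fd); }
    //
    // Per VK_KHR_external_fence_fd, an fd of -1 is a valid result meaning the
    // fence payload was already signaled at export time.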
4653
4654     VkResult on_vkWaitForFences(
4655 void* context,
4656 VkResult,
4657 VkDevice device,
4658 uint32_t fenceCount,
4659 const VkFence* pFences,
4660 VkBool32 waitAll,
4661 uint64_t timeout) {
4662
4663 VkEncoder* enc = (VkEncoder*)context;
4664
4665 #ifdef VK_USE_PLATFORM_ANDROID_KHR
4666 std::vector<VkFence> fencesExternal;
4667 std::vector<int> fencesExternalWaitFds;
4668 std::vector<VkFence> fencesNonExternal;
4669
4670 AutoLock lock(mLock);
4671
4672 for (uint32_t i = 0; i < fenceCount; ++i) {
4673 auto it = info_VkFence.find(pFences[i]);
4674 if (it == info_VkFence.end()) continue;
4675 const auto& info = it->second;
4676 if (info.syncFd >= 0) {
4677 fencesExternal.push_back(pFences[i]);
4678 fencesExternalWaitFds.push_back(info.syncFd);
4679 } else {
4680 fencesNonExternal.push_back(pFences[i]);
4681 }
4682 }
4683
4684 lock.unlock();
4685
4686 if (fencesExternal.empty()) {
4687 // No need for work pool, just wait with host driver.
4688 return enc->vkWaitForFences(
4689 device, fenceCount, pFences, waitAll, timeout, true /* do lock */);
4690 } else {
4691 // Depending on wait any or wait all,
4692 // schedule a wait group with waitAny/waitAll
4693 std::vector<WorkPool::Task> tasks;
4694
4695 ALOGV("%s: scheduling ext waits\n", __func__);
4696
4697 for (auto fd : fencesExternalWaitFds) {
4698 ALOGV("%s: wait on %d\n", __func__, fd);
4699 tasks.push_back([fd] {
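                    // Note: caps the wait at 3000 ms per fd rather than
                    // honoring |timeout| exactly.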
4700 sync_wait(fd, 3000);
4701 ALOGV("done waiting on fd %d\n", fd);
4702 });
4703 }
4704
4705 if (!fencesNonExternal.empty()) {
4706 tasks.push_back([this,
4707 fencesNonExternal /* copy of vector */,
4708 device, waitAll, timeout] {
4709 auto hostConn = ResourceTracker::threadingCallbacks.hostConnectionGetFunc();
4710 auto vkEncoder = ResourceTracker::threadingCallbacks.vkEncoderGetFunc(hostConn);
4711 ALOGV("%s: vkWaitForFences to host\n", __func__);
4712 vkEncoder->vkWaitForFences(device, fencesNonExternal.size(), fencesNonExternal.data(), waitAll, timeout, true /* do lock */);
4713 });
4714 }
4715
4716 auto waitGroupHandle = mWorkPool.schedule(tasks);
4717
4718 // Convert timeout to microseconds from nanoseconds
4719 bool waitRes = false;
4720 if (waitAll) {
4721 waitRes = mWorkPool.waitAll(waitGroupHandle, timeout / 1000);
4722 } else {
4723 waitRes = mWorkPool.waitAny(waitGroupHandle, timeout / 1000);
4724 }
4725
4726 if (waitRes) {
4727 ALOGV("%s: VK_SUCCESS\n", __func__);
4728 return VK_SUCCESS;
4729 } else {
4730 ALOGV("%s: VK_TIMEOUT\n", __func__);
4731 return VK_TIMEOUT;
4732 }
4733 }
4734 #else
4735 return enc->vkWaitForFences(
4736 device, fenceCount, pFences, waitAll, timeout, true /* do lock */);
4737 #endif
4738 }
4739
4740     VkResult on_vkCreateDescriptorPool(
4741 void* context,
4742 VkResult,
4743 VkDevice device,
4744 const VkDescriptorPoolCreateInfo* pCreateInfo,
4745 const VkAllocationCallbacks* pAllocator,
4746 VkDescriptorPool* pDescriptorPool) {
4747
4748 VkEncoder* enc = (VkEncoder*)context;
4749
4750 VkResult res = enc->vkCreateDescriptorPool(
4751 device, pCreateInfo, pAllocator, pDescriptorPool, true /* do lock */);
4752
4753 if (res != VK_SUCCESS) return res;
4754
4755 VkDescriptorPool pool = *pDescriptorPool;
4756
4757 struct goldfish_VkDescriptorPool* dp = as_goldfish_VkDescriptorPool(pool);
4758 dp->allocInfo = new DescriptorPoolAllocationInfo;
4759 dp->allocInfo->device = device;
4760 dp->allocInfo->createFlags = pCreateInfo->flags;
4761 dp->allocInfo->maxSets = pCreateInfo->maxSets;
4762 dp->allocInfo->usedSets = 0;
4763
4764 for (uint32_t i = 0; i < pCreateInfo->poolSizeCount; ++i) {
4765 dp->allocInfo->descriptorCountInfo.push_back({
4766 pCreateInfo->pPoolSizes[i].type,
4767 pCreateInfo->pPoolSizes[i].descriptorCount,
4768 0, /* used */
4769 });
4770 }
4771
4772 if (mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate) {
4773 std::vector<uint64_t> poolIds(pCreateInfo->maxSets);
4774
4775 uint32_t count = pCreateInfo->maxSets;
4776 enc->vkCollectDescriptorPoolIdsGOOGLE(
4777 device, pool, &count, poolIds.data(), true /* do lock */);
4778
4779 dp->allocInfo->freePoolIds = poolIds;
4780 }
4781
4782 return res;
4783 }
4784
4785     void on_vkDestroyDescriptorPool(
4786 void* context,
4787 VkDevice device,
4788 VkDescriptorPool descriptorPool,
4789 const VkAllocationCallbacks* pAllocator) {
4790
4791 if (!descriptorPool) return;
4792
4793 VkEncoder* enc = (VkEncoder*)context;
4794
4795 clearDescriptorPoolAndUnregisterDescriptorSets(context, device, descriptorPool);
4796
4797 enc->vkDestroyDescriptorPool(device, descriptorPool, pAllocator, true /* do lock */);
4798 }
4799
4800     VkResult on_vkResetDescriptorPool(
4801 void* context,
4802 VkResult,
4803 VkDevice device,
4804 VkDescriptorPool descriptorPool,
4805 VkDescriptorPoolResetFlags flags) {
4806
4807 if (!descriptorPool) return VK_ERROR_INITIALIZATION_FAILED;
4808
4809 VkEncoder* enc = (VkEncoder*)context;
4810
4811 VkResult res = enc->vkResetDescriptorPool(device, descriptorPool, flags, true /* do lock */);
4812
4813 if (res != VK_SUCCESS) return res;
4814
4815 clearDescriptorPoolAndUnregisterDescriptorSets(context, device, descriptorPool);
4816 return res;
4817 }
4818
4819     VkResult on_vkAllocateDescriptorSets(
4820 void* context,
4821 VkResult,
4822 VkDevice device,
4823 const VkDescriptorSetAllocateInfo* pAllocateInfo,
4824 VkDescriptorSet* pDescriptorSets) {
4825
4828 return allocAndInitializeDescriptorSets(context, device, pAllocateInfo, pDescriptorSets);
4829 }
4830
4831     VkResult on_vkFreeDescriptorSets(
4832 void* context,
4833 VkResult,
4834 VkDevice device,
4835 VkDescriptorPool descriptorPool,
4836 uint32_t descriptorSetCount,
4837 const VkDescriptorSet* pDescriptorSets) {
4838
4839 VkEncoder* enc = (VkEncoder*)context;
4840
4841 // Bit of robustness so that we can double free descriptor sets
4842 // and do other invalid usages
4843 // https://github.com/KhronosGroup/Vulkan-Docs/issues/1070
4844 // (people expect VK_SUCCESS to always be returned by vkFreeDescriptorSets)
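        // For example, this (technically invalid) sequence is tolerated, and
        // both calls return VK_SUCCESS:
        //   vkFreeDescriptorSets(device, pool, 1, &set);
        //   vkFreeDescriptorSets(device, pool, 1, &set); // double free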
4845 std::vector<VkDescriptorSet> toActuallyFree;
4846 {
4847 AutoLock lock(mLock);
4848
4849 // Pool was destroyed
4850 if (info_VkDescriptorPool.find(descriptorPool) == info_VkDescriptorPool.end()) {
4851 return VK_SUCCESS;
4852 }
4853
4854 if (!descriptorPoolSupportsIndividualFreeLocked(descriptorPool))
4855 return VK_SUCCESS;
4856
4857         std::vector<VkDescriptorSet> existingDescriptorSets;
4858
4859 // Check if this descriptor set was in the pool's set of allocated descriptor sets,
4860 // to guard against double free (Double free is allowed by the client)
4861 {
4862 auto allocedSets = as_goldfish_VkDescriptorPool(descriptorPool)->allocInfo->allocedSets;
4863
4864 for (uint32_t i = 0; i < descriptorSetCount; ++i) {
4865
4866 if (allocedSets.end() == allocedSets.find(pDescriptorSets[i])) {
4867 ALOGV("%s: Warning: descriptor set %p not found in pool. Was this double-freed?\n", __func__,
4868 (void*)pDescriptorSets[i]);
4869 continue;
4870 }
4871
4872 auto it = info_VkDescriptorSet.find(pDescriptorSets[i]);
4873 if (it == info_VkDescriptorSet.end())
4874 continue;
4875
4876 existingDescriptorSets.push_back(pDescriptorSets[i]);
4877 }
4878 }
4879
4880 for (auto set : existingDescriptorSets) {
4881 if (removeDescriptorSetFromPool(set, mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate)) {
4882 toActuallyFree.push_back(set);
4883 }
4884 }
4885
4886 if (toActuallyFree.empty()) return VK_SUCCESS;
4887 }
4888
4889 if (mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate) {
4890 // In the batched set update case, decrement refcount on the set layout
4891 // and only free on host if we satisfied a pending allocation on the
4892 // host.
4893 for (uint32_t i = 0; i < toActuallyFree.size(); ++i) {
4894 VkDescriptorSetLayout setLayout = as_goldfish_VkDescriptorSet(toActuallyFree[i])->reified->setLayout;
4895 decDescriptorSetLayoutRef(context, device, setLayout, nullptr);
4896 }
4897 freeDescriptorSetsIfHostAllocated(
4898 enc, device, (uint32_t)toActuallyFree.size(), toActuallyFree.data());
4899 } else {
4900 // In the non-batched set update case, just free them directly.
4901 enc->vkFreeDescriptorSets(device, descriptorPool, (uint32_t)toActuallyFree.size(), toActuallyFree.data(), true /* do lock */);
4902 }
4903 return VK_SUCCESS;
4904 }
4905
4906     VkResult on_vkCreateDescriptorSetLayout(
4907 void* context,
4908 VkResult,
4909 VkDevice device,
4910 const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
4911 const VkAllocationCallbacks* pAllocator,
4912 VkDescriptorSetLayout* pSetLayout) {
4913
4914 VkEncoder* enc = (VkEncoder*)context;
4915
4916 VkResult res = enc->vkCreateDescriptorSetLayout(
4917 device, pCreateInfo, pAllocator, pSetLayout, true /* do lock */);
4918
4919 if (res != VK_SUCCESS) return res;
4920
4921 struct goldfish_VkDescriptorSetLayout* dsl =
4922 as_goldfish_VkDescriptorSetLayout(*pSetLayout);
4923 dsl->layoutInfo = new DescriptorSetLayoutInfo;
4924 for (uint32_t i = 0; i < pCreateInfo->bindingCount; ++i) {
4925 dsl->layoutInfo->bindings.push_back(pCreateInfo->pBindings[i]);
4926 }
4927 dsl->layoutInfo->refcount = 1;
4928
4929 return res;
4930 }
4931
4932     void on_vkUpdateDescriptorSets(
4933 void* context,
4934 VkDevice device,
4935 uint32_t descriptorWriteCount,
4936 const VkWriteDescriptorSet* pDescriptorWrites,
4937 uint32_t descriptorCopyCount,
4938 const VkCopyDescriptorSet* pDescriptorCopies) {
4939
4940 VkEncoder* enc = (VkEncoder*)context;
4941
4942 std::vector<VkDescriptorImageInfo> transformedImageInfos;
4943 std::vector<VkWriteDescriptorSet> transformedWrites(descriptorWriteCount);
4944
4945 memcpy(transformedWrites.data(), pDescriptorWrites, sizeof(VkWriteDescriptorSet) * descriptorWriteCount);
4946
4947 size_t imageInfosNeeded = 0;
4948 for (uint32_t i = 0; i < descriptorWriteCount; ++i) {
4949 if (!isDescriptorTypeImageInfo(transformedWrites[i].descriptorType)) continue;
4950 if (!transformedWrites[i].pImageInfo) continue;
4951
4952 imageInfosNeeded += transformedWrites[i].descriptorCount;
4953 }
4954
4955 transformedImageInfos.resize(imageInfosNeeded);
4956
4957 size_t imageInfoIndex = 0;
4958 for (uint32_t i = 0; i < descriptorWriteCount; ++i) {
4959 if (!isDescriptorTypeImageInfo(transformedWrites[i].descriptorType)) continue;
4960 if (!transformedWrites[i].pImageInfo) continue;
4961
4962 for (uint32_t j = 0; j < transformedWrites[i].descriptorCount; ++j) {
4963 transformedImageInfos[imageInfoIndex] = transformedWrites[i].pImageInfo[j];
4964 ++imageInfoIndex;
4965 }
4966 transformedWrites[i].pImageInfo = &transformedImageInfos[imageInfoIndex - transformedWrites[i].descriptorCount];
4967 }
4968
4969 {
4970 // Validate and filter samplers
4971 AutoLock lock(mLock);
4972 size_t imageInfoIndex = 0;
4973 for (uint32_t i = 0; i < descriptorWriteCount; ++i) {
4974
4975 if (!isDescriptorTypeImageInfo(transformedWrites[i].descriptorType)) continue;
4976 if (!transformedWrites[i].pImageInfo) continue;
4977
4978 bool isImmutableSampler =
4979 descriptorBindingIsImmutableSampler(
4980 transformedWrites[i].dstSet,
4981 transformedWrites[i].dstBinding);
4982
4983 for (uint32_t j = 0; j < transformedWrites[i].descriptorCount; ++j) {
4984 if (isImmutableSampler) {
4985 transformedImageInfos[imageInfoIndex].sampler = 0;
4986 }
4987 transformedImageInfos[imageInfoIndex] =
4988 filterNonexistentSampler(transformedImageInfos[imageInfoIndex]);
4989 ++imageInfoIndex;
4990 }
4991 }
4992 }
4993
4994 if (mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate) {
4995 for (uint32_t i = 0; i < descriptorWriteCount; ++i) {
4996 VkDescriptorSet set = transformedWrites[i].dstSet;
4997 doEmulatedDescriptorWrite(&transformedWrites[i],
4998 as_goldfish_VkDescriptorSet(set)->reified);
4999 }
5000
5001 for (uint32_t i = 0; i < descriptorCopyCount; ++i) {
5002 doEmulatedDescriptorCopy(&pDescriptorCopies[i],
5003 as_goldfish_VkDescriptorSet(pDescriptorCopies[i].srcSet)->reified,
5004 as_goldfish_VkDescriptorSet(pDescriptorCopies[i].dstSet)->reified);
5005 }
5006 } else {
5007 enc->vkUpdateDescriptorSets(
5008 device, descriptorWriteCount, transformedWrites.data(),
5009 descriptorCopyCount, pDescriptorCopies, true /* do lock */);
5010 }
5011 }
5012
5013     void on_vkDestroyImage(
5014 void* context,
5015 VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
5016 VkEncoder* enc = (VkEncoder*)context;
5017 enc->vkDestroyImage(device, image, pAllocator, true /* do lock */);
5018 }
5019
5020     void setMemoryRequirementsForSysmemBackedImage(
5021 VkImage image, VkMemoryRequirements *pMemoryRequirements) {
5022 #ifdef VK_USE_PLATFORM_FUCHSIA
5023 auto it = info_VkImage.find(image);
5024 if (it == info_VkImage.end()) return;
5025 auto& info = it->second;
5026 if (info.isSysmemBackedMemory) {
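            // Assumes a tightly packed, 4-byte-per-pixel format: sysmem-backed
            // images are reported as width * height * 4 bytes.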
5027 auto width = info.createInfo.extent.width;
5028 auto height = info.createInfo.extent.height;
5029 pMemoryRequirements->size = width * height * 4;
5030 }
5031 #else
5032 // Bypass "unused parameter" checks.
5033 (void)image;
5034 (void)pMemoryRequirements;
5035 #endif
5036 }
5037
5038     void on_vkGetImageMemoryRequirements(
5039 void *context, VkDevice device, VkImage image,
5040 VkMemoryRequirements *pMemoryRequirements) {
5041
5042 AutoLock lock(mLock);
5043
5044 auto it = info_VkImage.find(image);
5045 if (it == info_VkImage.end()) return;
5046
5047 auto& info = it->second;
5048
5049 if (info.baseRequirementsKnown) {
5050 *pMemoryRequirements = info.baseRequirements;
5051 return;
5052 }
5053
5054 lock.unlock();
5055
5056 VkEncoder* enc = (VkEncoder*)context;
5057
5058 enc->vkGetImageMemoryRequirements(
5059 device, image, pMemoryRequirements, true /* do lock */);
5060
5061 lock.lock();
5062
5063 transformImageMemoryRequirementsForGuestLocked(
5064 image, pMemoryRequirements);
5065
5066 info.baseRequirementsKnown = true;
5067 info.baseRequirements = *pMemoryRequirements;
5068 }
5069
5070     void on_vkGetImageMemoryRequirements2(
5071 void *context, VkDevice device, const VkImageMemoryRequirementsInfo2 *pInfo,
5072 VkMemoryRequirements2 *pMemoryRequirements) {
5073 VkEncoder* enc = (VkEncoder*)context;
5074 enc->vkGetImageMemoryRequirements2(
5075 device, pInfo, pMemoryRequirements, true /* do lock */);
5076 transformImageMemoryRequirements2ForGuest(
5077 pInfo->image, pMemoryRequirements);
5078 }
5079
5080     void on_vkGetImageMemoryRequirements2KHR(
5081 void *context, VkDevice device, const VkImageMemoryRequirementsInfo2 *pInfo,
5082 VkMemoryRequirements2 *pMemoryRequirements) {
5083 VkEncoder* enc = (VkEncoder*)context;
5084 enc->vkGetImageMemoryRequirements2KHR(
5085 device, pInfo, pMemoryRequirements, true /* do lock */);
5086 transformImageMemoryRequirements2ForGuest(
5087 pInfo->image, pMemoryRequirements);
5088 }
5089
5090     VkResult on_vkBindImageMemory(
5091 void* context, VkResult,
5092 VkDevice device, VkImage image, VkDeviceMemory memory,
5093 VkDeviceSize memoryOffset) {
5094 VkEncoder* enc = (VkEncoder*)context;
5095 return enc->vkBindImageMemory(device, image, memory, memoryOffset, true /* do lock */);
5096 }
5097
5098     VkResult on_vkBindImageMemory2(
5099 void* context, VkResult,
5100 VkDevice device, uint32_t bindingCount, const VkBindImageMemoryInfo* pBindInfos) {
5101 VkEncoder* enc = (VkEncoder*)context;
5102 return enc->vkBindImageMemory2(device, bindingCount, pBindInfos, true /* do lock */);
5103 }
5104
5105     VkResult on_vkBindImageMemory2KHR(
5106 void* context, VkResult,
5107 VkDevice device, uint32_t bindingCount, const VkBindImageMemoryInfo* pBindInfos) {
5108 VkEncoder* enc = (VkEncoder*)context;
5109 return enc->vkBindImageMemory2KHR(device, bindingCount, pBindInfos, true /* do lock */);
5110 }
5111
5112     VkResult on_vkCreateBuffer(
5113 void* context, VkResult,
5114 VkDevice device, const VkBufferCreateInfo *pCreateInfo,
5115 const VkAllocationCallbacks *pAllocator,
5116 VkBuffer *pBuffer) {
5117 VkEncoder* enc = (VkEncoder*)context;
5118
5119 #ifdef VK_USE_PLATFORM_FUCHSIA
5120 Optional<zx::vmo> vmo;
5121 bool isSysmemBackedMemory = false;
5122
5123 const VkExternalMemoryBufferCreateInfo* extBufCiPtr =
5124 vk_find_struct<VkExternalMemoryBufferCreateInfo>(pCreateInfo);
5125 if (extBufCiPtr &&
5126 ((extBufCiPtr->handleTypes &
5127 VK_EXTERNAL_MEMORY_HANDLE_TYPE_TEMP_ZIRCON_VMO_BIT_FUCHSIA) ||
5128 (extBufCiPtr->handleTypes &
5129 VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA))) {
5130 isSysmemBackedMemory = true;
5131 }
5132
5133 const auto* extBufferCollectionPtr =
5134 vk_find_struct<VkBufferCollectionBufferCreateInfoFUCHSIA>(
5135 pCreateInfo);
5136
5137 if (extBufferCollectionPtr) {
5138 auto collection = reinterpret_cast<
5139 fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(
5140 extBufferCollectionPtr->collection);
5141 uint32_t index = extBufferCollectionPtr->index;
5142
5143 auto result = collection->WaitForBuffersAllocated();
5144 if (result.ok() && result.Unwrap()->status == ZX_OK) {
5145 auto& info = result.Unwrap()->buffer_collection_info;
5146 if (index < info.buffer_count) {
5147 vmo = android::base::makeOptional(
5148 std::move(info.buffers[index].vmo));
5149 }
5150 } else {
5151 ALOGE("WaitForBuffersAllocated failed: %d %d", result.status(),
5152 GET_STATUS_SAFE(result, status));
5153 }
5154
5155 if (vmo && vmo->is_valid()) {
5156 fidl::FidlAllocator allocator;
5157 fuchsia_hardware_goldfish::wire::CreateBuffer2Params createParams(allocator);
5158 createParams.set_size(allocator, pCreateInfo->size)
5159 .set_memory_property(allocator,
5160 fuchsia_hardware_goldfish::wire::kMemoryPropertyDeviceLocal);
5161
5162 auto result =
5163 mControlDevice->CreateBuffer2(std::move(*vmo), std::move(createParams));
5164 if (!result.ok() ||
5165                     (result.Unwrap()->result.is_err() &&
5166 result.Unwrap()->result.err() != ZX_ERR_ALREADY_EXISTS)) {
5167 ALOGE("CreateBuffer2 failed: %d:%d", result.status(),
5168 GET_STATUS_SAFE(result, result.err()));
5169 }
5170 isSysmemBackedMemory = true;
5171 }
5172 }
5173 #endif // VK_USE_PLATFORM_FUCHSIA
5174
5175 VkResult res;
5176 VkMemoryRequirements memReqs;
5177
5178 if (supportsCreateResourcesWithRequirements()) {
5179 res = enc->vkCreateBufferWithRequirementsGOOGLE(device, pCreateInfo, pAllocator, pBuffer, &memReqs, true /* do lock */);
5180 } else {
5181 res = enc->vkCreateBuffer(device, pCreateInfo, pAllocator, pBuffer, true /* do lock */);
5182 }
5183
5184 if (res != VK_SUCCESS) return res;
5185
5186 AutoLock lock(mLock);
5187
5188 auto it = info_VkBuffer.find(*pBuffer);
5189 if (it == info_VkBuffer.end()) return VK_ERROR_INITIALIZATION_FAILED;
5190
5191 auto& info = it->second;
5192
5193 info.createInfo = *pCreateInfo;
5194 info.createInfo.pNext = nullptr;
5195
5196 if (supportsCreateResourcesWithRequirements()) {
5197 info.baseRequirementsKnown = true;
5198 }
5199
5200 const VkExternalMemoryBufferCreateInfo* extBufCi =
5201 vk_find_struct<VkExternalMemoryBufferCreateInfo>(pCreateInfo);
5202
5203 if (extBufCi) {
5204 info.external = true;
5205 info.externalCreateInfo = *extBufCi;
5206 }
5207
5208 #ifdef VK_USE_PLATFORM_FUCHSIA
5209 if (isSysmemBackedMemory) {
5210 info.isSysmemBackedMemory = true;
5211 }
5212 #endif
5213
5214 if (info.baseRequirementsKnown) {
5215 transformBufferMemoryRequirementsForGuestLocked(*pBuffer, &memReqs);
5216 info.baseRequirements = memReqs;
5217 }
5218
5219 return res;
5220 }
5221
5222     void on_vkDestroyBuffer(
5223 void* context,
5224 VkDevice device, VkBuffer buffer, const VkAllocationCallbacks *pAllocator) {
5225 VkEncoder* enc = (VkEncoder*)context;
5226 enc->vkDestroyBuffer(device, buffer, pAllocator, true /* do lock */);
5227 }
5228
5229     void on_vkGetBufferMemoryRequirements(
5230 void* context, VkDevice device, VkBuffer buffer, VkMemoryRequirements *pMemoryRequirements) {
5231
5232 AutoLock lock(mLock);
5233
5234 auto it = info_VkBuffer.find(buffer);
5235 if (it == info_VkBuffer.end()) return;
5236
5237 auto& info = it->second;
5238
5239 if (info.baseRequirementsKnown) {
5240 *pMemoryRequirements = info.baseRequirements;
5241 return;
5242 }
5243
5244 lock.unlock();
5245
5246 VkEncoder* enc = (VkEncoder*)context;
5247 enc->vkGetBufferMemoryRequirements(
5248 device, buffer, pMemoryRequirements, true /* do lock */);
5249
5250 lock.lock();
5251
5252 transformBufferMemoryRequirementsForGuestLocked(
5253 buffer, pMemoryRequirements);
5254 info.baseRequirementsKnown = true;
5255 info.baseRequirements = *pMemoryRequirements;
5256 }
5257
5258     void on_vkGetBufferMemoryRequirements2(
5259 void* context, VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo,
5260 VkMemoryRequirements2* pMemoryRequirements) {
5261 VkEncoder* enc = (VkEncoder*)context;
5262 enc->vkGetBufferMemoryRequirements2(device, pInfo, pMemoryRequirements, true /* do lock */);
5263 transformBufferMemoryRequirements2ForGuest(
5264 pInfo->buffer, pMemoryRequirements);
5265 }
5266
5267     void on_vkGetBufferMemoryRequirements2KHR(
5268 void* context, VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo,
5269 VkMemoryRequirements2* pMemoryRequirements) {
5270 VkEncoder* enc = (VkEncoder*)context;
5271 enc->vkGetBufferMemoryRequirements2KHR(device, pInfo, pMemoryRequirements, true /* do lock */);
5272 transformBufferMemoryRequirements2ForGuest(
5273 pInfo->buffer, pMemoryRequirements);
5274 }
5275
5276     VkResult on_vkBindBufferMemory(
5277 void *context, VkResult,
5278 VkDevice device, VkBuffer buffer, VkDeviceMemory memory, VkDeviceSize memoryOffset) {
5279 VkEncoder *enc = (VkEncoder *)context;
5280 return enc->vkBindBufferMemory(
5281 device, buffer, memory, memoryOffset, true /* do lock */);
5282 }
5283
5284     VkResult on_vkBindBufferMemory2(
5285 void *context, VkResult,
5286 VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo *pBindInfos) {
5287 VkEncoder *enc = (VkEncoder *)context;
5288 return enc->vkBindBufferMemory2(
5289 device, bindInfoCount, pBindInfos, true /* do lock */);
5290 }
5291
5292     VkResult on_vkBindBufferMemory2KHR(
5293 void *context, VkResult,
5294 VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo *pBindInfos) {
5295 VkEncoder *enc = (VkEncoder *)context;
5296 return enc->vkBindBufferMemory2KHR(
5297 device, bindInfoCount, pBindInfos, true /* do lock */);
5298 }
5299
5300     void ensureSyncDeviceFd() {
5301 if (mSyncDeviceFd >= 0) return;
5302 #ifdef VK_USE_PLATFORM_ANDROID_KHR
5303 mSyncDeviceFd = goldfish_sync_open();
5304 if (mSyncDeviceFd >= 0) {
5305 ALOGD("%s: created sync device for current Vulkan process: %d\n", __func__, mSyncDeviceFd);
5306 } else {
5307 ALOGD("%s: failed to create sync device for current Vulkan process\n", __func__);
5308 }
5309 #endif
5310 }
5311
5312     VkResult on_vkCreateSemaphore(
5313 void* context, VkResult input_result,
5314 VkDevice device, const VkSemaphoreCreateInfo* pCreateInfo,
5315 const VkAllocationCallbacks* pAllocator,
5316 VkSemaphore* pSemaphore) {
5317
5318 VkEncoder* enc = (VkEncoder*)context;
5319
5320 VkSemaphoreCreateInfo finalCreateInfo = *pCreateInfo;
5321
5322 const VkExportSemaphoreCreateInfoKHR* exportSemaphoreInfoPtr =
5323 vk_find_struct<VkExportSemaphoreCreateInfoKHR>(pCreateInfo);
5324
5325 #ifdef VK_USE_PLATFORM_FUCHSIA
5326 bool exportEvent = exportSemaphoreInfoPtr &&
5327 ((exportSemaphoreInfoPtr->handleTypes &
5328 VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TEMP_ZIRCON_EVENT_BIT_FUCHSIA) ||
5329 (exportSemaphoreInfoPtr->handleTypes &
5330 VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA));
5331
5332 if (exportEvent) {
5333 finalCreateInfo.pNext = nullptr;
5334 // If we have timeline semaphores externally, leave it there.
5335 const VkSemaphoreTypeCreateInfo* typeCi =
5336 vk_find_struct<VkSemaphoreTypeCreateInfo>(pCreateInfo);
5337 if (typeCi) finalCreateInfo.pNext = typeCi;
5338 }
5339 #endif
5340
5341 #ifdef VK_USE_PLATFORM_ANDROID_KHR
5342 bool exportSyncFd = exportSemaphoreInfoPtr &&
5343 (exportSemaphoreInfoPtr->handleTypes &
5344 VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT);
5345
5346 if (exportSyncFd) {
5347 finalCreateInfo.pNext = nullptr;
5348 // If we have timeline semaphores externally, leave it there.
5349 const VkSemaphoreTypeCreateInfo* typeCi =
5350 vk_find_struct<VkSemaphoreTypeCreateInfo>(pCreateInfo);
5351 if (typeCi) finalCreateInfo.pNext = typeCi;
5352 }
5353 #endif
5354 input_result = enc->vkCreateSemaphore(
5355 device, &finalCreateInfo, pAllocator, pSemaphore, true /* do lock */);
5356
5357 zx_handle_t event_handle = ZX_HANDLE_INVALID;
5358
5359 #ifdef VK_USE_PLATFORM_FUCHSIA
5360 if (exportEvent) {
5361 zx_event_create(0, &event_handle);
5362 }
5363 #endif
5364
5365 AutoLock lock(mLock);
5366
5367 auto it = info_VkSemaphore.find(*pSemaphore);
5368 if (it == info_VkSemaphore.end()) return VK_ERROR_INITIALIZATION_FAILED;
5369
5370 auto& info = it->second;
5371
5372 info.device = device;
5373 info.eventHandle = event_handle;
5374 #ifdef VK_USE_PLATFORM_FUCHSIA
5375 info.eventKoid = getEventKoid(info.eventHandle);
5376 #endif
5377
5378 #ifdef VK_USE_PLATFORM_ANDROID_KHR
5379 if (exportSyncFd) {
5380 if (mFeatureInfo->hasVirtioGpuNativeSync) {
5381 #if !defined(HOST_BUILD)
5382 uint64_t hostFenceHandle = get_host_u64_VkSemaphore(*pSemaphore);
5383 uint32_t hostFenceHandleLo = (uint32_t)hostFenceHandle;
5384 uint32_t hostFenceHandleHi = (uint32_t)(hostFenceHandle >> 32);
5385
5386 uint64_t hostDeviceHandle = get_host_u64_VkDevice(device);
5387 uint32_t hostDeviceHandleLo = (uint32_t)hostDeviceHandle;
5388                 uint32_t hostDeviceHandleHi = (uint32_t)(hostDeviceHandle >> 32);
5389
5390 #define VIRTIO_GPU_NATIVE_SYNC_VULKAN_CREATE_EXPORT_FD 0xa000
5391
5392 uint32_t cmdDwords[5] = {
5393 VIRTIO_GPU_NATIVE_SYNC_VULKAN_CREATE_EXPORT_FD,
5394 hostDeviceHandleLo,
5395 hostDeviceHandleHi,
5396 hostFenceHandleLo,
5397 hostFenceHandleHi,
5398 };
5399
5400 drm_virtgpu_execbuffer execbuffer = {
5401 .flags = VIRTGPU_EXECBUF_FENCE_FD_OUT,
5402 .size = 5 * sizeof(uint32_t),
5403 .command = (uint64_t)(cmdDwords),
5404 .bo_handles = 0,
5405 .num_bo_handles = 0,
5406 .fence_fd = -1,
5407 };
5408
5409 int res = drmIoctl(mRendernodeFd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &execbuffer);
5410 if (res) {
5411                 ALOGE("%s: Failed to virtgpu execbuffer: strerror: %s errno: %d\n", __func__,
5412 strerror(errno), errno);
5413 abort();
5414 }
5415
5416 info.syncFd = execbuffer.fence_fd;
5417 #endif
5418 } else {
5419 ensureSyncDeviceFd();
5420
5421             int syncFd = -1;
5422             goldfish_sync_queue_work(
5423                 mSyncDeviceFd,
5424                 get_host_u64_VkSemaphore(*pSemaphore) /* the handle */,
5425                 GOLDFISH_SYNC_VULKAN_SEMAPHORE_SYNC /* thread handle (doubling as type field) */,
5426                 &syncFd);
5427             info.syncFd = syncFd;
5430 }
5431 }
5432 #endif
5433
5434 return VK_SUCCESS;
5435 }
5436
5437     void on_vkDestroySemaphore(
5438 void* context,
5439 VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
5440 VkEncoder* enc = (VkEncoder*)context;
5441 enc->vkDestroySemaphore(device, semaphore, pAllocator, true /* do lock */);
5442 }
5443
5444 // https://www.khronos.org/registry/vulkan/specs/1.0-extensions/html/vkspec.html#vkGetSemaphoreFdKHR
5445 // Each call to vkGetSemaphoreFdKHR must create a new file descriptor and transfer ownership
5446 // of it to the application. To avoid leaking resources, the application must release ownership
5447 // of the file descriptor when it is no longer needed.
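    // A minimal sketch of that contract from the caller's side (hypothetical
    // application code, not part of this tracker):
    //   VkSemaphoreGetFdInfoKHR getFdInfo = {
    //       VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR, nullptr,
    //       semaphore, VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT};
    //   int fd = -1;
    //   vkGetSemaphoreFdKHR(device, &getFdInfo, &fd); // fd now owned by the app
    //   ...
    //   close(fd); // the app must eventually release it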
5448     VkResult on_vkGetSemaphoreFdKHR(
5449 void* context, VkResult,
5450 VkDevice device, const VkSemaphoreGetFdInfoKHR* pGetFdInfo,
5451 int* pFd) {
5452 #ifdef VK_USE_PLATFORM_ANDROID_KHR
5453 VkEncoder* enc = (VkEncoder*)context;
5454 bool getSyncFd =
5455 pGetFdInfo->handleType & VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
5456
5457 if (getSyncFd) {
5458 AutoLock lock(mLock);
5459 auto it = info_VkSemaphore.find(pGetFdInfo->semaphore);
5460 if (it == info_VkSemaphore.end()) return VK_ERROR_OUT_OF_HOST_MEMORY;
5461 auto& semInfo = it->second;
5462 *pFd = dup(semInfo.syncFd);
5463 return VK_SUCCESS;
5464 } else {
5465 // opaque fd
5466 int hostFd = 0;
5467 VkResult result = enc->vkGetSemaphoreFdKHR(device, pGetFdInfo, &hostFd, true /* do lock */);
5468 if (result != VK_SUCCESS) {
5469 return result;
5470 }
5471 *pFd = memfd_create("vk_opaque_fd", 0);
5472 write(*pFd, &hostFd, sizeof(hostFd));
5473 return VK_SUCCESS;
5474 }
5475 #else
5476 (void)context;
5477 (void)device;
5478 (void)pGetFdInfo;
5479 (void)pFd;
5480 return VK_ERROR_INCOMPATIBLE_DRIVER;
5481 #endif
5482 }
5483
5484     VkResult on_vkImportSemaphoreFdKHR(
5485 void* context, VkResult input_result,
5486 VkDevice device,
5487 const VkImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo) {
5488 #ifdef VK_USE_PLATFORM_ANDROID_KHR
5489 VkEncoder* enc = (VkEncoder*)context;
5490 if (input_result != VK_SUCCESS) {
5491 return input_result;
5492 }
5493
5494 if (pImportSemaphoreFdInfo->handleType &
5495 VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT) {
5496 VkImportSemaphoreFdInfoKHR tmpInfo = *pImportSemaphoreFdInfo;
5497
5498 AutoLock lock(mLock);
5499
5500             auto semaphoreIt = info_VkSemaphore.find(pImportSemaphoreFdInfo->semaphore);
                 if (semaphoreIt == info_VkSemaphore.end()) return VK_ERROR_OUT_OF_HOST_MEMORY;
5501             auto& info = semaphoreIt->second;
5502
5503 if (info.syncFd >= 0) {
5504 close(info.syncFd);
5505 }
5506
5507 info.syncFd = pImportSemaphoreFdInfo->fd;
5508
5509 return VK_SUCCESS;
5510 } else {
5511 int fd = pImportSemaphoreFdInfo->fd;
5512 int err = lseek(fd, 0, SEEK_SET);
5513 if (err == -1) {
5514 ALOGE("lseek fail on import semaphore");
5515 }
5516 int hostFd = 0;
5517 read(fd, &hostFd, sizeof(hostFd));
5518 VkImportSemaphoreFdInfoKHR tmpInfo = *pImportSemaphoreFdInfo;
5519 tmpInfo.fd = hostFd;
5520 VkResult result = enc->vkImportSemaphoreFdKHR(device, &tmpInfo, true /* do lock */);
5521 close(fd);
5522 return result;
5523 }
5524 #else
5525 (void)context;
5526 (void)input_result;
5527 (void)device;
5528 (void)pImportSemaphoreFdInfo;
5529 return VK_ERROR_INCOMPATIBLE_DRIVER;
5530 #endif
5531 }
5532
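    // Descriptor sets referenced by a command buffer whose emulated writes may
    // still need to be committed to the host at submit time. Stored behind
    // cb->userPtr (see collectAllPendingDescriptorSetsBottomUp below);
    // presumably populated when descriptor sets are bound during recording.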
5533 struct CommandBufferPendingDescriptorSets {
5534 std::unordered_set<VkDescriptorSet> sets;
5535 };
5536
5537     void collectAllPendingDescriptorSetsBottomUp(const std::vector<VkCommandBuffer>& workingSet, std::unordered_set<VkDescriptorSet>& allDs) {
5538 if (workingSet.empty()) return;
5539
5540 std::vector<VkCommandBuffer> nextLevel;
5541 for (auto commandBuffer : workingSet) {
5542 struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
5543 forAllObjects(cb->subObjects, [&nextLevel](void* secondary) {
5544 nextLevel.push_back((VkCommandBuffer)secondary);
5545 });
5546 }
5547
5548 collectAllPendingDescriptorSetsBottomUp(nextLevel, allDs);
5549
5550 for (auto cmdbuf : workingSet) {
5551 struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(cmdbuf);
5552
5553 if (!cb->userPtr) {
5554 continue; // No descriptors to update.
5555 }
5556
5557 CommandBufferPendingDescriptorSets* pendingDescriptorSets =
5558 (CommandBufferPendingDescriptorSets*)(cb->userPtr);
5559
5560 if (pendingDescriptorSets->sets.empty()) {
5561 continue; // No descriptors to update.
5562 }
5563
5564 allDs.insert(pendingDescriptorSets->sets.begin(), pendingDescriptorSets->sets.end());
5565 }
5566 }
5567
5568     void commitDescriptorSetUpdates(void* context, VkQueue queue, const std::unordered_set<VkDescriptorSet>& sets) {
5569 VkEncoder* enc = (VkEncoder*)context;
5570
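        // Parallel per-set arrays (plus a deduplicated |pools| list) handed to
        // vkQueueCommitDescriptorSetUpdatesGOOGLE in a single call: which pool
        // each set belongs to, its pool id and set layout, whether its host
        // allocation is still pending, and where its writes begin in the
        // flattened |writesForHost| array.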
5571 std::unordered_map<VkDescriptorPool, uint32_t> poolSet;
5572 std::vector<VkDescriptorPool> pools;
5573 std::vector<VkDescriptorSetLayout> setLayouts;
5574 std::vector<uint64_t> poolIds;
5575 std::vector<uint32_t> descriptorSetWhichPool;
5576 std::vector<uint32_t> pendingAllocations;
5577 std::vector<uint32_t> writeStartingIndices;
5578 std::vector<VkWriteDescriptorSet> writesForHost;
5579
5580 uint32_t poolIndex = 0;
5581 uint32_t currentWriteIndex = 0;
5582 for (auto set : sets) {
5583 ReifiedDescriptorSet* reified = as_goldfish_VkDescriptorSet(set)->reified;
5584 VkDescriptorPool pool = reified->pool;
5585 VkDescriptorSetLayout setLayout = reified->setLayout;
5586
5587 auto it = poolSet.find(pool);
5588 if (it == poolSet.end()) {
5589 poolSet[pool] = poolIndex;
5590 descriptorSetWhichPool.push_back(poolIndex);
5591 pools.push_back(pool);
5592 ++poolIndex;
5593 } else {
5594 uint32_t savedPoolIndex = it->second;
5595 descriptorSetWhichPool.push_back(savedPoolIndex);
5596 }
5597
5598 poolIds.push_back(reified->poolId);
5599 setLayouts.push_back(setLayout);
5600 pendingAllocations.push_back(reified->allocationPending ? 1 : 0);
5601 writeStartingIndices.push_back(currentWriteIndex);
5602
5603 auto& writes = reified->allWrites;
5604
5605 for (size_t i = 0; i < writes.size(); ++i) {
5606 uint32_t binding = i;
5607
5608 for (size_t j = 0; j < writes[i].size(); ++j) {
5609 auto& write = writes[i][j];
5610
5611 if (write.type == DescriptorWriteType::Empty) continue;
5612
5613 uint32_t dstArrayElement = 0;
5614
5615 VkDescriptorImageInfo* imageInfo = nullptr;
5616 VkDescriptorBufferInfo* bufferInfo = nullptr;
5617 VkBufferView* bufferView = nullptr;
5618
5619 switch (write.type) {
5620 case DescriptorWriteType::Empty:
5621 break;
5622 case DescriptorWriteType::ImageInfo:
5623 dstArrayElement = j;
5624 imageInfo = &write.imageInfo;
5625 break;
5626 case DescriptorWriteType::BufferInfo:
5627 dstArrayElement = j;
5628 bufferInfo = &write.bufferInfo;
5629 break;
5630 case DescriptorWriteType::BufferView:
5631 dstArrayElement = j;
5632 bufferView = &write.bufferView;
5633 break;
5634 case DescriptorWriteType::InlineUniformBlock:
5635 case DescriptorWriteType::AccelerationStructure:
5636 // TODO
5637 ALOGE("Encountered pending inline uniform block or acceleration structure desc write, abort (NYI)\n");
5638 abort();
5639 default:
5640 break;
5642 }
5643
5644 // TODO: Combine multiple writes into one VkWriteDescriptorSet.
5645 VkWriteDescriptorSet forHost = {
5646 VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, 0 /* TODO: inline uniform block */,
5647 set,
5648 binding,
5649 dstArrayElement,
5650 1,
5651 write.descriptorType,
5652 imageInfo,
5653 bufferInfo,
5654 bufferView,
5655 };
5656
5657 writesForHost.push_back(forHost);
5658 ++currentWriteIndex;
5659
5660 // Set it back to empty.
5661 write.type = DescriptorWriteType::Empty;
5662 }
5663 }
5664 }
5665
5666 // Skip out if there's nothing to VkWriteDescriptorSet home about.
5667 if (writesForHost.empty()) {
5668 return;
5669 }
5670
5671 enc->vkQueueCommitDescriptorSetUpdatesGOOGLE(
5672 queue,
5673 (uint32_t)pools.size(), pools.data(),
5674 (uint32_t)sets.size(),
5675 setLayouts.data(),
5676 poolIds.data(),
5677 descriptorSetWhichPool.data(),
5678 pendingAllocations.data(),
5679 writeStartingIndices.data(),
5680 (uint32_t)writesForHost.size(),
5681 writesForHost.data(),
5682 false /* no lock */);
5683
5684 // If we got here, then we definitely serviced the allocations.
5685 for (auto set : sets) {
5686 ReifiedDescriptorSet* reified = as_goldfish_VkDescriptorSet(set)->reified;
5687 reified->allocationPending = false;
5688 }
5689 }
5690
5691     void flushCommandBufferPendingCommandsBottomUp(void* context, VkQueue queue, const std::vector<VkCommandBuffer>& workingSet) {
5692 if (workingSet.empty()) return;
5693
5694 std::vector<VkCommandBuffer> nextLevel;
5695 for (auto commandBuffer : workingSet) {
5696 struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
5697 forAllObjects(cb->subObjects, [&nextLevel](void* secondary) {
5698 nextLevel.push_back((VkCommandBuffer)secondary);
5699 });
5700 }
5701
5702 flushCommandBufferPendingCommandsBottomUp(context, queue, nextLevel);
5703
5704 // After this point, everyone at the previous level has been flushed
5705 for (auto cmdbuf : workingSet) {
5706 struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(cmdbuf);
5707
5708 // There's no pending commands here, skip. (case 1)
5709 if (!cb->privateStream) continue;
5710
5711             unsigned char* writtenPtr = nullptr;
5712 size_t written = 0;
5713 ((CommandBufferStagingStream*)cb->privateStream)->getWritten(&writtenPtr, &written);
5714
5715 // There's no pending commands here, skip. (case 2, stream created but no new recordings)
5716 if (!written) continue;
5717
5718 // There are pending commands to flush.
5719 VkEncoder* enc = (VkEncoder*)context;
5720 enc->vkQueueFlushCommandsGOOGLE(queue, cmdbuf, written, (const void*)writtenPtr, true /* do lock */);
5721
5722 // Reset this stream.
5723 ((CommandBufferStagingStream*)cb->privateStream)->reset();
5724 }
5725 }
5726
5727 // Unlike resetCommandBufferStagingInfo, this does not always erase its
5728 // superObjects pointers because the command buffer has merely been
5729 // submitted, not reset. However, if the command buffer was recorded with
5730 // ONE_TIME_SUBMIT_BIT, then it will also reset its primaries.
5731 //
5732 // Also, we save the set of descriptor sets referenced by this command
5733 // buffer because we only submitted the command buffer and it's possible to
5734 // update the descriptor set again and re-submit the same command without
5735 // recording it (Update-after-bind descriptor sets)
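    // e.g., without ONE_TIME_SUBMIT_BIT an application may legally do:
    //   vkQueueSubmit(queue, 1, &si, fence1);  // command buffer references set S
    //   vkUpdateDescriptorSets(...);           // S changes; no re-record
    //   vkQueueSubmit(queue, 1, &si, fence2);  // same command buffer, new S contents
    // so the pending descriptor set list must survive the first submit.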
5736     void resetCommandBufferPendingTopology(VkCommandBuffer commandBuffer) {
5737 struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
5738 if (cb->flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) {
5739 resetCommandBufferStagingInfo(commandBuffer,
5740 true /* reset primaries */,
5741 true /* clear pending descriptor sets */);
5742 } else {
5743 resetCommandBufferStagingInfo(commandBuffer,
5744 false /* Don't reset primaries */,
5745 false /* Don't clear pending descriptor sets */);
5746 }
5747 }
5748
5749     void flushStagingStreams(void* context, VkQueue queue, uint32_t submitCount, const VkSubmitInfo* pSubmits) {
5750 std::vector<VkCommandBuffer> toFlush;
5751 for (uint32_t i = 0; i < submitCount; ++i) {
5752 for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; ++j) {
5753 toFlush.push_back(pSubmits[i].pCommandBuffers[j]);
5754 }
5755 }
5756
5757 std::unordered_set<VkDescriptorSet> pendingSets;
5758 collectAllPendingDescriptorSetsBottomUp(toFlush, pendingSets);
5759 commitDescriptorSetUpdates(context, queue, pendingSets);
5760
5761 flushCommandBufferPendingCommandsBottomUp(context, queue, toFlush);
5762
5763 for (auto cb : toFlush) {
5764 resetCommandBufferPendingTopology(cb);
5765 }
5766 }
5767
5768     VkResult on_vkQueueSubmit(
5769 void* context, VkResult input_result,
5770 VkQueue queue, uint32_t submitCount, const VkSubmitInfo* pSubmits, VkFence fence) {
5771 AEMU_SCOPED_TRACE("on_vkQueueSubmit");
5772
5773 flushStagingStreams(context, queue, submitCount, pSubmits);
5774
5775 std::vector<VkSemaphore> pre_signal_semaphores;
5776 std::vector<zx_handle_t> pre_signal_events;
5777 std::vector<int> pre_signal_sync_fds;
5778 std::vector<std::pair<zx_handle_t, zx_koid_t>> post_wait_events;
5779 std::vector<int> post_wait_sync_fds;
5780
5781 VkEncoder* enc = (VkEncoder*)context;
5782
5783 AutoLock lock(mLock);
5784
5785 for (uint32_t i = 0; i < submitCount; ++i) {
5786 for (uint32_t j = 0; j < pSubmits[i].waitSemaphoreCount; ++j) {
5787 auto it = info_VkSemaphore.find(pSubmits[i].pWaitSemaphores[j]);
5788 if (it != info_VkSemaphore.end()) {
5789 auto& semInfo = it->second;
5790 #ifdef VK_USE_PLATFORM_FUCHSIA
5791 if (semInfo.eventHandle) {
5792 pre_signal_events.push_back(semInfo.eventHandle);
5793 pre_signal_semaphores.push_back(pSubmits[i].pWaitSemaphores[j]);
5794 }
5795 #endif
5796 #ifdef VK_USE_PLATFORM_ANDROID_KHR
5797 if (semInfo.syncFd >= 0) {
5798 pre_signal_sync_fds.push_back(semInfo.syncFd);
5799 pre_signal_semaphores.push_back(pSubmits[i].pWaitSemaphores[j]);
5800 }
5801 #endif
5802 }
5803 }
5804 for (uint32_t j = 0; j < pSubmits[i].signalSemaphoreCount; ++j) {
5805 auto it = info_VkSemaphore.find(pSubmits[i].pSignalSemaphores[j]);
5806 if (it != info_VkSemaphore.end()) {
5807 auto& semInfo = it->second;
5808 #ifdef VK_USE_PLATFORM_FUCHSIA
5809 if (semInfo.eventHandle) {
5810 post_wait_events.push_back(
5811 {semInfo.eventHandle, semInfo.eventKoid});
5812 #ifndef FUCHSIA_NO_TRACE
5813 if (semInfo.eventKoid != ZX_KOID_INVALID) {
5814 // TODO(fxbug.dev/66098): Remove the "semaphore"
5815 // FLOW_END events once it is removed from clients
5816 // (for example, gfx Engine).
5817 TRACE_FLOW_END("gfx", "semaphore",
5818 semInfo.eventKoid);
5819 TRACE_FLOW_BEGIN("gfx", "goldfish_post_wait_event",
5820 semInfo.eventKoid);
5821 }
5822 #endif
5823 }
5824 #endif
5825 #ifdef VK_USE_PLATFORM_ANDROID_KHR
5826 if (semInfo.syncFd >= 0) {
5827 post_wait_sync_fds.push_back(semInfo.syncFd);
5828 }
5829 #endif
5830 }
5831 }
5832 }
5833 lock.unlock();
5834
5835 if (pre_signal_semaphores.empty()) {
5836 if (supportsAsyncQueueSubmit()) {
5837 enc->vkQueueSubmitAsyncGOOGLE(queue, submitCount, pSubmits, fence, true /* do lock */);
5838 input_result = VK_SUCCESS;
5839 } else {
5840 input_result = enc->vkQueueSubmit(queue, submitCount, pSubmits, fence, true /* do lock */);
5841 if (input_result != VK_SUCCESS) return input_result;
5842 }
5843 } else {
5844             // Wait on the OS external objects (sync fds / zx events) in
5845             // the work pool, then signal the corresponding wait semaphores
5846             // via an empty submit before performing the real submit.
5847 std::vector<WorkPool::Task> preSignalTasks;
5849 #ifdef VK_USE_PLATFORM_FUCHSIA
5850 for (auto event : pre_signal_events) {
5851 preSignalTasks.push_back([event] {
5852 zx_object_wait_one(
5853 event,
5854 ZX_EVENT_SIGNALED,
5855 ZX_TIME_INFINITE,
5856 nullptr);
5857 });
5858 }
5859 #endif
5860 #ifdef VK_USE_PLATFORM_ANDROID_KHR
5861 for (auto fd : pre_signal_sync_fds) {
5862 preSignalTasks.push_back([fd] {
5863 sync_wait(fd, 3000);
5864 });
5865 }
5866 #endif
5867 auto waitGroupHandle = mWorkPool.schedule(preSignalTasks);
5868 mWorkPool.waitAll(waitGroupHandle);
5869
5870 VkSubmitInfo submit_info = {
5871 .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
5872 .waitSemaphoreCount = 0,
5873 .pWaitSemaphores = nullptr,
5874 .pWaitDstStageMask = nullptr,
5875 .signalSemaphoreCount =
5876 static_cast<uint32_t>(pre_signal_semaphores.size()),
5877 .pSignalSemaphores = pre_signal_semaphores.data()};
5878
5879 if (supportsAsyncQueueSubmit()) {
5880 enc->vkQueueSubmitAsyncGOOGLE(queue, 1, &submit_info, VK_NULL_HANDLE, true /* do lock */);
5881 } else {
5882 enc->vkQueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE, true /* do lock */);
5883 }
5884
5885 if (supportsAsyncQueueSubmit()) {
5886 enc->vkQueueSubmitAsyncGOOGLE(queue, submitCount, pSubmits, fence, true /* do lock */);
5887 input_result = VK_SUCCESS;
5888 } else {
5889 input_result = enc->vkQueueSubmit(queue, submitCount, pSubmits, fence, true /* do lock */);
5890 if (input_result != VK_SUCCESS) return input_result;
5891 }
5892 }
5893
5894 lock.lock();
5895 int externalFenceFdToSignal = -1;
5896
5897 #ifdef VK_USE_PLATFORM_ANDROID_KHR
5898 if (fence != VK_NULL_HANDLE) {
5899 auto it = info_VkFence.find(fence);
5900 if (it != info_VkFence.end()) {
5901 const auto& info = it->second;
5902 if (info.syncFd >= 0) {
5903 externalFenceFdToSignal = info.syncFd;
5904 }
5905 }
5906 }
5907 #endif
5908 if (externalFenceFdToSignal >= 0 ||
5909 !post_wait_events.empty() ||
5910 !post_wait_sync_fds.empty()) {
5911
5912 std::vector<WorkPool::Task> tasks;
5913
5914 tasks.push_back([queue, externalFenceFdToSignal,
5915 post_wait_events /* copy of zx handles */,
5916 post_wait_sync_fds /* copy of sync fds */] {
5917 auto hostConn = ResourceTracker::threadingCallbacks.hostConnectionGetFunc();
5918 auto vkEncoder = ResourceTracker::threadingCallbacks.vkEncoderGetFunc(hostConn);
5919 auto waitIdleRes = vkEncoder->vkQueueWaitIdle(queue, true /* do lock */);
5920 #ifdef VK_USE_PLATFORM_FUCHSIA
5921 AEMU_SCOPED_TRACE("on_vkQueueSubmit::SignalSemaphores");
5922 (void)externalFenceFdToSignal;
5923 for (auto& [event, koid] : post_wait_events) {
5924 #ifndef FUCHSIA_NO_TRACE
5925 if (koid != ZX_KOID_INVALID) {
5926 TRACE_FLOW_END("gfx", "goldfish_post_wait_event", koid);
5927 TRACE_FLOW_BEGIN("gfx", "event_signal", koid);
5928 }
5929 #endif
5930 zx_object_signal(event, 0, ZX_EVENT_SIGNALED);
5931 }
5932 #endif
5933 #ifdef VK_USE_PLATFORM_ANDROID_KHR
5934 for (auto& fd : post_wait_sync_fds) {
5935 goldfish_sync_signal(fd);
5936 }
5937
5938 if (externalFenceFdToSignal >= 0) {
5939 ALOGV("%s: external fence real signal: %d\n", __func__, externalFenceFdToSignal);
5940 goldfish_sync_signal(externalFenceFdToSignal);
5941 }
5942 #endif
5943 });
5944 auto queueAsyncWaitHandle = mWorkPool.schedule(tasks);
5945 auto& queueWorkItems = mQueueSensitiveWorkPoolItems[queue];
5946 queueWorkItems.push_back(queueAsyncWaitHandle);
5947 }
5948
5949 return VK_SUCCESS;
5950 }
5951
5952     VkResult on_vkQueueWaitIdle(
5953 void* context, VkResult,
5954 VkQueue queue) {
5955
5956 VkEncoder* enc = (VkEncoder*)context;
5957
5958 AutoLock lock(mLock);
5959 std::vector<WorkPool::WaitGroupHandle> toWait =
5960 mQueueSensitiveWorkPoolItems[queue];
5961 mQueueSensitiveWorkPoolItems[queue].clear();
5962 lock.unlock();
5963
5964 if (toWait.empty()) {
5965 ALOGV("%s: No queue-specific work pool items\n", __func__);
5966 return enc->vkQueueWaitIdle(queue, true /* do lock */);
5967 }
5968
5969 for (auto handle : toWait) {
5970 ALOGV("%s: waiting on work group item: %llu\n", __func__,
5971 (unsigned long long)handle);
5972 mWorkPool.waitAll(handle);
5973 }
5974
5975 // now done waiting, get the host's opinion
5976 return enc->vkQueueWaitIdle(queue, true /* do lock */);
5977 }
5978
5979     void unwrap_VkNativeBufferANDROID(
5980 const VkImageCreateInfo* pCreateInfo,
5981 VkImageCreateInfo* local_pCreateInfo) {
5982
5983 if (!pCreateInfo->pNext) return;
5984
5985 const VkNativeBufferANDROID* nativeInfo =
5986 vk_find_struct<VkNativeBufferANDROID>(pCreateInfo);
5987 if (!nativeInfo) {
5988 return;
5989 }
5990
5991 if (!nativeInfo->handle) return;
5992
5993 VkNativeBufferANDROID* nativeInfoOut =
5994 reinterpret_cast<VkNativeBufferANDROID*>(
5995 const_cast<void*>(
5996 local_pCreateInfo->pNext));
5997
5998 if (!nativeInfoOut->handle) {
5999 ALOGE("FATAL: Local native buffer info not properly allocated!");
6000 abort();
6001 }
6002
6003 *(uint32_t*)(nativeInfoOut->handle) =
6004 ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->
6005 grallocHelper()->getHostHandle(
6006 (const native_handle_t*)nativeInfo->handle);
6007 }
6008
6009     void unwrap_vkAcquireImageANDROID_nativeFenceFd(int fd, int*) {
6010 #ifdef VK_USE_PLATFORM_ANDROID_KHR
6011 if (fd != -1) {
6012 // Implicit Synchronization
6013 sync_wait(fd, 3000);
6014 // From libvulkan's swapchain.cpp:
6015 // """
6016 // NOTE: we're relying on AcquireImageANDROID to close fence_clone,
6017 // even if the call fails. We could close it ourselves on failure, but
6018 // that would create a race condition if the driver closes it on a
6019 // failure path: some other thread might create an fd with the same
6020 // number between the time the driver closes it and the time we close
6021 // it. We must assume one of: the driver *always* closes it even on
6022 // failure, or *never* closes it on failure.
6023 // """
6024 // Therefore, assume contract where we need to close fd in this driver
6025 close(fd);
6026 }
6027 #endif
6028 }
6029
6030 // Action of vkMapMemoryIntoAddressSpaceGOOGLE:
6031 // 1. preprocess (on_vkMapMemoryIntoAddressSpaceGOOGLE_pre):
6032 // uses address space device to reserve the right size of
6033 // memory.
6034 // 2. the reservation results in a physical address. the physical
6035 // address is set as |*pAddress|.
6036 // 3. after pre, the API call is encoded to the host, where the
6037 // value of pAddress is also sent (the physical address).
6038 // 4. the host will obtain the actual gpu pointer and send it
6039 // back out in |*pAddress|.
6040 // 5. postprocess (on_vkMapMemoryIntoAddressSpaceGOOGLE) will run,
6041 // using the mmap() method of GoldfishAddressSpaceBlock to obtain
6042 // a pointer in guest userspace corresponding to the host pointer.
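    // Roughly, the guest-side flow (names as in this file):
    //   uint64_t addr = 0;
    //   on_vkMapMemoryIntoAddressSpaceGOOGLE_pre(..., memory, &addr);
    //   // addr now holds the reserved guest physical address; the encoder
    //   // round-trips it to the host, which writes back a gpu address
    //   on_vkMapMemoryIntoAddressSpaceGOOGLE(..., memory, &addr);
    //   // addr now holds a guest userspace pointer from block.mmap()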
6043     VkResult on_vkMapMemoryIntoAddressSpaceGOOGLE_pre(
6044 void*,
6045 VkResult,
6046 VkDevice,
6047 VkDeviceMemory memory,
6048 uint64_t* pAddress) {
6049
6050 AutoLock lock(mLock);
6051
6052 auto it = info_VkDeviceMemory.find(memory);
6053 if (it == info_VkDeviceMemory.end()) {
6054 return VK_ERROR_OUT_OF_HOST_MEMORY;
6055 }
6056
6057 auto& memInfo = it->second;
6058 memInfo.goldfishAddressSpaceBlock =
6059 new GoldfishAddressSpaceBlock;
6060 auto& block = *(memInfo.goldfishAddressSpaceBlock);
6061
6062 block.allocate(
6063 mGoldfishAddressSpaceBlockProvider.get(),
6064 memInfo.mappedSize);
6065
6066 *pAddress = block.physAddr();
6067
6068 return VK_SUCCESS;
6069 }
6070
6071     VkResult on_vkMapMemoryIntoAddressSpaceGOOGLE(
6072 void*,
6073 VkResult input_result,
6074 VkDevice,
6075 VkDeviceMemory memory,
6076 uint64_t* pAddress) {
6077
6078 if (input_result != VK_SUCCESS) {
6079 return input_result;
6080 }
6081
6082 // Now pAddress points to the gpu addr from host.
6083 AutoLock lock(mLock);
6084
6085 auto it = info_VkDeviceMemory.find(memory);
6086 if (it == info_VkDeviceMemory.end()) {
6087 return VK_ERROR_OUT_OF_HOST_MEMORY;
6088 }
6089
6090 auto& memInfo = it->second;
6091 auto& block = *(memInfo.goldfishAddressSpaceBlock);
6092
6093 uint64_t gpuAddr = *pAddress;
6094
6095 void* userPtr = block.mmap(gpuAddr);
6096
6097 D("%s: Got new host visible alloc. "
6098 "Sizeof void: %zu map size: %zu Range: [%p %p]",
6099 __func__,
6100 sizeof(void*), (size_t)memInfo.mappedSize,
6101 userPtr,
6102 (unsigned char*)userPtr + memInfo.mappedSize);
6103
6104 *pAddress = (uint64_t)(uintptr_t)userPtr;
6105
6106 return input_result;
6107 }
6108
6109     VkResult initDescriptorUpdateTemplateBuffers(
6110 const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo,
6111 VkDescriptorUpdateTemplate descriptorUpdateTemplate) {
6112
6113 AutoLock lock(mLock);
6114
6115 auto it = info_VkDescriptorUpdateTemplate.find(descriptorUpdateTemplate);
6116 if (it == info_VkDescriptorUpdateTemplate.end()) {
6117 return VK_ERROR_INITIALIZATION_FAILED;
6118 }
6119
6120 auto& info = it->second;
6121
6122 for (uint32_t i = 0; i < pCreateInfo->descriptorUpdateEntryCount; ++i) {
6123 const auto& entry = pCreateInfo->pDescriptorUpdateEntries[i];
6124 uint32_t descCount = entry.descriptorCount;
6125 VkDescriptorType descType = entry.descriptorType;
6126 ++info.templateEntryCount;
6127 for (uint32_t j = 0; j < descCount; ++j) {
6128 if (isDescriptorTypeImageInfo(descType)) {
6129 ++info.imageInfoCount;
6130 } else if (isDescriptorTypeBufferInfo(descType)) {
6131 ++info.bufferInfoCount;
6132 } else if (isDescriptorTypeBufferView(descType)) {
6133 ++info.bufferViewCount;
6134 } else {
6135 ALOGE("%s: FATAL: Unknown descriptor type %d\n", __func__, descType);
6136 abort();
6137 }
6138 }
6139 }
6140
6141 if (info.templateEntryCount)
6142 info.templateEntries = new VkDescriptorUpdateTemplateEntry[info.templateEntryCount];
6143
6144 if (info.imageInfoCount) {
6145 info.imageInfoIndices = new uint32_t[info.imageInfoCount];
6146 info.imageInfos = new VkDescriptorImageInfo[info.imageInfoCount];
6147 }
6148
6149 if (info.bufferInfoCount) {
6150 info.bufferInfoIndices = new uint32_t[info.bufferInfoCount];
6151 info.bufferInfos = new VkDescriptorBufferInfo[info.bufferInfoCount];
6152 }
6153
6154 if (info.bufferViewCount) {
6155 info.bufferViewIndices = new uint32_t[info.bufferViewCount];
6156 info.bufferViews = new VkBufferView[info.bufferViewCount];
6157 }
6158
6159 uint32_t imageInfoIndex = 0;
6160 uint32_t bufferInfoIndex = 0;
6161 uint32_t bufferViewIndex = 0;
6162
6163 for (uint32_t i = 0; i < pCreateInfo->descriptorUpdateEntryCount; ++i) {
6164 const auto& entry = pCreateInfo->pDescriptorUpdateEntries[i];
6165 uint32_t descCount = entry.descriptorCount;
6166 VkDescriptorType descType = entry.descriptorType;
6167
6168 info.templateEntries[i] = entry;
6169
6170 for (uint32_t j = 0; j < descCount; ++j) {
6171 if (isDescriptorTypeImageInfo(descType)) {
6172 info.imageInfoIndices[imageInfoIndex] = i;
6173 ++imageInfoIndex;
6174 } else if (isDescriptorTypeBufferInfo(descType)) {
6175 info.bufferInfoIndices[bufferInfoIndex] = i;
6176 ++bufferInfoIndex;
6177 } else if (isDescriptorTypeBufferView(descType)) {
6178 info.bufferViewIndices[bufferViewIndex] = i;
6179 ++bufferViewIndex;
6180 } else {
6181 ALOGE("%s: FATAL: Unknown descriptor type %d\n", __func__, descType);
6182 abort();
6183 }
6184 }
6185 }
6186
6187 return VK_SUCCESS;
6188 }
6189
6190     VkResult on_vkCreateDescriptorUpdateTemplate(
6191 void* context, VkResult input_result,
6192 VkDevice device,
6193 const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo,
6194 const VkAllocationCallbacks* pAllocator,
6195 VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate) {
6196
6197 (void)context;
6198 (void)device;
6199 (void)pAllocator;
6200
6201 if (input_result != VK_SUCCESS) return input_result;
6202
6203 return initDescriptorUpdateTemplateBuffers(pCreateInfo, *pDescriptorUpdateTemplate);
6204 }
6205
6206     VkResult on_vkCreateDescriptorUpdateTemplateKHR(
6207 void* context, VkResult input_result,
6208 VkDevice device,
6209 const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo,
6210 const VkAllocationCallbacks* pAllocator,
6211 VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate) {
6212
6213 (void)context;
6214 (void)device;
6215 (void)pAllocator;
6216
6217 if (input_result != VK_SUCCESS) return input_result;
6218
6219 return initDescriptorUpdateTemplateBuffers(pCreateInfo, *pDescriptorUpdateTemplate);
6220 }
6221
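    // Applies a descriptor update template on the guest. The template entries
    // and the flattened imageInfo/bufferInfo/bufferView arrays were pre-sized
    // at template creation time, so this path only copies the user's data out
    // of pData (honoring each entry's offset and stride), then either replays
    // the writes into the emulated descriptor set (batched mode) or sends the
    // whole update to the host in a single sized GOOGLE call.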
6222     void on_vkUpdateDescriptorSetWithTemplate(
6223 void* context,
6224 VkDevice device,
6225 VkDescriptorSet descriptorSet,
6226 VkDescriptorUpdateTemplate descriptorUpdateTemplate,
6227 const void* pData) {
6228
6229 VkEncoder* enc = (VkEncoder*)context;
6230
6231 uint8_t* userBuffer = (uint8_t*)pData;
6232 if (!userBuffer) return;
6233
6234         // TODO: Make this thread safe: the cached template arrays are read below after the lock is dropped.
6235 AutoLock lock(mLock);
6236
6237 auto it = info_VkDescriptorUpdateTemplate.find(descriptorUpdateTemplate);
6238 if (it == info_VkDescriptorUpdateTemplate.end()) {
6239 return;
6240 }
6241
6242 auto& info = it->second;
6243
6244 uint32_t templateEntryCount = info.templateEntryCount;
6245 VkDescriptorUpdateTemplateEntry* templateEntries = info.templateEntries;
6246
6247 uint32_t imageInfoCount = info.imageInfoCount;
6248 uint32_t bufferInfoCount = info.bufferInfoCount;
6249 uint32_t bufferViewCount = info.bufferViewCount;
6250 uint32_t* imageInfoIndices = info.imageInfoIndices;
6251 uint32_t* bufferInfoIndices = info.bufferInfoIndices;
6252 uint32_t* bufferViewIndices = info.bufferViewIndices;
6253 VkDescriptorImageInfo* imageInfos = info.imageInfos;
6254 VkDescriptorBufferInfo* bufferInfos = info.bufferInfos;
6255 VkBufferView* bufferViews = info.bufferViews;
6256
6257 lock.unlock();
6258
6259 size_t currImageInfoOffset = 0;
6260 size_t currBufferInfoOffset = 0;
6261 size_t currBufferViewOffset = 0;
6262
6263 struct goldfish_VkDescriptorSet* ds = as_goldfish_VkDescriptorSet(descriptorSet);
6264 ReifiedDescriptorSet* reified = ds->reified;
6265
6266 bool batched = mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate;
6267
6268 for (uint32_t i = 0; i < templateEntryCount; ++i) {
6269 const auto& entry = templateEntries[i];
6270 VkDescriptorType descType = entry.descriptorType;
6271 uint32_t dstBinding = entry.dstBinding;
6272
6273 auto offset = entry.offset;
6274 auto stride = entry.stride;
6275 auto dstArrayElement = entry.dstArrayElement;
6276
6277 uint32_t descCount = entry.descriptorCount;
6278
6279 if (isDescriptorTypeImageInfo(descType)) {
6280
6281 if (!stride) stride = sizeof(VkDescriptorImageInfo);
6282
6283 const VkDescriptorImageInfo* currImageInfoBegin =
6284 (const VkDescriptorImageInfo*)((uint8_t*)imageInfos + currImageInfoOffset);
6285
6286 for (uint32_t j = 0; j < descCount; ++j) {
6287 const VkDescriptorImageInfo* user =
6288 (const VkDescriptorImageInfo*)(userBuffer + offset + j * stride);
6289
6290                 memcpy(((uint8_t*)imageInfos) + currImageInfoOffset,
6291                     user,
6292                     sizeof(VkDescriptorImageInfo));
6293 currImageInfoOffset += sizeof(VkDescriptorImageInfo);
6294 }
6295
6296 if (batched) doEmulatedDescriptorImageInfoWriteFromTemplate(
6297 descType,
6298 dstBinding,
6299 dstArrayElement,
6300 descCount,
6301 currImageInfoBegin,
6302 reified);
6303
6304 } else if (isDescriptorTypeBufferInfo(descType)) {
6305
6306
6307 if (!stride) stride = sizeof(VkDescriptorBufferInfo);
6308
6309 const VkDescriptorBufferInfo* currBufferInfoBegin =
6310 (const VkDescriptorBufferInfo*)((uint8_t*)bufferInfos + currBufferInfoOffset);
6311
6312 for (uint32_t j = 0; j < descCount; ++j) {
6313 const VkDescriptorBufferInfo* user =
6314 (const VkDescriptorBufferInfo*)(userBuffer + offset + j * stride);
6315
6316                 memcpy(((uint8_t*)bufferInfos) + currBufferInfoOffset,
6317                     user,
6318                     sizeof(VkDescriptorBufferInfo));
6319 currBufferInfoOffset += sizeof(VkDescriptorBufferInfo);
6320 }
6321
6322 if (batched) doEmulatedDescriptorBufferInfoWriteFromTemplate(
6323 descType,
6324 dstBinding,
6325 dstArrayElement,
6326 descCount,
6327 currBufferInfoBegin,
6328 reified);
6329
6330 } else if (isDescriptorTypeBufferView(descType)) {
6331 if (!stride) stride = sizeof(VkBufferView);
6332
6333 const VkBufferView* currBufferViewBegin =
6334 (const VkBufferView*)((uint8_t*)bufferViews + currBufferViewOffset);
6335
6336 for (uint32_t j = 0; j < descCount; ++j) {
6337 memcpy(((uint8_t*)bufferViews) + currBufferViewOffset,
6338 userBuffer + offset + j * stride,
6339 sizeof(VkBufferView));
6340 currBufferViewOffset += sizeof(VkBufferView);
6341 }
6342
6343 if (batched) doEmulatedDescriptorBufferViewWriteFromTemplate(
6344 descType,
6345 dstBinding,
6346 dstArrayElement,
6347 descCount,
6348 currBufferViewBegin,
6349 reified);
6350 } else {
6351 ALOGE("%s: FATAL: Unknown descriptor type %d\n", __func__, descType);
6352 abort();
6353 }
6354 }
6355
6356 if (batched) return;
6357
6358 enc->vkUpdateDescriptorSetWithTemplateSizedGOOGLE(
6359 device,
6360 descriptorSet,
6361 descriptorUpdateTemplate,
6362 imageInfoCount,
6363 bufferInfoCount,
6364 bufferViewCount,
6365 imageInfoIndices,
6366 bufferInfoIndices,
6367 bufferViewIndices,
6368 imageInfos,
6369 bufferInfos,
6370 bufferViews,
6371 true /* do lock */);
6372 }
6373
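    // Shared implementation of the core and KHR image-format-properties2
    // queries. On Fuchsia, external-image queries for formats outside
    // kExternalImageSupportedFormats are rejected before reaching the host,
    // and the external memory properties are filled in guest-side. If a
    // VkAndroidHardwareBufferUsageANDROID output struct is chained, its usage
    // is derived from the requested create flags and usage bits.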
6374     VkResult on_vkGetPhysicalDeviceImageFormatProperties2_common(
6375 bool isKhr,
6376 void* context, VkResult input_result,
6377 VkPhysicalDevice physicalDevice,
6378 const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo,
6379 VkImageFormatProperties2* pImageFormatProperties) {
6380
6381 VkEncoder* enc = (VkEncoder*)context;
6382 (void)input_result;
6383
6384 #ifdef VK_USE_PLATFORM_FUCHSIA
6385
6386 constexpr VkFormat kExternalImageSupportedFormats[] = {
6387 VK_FORMAT_B8G8R8A8_SINT,
6388 VK_FORMAT_B8G8R8A8_UNORM,
6389 VK_FORMAT_B8G8R8A8_SRGB,
6390 VK_FORMAT_B8G8R8A8_SNORM,
6391 VK_FORMAT_B8G8R8A8_SSCALED,
6392 VK_FORMAT_B8G8R8A8_USCALED,
6393 VK_FORMAT_R8G8B8A8_SINT,
6394 VK_FORMAT_R8G8B8A8_UNORM,
6395 VK_FORMAT_R8G8B8A8_SRGB,
6396 VK_FORMAT_R8G8B8A8_SNORM,
6397 VK_FORMAT_R8G8B8A8_SSCALED,
6398 VK_FORMAT_R8G8B8A8_USCALED,
6399 VK_FORMAT_R8_UNORM,
6400 VK_FORMAT_R8_UINT,
6401 VK_FORMAT_R8_USCALED,
6402 VK_FORMAT_R8_SNORM,
6403 VK_FORMAT_R8_SINT,
6404 VK_FORMAT_R8_SSCALED,
6405 VK_FORMAT_R8_SRGB,
6406 VK_FORMAT_R8G8_UNORM,
6407 VK_FORMAT_R8G8_UINT,
6408 VK_FORMAT_R8G8_USCALED,
6409 VK_FORMAT_R8G8_SNORM,
6410 VK_FORMAT_R8G8_SINT,
6411 VK_FORMAT_R8G8_SSCALED,
6412 VK_FORMAT_R8G8_SRGB,
6413 };
6414
6415 VkExternalImageFormatProperties* ext_img_properties =
6416 vk_find_struct<VkExternalImageFormatProperties>(pImageFormatProperties);
6417
6418 if (ext_img_properties) {
6419 if (std::find(std::begin(kExternalImageSupportedFormats),
6420 std::end(kExternalImageSupportedFormats),
6421 pImageFormatInfo->format) == std::end(kExternalImageSupportedFormats)) {
6422 return VK_ERROR_FORMAT_NOT_SUPPORTED;
6423 }
6424 }
6425 #endif
6426
6427 VkAndroidHardwareBufferUsageANDROID* output_ahw_usage =
6428 vk_find_struct<VkAndroidHardwareBufferUsageANDROID>(pImageFormatProperties);
6429
6430 VkResult hostRes;
6431
6432 if (isKhr) {
6433 hostRes = enc->vkGetPhysicalDeviceImageFormatProperties2KHR(
6434 physicalDevice, pImageFormatInfo,
6435 pImageFormatProperties, true /* do lock */);
6436 } else {
6437 hostRes = enc->vkGetPhysicalDeviceImageFormatProperties2(
6438 physicalDevice, pImageFormatInfo,
6439 pImageFormatProperties, true /* do lock */);
6440 }
6441
6442 if (hostRes != VK_SUCCESS) return hostRes;
6443
6444 #ifdef VK_USE_PLATFORM_FUCHSIA
6445 if (ext_img_properties) {
6446 const VkPhysicalDeviceExternalImageFormatInfo* ext_img_info =
6447 vk_find_struct<VkPhysicalDeviceExternalImageFormatInfo>(pImageFormatInfo);
6448 if (ext_img_info) {
6449 switch (static_cast<uint32_t>(ext_img_info->handleType)) {
6450 case VK_EXTERNAL_MEMORY_HANDLE_TYPE_TEMP_ZIRCON_VMO_BIT_FUCHSIA:
6451 ext_img_properties->externalMemoryProperties = {
6452 .externalMemoryFeatures = VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT |
6453 VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT,
6454 .exportFromImportedHandleTypes =
6455 VK_EXTERNAL_MEMORY_HANDLE_TYPE_TEMP_ZIRCON_VMO_BIT_FUCHSIA,
6456 .compatibleHandleTypes =
6457 VK_EXTERNAL_MEMORY_HANDLE_TYPE_TEMP_ZIRCON_VMO_BIT_FUCHSIA,
6458 };
6459 break;
6460 case VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA:
6461 ext_img_properties->externalMemoryProperties = {
6462 .externalMemoryFeatures = VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT |
6463 VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT,
6464 .exportFromImportedHandleTypes =
6465 VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA,
6466 .compatibleHandleTypes =
6467 VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA,
6468 };
6469 break;
6470 }
6471 }
6472 }
6473 #endif
6474
6475 if (output_ahw_usage) {
6476 output_ahw_usage->androidHardwareBufferUsage =
6477 getAndroidHardwareBufferUsageFromVkUsage(
6478 pImageFormatInfo->flags,
6479 pImageFormatInfo->usage);
6480 }
6481
6482 return hostRes;
6483 }
6484
6485     VkResult on_vkGetPhysicalDeviceImageFormatProperties2(
6486 void* context, VkResult input_result,
6487 VkPhysicalDevice physicalDevice,
6488 const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo,
6489 VkImageFormatProperties2* pImageFormatProperties) {
6490 return on_vkGetPhysicalDeviceImageFormatProperties2_common(
6491 false /* not KHR */, context, input_result,
6492 physicalDevice, pImageFormatInfo, pImageFormatProperties);
6493 }
6494
6495     VkResult on_vkGetPhysicalDeviceImageFormatProperties2KHR(
6496 void* context, VkResult input_result,
6497 VkPhysicalDevice physicalDevice,
6498 const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo,
6499 VkImageFormatProperties2* pImageFormatProperties) {
6500 return on_vkGetPhysicalDeviceImageFormatProperties2_common(
6501 true /* is KHR */, context, input_result,
6502 physicalDevice, pImageFormatInfo, pImageFormatProperties);
6503 }
6504
6505     void on_vkGetPhysicalDeviceExternalSemaphoreProperties(
6506 void*,
6507 VkPhysicalDevice,
6508 const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo,
6509 VkExternalSemaphoreProperties* pExternalSemaphoreProperties) {
6510 (void)pExternalSemaphoreInfo;
6511 (void)pExternalSemaphoreProperties;
6512 #ifdef VK_USE_PLATFORM_FUCHSIA
6513 if (pExternalSemaphoreInfo->handleType ==
6514 VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TEMP_ZIRCON_EVENT_BIT_FUCHSIA) {
6515 pExternalSemaphoreProperties->compatibleHandleTypes |=
6516 VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TEMP_ZIRCON_EVENT_BIT_FUCHSIA;
6517 pExternalSemaphoreProperties->exportFromImportedHandleTypes |=
6518 VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TEMP_ZIRCON_EVENT_BIT_FUCHSIA;
6519 pExternalSemaphoreProperties->externalSemaphoreFeatures |=
6520 VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
6521 VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
6522 }
6523 if (pExternalSemaphoreInfo->handleType ==
6524 static_cast<uint32_t>(VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA)) {
6525 pExternalSemaphoreProperties->compatibleHandleTypes |=
6526 VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA;
6527 pExternalSemaphoreProperties->exportFromImportedHandleTypes |=
6528 VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA;
6529 pExternalSemaphoreProperties->externalSemaphoreFeatures |=
6530 VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
6531 VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
6532 }
6533 #endif // VK_USE_PLATFORM_FUCHSIA
6534 #ifdef VK_USE_PLATFORM_ANDROID_KHR
6535 if (pExternalSemaphoreInfo->handleType ==
6536 VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT) {
6537 pExternalSemaphoreProperties->compatibleHandleTypes |=
6538 VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
6539 pExternalSemaphoreProperties->exportFromImportedHandleTypes |=
6540 VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
6541 pExternalSemaphoreProperties->externalSemaphoreFeatures |=
6542 VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
6543 VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
6544 }
6545 #endif // VK_USE_PLATFORM_ANDROID_KHR
6546 }
6547
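    // Encoder cleanup callbacks: objects that cache per-encoder state
    // register a callback here so onEncoderDeleted() can run it (outside the
    // lock) once the corresponding VkEncoder goes away.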
6548     void registerEncoderCleanupCallback(const VkEncoder* encoder, void* object, CleanupCallback callback) {
6549 AutoLock lock(mLock);
6550 auto& callbacks = mEncoderCleanupCallbacks[encoder];
6551 callbacks[object] = callback;
6552 }
6553
6554     void unregisterEncoderCleanupCallback(const VkEncoder* encoder, void* object) {
6555 AutoLock lock(mLock);
6556 mEncoderCleanupCallbacks[encoder].erase(object);
6557 }
6558
6559     void onEncoderDeleted(const VkEncoder* encoder) {
6560 AutoLock lock(mLock);
6561 if (mEncoderCleanupCallbacks.find(encoder) == mEncoderCleanupCallbacks.end()) return;
6562
6563 std::unordered_map<void*, CleanupCallback> callbackCopies = mEncoderCleanupCallbacks[encoder];
6564
6565 mEncoderCleanupCallbacks.erase(encoder);
6566 lock.unlock();
6567
6568 for (auto it : callbackCopies) {
6569 it.second();
6570 }
6571 }
6572
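    // When a command buffer (or queue, below) moves to a different encoder,
    // order the two streams with a sequence-number handshake on the host:
    // the previous encoder signals oldSeq + 1 and is flushed, then the new
    // encoder syncs on oldSeq + 2 before any further commands are recorded.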
6573     uint32_t syncEncodersForCommandBuffer(VkCommandBuffer commandBuffer, VkEncoder* currentEncoder) {
6574 struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
6575 if (!cb) return 0;
6576
6577 auto lastEncoder = cb->lastUsedEncoder;
6578
6579 if (lastEncoder == currentEncoder) return 0;
6580
6581 currentEncoder->incRef();
6582
6583 cb->lastUsedEncoder = currentEncoder;
6584
6585 if (!lastEncoder) return 0;
6586
6587 auto oldSeq = cb->sequenceNumber;
6588 cb->sequenceNumber += 2;
6589 lastEncoder->vkCommandBufferHostSyncGOOGLE(commandBuffer, false, oldSeq + 1, true /* do lock */);
6590 lastEncoder->flush();
6591 currentEncoder->vkCommandBufferHostSyncGOOGLE(commandBuffer, true, oldSeq + 2, true /* do lock */);
6592
6593 if (lastEncoder->decRef()) {
6594 cb->lastUsedEncoder = nullptr;
6595 }
6596 return 0;
6597 }
6598
6599     uint32_t syncEncodersForQueue(VkQueue queue, VkEncoder* currentEncoder) {
6600 if (!supportsAsyncQueueSubmit()) {
6601 return 0;
6602 }
6603
6604 struct goldfish_VkQueue* q = as_goldfish_VkQueue(queue);
6605 if (!q) return 0;
6606
6607 auto lastEncoder = q->lastUsedEncoder;
6608
6609 if (lastEncoder == currentEncoder) return 0;
6610
6611 currentEncoder->incRef();
6612
6613 q->lastUsedEncoder = currentEncoder;
6614
6615 if (!lastEncoder) return 0;
6616
6617 auto oldSeq = q->sequenceNumber;
6618 q->sequenceNumber += 2;
6619 lastEncoder->vkQueueHostSyncGOOGLE(queue, false, oldSeq + 1, true /* do lock */);
6620 lastEncoder->flush();
6621 currentEncoder->vkQueueHostSyncGOOGLE(queue, true, oldSeq + 2, true /* do lock */);
6622
6623 if (lastEncoder->decRef()) {
6624 q->lastUsedEncoder = nullptr;
6625 }
6626
6627 return 0;
6628 }
6629
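    // When the host supports deferred commands, begin/end/reset are sent as
    // async *GOOGLE variants and VK_SUCCESS is returned optimistically.
    // pInheritanceInfo is dropped for primary command buffers, since it only
    // applies to secondaries.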
6630     VkResult on_vkBeginCommandBuffer(
6631 void* context, VkResult input_result,
6632 VkCommandBuffer commandBuffer,
6633 const VkCommandBufferBeginInfo* pBeginInfo) {
6634
6635 (void)context;
6636
6637 resetCommandBufferStagingInfo(commandBuffer, true /* also reset primaries */, true /* also clear pending descriptor sets */);
6638
6639 VkEncoder* enc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
6640 (void)input_result;
6641
6642 struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
6643 cb->flags = pBeginInfo->flags;
6644
6645 VkCommandBufferBeginInfo modifiedBeginInfo;
6646
6647 if (pBeginInfo->pInheritanceInfo && !cb->isSecondary) {
6648 modifiedBeginInfo = *pBeginInfo;
6649 modifiedBeginInfo.pInheritanceInfo = nullptr;
6650 pBeginInfo = &modifiedBeginInfo;
6651 }
6652
6653 if (!supportsDeferredCommands()) {
6654 return enc->vkBeginCommandBuffer(commandBuffer, pBeginInfo, true /* do lock */);
6655 }
6656
6657 enc->vkBeginCommandBufferAsyncGOOGLE(commandBuffer, pBeginInfo, true /* do lock */);
6658
6659 return VK_SUCCESS;
6660 }
6661
6662     VkResult on_vkEndCommandBuffer(
6663 void* context, VkResult input_result,
6664 VkCommandBuffer commandBuffer) {
6665
6666 VkEncoder* enc = (VkEncoder*)context;
6667 (void)input_result;
6668
6669 if (!supportsDeferredCommands()) {
6670 return enc->vkEndCommandBuffer(commandBuffer, true /* do lock */);
6671 }
6672
6673 enc->vkEndCommandBufferAsyncGOOGLE(commandBuffer, true /* do lock */);
6674
6675 return VK_SUCCESS;
6676 }
6677
6678     VkResult on_vkResetCommandBuffer(
6679 void* context, VkResult input_result,
6680 VkCommandBuffer commandBuffer,
6681 VkCommandBufferResetFlags flags) {
6682
6683 resetCommandBufferStagingInfo(commandBuffer, true /* also reset primaries */, true /* also clear pending descriptor sets */);
6684
6685 VkEncoder* enc = (VkEncoder*)context;
6686 (void)input_result;
6687
6688 if (!supportsDeferredCommands()) {
6689 return enc->vkResetCommandBuffer(commandBuffer, flags, true /* do lock */);
6690 }
6691
6692 enc->vkResetCommandBufferAsyncGOOGLE(commandBuffer, flags, true /* do lock */);
6693 return VK_SUCCESS;
6694 }
6695
6696     VkResult on_vkCreateImageView(
6697 void* context, VkResult input_result,
6698 VkDevice device,
6699 const VkImageViewCreateInfo* pCreateInfo,
6700 const VkAllocationCallbacks* pAllocator,
6701 VkImageView* pView) {
6702
6703 VkEncoder* enc = (VkEncoder*)context;
6704 (void)input_result;
6705
6706 VkImageViewCreateInfo localCreateInfo = vk_make_orphan_copy(*pCreateInfo);
6707
6708 #ifdef VK_USE_PLATFORM_ANDROID_KHR
6709 const VkExternalFormatANDROID* extFormatAndroidPtr =
6710 vk_find_struct<VkExternalFormatANDROID>(pCreateInfo);
6711 if (extFormatAndroidPtr) {
6712 if (extFormatAndroidPtr->externalFormat) {
6713 localCreateInfo.format =
6714 vk_format_from_android(extFormatAndroidPtr->externalFormat);
6715 }
6716 }
6717 #endif
6718
6719 return enc->vkCreateImageView(device, &localCreateInfo, pAllocator, pView, true /* do lock */);
6720 }
6721
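    // With queue submit with commands, link executed secondaries to their
    // primary (and vice versa) so resetting either one can locate and clean
    // up the staging state of the other.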
6722     void on_vkCmdExecuteCommands(
6723 void* context,
6724 VkCommandBuffer commandBuffer,
6725 uint32_t commandBufferCount,
6726 const VkCommandBuffer* pCommandBuffers) {
6727
6728 VkEncoder* enc = (VkEncoder*)context;
6729
6730 if (!mFeatureInfo->hasVulkanQueueSubmitWithCommands) {
6731 enc->vkCmdExecuteCommands(commandBuffer, commandBufferCount, pCommandBuffers, true /* do lock */);
6732 return;
6733 }
6734
6735 struct goldfish_VkCommandBuffer* primary = as_goldfish_VkCommandBuffer(commandBuffer);
6736 for (uint32_t i = 0; i < commandBufferCount; ++i) {
6737 struct goldfish_VkCommandBuffer* secondary = as_goldfish_VkCommandBuffer(pCommandBuffers[i]);
6738 appendObject(&secondary->superObjects, primary);
6739 appendObject(&primary->subObjects, secondary);
6740 }
6741
6742 enc->vkCmdExecuteCommands(commandBuffer, commandBufferCount, pCommandBuffers, true /* do lock */);
6743 }
6744
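    // Tracks descriptor sets bound to a command buffer while batched
    // descriptor set updates are enabled. The tracking struct is allocated
    // lazily into cb->userPtr and cleared by resetCommandBufferStagingInfo.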
6745     void addPendingDescriptorSets(VkCommandBuffer commandBuffer, uint32_t descriptorSetCount, const VkDescriptorSet* pDescriptorSets) {
6746 struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
6747
6748 if (!cb->userPtr) {
6749 CommandBufferPendingDescriptorSets* newPendingSets =
6750 new CommandBufferPendingDescriptorSets;
6751 cb->userPtr = newPendingSets;
6752 }
6753
6754 CommandBufferPendingDescriptorSets* pendingSets =
6755 (CommandBufferPendingDescriptorSets*)cb->userPtr;
6756
6757 for (uint32_t i = 0; i < descriptorSetCount; ++i) {
6758 pendingSets->sets.insert(pDescriptorSets[i]);
6759 }
6760 }
6761
6762     void on_vkCmdBindDescriptorSets(
6763 void* context,
6764 VkCommandBuffer commandBuffer,
6765 VkPipelineBindPoint pipelineBindPoint,
6766 VkPipelineLayout layout,
6767 uint32_t firstSet,
6768 uint32_t descriptorSetCount,
6769 const VkDescriptorSet* pDescriptorSets,
6770 uint32_t dynamicOffsetCount,
6771 const uint32_t* pDynamicOffsets) {
6772
6773 VkEncoder* enc = (VkEncoder*)context;
6774
6775 if (mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate)
6776 addPendingDescriptorSets(commandBuffer, descriptorSetCount, pDescriptorSets);
6777
6778 enc->vkCmdBindDescriptorSets(
6779 commandBuffer,
6780 pipelineBindPoint,
6781 layout,
6782 firstSet,
6783 descriptorSetCount,
6784 pDescriptorSets,
6785 dynamicOffsetCount,
6786 pDynamicOffsets,
6787 true /* do lock */);
6788 }
6789
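    // Descriptor set layouts are refcounted on the guest; the host-side
    // destroy is only issued once the last reference is released.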
6790     void decDescriptorSetLayoutRef(
6791 void* context,
6792 VkDevice device,
6793 VkDescriptorSetLayout descriptorSetLayout,
6794 const VkAllocationCallbacks* pAllocator) {
6795
6796 if (!descriptorSetLayout) return;
6797
6798 struct goldfish_VkDescriptorSetLayout* setLayout = as_goldfish_VkDescriptorSetLayout(descriptorSetLayout);
6799
6800 if (0 == --setLayout->layoutInfo->refcount) {
6801 VkEncoder* enc = (VkEncoder*)context;
6802 enc->vkDestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator, true /* do lock */);
6803 }
6804 }
6805
6806     void on_vkDestroyDescriptorSetLayout(
6807 void* context,
6808 VkDevice device,
6809 VkDescriptorSetLayout descriptorSetLayout,
6810 const VkAllocationCallbacks* pAllocator) {
6811 decDescriptorSetLayoutRef(context, device, descriptorSetLayout, pAllocator);
6812 }
6813
6814     VkResult on_vkAllocateCommandBuffers(
6815 void* context,
6816 VkResult input_result,
6817 VkDevice device,
6818 const VkCommandBufferAllocateInfo* pAllocateInfo,
6819 VkCommandBuffer* pCommandBuffers) {
6820
6821 (void)input_result;
6822
6823 VkEncoder* enc = (VkEncoder*)context;
6824 VkResult res = enc->vkAllocateCommandBuffers(device, pAllocateInfo, pCommandBuffers, true /* do lock */);
6825 if (VK_SUCCESS != res) return res;
6826
6827 for (uint32_t i = 0; i < pAllocateInfo->commandBufferCount; ++i) {
6828 struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(pCommandBuffers[i]);
6829 cb->isSecondary = pAllocateInfo->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY;
6830 }
6831
6832 return res;
6833 }
6834
6835     uint32_t getApiVersionFromInstance(VkInstance instance) const {
6836 AutoLock lock(mLock);
6837 uint32_t api = kDefaultApiVersion;
6838
6839 auto it = info_VkInstance.find(instance);
6840 if (it == info_VkInstance.end()) return api;
6841
6842 api = it->second.highestApiVersion;
6843
6844 return api;
6845 }
6846
6847     uint32_t getApiVersionFromDevice(VkDevice device) const {
6848 AutoLock lock(mLock);
6849
6850 uint32_t api = kDefaultApiVersion;
6851
6852 auto it = info_VkDevice.find(device);
6853 if (it == info_VkDevice.end()) return api;
6854
6855 api = it->second.apiVersion;
6856
6857 return api;
6858 }
6859
6860     bool hasInstanceExtension(VkInstance instance, const std::string& name) const {
6861 AutoLock lock(mLock);
6862
6863 auto it = info_VkInstance.find(instance);
6864 if (it == info_VkInstance.end()) return false;
6865
6866 return it->second.enabledExtensions.find(name) !=
6867 it->second.enabledExtensions.end();
6868 }
6869
6870     bool hasDeviceExtension(VkDevice device, const std::string& name) const {
6871 AutoLock lock(mLock);
6872
6873 auto it = info_VkDevice.find(device);
6874 if (it == info_VkDevice.end()) return false;
6875
6876 return it->second.enabledExtensions.find(name) !=
6877 it->second.enabledExtensions.end();
6878 }
6879
6880 // Resets staging stream for this command buffer and primary command buffers
6881 // where this command buffer has been recorded.
6882     void resetCommandBufferStagingInfo(VkCommandBuffer commandBuffer, bool alsoResetPrimaries, bool alsoClearPendingDescriptorSets) {
6883 struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
6884 if (!cb) {
6885 return;
6886 }
6887 if (cb->privateEncoder) {
6888 sStaging.pushStaging((CommandBufferStagingStream*)cb->privateStream, cb->privateEncoder);
6889 cb->privateEncoder = nullptr;
6890 cb->privateStream = nullptr;
6891 }
6892
6893 if (alsoClearPendingDescriptorSets && cb->userPtr) {
6894 CommandBufferPendingDescriptorSets* pendingSets = (CommandBufferPendingDescriptorSets*)cb->userPtr;
6895 pendingSets->sets.clear();
6896 }
6897
6898 if (alsoResetPrimaries) {
6899             forAllObjects(cb->superObjects, [this, alsoResetPrimaries, alsoClearPendingDescriptorSets](void* obj) {
6900                 VkCommandBuffer superCommandBuffer = (VkCommandBuffer)obj;
6902                 this->resetCommandBufferStagingInfo(superCommandBuffer, alsoResetPrimaries, alsoClearPendingDescriptorSets);
6903             });
6904 eraseObjects(&cb->superObjects);
6905 }
6906
6907 forAllObjects(cb->subObjects, [cb](void* obj) {
6908 VkCommandBuffer subCommandBuffer = (VkCommandBuffer)obj;
6909 struct goldfish_VkCommandBuffer* subCb = as_goldfish_VkCommandBuffer(subCommandBuffer);
6910 // We don't do resetCommandBufferStagingInfo(subCommandBuffer)
6911 // since the user still might have submittable stuff pending there.
6912 eraseObject(&subCb->superObjects, (void*)cb);
6913 });
6914
6915 eraseObjects(&cb->subObjects);
6916 }
6917
6918     void resetCommandPoolStagingInfo(VkCommandPool commandPool) {
6919 struct goldfish_VkCommandPool* p = as_goldfish_VkCommandPool(commandPool);
6920
6921 if (!p) return;
6922
6923 forAllObjects(p->subObjects, [this](void* commandBuffer) {
6924 this->resetCommandBufferStagingInfo((VkCommandBuffer)commandBuffer, true /* also reset primaries */, true /* also clear pending descriptor sets */);
6925 });
6926 }
6927
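    // Maintain pool <-> command buffer back-links so resetting or clearing a
    // pool can reset (or unregister) all of the buffers allocated from it.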
6928     void addToCommandPool(VkCommandPool commandPool,
6929 uint32_t commandBufferCount,
6930 VkCommandBuffer* pCommandBuffers) {
6931 for (uint32_t i = 0; i < commandBufferCount; ++i) {
6932 struct goldfish_VkCommandPool* p = as_goldfish_VkCommandPool(commandPool);
6933 struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(pCommandBuffers[i]);
6934 appendObject(&p->subObjects, (void*)(pCommandBuffers[i]));
6935 appendObject(&cb->poolObjects, (void*)commandPool);
6936 }
6937 }
6938
6939     void clearCommandPool(VkCommandPool commandPool) {
6940 resetCommandPoolStagingInfo(commandPool);
6941 struct goldfish_VkCommandPool* p = as_goldfish_VkCommandPool(commandPool);
6942 forAllObjects(p->subObjects, [this](void* commandBuffer) {
6943 this->unregister_VkCommandBuffer((VkCommandBuffer)commandBuffer);
6944 });
6945 eraseObjects(&p->subObjects);
6946 }
6947
6948 private:
6949 mutable RecursiveLock mLock;
6950 HostVisibleMemoryVirtualizationInfo mHostVisibleMemoryVirtInfo;
6951 std::unique_ptr<EmulatorFeatureInfo> mFeatureInfo;
6952 std::unique_ptr<GoldfishAddressSpaceBlockProvider> mGoldfishAddressSpaceBlockProvider;
6953
6954 std::vector<VkExtensionProperties> mHostInstanceExtensions;
6955 std::vector<VkExtensionProperties> mHostDeviceExtensions;
6956
6957 int mSyncDeviceFd = -1;
6958 #ifdef VK_USE_PLATFORM_ANDROID_KHR
6959 int mRendernodeFd = -1;
6960 #endif
6961
6962 #ifdef VK_USE_PLATFORM_FUCHSIA
6963 std::unique_ptr<
6964 fidl::WireSyncClient<fuchsia_hardware_goldfish::ControlDevice>>
6965 mControlDevice;
6966 std::unique_ptr<fidl::WireSyncClient<fuchsia_sysmem::Allocator>>
6967 mSysmemAllocator;
6968 #endif
6969
6970 WorkPool mWorkPool { 4 };
6971 std::unordered_map<VkQueue, std::vector<WorkPool::WaitGroupHandle>>
6972 mQueueSensitiveWorkPoolItems;
6973
6974 std::unordered_map<const VkEncoder*, std::unordered_map<void*, CleanupCallback>> mEncoderCleanupCallbacks;
6975
6976 };
6977
6978 ResourceTracker::ResourceTracker() : mImpl(new ResourceTracker::Impl()) { }
6979 ResourceTracker::~ResourceTracker() { }
6980 VulkanHandleMapping* ResourceTracker::createMapping() {
6981 return &mImpl->createMapping;
6982 }
6983 VulkanHandleMapping* ResourceTracker::unwrapMapping() {
6984 return &mImpl->unwrapMapping;
6985 }
6986 VulkanHandleMapping* ResourceTracker::destroyMapping() {
6987 return &mImpl->destroyMapping;
6988 }
6989 VulkanHandleMapping* ResourceTracker::defaultMapping() {
6990 return &mImpl->defaultMapping;
6991 }
6992 static ResourceTracker* sTracker = nullptr;
6993 // static
6994 ResourceTracker* ResourceTracker::get() {
6995 if (!sTracker) {
6996 // To be initialized once on vulkan device open.
6997 sTracker = new ResourceTracker;
6998 }
6999 return sTracker;
7000 }
7001
7002 #define HANDLE_REGISTER_IMPL(type) \
7003 void ResourceTracker::register_##type(type obj) { \
7004 mImpl->register_##type(obj); \
7005 } \
7006 void ResourceTracker::unregister_##type(type obj) { \
7007 mImpl->unregister_##type(obj); \
7008 } \
7009
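// As a sketch of the generated code (assuming VkInstance is one of the
// handle types listed by GOLDFISH_VK_LIST_HANDLE_TYPES), each expansion of
// HANDLE_REGISTER_IMPL(type) produces a forwarding pair like:
//
//   void ResourceTracker::register_VkInstance(VkInstance obj) {
//       mImpl->register_VkInstance(obj);
//   }
//   void ResourceTracker::unregister_VkInstance(VkInstance obj) {
//       mImpl->unregister_VkInstance(obj);
//   }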
7010 GOLDFISH_VK_LIST_HANDLE_TYPES(HANDLE_REGISTER_IMPL)
7011
7012 bool ResourceTracker::isMemoryTypeHostVisible(
7013 VkDevice device, uint32_t typeIndex) const {
7014 return mImpl->isMemoryTypeHostVisible(device, typeIndex);
7015 }
7016
7017 uint8_t* ResourceTracker::getMappedPointer(VkDeviceMemory memory) {
7018 return mImpl->getMappedPointer(memory);
7019 }
7020
7021 VkDeviceSize ResourceTracker::getMappedSize(VkDeviceMemory memory) {
7022 return mImpl->getMappedSize(memory);
7023 }
7024
7025 VkDeviceSize ResourceTracker::getNonCoherentExtendedSize(VkDevice device, VkDeviceSize basicSize) const {
7026 return mImpl->getNonCoherentExtendedSize(device, basicSize);
7027 }
7028
7029 bool ResourceTracker::isValidMemoryRange(const VkMappedMemoryRange& range) const {
7030 return mImpl->isValidMemoryRange(range);
7031 }
7032
7033 void ResourceTracker::setupFeatures(const EmulatorFeatureInfo* features) {
7034 mImpl->setupFeatures(features);
7035 }
7036
7037 void ResourceTracker::setThreadingCallbacks(const ResourceTracker::ThreadingCallbacks& callbacks) {
7038 mImpl->setThreadingCallbacks(callbacks);
7039 }
7040
7041 bool ResourceTracker::hostSupportsVulkan() const {
7042 return mImpl->hostSupportsVulkan();
7043 }
7044
7045 bool ResourceTracker::usingDirectMapping() const {
7046 return mImpl->usingDirectMapping();
7047 }
7048
7049 uint32_t ResourceTracker::getStreamFeatures() const {
7050 return mImpl->getStreamFeatures();
7051 }
7052
7053 uint32_t ResourceTracker::getApiVersionFromInstance(VkInstance instance) const {
7054 return mImpl->getApiVersionFromInstance(instance);
7055 }
7056
7057 uint32_t ResourceTracker::getApiVersionFromDevice(VkDevice device) const {
7058 return mImpl->getApiVersionFromDevice(device);
7059 }
7060 bool ResourceTracker::hasInstanceExtension(VkInstance instance, const std::string &name) const {
7061 return mImpl->hasInstanceExtension(instance, name);
7062 }
7063 bool ResourceTracker::hasDeviceExtension(VkDevice device, const std::string &name) const {
7064 return mImpl->hasDeviceExtension(device, name);
7065 }
7066 void ResourceTracker::addToCommandPool(VkCommandPool commandPool,
7067 uint32_t commandBufferCount,
7068 VkCommandBuffer* pCommandBuffers) {
7069 mImpl->addToCommandPool(commandPool, commandBufferCount, pCommandBuffers);
7070 }
7071 void ResourceTracker::resetCommandPoolStagingInfo(VkCommandPool commandPool) {
7072 mImpl->resetCommandPoolStagingInfo(commandPool);
7073 }
7074
7075
7076 // static
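// Chooses the encoder used to record into a command buffer. When the stream
// supports queue submit with commands, each command buffer records into its
// own staging stream/encoder pair (recycled through sStaging); otherwise,
// recording goes through the thread-local encoder, with
// syncEncodersForCommandBuffer ordering any switch between encoders.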
7077 __attribute__((always_inline)) VkEncoder* ResourceTracker::getCommandBufferEncoder(VkCommandBuffer commandBuffer) {
7078 if (!(ResourceTracker::streamFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT)) {
7079 auto enc = ResourceTracker::getThreadLocalEncoder();
7080 ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, enc);
7081 return enc;
7082 }
7083
7084 struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
7085 if (!cb->privateEncoder) {
7086 sStaging.popStaging((CommandBufferStagingStream**)&cb->privateStream, &cb->privateEncoder);
7087 }
7088 uint8_t* writtenPtr; size_t written;
7089 ((CommandBufferStagingStream*)cb->privateStream)->getWritten(&writtenPtr, &written);
7090 return cb->privateEncoder;
7091 }
7092
7093 // static
7094 __attribute__((always_inline)) VkEncoder* ResourceTracker::getQueueEncoder(VkQueue queue) {
7095 auto enc = ResourceTracker::getThreadLocalEncoder();
7096 if (!(ResourceTracker::streamFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT)) {
7097 ResourceTracker::get()->syncEncodersForQueue(queue, enc);
7098 }
7099 return enc;
7100 }
7101
7102 // static
7103 __attribute__((always_inline)) VkEncoder* ResourceTracker::getThreadLocalEncoder() {
7104 auto hostConn = ResourceTracker::threadingCallbacks.hostConnectionGetFunc();
7105 auto vkEncoder = ResourceTracker::threadingCallbacks.vkEncoderGetFunc(hostConn);
7106 return vkEncoder;
7107 }
7108
7109 // static
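// sSeqnoPtr points at a sequence-number counter (presumably shared with the
// host transport) that is only accessed with seq-cst atomics.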
7110 void ResourceTracker::setSeqnoPtr(uint32_t* seqnoptr) {
7111 sSeqnoPtr = seqnoptr;
7112 }
7113
7114 // static
7115 __attribute__((always_inline)) uint32_t ResourceTracker::nextSeqno() {
7116 uint32_t res = __atomic_add_fetch(sSeqnoPtr, 1, __ATOMIC_SEQ_CST);
7117 return res;
7118 }
7119
7120 // static
7121 __attribute__((always_inline)) uint32_t ResourceTracker::getSeqno() {
7122 uint32_t res = __atomic_load_n(sSeqnoPtr, __ATOMIC_SEQ_CST);
7123 return res;
7124 }
7125
7126 VkResult ResourceTracker::on_vkEnumerateInstanceExtensionProperties(
7127 void* context,
7128 VkResult input_result,
7129 const char* pLayerName,
7130 uint32_t* pPropertyCount,
7131 VkExtensionProperties* pProperties) {
7132 return mImpl->on_vkEnumerateInstanceExtensionProperties(
7133 context, input_result, pLayerName, pPropertyCount, pProperties);
7134 }
7135
7136 VkResult ResourceTracker::on_vkEnumerateDeviceExtensionProperties(
7137 void* context,
7138 VkResult input_result,
7139 VkPhysicalDevice physicalDevice,
7140 const char* pLayerName,
7141 uint32_t* pPropertyCount,
7142 VkExtensionProperties* pProperties) {
7143 return mImpl->on_vkEnumerateDeviceExtensionProperties(
7144 context, input_result, physicalDevice, pLayerName, pPropertyCount, pProperties);
7145 }
7146
7147 VkResult ResourceTracker::on_vkEnumeratePhysicalDevices(
7148 void* context, VkResult input_result,
7149 VkInstance instance, uint32_t* pPhysicalDeviceCount,
7150 VkPhysicalDevice* pPhysicalDevices) {
7151 return mImpl->on_vkEnumeratePhysicalDevices(
7152 context, input_result, instance, pPhysicalDeviceCount,
7153 pPhysicalDevices);
7154 }
7155
7156 void ResourceTracker::on_vkGetPhysicalDeviceProperties(
7157 void* context,
7158 VkPhysicalDevice physicalDevice,
7159 VkPhysicalDeviceProperties* pProperties) {
7160 mImpl->on_vkGetPhysicalDeviceProperties(context, physicalDevice,
7161 pProperties);
7162 }
7163
7164 void ResourceTracker::on_vkGetPhysicalDeviceProperties2(
7165 void* context,
7166 VkPhysicalDevice physicalDevice,
7167 VkPhysicalDeviceProperties2* pProperties) {
7168 mImpl->on_vkGetPhysicalDeviceProperties2(context, physicalDevice,
7169 pProperties);
7170 }
7171
7172 void ResourceTracker::on_vkGetPhysicalDeviceProperties2KHR(
7173 void* context,
7174 VkPhysicalDevice physicalDevice,
7175 VkPhysicalDeviceProperties2* pProperties) {
7176 mImpl->on_vkGetPhysicalDeviceProperties2(context, physicalDevice,
7177 pProperties);
7178 }
7179
7180 void ResourceTracker::on_vkGetPhysicalDeviceMemoryProperties(
7181 void* context,
7182 VkPhysicalDevice physicalDevice,
7183 VkPhysicalDeviceMemoryProperties* pMemoryProperties) {
7184 mImpl->on_vkGetPhysicalDeviceMemoryProperties(
7185 context, physicalDevice, pMemoryProperties);
7186 }
7187
7188 void ResourceTracker::on_vkGetPhysicalDeviceMemoryProperties2(
7189 void* context,
7190 VkPhysicalDevice physicalDevice,
7191 VkPhysicalDeviceMemoryProperties2* pMemoryProperties) {
7192 mImpl->on_vkGetPhysicalDeviceMemoryProperties2(
7193 context, physicalDevice, pMemoryProperties);
7194 }
7195
7196 void ResourceTracker::on_vkGetPhysicalDeviceMemoryProperties2KHR(
7197 void* context,
7198 VkPhysicalDevice physicalDevice,
7199 VkPhysicalDeviceMemoryProperties2* pMemoryProperties) {
7200 mImpl->on_vkGetPhysicalDeviceMemoryProperties2(
7201 context, physicalDevice, pMemoryProperties);
7202 }
7203
7204 void ResourceTracker::on_vkGetDeviceQueue(void* context,
7205 VkDevice device,
7206 uint32_t queueFamilyIndex,
7207 uint32_t queueIndex,
7208 VkQueue* pQueue) {
7209 mImpl->on_vkGetDeviceQueue(context, device, queueFamilyIndex, queueIndex,
7210 pQueue);
7211 }
7212
7213 void ResourceTracker::on_vkGetDeviceQueue2(void* context,
7214 VkDevice device,
7215 const VkDeviceQueueInfo2* pQueueInfo,
7216 VkQueue* pQueue) {
7217 mImpl->on_vkGetDeviceQueue2(context, device, pQueueInfo, pQueue);
7218 }
7219
7220 VkResult ResourceTracker::on_vkCreateInstance(
7221 void* context,
7222 VkResult input_result,
7223 const VkInstanceCreateInfo* pCreateInfo,
7224 const VkAllocationCallbacks* pAllocator,
7225 VkInstance* pInstance) {
7226 return mImpl->on_vkCreateInstance(
7227 context, input_result, pCreateInfo, pAllocator, pInstance);
7228 }
7229
7230 VkResult ResourceTracker::on_vkCreateDevice(
7231 void* context,
7232 VkResult input_result,
7233 VkPhysicalDevice physicalDevice,
7234 const VkDeviceCreateInfo* pCreateInfo,
7235 const VkAllocationCallbacks* pAllocator,
7236 VkDevice* pDevice) {
7237 return mImpl->on_vkCreateDevice(
7238 context, input_result, physicalDevice, pCreateInfo, pAllocator, pDevice);
7239 }
7240
7241 void ResourceTracker::on_vkDestroyDevice_pre(
7242 void* context,
7243 VkDevice device,
7244 const VkAllocationCallbacks* pAllocator) {
7245 mImpl->on_vkDestroyDevice_pre(context, device, pAllocator);
7246 }
7247
7248 VkResult ResourceTracker::on_vkAllocateMemory(
7249 void* context,
7250 VkResult input_result,
7251 VkDevice device,
7252 const VkMemoryAllocateInfo* pAllocateInfo,
7253 const VkAllocationCallbacks* pAllocator,
7254 VkDeviceMemory* pMemory) {
7255 return mImpl->on_vkAllocateMemory(
7256 context, input_result, device, pAllocateInfo, pAllocator, pMemory);
7257 }
7258
7259 void ResourceTracker::on_vkFreeMemory(
7260 void* context,
7261 VkDevice device,
7262 VkDeviceMemory memory,
7263 const VkAllocationCallbacks* pAllocator) {
7264 return mImpl->on_vkFreeMemory(
7265 context, device, memory, pAllocator);
7266 }
7267
7268 VkResult ResourceTracker::on_vkMapMemory(
7269 void* context,
7270 VkResult input_result,
7271 VkDevice device,
7272 VkDeviceMemory memory,
7273 VkDeviceSize offset,
7274 VkDeviceSize size,
7275 VkMemoryMapFlags flags,
7276 void** ppData) {
7277 return mImpl->on_vkMapMemory(
7278 context, input_result, device, memory, offset, size, flags, ppData);
7279 }
7280
7281 void ResourceTracker::on_vkUnmapMemory(
7282 void* context,
7283 VkDevice device,
7284 VkDeviceMemory memory) {
7285 mImpl->on_vkUnmapMemory(context, device, memory);
7286 }
7287
7288 VkResult ResourceTracker::on_vkCreateImage(
7289 void* context, VkResult input_result,
7290 VkDevice device, const VkImageCreateInfo *pCreateInfo,
7291 const VkAllocationCallbacks *pAllocator,
7292 VkImage *pImage) {
7293 return mImpl->on_vkCreateImage(
7294 context, input_result,
7295 device, pCreateInfo, pAllocator, pImage);
7296 }
7297
7298 void ResourceTracker::on_vkDestroyImage(
7299 void* context,
7300 VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
7301 mImpl->on_vkDestroyImage(context,
7302 device, image, pAllocator);
7303 }
7304
7305 void ResourceTracker::on_vkGetImageMemoryRequirements(
7306 void *context, VkDevice device, VkImage image,
7307 VkMemoryRequirements *pMemoryRequirements) {
7308 mImpl->on_vkGetImageMemoryRequirements(
7309 context, device, image, pMemoryRequirements);
7310 }
7311
7312 void ResourceTracker::on_vkGetImageMemoryRequirements2(
7313 void *context, VkDevice device, const VkImageMemoryRequirementsInfo2 *pInfo,
7314 VkMemoryRequirements2 *pMemoryRequirements) {
7315 mImpl->on_vkGetImageMemoryRequirements2(
7316 context, device, pInfo, pMemoryRequirements);
7317 }
7318
7319 void ResourceTracker::on_vkGetImageMemoryRequirements2KHR(
7320 void *context, VkDevice device, const VkImageMemoryRequirementsInfo2 *pInfo,
7321 VkMemoryRequirements2 *pMemoryRequirements) {
7322 mImpl->on_vkGetImageMemoryRequirements2KHR(
7323 context, device, pInfo, pMemoryRequirements);
7324 }
7325
7326 VkResult ResourceTracker::on_vkBindImageMemory(
7327 void* context, VkResult input_result,
7328 VkDevice device, VkImage image, VkDeviceMemory memory,
7329 VkDeviceSize memoryOffset) {
7330 return mImpl->on_vkBindImageMemory(
7331 context, input_result, device, image, memory, memoryOffset);
7332 }
7333
7334 VkResult ResourceTracker::on_vkBindImageMemory2(
7335 void* context, VkResult input_result,
7336 VkDevice device, uint32_t bindingCount, const VkBindImageMemoryInfo* pBindInfos) {
7337 return mImpl->on_vkBindImageMemory2(
7338 context, input_result, device, bindingCount, pBindInfos);
7339 }
7340
7341 VkResult ResourceTracker::on_vkBindImageMemory2KHR(
7342 void* context, VkResult input_result,
7343 VkDevice device, uint32_t bindingCount, const VkBindImageMemoryInfo* pBindInfos) {
7344 return mImpl->on_vkBindImageMemory2KHR(
7345 context, input_result, device, bindingCount, pBindInfos);
7346 }
7347
7348 VkResult ResourceTracker::on_vkCreateBuffer(
7349 void* context, VkResult input_result,
7350 VkDevice device, const VkBufferCreateInfo *pCreateInfo,
7351 const VkAllocationCallbacks *pAllocator,
7352 VkBuffer *pBuffer) {
7353 return mImpl->on_vkCreateBuffer(
7354 context, input_result,
7355 device, pCreateInfo, pAllocator, pBuffer);
7356 }
7357
7358 void ResourceTracker::on_vkDestroyBuffer(
7359 void* context,
7360 VkDevice device, VkBuffer buffer, const VkAllocationCallbacks *pAllocator) {
7361 mImpl->on_vkDestroyBuffer(context, device, buffer, pAllocator);
7362 }
7363
7364 void ResourceTracker::on_vkGetBufferMemoryRequirements(
7365 void* context, VkDevice device, VkBuffer buffer, VkMemoryRequirements *pMemoryRequirements) {
7366 mImpl->on_vkGetBufferMemoryRequirements(context, device, buffer, pMemoryRequirements);
7367 }
7368
7369 void ResourceTracker::on_vkGetBufferMemoryRequirements2(
7370 void* context, VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo,
7371 VkMemoryRequirements2* pMemoryRequirements) {
7372 mImpl->on_vkGetBufferMemoryRequirements2(
7373 context, device, pInfo, pMemoryRequirements);
7374 }
7375
7376 void ResourceTracker::on_vkGetBufferMemoryRequirements2KHR(
7377 void* context, VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo,
7378 VkMemoryRequirements2* pMemoryRequirements) {
7379 mImpl->on_vkGetBufferMemoryRequirements2KHR(
7380 context, device, pInfo, pMemoryRequirements);
7381 }
7382
7383 VkResult ResourceTracker::on_vkBindBufferMemory(
7384 void* context, VkResult input_result,
7385 VkDevice device, VkBuffer buffer, VkDeviceMemory memory, VkDeviceSize memoryOffset) {
7386 return mImpl->on_vkBindBufferMemory(
7387 context, input_result,
7388 device, buffer, memory, memoryOffset);
7389 }
7390
7391 VkResult ResourceTracker::on_vkBindBufferMemory2(
7392 void* context, VkResult input_result,
7393 VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo *pBindInfos) {
7394 return mImpl->on_vkBindBufferMemory2(
7395 context, input_result,
7396 device, bindInfoCount, pBindInfos);
7397 }
7398
7399 VkResult ResourceTracker::on_vkBindBufferMemory2KHR(
7400 void* context, VkResult input_result,
7401 VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo *pBindInfos) {
7402 return mImpl->on_vkBindBufferMemory2KHR(
7403 context, input_result,
7404 device, bindInfoCount, pBindInfos);
7405 }
7406
7407 VkResult ResourceTracker::on_vkCreateSemaphore(
7408 void* context, VkResult input_result,
7409 VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
7410 const VkAllocationCallbacks *pAllocator,
7411 VkSemaphore *pSemaphore) {
7412 return mImpl->on_vkCreateSemaphore(
7413 context, input_result,
7414 device, pCreateInfo, pAllocator, pSemaphore);
7415 }
7416
7417 void ResourceTracker::on_vkDestroySemaphore(
7418 void* context,
7419 VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
7420 mImpl->on_vkDestroySemaphore(context, device, semaphore, pAllocator);
7421 }
7422
7423 VkResult ResourceTracker::on_vkQueueSubmit(
7424 void* context, VkResult input_result,
7425 VkQueue queue, uint32_t submitCount, const VkSubmitInfo* pSubmits, VkFence fence) {
7426 return mImpl->on_vkQueueSubmit(
7427 context, input_result, queue, submitCount, pSubmits, fence);
7428 }
7429
7430 VkResult ResourceTracker::on_vkQueueWaitIdle(
7431 void* context, VkResult input_result,
7432 VkQueue queue) {
7433 return mImpl->on_vkQueueWaitIdle(context, input_result, queue);
7434 }
7435
7436 VkResult ResourceTracker::on_vkGetSemaphoreFdKHR(
7437 void* context, VkResult input_result,
7438 VkDevice device,
7439 const VkSemaphoreGetFdInfoKHR* pGetFdInfo,
7440 int* pFd) {
7441 return mImpl->on_vkGetSemaphoreFdKHR(context, input_result, device, pGetFdInfo, pFd);
7442 }
7443
7444 VkResult ResourceTracker::on_vkImportSemaphoreFdKHR(
7445 void* context, VkResult input_result,
7446 VkDevice device,
7447 const VkImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo) {
7448 return mImpl->on_vkImportSemaphoreFdKHR(context, input_result, device, pImportSemaphoreFdInfo);
7449 }
7450
7451 void ResourceTracker::unwrap_VkNativeBufferANDROID(
7452 const VkImageCreateInfo* pCreateInfo,
7453 VkImageCreateInfo* local_pCreateInfo) {
7454 mImpl->unwrap_VkNativeBufferANDROID(pCreateInfo, local_pCreateInfo);
7455 }
7456
7457 void ResourceTracker::unwrap_vkAcquireImageANDROID_nativeFenceFd(int fd, int* fd_out) {
7458 mImpl->unwrap_vkAcquireImageANDROID_nativeFenceFd(fd, fd_out);
7459 }
7460
7461 #ifdef VK_USE_PLATFORM_FUCHSIA
7462 VkResult ResourceTracker::on_vkGetMemoryZirconHandleFUCHSIA(
7463 void* context, VkResult input_result,
7464 VkDevice device,
7465 const VkMemoryGetZirconHandleInfoFUCHSIA* pInfo,
7466 uint32_t* pHandle) {
7467 return mImpl->on_vkGetMemoryZirconHandleFUCHSIA(
7468 context, input_result, device, pInfo, pHandle);
7469 }
7470
7471 VkResult ResourceTracker::on_vkGetMemoryZirconHandlePropertiesFUCHSIA(
7472 void* context, VkResult input_result,
7473 VkDevice device,
7474 VkExternalMemoryHandleTypeFlagBits handleType,
7475 uint32_t handle,
7476 VkMemoryZirconHandlePropertiesFUCHSIA* pProperties) {
7477 return mImpl->on_vkGetMemoryZirconHandlePropertiesFUCHSIA(
7478 context, input_result, device, handleType, handle, pProperties);
7479 }
7480
7481 VkResult ResourceTracker::on_vkGetSemaphoreZirconHandleFUCHSIA(
7482 void* context, VkResult input_result,
7483 VkDevice device,
7484 const VkSemaphoreGetZirconHandleInfoFUCHSIA* pInfo,
7485 uint32_t* pHandle) {
7486 return mImpl->on_vkGetSemaphoreZirconHandleFUCHSIA(
7487 context, input_result, device, pInfo, pHandle);
7488 }
7489
7490 VkResult ResourceTracker::on_vkImportSemaphoreZirconHandleFUCHSIA(
7491 void* context, VkResult input_result,
7492 VkDevice device,
7493 const VkImportSemaphoreZirconHandleInfoFUCHSIA* pInfo) {
7494 return mImpl->on_vkImportSemaphoreZirconHandleFUCHSIA(
7495 context, input_result, device, pInfo);
7496 }
7497
on_vkCreateBufferCollectionFUCHSIA(void * context,VkResult input_result,VkDevice device,const VkBufferCollectionCreateInfoFUCHSIA * pInfo,const VkAllocationCallbacks * pAllocator,VkBufferCollectionFUCHSIA * pCollection)7498 VkResult ResourceTracker::on_vkCreateBufferCollectionFUCHSIA(
7499 void* context, VkResult input_result,
7500 VkDevice device,
7501 const VkBufferCollectionCreateInfoFUCHSIA* pInfo,
7502 const VkAllocationCallbacks* pAllocator,
7503 VkBufferCollectionFUCHSIA* pCollection) {
7504 return mImpl->on_vkCreateBufferCollectionFUCHSIA(
7505 context, input_result, device, pInfo, pAllocator, pCollection);
7506 }
7507
on_vkDestroyBufferCollectionFUCHSIA(void * context,VkResult input_result,VkDevice device,VkBufferCollectionFUCHSIA collection,const VkAllocationCallbacks * pAllocator)7508 void ResourceTracker::on_vkDestroyBufferCollectionFUCHSIA(
7509 void* context, VkResult input_result,
7510 VkDevice device,
7511 VkBufferCollectionFUCHSIA collection,
7512 const VkAllocationCallbacks* pAllocator) {
7513 return mImpl->on_vkDestroyBufferCollectionFUCHSIA(
7514 context, input_result, device, collection, pAllocator);
7515 }
7516
on_vkSetBufferCollectionConstraintsFUCHSIA(void * context,VkResult input_result,VkDevice device,VkBufferCollectionFUCHSIA collection,const VkImageCreateInfo * pImageInfo)7517 VkResult ResourceTracker::on_vkSetBufferCollectionConstraintsFUCHSIA(
7518 void* context, VkResult input_result,
7519 VkDevice device,
7520 VkBufferCollectionFUCHSIA collection,
7521 const VkImageCreateInfo* pImageInfo) {
7522 return mImpl->on_vkSetBufferCollectionConstraintsFUCHSIA(
7523 context, input_result, device, collection, pImageInfo);
7524 }
7525
on_vkSetBufferCollectionBufferConstraintsFUCHSIA(void * context,VkResult input_result,VkDevice device,VkBufferCollectionFUCHSIA collection,const VkBufferConstraintsInfoFUCHSIA * pBufferDConstraintsInfo)7526 VkResult ResourceTracker::on_vkSetBufferCollectionBufferConstraintsFUCHSIA(
7527 void* context, VkResult input_result,
7528 VkDevice device,
7529 VkBufferCollectionFUCHSIA collection,
7530 const VkBufferConstraintsInfoFUCHSIA* pBufferDConstraintsInfo) {
7531 return mImpl->on_vkSetBufferCollectionBufferConstraintsFUCHSIA(
7532 context, input_result, device, collection, pBufferDConstraintsInfo);
7533 }
7534
on_vkSetBufferCollectionImageConstraintsFUCHSIA(void * context,VkResult input_result,VkDevice device,VkBufferCollectionFUCHSIA collection,const VkImageConstraintsInfoFUCHSIA * pImageConstraintsInfo)7535 VkResult ResourceTracker::on_vkSetBufferCollectionImageConstraintsFUCHSIA(
7536 void* context,
7537 VkResult input_result,
7538 VkDevice device,
7539 VkBufferCollectionFUCHSIA collection,
7540 const VkImageConstraintsInfoFUCHSIA* pImageConstraintsInfo) {
7541 return mImpl->on_vkSetBufferCollectionImageConstraintsFUCHSIA(
7542 context, input_result, device, collection, pImageConstraintsInfo);
7543 }
7544
on_vkGetBufferCollectionPropertiesFUCHSIA(void * context,VkResult input_result,VkDevice device,VkBufferCollectionFUCHSIA collection,VkBufferCollectionPropertiesFUCHSIA * pProperties)7545 VkResult ResourceTracker::on_vkGetBufferCollectionPropertiesFUCHSIA(
7546 void* context, VkResult input_result,
7547 VkDevice device,
7548 VkBufferCollectionFUCHSIA collection,
7549 VkBufferCollectionPropertiesFUCHSIA* pProperties) {
7550 return mImpl->on_vkGetBufferCollectionPropertiesFUCHSIA(
7551 context, input_result, device, collection, pProperties);
7552 }
7553
on_vkGetBufferCollectionProperties2FUCHSIA(void * context,VkResult input_result,VkDevice device,VkBufferCollectionFUCHSIA collection,VkBufferCollectionProperties2FUCHSIA * pProperties)7554 VkResult ResourceTracker::on_vkGetBufferCollectionProperties2FUCHSIA(
7555 void* context,
7556 VkResult input_result,
7557 VkDevice device,
7558 VkBufferCollectionFUCHSIA collection,
7559 VkBufferCollectionProperties2FUCHSIA* pProperties) {
7560 return mImpl->on_vkGetBufferCollectionProperties2FUCHSIA(
7561 context, input_result, device, collection, pProperties);
7562 }
7563 #endif
7564
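// Illustrative only: the Fuchsia hooks above mirror the buffer collection
// flow, which a client typically drives as
//
//   VkBufferCollectionFUCHSIA collection;
//   vkCreateBufferCollectionFUCHSIA(device, &createInfo, nullptr, &collection);
//   vkSetBufferCollectionConstraintsFUCHSIA(device, collection, &imageCreateInfo);
//   vkGetBufferCollectionPropertiesFUCHSIA(device, collection, &properties);
//   // ... allocate VkDeviceMemory against the collection, then ...
//   vkDestroyBufferCollectionFUCHSIA(device, collection, nullptr);
//
// (A sketch under the signatures used here; the exact create/constraint
// structs are defined by the Fuchsia Vulkan headers.)
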
VkResult ResourceTracker::on_vkGetAndroidHardwareBufferPropertiesANDROID(
    void* context, VkResult input_result,
    VkDevice device,
    const AHardwareBuffer* buffer,
    VkAndroidHardwareBufferPropertiesANDROID* pProperties) {
    return mImpl->on_vkGetAndroidHardwareBufferPropertiesANDROID(
        context, input_result, device, buffer, pProperties);
}

VkResult ResourceTracker::on_vkGetMemoryAndroidHardwareBufferANDROID(
    void* context, VkResult input_result,
    VkDevice device,
    const VkMemoryGetAndroidHardwareBufferInfoANDROID* pInfo,
    struct AHardwareBuffer** pBuffer) {
    return mImpl->on_vkGetMemoryAndroidHardwareBufferANDROID(
        context, input_result,
        device, pInfo, pBuffer);
}

VkResult ResourceTracker::on_vkCreateSamplerYcbcrConversion(
    void* context, VkResult input_result,
    VkDevice device,
    const VkSamplerYcbcrConversionCreateInfo* pCreateInfo,
    const VkAllocationCallbacks* pAllocator,
    VkSamplerYcbcrConversion* pYcbcrConversion) {
    return mImpl->on_vkCreateSamplerYcbcrConversion(
        context, input_result, device, pCreateInfo, pAllocator, pYcbcrConversion);
}

void ResourceTracker::on_vkDestroySamplerYcbcrConversion(
    void* context,
    VkDevice device,
    VkSamplerYcbcrConversion ycbcrConversion,
    const VkAllocationCallbacks* pAllocator) {
    mImpl->on_vkDestroySamplerYcbcrConversion(
        context, device, ycbcrConversion, pAllocator);
}

VkResult ResourceTracker::on_vkCreateSamplerYcbcrConversionKHR(
    void* context, VkResult input_result,
    VkDevice device,
    const VkSamplerYcbcrConversionCreateInfo* pCreateInfo,
    const VkAllocationCallbacks* pAllocator,
    VkSamplerYcbcrConversion* pYcbcrConversion) {
    return mImpl->on_vkCreateSamplerYcbcrConversionKHR(
        context, input_result, device, pCreateInfo, pAllocator, pYcbcrConversion);
}

void ResourceTracker::on_vkDestroySamplerYcbcrConversionKHR(
    void* context,
    VkDevice device,
    VkSamplerYcbcrConversion ycbcrConversion,
    const VkAllocationCallbacks* pAllocator) {
    mImpl->on_vkDestroySamplerYcbcrConversionKHR(
        context, device, ycbcrConversion, pAllocator);
}

VkResult ResourceTracker::on_vkCreateSampler(
    void* context, VkResult input_result,
    VkDevice device,
    const VkSamplerCreateInfo* pCreateInfo,
    const VkAllocationCallbacks* pAllocator,
    VkSampler* pSampler) {
    return mImpl->on_vkCreateSampler(
        context, input_result, device, pCreateInfo, pAllocator, pSampler);
}

void ResourceTracker::on_vkGetPhysicalDeviceExternalFenceProperties(
    void* context,
    VkPhysicalDevice physicalDevice,
    const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo,
    VkExternalFenceProperties* pExternalFenceProperties) {
    mImpl->on_vkGetPhysicalDeviceExternalFenceProperties(
        context, physicalDevice, pExternalFenceInfo, pExternalFenceProperties);
}

// The KHR entry point is an alias of the core function, so it intentionally
// forwards to the same implementation.
void ResourceTracker::on_vkGetPhysicalDeviceExternalFencePropertiesKHR(
    void* context,
    VkPhysicalDevice physicalDevice,
    const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo,
    VkExternalFenceProperties* pExternalFenceProperties) {
    mImpl->on_vkGetPhysicalDeviceExternalFenceProperties(
        context, physicalDevice, pExternalFenceInfo, pExternalFenceProperties);
}

VkResult ResourceTracker::on_vkCreateFence(
    void* context,
    VkResult input_result,
    VkDevice device,
    const VkFenceCreateInfo* pCreateInfo,
    const VkAllocationCallbacks* pAllocator, VkFence* pFence) {
    return mImpl->on_vkCreateFence(
        context, input_result, device, pCreateInfo, pAllocator, pFence);
}

void ResourceTracker::on_vkDestroyFence(
    void* context,
    VkDevice device,
    VkFence fence,
    const VkAllocationCallbacks* pAllocator) {
    mImpl->on_vkDestroyFence(
        context, device, fence, pAllocator);
}

VkResult ResourceTracker::on_vkResetFences(
    void* context,
    VkResult input_result,
    VkDevice device,
    uint32_t fenceCount,
    const VkFence* pFences) {
    return mImpl->on_vkResetFences(
        context, input_result, device, fenceCount, pFences);
}

VkResult ResourceTracker::on_vkImportFenceFdKHR(
    void* context,
    VkResult input_result,
    VkDevice device,
    const VkImportFenceFdInfoKHR* pImportFenceFdInfo) {
    return mImpl->on_vkImportFenceFdKHR(
        context, input_result, device, pImportFenceFdInfo);
}

VkResult ResourceTracker::on_vkGetFenceFdKHR(
    void* context,
    VkResult input_result,
    VkDevice device,
    const VkFenceGetFdInfoKHR* pGetFdInfo,
    int* pFd) {
    return mImpl->on_vkGetFenceFdKHR(
        context, input_result, device, pGetFdInfo, pFd);
}

VkResult ResourceTracker::on_vkWaitForFences(
    void* context,
    VkResult input_result,
    VkDevice device,
    uint32_t fenceCount,
    const VkFence* pFences,
    VkBool32 waitAll,
    uint64_t timeout) {
    return mImpl->on_vkWaitForFences(
        context, input_result, device, fenceCount, pFences, waitAll, timeout);
}

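// Illustrative only: the fence hooks above intercept standard
// VK_KHR_external_fence_fd usage, e.g. exporting a fence payload as a
// sync fd:
//
//   VkFenceGetFdInfoKHR getFdInfo = {
//       VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR, nullptr,
//       fence, VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT };
//   int fd = -1;
//   vkGetFenceFdKHR(device, &getFdInfo, &fd);
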
VkResult ResourceTracker::on_vkCreateDescriptorPool(
    void* context,
    VkResult input_result,
    VkDevice device,
    const VkDescriptorPoolCreateInfo* pCreateInfo,
    const VkAllocationCallbacks* pAllocator,
    VkDescriptorPool* pDescriptorPool) {
    return mImpl->on_vkCreateDescriptorPool(
        context, input_result, device, pCreateInfo, pAllocator, pDescriptorPool);
}

void ResourceTracker::on_vkDestroyDescriptorPool(
    void* context,
    VkDevice device,
    VkDescriptorPool descriptorPool,
    const VkAllocationCallbacks* pAllocator) {
    mImpl->on_vkDestroyDescriptorPool(context, device, descriptorPool, pAllocator);
}

VkResult ResourceTracker::on_vkResetDescriptorPool(
    void* context,
    VkResult input_result,
    VkDevice device,
    VkDescriptorPool descriptorPool,
    VkDescriptorPoolResetFlags flags) {
    return mImpl->on_vkResetDescriptorPool(
        context, input_result, device, descriptorPool, flags);
}

VkResult ResourceTracker::on_vkAllocateDescriptorSets(
    void* context,
    VkResult input_result,
    VkDevice device,
    const VkDescriptorSetAllocateInfo* pAllocateInfo,
    VkDescriptorSet* pDescriptorSets) {
    return mImpl->on_vkAllocateDescriptorSets(
        context, input_result, device, pAllocateInfo, pDescriptorSets);
}

VkResult ResourceTracker::on_vkFreeDescriptorSets(
    void* context,
    VkResult input_result,
    VkDevice device,
    VkDescriptorPool descriptorPool,
    uint32_t descriptorSetCount,
    const VkDescriptorSet* pDescriptorSets) {
    return mImpl->on_vkFreeDescriptorSets(
        context, input_result, device, descriptorPool, descriptorSetCount, pDescriptorSets);
}

VkResult ResourceTracker::on_vkCreateDescriptorSetLayout(
    void* context,
    VkResult input_result,
    VkDevice device,
    const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
    const VkAllocationCallbacks* pAllocator,
    VkDescriptorSetLayout* pSetLayout) {
    return mImpl->on_vkCreateDescriptorSetLayout(
        context, input_result, device, pCreateInfo, pAllocator, pSetLayout);
}

void ResourceTracker::on_vkUpdateDescriptorSets(
    void* context,
    VkDevice device,
    uint32_t descriptorWriteCount,
    const VkWriteDescriptorSet* pDescriptorWrites,
    uint32_t descriptorCopyCount,
    const VkCopyDescriptorSet* pDescriptorCopies) {
    mImpl->on_vkUpdateDescriptorSets(
        context, device, descriptorWriteCount, pDescriptorWrites,
        descriptorCopyCount, pDescriptorCopies);
}

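// Illustrative only: a typical descriptor write that lands in the hook above:
//
//   VkWriteDescriptorSet write = { VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET };
//   write.dstSet = descriptorSet;
//   write.dstBinding = 0;
//   write.descriptorCount = 1;
//   write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
//   write.pBufferInfo = &bufferInfo;
//   vkUpdateDescriptorSets(device, 1, &write, 0, nullptr);
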
VkResult ResourceTracker::on_vkMapMemoryIntoAddressSpaceGOOGLE_pre(
    void* context,
    VkResult input_result,
    VkDevice device,
    VkDeviceMemory memory,
    uint64_t* pAddress) {
    return mImpl->on_vkMapMemoryIntoAddressSpaceGOOGLE_pre(
        context, input_result, device, memory, pAddress);
}

VkResult ResourceTracker::on_vkMapMemoryIntoAddressSpaceGOOGLE(
    void* context,
    VkResult input_result,
    VkDevice device,
    VkDeviceMemory memory,
    uint64_t* pAddress) {
    return mImpl->on_vkMapMemoryIntoAddressSpaceGOOGLE(
        context, input_result, device, memory, pAddress);
}

VkResult ResourceTracker::on_vkCreateDescriptorUpdateTemplate(
    void* context, VkResult input_result,
    VkDevice device,
    const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo,
    const VkAllocationCallbacks* pAllocator,
    VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate) {
    return mImpl->on_vkCreateDescriptorUpdateTemplate(
        context, input_result,
        device, pCreateInfo, pAllocator, pDescriptorUpdateTemplate);
}

VkResult ResourceTracker::on_vkCreateDescriptorUpdateTemplateKHR(
    void* context, VkResult input_result,
    VkDevice device,
    const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo,
    const VkAllocationCallbacks* pAllocator,
    VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate) {
    return mImpl->on_vkCreateDescriptorUpdateTemplateKHR(
        context, input_result,
        device, pCreateInfo, pAllocator, pDescriptorUpdateTemplate);
}

void ResourceTracker::on_vkUpdateDescriptorSetWithTemplate(
    void* context,
    VkDevice device,
    VkDescriptorSet descriptorSet,
    VkDescriptorUpdateTemplate descriptorUpdateTemplate,
    const void* pData) {
    mImpl->on_vkUpdateDescriptorSetWithTemplate(
        context, device, descriptorSet,
        descriptorUpdateTemplate, pData);
}

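// Illustrative only: after creating a template with
// vkCreateDescriptorUpdateTemplate, a client applies it with raw data laid
// out per the template's entries, which is what the hook above intercepts:
//
//   vkUpdateDescriptorSetWithTemplate(device, descriptorSet, tmpl, pRawData);
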
VkResult ResourceTracker::on_vkGetPhysicalDeviceImageFormatProperties2(
    void* context, VkResult input_result,
    VkPhysicalDevice physicalDevice,
    const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo,
    VkImageFormatProperties2* pImageFormatProperties) {
    return mImpl->on_vkGetPhysicalDeviceImageFormatProperties2(
        context, input_result, physicalDevice, pImageFormatInfo,
        pImageFormatProperties);
}

VkResult ResourceTracker::on_vkGetPhysicalDeviceImageFormatProperties2KHR(
    void* context, VkResult input_result,
    VkPhysicalDevice physicalDevice,
    const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo,
    VkImageFormatProperties2* pImageFormatProperties) {
    return mImpl->on_vkGetPhysicalDeviceImageFormatProperties2KHR(
        context, input_result, physicalDevice, pImageFormatInfo,
        pImageFormatProperties);
}

void ResourceTracker::on_vkGetPhysicalDeviceExternalSemaphoreProperties(
    void* context,
    VkPhysicalDevice physicalDevice,
    const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo,
    VkExternalSemaphoreProperties* pExternalSemaphoreProperties) {
    mImpl->on_vkGetPhysicalDeviceExternalSemaphoreProperties(
        context, physicalDevice, pExternalSemaphoreInfo,
        pExternalSemaphoreProperties);
}

// As with the external fence query, the KHR entry point is an alias of the
// core function and forwards to the same implementation.
void ResourceTracker::on_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR(
    void* context,
    VkPhysicalDevice physicalDevice,
    const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo,
    VkExternalSemaphoreProperties* pExternalSemaphoreProperties) {
    mImpl->on_vkGetPhysicalDeviceExternalSemaphoreProperties(
        context, physicalDevice, pExternalSemaphoreInfo,
        pExternalSemaphoreProperties);
}

void ResourceTracker::registerEncoderCleanupCallback(const VkEncoder* encoder, void* handle, ResourceTracker::CleanupCallback callback) {
    mImpl->registerEncoderCleanupCallback(encoder, handle, callback);
}

void ResourceTracker::unregisterEncoderCleanupCallback(const VkEncoder* encoder, void* handle) {
    mImpl->unregisterEncoderCleanupCallback(encoder, handle);
}

void ResourceTracker::onEncoderDeleted(const VkEncoder* encoder) {
    mImpl->onEncoderDeleted(encoder);
}

uint32_t ResourceTracker::syncEncodersForCommandBuffer(VkCommandBuffer commandBuffer, VkEncoder* current) {
    return mImpl->syncEncodersForCommandBuffer(commandBuffer, current);
}

uint32_t ResourceTracker::syncEncodersForQueue(VkQueue queue, VkEncoder* current) {
    return mImpl->syncEncodersForQueue(queue, current);
}

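// Illustrative only (hypothetical caller): components that keep per-encoder
// state can register a teardown hook that runs when the encoder is deleted:
//
//   tracker->registerEncoderCleanupCallback(enc, myHandle,
//       [] { /* release state tied to this encoder */ });
//   // ... later, if myHandle is destroyed before the encoder:
//   tracker->unregisterEncoderCleanupCallback(enc, myHandle);
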
VkResult ResourceTracker::on_vkBeginCommandBuffer(
    void* context, VkResult input_result,
    VkCommandBuffer commandBuffer,
    const VkCommandBufferBeginInfo* pBeginInfo) {
    return mImpl->on_vkBeginCommandBuffer(
        context, input_result, commandBuffer, pBeginInfo);
}

VkResult ResourceTracker::on_vkEndCommandBuffer(
    void* context, VkResult input_result,
    VkCommandBuffer commandBuffer) {
    return mImpl->on_vkEndCommandBuffer(
        context, input_result, commandBuffer);
}

VkResult ResourceTracker::on_vkResetCommandBuffer(
    void* context, VkResult input_result,
    VkCommandBuffer commandBuffer,
    VkCommandBufferResetFlags flags) {
    return mImpl->on_vkResetCommandBuffer(
        context, input_result, commandBuffer, flags);
}

VkResult ResourceTracker::on_vkCreateImageView(
    void* context, VkResult input_result,
    VkDevice device,
    const VkImageViewCreateInfo* pCreateInfo,
    const VkAllocationCallbacks* pAllocator,
    VkImageView* pView) {
    return mImpl->on_vkCreateImageView(
        context, input_result, device, pCreateInfo, pAllocator, pView);
}

void ResourceTracker::on_vkCmdExecuteCommands(
    void* context,
    VkCommandBuffer commandBuffer,
    uint32_t commandBufferCount,
    const VkCommandBuffer* pCommandBuffers) {
    mImpl->on_vkCmdExecuteCommands(
        context, commandBuffer, commandBufferCount, pCommandBuffers);
}

void ResourceTracker::on_vkCmdBindDescriptorSets(
    void* context,
    VkCommandBuffer commandBuffer,
    VkPipelineBindPoint pipelineBindPoint,
    VkPipelineLayout layout,
    uint32_t firstSet,
    uint32_t descriptorSetCount,
    const VkDescriptorSet* pDescriptorSets,
    uint32_t dynamicOffsetCount,
    const uint32_t* pDynamicOffsets) {
    mImpl->on_vkCmdBindDescriptorSets(
        context,
        commandBuffer,
        pipelineBindPoint,
        layout,
        firstSet,
        descriptorSetCount,
        pDescriptorSets,
        dynamicOffsetCount,
        pDynamicOffsets);
}

void ResourceTracker::on_vkDestroyDescriptorSetLayout(
    void* context,
    VkDevice device,
    VkDescriptorSetLayout descriptorSetLayout,
    const VkAllocationCallbacks* pAllocator) {
    mImpl->on_vkDestroyDescriptorSetLayout(context, device, descriptorSetLayout, pAllocator);
}

VkResult ResourceTracker::on_vkAllocateCommandBuffers(
    void* context,
    VkResult input_result,
    VkDevice device,
    const VkCommandBufferAllocateInfo* pAllocateInfo,
    VkCommandBuffer* pCommandBuffers) {
    return mImpl->on_vkAllocateCommandBuffers(
        context, input_result, device, pAllocateInfo, pCommandBuffers);
}

void ResourceTracker::deviceMemoryTransform_tohost(
    VkDeviceMemory* memory, uint32_t memoryCount,
    VkDeviceSize* offset, uint32_t offsetCount,
    VkDeviceSize* size, uint32_t sizeCount,
    uint32_t* typeIndex, uint32_t typeIndexCount,
    uint32_t* typeBits, uint32_t typeBitsCount) {
    mImpl->deviceMemoryTransform_tohost(
        memory, memoryCount,
        offset, offsetCount,
        size, sizeCount,
        typeIndex, typeIndexCount,
        typeBits, typeBitsCount);
}

void ResourceTracker::deviceMemoryTransform_fromhost(
    VkDeviceMemory* memory, uint32_t memoryCount,
    VkDeviceSize* offset, uint32_t offsetCount,
    VkDeviceSize* size, uint32_t sizeCount,
    uint32_t* typeIndex, uint32_t typeIndexCount,
    uint32_t* typeBits, uint32_t typeBitsCount) {
    mImpl->deviceMemoryTransform_fromhost(
        memory, memoryCount,
        offset, offsetCount,
        size, sizeCount,
        typeIndex, typeIndexCount,
        typeBits, typeBitsCount);
}

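// Note: each pointer/count pair above is an independent parallel array, so a
// caller can batch-transform any subset of memory handles, offsets, sizes,
// and memory type indices/bits between their guest and host representations
// in place. (Inferred from the signatures; see the Impl for the
// authoritative behavior.)
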
void ResourceTracker::transformImpl_VkExternalMemoryProperties_fromhost(
    VkExternalMemoryProperties* pProperties,
    uint32_t lenAccess) {
    mImpl->transformImpl_VkExternalMemoryProperties_fromhost(pProperties,
                                                             lenAccess);
}

void ResourceTracker::transformImpl_VkExternalMemoryProperties_tohost(
    VkExternalMemoryProperties*, uint32_t) {}

#define DEFINE_TRANSFORMED_TYPE_IMPL(type) \
void ResourceTracker::transformImpl_##type##_tohost(type*, uint32_t) {} \
void ResourceTracker::transformImpl_##type##_fromhost(type*, uint32_t) {}

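// For each type T in LIST_TRIVIAL_TRANSFORMED_TYPES, the macro above expands
// to an empty tohost/fromhost pair, e.g. for a hypothetical list entry
// VkImageCreateInfo:
//
//   void ResourceTracker::transformImpl_VkImageCreateInfo_tohost(VkImageCreateInfo*, uint32_t) {}
//   void ResourceTracker::transformImpl_VkImageCreateInfo_fromhost(VkImageCreateInfo*, uint32_t) {}
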
LIST_TRIVIAL_TRANSFORMED_TYPES(DEFINE_TRANSFORMED_TYPE_IMPL)

} // namespace goldfish_vk