// Copyright (C) 2018 The Android Open Source Project
// Copyright (C) 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "ResourceTracker.h"

#include "../OpenglSystemCommon/EmulatorFeatureInfo.h"
#include "../OpenglSystemCommon/HostConnection.h"
#include "CommandBufferStagingStream.h"
#include "DescriptorSetVirtualization.h"
#include "Resources.h"
#include "aemu/base/Optional.h"
#include "aemu/base/Tracing.h"
#include "aemu/base/threads/AndroidWorkPool.h"
#include "goldfish_vk_private_defs.h"
#include "vulkan/vulkan_core.h"

/// Use installed headers or locally defined Fuchsia-specific bits
#ifdef VK_USE_PLATFORM_FUCHSIA

#include <cutils/native_handle.h>
#include <fidl/fuchsia.hardware.goldfish/cpp/wire.h>
#include <fidl/fuchsia.sysmem/cpp/wire.h>
#include <lib/zx/channel.h>
#include <lib/zx/vmo.h>
#include <optional>
#include <zircon/errors.h>
#include <zircon/process.h>
#include <zircon/rights.h>
#include <zircon/syscalls.h>
#include <zircon/syscalls/object.h>

#include "services/service_connector.h"

#ifndef FUCHSIA_NO_TRACE
#include <lib/trace/event.h>
#endif

#define GET_STATUS_SAFE(result, member) \
    ((result).ok() ? ((result)->member) : ZX_OK)

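// Usage sketch (the call and member names below are illustrative, not from
// this file): when a FIDL wire result fails at the transport layer,
// GET_STATUS_SAFE yields ZX_OK so the caller can report the transport error
// through its own path instead of a bogus protocol-level status:
//
//   auto result = controlDevice->CreateColorBuffer2(std::move(vmo), params);
//   zx_status_t status = GET_STATUS_SAFE(result, res);
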
#else

typedef uint32_t zx_handle_t;
typedef uint64_t zx_koid_t;
#define ZX_HANDLE_INVALID         ((zx_handle_t)0)
#define ZX_KOID_INVALID ((zx_koid_t)0)
void zx_handle_close(zx_handle_t) { }
void zx_event_create(int, zx_handle_t*) { }
#endif // VK_USE_PLATFORM_FUCHSIA

/// Use installed headers or locally defined Android-specific bits
#ifdef VK_USE_PLATFORM_ANDROID_KHR

/// Goldfish sync only used for AEMU -- should replace in virtio-gpu when possible
#include "../egl/goldfish_sync.h"
#include "AndroidHardwareBuffer.h"

#else

#if defined(__linux__)
#include "../egl/goldfish_sync.h"
#endif

#include <android/hardware_buffer.h>

#endif // VK_USE_PLATFORM_ANDROID_KHR

#include "HostVisibleMemoryVirtualization.h"
#include "Resources.h"
#include "VkEncoder.h"

#include "aemu/base/AlignedBuf.h"
#include "aemu/base/synchronization/AndroidLock.h"
#include "virtgpu_gfxstream_protocol.h"

#include "goldfish_address_space.h"
#include "goldfish_vk_private_defs.h"
#ifdef VK_USE_PLATFORM_ANDROID_KHR
#include "vk_format_info.h"
#endif
#include "vk_struct_id.h"
#include "vk_util.h"

#include <set>
#include <string>
#include <unordered_map>
#include <unordered_set>

#include <vndk/hardware_buffer.h>
#include <log/log.h>
#include <stdlib.h>
#include <sync/sync.h>

#if defined(__ANDROID__) || defined(__linux__) || defined(__APPLE__)

#include <sys/mman.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifdef HOST_BUILD
#include "android/utils/tempfile.h"
#endif

static inline int
inline_memfd_create(const char *name, unsigned int flags) {
#ifdef HOST_BUILD
    TempFile* tmpFile = tempfile_create();
    return open(tempfile_path(tmpFile), O_RDWR);
    // TODO: Windows is not supposed to support VkSemaphoreGetFdInfoKHR
#else
    return syscall(SYS_memfd_create, name, flags);
#endif
}

#define memfd_create inline_memfd_create
#endif
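
// Sketch of the wrapper in use (the TODO above suggests it backs fd-export
// paths such as VkSemaphoreGetFdInfoKHR; the name and flag shown here are
// assumptions for illustration):
//
//   int fd = memfd_create("vk-export", MFD_CLOEXEC);
//   if (fd < 0) { /* fall back or fail the export */ }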

#define RESOURCE_TRACKER_DEBUG 0

#if RESOURCE_TRACKER_DEBUG
#undef D
#define D(fmt,...) ALOGD("%s: " fmt, __func__, ##__VA_ARGS__);
#else
#ifndef D
#define D(fmt,...)
#endif
#endif

using android::base::Optional;
using android::base::guest::AutoLock;
using android::base::guest::RecursiveLock;
using android::base::guest::Lock;
using android::base::guest::WorkPool;

namespace gfxstream {
namespace vk {

#define MAKE_HANDLE_MAPPING_FOREACH(type_name, map_impl, map_to_u64_impl, map_from_u64_impl) \
    void mapHandles_##type_name(type_name* handles, size_t count) override { \
        for (size_t i = 0; i < count; ++i) { \
            map_impl; \
        } \
    } \
    void mapHandles_##type_name##_u64(const type_name* handles, uint64_t* handle_u64s, size_t count) override { \
        for (size_t i = 0; i < count; ++i) { \
            map_to_u64_impl; \
        } \
    } \
    void mapHandles_u64_##type_name(const uint64_t* handle_u64s, type_name* handles, size_t count) override { \
        for (size_t i = 0; i < count; ++i) { \
            map_from_u64_impl; \
        } \
    } \

#define DEFINE_RESOURCE_TRACKING_CLASS(class_name, impl) \
class class_name : public VulkanHandleMapping { \
public: \
    virtual ~class_name() { } \
    GOLDFISH_VK_LIST_HANDLE_TYPES(impl) \
}; \

#define CREATE_MAPPING_IMPL_FOR_TYPE(type_name) \
    MAKE_HANDLE_MAPPING_FOREACH(type_name, \
        handles[i] = new_from_host_##type_name(handles[i]); ResourceTracker::get()->register_##type_name(handles[i]);, \
        handle_u64s[i] = (uint64_t)new_from_host_##type_name(handles[i]), \
        handles[i] = (type_name)new_from_host_u64_##type_name(handle_u64s[i]); ResourceTracker::get()->register_##type_name(handles[i]);)

#define UNWRAP_MAPPING_IMPL_FOR_TYPE(type_name) \
    MAKE_HANDLE_MAPPING_FOREACH(type_name, \
        handles[i] = get_host_##type_name(handles[i]), \
        handle_u64s[i] = (uint64_t)get_host_u64_##type_name(handles[i]), \
        handles[i] = (type_name)get_host_##type_name((type_name)handle_u64s[i]))

#define DESTROY_MAPPING_IMPL_FOR_TYPE(type_name) \
    MAKE_HANDLE_MAPPING_FOREACH(type_name, \
        ResourceTracker::get()->unregister_##type_name(handles[i]); delete_goldfish_##type_name(handles[i]), \
        (void)handle_u64s[i]; delete_goldfish_##type_name(handles[i]), \
        (void)handles[i]; delete_goldfish_##type_name((type_name)handle_u64s[i]))

DEFINE_RESOURCE_TRACKING_CLASS(CreateMapping, CREATE_MAPPING_IMPL_FOR_TYPE)
DEFINE_RESOURCE_TRACKING_CLASS(UnwrapMapping, UNWRAP_MAPPING_IMPL_FOR_TYPE)
DEFINE_RESOURCE_TRACKING_CLASS(DestroyMapping, DESTROY_MAPPING_IMPL_FOR_TYPE)
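
// For reference, applying CREATE_MAPPING_IMPL_FOR_TYPE to one handle type
// (VkImage, as an example) generates overrides of this shape:
//
//   void mapHandles_VkImage(VkImage* handles, size_t count) override {
//       for (size_t i = 0; i < count; ++i) {
//           handles[i] = new_from_host_VkImage(handles[i]);
//           ResourceTracker::get()->register_VkImage(handles[i]);
//       }
//   }
//
// UnwrapMapping and DestroyMapping generate the same loops around the
// get_host_* and delete_goldfish_* helpers, respectively.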

static uint32_t* sSeqnoPtr = nullptr;

// static
uint32_t ResourceTracker::streamFeatureBits = 0;
ResourceTracker::ThreadingCallbacks ResourceTracker::threadingCallbacks;

struct StagingInfo {
    Lock mLock;
    std::vector<CommandBufferStagingStream*> streams;
    std::vector<VkEncoder*> encoders;
    /// \brief Sets the alloc and free callbacks used by CommandBufferStagingStream(s) for memory allocation
    /// \param allocFn is the callback to allocate memory
    /// \param freeFn is the callback to free memory
    void setAllocFree(CommandBufferStagingStream::Alloc&& allocFn,
                      CommandBufferStagingStream::Free&& freeFn) {
        mAlloc = allocFn;
        mFree = freeFn;
    }

    ~StagingInfo() {
        for (auto stream : streams) {
            delete stream;
        }

        for (auto encoder : encoders) {
            delete encoder;
        }
    }

    void pushStaging(CommandBufferStagingStream* stream, VkEncoder* encoder) {
        AutoLock<Lock> lock(mLock);
        stream->reset();
        streams.push_back(stream);
        encoders.push_back(encoder);
    }

    void popStaging(CommandBufferStagingStream** streamOut, VkEncoder** encoderOut) {
        AutoLock<Lock> lock(mLock);
        CommandBufferStagingStream* stream;
        VkEncoder* encoder;
        if (streams.empty()) {
            if (mAlloc && mFree) {
                // If custom allocators are provided, forward them to CommandBufferStagingStream.
                stream = new CommandBufferStagingStream(mAlloc, mFree);
            } else {
                stream = new CommandBufferStagingStream;
            }
            encoder = new VkEncoder(stream);
        } else {
            stream = streams.back();
            encoder = encoders.back();
            streams.pop_back();
            encoders.pop_back();
        }
        *streamOut = stream;
        *encoderOut = encoder;
    }

   private:
    CommandBufferStagingStream::Alloc mAlloc = nullptr;
    CommandBufferStagingStream::Free mFree = nullptr;
};

static StagingInfo sStaging;
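
// Typical flow (sketch): a caller checks a stream/encoder pair out of the
// shared pool, encodes into it, and returns it for reuse. pushStaging()
// resets the stream before pooling it, so recycled streams start empty.
//
//   CommandBufferStagingStream* stream = nullptr;
//   VkEncoder* encoder = nullptr;
//   sStaging.popStaging(&stream, &encoder);
//   // ... encode commands through `encoder` ...
//   sStaging.pushStaging(stream, encoder);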

class ResourceTracker::Impl {
public:
    Impl() = default;
    CreateMapping createMapping;
    UnwrapMapping unwrapMapping;
    DestroyMapping destroyMapping;
    DefaultHandleMapping defaultMapping;

#define HANDLE_DEFINE_TRIVIAL_INFO_STRUCT(type) \
    struct type##_Info { \
        uint32_t unused; \
    }; \

    GOLDFISH_VK_LIST_TRIVIAL_HANDLE_TYPES(HANDLE_DEFINE_TRIVIAL_INFO_STRUCT)

    struct VkInstance_Info {
        uint32_t highestApiVersion;
        std::set<std::string> enabledExtensions;
        // Fodder for vkEnumeratePhysicalDevices.
        std::vector<VkPhysicalDevice> physicalDevices;
    };

    struct VkDevice_Info {
        VkPhysicalDevice physdev;
        VkPhysicalDeviceProperties props;
        VkPhysicalDeviceMemoryProperties memProps;
        uint32_t apiVersion;
        std::set<std::string> enabledExtensions;
        std::vector<std::pair<PFN_vkDeviceMemoryReportCallbackEXT, void *>> deviceMemoryReportCallbacks;
    };

    struct VkDeviceMemory_Info {
        bool dedicated = false;
        bool imported = false;

#ifdef VK_USE_PLATFORM_ANDROID_KHR
        AHardwareBuffer* ahw = nullptr;
#endif
        zx_handle_t vmoHandle = ZX_HANDLE_INVALID;
        VkDevice device;

        uint8_t* ptr = nullptr;

        uint64_t blobId = 0;
        uint64_t allocationSize = 0;
        uint32_t memoryTypeIndex = 0;
        uint64_t coherentMemorySize = 0;
        uint64_t coherentMemoryOffset = 0;

        GoldfishAddressSpaceBlockPtr goldfishBlock = nullptr;
        CoherentMemoryPtr coherentMemory = nullptr;
    };

    struct VkCommandBuffer_Info {
        uint32_t placeholder;
    };

    struct VkQueue_Info {
        VkDevice device;
    };

    // Custom guest-side structs for images/buffers because of AHardwareBuffer :((
    struct VkImage_Info {
        VkDevice device;
        VkImageCreateInfo createInfo;
        bool external = false;
        VkExternalMemoryImageCreateInfo externalCreateInfo;
        VkDeviceMemory currentBacking = VK_NULL_HANDLE;
        VkDeviceSize currentBackingOffset = 0;
        VkDeviceSize currentBackingSize = 0;
        bool baseRequirementsKnown = false;
        VkMemoryRequirements baseRequirements;
#ifdef VK_USE_PLATFORM_ANDROID_KHR
        bool hasExternalFormat = false;
        unsigned androidFormat = 0;
        std::vector<int> pendingQsriSyncFds;
#endif
#ifdef VK_USE_PLATFORM_FUCHSIA
        bool isSysmemBackedMemory = false;
#endif
    };

    struct VkBuffer_Info {
        VkDevice device;
        VkBufferCreateInfo createInfo;
        bool external = false;
        VkExternalMemoryBufferCreateInfo externalCreateInfo;
        VkDeviceMemory currentBacking = VK_NULL_HANDLE;
        VkDeviceSize currentBackingOffset = 0;
        VkDeviceSize currentBackingSize = 0;
        bool baseRequirementsKnown = false;
        VkMemoryRequirements baseRequirements;
#ifdef VK_USE_PLATFORM_FUCHSIA
        bool isSysmemBackedMemory = false;
#endif
    };

    struct VkSemaphore_Info {
        VkDevice device;
        zx_handle_t eventHandle = ZX_HANDLE_INVALID;
        zx_koid_t eventKoid = ZX_KOID_INVALID;
        std::optional<int> syncFd = {};
    };

    struct VkDescriptorUpdateTemplate_Info {
        uint32_t templateEntryCount = 0;
        VkDescriptorUpdateTemplateEntry* templateEntries;

        uint32_t imageInfoCount = 0;
        uint32_t bufferInfoCount = 0;
        uint32_t bufferViewCount = 0;
        uint32_t inlineUniformBlockCount = 0;
        uint32_t* imageInfoIndices;
        uint32_t* bufferInfoIndices;
        uint32_t* bufferViewIndices;
        VkDescriptorImageInfo* imageInfos;
        VkDescriptorBufferInfo* bufferInfos;
        VkBufferView* bufferViews;
        std::vector<uint8_t> inlineUniformBlockBuffer;
        std::vector<uint32_t> inlineUniformBlockBytesPerBlocks;  // bytes per uniform block
    };

    struct VkFence_Info {
        VkDevice device;
        bool external = false;
        VkExportFenceCreateInfo exportFenceCreateInfo;
#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
        int syncFd = -1;
#endif
    };

    struct VkDescriptorPool_Info {
        uint32_t unused;
    };

    struct VkDescriptorSet_Info {
        uint32_t unused;
    };

    struct VkDescriptorSetLayout_Info {
        uint32_t unused;
    };

    struct VkCommandPool_Info {
        uint32_t unused;
    };

    struct VkSampler_Info {
        uint32_t unused;
    };

    struct VkBufferCollectionFUCHSIA_Info {
#ifdef VK_USE_PLATFORM_FUCHSIA
        android::base::Optional<
            fuchsia_sysmem::wire::BufferCollectionConstraints>
            constraints;
        android::base::Optional<VkBufferCollectionPropertiesFUCHSIA> properties;

        // The index of the corresponding createInfo for each image format
        // constraint in |constraints|.
        std::vector<uint32_t> createInfoIndex;
#endif  // VK_USE_PLATFORM_FUCHSIA
    };

#define HANDLE_REGISTER_IMPL_IMPL(type) \
    std::unordered_map<type, type##_Info> info_##type; \
    void register_##type(type obj) { \
        AutoLock<RecursiveLock> lock(mLock); \
        info_##type[obj] = type##_Info(); \
    } \

#define HANDLE_UNREGISTER_IMPL_IMPL(type) \
    void unregister_##type(type obj) { \
        AutoLock<RecursiveLock> lock(mLock); \
        info_##type.erase(obj); \
    } \

    GOLDFISH_VK_LIST_HANDLE_TYPES(HANDLE_REGISTER_IMPL_IMPL)
    GOLDFISH_VK_LIST_TRIVIAL_HANDLE_TYPES(HANDLE_UNREGISTER_IMPL_IMPL)

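    // For reference, HANDLE_REGISTER_IMPL_IMPL(VkImage) expands to:
    //
    //   std::unordered_map<VkImage, VkImage_Info> info_VkImage;
    //   void register_VkImage(VkImage obj) {
    //       AutoLock<RecursiveLock> lock(mLock);
    //       info_VkImage[obj] = VkImage_Info();
    //   }
    //
    // Only the trivial handle types take the macro-generated unregister_*;
    // the handle types below define bespoke unregister_* methods that also
    // release platform resources (AHardwareBuffers, VMO handles, sync fds).
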
    void unregister_VkInstance(VkInstance instance) {
        AutoLock<RecursiveLock> lock(mLock);

        auto it = info_VkInstance.find(instance);
        if (it == info_VkInstance.end()) return;
        auto info = it->second;
        info_VkInstance.erase(instance);
        lock.unlock();
    }

    void unregister_VkDevice(VkDevice device) {
        AutoLock<RecursiveLock> lock(mLock);

        auto it = info_VkDevice.find(device);
        if (it == info_VkDevice.end()) return;
        auto info = it->second;
        info_VkDevice.erase(device);
        lock.unlock();
    }

    void unregister_VkCommandPool(VkCommandPool pool) {
        if (!pool) return;

        clearCommandPool(pool);

        AutoLock<RecursiveLock> lock(mLock);
        info_VkCommandPool.erase(pool);
    }

    void unregister_VkSampler(VkSampler sampler) {
        if (!sampler) return;

        AutoLock<RecursiveLock> lock(mLock);
        info_VkSampler.erase(sampler);
    }

    void unregister_VkCommandBuffer(VkCommandBuffer commandBuffer) {
        resetCommandBufferStagingInfo(commandBuffer, true /* also reset primaries */, true /* also clear pending descriptor sets */);

        struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
        if (!cb) return;
        if (cb->lastUsedEncoder) { cb->lastUsedEncoder->decRef(); }
        eraseObjects(&cb->subObjects);
        forAllObjects(cb->poolObjects, [cb](void* commandPool) {
            struct goldfish_VkCommandPool* p = as_goldfish_VkCommandPool((VkCommandPool)commandPool);
            eraseObject(&p->subObjects, (void*)cb);
        });
        eraseObjects(&cb->poolObjects);

        if (cb->userPtr) {
            CommandBufferPendingDescriptorSets* pendingSets = (CommandBufferPendingDescriptorSets*)cb->userPtr;
            delete pendingSets;
        }

        AutoLock<RecursiveLock> lock(mLock);
        info_VkCommandBuffer.erase(commandBuffer);
    }

    void unregister_VkQueue(VkQueue queue) {
        struct goldfish_VkQueue* q = as_goldfish_VkQueue(queue);
        if (!q) return;
        if (q->lastUsedEncoder) { q->lastUsedEncoder->decRef(); }

        AutoLock<RecursiveLock> lock(mLock);
        info_VkQueue.erase(queue);
    }

    void unregister_VkDeviceMemory(VkDeviceMemory mem) {
        AutoLock<RecursiveLock> lock(mLock);

        auto it = info_VkDeviceMemory.find(mem);
        if (it == info_VkDeviceMemory.end()) return;

        auto& memInfo = it->second;

#ifdef VK_USE_PLATFORM_ANDROID_KHR
        if (memInfo.ahw) {
            AHardwareBuffer_release(memInfo.ahw);
        }
#endif

        if (memInfo.vmoHandle != ZX_HANDLE_INVALID) {
            zx_handle_close(memInfo.vmoHandle);
        }

        info_VkDeviceMemory.erase(mem);
    }

    void unregister_VkImage(VkImage img) {
        AutoLock<RecursiveLock> lock(mLock);

        auto it = info_VkImage.find(img);
        if (it == info_VkImage.end()) return;

        auto& imageInfo = it->second;

        info_VkImage.erase(img);
    }

    void unregister_VkBuffer(VkBuffer buf) {
        AutoLock<RecursiveLock> lock(mLock);

        auto it = info_VkBuffer.find(buf);
        if (it == info_VkBuffer.end()) return;

        info_VkBuffer.erase(buf);
    }

    void unregister_VkSemaphore(VkSemaphore sem) {
        AutoLock<RecursiveLock> lock(mLock);

        auto it = info_VkSemaphore.find(sem);
        if (it == info_VkSemaphore.end()) return;

        auto& semInfo = it->second;

        if (semInfo.eventHandle != ZX_HANDLE_INVALID) {
            zx_handle_close(semInfo.eventHandle);
        }

#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
        if (semInfo.syncFd.value_or(-1) >= 0) {
            close(semInfo.syncFd.value());
        }
#endif

        info_VkSemaphore.erase(sem);
    }

    void unregister_VkDescriptorUpdateTemplate(VkDescriptorUpdateTemplate templ) {
        AutoLock<RecursiveLock> lock(mLock);
        auto it = info_VkDescriptorUpdateTemplate.find(templ);
        if (it == info_VkDescriptorUpdateTemplate.end())
            return;

        auto& info = it->second;
        if (info.templateEntryCount) delete [] info.templateEntries;
        if (info.imageInfoCount) {
            delete [] info.imageInfoIndices;
            delete [] info.imageInfos;
        }
        if (info.bufferInfoCount) {
            delete [] info.bufferInfoIndices;
            delete [] info.bufferInfos;
        }
        if (info.bufferViewCount) {
            delete [] info.bufferViewIndices;
            delete [] info.bufferViews;
        }
        info_VkDescriptorUpdateTemplate.erase(it);
    }

    void unregister_VkFence(VkFence fence) {
        AutoLock<RecursiveLock> lock(mLock);
        auto it = info_VkFence.find(fence);
        if (it == info_VkFence.end()) return;

        auto& fenceInfo = it->second;
        (void)fenceInfo;

#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
        if (fenceInfo.syncFd >= 0) {
            close(fenceInfo.syncFd);
        }
#endif

        info_VkFence.erase(fence);
    }

#ifdef VK_USE_PLATFORM_FUCHSIA
    void unregister_VkBufferCollectionFUCHSIA(
        VkBufferCollectionFUCHSIA collection) {
        AutoLock<RecursiveLock> lock(mLock);
        info_VkBufferCollectionFUCHSIA.erase(collection);
    }
#endif

    void unregister_VkDescriptorSet_locked(VkDescriptorSet set) {
        struct goldfish_VkDescriptorSet* ds = as_goldfish_VkDescriptorSet(set);
        delete ds->reified;
        info_VkDescriptorSet.erase(set);
    }

    void unregister_VkDescriptorSet(VkDescriptorSet set) {
        if (!set) return;

        AutoLock<RecursiveLock> lock(mLock);
        unregister_VkDescriptorSet_locked(set);
    }

    void unregister_VkDescriptorSetLayout(VkDescriptorSetLayout setLayout) {
        if (!setLayout) return;

        AutoLock<RecursiveLock> lock(mLock);
        delete as_goldfish_VkDescriptorSetLayout(setLayout)->layoutInfo;
        info_VkDescriptorSetLayout.erase(setLayout);
    }

    VkResult allocAndInitializeDescriptorSets(
        void* context,
        VkDevice device,
        const VkDescriptorSetAllocateInfo* ci,
        VkDescriptorSet* sets) {
        if (mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate) {
            // Using the pool IDs we collected earlier from the host
            VkResult poolAllocResult = validateAndApplyVirtualDescriptorSetAllocation(ci, sets);

            if (poolAllocResult != VK_SUCCESS) return poolAllocResult;

            for (uint32_t i = 0; i < ci->descriptorSetCount; ++i) {
                register_VkDescriptorSet(sets[i]);
                VkDescriptorSetLayout setLayout = as_goldfish_VkDescriptorSet(sets[i])->reified->setLayout;

                // Need to add a ref to the set layout in the virtual case,
                // because the set itself might not be realized on the host
                // at the same time.
                struct goldfish_VkDescriptorSetLayout* dsl = as_goldfish_VkDescriptorSetLayout(setLayout);
                ++dsl->layoutInfo->refcount;
            }
        } else {
            // Pass through and use host allocation
            VkEncoder* enc = (VkEncoder*)context;
            VkResult allocRes = enc->vkAllocateDescriptorSets(device, ci, sets, true /* do lock */);

            if (allocRes != VK_SUCCESS) return allocRes;

            for (uint32_t i = 0; i < ci->descriptorSetCount; ++i) {
                applyDescriptorSetAllocation(ci->descriptorPool, ci->pSetLayouts[i]);
                fillDescriptorSetInfoForPool(ci->descriptorPool, ci->pSetLayouts[i], sets[i]);
            }
        }

        return VK_SUCCESS;
    }

    VkDescriptorImageInfo createImmutableSamplersFilteredImageInfo(
        VkDescriptorType descType,
        VkDescriptorSet descSet,
        uint32_t binding,
        const VkDescriptorImageInfo* pImageInfo) {
        VkDescriptorImageInfo res = *pImageInfo;

        if (descType != VK_DESCRIPTOR_TYPE_SAMPLER &&
            descType != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) return res;

        bool immutableSampler = as_goldfish_VkDescriptorSet(descSet)->reified->bindingIsImmutableSampler[binding];

        if (!immutableSampler) return res;

        res.sampler = 0;

        return res;
    }

    bool descriptorBindingIsImmutableSampler(
        VkDescriptorSet dstSet,
        uint32_t dstBinding) {
        return as_goldfish_VkDescriptorSet(dstSet)->reified->bindingIsImmutableSampler[dstBinding];
    }

    VkDescriptorImageInfo
    filterNonexistentSampler(
        const VkDescriptorImageInfo& inputInfo) {
        VkSampler sampler = inputInfo.sampler;

        VkDescriptorImageInfo res = inputInfo;

        if (sampler) {
            auto it = info_VkSampler.find(sampler);
            bool samplerExists = it != info_VkSampler.end();
            if (!samplerExists) res.sampler = 0;
        }

        return res;
    }

    void freeDescriptorSetsIfHostAllocated(VkEncoder* enc, VkDevice device, uint32_t descriptorSetCount, const VkDescriptorSet* sets) {
        for (uint32_t i = 0; i < descriptorSetCount; ++i) {
            struct goldfish_VkDescriptorSet* ds = as_goldfish_VkDescriptorSet(sets[i]);
            if (ds->reified->allocationPending) {
                unregister_VkDescriptorSet(sets[i]);
                delete_goldfish_VkDescriptorSet(sets[i]);
            } else {
                enc->vkFreeDescriptorSets(device, ds->reified->pool, 1, &sets[i], false /* no lock */);
            }
        }
    }

    void clearDescriptorPoolAndUnregisterDescriptorSets(void* context, VkDevice device, VkDescriptorPool pool) {
        std::vector<VkDescriptorSet> toClear =
            clearDescriptorPool(pool, mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate);

        for (auto set : toClear) {
            if (mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate) {
                VkDescriptorSetLayout setLayout = as_goldfish_VkDescriptorSet(set)->reified->setLayout;
                decDescriptorSetLayoutRef(context, device, setLayout, nullptr);
            }
            unregister_VkDescriptorSet(set);
            delete_goldfish_VkDescriptorSet(set);
        }
    }

    void unregister_VkDescriptorPool(VkDescriptorPool pool) {
        if (!pool) return;

        AutoLock<RecursiveLock> lock(mLock);

        struct goldfish_VkDescriptorPool* dp = as_goldfish_VkDescriptorPool(pool);
        delete dp->allocInfo;

        info_VkDescriptorPool.erase(pool);
    }

    bool descriptorPoolSupportsIndividualFreeLocked(VkDescriptorPool pool) {
        return as_goldfish_VkDescriptorPool(pool)->allocInfo->createFlags &
            VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
    }

    static constexpr uint32_t kDefaultApiVersion = VK_MAKE_VERSION(1, 1, 0);

    void setInstanceInfo(VkInstance instance,
                         uint32_t enabledExtensionCount,
                         const char* const* ppEnabledExtensionNames,
                         uint32_t apiVersion) {
        AutoLock<RecursiveLock> lock(mLock);
        auto& info = info_VkInstance[instance];
        info.highestApiVersion = apiVersion;

        if (!ppEnabledExtensionNames) return;

        for (uint32_t i = 0; i < enabledExtensionCount; ++i) {
            info.enabledExtensions.insert(ppEnabledExtensionNames[i]);
        }
    }

    void setDeviceInfo(VkDevice device,
                       VkPhysicalDevice physdev,
                       VkPhysicalDeviceProperties props,
                       VkPhysicalDeviceMemoryProperties memProps,
                       uint32_t enabledExtensionCount,
                       const char* const* ppEnabledExtensionNames,
                       const void* pNext) {
        AutoLock<RecursiveLock> lock(mLock);
        auto& info = info_VkDevice[device];
        info.physdev = physdev;
        info.props = props;
        info.memProps = memProps;
        info.apiVersion = props.apiVersion;

        const VkBaseInStructure *extensionCreateInfo =
            reinterpret_cast<const VkBaseInStructure *>(pNext);
        while(extensionCreateInfo) {
            if(extensionCreateInfo->sType
                == VK_STRUCTURE_TYPE_DEVICE_DEVICE_MEMORY_REPORT_CREATE_INFO_EXT) {
                auto deviceMemoryReportCreateInfo =
                    reinterpret_cast<const VkDeviceDeviceMemoryReportCreateInfoEXT *>(
                        extensionCreateInfo);
                if(deviceMemoryReportCreateInfo->pfnUserCallback != nullptr) {
                    info.deviceMemoryReportCallbacks.emplace_back(
                        deviceMemoryReportCreateInfo->pfnUserCallback,
                        deviceMemoryReportCreateInfo->pUserData);
                }
            }
            extensionCreateInfo = extensionCreateInfo->pNext;
        }

        if (!ppEnabledExtensionNames) return;

        for (uint32_t i = 0; i < enabledExtensionCount; ++i) {
            info.enabledExtensions.insert(ppEnabledExtensionNames[i]);
        }
    }

    void emitDeviceMemoryReport(VkDevice_Info info,
                                VkDeviceMemoryReportEventTypeEXT type,
                                uint64_t memoryObjectId,
                                VkDeviceSize size,
                                VkObjectType objectType,
                                uint64_t objectHandle,
                                uint32_t heapIndex = 0) {
        if(info.deviceMemoryReportCallbacks.empty()) return;

        const VkDeviceMemoryReportCallbackDataEXT callbackData = {
            VK_STRUCTURE_TYPE_DEVICE_MEMORY_REPORT_CALLBACK_DATA_EXT,  // sType
            nullptr,                                                   // pNext
            0,                                                         // flags
            type,                                                      // type
            memoryObjectId,                                            // memoryObjectId
            size,                                                      // size
            objectType,                                                // objectType
            objectHandle,                                              // objectHandle
            heapIndex,                                                 // heapIndex
        };
        for(const auto &callback : info.deviceMemoryReportCallbacks) {
            callback.first(&callbackData, callback.second);
        }
    }

    void setDeviceMemoryInfo(VkDevice device,
                             VkDeviceMemory memory,
                             VkDeviceSize allocationSize,
                             uint8_t* ptr,
                             uint32_t memoryTypeIndex,
                             AHardwareBuffer* ahw = nullptr,
                             bool imported = false,
                             zx_handle_t vmoHandle = ZX_HANDLE_INVALID) {
        AutoLock<RecursiveLock> lock(mLock);
        auto& info = info_VkDeviceMemory[memory];

        info.device = device;
        info.allocationSize = allocationSize;
        info.ptr = ptr;
        info.memoryTypeIndex = memoryTypeIndex;
#ifdef VK_USE_PLATFORM_ANDROID_KHR
        info.ahw = ahw;
#endif
        info.imported = imported;
        info.vmoHandle = vmoHandle;
    }

    void setImageInfo(VkImage image,
                      VkDevice device,
                      const VkImageCreateInfo *pCreateInfo) {
        AutoLock<RecursiveLock> lock(mLock);
        auto& info = info_VkImage[image];

        info.device = device;
        info.createInfo = *pCreateInfo;
    }

    uint8_t* getMappedPointer(VkDeviceMemory memory) {
        AutoLock<RecursiveLock> lock(mLock);
        const auto it = info_VkDeviceMemory.find(memory);
        if (it == info_VkDeviceMemory.end()) return nullptr;

        const auto& info = it->second;
        return info.ptr;
    }

    VkDeviceSize getMappedSize(VkDeviceMemory memory) {
        AutoLock<RecursiveLock> lock(mLock);
        const auto it = info_VkDeviceMemory.find(memory);
        if (it == info_VkDeviceMemory.end()) return 0;

        const auto& info = it->second;
        return info.allocationSize;
    }

    bool isValidMemoryRange(const VkMappedMemoryRange& range) const {
        AutoLock<RecursiveLock> lock(mLock);
        const auto it = info_VkDeviceMemory.find(range.memory);
        if (it == info_VkDeviceMemory.end()) return false;
        const auto& info = it->second;

        if (!info.ptr) return false;

        VkDeviceSize offset = range.offset;
        VkDeviceSize size = range.size;

        if (size == VK_WHOLE_SIZE) {
            return offset <= info.allocationSize;
        }

        return offset + size <= info.allocationSize;
    }
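
    // Example with the checks above: for a 4096-byte allocation, a range of
    // (offset = 4096, size = VK_WHOLE_SIZE) is accepted (it covers zero bytes
    // at the end of the allocation), while (offset = 0, size = 8192) is
    // rejected because 8192 > 4096.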

    void setupCaps(void) {
        VirtGpuDevice& instance = VirtGpuDevice::getInstance((enum VirtGpuCapset)3);
        mCaps = instance.getCaps();

        // Delete once goldfish Linux drivers are gone
        if (mCaps.gfxstreamCapset.protocolVersion == 0) {
            mCaps.gfxstreamCapset.colorBufferMemoryIndex = 0xFFFFFFFF;
        }
    }

    void setupFeatures(const EmulatorFeatureInfo* features) {
        if (!features || mFeatureInfo) return;
        mFeatureInfo.reset(new EmulatorFeatureInfo);
        *mFeatureInfo = *features;

        if (mFeatureInfo->hasDirectMem) {
            mGoldfishAddressSpaceBlockProvider.reset(
                new GoldfishAddressSpaceBlockProvider(
                    GoldfishAddressSpaceSubdeviceType::NoSubdevice));
        }

#ifdef VK_USE_PLATFORM_FUCHSIA
        if (mFeatureInfo->hasVulkan) {
            fidl::ClientEnd<fuchsia_hardware_goldfish::ControlDevice> channel{
                zx::channel(GetConnectToServiceFunction()("/loader-gpu-devices/class/goldfish-control/000"))};
            if (!channel) {
                ALOGE("failed to open control device");
                abort();
            }
            mControlDevice =
                fidl::WireSyncClient<fuchsia_hardware_goldfish::ControlDevice>(
                    std::move(channel));

            fidl::ClientEnd<fuchsia_sysmem::Allocator> sysmem_channel{
                zx::channel(GetConnectToServiceFunction()("/svc/fuchsia.sysmem.Allocator"))};
            if (!sysmem_channel) {
                ALOGE("failed to open sysmem connection");
            }
            mSysmemAllocator =
                fidl::WireSyncClient<fuchsia_sysmem::Allocator>(
                    std::move(sysmem_channel));
            char name[ZX_MAX_NAME_LEN] = {};
            zx_object_get_property(zx_process_self(), ZX_PROP_NAME, name, sizeof(name));
            std::string client_name(name);
            client_name += "-goldfish";
            zx_info_handle_basic_t info;
            zx_object_get_info(zx_process_self(), ZX_INFO_HANDLE_BASIC, &info, sizeof(info),
                               nullptr, nullptr);
            mSysmemAllocator->SetDebugClientInfo(fidl::StringView::FromExternal(client_name),
                                                 info.koid);
        }
#endif

        if (mFeatureInfo->hasVulkanNullOptionalStrings) {
            ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_NULL_OPTIONAL_STRINGS_BIT;
        }
        if (mFeatureInfo->hasVulkanIgnoredHandles) {
            ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT;
        }
        if (mFeatureInfo->hasVulkanShaderFloat16Int8) {
            ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_SHADER_FLOAT16_INT8_BIT;
        }
        if (mFeatureInfo->hasVulkanQueueSubmitWithCommands) {
            ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
        }
    }

    void setThreadingCallbacks(const ResourceTracker::ThreadingCallbacks& callbacks) {
        ResourceTracker::threadingCallbacks = callbacks;
    }

    bool hostSupportsVulkan() const {
        if (!mFeatureInfo) return false;

        return mFeatureInfo->hasVulkan;
    }

    bool usingDirectMapping() const {
        return true;
    }

    uint32_t getStreamFeatures() const {
        return ResourceTracker::streamFeatureBits;
    }

    bool supportsDeferredCommands() const {
        if (!mFeatureInfo) return false;
        return mFeatureInfo->hasDeferredVulkanCommands;
    }

    bool supportsAsyncQueueSubmit() const {
        if (!mFeatureInfo) return false;
        return mFeatureInfo->hasVulkanAsyncQueueSubmit;
    }

    bool supportsCreateResourcesWithRequirements() const {
        if (!mFeatureInfo) return false;
        return mFeatureInfo->hasVulkanCreateResourcesWithRequirements;
    }

    int getHostInstanceExtensionIndex(const std::string& extName) const {
        int i = 0;
        for (const auto& prop : mHostInstanceExtensions) {
            if (extName == std::string(prop.extensionName)) {
                return i;
            }
            ++i;
        }
        return -1;
    }

    int getHostDeviceExtensionIndex(const std::string& extName) const {
        int i = 0;
        for (const auto& prop : mHostDeviceExtensions) {
            if (extName == std::string(prop.extensionName)) {
                return i;
            }
            ++i;
        }
        return -1;
    }

    void deviceMemoryTransform_tohost(
        VkDeviceMemory* memory, uint32_t memoryCount,
        VkDeviceSize* offset, uint32_t offsetCount,
        VkDeviceSize* size, uint32_t sizeCount,
        uint32_t* typeIndex, uint32_t typeIndexCount,
        uint32_t* typeBits, uint32_t typeBitsCount) {
        (void)memoryCount;
        (void)offsetCount;
        (void)sizeCount;
        (void)typeIndex;
        (void)typeIndexCount;
        (void)typeBits;
        (void)typeBitsCount;

        if (memory) {
            AutoLock<RecursiveLock> lock (mLock);

            for (uint32_t i = 0; i < memoryCount; ++i) {
                VkDeviceMemory mem = memory[i];

                auto it = info_VkDeviceMemory.find(mem);
                if (it == info_VkDeviceMemory.end())
                    return;

                const auto& info = it->second;

                if (!info.coherentMemory)
                    continue;

                memory[i] = info.coherentMemory->getDeviceMemory();

                if (offset) {
                    offset[i] = info.coherentMemoryOffset + offset[i];
                }

                if (size && size[i] == VK_WHOLE_SIZE) {
                    size[i] = info.allocationSize;
                }

                // TODO
                (void)memory;
                (void)offset;
                (void)size;
            }
        }
    }
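
    // Example: if a guest VkDeviceMemory was suballocated at offset 0x10000
    // of a host-visible coherent block, the transform above rewrites a guest
    // pair (memory, offset 0x100) to (the coherent block's VkDeviceMemory,
    // offset 0x10100) before it is encoded to the host.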

    void deviceMemoryTransform_fromhost(
        VkDeviceMemory* memory, uint32_t memoryCount,
        VkDeviceSize* offset, uint32_t offsetCount,
        VkDeviceSize* size, uint32_t sizeCount,
        uint32_t* typeIndex, uint32_t typeIndexCount,
        uint32_t* typeBits, uint32_t typeBitsCount) {
        (void)memory;
        (void)memoryCount;
        (void)offset;
        (void)offsetCount;
        (void)size;
        (void)sizeCount;
        (void)typeIndex;
        (void)typeIndexCount;
        (void)typeBits;
        (void)typeBitsCount;
    }

    void transformImpl_VkExternalMemoryProperties_fromhost(
        VkExternalMemoryProperties* pProperties,
        uint32_t) {
        VkExternalMemoryHandleTypeFlags supportedHandleType = 0u;
#ifdef VK_USE_PLATFORM_FUCHSIA
        supportedHandleType |=
                VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA;
#endif  // VK_USE_PLATFORM_FUCHSIA
#ifdef VK_USE_PLATFORM_ANDROID_KHR
        supportedHandleType |=
            VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
            VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
#endif  // VK_USE_PLATFORM_ANDROID_KHR
        if (supportedHandleType) {
            pProperties->compatibleHandleTypes &= supportedHandleType;
            pProperties->exportFromImportedHandleTypes &= supportedHandleType;
        }
    }

    VkResult on_vkEnumerateInstanceExtensionProperties(
        void* context,
        VkResult,
        const char*,
        uint32_t* pPropertyCount,
        VkExtensionProperties* pProperties) {
        std::vector<const char*> allowedExtensionNames = {
            "VK_KHR_get_physical_device_properties2",
            "VK_KHR_sampler_ycbcr_conversion",
#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
            "VK_KHR_external_semaphore_capabilities",
            "VK_KHR_external_memory_capabilities",
            "VK_KHR_external_fence_capabilities",
#endif
        };

        VkEncoder* enc = (VkEncoder*)context;

        // Only advertise a select set of extensions.
        if (mHostInstanceExtensions.empty()) {
            uint32_t hostPropCount = 0;
            enc->vkEnumerateInstanceExtensionProperties(nullptr, &hostPropCount, nullptr, true /* do lock */);
            mHostInstanceExtensions.resize(hostPropCount);

            VkResult hostRes =
                enc->vkEnumerateInstanceExtensionProperties(
                    nullptr, &hostPropCount, mHostInstanceExtensions.data(), true /* do lock */);

            if (hostRes != VK_SUCCESS) {
                return hostRes;
            }
        }

        std::vector<VkExtensionProperties> filteredExts;

        for (size_t i = 0; i < allowedExtensionNames.size(); ++i) {
            auto extIndex = getHostInstanceExtensionIndex(allowedExtensionNames[i]);
            if (extIndex != -1) {
                filteredExts.push_back(mHostInstanceExtensions[extIndex]);
            }
        }

        VkExtensionProperties anbExtProps[] = {
#ifdef VK_USE_PLATFORM_FUCHSIA
            { "VK_KHR_external_memory_capabilities", 1},
            { "VK_KHR_external_semaphore_capabilities", 1},
#endif
        };

        for (auto& anbExtProp: anbExtProps) {
            filteredExts.push_back(anbExtProp);
        }

        // Spec:
        //
        // https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/vkEnumerateInstanceExtensionProperties.html
        //
        // If pProperties is NULL, then the number of extensions properties
        // available is returned in pPropertyCount. Otherwise, pPropertyCount
        // must point to a variable set by the user to the number of elements
        // in the pProperties array, and on return the variable is overwritten
        // with the number of structures actually written to pProperties. If
        // pPropertyCount is less than the number of extension properties
        // available, at most pPropertyCount structures will be written. If
        // pPropertyCount is smaller than the number of extensions available,
        // VK_INCOMPLETE will be returned instead of VK_SUCCESS, to indicate
        // that not all the available properties were returned.
        //
        // pPropertyCount must be a valid pointer to a uint32_t value
        if (!pPropertyCount) return VK_ERROR_INITIALIZATION_FAILED;

        if (!pProperties) {
            *pPropertyCount = (uint32_t)filteredExts.size();
            return VK_SUCCESS;
        } else {
            auto actualExtensionCount = (uint32_t)filteredExts.size();
            if (*pPropertyCount > actualExtensionCount) {
                *pPropertyCount = actualExtensionCount;
            }

            for (uint32_t i = 0; i < *pPropertyCount; ++i) {
                pProperties[i] = filteredExts[i];
            }

            if (actualExtensionCount > *pPropertyCount) {
                return VK_INCOMPLETE;
            }

            return VK_SUCCESS;
        }
    }
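
    // The filtering above preserves the standard Vulkan two-call pattern
    // (count query, then fill), e.g. from an application's point of view:
    //
    //   uint32_t count = 0;
    //   vkEnumerateInstanceExtensionProperties(nullptr, &count, nullptr);
    //   std::vector<VkExtensionProperties> props(count);
    //   vkEnumerateInstanceExtensionProperties(nullptr, &count, props.data());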
1214 
on_vkEnumerateDeviceExtensionProperties(void * context,VkResult,VkPhysicalDevice physdev,const char *,uint32_t * pPropertyCount,VkExtensionProperties * pProperties)1215     VkResult on_vkEnumerateDeviceExtensionProperties(
1216         void* context,
1217         VkResult,
1218         VkPhysicalDevice physdev,
1219         const char*,
1220         uint32_t* pPropertyCount,
1221         VkExtensionProperties* pProperties) {
1222         std::vector<const char*> allowedExtensionNames = {
1223             "VK_KHR_vulkan_memory_model",
1224             "VK_KHR_buffer_device_address",
1225             "VK_KHR_maintenance1",
1226             "VK_KHR_maintenance2",
1227             "VK_KHR_maintenance3",
1228             "VK_KHR_bind_memory2",
1229             "VK_KHR_dedicated_allocation",
1230             "VK_KHR_get_memory_requirements2",
1231             "VK_KHR_sampler_ycbcr_conversion",
1232             "VK_KHR_shader_float16_int8",
1233         // Timeline semaphores buggy in newer NVIDIA drivers
1234         // (vkWaitSemaphoresKHR causes further vkCommandBuffer dispatches to deadlock)
1235 #ifndef VK_USE_PLATFORM_ANDROID_KHR
1236             "VK_KHR_timeline_semaphore",
1237 #endif
1238             "VK_AMD_gpu_shader_half_float",
1239             "VK_NV_shader_subgroup_partitioned",
1240             "VK_KHR_shader_subgroup_extended_types",
1241             "VK_EXT_subgroup_size_control",
1242             "VK_EXT_provoking_vertex",
1243             "VK_EXT_line_rasterization",
1244             "VK_KHR_shader_terminate_invocation",
1245             "VK_EXT_transform_feedback",
1246             "VK_EXT_primitive_topology_list_restart",
1247             "VK_EXT_index_type_uint8",
1248             "VK_EXT_load_store_op_none",
1249             "VK_EXT_swapchain_colorspace",
1250             "VK_EXT_image_robustness",
1251             "VK_EXT_custom_border_color",
1252             "VK_EXT_shader_stencil_export",
1253             "VK_KHR_image_format_list",
1254             "VK_KHR_incremental_present",
1255             "VK_KHR_pipeline_executable_properties",
1256             "VK_EXT_queue_family_foreign",
1257             "VK_KHR_descriptor_update_template",
1258             "VK_KHR_storage_buffer_storage_class",
1259 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
1260             "VK_KHR_external_semaphore",
1261             "VK_KHR_external_semaphore_fd",
1262             // "VK_KHR_external_semaphore_win32", not exposed because it's translated to fd
1263             "VK_KHR_external_memory",
1264             "VK_KHR_external_fence",
1265             "VK_KHR_external_fence_fd",
1266             "VK_EXT_device_memory_report",
1267 #endif
1268 #if !defined(VK_USE_PLATFORM_ANDROID_KHR) && defined(__linux__)
1269             "VK_KHR_create_renderpass2",
1270             "VK_KHR_imageless_framebuffer",
1271 #endif
1272             // Vulkan 1.3
1273             "VK_KHR_synchronization2",
1274             "VK_EXT_private_data",
1275         };
1276 
1277         VkEncoder* enc = (VkEncoder*)context;
1278 
1279         if (mHostDeviceExtensions.empty()) {
1280             uint32_t hostPropCount = 0;
1281             enc->vkEnumerateDeviceExtensionProperties(physdev, nullptr, &hostPropCount, nullptr, true /* do lock */);
1282             mHostDeviceExtensions.resize(hostPropCount);
1283 
1284             VkResult hostRes =
1285                 enc->vkEnumerateDeviceExtensionProperties(
1286                     physdev, nullptr, &hostPropCount, mHostDeviceExtensions.data(), true /* do lock */);
1287 
1288             if (hostRes != VK_SUCCESS) {
1289                 return hostRes;
1290             }
1291         }
1292 
1293         bool hostHasWin32ExternalSemaphore =
1294             getHostDeviceExtensionIndex(
1295                 "VK_KHR_external_semaphore_win32") != -1;
1296 
1297         bool hostHasPosixExternalSemaphore =
1298             getHostDeviceExtensionIndex(
1299                 "VK_KHR_external_semaphore_fd") != -1;
1300 
1301         D("%s: host has ext semaphore? win32 %d posix %d\n", __func__,
1302           hostHasWin32ExternalSemaphore,
1303           hostHasPosixExternalSemaphore);
1304 
1305         bool hostSupportsExternalSemaphore =
1306             hostHasWin32ExternalSemaphore ||
1307             hostHasPosixExternalSemaphore;
1308 
1309         std::vector<VkExtensionProperties> filteredExts;
1310 
1311         for (size_t i = 0; i < allowedExtensionNames.size(); ++i) {
1312             auto extIndex = getHostDeviceExtensionIndex(allowedExtensionNames[i]);
1313             if (extIndex != -1) {
1314                 filteredExts.push_back(mHostDeviceExtensions[extIndex]);
1315             }
1316         }
1317 
1318         VkExtensionProperties anbExtProps[] = {
1319 #ifdef VK_USE_PLATFORM_ANDROID_KHR
1320             { "VK_ANDROID_native_buffer", 7 },
1321 #endif
1322 #ifdef VK_USE_PLATFORM_FUCHSIA
1323             { "VK_KHR_external_memory", 1 },
1324             { "VK_KHR_external_semaphore", 1 },
1325             { "VK_FUCHSIA_external_semaphore", 1 },
1326 #endif
1327         };
1328 
1329         for (auto& anbExtProp: anbExtProps) {
1330             filteredExts.push_back(anbExtProp);
1331         }
1332 
1333 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
1334         bool hostSupportsExternalFenceFd =
1335             getHostDeviceExtensionIndex(
1336                 "VK_KHR_external_fence_fd") != -1;
1337         if (!hostSupportsExternalFenceFd) {
1338             filteredExts.push_back(
1339                 VkExtensionProperties { "VK_KHR_external_fence_fd", 1});
1340         }
1341 #endif
1342 
1343 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
1344         if (hostSupportsExternalSemaphore &&
1345             !hostHasPosixExternalSemaphore) {
1346             filteredExts.push_back(
1347                 VkExtensionProperties { "VK_KHR_external_semaphore_fd", 1});
1348         }
1349 #endif
1350 
1351         bool win32ExtMemAvailable =
1352             getHostDeviceExtensionIndex(
1353                 "VK_KHR_external_memory_win32") != -1;
1354         bool posixExtMemAvailable =
1355             getHostDeviceExtensionIndex(
1356                 "VK_KHR_external_memory_fd") != -1;
1357         bool moltenVkExtAvailable =
1358             getHostDeviceExtensionIndex(
1359                 "VK_MVK_moltenvk") != -1;
1360 
1361         bool hostHasExternalMemorySupport =
1362             win32ExtMemAvailable || posixExtMemAvailable || moltenVkExtAvailable;
1363 
1364         if (hostHasExternalMemorySupport) {
1365 #ifdef VK_USE_PLATFORM_ANDROID_KHR
1366             filteredExts.push_back(
1367                 VkExtensionProperties {
1368                    "VK_ANDROID_external_memory_android_hardware_buffer", 7
1369                 });
1370             filteredExts.push_back(
1371                 VkExtensionProperties { "VK_EXT_queue_family_foreign", 1 });
1372 #endif
1373 #ifdef VK_USE_PLATFORM_FUCHSIA
1374             filteredExts.push_back(
1375                 VkExtensionProperties { "VK_FUCHSIA_external_memory", 1});
1376             filteredExts.push_back(
1377                 VkExtensionProperties { "VK_FUCHSIA_buffer_collection", 1 });
1378 #endif
1379 #if !defined(VK_USE_PLATFORM_ANDROID_KHR) && defined(__linux__)
1380             filteredExts.push_back(
1381                 VkExtensionProperties {
1382                    "VK_KHR_external_memory_fd", 1
1383                 });
1384             filteredExts.push_back(
1385                 VkExtensionProperties { "VK_EXT_external_memory_dma_buf", 1 });
1386 #endif
1387         }
1388 
1389         // Spec:
1390         //
1391         // https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/vkEnumerateDeviceExtensionProperties.html
1392         //
1393         // pPropertyCount is a pointer to an integer related to the number of
1394         // extension properties available or queried, and is treated in the
1395         // same fashion as the
1396         // vkEnumerateInstanceExtensionProperties::pPropertyCount parameter.
1397         //
1398         // https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/vkEnumerateInstanceExtensionProperties.html
1399         //
1400         // If pProperties is NULL, then the number of extensions properties
1401         // available is returned in pPropertyCount. Otherwise, pPropertyCount
1402         // must point to a variable set by the user to the number of elements
1403         // in the pProperties array, and on return the variable is overwritten
1404         // with the number of structures actually written to pProperties. If
1405         // pPropertyCount is less than the number of extension properties
1406         // available, at most pPropertyCount structures will be written. If
1407         // pPropertyCount is smaller than the number of extensions available,
1408         // VK_INCOMPLETE will be returned instead of VK_SUCCESS, to indicate
1409         // that not all the available properties were returned.
1410         //
1411         // pPropertyCount must be a valid pointer to a uint32_t value
1412 
1413         if (!pPropertyCount) return VK_ERROR_INITIALIZATION_FAILED;
1414 
1415         if (!pProperties) {
1416             *pPropertyCount = (uint32_t)filteredExts.size();
1417             return VK_SUCCESS;
1418         } else {
1419             auto actualExtensionCount = (uint32_t)filteredExts.size();
1420             if (*pPropertyCount > actualExtensionCount) {
1421                 *pPropertyCount = actualExtensionCount;
1422             }
1423 
1424             for (uint32_t i = 0; i < *pPropertyCount; ++i) {
1425                 pProperties[i] = filteredExts[i];
1426             }
1427 
1428             if (actualExtensionCount > *pPropertyCount) {
1429                 return VK_INCOMPLETE;
1430             }
1431 
1432             return VK_SUCCESS;
1433         }
1434     }
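
    // Illustrative sketch (not part of the driver): callers consume the
    // filtered list through the standard Vulkan two-call idiom, which the
    // count/pProperties handling above implements:
    //
    //     uint32_t count = 0;
    //     vkEnumerateDeviceExtensionProperties(physdev, nullptr, &count, nullptr);
    //     std::vector<VkExtensionProperties> exts(count);
    //     vkEnumerateDeviceExtensionProperties(physdev, nullptr, &count, exts.data());
    //
    // Passing a smaller count into the second call truncates the output and
    // yields VK_INCOMPLETE, as above.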
1435 
1436     VkResult on_vkEnumeratePhysicalDevices(
1437         void* context, VkResult,
1438         VkInstance instance, uint32_t* pPhysicalDeviceCount,
1439         VkPhysicalDevice* pPhysicalDevices) {
1440 
1441         VkEncoder* enc = (VkEncoder*)context;
1442 
1443         if (!instance) return VK_ERROR_INITIALIZATION_FAILED;
1444 
1445         if (!pPhysicalDeviceCount) return VK_ERROR_INITIALIZATION_FAILED;
1446 
1447         AutoLock<RecursiveLock> lock(mLock);
1448 
1449         // When this function is called, we actually need to do two things:
1450         // - Get full information about physical devices from the host,
1451         // even if the guest did not ask for it
1452         // - Serve the guest query according to the spec:
1453         //
1454         // https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/vkEnumeratePhysicalDevices.html
1455 
1456         auto it = info_VkInstance.find(instance);
1457 
1458         if (it == info_VkInstance.end()) return VK_ERROR_INITIALIZATION_FAILED;
1459 
1460         auto& info = it->second;
1461 
1462         // Get the full host information here if it doesn't exist already.
1463         if (info.physicalDevices.empty()) {
1464             uint32_t hostPhysicalDeviceCount = 0;
1465 
1466             lock.unlock();
1467             VkResult countRes = enc->vkEnumeratePhysicalDevices(
1468                 instance, &hostPhysicalDeviceCount, nullptr, false /* no lock */);
1469             lock.lock();
1470 
1471             if (countRes != VK_SUCCESS) {
1472                 ALOGE("%s: failed: could not count host physical devices. "
1473                       "Error %d\n", __func__, countRes);
1474                 return countRes;
1475             }
1476 
1477             info.physicalDevices.resize(hostPhysicalDeviceCount);
1478 
1479             lock.unlock();
1480             VkResult enumRes = enc->vkEnumeratePhysicalDevices(
1481                 instance, &hostPhysicalDeviceCount, info.physicalDevices.data(), false /* no lock */);
1482             lock.lock();
1483 
1484             if (enumRes != VK_SUCCESS) {
1485                 ALOGE("%s: failed: could not retrieve host physical devices. "
1486                       "Error %d\n", __func__, enumRes);
1487                 return enumRes;
1488             }
1489         }
1490 
1491         // Serve the guest query according to the spec.
1492         //
1493         // https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/vkEnumeratePhysicalDevices.html
1494         //
1495         // If pPhysicalDevices is NULL, then the number of physical devices
1496         // available is returned in pPhysicalDeviceCount. Otherwise,
1497         // pPhysicalDeviceCount must point to a variable set by the user to the
1498         // number of elements in the pPhysicalDevices array, and on return the
1499         // variable is overwritten with the number of handles actually written
1500         // to pPhysicalDevices. If pPhysicalDeviceCount is less than the number
1501         // of physical devices available, at most pPhysicalDeviceCount
1502         // structures will be written.  If pPhysicalDeviceCount is smaller than
1503         // the number of physical devices available, VK_INCOMPLETE will be
1504         // returned instead of VK_SUCCESS, to indicate that not all the
1505         // available physical devices were returned.
1506 
1507         if (!pPhysicalDevices) {
1508             *pPhysicalDeviceCount = (uint32_t)info.physicalDevices.size();
1509             return VK_SUCCESS;
1510         } else {
1511             uint32_t actualDeviceCount = (uint32_t)info.physicalDevices.size();
1512             uint32_t toWrite = actualDeviceCount < *pPhysicalDeviceCount ? actualDeviceCount : *pPhysicalDeviceCount;
1513 
1514             for (uint32_t i = 0; i < toWrite; ++i) {
1515                 pPhysicalDevices[i] = info.physicalDevices[i];
1516             }
1517 
1518             *pPhysicalDeviceCount = toWrite;
1519 
1520             if (actualDeviceCount > *pPhysicalDeviceCount) {
1521                 return VK_INCOMPLETE;
1522             }
1523 
1524             return VK_SUCCESS;
1525         }
1526     }
1527 
1528     void on_vkGetPhysicalDeviceProperties(
1529         void*,
1530         VkPhysicalDevice,
1531         VkPhysicalDeviceProperties*) {
1532     }
1533 
1534     void on_vkGetPhysicalDeviceFeatures2(
1535         void*,
1536         VkPhysicalDevice,
1537         VkPhysicalDeviceFeatures2* pFeatures) {
1538         if (pFeatures) {
1539             VkPhysicalDeviceDeviceMemoryReportFeaturesEXT* memoryReportFeaturesEXT =
1540                 vk_find_struct<VkPhysicalDeviceDeviceMemoryReportFeaturesEXT>(pFeatures);
1541             if (memoryReportFeaturesEXT) {
1542                 memoryReportFeaturesEXT->deviceMemoryReport = VK_TRUE;
1543             }
1544         }
1545     }
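
    // For reference, vk_find_struct above is assumed to walk the pNext
    // chain matching on sType, roughly:
    //
    //     VkBaseOutStructure* s = reinterpret_cast<VkBaseOutStructure*>(pFeatures);
    //     while (s && s->sType !=
    //            VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_MEMORY_REPORT_FEATURES_EXT)
    //         s = s->pNext;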
1546 
1547     void on_vkGetPhysicalDeviceProperties2(
1548         void*,
1549         VkPhysicalDevice,
1550         VkPhysicalDeviceProperties2* pProperties) {
1551         if (pProperties) {
1552             VkPhysicalDeviceDeviceMemoryReportFeaturesEXT* memoryReportFeaturesEXT =
1553                 vk_find_struct<VkPhysicalDeviceDeviceMemoryReportFeaturesEXT>(pProperties);
1554             if (memoryReportFeaturesEXT) {
1555                 memoryReportFeaturesEXT->deviceMemoryReport = VK_TRUE;
1556             }
1557         }
1558     }
1559 
1560     void on_vkGetPhysicalDeviceMemoryProperties(
1561         void* context,
1562         VkPhysicalDevice physicalDevice,
1563         VkPhysicalDeviceMemoryProperties* out) {
1564         // gfxstream decides which physical device to expose to the guest on startup.
1565         // Otherwise, we would need a per-physical-device properties mapping here.
1566         *out = getPhysicalDeviceMemoryProperties(context, VK_NULL_HANDLE, physicalDevice);
1567     }
1568 
1569     void on_vkGetPhysicalDeviceMemoryProperties2(
1570         void*,
1571         VkPhysicalDevice physdev,
1572         VkPhysicalDeviceMemoryProperties2* out) {
1573 
1574         on_vkGetPhysicalDeviceMemoryProperties(nullptr, physdev, &out->memoryProperties);
1575     }
1576 
1577     void on_vkGetDeviceQueue(void*,
1578                              VkDevice device,
1579                              uint32_t,
1580                              uint32_t,
1581                              VkQueue* pQueue) {
1582         AutoLock<RecursiveLock> lock(mLock);
1583         info_VkQueue[*pQueue].device = device;
1584     }
1585 
1586     void on_vkGetDeviceQueue2(void*,
1587                               VkDevice device,
1588                               const VkDeviceQueueInfo2*,
1589                               VkQueue* pQueue) {
1590         AutoLock<RecursiveLock> lock(mLock);
1591         info_VkQueue[*pQueue].device = device;
1592     }
1593 
1594     VkResult on_vkCreateInstance(
1595         void* context,
1596         VkResult input_result,
1597         const VkInstanceCreateInfo* createInfo,
1598         const VkAllocationCallbacks*,
1599         VkInstance* pInstance) {
1600 
1601         if (input_result != VK_SUCCESS) return input_result;
1602 
1603         VkEncoder* enc = (VkEncoder*)context;
1604 
1605         // Default to 1.0 so apiVersion stays defined if the host query fails.
1606         uint32_t apiVersion = VK_API_VERSION_1_0;
1607         enc->vkEnumerateInstanceVersion(&apiVersion, false /* no lock */);
1608 
1609         setInstanceInfo(
1610             *pInstance,
1611             createInfo->enabledExtensionCount,
1612             createInfo->ppEnabledExtensionNames,
1613             apiVersion);
1614 
1615         return input_result;
1616     }
1617 
1618     VkResult on_vkCreateDevice(
1619         void* context,
1620         VkResult input_result,
1621         VkPhysicalDevice physicalDevice,
1622         const VkDeviceCreateInfo* pCreateInfo,
1623         const VkAllocationCallbacks*,
1624         VkDevice* pDevice) {
1625 
1626         if (input_result != VK_SUCCESS) return input_result;
1627 
1628         VkEncoder* enc = (VkEncoder*)context;
1629 
1630         VkPhysicalDeviceProperties props;
1631         VkPhysicalDeviceMemoryProperties memProps;
1632         enc->vkGetPhysicalDeviceProperties(physicalDevice, &props, false /* no lock */);
1633         enc->vkGetPhysicalDeviceMemoryProperties(physicalDevice, &memProps, false /* no lock */);
1634 
1635         setDeviceInfo(
1636             *pDevice, physicalDevice, props, memProps,
1637             pCreateInfo->enabledExtensionCount, pCreateInfo->ppEnabledExtensionNames,
1638             pCreateInfo->pNext);
1639 
1640         return input_result;
1641     }
1642 
1643     void on_vkDestroyDevice_pre(
1644         void* context,
1645         VkDevice device,
1646         const VkAllocationCallbacks*) {
1647 
1648         (void)context;
1649         AutoLock<RecursiveLock> lock(mLock);
1650 
1651         auto it = info_VkDevice.find(device);
1652         if (it == info_VkDevice.end()) return;
1653 
1654         for (auto itr = info_VkDeviceMemory.cbegin() ; itr != info_VkDeviceMemory.cend(); ) {
1655             auto& memInfo = itr->second;
1656             if (memInfo.device == device) {
1657                 itr = info_VkDeviceMemory.erase(itr);
1658             } else {
1659                 itr++;
1660             }
1661         }
1662     }
1663 
1664 #ifdef VK_USE_PLATFORM_ANDROID_KHR
1665     uint32_t getColorBufferMemoryIndex(void* context, VkDevice device) {
1666         // Create test image to get the memory requirements
1667         VkEncoder* enc = (VkEncoder*)context;
1668         VkImageCreateInfo createInfo = {
1669             .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
1670             .imageType = VK_IMAGE_TYPE_2D,
1671             .format = VK_FORMAT_R8G8B8A8_UNORM,
1672             .extent = {64, 64, 1},
1673             .mipLevels = 1,
1674             .arrayLayers = 1,
1675             .samples = VK_SAMPLE_COUNT_1_BIT,
1676             .tiling = VK_IMAGE_TILING_OPTIMAL,
1677             .usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT |
1678                      VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
1679                      VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT,
1680             .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
1681         };
1682         VkImage image = VK_NULL_HANDLE;
1683         VkResult res = enc->vkCreateImage(device, &createInfo, nullptr, &image, true /* do lock */);
1684 
1685         if (res != VK_SUCCESS) {
1686             return 0;
1687         }
1688 
1689         VkMemoryRequirements memReqs;
1690         enc->vkGetImageMemoryRequirements(
1691             device, image, &memReqs, true /* do lock */);
1692         enc->vkDestroyImage(device, image, nullptr, true /* do lock */);
1693 
1694         const VkPhysicalDeviceMemoryProperties& memProps =
1695                 getPhysicalDeviceMemoryProperties(context, device, VK_NULL_HANDLE);
1696 
1697         // Currently, the host looks for the last memory type index whose
1698         // property flags include VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT.
1699         VkMemoryPropertyFlags memoryProperty = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
1700         for (int i = VK_MAX_MEMORY_TYPES - 1; i >= 0; --i) {
1701             if ((memReqs.memoryTypeBits & (1u << i)) &&
1702                 (memProps.memoryTypes[i].propertyFlags & memoryProperty)) {
1703                 return i;
1704             }
1705         }
1706 
1707         return 0;
1708     }
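
    // Worked example for the scan above: with memReqs.memoryTypeBits ==
    // 0b1011 and VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT set on types 0 and 3,
    // the loop walks down from the top index and returns 3, matching the
    // host's last-index convention noted above.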
1709 
1710     VkResult on_vkGetAndroidHardwareBufferPropertiesANDROID(
1711             void* context, VkResult,
1712             VkDevice device,
1713             const AHardwareBuffer* buffer,
1714             VkAndroidHardwareBufferPropertiesANDROID* pProperties) {
1715         auto grallocHelper =
1716             ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->grallocHelper();
1717 
1718         // Delete once goldfish Linux drivers are gone
1719         if (mCaps.gfxstreamCapset.colorBufferMemoryIndex == 0xFFFFFFFF) {
1720             mCaps.gfxstreamCapset.colorBufferMemoryIndex =
1721                     getColorBufferMemoryIndex(context, device);
1722         }
1723 
1724         updateMemoryTypeBits(&pProperties->memoryTypeBits,
1725                              mCaps.gfxstreamCapset.colorBufferMemoryIndex);
1726 
1727         return getAndroidHardwareBufferPropertiesANDROID(
1728             grallocHelper, buffer, pProperties);
1729     }
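
    // Note (assumption; the helper is defined elsewhere): updateMemoryTypeBits
    // is taken here to restrict the reported mask to the color-buffer memory
    // type, conceptually:
    //
    //     *memoryTypeBits = 1u << colorBufferMemoryIndex;
    //
    // so AHB-backed allocations land on the memory type the host expects.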
1730 
1731     VkResult on_vkGetMemoryAndroidHardwareBufferANDROID(
1732         void*, VkResult,
1733         VkDevice device,
1734         const VkMemoryGetAndroidHardwareBufferInfoANDROID *pInfo,
1735         struct AHardwareBuffer** pBuffer) {
1736 
1737         if (!pInfo) return VK_ERROR_INITIALIZATION_FAILED;
1738         if (!pInfo->memory) return VK_ERROR_INITIALIZATION_FAILED;
1739 
1740         AutoLock<RecursiveLock> lock(mLock);
1741 
1742         auto deviceIt = info_VkDevice.find(device);
1743 
1744         if (deviceIt == info_VkDevice.end()) {
1745             return VK_ERROR_INITIALIZATION_FAILED;
1746         }
1747 
1748         auto memoryIt = info_VkDeviceMemory.find(pInfo->memory);
1749 
1750         if (memoryIt == info_VkDeviceMemory.end()) {
1751             return VK_ERROR_INITIALIZATION_FAILED;
1752         }
1753 
1754         auto& info = memoryIt->second;
1755 
1756         VkResult queryRes =
1757             getMemoryAndroidHardwareBufferANDROID(&info.ahw);
1758 
1759         if (queryRes != VK_SUCCESS) return queryRes;
1760 
1761         *pBuffer = info.ahw;
1762 
1763         return queryRes;
1764     }
1765 #endif
1766 
1767 #ifdef VK_USE_PLATFORM_FUCHSIA
1768     VkResult on_vkGetMemoryZirconHandleFUCHSIA(
1769         void*, VkResult,
1770         VkDevice device,
1771         const VkMemoryGetZirconHandleInfoFUCHSIA* pInfo,
1772         uint32_t* pHandle) {
1773 
1774         if (!pInfo) return VK_ERROR_INITIALIZATION_FAILED;
1775         if (!pInfo->memory) return VK_ERROR_INITIALIZATION_FAILED;
1776 
1777         AutoLock<RecursiveLock> lock(mLock);
1778 
1779         auto deviceIt = info_VkDevice.find(device);
1780 
1781         if (deviceIt == info_VkDevice.end()) {
1782             return VK_ERROR_INITIALIZATION_FAILED;
1783         }
1784 
1785         auto memoryIt = info_VkDeviceMemory.find(pInfo->memory);
1786 
1787         if (memoryIt == info_VkDeviceMemory.end()) {
1788             return VK_ERROR_INITIALIZATION_FAILED;
1789         }
1790 
1791         auto& info = memoryIt->second;
1792 
1793         if (info.vmoHandle == ZX_HANDLE_INVALID) {
1794             ALOGE("%s: memory cannot be exported", __func__);
1795             return VK_ERROR_INITIALIZATION_FAILED;
1796         }
1797 
1798         *pHandle = ZX_HANDLE_INVALID;
1799         zx_handle_duplicate(info.vmoHandle, ZX_RIGHT_SAME_RIGHTS, pHandle);
1800         return VK_SUCCESS;
1801     }
1802 
1803     VkResult on_vkGetMemoryZirconHandlePropertiesFUCHSIA(
1804         void*, VkResult,
1805         VkDevice device,
1806         VkExternalMemoryHandleTypeFlagBits handleType,
1807         uint32_t handle,
1808         VkMemoryZirconHandlePropertiesFUCHSIA* pProperties) {
1809         using fuchsia_hardware_goldfish::wire::kMemoryPropertyDeviceLocal;
1810         using fuchsia_hardware_goldfish::wire::kMemoryPropertyHostVisible;
1811 
1812         if (handleType !=
1813             VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA) {
1814             return VK_ERROR_INITIALIZATION_FAILED;
1815         }
1816 
1817         zx_info_handle_basic_t handleInfo;
1818         zx_status_t status = zx::unowned_vmo(handle)->get_info(
1819             ZX_INFO_HANDLE_BASIC, &handleInfo, sizeof(handleInfo), nullptr,
1820             nullptr);
1821         if (status != ZX_OK || handleInfo.type != ZX_OBJ_TYPE_VMO) {
1822             return VK_ERROR_INVALID_EXTERNAL_HANDLE;
1823         }
1824 
1825         AutoLock<RecursiveLock> lock(mLock);
1826 
1827         auto deviceIt = info_VkDevice.find(device);
1828 
1829         if (deviceIt == info_VkDevice.end()) {
1830             return VK_ERROR_INITIALIZATION_FAILED;
1831         }
1832 
1833         auto& info = deviceIt->second;
1834 
1835         zx::vmo vmo_dup;
1836         status =
1837             zx::unowned_vmo(handle)->duplicate(ZX_RIGHT_SAME_RIGHTS, &vmo_dup);
1838         if (status != ZX_OK) {
1839             ALOGE("zx_handle_duplicate() error: %d", status);
1840             return VK_ERROR_INITIALIZATION_FAILED;
1841         }
1842 
1843         uint32_t memoryProperty = 0u;
1844 
1845         auto result = mControlDevice->GetBufferHandleInfo(std::move(vmo_dup));
1846         if (!result.ok()) {
1847             ALOGE(
1848                 "mControlDevice->GetBufferHandleInfo fatal error: epitaph: %d",
1849                 result.status());
1850             return VK_ERROR_INITIALIZATION_FAILED;
1851         }
1852         if (result.value().is_ok()) {
1853             memoryProperty = result.value().value()->info.memory_property();
1854         } else if (result.value().error_value() == ZX_ERR_NOT_FOUND) {
1855             // If a VMO is allocated before its ColorBuffer/Buffer is created,
1856             // it must be a device-local buffer: for host-visible buffers, the
1857             // ColorBuffer/Buffer is created at sysmem allocation time.
1858             memoryProperty = kMemoryPropertyDeviceLocal;
1859         } else {
1860             // Importing read-only host memory into the Vulkan driver should not
1861             // work, but it is not an error to try to do so. Returning a
1862             // VkMemoryZirconHandlePropertiesFUCHSIA with no available
1863             // memoryType bits should be enough for clients. See fxbug.dev/24225
1864             // for other issues with this flow.
1865             ALOGW("GetBufferHandleInfo failed: %d", result.value().error_value());
1866             pProperties->memoryTypeBits = 0;
1867             return VK_SUCCESS;
1868         }
1869 
1870         pProperties->memoryTypeBits = 0;
1871         for (uint32_t i = 0; i < info.memProps.memoryTypeCount; ++i) {
1872             if (((memoryProperty & kMemoryPropertyDeviceLocal) &&
1873                  (info.memProps.memoryTypes[i].propertyFlags &
1874                   VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)) ||
1875                 ((memoryProperty & kMemoryPropertyHostVisible) &&
1876                  (info.memProps.memoryTypes[i].propertyFlags &
1877                   VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT))) {
1878                 pProperties->memoryTypeBits |= 1ull << i;
1879             }
1880         }
1881         return VK_SUCCESS;
1882     }
1883 
1884     zx_koid_t getEventKoid(zx_handle_t eventHandle) {
1885         if (eventHandle == ZX_HANDLE_INVALID) {
1886             return ZX_KOID_INVALID;
1887         }
1888 
1889         zx_info_handle_basic_t info;
1890         zx_status_t status =
1891             zx_object_get_info(eventHandle, ZX_INFO_HANDLE_BASIC, &info,
1892                                sizeof(info), nullptr, nullptr);
1893         if (status != ZX_OK) {
1894             ALOGE("Cannot get object info of handle %u: %d", eventHandle,
1895                   status);
1896             return ZX_KOID_INVALID;
1897         }
1898         return info.koid;
1899     }
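
    // A koid is the kernel object ID behind a handle; handles duplicated
    // from the same event share one koid, giving a stable identity, e.g.:
    //
    //     zx_handle_t dup = ZX_HANDLE_INVALID;
    //     zx_handle_duplicate(event, ZX_RIGHT_SAME_RIGHTS, &dup);
    //     assert(getEventKoid(event) == getEventKoid(dup));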
1900 
1901     VkResult on_vkImportSemaphoreZirconHandleFUCHSIA(
1902         void*, VkResult,
1903         VkDevice device,
1904         const VkImportSemaphoreZirconHandleInfoFUCHSIA* pInfo) {
1905 
1906         if (!pInfo) return VK_ERROR_INITIALIZATION_FAILED;
1907         if (!pInfo->semaphore) return VK_ERROR_INITIALIZATION_FAILED;
1908 
1909         AutoLock<RecursiveLock> lock(mLock);
1910 
1911         auto deviceIt = info_VkDevice.find(device);
1912 
1913         if (deviceIt == info_VkDevice.end()) {
1914             return VK_ERROR_INITIALIZATION_FAILED;
1915         }
1916 
1917         auto semaphoreIt = info_VkSemaphore.find(pInfo->semaphore);
1918 
1919         if (semaphoreIt == info_VkSemaphore.end()) {
1920             return VK_ERROR_INITIALIZATION_FAILED;
1921         }
1922 
1923         auto& info = semaphoreIt->second;
1924 
1925         if (info.eventHandle != ZX_HANDLE_INVALID) {
1926             zx_handle_close(info.eventHandle);
1927         }
1928 #if VK_HEADER_VERSION < 174
1929         info.eventHandle = pInfo->handle;
1930 #else // VK_HEADER_VERSION >= 174
1931         info.eventHandle = pInfo->zirconHandle;
1932 #endif // VK_HEADER_VERSION < 174
1933         if (info.eventHandle != ZX_HANDLE_INVALID) {
1934             info.eventKoid = getEventKoid(info.eventHandle);
1935         }
1936 
1937         return VK_SUCCESS;
1938     }
1939 
1940     VkResult on_vkGetSemaphoreZirconHandleFUCHSIA(
1941         void*, VkResult,
1942         VkDevice device,
1943         const VkSemaphoreGetZirconHandleInfoFUCHSIA* pInfo,
1944         uint32_t* pHandle) {
1945 
1946         if (!pInfo) return VK_ERROR_INITIALIZATION_FAILED;
1947         if (!pInfo->semaphore) return VK_ERROR_INITIALIZATION_FAILED;
1948 
1949         AutoLock<RecursiveLock> lock(mLock);
1950 
1951         auto deviceIt = info_VkDevice.find(device);
1952 
1953         if (deviceIt == info_VkDevice.end()) {
1954             return VK_ERROR_INITIALIZATION_FAILED;
1955         }
1956 
1957         auto semaphoreIt = info_VkSemaphore.find(pInfo->semaphore);
1958 
1959         if (semaphoreIt == info_VkSemaphore.end()) {
1960             return VK_ERROR_INITIALIZATION_FAILED;
1961         }
1962 
1963         auto& info = semaphoreIt->second;
1964 
1965         if (info.eventHandle == ZX_HANDLE_INVALID) {
1966             return VK_ERROR_INITIALIZATION_FAILED;
1967         }
1968 
1969         *pHandle = ZX_HANDLE_INVALID;
1970         zx_handle_duplicate(info.eventHandle, ZX_RIGHT_SAME_RIGHTS, pHandle);
1971         return VK_SUCCESS;
1972     }
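
    // Illustrative round trip (sketch only): a handle exported above can be
    // re-imported via on_vkImportSemaphoreZirconHandleFUCHSIA, which takes
    // ownership of the duplicated event:
    //
    //     uint32_t handle = 0;
    //     vkGetSemaphoreZirconHandleFUCHSIA(device, &getInfo, &handle);
    //     importInfo.zirconHandle = handle;  // VK_HEADER_VERSION >= 174
    //     vkImportSemaphoreZirconHandleFUCHSIA(device, &importInfo);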
1973 
1974     VkResult on_vkCreateBufferCollectionFUCHSIA(
1975         void*,
1976         VkResult,
1977         VkDevice,
1978         const VkBufferCollectionCreateInfoFUCHSIA* pInfo,
1979         const VkAllocationCallbacks*,
1980         VkBufferCollectionFUCHSIA* pCollection) {
1981         fidl::ClientEnd<::fuchsia_sysmem::BufferCollectionToken> token_client;
1982 
1983         if (pInfo->collectionToken) {
1984             token_client =
1985                 fidl::ClientEnd<::fuchsia_sysmem::BufferCollectionToken>(
1986                     zx::channel(pInfo->collectionToken));
1987         } else {
1988             auto endpoints = fidl::CreateEndpoints<
1989                 ::fuchsia_sysmem::BufferCollectionToken>();
1990             if (!endpoints.is_ok()) {
1991                 ALOGE("zx_channel_create failed: %d", endpoints.status_value());
1992                 return VK_ERROR_INITIALIZATION_FAILED;
1993             }
1994 
1995             auto result = mSysmemAllocator->AllocateSharedCollection(
1996                 std::move(endpoints->server));
1997             if (!result.ok()) {
1998                 ALOGE("AllocateSharedCollection failed: %d", result.status());
1999                 return VK_ERROR_INITIALIZATION_FAILED;
2000             }
2001             token_client = std::move(endpoints->client);
2002         }
2003 
2004         auto endpoints =
2005             fidl::CreateEndpoints<::fuchsia_sysmem::BufferCollection>();
2006         if (!endpoints.is_ok()) {
2007             ALOGE("zx_channel_create failed: %d", endpoints.status_value());
2008             return VK_ERROR_INITIALIZATION_FAILED;
2009         }
2010         auto [collection_client, collection_server] =
2011             std::move(endpoints.value());
2012 
2013         auto result = mSysmemAllocator->BindSharedCollection(
2014             std::move(token_client), std::move(collection_server));
2015         if (!result.ok()) {
2016             ALOGE("BindSharedCollection failed: %d", result.status());
2017             return VK_ERROR_INITIALIZATION_FAILED;
2018         }
2019 
2020         auto* sysmem_collection =
2021             new fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>(
2022                 std::move(collection_client));
2023         *pCollection =
2024             reinterpret_cast<VkBufferCollectionFUCHSIA>(sysmem_collection);
2025 
2026         register_VkBufferCollectionFUCHSIA(*pCollection);
2027         return VK_SUCCESS;
2028     }
2029 
2030     void on_vkDestroyBufferCollectionFUCHSIA(
2031         void*,
2032         VkResult,
2033         VkDevice,
2034         VkBufferCollectionFUCHSIA collection,
2035         const VkAllocationCallbacks*) {
2036         auto sysmem_collection = reinterpret_cast<
2037             fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(
2038             collection);
2039         if (sysmem_collection) {
2040             (*sysmem_collection)->Close();
2041         }
2042         delete sysmem_collection;
2043 
2044         unregister_VkBufferCollectionFUCHSIA(collection);
2045     }
2046 
2047     inline fuchsia_sysmem::wire::BufferCollectionConstraints
2048     defaultBufferCollectionConstraints(
2049         size_t minSizeBytes,
2050         size_t minBufferCount,
2051         size_t maxBufferCount = 0u,
2052         size_t minBufferCountForCamping = 0u,
2053         size_t minBufferCountForDedicatedSlack = 0u,
2054         size_t minBufferCountForSharedSlack = 0u) {
2055         fuchsia_sysmem::wire::BufferCollectionConstraints constraints = {};
2056         constraints.min_buffer_count = minBufferCount;
2057         if (maxBufferCount > 0) {
2058             constraints.max_buffer_count = maxBufferCount;
2059         }
2060         if (minBufferCountForCamping) {
2061             constraints.min_buffer_count_for_camping = minBufferCountForCamping;
2062         }
2063         if (minBufferCountForDedicatedSlack) {
2064             constraints.min_buffer_count_for_dedicated_slack =
2065                 minBufferCountForDedicatedSlack;
2066         }
2067         if (minBufferCountForSharedSlack) {
2068             constraints.min_buffer_count_for_shared_slack =
2069                 minBufferCountForSharedSlack;
2070         }
2067         constraints.has_buffer_memory_constraints = true;
2068         fuchsia_sysmem::wire::BufferMemoryConstraints& buffer_constraints =
2069             constraints.buffer_memory_constraints;
2070 
2071         buffer_constraints.min_size_bytes = minSizeBytes;
2072         buffer_constraints.max_size_bytes = 0xffffffff;
2073         buffer_constraints.physically_contiguous_required = false;
2074         buffer_constraints.secure_required = false;
2075 
2076         // No restrictions on coherency domain or Heaps.
2077         buffer_constraints.ram_domain_supported = true;
2078         buffer_constraints.cpu_domain_supported = true;
2079         buffer_constraints.inaccessible_domain_supported = true;
2080         buffer_constraints.heap_permitted_count = 2;
2081         buffer_constraints.heap_permitted[0] =
2082             fuchsia_sysmem::wire::HeapType::kGoldfishDeviceLocal;
2083         buffer_constraints.heap_permitted[1] =
2084             fuchsia_sysmem::wire::HeapType::kGoldfishHostVisible;
2085 
2086         return constraints;
2087     }
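
    // Example use of the helper above: a single 1 MiB buffer with no
    // camping/slack requirements inherits the goldfish heap list and the
    // 0xffffffff max size:
    //
    //     auto constraints = defaultBufferCollectionConstraints(
    //         1024 * 1024 /* minSizeBytes */, 1 /* minBufferCount */);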
2088 
2089     uint32_t getBufferCollectionConstraintsVulkanImageUsage(
2090         const VkImageCreateInfo* pImageInfo) {
2091         uint32_t usage = 0u;
2092         VkImageUsageFlags imageUsage = pImageInfo->usage;
2093 
2094 #define SetUsageBit(BIT, VALUE)                                  \
2095     if (imageUsage & VK_IMAGE_USAGE_##BIT##_BIT) {               \
2096         usage |= fuchsia_sysmem::wire::kVulkanImageUsage##VALUE; \
2097     }
2098 
2099         SetUsageBit(COLOR_ATTACHMENT, ColorAttachment);
2100         SetUsageBit(TRANSFER_SRC, TransferSrc);
2101         SetUsageBit(TRANSFER_DST, TransferDst);
2102         SetUsageBit(SAMPLED, Sampled);
2103 
2104 #undef SetUsageBit
2105         return usage;
2106     }
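
    // For reference, each SetUsageBit line above expands to a guarded OR;
    // e.g. SetUsageBit(SAMPLED, Sampled) becomes:
    //
    //     if (imageUsage & VK_IMAGE_USAGE_SAMPLED_BIT) {
    //         usage |= fuchsia_sysmem::wire::kVulkanImageUsageSampled;
    //     }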
2107 
2108     uint32_t getBufferCollectionConstraintsVulkanBufferUsage(
2109         VkBufferUsageFlags bufferUsage) {
2110         uint32_t usage = 0u;
2111 
2112 #define SetUsageBit(BIT, VALUE)                                   \
2113     if (bufferUsage & VK_BUFFER_USAGE_##BIT##_BIT) {              \
2114         usage |= fuchsia_sysmem::wire::kVulkanBufferUsage##VALUE; \
2115     }
2116 
2117         SetUsageBit(TRANSFER_SRC, TransferSrc);
2118         SetUsageBit(TRANSFER_DST, TransferDst);
2119         SetUsageBit(UNIFORM_TEXEL_BUFFER, UniformTexelBuffer);
2120         SetUsageBit(STORAGE_TEXEL_BUFFER, StorageTexelBuffer);
2121         SetUsageBit(UNIFORM_BUFFER, UniformBuffer);
2122         SetUsageBit(STORAGE_BUFFER, StorageBuffer);
2123         SetUsageBit(INDEX_BUFFER, IndexBuffer);
2124         SetUsageBit(VERTEX_BUFFER, VertexBuffer);
2125         SetUsageBit(INDIRECT_BUFFER, IndirectBuffer);
2126 
2127 #undef SetUsageBit
2128         return usage;
2129     }
2130 
2131     uint32_t getBufferCollectionConstraintsVulkanBufferUsage(
2132         const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo) {
2133         VkBufferUsageFlags bufferUsage =
2134             pBufferConstraintsInfo->createInfo.usage;
2135         return getBufferCollectionConstraintsVulkanBufferUsage(bufferUsage);
2136     }
2137 
2138     static fuchsia_sysmem::wire::PixelFormatType vkFormatTypeToSysmem(
2139         VkFormat format) {
2140         switch (format) {
2141             case VK_FORMAT_B8G8R8A8_SINT:
2142             case VK_FORMAT_B8G8R8A8_UNORM:
2143             case VK_FORMAT_B8G8R8A8_SRGB:
2144             case VK_FORMAT_B8G8R8A8_SNORM:
2145             case VK_FORMAT_B8G8R8A8_SSCALED:
2146             case VK_FORMAT_B8G8R8A8_USCALED:
2147                 return fuchsia_sysmem::wire::PixelFormatType::kBgra32;
2148             case VK_FORMAT_R8G8B8A8_SINT:
2149             case VK_FORMAT_R8G8B8A8_UNORM:
2150             case VK_FORMAT_R8G8B8A8_SRGB:
2151             case VK_FORMAT_R8G8B8A8_SNORM:
2152             case VK_FORMAT_R8G8B8A8_SSCALED:
2153             case VK_FORMAT_R8G8B8A8_USCALED:
2154                 return fuchsia_sysmem::wire::PixelFormatType::kR8G8B8A8;
2155             case VK_FORMAT_R8_UNORM:
2156             case VK_FORMAT_R8_UINT:
2157             case VK_FORMAT_R8_USCALED:
2158             case VK_FORMAT_R8_SNORM:
2159             case VK_FORMAT_R8_SINT:
2160             case VK_FORMAT_R8_SSCALED:
2161             case VK_FORMAT_R8_SRGB:
2162                 return fuchsia_sysmem::wire::PixelFormatType::kR8;
2163             case VK_FORMAT_R8G8_UNORM:
2164             case VK_FORMAT_R8G8_UINT:
2165             case VK_FORMAT_R8G8_USCALED:
2166             case VK_FORMAT_R8G8_SNORM:
2167             case VK_FORMAT_R8G8_SINT:
2168             case VK_FORMAT_R8G8_SSCALED:
2169             case VK_FORMAT_R8G8_SRGB:
2170                 return fuchsia_sysmem::wire::PixelFormatType::kR8G8;
2171             default:
2172                 return fuchsia_sysmem::wire::PixelFormatType::kInvalid;
2173         }
2174     }
2175 
2176     static bool vkFormatMatchesSysmemFormat(
2177         VkFormat vkFormat,
2178         fuchsia_sysmem::wire::PixelFormatType sysmemFormat) {
2179         switch (vkFormat) {
2180             case VK_FORMAT_B8G8R8A8_SINT:
2181             case VK_FORMAT_B8G8R8A8_UNORM:
2182             case VK_FORMAT_B8G8R8A8_SRGB:
2183             case VK_FORMAT_B8G8R8A8_SNORM:
2184             case VK_FORMAT_B8G8R8A8_SSCALED:
2185             case VK_FORMAT_B8G8R8A8_USCALED:
2186                 return sysmemFormat ==
2187                        fuchsia_sysmem::wire::PixelFormatType::kBgra32;
2188             case VK_FORMAT_R8G8B8A8_SINT:
2189             case VK_FORMAT_R8G8B8A8_UNORM:
2190             case VK_FORMAT_R8G8B8A8_SRGB:
2191             case VK_FORMAT_R8G8B8A8_SNORM:
2192             case VK_FORMAT_R8G8B8A8_SSCALED:
2193             case VK_FORMAT_R8G8B8A8_USCALED:
2194                 return sysmemFormat ==
2195                        fuchsia_sysmem::wire::PixelFormatType::kR8G8B8A8;
2196             case VK_FORMAT_R8_UNORM:
2197             case VK_FORMAT_R8_UINT:
2198             case VK_FORMAT_R8_USCALED:
2199             case VK_FORMAT_R8_SNORM:
2200             case VK_FORMAT_R8_SINT:
2201             case VK_FORMAT_R8_SSCALED:
2202             case VK_FORMAT_R8_SRGB:
2203                 return sysmemFormat ==
2204                            fuchsia_sysmem::wire::PixelFormatType::kR8 ||
2205                        sysmemFormat ==
2206                            fuchsia_sysmem::wire::PixelFormatType::kL8;
2207             case VK_FORMAT_R8G8_UNORM:
2208             case VK_FORMAT_R8G8_UINT:
2209             case VK_FORMAT_R8G8_USCALED:
2210             case VK_FORMAT_R8G8_SNORM:
2211             case VK_FORMAT_R8G8_SINT:
2212             case VK_FORMAT_R8G8_SSCALED:
2213             case VK_FORMAT_R8G8_SRGB:
2214                 return sysmemFormat ==
2215                        fuchsia_sysmem::wire::PixelFormatType::kR8G8;
2216             default:
2217                 return false;
2218         }
2219     }
2220 
2221     static VkFormat sysmemPixelFormatTypeToVk(
2222         fuchsia_sysmem::wire::PixelFormatType format) {
2223         switch (format) {
2224             case fuchsia_sysmem::wire::PixelFormatType::kBgra32:
2225                 return VK_FORMAT_B8G8R8A8_SRGB;
2226             case fuchsia_sysmem::wire::PixelFormatType::kR8G8B8A8:
2227                 return VK_FORMAT_R8G8B8A8_SRGB;
2228             case fuchsia_sysmem::wire::PixelFormatType::kL8:
2229             case fuchsia_sysmem::wire::PixelFormatType::kR8:
2230                 return VK_FORMAT_R8_UNORM;
2231             case fuchsia_sysmem::wire::PixelFormatType::kR8G8:
2232                 return VK_FORMAT_R8G8_UNORM;
2233             default:
2234                 return VK_FORMAT_UNDEFINED;
2235         }
2236     }
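
    // The mappings above are lossy rather than mutually inverse: every
    // VK_FORMAT_B8G8R8A8_* variant maps to kBgra32, while kBgra32 maps back
    // to the single representative VK_FORMAT_B8G8R8A8_SRGB:
    //
    //     vkFormatTypeToSysmem(VK_FORMAT_B8G8R8A8_UNORM);  // kBgra32
    //     sysmemPixelFormatTypeToVk(
    //         fuchsia_sysmem::wire::PixelFormatType::kBgra32);  // ..._SRGB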
2237 
2238     // TODO(fxbug.dev/90856): This is currently only used for allocating
2239     // memory for dedicated external images. It should be migrated to use
2240     // SetBufferCollectionImageConstraintsFUCHSIA.
2241     VkResult setBufferCollectionConstraintsFUCHSIA(
2242         VkEncoder* enc,
2243         VkDevice device,
2244         fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>* collection,
2245         const VkImageCreateInfo* pImageInfo) {
2246         if (pImageInfo == nullptr) {
2247             ALOGE("setBufferCollectionConstraints: pImageInfo cannot be null.");
2248             return VK_ERROR_OUT_OF_DEVICE_MEMORY;
2249         }
2250 
2251         const VkSysmemColorSpaceFUCHSIA kDefaultColorSpace = {
2252             .sType = VK_STRUCTURE_TYPE_SYSMEM_COLOR_SPACE_FUCHSIA,
2253             .pNext = nullptr,
2254             .colorSpace = static_cast<uint32_t>(
2255                 fuchsia_sysmem::wire::ColorSpaceType::kSrgb),
2256         };
2257 
2258         std::vector<VkImageFormatConstraintsInfoFUCHSIA> formatInfos;
2259         if (pImageInfo->format == VK_FORMAT_UNDEFINED) {
2260             const auto kFormats = {
2261                 VK_FORMAT_B8G8R8A8_SRGB,
2262                 VK_FORMAT_R8G8B8A8_SRGB,
2263             };
2264             for (auto format : kFormats) {
2265                 // shallow copy, using pNext from pImageInfo directly.
2266                 auto createInfo = *pImageInfo;
2267                 createInfo.format = format;
2268                 formatInfos.push_back(VkImageFormatConstraintsInfoFUCHSIA{
2269                     .sType =
2270                         VK_STRUCTURE_TYPE_IMAGE_FORMAT_CONSTRAINTS_INFO_FUCHSIA,
2271                     .pNext = nullptr,
2272                     .imageCreateInfo = createInfo,
2273                     .colorSpaceCount = 1,
2274                     .pColorSpaces = &kDefaultColorSpace,
2275                 });
2276             }
2277         } else {
2278             formatInfos.push_back(VkImageFormatConstraintsInfoFUCHSIA{
2279                 .sType =
2280                     VK_STRUCTURE_TYPE_IMAGE_FORMAT_CONSTRAINTS_INFO_FUCHSIA,
2281                 .pNext = nullptr,
2282                 .imageCreateInfo = *pImageInfo,
2283                 .colorSpaceCount = 1,
2284                 .pColorSpaces = &kDefaultColorSpace,
2285             });
2286         }
2287 
2288         VkImageConstraintsInfoFUCHSIA imageConstraints = {
2289             .sType = VK_STRUCTURE_TYPE_IMAGE_CONSTRAINTS_INFO_FUCHSIA,
2290             .pNext = nullptr,
2291             .formatConstraintsCount = static_cast<uint32_t>(formatInfos.size()),
2292             .pFormatConstraints = formatInfos.data(),
2293             .bufferCollectionConstraints =
2294                 VkBufferCollectionConstraintsInfoFUCHSIA{
2295                     .sType =
2296                         VK_STRUCTURE_TYPE_BUFFER_COLLECTION_CONSTRAINTS_INFO_FUCHSIA,
2297                     .pNext = nullptr,
2298                     .minBufferCount = 1,
2299                     .maxBufferCount = 0,
2300                     .minBufferCountForCamping = 0,
2301                     .minBufferCountForDedicatedSlack = 0,
2302                     .minBufferCountForSharedSlack = 0,
2303                 },
2304             .flags = 0u,
2305         };
2306 
2307         return setBufferCollectionImageConstraintsFUCHSIA(
2308             enc, device, collection, &imageConstraints);
2309     }
2310 
2305     VkResult addImageBufferCollectionConstraintsFUCHSIA(
2312         VkEncoder* enc,
2313         VkDevice device,
2314         VkPhysicalDevice physicalDevice,
2315         const VkImageFormatConstraintsInfoFUCHSIA*
2316             formatConstraints,  // always non-null
2317         VkImageTiling tiling,
2318         fuchsia_sysmem::wire::BufferCollectionConstraints* constraints) {
2319         // First check if the format, tiling and usage is supported on host.
2320         VkImageFormatProperties imageFormatProperties;
2321         auto createInfo = &formatConstraints->imageCreateInfo;
2322         auto result = enc->vkGetPhysicalDeviceImageFormatProperties(
2323             physicalDevice, createInfo->format, createInfo->imageType, tiling,
2324             createInfo->usage, createInfo->flags, &imageFormatProperties,
2325             true /* do lock */);
2326         if (result != VK_SUCCESS) {
2327             ALOGD(
2328                 "%s: Image format (%u) type (%u) tiling (%u) "
2329                 "usage (%u) flags (%u) not supported by physical "
2330                 "device",
2331                 __func__, static_cast<uint32_t>(createInfo->format),
2332                 static_cast<uint32_t>(createInfo->imageType),
2333                 static_cast<uint32_t>(tiling),
2334                 static_cast<uint32_t>(createInfo->usage),
2335                 static_cast<uint32_t>(createInfo->flags));
2336             return VK_ERROR_FORMAT_NOT_SUPPORTED;
2337         }
2338 
2339         // Check if format constraints contains unsupported format features.
2340         {
2341             VkFormatProperties formatProperties;
2342             enc->vkGetPhysicalDeviceFormatProperties(
2343                 physicalDevice, createInfo->format, &formatProperties,
2344                 true /* do lock */);
2345 
2346             auto supportedFeatures =
2347                 (tiling == VK_IMAGE_TILING_LINEAR)
2348                     ? formatProperties.linearTilingFeatures
2349                     : formatProperties.optimalTilingFeatures;
2350             auto requiredFeatures = formatConstraints->requiredFormatFeatures;
2351             if ((~supportedFeatures) & requiredFeatures) {
2352                 ALOGD(
2353                     "%s: Host device supported features for %s tiling: %08x, "
2354                     "required features: %08x, feature bits %08x missing",
2355                     __func__,
2356                     tiling == VK_IMAGE_TILING_LINEAR ? "LINEAR" : "OPTIMAL",
2357                     static_cast<uint32_t>(supportedFeatures),
2358                     static_cast<uint32_t>(requiredFeatures),
2359                     static_cast<uint32_t>((~supportedFeatures) &
2360                                           requiredFeatures));
2361                 return VK_ERROR_FORMAT_NOT_SUPPORTED;
2362             }
2363         }
2364 
2365         fuchsia_sysmem::wire::ImageFormatConstraints imageConstraints;
2366         if (formatConstraints->sysmemPixelFormat != 0) {
2367             auto pixelFormat =
2368                 static_cast<fuchsia_sysmem::wire::PixelFormatType>(
2369                     formatConstraints->sysmemPixelFormat);
2370             if (createInfo->format != VK_FORMAT_UNDEFINED &&
2371                 !vkFormatMatchesSysmemFormat(createInfo->format, pixelFormat)) {
2372                 ALOGD("%s: VkFormat %u doesn't match sysmem pixelFormat %lu",
2373                       __func__, static_cast<uint32_t>(createInfo->format),
2374                       formatConstraints->sysmemPixelFormat);
2375                 return VK_ERROR_FORMAT_NOT_SUPPORTED;
2376             }
2377             imageConstraints.pixel_format.type = pixelFormat;
2378         } else {
2379             auto pixel_format = vkFormatTypeToSysmem(createInfo->format);
2380             if (pixel_format ==
2381                 fuchsia_sysmem::wire::PixelFormatType::kInvalid) {
2382                 ALOGD("%s: Unsupported VkFormat %u", __func__,
2383                       static_cast<uint32_t>(createInfo->format));
2384                 return VK_ERROR_FORMAT_NOT_SUPPORTED;
2385             }
2386             imageConstraints.pixel_format.type = pixel_format;
2387         }
2388 
2389         imageConstraints.color_spaces_count =
2390             formatConstraints->colorSpaceCount;
2391         for (size_t i = 0; i < formatConstraints->colorSpaceCount; i++) {
2392             imageConstraints.color_space[i].type =
2393                 static_cast<fuchsia_sysmem::wire::ColorSpaceType>(
2394                     formatConstraints->pColorSpaces[i].colorSpace);
2395         }
2396 
2397         // Get row alignment from host GPU.
2398         VkDeviceSize offset = 0;
2399         VkDeviceSize rowPitchAlignment = 1u;
2400 
2401         if (tiling == VK_IMAGE_TILING_LINEAR) {
2402             VkImageCreateInfo createInfoDup = *createInfo;
2403             createInfoDup.pNext = nullptr;
2404             enc->vkGetLinearImageLayout2GOOGLE(device, &createInfoDup, &offset,
2405                                             &rowPitchAlignment,
2406                                             true /* do lock */);
2407             D("vkGetLinearImageLayout2GOOGLE: format %d offset %lu "
2408               "rowPitchAlignment = %lu",
2409               (int)createInfo->format, offset, rowPitchAlignment);
2410         }
2411 
2412         imageConstraints.min_coded_width = createInfo->extent.width;
2413         imageConstraints.max_coded_width = 0xffffffff;
2414         imageConstraints.min_coded_height = createInfo->extent.height;
2415         imageConstraints.max_coded_height = 0xffffffff;
2416         // The min_bytes_per_row can be calculated by sysmem using
2417         // |min_coded_width|, |bytes_per_row_divisor| and color format.
2418         imageConstraints.min_bytes_per_row = 0;
2419         imageConstraints.max_bytes_per_row = 0xffffffff;
2420         imageConstraints.max_coded_width_times_coded_height = 0xffffffff;
2421 
2422         imageConstraints.layers = 1;
2423         imageConstraints.coded_width_divisor = 1;
2424         imageConstraints.coded_height_divisor = 1;
2425         imageConstraints.bytes_per_row_divisor = rowPitchAlignment;
2426         imageConstraints.start_offset_divisor = 1;
2427         imageConstraints.display_width_divisor = 1;
2428         imageConstraints.display_height_divisor = 1;
2429         imageConstraints.pixel_format.has_format_modifier = true;
2430         imageConstraints.pixel_format.format_modifier.value =
2431             (tiling == VK_IMAGE_TILING_LINEAR)
2432                 ? fuchsia_sysmem::wire::kFormatModifierLinear
2433                 : fuchsia_sysmem::wire::kFormatModifierGoogleGoldfishOptimal;
2434 
2435         constraints->image_format_constraints
2436             [constraints->image_format_constraints_count++] = imageConstraints;
2437         return VK_SUCCESS;
2438     }
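
    // Worked example of the row-pitch plumbing above: a 100-pixel-wide
    // linear R8G8B8A8 image (4 bytes per pixel) with rowPitchAlignment == 64
    // leads sysmem to round each row up via bytes_per_row_divisor:
    //
    //     100 * 4 = 400 bytes  ->  rounded up to 448 (7 * 64)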
2439 
2440     struct SetBufferCollectionImageConstraintsResult {
2441         VkResult result;
2442         fuchsia_sysmem::wire::BufferCollectionConstraints constraints;
2443         std::vector<uint32_t> createInfoIndex;
2444     };
2445 
2446     SetBufferCollectionImageConstraintsResult
2447     setBufferCollectionImageConstraintsImpl(
2448         VkEncoder* enc,
2449         VkDevice device,
2450         fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>* pCollection,
2451         const VkImageConstraintsInfoFUCHSIA* pImageConstraintsInfo) {
2452         const auto& collection = *pCollection;
2453         if (!pImageConstraintsInfo ||
2454              pImageConstraintsInfo->sType !=
2455                  VK_STRUCTURE_TYPE_IMAGE_CONSTRAINTS_INFO_FUCHSIA) {
2456             ALOGE("%s: invalid pImageConstraintsInfo", __func__);
2457             return {VK_ERROR_INITIALIZATION_FAILED};
2458         }
2459 
2460         if (pImageConstraintsInfo->formatConstraintsCount == 0) {
2461             ALOGE("%s: formatConstraintsCount must be greater than 0",
2462                   __func__);
2463             abort();
2464         }
2465 
2466         fuchsia_sysmem::wire::BufferCollectionConstraints constraints =
2467             defaultBufferCollectionConstraints(
2468                 /* min_size_bytes */ 0,
2469                 pImageConstraintsInfo->bufferCollectionConstraints
2470                     .minBufferCount,
2471                 pImageConstraintsInfo->bufferCollectionConstraints
2472                     .maxBufferCount,
2473                 pImageConstraintsInfo->bufferCollectionConstraints
2474                     .minBufferCountForCamping,
2475                 pImageConstraintsInfo->bufferCollectionConstraints
2476                     .minBufferCountForDedicatedSlack,
2477                 pImageConstraintsInfo->bufferCollectionConstraints
2478                     .minBufferCountForSharedSlack);
2479 
2480         std::vector<fuchsia_sysmem::wire::ImageFormatConstraints>
2481             format_constraints;
2482 
2483         VkPhysicalDevice physicalDevice;
2484         {
2485             AutoLock<RecursiveLock> lock(mLock);
2486             auto deviceIt = info_VkDevice.find(device);
2487             if (deviceIt == info_VkDevice.end()) {
2488                 return {VK_ERROR_INITIALIZATION_FAILED};
2489             }
2490             physicalDevice = deviceIt->second.physdev;
2491         }
2492 
2493         std::vector<uint32_t> createInfoIndex;
2494 
2495         bool hasOptimalTiling = false;
2496         for (uint32_t i = 0; i < pImageConstraintsInfo->formatConstraintsCount;
2497              i++) {
2498             const VkImageCreateInfo* createInfo =
2499                 &pImageConstraintsInfo->pFormatConstraints[i].imageCreateInfo;
2500             const VkImageFormatConstraintsInfoFUCHSIA* formatConstraints =
2501                 &pImageConstraintsInfo->pFormatConstraints[i];
2502 
2503             // Add ImageFormatConstraints for *optimal* tiling
2504             VkResult optimalResult = VK_ERROR_FORMAT_NOT_SUPPORTED;
2505             if (createInfo->tiling == VK_IMAGE_TILING_OPTIMAL) {
2506                 optimalResult = addImageBufferCollectionConstraintsFUCHSIA(
2507                     enc, device, physicalDevice, formatConstraints,
2508                     VK_IMAGE_TILING_OPTIMAL, &constraints);
2509                 if (optimalResult == VK_SUCCESS) {
2510                     createInfoIndex.push_back(i);
2511                     hasOptimalTiling = true;
2512                 }
2513             }
2514 
2515             // Add ImageFormatConstraints for *linear* tiling
2516             VkResult linearResult = addImageBufferCollectionConstraintsFUCHSIA(
2517                 enc, device, physicalDevice, formatConstraints,
2518                 VK_IMAGE_TILING_LINEAR, &constraints);
2519             if (linearResult == VK_SUCCESS) {
2520                 createInfoIndex.push_back(i);
2521             }
2522 
2523             // Update usage and BufferMemoryConstraints
2524             if (linearResult == VK_SUCCESS || optimalResult == VK_SUCCESS) {
2525                 constraints.usage.vulkan |=
2526                     getBufferCollectionConstraintsVulkanImageUsage(createInfo);
2527 
2528                 if (formatConstraints && formatConstraints->flags) {
2529                     ALOGW(
2530                         "%s: Non-zero flags (%08x) in image format "
2531                         "constraints; this is currently not supported, see "
2532                         "fxbug.dev/68833.",
2533                         __func__, formatConstraints->flags);
2534                 }
2535             }
2536         }
2537 
2538         // Set buffer memory constraints based on optimal/linear tiling support
2539         // and flags.
2540         VkImageConstraintsInfoFlagsFUCHSIA flags = pImageConstraintsInfo->flags;
2541         if (flags & VK_IMAGE_CONSTRAINTS_INFO_CPU_READ_RARELY_FUCHSIA)
2542             constraints.usage.cpu |= fuchsia_sysmem::wire::kCpuUsageRead;
2543         if (flags & VK_IMAGE_CONSTRAINTS_INFO_CPU_READ_OFTEN_FUCHSIA)
2544             constraints.usage.cpu |= fuchsia_sysmem::wire::kCpuUsageReadOften;
2545         if (flags & VK_IMAGE_CONSTRAINTS_INFO_CPU_WRITE_RARELY_FUCHSIA)
2546             constraints.usage.cpu |= fuchsia_sysmem::wire::kCpuUsageWrite;
2547         if (flags & VK_IMAGE_CONSTRAINTS_INFO_CPU_WRITE_OFTEN_FUCHSIA)
2548             constraints.usage.cpu |= fuchsia_sysmem::wire::kCpuUsageWriteOften;
2549 
2550         constraints.has_buffer_memory_constraints = true;
2551         auto& memory_constraints = constraints.buffer_memory_constraints;
2552         memory_constraints.cpu_domain_supported = true;
2553         memory_constraints.ram_domain_supported = true;
2554         memory_constraints.inaccessible_domain_supported =
2555             hasOptimalTiling &&
2556             !(flags & (VK_IMAGE_CONSTRAINTS_INFO_CPU_READ_RARELY_FUCHSIA |
2557                        VK_IMAGE_CONSTRAINTS_INFO_CPU_READ_OFTEN_FUCHSIA |
2558                        VK_IMAGE_CONSTRAINTS_INFO_CPU_WRITE_RARELY_FUCHSIA |
2559                        VK_IMAGE_CONSTRAINTS_INFO_CPU_WRITE_OFTEN_FUCHSIA));
2560 
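        // Heap-selection sketch (describing the branch below): if the
        // collection can live in the INACCESSIBLE domain (optimal tiling was
        // requested and no CPU usage flags are set), the device-local
        // goldfish heap is also permitted; otherwise only the host-visible
        // goldfish heap is usable, since CPU reads/writes need a guest
        // mapping.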
2561         if (memory_constraints.inaccessible_domain_supported) {
2562             memory_constraints.heap_permitted_count = 2;
2563             memory_constraints.heap_permitted[0] =
2564                 fuchsia_sysmem::wire::HeapType::kGoldfishDeviceLocal;
2565             memory_constraints.heap_permitted[1] =
2566                 fuchsia_sysmem::wire::HeapType::kGoldfishHostVisible;
2567         } else {
2568             memory_constraints.heap_permitted_count = 1;
2569             memory_constraints.heap_permitted[0] =
2570                 fuchsia_sysmem::wire::HeapType::kGoldfishHostVisible;
2571         }
2572 
2573         if (constraints.image_format_constraints_count == 0) {
2574             ALOGE("%s: none of the specified formats is supported by the device",
2575                   __func__);
2576             return {VK_ERROR_FORMAT_NOT_SUPPORTED};
2577         }
2578 
2579         constexpr uint32_t kVulkanPriority = 5;
2580         const char kName[] = "GoldfishSysmemShared";
2581         collection->SetName(kVulkanPriority, fidl::StringView(kName));
2582 
2583         auto result = collection->SetConstraints(true, constraints);
2584         if (!result.ok()) {
2585             ALOGE("%s: SetConstraints failed: %d", __func__,
2586                   result.status());
2587             return {VK_ERROR_INITIALIZATION_FAILED};
2588         }
2589 
2590         return {VK_SUCCESS, constraints, std::move(createInfoIndex)};
2591     }
2592 
2593     VkResult setBufferCollectionImageConstraintsFUCHSIA(
2594         VkEncoder* enc,
2595         VkDevice device,
2596         fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>* pCollection,
2597         const VkImageConstraintsInfoFUCHSIA* pImageConstraintsInfo) {
2599 
2600         auto setConstraintsResult = setBufferCollectionImageConstraintsImpl(
2601             enc, device, pCollection, pImageConstraintsInfo);
2602         if (setConstraintsResult.result != VK_SUCCESS) {
2603             return setConstraintsResult.result;
2604         }
2605 
2606         // Copy the constraints into info_VkBufferCollectionFUCHSIA if
2607         // |pCollection| corresponds to a valid VkBufferCollectionFUCHSIA handle.
2608         AutoLock<RecursiveLock> lock(mLock);
2609         VkBufferCollectionFUCHSIA buffer_collection =
2610             reinterpret_cast<VkBufferCollectionFUCHSIA>(pCollection);
2611         if (info_VkBufferCollectionFUCHSIA.find(buffer_collection) !=
2612             info_VkBufferCollectionFUCHSIA.end()) {
2613             info_VkBufferCollectionFUCHSIA[buffer_collection].constraints =
2614                 android::base::makeOptional(
2615                     std::move(setConstraintsResult.constraints));
2616             info_VkBufferCollectionFUCHSIA[buffer_collection].createInfoIndex =
2617                 std::move(setConstraintsResult.createInfoIndex);
2618         }
2619 
2620         return VK_SUCCESS;
2621     }
2622 
2623     struct SetBufferCollectionBufferConstraintsResult {
2624         VkResult result;
2625         fuchsia_sysmem::wire::BufferCollectionConstraints constraints;
2626     };
2627 
2628     SetBufferCollectionBufferConstraintsResult
2629     setBufferCollectionBufferConstraintsImpl(
2630         fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>* pCollection,
2631         const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo) {
2632         const auto& collection = *pCollection;
2633         if (pBufferConstraintsInfo == nullptr) {
2634             ALOGE(
2635                 "setBufferCollectionBufferConstraints: "
2636                 "pBufferConstraintsInfo cannot be null.");
2637             return {VK_ERROR_OUT_OF_DEVICE_MEMORY};
2638         }
2639 
2640         fuchsia_sysmem::wire::BufferCollectionConstraints constraints =
2641             defaultBufferCollectionConstraints(
2642                 /* min_size_bytes */ pBufferConstraintsInfo->createInfo.size,
2643                 /* buffer_count */ pBufferConstraintsInfo
2644                     ->bufferCollectionConstraints.minBufferCount);
2645         constraints.usage.vulkan =
2646             getBufferCollectionConstraintsVulkanBufferUsage(
2647                 pBufferConstraintsInfo);
2648 
2649         constexpr uint32_t kVulkanPriority = 5;
2650         const char kName[] = "GoldfishBufferSysmemShared";
2651         collection->SetName(kVulkanPriority, fidl::StringView(kName));
2652 
2653         auto result = collection->SetConstraints(true, constraints);
2654         if (!result.ok()) {
2655             ALOGE("%s: SetConstraints failed: %d", __func__,
2656                   result.status());
2657             return {VK_ERROR_OUT_OF_DEVICE_MEMORY};
2658         }
2659 
2660         return {VK_SUCCESS, constraints};
2661     }
2662 
2663     VkResult setBufferCollectionBufferConstraintsFUCHSIA(
2664         fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>* pCollection,
2665         const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo) {
2666         auto setConstraintsResult = setBufferCollectionBufferConstraintsImpl(
2667             pCollection, pBufferConstraintsInfo);
2668         if (setConstraintsResult.result != VK_SUCCESS) {
2669             return setConstraintsResult.result;
2670         }
2671 
2672         // Copy the constraints into info_VkBufferCollectionFUCHSIA if
2673         // |pCollection| corresponds to a valid VkBufferCollectionFUCHSIA handle.
2674         AutoLock<RecursiveLock> lock(mLock);
2675         VkBufferCollectionFUCHSIA buffer_collection =
2676             reinterpret_cast<VkBufferCollectionFUCHSIA>(pCollection);
2677         if (info_VkBufferCollectionFUCHSIA.find(buffer_collection) !=
2678             info_VkBufferCollectionFUCHSIA.end()) {
2679             info_VkBufferCollectionFUCHSIA[buffer_collection].constraints =
2680                 android::base::makeOptional(setConstraintsResult.constraints);
2681         }
2682 
2683         return VK_SUCCESS;
2684     }
2685 
2686     VkResult on_vkSetBufferCollectionImageConstraintsFUCHSIA(
2687         void* context,
2688         VkResult,
2689         VkDevice device,
2690         VkBufferCollectionFUCHSIA collection,
2691         const VkImageConstraintsInfoFUCHSIA* pImageConstraintsInfo) {
2692         VkEncoder* enc = (VkEncoder*)context;
2693         auto sysmem_collection = reinterpret_cast<
2694             fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(
2695             collection);
2696         return setBufferCollectionImageConstraintsFUCHSIA(
2697             enc, device, sysmem_collection, pImageConstraintsInfo);
2698     }
2699 
2700     VkResult on_vkSetBufferCollectionBufferConstraintsFUCHSIA(
2701         void*,
2702         VkResult,
2703         VkDevice,
2704         VkBufferCollectionFUCHSIA collection,
2705         const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo) {
2706         auto sysmem_collection = reinterpret_cast<
2707             fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(
2708             collection);
2709         return setBufferCollectionBufferConstraintsFUCHSIA(
2710             sysmem_collection, pBufferConstraintsInfo);
2711     }
2712 
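    // Maps the image format constraints that sysmem actually chose for an
    // allocated collection back to the index of the VkImageCreateInfo that
    // was recorded when constraints were set; the first recorded entry
    // compatible with the allocation wins. Must be called with mLock held.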
2713     VkResult getBufferCollectionImageCreateInfoIndexLocked(
2714         VkBufferCollectionFUCHSIA collection,
2715         fuchsia_sysmem::wire::BufferCollectionInfo2& info,
2716         uint32_t* outCreateInfoIndex) {
2717         if (!info_VkBufferCollectionFUCHSIA[collection]
2718                  .constraints.hasValue()) {
2719             ALOGE("%s: constraints not set", __func__);
2720             return VK_ERROR_OUT_OF_DEVICE_MEMORY;
2721         }
2722 
2723         if (!info.settings.has_image_format_constraints) {
2724             // no image format constraints, skip getting createInfoIndex.
2725             return VK_SUCCESS;
2726         }
2727 
2728         const auto& constraints =
2729             *info_VkBufferCollectionFUCHSIA[collection].constraints;
2730         const auto& createInfoIndices =
2731             info_VkBufferCollectionFUCHSIA[collection].createInfoIndex;
2732         const auto& out = info.settings.image_format_constraints;
2733         bool foundCreateInfo = false;
2734 
2735         for (size_t imageFormatIndex = 0;
2736              imageFormatIndex < constraints.image_format_constraints_count;
2737              imageFormatIndex++) {
2738             const auto& in =
2739                 constraints.image_format_constraints[imageFormatIndex];
2740             // These checks are sorted in order of how often they're expected to
2741             // mismatch, from most likely to least likely. They aren't always
2742             // equality comparisons, since sysmem may change some values in
2743             // compatible ways on behalf of the other participants.
2744             if ((out.pixel_format.type != in.pixel_format.type) ||
2745                 (out.pixel_format.has_format_modifier !=
2746                  in.pixel_format.has_format_modifier) ||
2747                 (out.pixel_format.format_modifier.value !=
2748                  in.pixel_format.format_modifier.value) ||
2749                 (out.min_bytes_per_row < in.min_bytes_per_row) ||
2750                 (out.required_max_coded_width < in.required_max_coded_width) ||
2751                 (out.required_max_coded_height <
2752                  in.required_max_coded_height) ||
2753                 (in.bytes_per_row_divisor != 0 &&
2754                  out.bytes_per_row_divisor % in.bytes_per_row_divisor != 0)) {
2755                 continue;
2756             }
2757             // Check if the out color spaces are a subset of the in color spaces.
2758             bool all_color_spaces_found = true;
2759             for (uint32_t j = 0; j < out.color_spaces_count; j++) {
2760                 bool found_matching_color_space = false;
2761                 for (uint32_t k = 0; k < in.color_spaces_count; k++) {
2762                     if (out.color_space[j].type == in.color_space[k].type) {
2763                         found_matching_color_space = true;
2764                         break;
2765                     }
2766                 }
2767                 if (!found_matching_color_space) {
2768                     all_color_spaces_found = false;
2769                     break;
2770                 }
2771             }
2772             if (!all_color_spaces_found) {
2773                 continue;
2774             }
2775 
2776             // Choose the first valid format for now.
2777             *outCreateInfoIndex = createInfoIndices[imageFormatIndex];
2778             return VK_SUCCESS;
2779         }
2780 
2781         ALOGE("%s: cannot find a valid image format in constraints", __func__);
2782         return VK_ERROR_OUT_OF_DEVICE_MEMORY;
2783     }
2784 
2785     VkResult on_vkGetBufferCollectionPropertiesFUCHSIA(
2786         void* context,
2787         VkResult,
2788         VkDevice device,
2789         VkBufferCollectionFUCHSIA collection,
2790         VkBufferCollectionPropertiesFUCHSIA* pProperties) {
2791         VkEncoder* enc = (VkEncoder*)context;
2792         const auto& sysmem_collection = *reinterpret_cast<
2793             fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(
2794             collection);
2795 
2796         auto result = sysmem_collection->WaitForBuffersAllocated();
2797         if (!result.ok() || result->status != ZX_OK) {
2798             ALOGE("Failed wait for allocation: %d %d", result.status(),
2799                   GET_STATUS_SAFE(result, status));
2800             return VK_ERROR_INITIALIZATION_FAILED;
2801         }
2802         fuchsia_sysmem::wire::BufferCollectionInfo2 info =
2803             std::move(result->buffer_collection_info);
2804 
2805         bool is_host_visible =
2806             info.settings.buffer_settings.heap ==
2807             fuchsia_sysmem::wire::HeapType::kGoldfishHostVisible;
2808         bool is_device_local =
2809             info.settings.buffer_settings.heap ==
2810             fuchsia_sysmem::wire::HeapType::kGoldfishDeviceLocal;
2811         if (!is_host_visible && !is_device_local) {
2812             ALOGE("buffer collection uses a non-goldfish heap (type 0x%lx)",
2813                   static_cast<uint64_t>(info.settings.buffer_settings.heap));
2814             return VK_ERROR_INITIALIZATION_FAILED;
2815         }
2816 
2817         // memoryTypeBits
2818         // ====================================================================
2819         {
2820             AutoLock<RecursiveLock> lock(mLock);
2821             auto deviceIt = info_VkDevice.find(device);
2822             if (deviceIt == info_VkDevice.end()) {
2823                 return VK_ERROR_INITIALIZATION_FAILED;
2824             }
2825             auto& deviceInfo = deviceIt->second;
2826 
2827             // Device local memory type supported.
2828             pProperties->memoryTypeBits = 0;
2829             for (uint32_t i = 0; i < deviceInfo.memProps.memoryTypeCount; ++i) {
2830                 if ((is_device_local &&
2831                      (deviceInfo.memProps.memoryTypes[i].propertyFlags &
2832                       VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)) ||
2833                     (is_host_visible &&
2834                      (deviceInfo.memProps.memoryTypes[i].propertyFlags &
2835                       VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT))) {
2836                     pProperties->memoryTypeBits |= 1ull << i;
2837                 }
2838             }
2839         }
2840 
2841         // bufferCount
2842         // ====================================================================
2843         pProperties->bufferCount = info.buffer_count;
2844 
2845         auto storeProperties = [this, collection, pProperties]() -> VkResult {
2846             // Store the computed properties in the collection's info entry.
2847             AutoLock<RecursiveLock> lock(mLock);
2848             if (info_VkBufferCollectionFUCHSIA.find(collection) ==
2849                 info_VkBufferCollectionFUCHSIA.end()) {
2850                 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
2851             }
2852 
2853             info_VkBufferCollectionFUCHSIA[collection].properties =
2854                 android::base::makeOptional(*pProperties);
2855 
2856             // We only do a shallow copy so we should remove all pNext pointers.
2857             info_VkBufferCollectionFUCHSIA[collection].properties->pNext =
2858                 nullptr;
2859             info_VkBufferCollectionFUCHSIA[collection]
2860                 .properties->sysmemColorSpaceIndex.pNext = nullptr;
2861             return VK_SUCCESS;
2862         };
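        // storeProperties() is deliberately deferred and shared between the
        // early return below (collections without image format constraints)
        // and the full path, so the cached copy is written exactly once per
        // query.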
2863 
2864         // The fields below only apply to buffer collections with image formats.
2865         if (!info.settings.has_image_format_constraints) {
2866             ALOGD("%s: buffer collection doesn't have image format constraints",
2867                   __func__);
2868             return storeProperties();
2869         }
2870 
2871         // sysmemPixelFormat
2872         // ====================================================================
2873 
2874         pProperties->sysmemPixelFormat = static_cast<uint64_t>(
2875             info.settings.image_format_constraints.pixel_format.type);
2876 
2877         // colorSpace
2878         // ====================================================================
2879         if (info.settings.image_format_constraints.color_spaces_count == 0) {
2880             ALOGE(
2881                 "%s: color space missing from allocated buffer collection "
2882                 "constraints",
2883                 __func__);
2884             return VK_ERROR_OUT_OF_DEVICE_MEMORY;
2885         }
2886         // Only report first colorspace for now.
2887         pProperties->sysmemColorSpaceIndex.colorSpace = static_cast<uint32_t>(
2888             info.settings.image_format_constraints.color_space[0].type);
2889 
2890         // createInfoIndex
2891         // ====================================================================
2892         {
2893             AutoLock<RecursiveLock> lock(mLock);
2894             auto getIndexResult = getBufferCollectionImageCreateInfoIndexLocked(
2895                 collection, info, &pProperties->createInfoIndex);
2896             if (getIndexResult != VK_SUCCESS) {
2897                 return getIndexResult;
2898             }
2899         }
2900 
2901         // formatFeatures
2902         // ====================================================================
2903         VkPhysicalDevice physicalDevice;
2904         {
2905             AutoLock<RecursiveLock> lock(mLock);
2906             auto deviceIt = info_VkDevice.find(device);
2907             if (deviceIt == info_VkDevice.end()) {
2908                 return VK_ERROR_INITIALIZATION_FAILED;
2909             }
2910             physicalDevice = deviceIt->second.physdev;
2911         }
2912 
2913         VkFormat vkFormat = sysmemPixelFormatTypeToVk(
2914             info.settings.image_format_constraints.pixel_format.type);
2915         VkFormatProperties formatProperties;
2916         enc->vkGetPhysicalDeviceFormatProperties(
2917             physicalDevice, vkFormat, &formatProperties, true /* do lock */);
2918         if (is_device_local) {
2919             pProperties->formatFeatures =
2920                 formatProperties.optimalTilingFeatures;
2921         }
2922         if (is_host_visible) {
2923             pProperties->formatFeatures = formatProperties.linearTilingFeatures;
2924         }
2925 
2926         // YCbCr properties
2927         // ====================================================================
2928         // TODO(59804): Implement this correctly when we support YUV pixel
2929         // formats in goldfish ICD.
2930         pProperties->samplerYcbcrConversionComponents.r =
2931             VK_COMPONENT_SWIZZLE_IDENTITY;
2932         pProperties->samplerYcbcrConversionComponents.g =
2933             VK_COMPONENT_SWIZZLE_IDENTITY;
2934         pProperties->samplerYcbcrConversionComponents.b =
2935             VK_COMPONENT_SWIZZLE_IDENTITY;
2936         pProperties->samplerYcbcrConversionComponents.a =
2937             VK_COMPONENT_SWIZZLE_IDENTITY;
2938         pProperties->suggestedYcbcrModel =
2939             VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY;
2940         pProperties->suggestedYcbcrRange = VK_SAMPLER_YCBCR_RANGE_ITU_FULL;
2941         pProperties->suggestedXChromaOffset = VK_CHROMA_LOCATION_MIDPOINT;
2942         pProperties->suggestedYChromaOffset = VK_CHROMA_LOCATION_MIDPOINT;
2943 
2944         return storeProperties();
2945     }
2946 #endif
2947 
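    // Creates a host-coherent mapping for |mem| using whichever transport the
    // emulator advertises: with hasDirectMem (AEMU), the memory is mapped via
    // vkMapMemoryIntoAddressSpaceGOOGLE and the device memory's goldfish
    // address-space block; with hasVirtioGpuNext, a mappable host blob is
    // created from the id returned by vkGetMemoryHostAddressInfoGOOGLE. On
    // failure, |res| is set and nullptr is returned.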
2948     CoherentMemoryPtr createCoherentMemory(VkDevice device,
2949                                            VkDeviceMemory mem,
2950                                            const VkMemoryAllocateInfo& hostAllocationInfo,
2951                                            VkEncoder* enc,
2952                                            VkResult& res)
2953     {
2954         CoherentMemoryPtr coherentMemory = nullptr;
2955         if (mFeatureInfo->hasDirectMem) {
2956             uint64_t gpuAddr = 0;
2957             GoldfishAddressSpaceBlockPtr block = nullptr;
2958             res = enc->vkMapMemoryIntoAddressSpaceGOOGLE(device, mem, &gpuAddr, true);
2959             if (res != VK_SUCCESS) {
2960                 return coherentMemory;
2961             }
2962             {
2963                 AutoLock<RecursiveLock> lock(mLock);
2964                 auto it = info_VkDeviceMemory.find(mem);
2965                 if (it == info_VkDeviceMemory.end()) {
2966                     res = VK_ERROR_OUT_OF_HOST_MEMORY;
2967                     return coherentMemory;
2968                 }
2969                 auto& info = it->second;
2970                 block = info.goldfishBlock;
2971                 info.goldfishBlock = nullptr;
2972 
2973                 coherentMemory =
2974                     std::make_shared<CoherentMemory>(block, gpuAddr, hostAllocationInfo.allocationSize, device, mem);
2975             }
2976         } else if (mFeatureInfo->hasVirtioGpuNext) {
2977             struct VirtGpuCreateBlob createBlob = { 0 };
2978             uint64_t hvaSizeId[3];
2979             res = enc->vkGetMemoryHostAddressInfoGOOGLE(device, mem,
2980                     &hvaSizeId[0], &hvaSizeId[1], &hvaSizeId[2], true /* do lock */);
2981             if (res != VK_SUCCESS) {
2982                 return coherentMemory;
2983             }
2984             {
2985                 AutoLock<RecursiveLock> lock(mLock);
2986                 VirtGpuDevice& instance = VirtGpuDevice::getInstance((enum VirtGpuCapset)3);
2987                 createBlob.blobMem = kBlobMemHost3d;
2988                 createBlob.flags = kBlobFlagMappable;
2989                 createBlob.blobId = hvaSizeId[2];
2990                 createBlob.size = hostAllocationInfo.allocationSize;
2991 
2992                 auto blob = instance.createBlob(createBlob);
2993                 if (!blob) {
2994                     res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
2995                     return coherentMemory;
2996                 }
2997 
2998                 VirtGpuBlobMappingPtr mapping = blob->createMapping();
2999                 if (!mapping) {
3000                     res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
3001                     return coherentMemory;
3002                 }
3003 
3004                 coherentMemory =
3005                     std::make_shared<CoherentMemory>(mapping, createBlob.size, device, mem);
3006             }
3007         } else {
3008             ALOGE("FATAL: Unsupported virtual memory feature");
3009             abort();
3010         }
3011         return coherentMemory;
3012     }
3013 
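    // Allocates host memory backing a CoherentMemory block. The requested
    // size is rounded up before it reaches the host; a rough sketch of the
    // policy implemented below:
    //
    //   if (deferredMapping || createGuestHandle)
    //       size = ALIGN(size, 4096);             // page-align only
    //   else if (dedicated)
    //       size = ALIGN(size, kLargestPageSize); // b:152769369 workaround
    //   else
    //       size = max(ALIGN(size, kMegaByte), kDefaultHostMemBlockSize);
    //
    // Non-dedicated allocations therefore become large blocks that later
    // calls can suballocate from (see getCoherentMemory()).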
3014     VkResult allocateCoherentMemory(VkDevice device, const VkMemoryAllocateInfo* pAllocateInfo,
3015                                     VkEncoder* enc, VkDeviceMemory* pMemory) {
3016         uint64_t blobId = 0;
3017         uint64_t offset = 0;
3018         uint8_t *ptr = nullptr;
3019         VkMemoryAllocateFlagsInfo allocFlagsInfo;
3020         VkMemoryOpaqueCaptureAddressAllocateInfo opaqueCaptureAddressAllocInfo;
3021         VkCreateBlobGOOGLE createBlobInfo;
3022         VirtGpuBlobPtr guestBlob = nullptr;
3023 
3024         memset(&createBlobInfo, 0, sizeof(struct VkCreateBlobGOOGLE));
3025         createBlobInfo.sType = VK_STRUCTURE_TYPE_CREATE_BLOB_GOOGLE;
3026 
3027         const VkMemoryAllocateFlagsInfo* allocFlagsInfoPtr =
3028             vk_find_struct<VkMemoryAllocateFlagsInfo>(pAllocateInfo);
3029         const VkMemoryOpaqueCaptureAddressAllocateInfo* opaqueCaptureAddressAllocInfoPtr =
3030             vk_find_struct<VkMemoryOpaqueCaptureAddressAllocateInfo>(pAllocateInfo);
3031 
3032         bool deviceAddressMemoryAllocation =
3033             allocFlagsInfoPtr &&
3034             ((allocFlagsInfoPtr->flags & VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT) ||
3035              (allocFlagsInfoPtr->flags & VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT));
3036 
3037         bool dedicated = deviceAddressMemoryAllocation;
3038 
3039         if (mCaps.gfxstreamCapset.deferredMapping || mCaps.params[kParamCreateGuestHandle])
3040             dedicated = true;
3041 
3042         VkMemoryAllocateInfo hostAllocationInfo = vk_make_orphan_copy(*pAllocateInfo);
3043         vk_struct_chain_iterator structChainIter = vk_make_chain_iterator(&hostAllocationInfo);
3044 
3045         if (mCaps.gfxstreamCapset.deferredMapping || mCaps.params[kParamCreateGuestHandle]) {
3046             hostAllocationInfo.allocationSize = ALIGN(pAllocateInfo->allocationSize, 4096);
3047         } else if (dedicated) {
3048             // Over-align to kLargestPageSize to work around issues in some
3049             // Windows drivers (b:152769369). The host could likely report the desired alignment instead.
3050             hostAllocationInfo.allocationSize =
3051                 ALIGN(pAllocateInfo->allocationSize, kLargestPageSize);
3052         } else {
3053             VkDeviceSize roundedUpAllocSize = ALIGN(pAllocateInfo->allocationSize, kMegaByte);
3054             hostAllocationInfo.allocationSize = std::max(roundedUpAllocSize,
3055                                                          kDefaultHostMemBlockSize);
3056         }
3057 
3058         // Support device address capture/replay allocations
3059         if (deviceAddressMemoryAllocation) {
3060             if (allocFlagsInfoPtr) {
3061                 ALOGV("%s: has alloc flags\n", __func__);
3062                 allocFlagsInfo = *allocFlagsInfoPtr;
3063                 vk_append_struct(&structChainIter, &allocFlagsInfo);
3064             }
3065 
3066             if (opaqueCaptureAddressAllocInfoPtr) {
3067                 ALOGV("%s: has opaque capture address\n", __func__);
3068                 opaqueCaptureAddressAllocInfo = *opaqueCaptureAddressAllocInfoPtr;
3069                 vk_append_struct(&structChainIter, &opaqueCaptureAddressAllocInfo);
3070             }
3071         }
3072 
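        // Two blob-creation modes follow: with kParamCreateGuestHandle the
        // guest allocates the backing pages itself and submits a placeholder
        // execbuffer so the host observes the blob before first use; with
        // deferredMapping only a blob id is reserved here and appended to the
        // allocation info so the host can create the blob, with the mapping
        // established later.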
3073         if (mCaps.params[kParamCreateGuestHandle]) {
3074             struct VirtGpuCreateBlob createBlob = {0};
3075             struct VirtGpuExecBuffer exec = {};
3076             VirtGpuDevice& instance = VirtGpuDevice::getInstance();
3077             struct gfxstreamPlaceholderCommandVk placeholderCmd = {};
3078 
3079             createBlobInfo.blobId = ++mBlobId;
3080             createBlobInfo.blobMem = kBlobMemGuest;
3081             createBlobInfo.blobFlags = kBlobFlagCreateGuestHandle;
3082             vk_append_struct(&structChainIter, &createBlobInfo);
3083 
3084             createBlob.blobMem = kBlobMemGuest;
3085             createBlob.flags = kBlobFlagCreateGuestHandle;
3086             createBlob.blobId = createBlobInfo.blobId;
3087             createBlob.size = hostAllocationInfo.allocationSize;
3088 
3089             guestBlob = instance.createBlob(createBlob);
3090             if (!guestBlob) return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3091 
3092             placeholderCmd.hdr.opCode = GFXSTREAM_PLACEHOLDER_COMMAND_VK;
3093             exec.command = static_cast<void*>(&placeholderCmd);
3094             exec.command_size = sizeof(placeholderCmd);
3095             exec.flags = kRingIdx;
3096             exec.ring_idx = 1;
3097             if (instance.execBuffer(exec, guestBlob)) return VK_ERROR_OUT_OF_HOST_MEMORY;
3098 
3099             guestBlob->wait();
3100         } else if (mCaps.gfxstreamCapset.deferredMapping) {
3101             createBlobInfo.blobId = ++mBlobId;
3102             createBlobInfo.blobMem = kBlobMemHost3d;
3103             vk_append_struct(&structChainIter, &createBlobInfo);
3104         }
3105 
3106         VkDeviceMemory mem = VK_NULL_HANDLE;
3107         VkResult host_res =
3108             enc->vkAllocateMemory(device, &hostAllocationInfo, nullptr,
3109                                   &mem, true /* do lock */);
3110         if (host_res != VK_SUCCESS) {
3111             return host_res;
3112         }
3113 
3114         struct VkDeviceMemory_Info info;
3115         if (mCaps.gfxstreamCapset.deferredMapping || mCaps.params[kParamCreateGuestHandle]) {
3116             info.allocationSize = pAllocateInfo->allocationSize;
3117             info.blobId = createBlobInfo.blobId;
3118         }
3119 
3120         if (guestBlob) {
3121             auto mapping = guestBlob->createMapping();
3122             if (!mapping) return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3123 
3124             auto coherentMemory = std::make_shared<CoherentMemory>(
3125                 mapping, hostAllocationInfo.allocationSize, device, mem);
3126 
3127             coherentMemory->subAllocate(pAllocateInfo->allocationSize, &ptr, offset);
3128             info.coherentMemoryOffset = offset;
3129             info.coherentMemory = coherentMemory;
3130             info.ptr = ptr;
3131         }
3132 
3133         info.coherentMemorySize = hostAllocationInfo.allocationSize;
3134         info.memoryTypeIndex = hostAllocationInfo.memoryTypeIndex;
3135         info.device = device;
3136         info.dedicated = dedicated;
3137         {
3138             // createCoherentMemory() below needs to read info_VkDeviceMemory,
3139             // so record the entry before using it.
3140             AutoLock<RecursiveLock> lock(mLock);
3141             info_VkDeviceMemory[mem] = info;
3142         }
3143 
3144         if (mCaps.gfxstreamCapset.deferredMapping || mCaps.params[kParamCreateGuestHandle]) {
3145             *pMemory = mem;
3146             return host_res;
3147         }
3148 
3149         auto coherentMemory = createCoherentMemory(device, mem, hostAllocationInfo, enc, host_res);
3150         if (coherentMemory) {
3151             AutoLock<RecursiveLock> lock(mLock);
3152             coherentMemory->subAllocate(pAllocateInfo->allocationSize, &ptr, offset);
3153             info.allocationSize = pAllocateInfo->allocationSize;
3154             info.coherentMemoryOffset = offset;
3155             info.coherentMemory = coherentMemory;
3156             info.ptr = ptr;
3157             info_VkDeviceMemory[mem] = info;
3158             *pMemory = mem;
3159         }
3160         else {
3161             enc->vkFreeMemory(device, mem, nullptr, true);
3162             AutoLock<RecursiveLock> lock(mLock);
3163             info_VkDeviceMemory.erase(mem);
3164         }
3165         return host_res;
3166     }
3167 
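    // Entry point for host-visible allocations: first try to suballocate out
    // of an existing CoherentMemory block with a matching memoryTypeIndex
    // (skipping dedicated blocks), handing the application an alias
    // VkDeviceMemory handle; fall back to allocateCoherentMemory() only when
    // no existing block has room.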
3168     VkResult getCoherentMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkEncoder* enc,
3169                                VkDevice device, VkDeviceMemory* pMemory) {
3170         VkMemoryAllocateFlagsInfo allocFlagsInfo;
3171         VkMemoryOpaqueCaptureAddressAllocateInfo opaqueCaptureAddressAllocInfo;
3172 
3173         // Add buffer device address capture structs
3174         const VkMemoryAllocateFlagsInfo* allocFlagsInfoPtr =
3175             vk_find_struct<VkMemoryAllocateFlagsInfo>(pAllocateInfo);
3176 
3177         bool dedicated = allocFlagsInfoPtr &&
3178                          ((allocFlagsInfoPtr->flags & VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT) ||
3179                           (allocFlagsInfoPtr->flags & VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT));
3180 
3181         if (mCaps.gfxstreamCapset.deferredMapping || mCaps.params[kParamCreateGuestHandle])
3182             dedicated = true;
3183 
3184         CoherentMemoryPtr coherentMemory = nullptr;
3185         uint8_t *ptr = nullptr;
3186         uint64_t offset = 0;
3187         {
3188             AutoLock<RecursiveLock> lock(mLock);
3189             for (const auto &[memory, info] : info_VkDeviceMemory) {
3190                 if (info.memoryTypeIndex != pAllocateInfo->memoryTypeIndex)
3191                     continue;
3192 
3193                 if (info.dedicated || dedicated)
3194                     continue;
3195 
3196                 if (!info.coherentMemory)
3197                     continue;
3198 
3199                 if (!info.coherentMemory->subAllocate(pAllocateInfo->allocationSize, &ptr, offset))
3200                     continue;
3201 
3202                 coherentMemory = info.coherentMemory;
3203                 break;
3204             }
3205             if (coherentMemory) {
3206                 struct VkDeviceMemory_Info info;
3207                 info.coherentMemoryOffset = offset;
3208                 info.ptr = ptr;
3209                 info.memoryTypeIndex = pAllocateInfo->memoryTypeIndex;
3210                 info.allocationSize = pAllocateInfo->allocationSize;
3211                 info.coherentMemory = coherentMemory;
3212                 info.device = device;
3213 
3214                 // For suballocated memory, create an alias VkDeviceMemory handle
3215                 // for the application; the memory backing the suballocation is
3216                 // still the VkDeviceMemory associated with the CoherentMemory.
3217                 auto mem = new_from_host_VkDeviceMemory(VK_NULL_HANDLE);
3218                 info_VkDeviceMemory[mem] = info;
3219                 *pMemory = mem;
3220                 return VK_SUCCESS;
3221             }
3222         }
3223         return allocateCoherentMemory(device, pAllocateInfo, enc, pMemory);
3224     }
3225 
3226     uint64_t getAHardwareBufferId(AHardwareBuffer* ahw) {
3227         uint64_t id = 0;
3228 #if defined(PLATFORM_SDK_VERSION) && PLATFORM_SDK_VERSION >= 31
3229         AHardwareBuffer_getId(ahw, &id);
3230 #else
3231         (void)ahw;
3232 #endif
3233         return id;
3234     }
3235 
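    // Guest-side vkAllocateMemory. Broadly: (1) emit device-memory-report
    // events via the _RETURN_* macros below, (2) translate export/import
    // requests (AHardwareBuffer, sysmem buffer collections, VMOs) into the
    // GOOGLE import structs the host understands, and (3) route host-visible
    // requests through the coherent-memory suballocator.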
3236     VkResult on_vkAllocateMemory(
3237         void* context,
3238         VkResult input_result,
3239         VkDevice device,
3240         const VkMemoryAllocateInfo* pAllocateInfo,
3241         const VkAllocationCallbacks* pAllocator,
3242         VkDeviceMemory* pMemory) {
3243 
3244 #define _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(result) \
3245         { \
3246             auto it = info_VkDevice.find(device); \
3247             if (it == info_VkDevice.end()) return result; \
3248             emitDeviceMemoryReport( \
3249                 it->second, \
3250                 VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATION_FAILED_EXT, \
3251                 0, \
3252                 pAllocateInfo->allocationSize, \
3253                 VK_OBJECT_TYPE_DEVICE_MEMORY, \
3254                 0, \
3255                 pAllocateInfo->memoryTypeIndex); \
3256             return result; \
3257         }
3258 
3259 #define _RETURN_SCUCCESS_WITH_DEVICE_MEMORY_REPORT \
3260         { \
3261             uint64_t memoryObjectId = (uint64_t)(void*)*pMemory; \
3262             if (ahw) { \
3263                 memoryObjectId = getAHardwareBufferId(ahw); \
3264             } \
3265             emitDeviceMemoryReport( \
3266                 info_VkDevice[device], \
3267                 isImport ? VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_IMPORT_EXT : VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATE_EXT, \
3268                 memoryObjectId, \
3269                 pAllocateInfo->allocationSize, \
3270                 VK_OBJECT_TYPE_DEVICE_MEMORY, \
3271                 (uint64_t)(void*)*pMemory, \
3272                 pAllocateInfo->memoryTypeIndex); \
3273             return VK_SUCCESS; \
3274         }
3275 
3276 
3277         if (input_result != VK_SUCCESS) _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(input_result);
3278 
3279         VkEncoder* enc = (VkEncoder*)context;
3280 
3281         VkMemoryAllocateInfo finalAllocInfo = vk_make_orphan_copy(*pAllocateInfo);
3282         vk_struct_chain_iterator structChainIter = vk_make_chain_iterator(&finalAllocInfo);
3283 
3284         VkMemoryAllocateFlagsInfo allocFlagsInfo;
3285         VkMemoryOpaqueCaptureAddressAllocateInfo opaqueCaptureAddressAllocInfo;
3286 
3287         // Add buffer device address capture structs
3288         const VkMemoryAllocateFlagsInfo* allocFlagsInfoPtr =
3289             vk_find_struct<VkMemoryAllocateFlagsInfo>(pAllocateInfo);
3290         const VkMemoryOpaqueCaptureAddressAllocateInfo* opaqueCaptureAddressAllocInfoPtr =
3291             vk_find_struct<VkMemoryOpaqueCaptureAddressAllocateInfo>(pAllocateInfo);
3292 
3293         if (allocFlagsInfoPtr) {
3294             ALOGV("%s: has alloc flags\n", __func__);
3295             allocFlagsInfo = *allocFlagsInfoPtr;
3296             vk_append_struct(&structChainIter, &allocFlagsInfo);
3297         }
3298 
3299         if (opaqueCaptureAddressAllocInfoPtr) {
3300             ALOGV("%s: has opaque capture address\n", __func__);
3301             opaqueCaptureAddressAllocInfo = *opaqueCaptureAddressAllocInfoPtr;
3302             vk_append_struct(&structChainIter, &opaqueCaptureAddressAllocInfo);
3303         }
3304 
3305         VkMemoryDedicatedAllocateInfo dedicatedAllocInfo;
3306         VkImportColorBufferGOOGLE importCbInfo = {
3307             VK_STRUCTURE_TYPE_IMPORT_COLOR_BUFFER_GOOGLE, 0,
3308         };
3309         VkImportBufferGOOGLE importBufferInfo = {
3310                 VK_STRUCTURE_TYPE_IMPORT_BUFFER_GOOGLE,
3311                 0,
3312         };
3313         // VkImportPhysicalAddressGOOGLE importPhysAddrInfo = {
3314         //     VK_STRUCTURE_TYPE_IMPORT_PHYSICAL_ADDRESS_GOOGLE, 0,
3315         // };
3316 
3317         const VkExportMemoryAllocateInfo* exportAllocateInfoPtr =
3318             vk_find_struct<VkExportMemoryAllocateInfo>(pAllocateInfo);
3319 
3320 #ifdef VK_USE_PLATFORM_ANDROID_KHR
3321         const VkImportAndroidHardwareBufferInfoANDROID* importAhbInfoPtr =
3322             vk_find_struct<VkImportAndroidHardwareBufferInfoANDROID>(pAllocateInfo);
3323 #else
3324         const void* importAhbInfoPtr = nullptr;
3325 #endif
3326 
3327 #ifdef VK_USE_PLATFORM_FUCHSIA
3328         const VkImportMemoryBufferCollectionFUCHSIA*
3329             importBufferCollectionInfoPtr =
3330                 vk_find_struct<VkImportMemoryBufferCollectionFUCHSIA>(
3331                     pAllocateInfo);
3332 
3333         const VkImportMemoryZirconHandleInfoFUCHSIA* importVmoInfoPtr =
3334                 vk_find_struct<VkImportMemoryZirconHandleInfoFUCHSIA>(
3335                         pAllocateInfo);
3336 #else
3337         const void* importBufferCollectionInfoPtr = nullptr;
3338         const void* importVmoInfoPtr = nullptr;
3339 #endif  // VK_USE_PLATFORM_FUCHSIA
3340 
3341         const VkMemoryDedicatedAllocateInfo* dedicatedAllocInfoPtr =
3342             vk_find_struct<VkMemoryDedicatedAllocateInfo>(pAllocateInfo);
3343 
3344         // Note for AHardwareBuffers, the Vulkan spec states:
3345         //
3346         //     Android hardware buffers have intrinsic width, height, format, and usage
3347         //     properties, so Vulkan images bound to memory imported from an Android
3348         //     hardware buffer must use dedicated allocations
3349         //
3350         // so any allocation requests with a VkImportAndroidHardwareBufferInfoANDROID
3351         // will necessarily have a VkMemoryDedicatedAllocateInfo. However, the host
3352         // may or may not actually use a dedicated allocation to emulate
3353         // AHardwareBuffers. As such, the VkMemoryDedicatedAllocateInfo is passed to the
3354         // host and the host will decide whether or not to use it.
3355 
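        // For reference, a typical app-side chain that lands here looks
        // roughly like this (illustrative only; the structs and sType values
        // are the standard Vulkan/NDK ones):
        //
        //   VkImportAndroidHardwareBufferInfoANDROID importInfo = {
        //       .sType =
        //           VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID,
        //       .pNext = &dedicatedInfo,  // VkMemoryDedicatedAllocateInfo
        //       .buffer = ahb,
        //   };
        //   VkMemoryAllocateInfo allocInfo = {
        //       .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
        //       .pNext = &importInfo,
        //       /* allocationSize, memoryTypeIndex from
        //          vkGetAndroidHardwareBufferPropertiesANDROID */
        //   };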
3356         bool shouldPassThroughDedicatedAllocInfo =
3357             !exportAllocateInfoPtr &&
3358             !importBufferCollectionInfoPtr &&
3359             !importVmoInfoPtr;
3360 
3361         const VkPhysicalDeviceMemoryProperties& physicalDeviceMemoryProps
3362             = getPhysicalDeviceMemoryProperties(context, device, VK_NULL_HANDLE);
3363 
3364         const bool requestedMemoryIsHostVisible =
3365             isHostVisible(&physicalDeviceMemoryProps, pAllocateInfo->memoryTypeIndex);
3366 
3367 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
3368         shouldPassThroughDedicatedAllocInfo &= !requestedMemoryIsHostVisible;
3369 #endif  // defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
3370 
3371         if (shouldPassThroughDedicatedAllocInfo &&
3372             dedicatedAllocInfoPtr) {
3373             dedicatedAllocInfo = vk_make_orphan_copy(*dedicatedAllocInfoPtr);
3374             vk_append_struct(&structChainIter, &dedicatedAllocInfo);
3375         }
3376 
3377         // State needed for import/export.
3378         bool exportAhb = false;
3379         bool exportVmo = false;
3380         bool importAhb = false;
3381         bool importBufferCollection = false;
3382         bool importVmo = false;
3383         (void)exportVmo;
3384 
3385         // Even if we export allocate, the underlying operation
3386         // for the host is always going to be an import operation.
3387         // This is also how Intel's implementation works,
3388         // and is generally simpler;
3389         // even in an export allocation,
3390         // we perform AHardwareBuffer allocation
3391         // on the guest side, at this layer,
3392         // and then we attach a new VkDeviceMemory
3393         // to the AHardwareBuffer on the host via an "import" operation.
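        // Concretely: an export allocation first creates the AHardwareBuffer
        // on the guest (createAndroidHardwareBuffer() below), then feeds its
        // host handle back through VkImportColorBufferGOOGLE /
        // VkImportBufferGOOGLE, exactly as an import allocation would.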
3394         AHardwareBuffer* ahw = nullptr;
3395 
3396         if (exportAllocateInfoPtr) {
3397             exportAhb =
3398                 exportAllocateInfoPtr->handleTypes &
3399                 VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
3400 #ifdef VK_USE_PLATFORM_FUCHSIA
3401             exportVmo = exportAllocateInfoPtr->handleTypes &
3402                         VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA;
3403 #endif  // VK_USE_PLATFORM_FUCHSIA
3404         } else if (importAhbInfoPtr) {
3405             importAhb = true;
3406         } else if (importBufferCollectionInfoPtr) {
3407             importBufferCollection = true;
3408         } else if (importVmoInfoPtr) {
3409             importVmo = true;
3410         }
3411         bool isImport = importAhb || importBufferCollection ||
3412                         importVmo;
3413 
3414 #if defined(VK_USE_PLATFORM_ANDROID_KHR)
3415         if (exportAhb) {
3416             bool hasDedicatedImage = dedicatedAllocInfoPtr &&
3417                 (dedicatedAllocInfoPtr->image != VK_NULL_HANDLE);
3418             bool hasDedicatedBuffer = dedicatedAllocInfoPtr &&
3419                 (dedicatedAllocInfoPtr->buffer != VK_NULL_HANDLE);
3420             VkExtent3D imageExtent = { 0, 0, 0 };
3421             uint32_t imageLayers = 0;
3422             VkFormat imageFormat = VK_FORMAT_UNDEFINED;
3423             VkImageUsageFlags imageUsage = 0;
3424             VkImageCreateFlags imageCreateFlags = 0;
3425             VkDeviceSize bufferSize = 0;
3426             VkDeviceSize allocationInfoAllocSize =
3427                 finalAllocInfo.allocationSize;
3428 
3429             if (hasDedicatedImage) {
3430                 AutoLock<RecursiveLock> lock(mLock);
3431 
3432                 auto it = info_VkImage.find(
3433                     dedicatedAllocInfoPtr->image);
3434                 if (it == info_VkImage.end()) _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(VK_ERROR_INITIALIZATION_FAILED);
3435                 const auto& info = it->second;
3436                 const auto& imgCi = info.createInfo;
3437 
3438                 imageExtent = imgCi.extent;
3439                 imageLayers = imgCi.arrayLayers;
3440                 imageFormat = imgCi.format;
3441                 imageUsage = imgCi.usage;
3442                 imageCreateFlags = imgCi.flags;
3443             }
3444 
3445             if (hasDedicatedBuffer) {
3446                 AutoLock<RecursiveLock> lock(mLock);
3447 
3448                 auto it = info_VkBuffer.find(
3449                     dedicatedAllocInfoPtr->buffer);
3450                 if (it == info_VkBuffer.end()) _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(VK_ERROR_INITIALIZATION_FAILED);
3451                 const auto& info = it->second;
3452                 const auto& bufCi = info.createInfo;
3453 
3454                 bufferSize = bufCi.size;
3455             }
3456 
3457             VkResult ahbCreateRes =
3458                 createAndroidHardwareBuffer(
3459                     hasDedicatedImage,
3460                     hasDedicatedBuffer,
3461                     imageExtent,
3462                     imageLayers,
3463                     imageFormat,
3464                     imageUsage,
3465                     imageCreateFlags,
3466                     bufferSize,
3467                     allocationInfoAllocSize,
3468                     &ahw);
3469 
3470             if (ahbCreateRes != VK_SUCCESS) {
3471                 _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(ahbCreateRes);
3472             }
3473         }
3474 
3475         if (importAhb) {
3476             ahw = importAhbInfoPtr->buffer;
3477             // We still need to acquire the AHardwareBuffer.
3478             importAndroidHardwareBuffer(
3479                 ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->grallocHelper(),
3480                 importAhbInfoPtr, nullptr);
3481         }
3482 
3483         if (ahw) {
3484             D("%s: Import AHardwareBuffer", __func__);
3485             const uint32_t hostHandle =
3486                 ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->grallocHelper()
3487                     ->getHostHandle(AHardwareBuffer_getNativeHandle(ahw));
3488 
3489             AHardwareBuffer_Desc ahbDesc = {};
3490             AHardwareBuffer_describe(ahw, &ahbDesc);
3491             if (ahbDesc.format == AHARDWAREBUFFER_FORMAT_BLOB
3492                 && !ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->grallocHelper()->treatBlobAsImage()) {
3493                 importBufferInfo.buffer = hostHandle;
3494                 vk_append_struct(&structChainIter, &importBufferInfo);
3495             } else {
3496                 importCbInfo.colorBuffer = hostHandle;
3497                 vk_append_struct(&structChainIter, &importCbInfo);
3498             }
3499         }
3500 #endif
3501         zx_handle_t vmo_handle = ZX_HANDLE_INVALID;
3502 
3503 #ifdef VK_USE_PLATFORM_FUCHSIA
3504         if (importBufferCollection) {
3505             const auto& collection = *reinterpret_cast<
3506                 fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(
3507                 importBufferCollectionInfoPtr->collection);
3508             auto result = collection->WaitForBuffersAllocated();
3509             if (!result.ok() || result->status != ZX_OK) {
3510                 ALOGE("WaitForBuffersAllocated failed: %d %d", result.status(),
3511                       GET_STATUS_SAFE(result, status));
3512                 _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(VK_ERROR_INITIALIZATION_FAILED);
3513             }
3514             fuchsia_sysmem::wire::BufferCollectionInfo2& info =
3515                 result->buffer_collection_info;
3516             uint32_t index = importBufferCollectionInfoPtr->index;
3517             if (index >= info.buffer_count) {
3518                 ALOGE("Invalid buffer index: %d (buffer_count %d)", index, info.buffer_count);
3519                 _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(VK_ERROR_INITIALIZATION_FAILED);
3520             }
3521             vmo_handle = info.buffers[index].vmo.release();
3522         }
3523 
3524         if (importVmo) {
3525             vmo_handle = importVmoInfoPtr->handle;
3526         }
3527 
3528         if (exportVmo) {
3529             bool hasDedicatedImage = dedicatedAllocInfoPtr &&
3530                 (dedicatedAllocInfoPtr->image != VK_NULL_HANDLE);
3531             bool hasDedicatedBuffer =
3532                 dedicatedAllocInfoPtr &&
3533                 (dedicatedAllocInfoPtr->buffer != VK_NULL_HANDLE);
3534 
3535             if (hasDedicatedImage && hasDedicatedBuffer) {
3536                 ALOGE(
3537                     "Invalid VkMemoryDedicatedAllocationInfo: At least one "
3538                     "of image and buffer must be VK_NULL_HANDLE.");
3539                 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3540             }
3541 
3542             const VkImageCreateInfo* pImageCreateInfo = nullptr;
3543 
3544             VkBufferConstraintsInfoFUCHSIA bufferConstraintsInfo = {
3545                 .sType =
3546                     VK_STRUCTURE_TYPE_BUFFER_CONSTRAINTS_INFO_FUCHSIA,
3547                 .pNext = nullptr,
3548                 .createInfo = {},
3549                 .requiredFormatFeatures = 0,
3550                 .bufferCollectionConstraints =
3551                     VkBufferCollectionConstraintsInfoFUCHSIA{
3552                         .sType =
3553                             VK_STRUCTURE_TYPE_BUFFER_COLLECTION_CONSTRAINTS_INFO_FUCHSIA,
3554                         .pNext = nullptr,
3555                         .minBufferCount = 1,
3556                         .maxBufferCount = 0,
3557                         .minBufferCountForCamping = 0,
3558                         .minBufferCountForDedicatedSlack = 0,
3559                         .minBufferCountForSharedSlack = 0,
3560                     },
3561             };
3562             const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo =
3563                 nullptr;
3564 
3565             if (hasDedicatedImage) {
3566                 AutoLock<RecursiveLock> lock(mLock);
3567 
3568                 auto it = info_VkImage.find(dedicatedAllocInfoPtr->image);
3569                 if (it == info_VkImage.end()) return VK_ERROR_INITIALIZATION_FAILED;
3570                 const auto& imageInfo = it->second;
3571 
3572                 pImageCreateInfo = &imageInfo.createInfo;
3573             }
3574 
3575             if (hasDedicatedBuffer) {
3576                 AutoLock<RecursiveLock> lock(mLock);
3577 
3578                 auto it = info_VkBuffer.find(dedicatedAllocInfoPtr->buffer);
3579                 if (it == info_VkBuffer.end())
3580                     return VK_ERROR_INITIALIZATION_FAILED;
3581                 const auto& bufferInfo = it->second;
3582 
3583                 bufferConstraintsInfo.createInfo = bufferInfo.createInfo;
3584                 pBufferConstraintsInfo = &bufferConstraintsInfo;
3585             }
3586 
3587             hasDedicatedImage = hasDedicatedImage &&
3588                                 getBufferCollectionConstraintsVulkanImageUsage(
3589                                     pImageCreateInfo);
3590             hasDedicatedBuffer =
3591                 hasDedicatedBuffer &&
3592                 getBufferCollectionConstraintsVulkanBufferUsage(
3593                     pBufferConstraintsInfo);
3594 
3595             if (hasDedicatedImage || hasDedicatedBuffer) {
3596                 auto token_ends =
3597                     fidl::CreateEndpoints<::fuchsia_sysmem::BufferCollectionToken>();
3598                 if (!token_ends.is_ok()) {
3599                     ALOGE("fidl::CreateEndpoints failed: %d", token_ends.status_value());
3600                     abort();
3601                 }
3602 
3603                 {
3604                     auto result = mSysmemAllocator->AllocateSharedCollection(
3605                         std::move(token_ends->server));
3606                     if (!result.ok()) {
3607                         ALOGE("AllocateSharedCollection failed: %d",
3608                               result.status());
3609                         abort();
3610                     }
3611                 }
3612 
3613                 auto collection_ends =
3614                     fidl::CreateEndpoints<::fuchsia_sysmem::BufferCollection>();
3615                 if (!collection_ends.is_ok()) {
3616                     ALOGE("fidl::CreateEndpoints failed: %d", collection_ends.status_value());
3617                     abort();
3618                 }
3619 
3620                 {
3621                     auto result = mSysmemAllocator->BindSharedCollection(
3622                         std::move(token_ends->client), std::move(collection_ends->server));
3623                     if (!result.ok()) {
3624                         ALOGE("BindSharedCollection failed: %d",
3625                               result.status());
3626                         abort();
3627                     }
3628                 }
3629 
3630                 fidl::WireSyncClient<fuchsia_sysmem::BufferCollection> collection(
3631                     std::move(collection_ends->client));
3632                 if (hasDedicatedImage) {
3633                     // TODO(fxbug.dev/90856): Use setBufferCollectionImageConstraintsFUCHSIA.
3634                     VkResult res = setBufferCollectionConstraintsFUCHSIA(
3635                         enc, device, &collection, pImageCreateInfo);
3636                     if (res == VK_ERROR_FORMAT_NOT_SUPPORTED) {
3637                       ALOGE("setBufferCollectionConstraints failed: format %u is not supported",
3638                             pImageCreateInfo->format);
3639                       return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3640                     }
3641                     if (res != VK_SUCCESS) {
3642                         ALOGE("setBufferCollectionConstraints failed: %d", res);
3643                         abort();
3644                     }
3645                 }
3646 
3647                 if (hasDedicatedBuffer) {
3648                     VkResult res = setBufferCollectionBufferConstraintsFUCHSIA(
3649                         &collection, pBufferConstraintsInfo);
3650                     if (res != VK_SUCCESS) {
3651                         ALOGE("setBufferCollectionBufferConstraints failed: %d",
3652                               res);
3653                         abort();
3654                     }
3655                 }
3656 
3657                 {
3658                     auto result = collection->WaitForBuffersAllocated();
3659                     if (result.ok() && result->status == ZX_OK) {
3660                         fuchsia_sysmem::wire::BufferCollectionInfo2& info =
3661                             result->buffer_collection_info;
3662                         if (!info.buffer_count) {
3663                             ALOGE(
3664                                 "WaitForBuffersAllocated returned "
3665                                 "invalid count: %d",
3666                                 info.buffer_count);
3667                             abort();
3668                         }
3669                         vmo_handle = info.buffers[0].vmo.release();
3670                     } else {
3671                         ALOGE("WaitForBuffersAllocated failed: %d %d",
3672                               result.status(), GET_STATUS_SAFE(result, status));
3673                         abort();
3674                     }
3675                 }
3676 
3677                 collection->Close();
3678 
3679                 zx::vmo vmo_copy;
3680                 zx_status_t status = zx_handle_duplicate(vmo_handle, ZX_RIGHT_SAME_RIGHTS,
3681                                                          vmo_copy.reset_and_get_address());
3682                 if (status != ZX_OK) {
3683                     ALOGE("Failed to duplicate VMO: %d", status);
3684                     abort();
3685                 }
3686 
3687                 if (pImageCreateInfo) {
3688                     // Only device-local images need a color buffer created here;
3689                     // for host-visible images, the color buffer is already created
3690                     // when sysmem allocates memory. Here we use the |tiling| field
3691                     // of the image create info to determine whether the image uses
3692                     // host-visible memory.
3693                     bool isLinear = pImageCreateInfo->tiling == VK_IMAGE_TILING_LINEAR;
3694                     if (!isLinear) {
3695                         fuchsia_hardware_goldfish::wire::ColorBufferFormatType format;
3696                         switch (pImageCreateInfo->format) {
3697                             case VK_FORMAT_B8G8R8A8_SINT:
3698                             case VK_FORMAT_B8G8R8A8_UNORM:
3699                             case VK_FORMAT_B8G8R8A8_SRGB:
3700                             case VK_FORMAT_B8G8R8A8_SNORM:
3701                             case VK_FORMAT_B8G8R8A8_SSCALED:
3702                             case VK_FORMAT_B8G8R8A8_USCALED:
3703                                 format = fuchsia_hardware_goldfish::wire::ColorBufferFormatType::
3704                                         kBgra;
3705                                 break;
3706                             case VK_FORMAT_R8G8B8A8_SINT:
3707                             case VK_FORMAT_R8G8B8A8_UNORM:
3708                             case VK_FORMAT_R8G8B8A8_SRGB:
3709                             case VK_FORMAT_R8G8B8A8_SNORM:
3710                             case VK_FORMAT_R8G8B8A8_SSCALED:
3711                             case VK_FORMAT_R8G8B8A8_USCALED:
3712                                 format = fuchsia_hardware_goldfish::wire::ColorBufferFormatType::
3713                                         kRgba;
3714                                 break;
3715                             case VK_FORMAT_R8_UNORM:
3716                             case VK_FORMAT_R8_UINT:
3717                             case VK_FORMAT_R8_USCALED:
3718                             case VK_FORMAT_R8_SNORM:
3719                             case VK_FORMAT_R8_SINT:
3720                             case VK_FORMAT_R8_SSCALED:
3721                             case VK_FORMAT_R8_SRGB:
3722                                 format = fuchsia_hardware_goldfish::wire::ColorBufferFormatType::
3723                                         kLuminance;
3724                                 break;
3725                             case VK_FORMAT_R8G8_UNORM:
3726                             case VK_FORMAT_R8G8_UINT:
3727                             case VK_FORMAT_R8G8_USCALED:
3728                             case VK_FORMAT_R8G8_SNORM:
3729                             case VK_FORMAT_R8G8_SINT:
3730                             case VK_FORMAT_R8G8_SSCALED:
3731                             case VK_FORMAT_R8G8_SRGB:
3732                                 format =
3733                                         fuchsia_hardware_goldfish::wire::ColorBufferFormatType::kRg;
3734                                 break;
3735                             default:
3736                                 ALOGE("Unsupported format: %d",
3737                                       pImageCreateInfo->format);
3738                                 abort();
3739                         }
3740 
3741                         fidl::Arena arena;
3742                         fuchsia_hardware_goldfish::wire::CreateColorBuffer2Params createParams(
3743                                 arena);
3744                         createParams.set_width(pImageCreateInfo->extent.width)
3745                                 .set_height(pImageCreateInfo->extent.height)
3746                                 .set_format(format)
3747                                 .set_memory_property(fuchsia_hardware_goldfish::wire::
3748                                                              kMemoryPropertyDeviceLocal);
3749 
3750                         auto result = mControlDevice->CreateColorBuffer2(std::move(vmo_copy),
3751                                                                          std::move(createParams));
3752                         if (!result.ok() || result->res != ZX_OK) {
3753                             if (result.ok() &&
3754                                 result->res == ZX_ERR_ALREADY_EXISTS) {
3755                                 ALOGD("CreateColorBuffer: color buffer already "
3756                                       "exists\n");
3757                             } else {
3758                                 ALOGE("CreateColorBuffer failed: %d:%d",
3759                                       result.status(),
3760                                       GET_STATUS_SAFE(result, res));
3761                                 abort();
3762                             }
3763                         }
3764                     }
3765                 }
3766 
3767                 if (pBufferConstraintsInfo) {
3768                     fidl::Arena arena;
3769                     fuchsia_hardware_goldfish::wire::CreateBuffer2Params createParams(arena);
3770                     createParams
3771                         .set_size(arena,
3772                                   pBufferConstraintsInfo->createInfo.size)
3773                         .set_memory_property(fuchsia_hardware_goldfish::wire::
3774                                                  kMemoryPropertyDeviceLocal);
3775 
3776                     auto result =
3777                         mControlDevice->CreateBuffer2(std::move(vmo_copy), std::move(createParams));
3778                     if (!result.ok() || result->is_error()) {
3779                         ALOGE("CreateBuffer2 failed: %d:%d", result.status(),
3780                               GET_STATUS_SAFE(result, error_value()));
3781                         abort();
3782                     }
3783                 }
3784             } else {
3785                 ALOGW("Dedicated image / buffer not available. Cannot create "
3786                       "BufferCollection to export VMOs.");
3787                 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3788             }
3789         }
3790 
3791         if (vmo_handle != ZX_HANDLE_INVALID) {
3792             zx::vmo vmo_copy;
3793             zx_status_t status = zx_handle_duplicate(vmo_handle,
3794                                                      ZX_RIGHT_SAME_RIGHTS,
3795                                                      vmo_copy.reset_and_get_address());
3796             if (status != ZX_OK) {
3797                 ALOGE("Failed to duplicate VMO: %d", status);
3798                 abort();
3799             }
3801 
3802             auto result = mControlDevice->GetBufferHandle(std::move(vmo_copy));
3803             if (!result.ok() || result->res != ZX_OK) {
3804                 ALOGE("GetBufferHandle failed: %d:%d", result.status(),
3805                       GET_STATUS_SAFE(result, res));
3806             } else {
3807                 fuchsia_hardware_goldfish::wire::BufferHandleType
3808                     handle_type = result->type;
3809                 uint32_t buffer_handle = result->id;
3810 
3811                 if (handle_type == fuchsia_hardware_goldfish::wire::
3812                                        BufferHandleType::kBuffer) {
3813                     importBufferInfo.buffer = buffer_handle;
3814                     vk_append_struct(&structChainIter, &importBufferInfo);
3815                 } else {
3816                     importCbInfo.colorBuffer = buffer_handle;
3817                     vk_append_struct(&structChainIter, &importCbInfo);
3818                 }
3819             }
3820         }
3821 #endif
3822 
3823         if (ahw || !requestedMemoryIsHostVisible) {
3824             input_result =
3825                 enc->vkAllocateMemory(
3826                     device, &finalAllocInfo, pAllocator, pMemory, true /* do lock */);
3827 
3828             if (input_result != VK_SUCCESS) {
3829                 _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(input_result);
3830             }
3831 
3832             VkDeviceSize allocationSize = finalAllocInfo.allocationSize;
3833             setDeviceMemoryInfo(
3834                 device, *pMemory,
3835                 0, nullptr,
3836                 finalAllocInfo.memoryTypeIndex,
3837                 ahw,
3838                 isImport,
3839                 vmo_handle);
3840 
3841             _RETURN_SCUCCESS_WITH_DEVICE_MEMORY_REPORT;
3842         }
3843 
3844 #ifdef VK_USE_PLATFORM_FUCHSIA
3845         if (vmo_handle != ZX_HANDLE_INVALID) {
3846             input_result = enc->vkAllocateMemory(device, &finalAllocInfo, pAllocator, pMemory, true /* do lock */);
                 if (input_result != VK_SUCCESS) {
                     _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(input_result);
                 }
3847 
3848             // Get VMO handle rights, and only use allowed rights to map the
3849             // host memory.
3850             zx_info_handle_basic handle_info;
3851             zx_status_t status = zx_object_get_info(vmo_handle, ZX_INFO_HANDLE_BASIC, &handle_info,
3852                                         sizeof(handle_info), nullptr, nullptr);
3853             if (status != ZX_OK) {
3854                 ALOGE("%s: cannot get vmo object info: vmo = %u status: %d.", __func__, vmo_handle,
3855                       status);
3856                 return VK_ERROR_OUT_OF_HOST_MEMORY;
3857             }
3858 
3859             zx_vm_option_t vm_permission = 0u;
3860             vm_permission |= (handle_info.rights & ZX_RIGHT_READ) ? ZX_VM_PERM_READ : 0;
3861             vm_permission |= (handle_info.rights & ZX_RIGHT_WRITE) ? ZX_VM_PERM_WRITE : 0;
3862 
3863             zx_vaddr_t addr;
3864             status = zx_vmar_map(zx_vmar_root_self(), vm_permission, 0, vmo_handle, 0,
3865                 finalAllocInfo.allocationSize, &addr);
3866             if (status != ZX_OK) {
3867                 ALOGE("%s: cannot map vmar: status %d.", __func__, status);
3868                 return VK_ERROR_OUT_OF_HOST_MEMORY;
3869             }
3870 
3871             setDeviceMemoryInfo(device, *pMemory,
3872                 finalAllocInfo.allocationSize,
3873                 reinterpret_cast<uint8_t*>(addr), finalAllocInfo.memoryTypeIndex,
3874                 /*ahw=*/nullptr, isImport, vmo_handle);
3875             return VK_SUCCESS;
3876         }
3877 #endif
3878 
3879         // Host visible memory with direct mapping
3880         VkResult result = getCoherentMemory(&finalAllocInfo, enc, device, pMemory);
3881         if (result != VK_SUCCESS) {
3882             return result;
3883         }
3884 
3885         _RETURN_SCUCCESS_WITH_DEVICE_MEMORY_REPORT;
3886     }
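         // The allocation paths above, in order:
         //   1. AHardwareBuffer import or non-host-visible memory: plain host
         //      vkAllocateMemory, with setDeviceMemoryInfo() recording the
         //      ahw / import / vmo state.
         //   2. Fuchsia VMO-backed memory: host vkAllocateMemory, then the VMO
         //      is mapped into the guest via zx_vmar_map() with rights-derived
         //      permissions.
         //   3. Remaining host-visible requests: getCoherentMemory()
         //      sub-allocates from a directly mapped coherent block.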
3887 
3888     CoherentMemoryPtr freeCoherentMemoryLocked(VkDeviceMemory memory, VkDeviceMemory_Info& info) {
3889         if (info.coherentMemory && info.ptr) {
3890             if (info.coherentMemory->getDeviceMemory() != memory) {
3891                 delete_goldfish_VkDeviceMemory(memory);
3892             }
3893 
3895             info.coherentMemory->release(info.ptr);
3896             info.ptr = nullptr;
3898 
3899             return std::move(info.coherentMemory);
3900         }
3901 
3902         return nullptr;
3903     }
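         // Usage sketch (mirrors on_vkFreeMemory below): mLock must be held
         // when calling, and the returned CoherentMemoryPtr must be dropped
         // only after releasing the lock, since destroying a CoherentMemory
         // re-enters VkEncoder:
         //
         //   auto coherentMemory = freeCoherentMemoryLocked(memory, info);
         //   lock.unlock();
         //   coherentMemory = nullptr;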
3904 
3905     void on_vkFreeMemory(
3906         void* context,
3907         VkDevice device,
3908         VkDeviceMemory memory,
3909         const VkAllocationCallbacks* pAllocator) {
3910 
3911         AutoLock<RecursiveLock> lock(mLock);
3912 
3913         auto it = info_VkDeviceMemory.find(memory);
3914         if (it == info_VkDeviceMemory.end()) return;
3915         auto& info = it->second;
3916         uint64_t memoryObjectId = (uint64_t)(void*)memory;
3917 #ifdef VK_USE_PLATFORM_ANDROID_KHR
3918         if (info.ahw) {
3919             memoryObjectId = getAHardwareBufferId(info.ahw);
3920         }
3921 #endif
3922 
3923         emitDeviceMemoryReport(info_VkDevice[device],
3924                                info.imported ? VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_UNIMPORT_EXT
3925                                              : VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_FREE_EXT,
3926                                memoryObjectId, 0 /* size */, VK_OBJECT_TYPE_DEVICE_MEMORY,
3927                                (uint64_t)(void*)memory);
3928 
3929 #ifdef VK_USE_PLATFORM_FUCHSIA
3930         if (info.vmoHandle && info.ptr) {
3931             zx_status_t status = zx_vmar_unmap(
3932                 zx_vmar_root_self(), reinterpret_cast<zx_vaddr_t>(info.ptr), info.allocationSize);
3933             if (status != ZX_OK) {
3934                 ALOGE("%s: Cannot unmap ptr: status %d", __func__, status);
3935             }
3936             info.ptr = nullptr;
3937         }
3938 #endif
3939 
3940         if (!info.coherentMemory) {
3941             lock.unlock();
3942             VkEncoder* enc = (VkEncoder*)context;
3943             enc->vkFreeMemory(device, memory, pAllocator, true /* do lock */);
3944             return;
3945         }
3946 
3947         auto coherentMemory = freeCoherentMemoryLocked(memory, info);
3948 
3949         // We have to release the lock before we could possibly free a
3950         // CoherentMemory, because that will call into VkEncoder, which
3951         // shouldn't be called when the lock is held.
3952         lock.unlock();
3953         coherentMemory = nullptr;
3954     }
3955 
3956     VkResult on_vkMapMemory(void* context, VkResult host_result, VkDevice device,
3957                             VkDeviceMemory memory, VkDeviceSize offset, VkDeviceSize size,
3958                             VkMemoryMapFlags, void** ppData) {
3959         if (host_result != VK_SUCCESS) {
3960             ALOGE("%s: Host failed to map\n", __func__);
3961             return host_result;
3962         }
3963 
3964         AutoLock<RecursiveLock> lock(mLock);
3965 
3966         auto it = info_VkDeviceMemory.find(memory);
3967         if (it == info_VkDeviceMemory.end()) {
3968             ALOGE("%s: Could not find this device memory\n", __func__);
3969             return VK_ERROR_MEMORY_MAP_FAILED;
3970         }
3971 
3972         auto& info = it->second;
3973 
3974         if (info.blobId && !info.coherentMemory && !mCaps.params[kParamCreateGuestHandle]) {
3975             VkEncoder* enc = (VkEncoder*)context;
3976             VirtGpuBlobMappingPtr mapping;
3977             VirtGpuDevice& instance = VirtGpuDevice::getInstance();
3978 
3979             uint64_t blobOffset;  // suballocation offset, distinct from the |offset| parameter
3980             uint8_t* ptr;
3981 
3982             VkResult vkResult = enc->vkGetBlobGOOGLE(device, memory, false);
3983             if (vkResult != VK_SUCCESS) return vkResult;
3984 
3985             struct VirtGpuCreateBlob createBlob = {};
3986             createBlob.blobMem = kBlobMemHost3d;
3987             createBlob.flags = kBlobFlagMappable;
3988             createBlob.blobId = info.blobId;
3989             createBlob.size = info.coherentMemorySize;
3990 
3991             auto blob = instance.createBlob(createBlob);
3992             if (!blob) {
3993                 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3994             }
3995 
3996             mapping = blob->createMapping();
3997             if (!mapping) {
3998                 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3999             }
4000 
4001             auto coherentMemory =
4002                 std::make_shared<CoherentMemory>(mapping, createBlob.size, device, memory);
4003 
4004             coherentMemory->subAllocate(info.allocationSize, &ptr, blobOffset);
4005 
4006             info.coherentMemoryOffset = blobOffset;
4007             info.coherentMemory = coherentMemory;
4008             info.ptr = ptr;
4009         }
4010 
4011         if (!info.ptr) {
4012             ALOGE("%s: ptr null\n", __func__);
4013             return VK_ERROR_MEMORY_MAP_FAILED;
4014         }
4015 
4016         if (size != VK_WHOLE_SIZE &&
4017             (offset + size > info.allocationSize)) {
4018             ALOGE("%s: size is too big. alloc size 0x%llx while we wanted offset 0x%llx size 0x%llx total 0x%llx\n", __func__,
4019                     (unsigned long long)info.allocationSize,
4020                     (unsigned long long)offset,
4021                     (unsigned long long)size,
4022                     (unsigned long long)(offset + size));
4023             return VK_ERROR_MEMORY_MAP_FAILED;
4024         }
4025 
4026         *ppData = info.ptr + offset;
4027 
4028         return host_result;
4029     }
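         // Caller-side sketch (standard Vulkan usage; variable names
         // hypothetical):
         //
         //   void* data = nullptr;
         //   vkMapMemory(device, memory, 0 /* offset */, VK_WHOLE_SIZE, 0, &data);
         //
         // With the path above, |data| is info.ptr + offset inside an
         // already-mapped coherent block; no new guest mapping is created.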
4030 
4031     void on_vkUnmapMemory(
4032         void*,
4033         VkDevice,
4034         VkDeviceMemory) {
4035         // no-op
4036     }
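         // Guest-side unmap is a no-op by design: host-visible memory is
         // sub-allocated out of persistently mapped blocks (see on_vkMapMemory
         // above), and mappings are torn down only when the memory is freed
         // (freeCoherentMemoryLocked, or the Fuchsia unmap in on_vkFreeMemory).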
4037 
4038     void transformExternalResourceMemoryDedicatedRequirementsForGuest(
4039         VkMemoryDedicatedRequirements* dedicatedReqs) {
4040         dedicatedReqs->prefersDedicatedAllocation = VK_TRUE;
4041         dedicatedReqs->requiresDedicatedAllocation = VK_TRUE;
4042     }
4043 
4044     void transformImageMemoryRequirementsForGuestLocked(
4045         VkImage image,
4046         VkMemoryRequirements* reqs) {
4047 
4048         setMemoryRequirementsForSysmemBackedImage(image, reqs);
4049     }
4050 
4051     void transformImageMemoryRequirements2ForGuest(
4052         VkImage image,
4053         VkMemoryRequirements2* reqs2) {
4054 
4055         AutoLock<RecursiveLock> lock(mLock);
4056 
4057         auto it = info_VkImage.find(image);
4058         if (it == info_VkImage.end()) return;
4059 
4060         auto& info = it->second;
4061 
4062         if (!info.external ||
4063             !info.externalCreateInfo.handleTypes) {
4064             setMemoryRequirementsForSysmemBackedImage(image, &reqs2->memoryRequirements);
4065             return;
4066         }
4067 
4068         setMemoryRequirementsForSysmemBackedImage(image, &reqs2->memoryRequirements);
4069 
4070         VkMemoryDedicatedRequirements* dedicatedReqs =
4071             vk_find_struct<VkMemoryDedicatedRequirements>(reqs2);
4072 
4073         if (!dedicatedReqs) return;
4074 
4075         transformExternalResourceMemoryDedicatedRequirementsForGuest(
4076             dedicatedReqs);
4077     }
4078 
4079     void transformBufferMemoryRequirements2ForGuest(
4080         VkBuffer buffer,
4081         VkMemoryRequirements2* reqs2) {
4082 
4083         AutoLock<RecursiveLock> lock(mLock);
4084 
4085         auto it = info_VkBuffer.find(buffer);
4086         if (it == info_VkBuffer.end()) return;
4087 
4088         auto& info = it->second;
4089 
4090         if (!info.external ||
4091             !info.externalCreateInfo.handleTypes) {
4092             return;
4093         }
4094 
4095         VkMemoryDedicatedRequirements* dedicatedReqs =
4096             vk_find_struct<VkMemoryDedicatedRequirements>(reqs2);
4097 
4098         if (!dedicatedReqs) return;
4099 
4100         transformExternalResourceMemoryDedicatedRequirementsForGuest(
4101             dedicatedReqs);
4102     }
4103 
4104     VkResult on_vkCreateImage(
4105         void* context, VkResult,
4106         VkDevice device, const VkImageCreateInfo *pCreateInfo,
4107         const VkAllocationCallbacks *pAllocator,
4108         VkImage *pImage) {
4109         VkEncoder* enc = (VkEncoder*)context;
4110 
4111         VkImageCreateInfo localCreateInfo = vk_make_orphan_copy(*pCreateInfo);
4112         vk_struct_chain_iterator structChainIter = vk_make_chain_iterator(&localCreateInfo);
4113         VkExternalMemoryImageCreateInfo localExtImgCi;
4114 
4115         const VkExternalMemoryImageCreateInfo* extImgCiPtr =
4116             vk_find_struct<VkExternalMemoryImageCreateInfo>(pCreateInfo);
4117         if (extImgCiPtr) {
4118             localExtImgCi = vk_make_orphan_copy(*extImgCiPtr);
4119             vk_append_struct(&structChainIter, &localExtImgCi);
4120         }
4121 
4122 #ifdef VK_USE_PLATFORM_ANDROID_KHR
4123         VkNativeBufferANDROID localAnb;
4124         const VkNativeBufferANDROID* anbInfoPtr =
4125             vk_find_struct<VkNativeBufferANDROID>(pCreateInfo);
4126         if (anbInfoPtr) {
4127             localAnb = vk_make_orphan_copy(*anbInfoPtr);
4128             vk_append_struct(&structChainIter, &localAnb);
4129         }
4130 
4131         VkExternalFormatANDROID localExtFormatAndroid;
4132         const VkExternalFormatANDROID* extFormatAndroidPtr =
4133             vk_find_struct<VkExternalFormatANDROID>(pCreateInfo);
4134         if (extFormatAndroidPtr) {
4135             localExtFormatAndroid = vk_make_orphan_copy(*extFormatAndroidPtr);
4136 
4137             // Do not append VkExternalFormatANDROID to the chain;
4138             // instead, replace the format in localCreateInfo with the
4139             // corresponding Vulkan format.
4140             if (extFormatAndroidPtr->externalFormat) {
4141                 localCreateInfo.format =
4142                     vk_format_from_android(extFormatAndroidPtr->externalFormat);
4143                 if (localCreateInfo.format == VK_FORMAT_UNDEFINED)
4144                     return VK_ERROR_VALIDATION_FAILED_EXT;
4145             }
4146         }
4147 #endif
4148 
4149 #ifdef VK_USE_PLATFORM_FUCHSIA
4150         const VkBufferCollectionImageCreateInfoFUCHSIA* extBufferCollectionPtr =
4151             vk_find_struct<VkBufferCollectionImageCreateInfoFUCHSIA>(
4152                 pCreateInfo);
4153 
4154         bool isSysmemBackedMemory = false;
4155 
4156         if (extImgCiPtr &&
4157             (extImgCiPtr->handleTypes &
4158              VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA)) {
4159             isSysmemBackedMemory = true;
4160         }
4161 
4162         if (extBufferCollectionPtr) {
4163             const auto& collection = *reinterpret_cast<
4164                 fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(
4165                 extBufferCollectionPtr->collection);
4166             uint32_t index = extBufferCollectionPtr->index;
4167             zx::vmo vmo;
4168 
4169             fuchsia_sysmem::wire::BufferCollectionInfo2 info;
4170 
4171             auto result = collection->WaitForBuffersAllocated();
4172             if (result.ok() && result->status == ZX_OK) {
4173                 info = std::move(result->buffer_collection_info);
4174                 if (index < info.buffer_count && info.settings.has_image_format_constraints) {
4175                     vmo = std::move(info.buffers[index].vmo);
4176                 }
4177             } else {
4178                 ALOGE("WaitForBuffersAllocated failed: %d %d", result.status(),
4179                       GET_STATUS_SAFE(result, status));
4180             }
4181 
4182             if (vmo.is_valid()) {
4183                 zx::vmo vmo_dup;
4184                 if (zx_status_t status = vmo.duplicate(ZX_RIGHT_SAME_RIGHTS, &vmo_dup);
4185                     status != ZX_OK) {
4186                     ALOGE("%s: zx_vmo_duplicate failed: %d", __func__, status);
4187                     abort();
4188                 }
4189 
4190                 auto buffer_handle_result = mControlDevice->GetBufferHandle(std::move(vmo_dup));
4191                 if (!buffer_handle_result.ok()) {
4192                     ALOGE("%s: GetBufferHandle FIDL error: %d", __func__,
4193                           buffer_handle_result.status());
4194                     abort();
4195                 }
4196                 if (buffer_handle_result.value().res == ZX_OK) {
4197                     // Buffer handle already exists.
4198                     // If it is a ColorBuffer, no-op; Otherwise return error.
4199                     if (buffer_handle_result.value().type !=
4200                         fuchsia_hardware_goldfish::wire::BufferHandleType::kColorBuffer) {
4201                         ALOGE("%s: BufferHandle %u is not a ColorBuffer", __func__,
4202                               buffer_handle_result.value().id);
4203                         return VK_ERROR_OUT_OF_HOST_MEMORY;
4204                     }
4205                 } else if (buffer_handle_result.value().res == ZX_ERR_NOT_FOUND) {
4206                     // Buffer handle not found. Create ColorBuffer based on buffer settings.
4207                     auto format =
4208                         info.settings.image_format_constraints.pixel_format.type ==
4209                                 fuchsia_sysmem::wire::PixelFormatType::kR8G8B8A8
4210                             ? fuchsia_hardware_goldfish::wire::ColorBufferFormatType::kRgba
4211                             : fuchsia_hardware_goldfish::wire::ColorBufferFormatType::kBgra;
4212 
4213                     uint32_t memory_property =
4214                         info.settings.buffer_settings.heap ==
4215                                 fuchsia_sysmem::wire::HeapType::kGoldfishDeviceLocal
4216                             ? fuchsia_hardware_goldfish::wire::kMemoryPropertyDeviceLocal
4217                             : fuchsia_hardware_goldfish::wire::kMemoryPropertyHostVisible;
4218 
4219                     fidl::Arena arena;
4220                     fuchsia_hardware_goldfish::wire::CreateColorBuffer2Params createParams(
4221                         arena);
4222                     createParams.set_width(
4223                             info.settings.image_format_constraints.min_coded_width)
4224                         .set_height(
4225                             info.settings.image_format_constraints.min_coded_height)
4226                         .set_format(format)
4227                         .set_memory_property(memory_property);
4228 
4229                     auto result =
4230                         mControlDevice->CreateColorBuffer2(std::move(vmo), std::move(createParams));
4231                     if (result.ok() && result->res == ZX_ERR_ALREADY_EXISTS) {
4232                         ALOGD(
4233                             "CreateColorBuffer: color buffer already exists\n");
4234                     } else if (!result.ok() || result->res != ZX_OK) {
4235                         ALOGE("CreateColorBuffer failed: %d:%d", result.status(),
4236                             GET_STATUS_SAFE(result, res));
4237                     }
4238                 }
4239 
4240                 if (info.settings.buffer_settings.heap ==
4241                     fuchsia_sysmem::wire::HeapType::kGoldfishHostVisible) {
4242                     ALOGD(
4243                         "%s: Image uses host visible memory heap; set tiling "
4244                         "to linear to match host ImageCreateInfo",
4245                         __func__);
4246                     localCreateInfo.tiling = VK_IMAGE_TILING_LINEAR;
4247                 }
4248             }
4249             isSysmemBackedMemory = true;
4250         }
4251 
4252         if (isSysmemBackedMemory) {
4253             localCreateInfo.flags |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
4254         }
4255 #endif
4256 
4257         VkResult res;
4258         VkMemoryRequirements memReqs;
4259 
4260         if (supportsCreateResourcesWithRequirements()) {
4261             res = enc->vkCreateImageWithRequirementsGOOGLE(device, &localCreateInfo, pAllocator, pImage, &memReqs, true /* do lock */);
4262         } else {
4263             res = enc->vkCreateImage(device, &localCreateInfo, pAllocator, pImage, true /* do lock */);
4264         }
4265 
4266         if (res != VK_SUCCESS) return res;
4267 
4268         AutoLock<RecursiveLock> lock(mLock);
4269 
4270         auto it = info_VkImage.find(*pImage);
4271         if (it == info_VkImage.end()) return VK_ERROR_INITIALIZATION_FAILED;
4272 
4273         auto& info = it->second;
4274 
4275         info.device = device;
4276         info.createInfo = *pCreateInfo;
4277         info.createInfo.pNext = nullptr;
4278 
4279 #ifdef VK_USE_PLATFORM_ANDROID_KHR
4280         if (extFormatAndroidPtr && extFormatAndroidPtr->externalFormat) {
4281             info.hasExternalFormat = true;
4282             info.androidFormat = extFormatAndroidPtr->externalFormat;
4283         }
4284 #endif  // VK_USE_PLATFORM_ANDROID_KHR
4285 
4286         if (supportsCreateResourcesWithRequirements()) {
4287             info.baseRequirementsKnown = true;
4288         }
4289 
4290         if (extImgCiPtr) {
4291             info.external = true;
4292             info.externalCreateInfo = *extImgCiPtr;
4293         }
4294 
4295 #ifdef VK_USE_PLATFORM_FUCHSIA
4296         if (isSysmemBackedMemory) {
4297             info.isSysmemBackedMemory = true;
4298         }
4299 #endif
4300 
4301 // Delete the `protocolVersion` check once goldfish drivers are gone.
4302 #ifdef VK_USE_PLATFORM_ANDROID_KHR
4303         if (mCaps.gfxstreamCapset.colorBufferMemoryIndex == 0xFFFFFFFF) {
4304             mCaps.gfxstreamCapset.colorBufferMemoryIndex =
4305                     getColorBufferMemoryIndex(context, device);
4306         }
4307         if (extImgCiPtr &&
4308             (extImgCiPtr->handleTypes &
4309              VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID)) {
4310             updateMemoryTypeBits(&memReqs.memoryTypeBits,
4311                                  mCaps.gfxstreamCapset.colorBufferMemoryIndex);
4312         }
4313 #endif
4314 
4315         if (info.baseRequirementsKnown) {
4316             transformImageMemoryRequirementsForGuestLocked(*pImage, &memReqs);
4317             info.baseRequirements = memReqs;
4318         }
4319         return res;
4320     }
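         // Create-side sketch of the external-memory chain handled above
         // (standard Vulkan structs; the handle type is a hypothetical
         // example):
         //
         //   VkExternalMemoryImageCreateInfo extMemCi = {
         //       VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO, nullptr,
         //       VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID};
         //   imageCreateInfo.pNext = &extMemCi;
         //   vkCreateImage(device, &imageCreateInfo, nullptr, &image);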
4321 
4322     VkResult on_vkCreateSamplerYcbcrConversion(
4323         void* context, VkResult,
4324         VkDevice device,
4325         const VkSamplerYcbcrConversionCreateInfo* pCreateInfo,
4326         const VkAllocationCallbacks* pAllocator,
4327         VkSamplerYcbcrConversion* pYcbcrConversion) {
4328 
4329         VkSamplerYcbcrConversionCreateInfo localCreateInfo = vk_make_orphan_copy(*pCreateInfo);
4330 
4331 #ifdef VK_USE_PLATFORM_ANDROID_KHR
4332         const VkExternalFormatANDROID* extFormatAndroidPtr =
4333             vk_find_struct<VkExternalFormatANDROID>(pCreateInfo);
4334         if (extFormatAndroidPtr) {
4335             if (extFormatAndroidPtr->externalFormat == AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM) {
4336                 // We don't support external formats on host and it causes RGB565
4337                 // to fail in CtsGraphicsTestCases android.graphics.cts.BasicVulkanGpuTest
4338                 // when passed as an external format.
4339                 // We may consider doing this for all external formats.
4340                 // See b/134771579.
4341                 *pYcbcrConversion = VK_YCBCR_CONVERSION_DO_NOTHING;
4342                 return VK_SUCCESS;
4343             } else if (extFormatAndroidPtr->externalFormat) {
4344                 localCreateInfo.format =
4345                     vk_format_from_android(extFormatAndroidPtr->externalFormat);
4346             }
4347         }
4348 #endif
4349 
4350         VkEncoder* enc = (VkEncoder*)context;
4351         VkResult res = enc->vkCreateSamplerYcbcrConversion(
4352             device, &localCreateInfo, pAllocator, pYcbcrConversion, true /* do lock */);
4353 
4354         if (*pYcbcrConversion == VK_YCBCR_CONVERSION_DO_NOTHING) {
4355             ALOGE("FATAL: vkCreateSamplerYcbcrConversion returned a reserved value (VK_YCBCR_CONVERSION_DO_NOTHING)");
4356             abort();
4357         }
4358         return res;
4359     }
4360 
4361     void on_vkDestroySamplerYcbcrConversion(
4362         void* context,
4363         VkDevice device,
4364         VkSamplerYcbcrConversion ycbcrConversion,
4365         const VkAllocationCallbacks* pAllocator) {
4366         VkEncoder* enc = (VkEncoder*)context;
4367         if (ycbcrConversion != VK_YCBCR_CONVERSION_DO_NOTHING) {
4368             enc->vkDestroySamplerYcbcrConversion(device, ycbcrConversion, pAllocator, true /* do lock */);
4369         }
4370     }
4371 
4372     VkResult on_vkCreateSamplerYcbcrConversionKHR(
4373         void* context, VkResult,
4374         VkDevice device,
4375         const VkSamplerYcbcrConversionCreateInfo* pCreateInfo,
4376         const VkAllocationCallbacks* pAllocator,
4377         VkSamplerYcbcrConversion* pYcbcrConversion) {
4378 
4379         VkSamplerYcbcrConversionCreateInfo localCreateInfo = vk_make_orphan_copy(*pCreateInfo);
4380 
4381 #if defined(VK_USE_PLATFORM_ANDROID_KHR)
4382         const VkExternalFormatANDROID* extFormatAndroidPtr =
4383             vk_find_struct<VkExternalFormatANDROID>(pCreateInfo);
4384         if (extFormatAndroidPtr) {
4385             if (extFormatAndroidPtr->externalFormat == AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM) {
4386                 // We don't support external formats on host and it causes RGB565
4387                 // to fail in CtsGraphicsTestCases android.graphics.cts.BasicVulkanGpuTest
4388                 // when passed as an external format.
4389                 // We may consider doing this for all external formats.
4390                 // See b/134771579.
4391                 *pYcbcrConversion = VK_YCBCR_CONVERSION_DO_NOTHING;
4392                 return VK_SUCCESS;
4393             } else if (extFormatAndroidPtr->externalFormat) {
4394                 localCreateInfo.format =
4395                     vk_format_from_android(extFormatAndroidPtr->externalFormat);
4396             }
4397         }
4398 #endif
4399 
4400         VkEncoder* enc = (VkEncoder*)context;
4401         VkResult res = enc->vkCreateSamplerYcbcrConversionKHR(
4402             device, &localCreateInfo, pAllocator, pYcbcrConversion, true /* do lock */);
4403 
4404         if (*pYcbcrConversion == VK_YCBCR_CONVERSION_DO_NOTHING) {
4405             ALOGE("FATAL: vkCreateSamplerYcbcrConversionKHR returned a reserved value (VK_YCBCR_CONVERSION_DO_NOTHING)");
4406             abort();
4407         }
4408         return res;
4409     }
4410 
4411     void on_vkDestroySamplerYcbcrConversionKHR(
4412         void* context,
4413         VkDevice device,
4414         VkSamplerYcbcrConversion ycbcrConversion,
4415         const VkAllocationCallbacks* pAllocator) {
4416         VkEncoder* enc = (VkEncoder*)context;
4417         if (ycbcrConversion != VK_YCBCR_CONVERSION_DO_NOTHING) {
4418             enc->vkDestroySamplerYcbcrConversionKHR(device, ycbcrConversion, pAllocator, true /* do lock */);
4419         }
4420     }
4421 
4422     VkResult on_vkCreateSampler(
4423         void* context, VkResult,
4424         VkDevice device,
4425         const VkSamplerCreateInfo* pCreateInfo,
4426         const VkAllocationCallbacks* pAllocator,
4427         VkSampler* pSampler) {
4428         VkSamplerCreateInfo localCreateInfo = vk_make_orphan_copy(*pCreateInfo);
4429         vk_struct_chain_iterator structChainIter = vk_make_chain_iterator(&localCreateInfo);
4430 
4431 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(VK_USE_PLATFORM_FUCHSIA)
4432         VkSamplerYcbcrConversionInfo localVkSamplerYcbcrConversionInfo;
4433         const VkSamplerYcbcrConversionInfo* samplerYcbcrConversionInfo =
4434             vk_find_struct<VkSamplerYcbcrConversionInfo>(pCreateInfo);
4435         if (samplerYcbcrConversionInfo) {
4436             if (samplerYcbcrConversionInfo->conversion != VK_YCBCR_CONVERSION_DO_NOTHING) {
4437                 localVkSamplerYcbcrConversionInfo =
4438                     vk_make_orphan_copy(*samplerYcbcrConversionInfo);
4439                 vk_append_struct(&structChainIter, &localVkSamplerYcbcrConversionInfo);
4440             }
4441         }
4442 
4443         VkSamplerCustomBorderColorCreateInfoEXT localVkSamplerCustomBorderColorCreateInfo;
4444         const VkSamplerCustomBorderColorCreateInfoEXT* samplerCustomBorderColorCreateInfo =
4445             vk_find_struct<VkSamplerCustomBorderColorCreateInfoEXT>(pCreateInfo);
4446         if (samplerCustomBorderColorCreateInfo) {
4447             localVkSamplerCustomBorderColorCreateInfo =
4448                 vk_make_orphan_copy(*samplerCustomBorderColorCreateInfo);
4449             vk_append_struct(&structChainIter, &localVkSamplerCustomBorderColorCreateInfo);
4450         }
4451 #endif
4452 
4453         VkEncoder* enc = (VkEncoder*)context;
4454         return enc->vkCreateSampler(device, &localCreateInfo, pAllocator, pSampler, true /* do lock */);
4455     }
4456 
4457     void on_vkGetPhysicalDeviceExternalBufferProperties(
4458         void* context,
4459         VkPhysicalDevice physicalDevice,
4460         const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo,
4461         VkExternalBufferProperties* pExternalBufferProperties) {
4462         VkEncoder* enc = (VkEncoder*)context;
4463         // b/299520213
4464         // We declared the blob format as not supported.
4465         if (ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->grallocHelper()->treatBlobAsImage()
4466             && pExternalBufferInfo->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID) {
4467             pExternalBufferProperties->externalMemoryProperties.externalMemoryFeatures = 0;
4468             pExternalBufferProperties->externalMemoryProperties.exportFromImportedHandleTypes = 0;
4469             pExternalBufferProperties->externalMemoryProperties.compatibleHandleTypes = 0;
4470             return;
4471         }
4472         uint32_t supportedHandleType = 0;
4473 #ifdef VK_USE_PLATFORM_FUCHSIA
4474         supportedHandleType |= VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA;
4475 #endif
4476 #ifdef VK_USE_PLATFORM_ANDROID_KHR
4477         supportedHandleType |= VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
4478                 VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
4479 #endif
4480         if (supportedHandleType) {
4481             // 0 is a valid handleType here, so we can't just test against
                 // zero; require all requested bits to be supported instead.
4482             if (pExternalBufferInfo->handleType != (pExternalBufferInfo->handleType & supportedHandleType)) {
4483                 return;
4484             }
4485         }
4486         enc->vkGetPhysicalDeviceExternalBufferProperties(physicalDevice, pExternalBufferInfo, pExternalBufferProperties, true /* do lock */);
4487         transformImpl_VkExternalMemoryProperties_fromhost(&pExternalBufferProperties->externalMemoryProperties, 0);
4488     }
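         // Query sketch (standard Vulkan usage; the handle type is a
         // hypothetical example):
         //
         //   VkPhysicalDeviceExternalBufferInfo bufInfo = {
         //       VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO, nullptr,
         //       0, VK_BUFFER_USAGE_TRANSFER_DST_BIT,
         //       VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT};
         //   VkExternalBufferProperties props = {
         //       VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES};
         //   vkGetPhysicalDeviceExternalBufferProperties(physicalDevice, &bufInfo, &props);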
4489 
4490     void on_vkGetPhysicalDeviceExternalFenceProperties(
4491         void* context,
4492         VkPhysicalDevice physicalDevice,
4493         const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo,
4494         VkExternalFenceProperties* pExternalFenceProperties) {
4495 
4496         (void)context;
4497         (void)physicalDevice;
4498 
4499         pExternalFenceProperties->exportFromImportedHandleTypes = 0;
4500         pExternalFenceProperties->compatibleHandleTypes = 0;
4501         pExternalFenceProperties->externalFenceFeatures = 0;
4502 
4503         bool syncFd =
4504             pExternalFenceInfo->handleType &
4505             VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
4506 
4507         if (!syncFd) {
4508             return;
4509         }
4510 
4511 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
4512         pExternalFenceProperties->exportFromImportedHandleTypes =
4513             VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
4514         pExternalFenceProperties->compatibleHandleTypes =
4515             VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
4516         pExternalFenceProperties->externalFenceFeatures =
4517             VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT |
4518             VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT;
4519 
4520         D("%s: asked for sync fd, set the features\n", __func__);
4521 #endif
4522     }
4523 
4524     VkResult on_vkCreateFence(
4525         void* context,
4526         VkResult input_result,
4527         VkDevice device,
4528         const VkFenceCreateInfo* pCreateInfo,
4529         const VkAllocationCallbacks* pAllocator, VkFence* pFence) {
4530 
4531         VkEncoder* enc = (VkEncoder*)context;
4532         VkFenceCreateInfo finalCreateInfo = *pCreateInfo;
4533 
4534         const VkExportFenceCreateInfo* exportFenceInfoPtr =
4535             vk_find_struct<VkExportFenceCreateInfo>(pCreateInfo);
4536 
4537 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
4538         bool exportSyncFd =
4539             exportFenceInfoPtr &&
4540             (exportFenceInfoPtr->handleTypes &
4541              VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT);
4542 #endif
4543 
4544         input_result = enc->vkCreateFence(
4545             device, &finalCreateInfo, pAllocator, pFence, true /* do lock */);
4546 
4547         if (input_result != VK_SUCCESS) return input_result;
4548 
4549 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
4550         if (exportSyncFd) {
4551             if (!mFeatureInfo->hasVirtioGpuNativeSync) {
4552                 ALOGV("%s: ensure sync device\n", __func__);
4553                 ensureSyncDeviceFd();
4554             }
4555 
4556             ALOGV("%s: getting fence info\n", __func__);
4557             AutoLock<RecursiveLock> lock(mLock);
4558             auto it = info_VkFence.find(*pFence);
4559 
4560             if (it == info_VkFence.end())
4561                 return VK_ERROR_INITIALIZATION_FAILED;
4562 
4563             auto& info = it->second;
4564 
4565             info.external = true;
4566             info.exportFenceCreateInfo = *exportFenceInfoPtr;
4567             ALOGV("%s: info set (fence still -1). fence: %p\n", __func__, (void*)(*pFence));
4568             // syncFd is still -1 because we expect user to explicitly
4569             // export it via vkGetFenceFdKHR
4570         }
4571 #endif
4572 
4573         return input_result;
4574     }
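         // App-side sketch that reaches the exportSyncFd path above (standard
         // Vulkan structs; values hypothetical):
         //
         //   VkExportFenceCreateInfo exportInfo = {
         //       VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO, nullptr,
         //       VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT};
         //   VkFenceCreateInfo createInfo = {
         //       VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, &exportInfo, 0};
         //   vkCreateFence(device, &createInfo, nullptr, &fence);
         //   // The sync fd stays -1 until exported via vkGetFenceFdKHR below.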
4575 
4576     void on_vkDestroyFence(
4577         void* context,
4578         VkDevice device,
4579         VkFence fence,
4580         const VkAllocationCallbacks* pAllocator) {
4581         VkEncoder* enc = (VkEncoder*)context;
4582         enc->vkDestroyFence(device, fence, pAllocator, true /* do lock */);
4583     }
4584 
4585     VkResult on_vkResetFences(
4586         void* context,
4587         VkResult,
4588         VkDevice device,
4589         uint32_t fenceCount,
4590         const VkFence* pFences) {
4591 
4592         VkEncoder* enc = (VkEncoder*)context;
4593         VkResult res = enc->vkResetFences(device, fenceCount, pFences, true /* do lock */);
4594 
4595         if (res != VK_SUCCESS) return res;
4596 
4597         if (!fenceCount) return res;
4598 
4599         // Permanence: temporary
4600         // on fence reset, close the fence fd
4601         // and act like we need to GetFenceFdKHR/ImportFenceFdKHR again
4602         AutoLock<RecursiveLock> lock(mLock);
4603         for (uint32_t i = 0; i < fenceCount; ++i) {
4604             VkFence fence = pFences[i];
4605             auto it = info_VkFence.find(fence);
                 if (it == info_VkFence.end()) continue;
4606             auto& info = it->second;
4607             if (!info.external) continue;
4608 
4609 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
4610             if (info.syncFd >= 0) {
4611                 ALOGV("%s: resetting fence. make fd -1\n", __func__);
4612                 goldfish_sync_signal(info.syncFd);
4613                 close(info.syncFd);
4614                 info.syncFd = -1;
4615             }
4616 #endif
4617         }
4618 
4619         return res;
4620     }
4621 
4622     VkResult on_vkImportFenceFdKHR(
4623         void* context,
4624         VkResult,
4625         VkDevice device,
4626         const VkImportFenceFdInfoKHR* pImportFenceFdInfo) {
4627 
4628         (void)context;
4629         (void)device;
4631 
4632         // Transference: copy
4633         // meaning dup() the incoming fd
4636 
4637         bool hasFence = pImportFenceFdInfo->fence != VK_NULL_HANDLE;
4638 
4639         if (!hasFence) return VK_ERROR_OUT_OF_HOST_MEMORY;
4640 
4641 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
4642 
4643         bool syncFdImport =
4644             pImportFenceFdInfo->handleType & VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
4645 
4646         if (!syncFdImport) {
4647             ALOGV("%s: VK_ERROR_OUT_OF_HOST_MEMORY: no sync fd import\n", __func__);
4648             return VK_ERROR_OUT_OF_HOST_MEMORY;
4649         }
4650 
4651         AutoLock<RecursiveLock> lock(mLock);
4652         auto it = info_VkFence.find(pImportFenceFdInfo->fence);
4653         if (it == info_VkFence.end()) {
4654             ALOGV("%s: VK_ERROR_OUT_OF_HOST_MEMORY: no fence info\n", __func__);
4655             return VK_ERROR_OUT_OF_HOST_MEMORY;
4656         }
4657 
4658         auto& info = it->second;
4659 
4660         if (info.syncFd >= 0) {
4661             ALOGV("%s: previous sync fd exists, close it\n", __func__);
4662             goldfish_sync_signal(info.syncFd);
4663             close(info.syncFd);
4664         }
4665 
4666         if (pImportFenceFdInfo->fd < 0) {
4667             ALOGV("%s: import -1, set to -1 and exit\n", __func__);
4668             info.syncFd = -1;
4669         } else {
4670             ALOGV("%s: import actual fd, dup and close()\n", __func__);
4671             info.syncFd = dup(pImportFenceFdInfo->fd);
4672             close(pImportFenceFdInfo->fd);
4673         }
4674         return VK_SUCCESS;
4675 #else
4676         return VK_ERROR_OUT_OF_HOST_MEMORY;
4677 #endif
4678     }
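         // Net effect of the import path above, for a hypothetical incoming
         // fd N: the fence ends up owning dup(N) while fd N itself is closed,
         // so the caller's handle is consumed exactly once, matching the
         // "copy" transference noted at the top of the function.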
4679 
4680     VkResult createFence(VkDevice device, uint64_t hostFenceHandle, int64_t& osHandle) {
4681         struct VirtGpuExecBuffer exec = { };
4682         struct gfxstreamCreateExportSyncVK exportSync = { };
4683         VirtGpuDevice& instance = VirtGpuDevice::getInstance();
4684 
4685         uint64_t hostDeviceHandle = get_host_u64_VkDevice(device);
4686 
4687         exportSync.hdr.opCode = GFXSTREAM_CREATE_EXPORT_SYNC_VK;
4688         exportSync.deviceHandleLo = (uint32_t)hostDeviceHandle;
4689         exportSync.deviceHandleHi = (uint32_t)(hostDeviceHandle >> 32);
4690         exportSync.fenceHandleLo = (uint32_t)hostFenceHandle;
4691         exportSync.fenceHandleHi = (uint32_t)(hostFenceHandle >> 32);
4692 
4693         exec.command = static_cast<void*>(&exportSync);
4694         exec.command_size = sizeof(exportSync);
4695         exec.flags = kFenceOut | kRingIdx;
4696         if (instance.execBuffer(exec, nullptr))
4697             return VK_ERROR_OUT_OF_HOST_MEMORY;
4698 
4699         osHandle = exec.handle.osHandle;
4700         return VK_SUCCESS;
4701     }
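         // Usage sketch (mirrors on_vkGetFenceFdKHR below; |fence| is
         // hypothetical):
         //
         //   int64_t osHandle;
         //   if (createFence(device, get_host_u64_VkFence(fence), osHandle) == VK_SUCCESS)
         //       *pFd = osHandle;  // a sync fd the app can poll or sync_wait() on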
4702 
4703     VkResult on_vkGetFenceFdKHR(
4704         void* context,
4705         VkResult,
4706         VkDevice device,
4707         const VkFenceGetFdInfoKHR* pGetFdInfo,
4708         int* pFd) {
4709 
4710         // export operation.
4711         // first check if fence is signaled
4712         // then if so, return -1
4713         // else, queue work
4714 
4715         VkEncoder* enc = (VkEncoder*)context;
4716 
4717         bool hasFence = pGetFdInfo->fence != VK_NULL_HANDLE;
4718 
4719         if (!hasFence) {
4720             ALOGV("%s: VK_ERROR_OUT_OF_HOST_MEMORY: no fence\n", __func__);
4721             return VK_ERROR_OUT_OF_HOST_MEMORY;
4722         }
4723 
4724 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
4725         bool syncFdExport =
4726             pGetFdInfo->handleType & VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
4727 
4728         if (!syncFdExport) {
4729             ALOGV("%s: VK_ERROR_OUT_OF_HOST_MEMORY: no sync fd fence\n", __func__);
4730             return VK_ERROR_OUT_OF_HOST_MEMORY;
4731         }
4732 
4733         VkResult currentFenceStatus = enc->vkGetFenceStatus(device, pGetFdInfo->fence, true /* do lock */);
4734 
4735         if (VK_ERROR_DEVICE_LOST == currentFenceStatus) { // Other error
4736             ALOGV("%s: VK_ERROR_DEVICE_LOST: Other error\n", __func__);
4737             *pFd = -1;
4738             return VK_ERROR_DEVICE_LOST;
4739         }
4740 
4741         if (VK_NOT_READY == currentFenceStatus || VK_SUCCESS == currentFenceStatus) {
4742             // Fence is valid. We also create a new sync fd for a signaled
4743             // fence, because ANGLE will use the returned fd directly to
4744             // implement eglDupNativeFenceFDANDROID, where -1 is only returned
4745             // when error occurs.
4746             AutoLock<RecursiveLock> lock(mLock);
4747 
4748             auto it = info_VkFence.find(pGetFdInfo->fence);
4749             if (it == info_VkFence.end()) {
4750                 ALOGV("%s: VK_ERROR_OUT_OF_HOST_MEMORY: no fence info\n", __func__);
4751                 return VK_ERROR_OUT_OF_HOST_MEMORY;
4752             }
4753 
4754             auto& info = it->second;
4755 
4756             bool syncFdCreated =
4757                 info.external &&
4758                 (info.exportFenceCreateInfo.handleTypes &
4759                  VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT);
4760 
4761             if (!syncFdCreated) {
4762                 ALOGV("%s: VK_ERROR_OUT_OF_HOST_MEMORY: no sync fd created\n", __func__);
4763                 return VK_ERROR_OUT_OF_HOST_MEMORY;
4764             }
4765 
4766             if (mFeatureInfo->hasVirtioGpuNativeSync) {
4767                 VkResult result;
4768                 int64_t osHandle;
4769                 uint64_t hostFenceHandle = get_host_u64_VkFence(pGetFdInfo->fence);
4770 
4771                 result = createFence(device, hostFenceHandle, osHandle);
4772                 if (result != VK_SUCCESS)
4773                     return result;
4774 
4775                 *pFd = osHandle;
4776             } else {
4777                 goldfish_sync_queue_work(
4778                     mSyncDeviceFd,
4779                     get_host_u64_VkFence(pGetFdInfo->fence) /* the handle */,
4780                     GOLDFISH_SYNC_VULKAN_SEMAPHORE_SYNC /* thread handle (doubling as type field) */,
4781                     pFd);
4782             }
4783 
4784             // relinquish ownership
4785             info.syncFd = -1;
4786             ALOGV("%s: got fd: %d\n", __func__, *pFd);
4787             return VK_SUCCESS;
4788         }
4789         return VK_ERROR_DEVICE_LOST;
4790 #else
4791         return VK_ERROR_OUT_OF_HOST_MEMORY;
4792 #endif
4793     }
4794 
4795     VkResult on_vkWaitForFences(
4796         void* context,
4797         VkResult,
4798         VkDevice device,
4799         uint32_t fenceCount,
4800         const VkFence* pFences,
4801         VkBool32 waitAll,
4802         uint64_t timeout) {
4803 
4804         VkEncoder* enc = (VkEncoder*)context;
4805 
4806 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
4807         std::vector<VkFence> fencesExternal;
4808         std::vector<int> fencesExternalWaitFds;
4809         std::vector<VkFence> fencesNonExternal;
4810 
4811         AutoLock<RecursiveLock> lock(mLock);
4812 
4813         for (uint32_t i = 0; i < fenceCount; ++i) {
4814             auto it = info_VkFence.find(pFences[i]);
4815             if (it == info_VkFence.end()) continue;
4816             const auto& info = it->second;
4817             if (info.syncFd >= 0) {
4818                 fencesExternal.push_back(pFences[i]);
4819                 fencesExternalWaitFds.push_back(info.syncFd);
4820             } else {
4821                 fencesNonExternal.push_back(pFences[i]);
4822             }
4823         }
4824 
4825         lock.unlock();
4826 
4827         if (fencesExternal.empty()) {
4828             // No need for work pool, just wait with host driver.
4829             return enc->vkWaitForFences(
4830                 device, fenceCount, pFences, waitAll, timeout, true /* do lock */);
4831         } else {
4832             // Depending on wait any or wait all,
4833             // schedule a wait group with waitAny/waitAll
4834             std::vector<WorkPool::Task> tasks;
4835 
4836             ALOGV("%s: scheduling ext waits\n", __func__);
4837 
4838             for (auto fd : fencesExternalWaitFds) {
4839                 ALOGV("%s: wait on %d\n", __func__, fd);
4840                 tasks.push_back([fd] {
4841                     sync_wait(fd, 3000);
4842                     ALOGV("done waiting on fd %d\n", fd);
4843                 });
4844             }
4845 
4846             if (!fencesNonExternal.empty()) {
4847                 tasks.push_back([this,
4848                                  fencesNonExternal /* copy of vector */,
4849                                  device, waitAll, timeout] {
4850                     auto hostConn = ResourceTracker::threadingCallbacks.hostConnectionGetFunc();
4851                     auto vkEncoder = ResourceTracker::threadingCallbacks.vkEncoderGetFunc(hostConn);
4852                     ALOGV("%s: vkWaitForFences to host\n", __func__);
4853                     vkEncoder->vkWaitForFences(device, fencesNonExternal.size(), fencesNonExternal.data(), waitAll, timeout, true /* do lock */);
4854                 });
4855             }
4856 
4857             auto waitGroupHandle = mWorkPool.schedule(tasks);
4858 
4859             // Convert timeout to microseconds from nanoseconds
4860             bool waitRes = false;
4861             if (waitAll) {
4862                 waitRes = mWorkPool.waitAll(waitGroupHandle, timeout / 1000);
4863             } else {
4864                 waitRes = mWorkPool.waitAny(waitGroupHandle, timeout / 1000);
4865             }
4866 
4867             if (waitRes) {
4868                 ALOGV("%s: VK_SUCCESS\n", __func__);
4869                 return VK_SUCCESS;
4870             } else {
4871                 ALOGV("%s: VK_TIMEOUT\n", __func__);
4872                 return VK_TIMEOUT;
4873             }
4874         }
4875 #else
4876         return enc->vkWaitForFences(
4877             device, fenceCount, pFences, waitAll, timeout, true /* do lock */);
4878 #endif
4879     }
4880 
4881     VkResult on_vkCreateDescriptorPool(
4882         void* context,
4883         VkResult,
4884         VkDevice device,
4885         const VkDescriptorPoolCreateInfo* pCreateInfo,
4886         const VkAllocationCallbacks* pAllocator,
4887         VkDescriptorPool* pDescriptorPool) {
4888 
4889         VkEncoder* enc = (VkEncoder*)context;
4890 
4891         VkResult res = enc->vkCreateDescriptorPool(
4892             device, pCreateInfo, pAllocator, pDescriptorPool, true /* do lock */);
4893 
4894         if (res != VK_SUCCESS) return res;
4895 
4896         VkDescriptorPool pool = *pDescriptorPool;
4897 
4898         struct goldfish_VkDescriptorPool* dp = as_goldfish_VkDescriptorPool(pool);
4899         dp->allocInfo = new DescriptorPoolAllocationInfo;
4900         dp->allocInfo->device = device;
4901         dp->allocInfo->createFlags = pCreateInfo->flags;
4902         dp->allocInfo->maxSets = pCreateInfo->maxSets;
4903         dp->allocInfo->usedSets = 0;
4904 
4905         for (uint32_t i = 0; i < pCreateInfo->poolSizeCount; ++i) {
4906             dp->allocInfo->descriptorCountInfo.push_back({
4907                 pCreateInfo->pPoolSizes[i].type,
4908                 pCreateInfo->pPoolSizes[i].descriptorCount,
4909                 0, /* used */
4910             });
4911         }
4912 
4913         if (mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate) {
4914             std::vector<uint64_t> poolIds(pCreateInfo->maxSets);
4915 
4916             uint32_t count = pCreateInfo->maxSets;
4917             enc->vkCollectDescriptorPoolIdsGOOGLE(
4918                 device, pool, &count, poolIds.data(), true /* do lock */);
4919 
4920             dp->allocInfo->freePoolIds = poolIds;
4921         }
4922 
4923         return res;
4924     }
4925 
on_vkDestroyDescriptorPool(void * context,VkDevice device,VkDescriptorPool descriptorPool,const VkAllocationCallbacks * pAllocator)4926     void on_vkDestroyDescriptorPool(
4927         void* context,
4928         VkDevice device,
4929         VkDescriptorPool descriptorPool,
4930         const VkAllocationCallbacks* pAllocator) {
4931 
4932         if (!descriptorPool) return;
4933 
4934         VkEncoder* enc = (VkEncoder*)context;
4935 
4936         clearDescriptorPoolAndUnregisterDescriptorSets(context, device, descriptorPool);
4937 
4938         enc->vkDestroyDescriptorPool(device, descriptorPool, pAllocator, true /* do lock */);
4939     }
4940 
on_vkResetDescriptorPool(void * context,VkResult,VkDevice device,VkDescriptorPool descriptorPool,VkDescriptorPoolResetFlags flags)4941     VkResult on_vkResetDescriptorPool(
4942         void* context,
4943         VkResult,
4944         VkDevice device,
4945         VkDescriptorPool descriptorPool,
4946         VkDescriptorPoolResetFlags flags) {
4947 
4948         if (!descriptorPool) return VK_ERROR_INITIALIZATION_FAILED;
4949 
4950         VkEncoder* enc = (VkEncoder*)context;
4951 
4952         VkResult res = enc->vkResetDescriptorPool(device, descriptorPool, flags, true /* do lock */);
4953 
4954         if (res != VK_SUCCESS) return res;
4955 
4956         clearDescriptorPoolAndUnregisterDescriptorSets(context, device, descriptorPool);
4957         return res;
4958     }
4959 
on_vkAllocateDescriptorSets(void * context,VkResult,VkDevice device,const VkDescriptorSetAllocateInfo * pAllocateInfo,VkDescriptorSet * pDescriptorSets)4960     VkResult on_vkAllocateDescriptorSets(
4961         void* context,
4962         VkResult,
4963         VkDevice device,
4964         const VkDescriptorSetAllocateInfo*          pAllocateInfo,
4965         VkDescriptorSet*                            pDescriptorSets) {
4966 
4967         VkEncoder* enc = (VkEncoder*)context;
4968 
4969         return allocAndInitializeDescriptorSets(context, device, pAllocateInfo, pDescriptorSets);
4970     }
4971 
on_vkFreeDescriptorSets(void * context,VkResult,VkDevice device,VkDescriptorPool descriptorPool,uint32_t descriptorSetCount,const VkDescriptorSet * pDescriptorSets)4972     VkResult on_vkFreeDescriptorSets(
4973         void* context,
4974         VkResult,
4975         VkDevice                                    device,
4976         VkDescriptorPool                            descriptorPool,
4977         uint32_t                                    descriptorSetCount,
4978         const VkDescriptorSet*                      pDescriptorSets) {
4979 
4980         VkEncoder* enc = (VkEncoder*)context;
4981 
        // A bit of robustness so that we can tolerate double-frees of
        // descriptor sets and other invalid usage:
        // https://github.com/KhronosGroup/Vulkan-Docs/issues/1070
        // (people expect VK_SUCCESS to always be returned by vkFreeDescriptorSets)
        std::vector<VkDescriptorSet> toActuallyFree;
        {
            AutoLock<RecursiveLock> lock(mLock);

            // Pool was destroyed
            if (info_VkDescriptorPool.find(descriptorPool) == info_VkDescriptorPool.end()) {
                return VK_SUCCESS;
            }

            if (!descriptorPoolSupportsIndividualFreeLocked(descriptorPool))
                return VK_SUCCESS;

            std::vector<VkDescriptorSet> existingDescriptorSets;

            // Check if this descriptor set was in the pool's set of allocated descriptor sets,
            // to guard against double free (double free is allowed by the client)
            {
                auto allocedSets = as_goldfish_VkDescriptorPool(descriptorPool)->allocInfo->allocedSets;

                for (uint32_t i = 0; i < descriptorSetCount; ++i) {

                    if (allocedSets.end() == allocedSets.find(pDescriptorSets[i])) {
                        ALOGV("%s: Warning: descriptor set %p not found in pool. Was this double-freed?\n", __func__,
                              (void*)pDescriptorSets[i]);
                        continue;
                    }

                    auto it = info_VkDescriptorSet.find(pDescriptorSets[i]);
                    if (it == info_VkDescriptorSet.end())
                        continue;

                    existingDescriptorSets.push_back(pDescriptorSets[i]);
                }
            }

            for (auto set : existingDescriptorSets) {
                if (removeDescriptorSetFromPool(set, mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate)) {
                    toActuallyFree.push_back(set);
                }
            }

            if (toActuallyFree.empty()) return VK_SUCCESS;
        }

        if (mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate) {
            // In the batched set update case, decrement refcount on the set layout
            // and only free on host if we satisfied a pending allocation on the
            // host.
            for (uint32_t i = 0; i < toActuallyFree.size(); ++i) {
                VkDescriptorSetLayout setLayout = as_goldfish_VkDescriptorSet(toActuallyFree[i])->reified->setLayout;
                decDescriptorSetLayoutRef(context, device, setLayout, nullptr);
            }
            freeDescriptorSetsIfHostAllocated(
                enc, device, (uint32_t)toActuallyFree.size(), toActuallyFree.data());
        } else {
            // In the non-batched set update case, just free them directly.
            enc->vkFreeDescriptorSets(device, descriptorPool, (uint32_t)toActuallyFree.size(), toActuallyFree.data(), true /* do lock */);
        }
        return VK_SUCCESS;
    }

    VkResult on_vkCreateDescriptorSetLayout(
        void* context,
        VkResult,
        VkDevice device,
        const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
        const VkAllocationCallbacks* pAllocator,
        VkDescriptorSetLayout* pSetLayout) {

        VkEncoder* enc = (VkEncoder*)context;

        VkResult res = enc->vkCreateDescriptorSetLayout(
            device, pCreateInfo, pAllocator, pSetLayout, true /* do lock */);

        if (res != VK_SUCCESS) return res;

        struct goldfish_VkDescriptorSetLayout* dsl =
            as_goldfish_VkDescriptorSetLayout(*pSetLayout);
        dsl->layoutInfo = new DescriptorSetLayoutInfo;
        for (uint32_t i = 0; i < pCreateInfo->bindingCount; ++i) {
            dsl->layoutInfo->bindings.push_back(pCreateInfo->pBindings[i]);
        }
        dsl->layoutInfo->refcount = 1;

        return res;
    }

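    // Deep-copies the descriptor writes so image infos can be rewritten
    // (immutable samplers zeroed, nonexistent samplers filtered out) before
    // the writes are either applied to the guest-side shadow sets (batched
    // update mode) or forwarded to the host encoder.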
    void on_vkUpdateDescriptorSets(
        void* context,
        VkDevice device,
        uint32_t descriptorWriteCount,
        const VkWriteDescriptorSet* pDescriptorWrites,
        uint32_t descriptorCopyCount,
        const VkCopyDescriptorSet* pDescriptorCopies) {

        VkEncoder* enc = (VkEncoder*)context;

        std::vector<VkDescriptorImageInfo> transformedImageInfos;
        std::vector<VkWriteDescriptorSet> transformedWrites(descriptorWriteCount);

        memcpy(transformedWrites.data(), pDescriptorWrites, sizeof(VkWriteDescriptorSet) * descriptorWriteCount);

        size_t imageInfosNeeded = 0;
        for (uint32_t i = 0; i < descriptorWriteCount; ++i) {
            if (!isDescriptorTypeImageInfo(transformedWrites[i].descriptorType)) continue;
            if (!transformedWrites[i].pImageInfo) continue;

            imageInfosNeeded += transformedWrites[i].descriptorCount;
        }

        transformedImageInfos.resize(imageInfosNeeded);

        size_t imageInfoIndex = 0;
        for (uint32_t i = 0; i < descriptorWriteCount; ++i) {
            if (!isDescriptorTypeImageInfo(transformedWrites[i].descriptorType)) continue;
            if (!transformedWrites[i].pImageInfo) continue;

            for (uint32_t j = 0; j < transformedWrites[i].descriptorCount; ++j) {
                transformedImageInfos[imageInfoIndex] = transformedWrites[i].pImageInfo[j];
                ++imageInfoIndex;
            }
            transformedWrites[i].pImageInfo = &transformedImageInfos[imageInfoIndex - transformedWrites[i].descriptorCount];
        }

        {
            // Validate and filter samplers
            AutoLock<RecursiveLock> lock(mLock);
            size_t imageInfoIndex = 0;
            for (uint32_t i = 0; i < descriptorWriteCount; ++i) {

                if (!isDescriptorTypeImageInfo(transformedWrites[i].descriptorType)) continue;
                if (!transformedWrites[i].pImageInfo) continue;

                bool isImmutableSampler =
                    descriptorBindingIsImmutableSampler(
                        transformedWrites[i].dstSet,
                        transformedWrites[i].dstBinding);

                for (uint32_t j = 0; j < transformedWrites[i].descriptorCount; ++j) {
                    if (isImmutableSampler) {
                        transformedImageInfos[imageInfoIndex].sampler = 0;
                    }
                    transformedImageInfos[imageInfoIndex] =
                        filterNonexistentSampler(transformedImageInfos[imageInfoIndex]);
                    ++imageInfoIndex;
                }
            }
        }

        if (mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate) {
            for (uint32_t i = 0; i < descriptorWriteCount; ++i) {
                VkDescriptorSet set = transformedWrites[i].dstSet;
                doEmulatedDescriptorWrite(&transformedWrites[i],
                        as_goldfish_VkDescriptorSet(set)->reified);
            }

            for (uint32_t i = 0; i < descriptorCopyCount; ++i) {
                doEmulatedDescriptorCopy(&pDescriptorCopies[i],
                        as_goldfish_VkDescriptorSet(pDescriptorCopies[i].srcSet)->reified,
                        as_goldfish_VkDescriptorSet(pDescriptorCopies[i].dstSet)->reified);
            }
        } else {
            enc->vkUpdateDescriptorSets(
                    device, descriptorWriteCount, transformedWrites.data(),
                    descriptorCopyCount, pDescriptorCopies, true /* do lock */);
        }
    }

    void on_vkDestroyImage(
        void* context,
        VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {

#ifdef VK_USE_PLATFORM_ANDROID_KHR
        {
          AutoLock<RecursiveLock> lock(mLock); // Do not guard the encoder;
                                               // doing so may cause deadlock (b/243339973).

          // Wait for any pending QSRIs to prevent a race in which the Gfxstream
          // host processes the below `vkDestroyImage()` from the VK encoder
          // command stream before it processes a previously submitted
          // `VIRTIO_GPU_NATIVE_SYNC_VULKAN_QSRI_EXPORT` from the virtio-gpu
          // command stream, which relies on the image still existing.
          auto imageInfoIt = info_VkImage.find(image);
          if (imageInfoIt != info_VkImage.end()) {
            auto& imageInfo = imageInfoIt->second;
            for (int syncFd : imageInfo.pendingQsriSyncFds) {
                int syncWaitRet = sync_wait(syncFd, 3000);
                if (syncWaitRet < 0) {
                    ALOGE("%s: Failed to wait for pending QSRI sync: strerror: %s errno: %d",
                          __func__, strerror(errno), errno);
                }
                close(syncFd);
            }
            imageInfo.pendingQsriSyncFds.clear();
          }
        }
#endif
        VkEncoder* enc = (VkEncoder*)context;
        enc->vkDestroyImage(device, image, pAllocator, true /* do lock */);
    }

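    // On Fuchsia, sysmem-backed images report a tightly packed
    // width * height * 4 size; on other platforms this is a no-op.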
    void setMemoryRequirementsForSysmemBackedImage(
        VkImage image, VkMemoryRequirements *pMemoryRequirements) {
#ifdef VK_USE_PLATFORM_FUCHSIA
        auto it = info_VkImage.find(image);
        if (it == info_VkImage.end()) return;
        auto& info = it->second;
        if (info.isSysmemBackedMemory) {
            auto width = info.createInfo.extent.width;
            auto height = info.createInfo.extent.height;
            pMemoryRequirements->size = width * height * 4;
        }
#else
        // Bypass "unused parameter" checks.
        (void)image;
        (void)pMemoryRequirements;
#endif
    }

    void on_vkGetImageMemoryRequirements(
        void *context, VkDevice device, VkImage image,
        VkMemoryRequirements *pMemoryRequirements) {

        AutoLock<RecursiveLock> lock(mLock);

        auto it = info_VkImage.find(image);
        if (it == info_VkImage.end()) return;

        auto& info = it->second;

        if (info.baseRequirementsKnown) {
            *pMemoryRequirements = info.baseRequirements;
            return;
        }

        lock.unlock();

        VkEncoder* enc = (VkEncoder*)context;

        enc->vkGetImageMemoryRequirements(
            device, image, pMemoryRequirements, true /* do lock */);

        lock.lock();

        transformImageMemoryRequirementsForGuestLocked(
            image, pMemoryRequirements);

        info.baseRequirementsKnown = true;
        info.baseRequirements = *pMemoryRequirements;
    }

    void on_vkGetImageMemoryRequirements2(
        void *context, VkDevice device, const VkImageMemoryRequirementsInfo2 *pInfo,
        VkMemoryRequirements2 *pMemoryRequirements) {
        VkEncoder* enc = (VkEncoder*)context;
        enc->vkGetImageMemoryRequirements2(
            device, pInfo, pMemoryRequirements, true /* do lock */);
        transformImageMemoryRequirements2ForGuest(
            pInfo->image, pMemoryRequirements);
    }

    void on_vkGetImageMemoryRequirements2KHR(
        void *context, VkDevice device, const VkImageMemoryRequirementsInfo2 *pInfo,
        VkMemoryRequirements2 *pMemoryRequirements) {
        VkEncoder* enc = (VkEncoder*)context;
        enc->vkGetImageMemoryRequirements2KHR(
            device, pInfo, pMemoryRequirements, true /* do lock */);
        transformImageMemoryRequirements2ForGuest(
            pInfo->image, pMemoryRequirements);
    }

    VkResult on_vkBindImageMemory(
        void* context, VkResult,
        VkDevice device, VkImage image, VkDeviceMemory memory,
        VkDeviceSize memoryOffset) {
        VkEncoder* enc = (VkEncoder*)context;
        // Do not forward calls with invalid handles to host.
        if (info_VkDeviceMemory.find(memory) == info_VkDeviceMemory.end() ||
            info_VkImage.find(image) == info_VkImage.end()) {
            return VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }
        return enc->vkBindImageMemory(device, image, memory, memoryOffset, true /* do lock */);
    }

    VkResult on_vkBindImageMemory2(
        void* context, VkResult,
        VkDevice device, uint32_t bindingCount, const VkBindImageMemoryInfo* pBindInfos) {
        VkEncoder* enc = (VkEncoder*)context;

        if (bindingCount < 1 || !pBindInfos) {
            return VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }

        for (uint32_t i = 0; i < bindingCount; i++) {
            const VkBindImageMemoryInfo& bimi = pBindInfos[i];

            auto imageIt = info_VkImage.find(bimi.image);
            if (imageIt == info_VkImage.end()) {
                return VK_ERROR_OUT_OF_DEVICE_MEMORY;
            }

            if (bimi.memory != VK_NULL_HANDLE) {
                auto memoryIt = info_VkDeviceMemory.find(bimi.memory);
                if (memoryIt == info_VkDeviceMemory.end()) {
                    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
                }
            }
        }

        return enc->vkBindImageMemory2(device, bindingCount, pBindInfos, true /* do lock */);
    }

    VkResult on_vkBindImageMemory2KHR(
        void* context, VkResult result,
        VkDevice device, uint32_t bindingCount, const VkBindImageMemoryInfo* pBindInfos) {
        return on_vkBindImageMemory2(context, result, device, bindingCount, pBindInfos);
    }

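    // Copies the create info and re-chains any external-memory,
    // opaque-capture-address, and device-address structs onto the local copy,
    // handles Fuchsia sysmem-backed buffer collections, then creates the
    // buffer (fetching memory requirements in the same call when supported)
    // and caches the results in info_VkBuffer.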
    VkResult on_vkCreateBuffer(
        void* context, VkResult,
        VkDevice device, const VkBufferCreateInfo *pCreateInfo,
        const VkAllocationCallbacks *pAllocator,
        VkBuffer *pBuffer) {
        VkEncoder* enc = (VkEncoder*)context;

        VkBufferCreateInfo localCreateInfo = vk_make_orphan_copy(*pCreateInfo);
        vk_struct_chain_iterator structChainIter =
            vk_make_chain_iterator(&localCreateInfo);
        VkExternalMemoryBufferCreateInfo localExtBufCi;

        const VkExternalMemoryBufferCreateInfo* extBufCiPtr =
            vk_find_struct<VkExternalMemoryBufferCreateInfo>(pCreateInfo);
        if (extBufCiPtr) {
            localExtBufCi = vk_make_orphan_copy(*extBufCiPtr);
            vk_append_struct(&structChainIter, &localExtBufCi);
        }

        VkBufferOpaqueCaptureAddressCreateInfo localCapAddrCi;
        const VkBufferOpaqueCaptureAddressCreateInfo* pCapAddrCi =
            vk_find_struct<VkBufferOpaqueCaptureAddressCreateInfo>(pCreateInfo);
        if (pCapAddrCi) {
            localCapAddrCi = vk_make_orphan_copy(*pCapAddrCi);
            vk_append_struct(&structChainIter, &localCapAddrCi);
        }

        VkBufferDeviceAddressCreateInfoEXT localDevAddrCi;
        const VkBufferDeviceAddressCreateInfoEXT* pDevAddrCi =
            vk_find_struct<VkBufferDeviceAddressCreateInfoEXT>(pCreateInfo);
        if (pDevAddrCi) {
            localDevAddrCi = vk_make_orphan_copy(*pDevAddrCi);
            vk_append_struct(&structChainIter, &localDevAddrCi);
        }

#ifdef VK_USE_PLATFORM_FUCHSIA
        Optional<zx::vmo> vmo;
        bool isSysmemBackedMemory = false;

        if (extBufCiPtr &&
            (extBufCiPtr->handleTypes &
             VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA)) {
            isSysmemBackedMemory = true;
        }

        const auto* extBufferCollectionPtr =
            vk_find_struct<VkBufferCollectionBufferCreateInfoFUCHSIA>(
                pCreateInfo);

        if (extBufferCollectionPtr) {
            const auto& collection = *reinterpret_cast<
                fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(
                extBufferCollectionPtr->collection);
            uint32_t index = extBufferCollectionPtr->index;

            auto result = collection->WaitForBuffersAllocated();
            if (result.ok() && result->status == ZX_OK) {
                auto& info = result->buffer_collection_info;
                if (index < info.buffer_count) {
                    vmo = android::base::makeOptional(
                            std::move(info.buffers[index].vmo));
                }
            } else {
                ALOGE("WaitForBuffersAllocated failed: %d %d", result.status(),
                      GET_STATUS_SAFE(result, status));
            }

            if (vmo && vmo->is_valid()) {
                fidl::Arena arena;
                fuchsia_hardware_goldfish::wire::CreateBuffer2Params createParams(arena);
                createParams.set_size(arena, pCreateInfo->size)
                    .set_memory_property(
                        fuchsia_hardware_goldfish::wire::kMemoryPropertyDeviceLocal);

                auto result =
                    mControlDevice->CreateBuffer2(std::move(*vmo), createParams);
                if (!result.ok() ||
                    (result->is_error() &&
                     result->error_value() != ZX_ERR_ALREADY_EXISTS)) {
                    ALOGE("CreateBuffer2 failed: %d:%d", result.status(),
                          GET_STATUS_SAFE(result, error_value()));
                }
                isSysmemBackedMemory = true;
            }
        }
#endif  // VK_USE_PLATFORM_FUCHSIA

        VkResult res;
        VkMemoryRequirements memReqs;

        if (supportsCreateResourcesWithRequirements()) {
            res = enc->vkCreateBufferWithRequirementsGOOGLE(
                device, &localCreateInfo, pAllocator, pBuffer, &memReqs,
                true /* do lock */);
        } else {
            res = enc->vkCreateBuffer(device, &localCreateInfo, pAllocator,
                                      pBuffer, true /* do lock */);
        }

        if (res != VK_SUCCESS) return res;

// Delete the `protocolVersion` check once the goldfish drivers are gone.
#ifdef VK_USE_PLATFORM_ANDROID_KHR
        if (mCaps.gfxstreamCapset.colorBufferMemoryIndex == 0xFFFFFFFF) {
            mCaps.gfxstreamCapset.colorBufferMemoryIndex =
                    getColorBufferMemoryIndex(context, device);
        }
        if (extBufCiPtr &&
            (extBufCiPtr->handleTypes &
             VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID)) {
            updateMemoryTypeBits(&memReqs.memoryTypeBits,
                                 mCaps.gfxstreamCapset.colorBufferMemoryIndex);
        }
#endif

        AutoLock<RecursiveLock> lock(mLock);

        auto it = info_VkBuffer.find(*pBuffer);
        if (it == info_VkBuffer.end()) return VK_ERROR_INITIALIZATION_FAILED;

        auto& info = it->second;

        info.createInfo = localCreateInfo;
        info.createInfo.pNext = nullptr;

        if (supportsCreateResourcesWithRequirements()) {
            info.baseRequirementsKnown = true;
            info.baseRequirements = memReqs;
        }

        if (extBufCiPtr) {
            info.external = true;
            info.externalCreateInfo = *extBufCiPtr;
        }

#ifdef VK_USE_PLATFORM_FUCHSIA
        if (isSysmemBackedMemory) {
            info.isSysmemBackedMemory = true;
        }
#endif

        return res;
    }

    void on_vkDestroyBuffer(
        void* context,
        VkDevice device, VkBuffer buffer, const VkAllocationCallbacks *pAllocator) {
        VkEncoder* enc = (VkEncoder*)context;
        enc->vkDestroyBuffer(device, buffer, pAllocator, true /* do lock */);
    }

    void on_vkGetBufferMemoryRequirements(
        void* context, VkDevice device, VkBuffer buffer, VkMemoryRequirements *pMemoryRequirements) {

        AutoLock<RecursiveLock> lock(mLock);

        auto it = info_VkBuffer.find(buffer);
        if (it == info_VkBuffer.end()) return;

        auto& info = it->second;

        if (info.baseRequirementsKnown) {
            *pMemoryRequirements = info.baseRequirements;
            return;
        }

        lock.unlock();

        VkEncoder* enc = (VkEncoder*)context;
        enc->vkGetBufferMemoryRequirements(
            device, buffer, pMemoryRequirements, true /* do lock */);

        lock.lock();

        info.baseRequirementsKnown = true;
        info.baseRequirements = *pMemoryRequirements;
    }

    void on_vkGetBufferMemoryRequirements2(
        void* context, VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo,
        VkMemoryRequirements2* pMemoryRequirements) {
        VkEncoder* enc = (VkEncoder*)context;
        enc->vkGetBufferMemoryRequirements2(device, pInfo, pMemoryRequirements, true /* do lock */);
        transformBufferMemoryRequirements2ForGuest(
            pInfo->buffer, pMemoryRequirements);
    }

    void on_vkGetBufferMemoryRequirements2KHR(
        void* context, VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo,
        VkMemoryRequirements2* pMemoryRequirements) {
        VkEncoder* enc = (VkEncoder*)context;
        enc->vkGetBufferMemoryRequirements2KHR(device, pInfo, pMemoryRequirements, true /* do lock */);
        transformBufferMemoryRequirements2ForGuest(
            pInfo->buffer, pMemoryRequirements);
    }

    VkResult on_vkBindBufferMemory(
        void *context, VkResult,
        VkDevice device, VkBuffer buffer, VkDeviceMemory memory, VkDeviceSize memoryOffset) {
        VkEncoder *enc = (VkEncoder *)context;
        return enc->vkBindBufferMemory(
            device, buffer, memory, memoryOffset, true /* do lock */);
    }

    VkResult on_vkBindBufferMemory2(
        void *context, VkResult,
        VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo *pBindInfos) {
        VkEncoder *enc = (VkEncoder *)context;
        return enc->vkBindBufferMemory2(
            device, bindInfoCount, pBindInfos, true /* do lock */);
    }

    VkResult on_vkBindBufferMemory2KHR(
        void *context, VkResult,
        VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo *pBindInfos) {
        VkEncoder *enc = (VkEncoder *)context;
        return enc->vkBindBufferMemory2KHR(
            device, bindInfoCount, pBindInfos, true /* do lock */);
    }

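    // Lazily opens the goldfish sync device for this process. It backs
    // sync-fd-exportable semaphores when virtio-gpu native sync is unavailable.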
    void ensureSyncDeviceFd() {
#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
        if (mSyncDeviceFd >= 0)
            return;
        mSyncDeviceFd = goldfish_sync_open();
        if (mSyncDeviceFd >= 0) {
            ALOGD("%s: created sync device for current Vulkan process: %d\n", __func__, mSyncDeviceFd);
        } else {
            ALOGD("%s: failed to create sync device for current Vulkan process\n", __func__);
        }
#endif
    }

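    // Strips external-handle-type create structs before calling the host
    // (keeping any VkSemaphoreTypeCreateInfo), then backs exportable
    // semaphores with a zx event on Fuchsia or a sync fd on Android/Linux.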
    VkResult on_vkCreateSemaphore(
        void* context, VkResult input_result,
        VkDevice device, const VkSemaphoreCreateInfo* pCreateInfo,
        const VkAllocationCallbacks* pAllocator,
        VkSemaphore* pSemaphore) {

        VkEncoder* enc = (VkEncoder*)context;

        VkSemaphoreCreateInfo finalCreateInfo = *pCreateInfo;

        const VkExportSemaphoreCreateInfoKHR* exportSemaphoreInfoPtr =
            vk_find_struct<VkExportSemaphoreCreateInfoKHR>(pCreateInfo);

#ifdef VK_USE_PLATFORM_FUCHSIA
        bool exportEvent =
                exportSemaphoreInfoPtr &&
                (exportSemaphoreInfoPtr->handleTypes &
                 VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA);

        if (exportEvent) {
            finalCreateInfo.pNext = nullptr;
            // If the app also chained a timeline semaphore type, keep that on the chain.
            const VkSemaphoreTypeCreateInfo* typeCi =
                vk_find_struct<VkSemaphoreTypeCreateInfo>(pCreateInfo);
            if (typeCi) finalCreateInfo.pNext = typeCi;
        }
#endif

#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
        bool exportSyncFd = exportSemaphoreInfoPtr &&
            (exportSemaphoreInfoPtr->handleTypes &
             VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT);

        if (exportSyncFd) {
            finalCreateInfo.pNext = nullptr;
            // If the app also chained a timeline semaphore type, keep that on the chain.
            const VkSemaphoreTypeCreateInfo* typeCi =
                vk_find_struct<VkSemaphoreTypeCreateInfo>(pCreateInfo);
            if (typeCi) finalCreateInfo.pNext = typeCi;
        }
#endif
        input_result = enc->vkCreateSemaphore(
            device, &finalCreateInfo, pAllocator, pSemaphore, true /* do lock */);

        zx_handle_t event_handle = ZX_HANDLE_INVALID;

#ifdef VK_USE_PLATFORM_FUCHSIA
        if (exportEvent) {
            zx_event_create(0, &event_handle);
        }
#endif

        AutoLock<RecursiveLock> lock(mLock);

        auto it = info_VkSemaphore.find(*pSemaphore);
        if (it == info_VkSemaphore.end()) return VK_ERROR_INITIALIZATION_FAILED;

        auto& info = it->second;

        info.device = device;
        info.eventHandle = event_handle;
#ifdef VK_USE_PLATFORM_FUCHSIA
        info.eventKoid = getEventKoid(info.eventHandle);
#endif

#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
        if (exportSyncFd) {
            if (mFeatureInfo->hasVirtioGpuNativeSync) {
                VkResult result;
                int64_t osHandle;
                uint64_t hostFenceHandle = get_host_u64_VkSemaphore(*pSemaphore);

                result = createFence(device, hostFenceHandle, osHandle);
                if (result != VK_SUCCESS)
                    return result;

                info.syncFd.emplace(osHandle);
            } else {
                ensureSyncDeviceFd();

                // Already guarded by the exportSyncFd check above.
                int syncFd = -1;
                goldfish_sync_queue_work(
                        mSyncDeviceFd,
                        get_host_u64_VkSemaphore(*pSemaphore) /* the handle */,
                        GOLDFISH_SYNC_VULKAN_SEMAPHORE_SYNC /* thread handle (doubling as type field) */,
                        &syncFd);
                info.syncFd.emplace(syncFd);
            }
        }
#endif

        return VK_SUCCESS;
    }

    void on_vkDestroySemaphore(
        void* context,
        VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
        VkEncoder* enc = (VkEncoder*)context;
        enc->vkDestroySemaphore(device, semaphore, pAllocator, true /* do lock */);
    }

    // https://www.khronos.org/registry/vulkan/specs/1.0-extensions/html/vkspec.html#vkGetSemaphoreFdKHR
    // Each call to vkGetSemaphoreFdKHR must create a new file descriptor and transfer ownership
    // of it to the application. To avoid leaking resources, the application must release ownership
    // of the file descriptor when it is no longer needed.
    VkResult on_vkGetSemaphoreFdKHR(
        void* context, VkResult,
        VkDevice device, const VkSemaphoreGetFdInfoKHR* pGetFdInfo,
        int* pFd) {
#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
        VkEncoder* enc = (VkEncoder*)context;
        bool getSyncFd =
            pGetFdInfo->handleType & VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;

        if (getSyncFd) {
            AutoLock<RecursiveLock> lock(mLock);
            auto it = info_VkSemaphore.find(pGetFdInfo->semaphore);
            if (it == info_VkSemaphore.end()) return VK_ERROR_OUT_OF_HOST_MEMORY;
            auto& semInfo = it->second;
            // syncFd is expected to have a value here.
            *pFd = dup(semInfo.syncFd.value_or(-1));
            return VK_SUCCESS;
        } else {
            // opaque fd
            int hostFd = 0;
            VkResult result = enc->vkGetSemaphoreFdKHR(device, pGetFdInfo, &hostFd, true /* do lock */);
            if (result != VK_SUCCESS) {
                return result;
            }
            *pFd = memfd_create("vk_opaque_fd", 0);
            write(*pFd, &hostFd, sizeof(hostFd));
            return VK_SUCCESS;
        }
#else
        (void)context;
        (void)device;
        (void)pGetFdInfo;
        (void)pFd;
        return VK_ERROR_INCOMPATIBLE_DRIVER;
#endif
    }

    VkResult on_vkImportSemaphoreFdKHR(
        void* context, VkResult input_result,
        VkDevice device,
        const VkImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo) {
#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
        VkEncoder* enc = (VkEncoder*)context;
        if (input_result != VK_SUCCESS) {
            return input_result;
        }

        if (pImportSemaphoreFdInfo->handleType &
            VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT) {
            VkImportSemaphoreFdInfoKHR tmpInfo = *pImportSemaphoreFdInfo;

            AutoLock<RecursiveLock> lock(mLock);

            auto semaphoreIt = info_VkSemaphore.find(pImportSemaphoreFdInfo->semaphore);
            auto& info = semaphoreIt->second;

            if (info.syncFd.value_or(-1) >= 0) {
                close(info.syncFd.value());
            }

            info.syncFd.emplace(pImportSemaphoreFdInfo->fd);

            return VK_SUCCESS;
        } else {
            int fd = pImportSemaphoreFdInfo->fd;
            int err = lseek(fd, 0, SEEK_SET);
            if (err == -1) {
                ALOGE("lseek fail on import semaphore");
            }
            int hostFd = 0;
            read(fd, &hostFd, sizeof(hostFd));
            VkImportSemaphoreFdInfoKHR tmpInfo = *pImportSemaphoreFdInfo;
            tmpInfo.fd = hostFd;
            VkResult result = enc->vkImportSemaphoreFdKHR(device, &tmpInfo, true /* do lock */);
            close(fd);
            return result;
        }
#else
        (void)context;
        (void)input_result;
        (void)device;
        (void)pImportSemaphoreFdInfo;
        return VK_ERROR_INCOMPATIBLE_DRIVER;
#endif
    }

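    // Descriptor sets referenced by a command buffer whose guest-side shadow
    // writes still need to be committed to the host at submit time.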
    struct CommandBufferPendingDescriptorSets {
        std::unordered_set<VkDescriptorSet> sets;
    };

    void collectAllPendingDescriptorSetsBottomUp(const std::vector<VkCommandBuffer>& workingSet, std::unordered_set<VkDescriptorSet>& allDs) {
        if (workingSet.empty()) return;

        std::vector<VkCommandBuffer> nextLevel;
        for (auto commandBuffer : workingSet) {
            struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
            forAllObjects(cb->subObjects, [&nextLevel](void* secondary) {
                nextLevel.push_back((VkCommandBuffer)secondary);
            });
        }

        collectAllPendingDescriptorSetsBottomUp(nextLevel, allDs);

        for (auto cmdbuf : workingSet) {
            struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(cmdbuf);

            if (!cb->userPtr) {
                continue; // No descriptors to update.
            }

            CommandBufferPendingDescriptorSets* pendingDescriptorSets =
                (CommandBufferPendingDescriptorSets*)(cb->userPtr);

            if (pendingDescriptorSets->sets.empty()) {
                continue; // No descriptors to update.
            }

            allDs.insert(pendingDescriptorSets->sets.begin(), pendingDescriptorSets->sets.end());
        }
    }

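    // Replays all pending shadow descriptor writes for |sets| through a single
    // vkQueueCommitDescriptorSetUpdatesGOOGLE call, grouping the sets by pool
    // and emitting one VkWriteDescriptorSet per pending binding element.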
    void commitDescriptorSetUpdates(void* context, VkQueue queue, const std::unordered_set<VkDescriptorSet>& sets) {
        VkEncoder* enc = (VkEncoder*)context;

        std::unordered_map<VkDescriptorPool, uint32_t> poolSet;
        std::vector<VkDescriptorPool> pools;
        std::vector<VkDescriptorSetLayout> setLayouts;
        std::vector<uint64_t> poolIds;
        std::vector<uint32_t> descriptorSetWhichPool;
        std::vector<uint32_t> pendingAllocations;
        std::vector<uint32_t> writeStartingIndices;
        std::vector<VkWriteDescriptorSet> writesForHost;

        uint32_t poolIndex = 0;
        uint32_t currentWriteIndex = 0;
        for (auto set : sets) {
            ReifiedDescriptorSet* reified = as_goldfish_VkDescriptorSet(set)->reified;
            VkDescriptorPool pool = reified->pool;
            VkDescriptorSetLayout setLayout = reified->setLayout;

            auto it = poolSet.find(pool);
            if (it == poolSet.end()) {
                poolSet[pool] = poolIndex;
                descriptorSetWhichPool.push_back(poolIndex);
                pools.push_back(pool);
                ++poolIndex;
            } else {
                uint32_t savedPoolIndex = it->second;
                descriptorSetWhichPool.push_back(savedPoolIndex);
            }

            poolIds.push_back(reified->poolId);
            setLayouts.push_back(setLayout);
            pendingAllocations.push_back(reified->allocationPending ? 1 : 0);
            writeStartingIndices.push_back(currentWriteIndex);

            auto& writes = reified->allWrites;

            for (size_t i = 0; i < writes.size(); ++i) {
                uint32_t binding = i;

                for (size_t j = 0; j < writes[i].size(); ++j) {
                    auto& write = writes[i][j];

                    if (write.type == DescriptorWriteType::Empty) continue;

                    uint32_t dstArrayElement = 0;

                    VkDescriptorImageInfo* imageInfo = nullptr;
                    VkDescriptorBufferInfo* bufferInfo = nullptr;
                    VkBufferView* bufferView = nullptr;

                    switch (write.type) {
                        case DescriptorWriteType::Empty:
                            break;
                        case DescriptorWriteType::ImageInfo:
                            dstArrayElement = j;
                            imageInfo = &write.imageInfo;
                            break;
                        case DescriptorWriteType::BufferInfo:
                            dstArrayElement = j;
                            bufferInfo = &write.bufferInfo;
                            break;
                        case DescriptorWriteType::BufferView:
                            dstArrayElement = j;
                            bufferView = &write.bufferView;
                            break;
                        case DescriptorWriteType::InlineUniformBlock:
                        case DescriptorWriteType::AccelerationStructure:
                            // TODO
                            ALOGE("Encountered pending inline uniform block or acceleration structure desc write, abort (NYI)\n");
                            abort();
                        default:
                            break;
                    }

                    // TODO: Combine multiple writes into one VkWriteDescriptorSet.
                    VkWriteDescriptorSet forHost = {
                        VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, 0 /* TODO: inline uniform block */,
                        set,
                        binding,
                        dstArrayElement,
                        1,
                        write.descriptorType,
                        imageInfo,
                        bufferInfo,
                        bufferView,
                    };

                    writesForHost.push_back(forHost);
                    ++currentWriteIndex;

                    // Set it back to empty.
                    write.type = DescriptorWriteType::Empty;
                }
            }
        }

        // Skip out if there's nothing to VkWriteDescriptorSet home about.
        if (writesForHost.empty()) {
            return;
        }

        enc->vkQueueCommitDescriptorSetUpdatesGOOGLE(
            queue,
            (uint32_t)pools.size(), pools.data(),
            (uint32_t)sets.size(),
            setLayouts.data(),
            poolIds.data(),
            descriptorSetWhichPool.data(),
            pendingAllocations.data(),
            writeStartingIndices.data(),
            (uint32_t)writesForHost.size(),
            writesForHost.data(),
            false /* no lock */);

        // If we got here, then we definitely serviced the allocations.
        for (auto set : sets) {
            ReifiedDescriptorSet* reified = as_goldfish_VkDescriptorSet(set)->reified;
            reified->allocationPending = false;
        }
    }

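    // Flushes recorded-but-unsent commands for each command buffer in the
    // working set, recursing into secondary command buffers first so that by
    // the time a primary is flushed, everything it references is already on
    // the host.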
    void flushCommandBufferPendingCommandsBottomUp(void* context, VkQueue queue, const std::vector<VkCommandBuffer>& workingSet) {
        if (workingSet.empty()) return;

        std::vector<VkCommandBuffer> nextLevel;
        for (auto commandBuffer : workingSet) {
            struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
            forAllObjects(cb->subObjects, [&nextLevel](void* secondary) {
                nextLevel.push_back((VkCommandBuffer)secondary);
            });
        }

        flushCommandBufferPendingCommandsBottomUp(context, queue, nextLevel);

        // After this point, everyone at the previous level has been flushed
        for (auto cmdbuf : workingSet) {
            struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(cmdbuf);

            // There are no pending commands here, skip. (case 1)
            if (!cb->privateStream) continue;

            unsigned char* writtenPtr = 0;
            size_t written = 0;
            CommandBufferStagingStream* cmdBufStream =
                static_cast<CommandBufferStagingStream*>(cb->privateStream);
            cmdBufStream->getWritten(&writtenPtr, &written);

            // There are no pending commands here, skip. (case 2, stream created but no new recordings)
            if (!written) continue;

            // There are pending commands to flush.
            VkEncoder* enc = (VkEncoder*)context;
            VkDeviceMemory deviceMemory = cmdBufStream->getDeviceMemory();
            VkDeviceSize dataOffset = 0;
            if (mFeatureInfo->hasVulkanAuxCommandMemory) {
                // For suballocations, deviceMemory is an alias VkDeviceMemory;
                // get the underlying VkDeviceMemory for the given alias.
                deviceMemoryTransform_tohost(&deviceMemory, 1 /*memoryCount*/, &dataOffset,
                                             1 /*offsetCount*/, nullptr /*size*/, 0 /*sizeCount*/,
                                             nullptr /*typeIndex*/, 0 /*typeIndexCount*/,
                                             nullptr /*typeBits*/, 0 /*typeBitCounts*/);

                // mark stream as flushing before flushing commands
                cmdBufStream->markFlushing();
                enc->vkQueueFlushCommandsFromAuxMemoryGOOGLE(queue, cmdbuf, deviceMemory,
                                                             dataOffset, written, true /*do lock*/);
            } else {
                enc->vkQueueFlushCommandsGOOGLE(queue, cmdbuf, written, (const void*)writtenPtr,
                                                true /* do lock */);
            }
            // Reset this stream.
            // Flushing happens on vkQueueSubmit, and the Vulkan spec states that
            // once a command buffer is submitted, the application MUST NOT modify
            // it in any way while the device may still be processing it. It is
            // therefore safe to call reset() here: the command buffer associated
            // with this stream only leaves the pending state after the queue
            // submit completes and the host has read the data.
            cmdBufStream->reset();
        }
    }

    // Unlike resetCommandBufferStagingInfo, this does not always erase its
    // superObjects pointers because the command buffer has merely been
    // submitted, not reset.  However, if the command buffer was recorded with
    // ONE_TIME_SUBMIT_BIT, then it will also reset its primaries.
    //
    // Also, we save the set of descriptor sets referenced by this command
    // buffer because we only submitted the command buffer, and it is possible
    // to update the descriptor sets again and re-submit the same command
    // buffer without re-recording it (update-after-bind descriptor sets).
    void resetCommandBufferPendingTopology(VkCommandBuffer commandBuffer) {
        struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
        if (cb->flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) {
            resetCommandBufferStagingInfo(commandBuffer,
                true /* reset primaries */,
                true /* clear pending descriptor sets */);
        } else {
            resetCommandBufferStagingInfo(commandBuffer,
                false /* Don't reset primaries */,
                false /* Don't clear pending descriptor sets */);
        }
    }

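    // Accessor overloads that let on_vkQueueSubmitTemplate treat VkSubmitInfo
    // and VkSubmitInfo2 uniformly.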
    uint32_t getWaitSemaphoreCount(const VkSubmitInfo& pSubmit) {
        return pSubmit.waitSemaphoreCount;
    }

    uint32_t getWaitSemaphoreCount(const VkSubmitInfo2& pSubmit) {
        return pSubmit.waitSemaphoreInfoCount;
    }

    uint32_t getCommandBufferCount(const VkSubmitInfo& pSubmit) {
        return pSubmit.commandBufferCount;
    }

    uint32_t getCommandBufferCount(const VkSubmitInfo2& pSubmit) {
        return pSubmit.commandBufferInfoCount;
    }

    uint32_t getSignalSemaphoreCount(const VkSubmitInfo& pSubmit) {
        return pSubmit.signalSemaphoreCount;
    }

    uint32_t getSignalSemaphoreCount(const VkSubmitInfo2& pSubmit) {
        return pSubmit.signalSemaphoreInfoCount;
    }

    VkSemaphore getWaitSemaphore(const VkSubmitInfo& pSubmit, int i) {
        return pSubmit.pWaitSemaphores[i];
    }

    VkSemaphore getWaitSemaphore(const VkSubmitInfo2& pSubmit, int i) {
        return pSubmit.pWaitSemaphoreInfos[i].semaphore;
    }

    VkSemaphore getSignalSemaphore(const VkSubmitInfo& pSubmit, int i) {
        return pSubmit.pSignalSemaphores[i];
    }

    VkSemaphore getSignalSemaphore(const VkSubmitInfo2& pSubmit, int i) {
        return pSubmit.pSignalSemaphoreInfos[i].semaphore;
    }

    VkCommandBuffer getCommandBuffer(const VkSubmitInfo& pSubmit, int i) {
        return pSubmit.pCommandBuffers[i];
    }

    VkCommandBuffer getCommandBuffer(const VkSubmitInfo2& pSubmit, int i) {
        return pSubmit.pCommandBufferInfos[i].commandBuffer;
    }

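    // Commits pending descriptor set updates and flushes the staging streams
    // of every command buffer referenced by the submits, then resets their
    // pending-topology state.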
6019     template <class VkSubmitInfoType>
flushStagingStreams(void * context,VkQueue queue,uint32_t submitCount,const VkSubmitInfoType * pSubmits)6020     void flushStagingStreams(void* context, VkQueue queue, uint32_t submitCount,
6021                              const VkSubmitInfoType* pSubmits) {
6022         std::vector<VkCommandBuffer> toFlush;
6023         for (uint32_t i = 0; i < submitCount; ++i) {
6024             for (uint32_t j = 0; j < getCommandBufferCount(pSubmits[i]); ++j) {
6025                 toFlush.push_back(getCommandBuffer(pSubmits[i], j));
6026             }
6027         }
6028 
6029         std::unordered_set<VkDescriptorSet> pendingSets;
6030         collectAllPendingDescriptorSetsBottomUp(toFlush, pendingSets);
6031         commitDescriptorSetUpdates(context, queue, pendingSets);
6032 
6033         flushCommandBufferPendingCommandsBottomUp(context, queue, toFlush);
6034 
6035         for (auto cb : toFlush) {
6036             resetCommandBufferPendingTopology(cb);
6037         }
6038     }

    VkResult on_vkQueueSubmit(
        void* context, VkResult input_result,
        VkQueue queue, uint32_t submitCount, const VkSubmitInfo* pSubmits, VkFence fence) {
        AEMU_SCOPED_TRACE("on_vkQueueSubmit");
        return on_vkQueueSubmitTemplate<VkSubmitInfo>(context, input_result, queue, submitCount,
                                                      pSubmits, fence);
    }

    VkResult on_vkQueueSubmit2(void* context, VkResult input_result, VkQueue queue,
                               uint32_t submitCount, const VkSubmitInfo2* pSubmits, VkFence fence) {
        AEMU_SCOPED_TRACE("on_vkQueueSubmit2");
        return on_vkQueueSubmitTemplate<VkSubmitInfo2>(context, input_result, queue, submitCount,
                                                       pSubmits, fence);
    }

    VkResult vkQueueSubmitEnc(VkEncoder* enc, VkQueue queue, uint32_t submitCount,
                              const VkSubmitInfo* pSubmits, VkFence fence) {
        if (supportsAsyncQueueSubmit()) {
            enc->vkQueueSubmitAsyncGOOGLE(queue, submitCount, pSubmits, fence, true /* do lock */);
            return VK_SUCCESS;
        } else {
            return enc->vkQueueSubmit(queue, submitCount, pSubmits, fence, true /* do lock */);
        }
    }

    VkResult vkQueueSubmitEnc(VkEncoder* enc, VkQueue queue, uint32_t submitCount,
                              const VkSubmitInfo2* pSubmits, VkFence fence) {
        if (supportsAsyncQueueSubmit()) {
            enc->vkQueueSubmitAsync2GOOGLE(queue, submitCount, pSubmits, fence, true /* do lock */);
            return VK_SUCCESS;
        } else {
            return enc->vkQueueSubmit2(queue, submitCount, pSubmits, fence, true /* do lock */);
        }
    }

    template <typename VkSubmitInfoType>
    VkResult on_vkQueueSubmitTemplate(void* context, VkResult input_result, VkQueue queue,
                                      uint32_t submitCount, const VkSubmitInfoType* pSubmits,
                                      VkFence fence) {
        flushStagingStreams(context, queue, submitCount, pSubmits);

        std::vector<VkSemaphore> pre_signal_semaphores;
        std::vector<zx_handle_t> pre_signal_events;
        std::vector<int> pre_signal_sync_fds;
        std::vector<std::pair<zx_handle_t, zx_koid_t>> post_wait_events;
        std::vector<int> post_wait_sync_fds;

        VkEncoder* enc = (VkEncoder*)context;

        AutoLock<RecursiveLock> lock(mLock);

        for (uint32_t i = 0; i < submitCount; ++i) {
            for (uint32_t j = 0; j < getWaitSemaphoreCount(pSubmits[i]); ++j) {
                VkSemaphore semaphore = getWaitSemaphore(pSubmits[i], j);
                auto it = info_VkSemaphore.find(semaphore);
                if (it != info_VkSemaphore.end()) {
                    auto& semInfo = it->second;
#ifdef VK_USE_PLATFORM_FUCHSIA
                    if (semInfo.eventHandle) {
                        pre_signal_events.push_back(semInfo.eventHandle);
                        pre_signal_semaphores.push_back(semaphore);
                    }
#endif
#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
                    if (semInfo.syncFd.has_value()) {
                        pre_signal_sync_fds.push_back(semInfo.syncFd.value());
                        pre_signal_semaphores.push_back(semaphore);
                    }
#endif
                }
            }
            for (uint32_t j = 0; j < getSignalSemaphoreCount(pSubmits[i]); ++j) {
                auto it = info_VkSemaphore.find(getSignalSemaphore(pSubmits[i], j));
                if (it != info_VkSemaphore.end()) {
                    auto& semInfo = it->second;
#ifdef VK_USE_PLATFORM_FUCHSIA
                    if (semInfo.eventHandle) {
                        post_wait_events.push_back(
                            {semInfo.eventHandle, semInfo.eventKoid});
#ifndef FUCHSIA_NO_TRACE
                        if (semInfo.eventKoid != ZX_KOID_INVALID) {
                            // TODO(fxbug.dev/66098): Remove the "semaphore"
                            // FLOW_END events once it is removed from clients
                            // (for example, gfx Engine).
                            TRACE_FLOW_END("gfx", "semaphore",
                                           semInfo.eventKoid);
                            TRACE_FLOW_BEGIN("gfx", "goldfish_post_wait_event",
                                             semInfo.eventKoid);
                        }
#endif
                    }
#endif
#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
                    if (semInfo.syncFd.value_or(-1) >= 0) {
                        post_wait_sync_fds.push_back(semInfo.syncFd.value());
                    }
#endif
                }
            }
        }
        lock.unlock();

        if (pre_signal_semaphores.empty()) {
            input_result = vkQueueSubmitEnc(enc, queue, submitCount, pSubmits, fence);
            if (input_result != VK_SUCCESS) return input_result;
        } else {
            // Schedule waits on the OS external objects and signal the wait
            // semaphores in a separate thread.
            std::vector<WorkPool::Task> preSignalTasks;
            std::vector<WorkPool::Task> preSignalQueueSubmitTasks;
#ifdef VK_USE_PLATFORM_FUCHSIA
            for (auto event : pre_signal_events) {
                preSignalTasks.push_back([event] {
                    zx_object_wait_one(
                        event,
                        ZX_EVENT_SIGNALED,
                        ZX_TIME_INFINITE,
                        nullptr);
                });
            }
#endif
#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
            for (auto fd : pre_signal_sync_fds) {
                // https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/VkImportSemaphoreFdInfoKHR.html
                // fd == -1 is treated as already signaled
                if (fd != -1) {
                    preSignalTasks.push_back([fd] {
                        sync_wait(fd, 3000);
                    });
                }
            }
#endif
            if (!preSignalTasks.empty()) {
                auto waitGroupHandle = mWorkPool.schedule(preSignalTasks);
                mWorkPool.waitAll(waitGroupHandle);
            }

            // Use the old version of VkSubmitInfo
            VkSubmitInfo submit_info = {
                .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
                .waitSemaphoreCount = 0,
                .pWaitSemaphores = nullptr,
                .pWaitDstStageMask = nullptr,
                .signalSemaphoreCount =
                    static_cast<uint32_t>(pre_signal_semaphores.size()),
                .pSignalSemaphores = pre_signal_semaphores.data()};
            vkQueueSubmitEnc(enc, queue, 1, &submit_info, VK_NULL_HANDLE);
            input_result = vkQueueSubmitEnc(enc, queue, submitCount, pSubmits, fence);
            if (input_result != VK_SUCCESS) return input_result;
        }
        lock.lock();
        int externalFenceFdToSignal = -1;

#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
        if (fence != VK_NULL_HANDLE) {
            auto it = info_VkFence.find(fence);
            if (it != info_VkFence.end()) {
                const auto& info = it->second;
                if (info.syncFd >= 0) {
                    externalFenceFdToSignal = info.syncFd;
                }
            }
        }
#endif
        if (externalFenceFdToSignal >= 0 ||
            !post_wait_events.empty() ||
            !post_wait_sync_fds.empty()) {

            std::vector<WorkPool::Task> tasks;

            tasks.push_back([queue, externalFenceFdToSignal,
                             post_wait_events /* copy of zx handles */,
                             post_wait_sync_fds /* copy of sync fds */] {
                auto hostConn = ResourceTracker::threadingCallbacks.hostConnectionGetFunc();
                auto vkEncoder = ResourceTracker::threadingCallbacks.vkEncoderGetFunc(hostConn);
                auto waitIdleRes = vkEncoder->vkQueueWaitIdle(queue, true /* do lock */);
#ifdef VK_USE_PLATFORM_FUCHSIA
                AEMU_SCOPED_TRACE("on_vkQueueSubmit::SignalSemaphores");
                (void)externalFenceFdToSignal;
                for (auto& [event, koid] : post_wait_events) {
#ifndef FUCHSIA_NO_TRACE
                    if (koid != ZX_KOID_INVALID) {
                        TRACE_FLOW_END("gfx", "goldfish_post_wait_event", koid);
                        TRACE_FLOW_BEGIN("gfx", "event_signal", koid);
                    }
#endif
                    zx_object_signal(event, 0, ZX_EVENT_SIGNALED);
                }
#endif
#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
                for (auto& fd : post_wait_sync_fds) {
                    goldfish_sync_signal(fd);
                }

                if (externalFenceFdToSignal >= 0) {
                    ALOGV("%s: external fence real signal: %d\n", __func__, externalFenceFdToSignal);
                    goldfish_sync_signal(externalFenceFdToSignal);
                }
#endif
            });
            auto queueAsyncWaitHandle = mWorkPool.schedule(tasks);
            auto& queueWorkItems = mQueueSensitiveWorkPoolItems[queue];
            queueWorkItems.push_back(queueAsyncWaitHandle);
        }
        return VK_SUCCESS;
    }
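
    // Illustrative sketch (not part of this driver): for a submit that waits on an
    // imported sync fd and signals exported payloads, the template above turns one
    // vkQueueSubmit(queue, 1, &submit, fence) into roughly this sequence:
    //
    //     1. sync_wait() / zx_object_wait_one() on each imported payload in a
    //        work-pool thread (pre-signal tasks), blocking until all are signaled;
    //     2. a signal-only VkSubmitInfo that flips the corresponding wait semaphores
    //        host-side, so the real submits' waits can complete;
    //     3. the caller's submits, encoded (possibly asynchronously) to the host;
    //     4. a queued work-pool task that vkQueueWaitIdle()s and then signals the
    //        post-wait events/sync fds and any external fence fd.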

    VkResult on_vkQueueWaitIdle(
        void* context, VkResult,
        VkQueue queue) {

        VkEncoder* enc = (VkEncoder*)context;

        AutoLock<RecursiveLock> lock(mLock);
        std::vector<WorkPool::WaitGroupHandle> toWait =
            mQueueSensitiveWorkPoolItems[queue];
        mQueueSensitiveWorkPoolItems[queue].clear();
        lock.unlock();

        if (toWait.empty()) {
            ALOGV("%s: No queue-specific work pool items\n", __func__);
            return enc->vkQueueWaitIdle(queue, true /* do lock */);
        }

        for (auto handle : toWait) {
            ALOGV("%s: waiting on work group item: %llu\n", __func__,
                  (unsigned long long)handle);
            mWorkPool.waitAll(handle);
        }

        // now done waiting, get the host's opinion
        return enc->vkQueueWaitIdle(queue, true /* do lock */);
    }

#ifdef VK_USE_PLATFORM_ANDROID_KHR
    void unwrap_VkNativeBufferANDROID(
        const VkNativeBufferANDROID* inputNativeInfo,
        VkNativeBufferANDROID* outputNativeInfo) {

        if (!inputNativeInfo || !inputNativeInfo->handle) {
            return;
        }

        if (!outputNativeInfo || !outputNativeInfo->handle) {
            ALOGE("FATAL: Local native buffer info not properly allocated!");
            abort();
        }

        auto* gralloc = ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->grallocHelper();

        *(uint32_t*)(outputNativeInfo->handle) =
            gralloc->getHostHandle((const native_handle_t*)inputNativeInfo->handle);
    }

    void unwrap_vkCreateImage_pCreateInfo(
        const VkImageCreateInfo* pCreateInfo,
        VkImageCreateInfo* local_pCreateInfo) {

        const VkNativeBufferANDROID* inputNativeInfo =
            vk_find_struct<VkNativeBufferANDROID>(pCreateInfo);

        VkNativeBufferANDROID* outputNativeInfo =
            const_cast<VkNativeBufferANDROID*>(
                vk_find_struct<VkNativeBufferANDROID>(local_pCreateInfo));

        unwrap_VkNativeBufferANDROID(inputNativeInfo, outputNativeInfo);
    }

    void unwrap_vkAcquireImageANDROID_nativeFenceFd(int fd, int*) {
        if (fd != -1) {
            AEMU_SCOPED_TRACE("waitNativeFenceInAcquire");
            // Implicit Synchronization
            sync_wait(fd, 3000);
            // From libvulkan's swapchain.cpp:
            // """
            // NOTE: we're relying on AcquireImageANDROID to close fence_clone,
            // even if the call fails. We could close it ourselves on failure, but
            // that would create a race condition if the driver closes it on a
            // failure path: some other thread might create an fd with the same
            // number between the time the driver closes it and the time we close
            // it. We must assume one of: the driver *always* closes it even on
            // failure, or *never* closes it on failure.
            // """
            // Therefore, assume the contract where we need to close the fd in
            // this driver.
            close(fd);
        }
    }

    void unwrap_VkBindImageMemorySwapchainInfoKHR(
        const VkBindImageMemorySwapchainInfoKHR* inputBimsi,
        VkBindImageMemorySwapchainInfoKHR* outputBimsi) {
        if (!inputBimsi || !inputBimsi->swapchain) {
            return;
        }

        if (!outputBimsi || !outputBimsi->swapchain) {
            ALOGE("FATAL: Local VkBindImageMemorySwapchainInfoKHR not properly allocated!");
            abort();
        }

        // Android-based swapchains are implemented by the Android framework's
        // libvulkan. They exist only within the guest and should not be sent to
        // the host.
        outputBimsi->swapchain = VK_NULL_HANDLE;
    }

    void unwrap_VkBindImageMemory2_pBindInfos(
            uint32_t bindInfoCount,
            const VkBindImageMemoryInfo* inputBindInfos,
            VkBindImageMemoryInfo* outputBindInfos) {
        for (uint32_t i = 0; i < bindInfoCount; ++i) {
            const VkBindImageMemoryInfo* inputBindInfo = &inputBindInfos[i];
            VkBindImageMemoryInfo* outputBindInfo = &outputBindInfos[i];

            const VkNativeBufferANDROID* inputNativeInfo =
                vk_find_struct<VkNativeBufferANDROID>(inputBindInfo);

            VkNativeBufferANDROID* outputNativeInfo =
                const_cast<VkNativeBufferANDROID*>(
                    vk_find_struct<VkNativeBufferANDROID>(outputBindInfo));

            unwrap_VkNativeBufferANDROID(inputNativeInfo, outputNativeInfo);

            const VkBindImageMemorySwapchainInfoKHR* inputBimsi =
                vk_find_struct<VkBindImageMemorySwapchainInfoKHR>(inputBindInfo);

            VkBindImageMemorySwapchainInfoKHR* outputBimsi =
                const_cast<VkBindImageMemorySwapchainInfoKHR*>(
                    vk_find_struct<VkBindImageMemorySwapchainInfoKHR>(outputBindInfo));

            unwrap_VkBindImageMemorySwapchainInfoKHR(inputBimsi, outputBimsi);
        }
    }
#endif

    // Action of vkMapMemoryIntoAddressSpaceGOOGLE:
    // 1. preprocess (on_vkMapMemoryIntoAddressSpaceGOOGLE_pre):
    //    uses the address space device to reserve the right size of
    //    memory.
    // 2. the reservation results in a physical address. the physical
    //    address is set as |*pAddress|.
    // 3. after pre, the API call is encoded to the host, where the
    //    value of pAddress is also sent (the physical address).
    // 4. the host will obtain the actual gpu pointer and send it
    //    back out in |*pAddress|.
    // 5. postprocess (on_vkMapMemoryIntoAddressSpaceGOOGLE) will run,
    //    using the mmap() method of GoldfishAddressSpaceBlock to obtain
    //    a pointer in guest userspace corresponding to the host pointer.
    VkResult on_vkMapMemoryIntoAddressSpaceGOOGLE_pre(
        void*,
        VkResult,
        VkDevice,
        VkDeviceMemory memory,
        uint64_t* pAddress) {

        AutoLock<RecursiveLock> lock(mLock);

        auto it = info_VkDeviceMemory.find(memory);
        if (it == info_VkDeviceMemory.end()) {
            return VK_ERROR_OUT_OF_HOST_MEMORY;
        }

        auto& memInfo = it->second;

        GoldfishAddressSpaceBlockPtr block = std::make_shared<GoldfishAddressSpaceBlock>();
        block->allocate(mGoldfishAddressSpaceBlockProvider.get(), memInfo.coherentMemorySize);

        memInfo.goldfishBlock = block;
        *pAddress = block->physAddr();

        return VK_SUCCESS;
    }

    VkResult on_vkMapMemoryIntoAddressSpaceGOOGLE(
        void*,
        VkResult input_result,
        VkDevice,
        VkDeviceMemory memory,
        uint64_t* pAddress) {
        (void)memory;
        (void)pAddress;

        if (input_result != VK_SUCCESS) {
            return input_result;
        }

        return input_result;
    }
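
    // Illustrative sketch (hypothetical caller, not part of this driver): the pre/post
    // pair above brackets the encoded call, so end to end the flow described in the
    // numbered comment looks roughly like the following; context/device/memory/enc are
    // assumed to be in scope, and the encoder entry point name mirrors the extension.
#if 0
    uint64_t addr = 0;
    // Steps 1-2: the guest reserves an address-space block; addr becomes its
    // physical address.
    on_vkMapMemoryIntoAddressSpaceGOOGLE_pre(context, VK_SUCCESS, device, memory, &addr);
    // Steps 3-4: the encoded call carries addr to the host, which writes the GPU
    // pointer back through the same out-parameter.
    VkResult res = enc->vkMapMemoryIntoAddressSpaceGOOGLE(device, memory, &addr, true /* do lock */);
    // Step 5: post-processing runs afterward (in this implementation it only
    // propagates the result).
    on_vkMapMemoryIntoAddressSpaceGOOGLE(context, res, device, memory, &addr);
#endif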

    VkResult initDescriptorUpdateTemplateBuffers(
        const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo,
        VkDescriptorUpdateTemplate descriptorUpdateTemplate) {

        AutoLock<RecursiveLock> lock(mLock);

        auto it = info_VkDescriptorUpdateTemplate.find(descriptorUpdateTemplate);
        if (it == info_VkDescriptorUpdateTemplate.end()) {
            return VK_ERROR_INITIALIZATION_FAILED;
        }

        auto& info = it->second;
        uint32_t inlineUniformBlockBufferSize = 0;

        for (uint32_t i = 0; i < pCreateInfo->descriptorUpdateEntryCount; ++i) {
            const auto& entry = pCreateInfo->pDescriptorUpdateEntries[i];
            uint32_t descCount = entry.descriptorCount;
            VkDescriptorType descType = entry.descriptorType;
            ++info.templateEntryCount;
            if (isDescriptorTypeInlineUniformBlock(descType)) {
                inlineUniformBlockBufferSize += descCount;
                ++info.inlineUniformBlockCount;
            } else {
                for (uint32_t j = 0; j < descCount; ++j) {
                    if (isDescriptorTypeImageInfo(descType)) {
                        ++info.imageInfoCount;
                    } else if (isDescriptorTypeBufferInfo(descType)) {
                        ++info.bufferInfoCount;
                    } else if (isDescriptorTypeBufferView(descType)) {
                        ++info.bufferViewCount;
                    } else {
                        ALOGE("%s: FATAL: Unknown descriptor type %d\n", __func__, descType);
                        abort();
                    }
                }
            }
        }

        if (info.templateEntryCount)
            info.templateEntries = new VkDescriptorUpdateTemplateEntry[info.templateEntryCount];

        if (info.imageInfoCount) {
            info.imageInfoIndices = new uint32_t[info.imageInfoCount];
            info.imageInfos = new VkDescriptorImageInfo[info.imageInfoCount];
        }

        if (info.bufferInfoCount) {
            info.bufferInfoIndices = new uint32_t[info.bufferInfoCount];
            info.bufferInfos = new VkDescriptorBufferInfo[info.bufferInfoCount];
        }

        if (info.bufferViewCount) {
            info.bufferViewIndices = new uint32_t[info.bufferViewCount];
            info.bufferViews = new VkBufferView[info.bufferViewCount];
        }

        if (info.inlineUniformBlockCount) {
            info.inlineUniformBlockBuffer.resize(inlineUniformBlockBufferSize);
            info.inlineUniformBlockBytesPerBlocks.resize(info.inlineUniformBlockCount);
        }

        uint32_t imageInfoIndex = 0;
        uint32_t bufferInfoIndex = 0;
        uint32_t bufferViewIndex = 0;
        uint32_t inlineUniformBlockIndex = 0;

        for (uint32_t i = 0; i < pCreateInfo->descriptorUpdateEntryCount; ++i) {
            const auto& entry = pCreateInfo->pDescriptorUpdateEntries[i];
            uint32_t descCount = entry.descriptorCount;
            VkDescriptorType descType = entry.descriptorType;

            info.templateEntries[i] = entry;

            if (isDescriptorTypeInlineUniformBlock(descType)) {
                info.inlineUniformBlockBytesPerBlocks[inlineUniformBlockIndex] = descCount;
                ++inlineUniformBlockIndex;
            } else {
                for (uint32_t j = 0; j < descCount; ++j) {
                    if (isDescriptorTypeImageInfo(descType)) {
                        info.imageInfoIndices[imageInfoIndex] = i;
                        ++imageInfoIndex;
                    } else if (isDescriptorTypeBufferInfo(descType)) {
                        info.bufferInfoIndices[bufferInfoIndex] = i;
                        ++bufferInfoIndex;
                    } else if (isDescriptorTypeBufferView(descType)) {
                        info.bufferViewIndices[bufferViewIndex] = i;
                        ++bufferViewIndex;
                    } else {
                        ALOGE("%s: FATAL: Unknown descriptor type %d\n", __func__, descType);
                        // abort();
                    }
                }
            }
        }

        return VK_SUCCESS;
    }
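
    // Illustrative sketch (hypothetical values, not part of this driver): a template
    // with one combined-image-sampler entry (descriptorCount = 2) and one inline
    // uniform block entry (descriptorCount = 16, a byte count for that type per the
    // Vulkan spec) would be sized by the first pass above as
    //
    //     templateEntryCount           = 2
    //     imageInfoCount               = 2   // one VkDescriptorImageInfo per descriptor
    //     inlineUniformBlockCount      = 1   // one block...
    //     inlineUniformBlockBufferSize = 16  // ...backed by 16 bytes of buffer space
    //
    // and the second pass records, per descriptor, the template-entry index it came
    // from (here imageInfoIndices[0] == imageInfoIndices[1] == 0).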

    VkResult on_vkCreateDescriptorUpdateTemplate(
        void* context, VkResult input_result,
        VkDevice device,
        const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo,
        const VkAllocationCallbacks* pAllocator,
        VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate) {

        (void)context;
        (void)device;
        (void)pAllocator;

        if (input_result != VK_SUCCESS) return input_result;

        return initDescriptorUpdateTemplateBuffers(pCreateInfo, *pDescriptorUpdateTemplate);
    }

    VkResult on_vkCreateDescriptorUpdateTemplateKHR(
        void* context, VkResult input_result,
        VkDevice device,
        const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo,
        const VkAllocationCallbacks* pAllocator,
        VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate) {

        (void)context;
        (void)device;
        (void)pAllocator;

        if (input_result != VK_SUCCESS) return input_result;

        return initDescriptorUpdateTemplateBuffers(pCreateInfo, *pDescriptorUpdateTemplate);
    }

    void on_vkUpdateDescriptorSetWithTemplate(
        void* context,
        VkDevice device,
        VkDescriptorSet descriptorSet,
        VkDescriptorUpdateTemplate descriptorUpdateTemplate,
        const void* pData) {

        VkEncoder* enc = (VkEncoder*)context;

        uint8_t* userBuffer = (uint8_t*)pData;
        if (!userBuffer) return;

        // TODO: Make this thread safe
        AutoLock<RecursiveLock> lock(mLock);

        auto it = info_VkDescriptorUpdateTemplate.find(descriptorUpdateTemplate);
        if (it == info_VkDescriptorUpdateTemplate.end()) {
            return;
        }

        auto& info = it->second;

        uint32_t templateEntryCount = info.templateEntryCount;
        VkDescriptorUpdateTemplateEntry* templateEntries = info.templateEntries;

        uint32_t imageInfoCount = info.imageInfoCount;
        uint32_t bufferInfoCount = info.bufferInfoCount;
        uint32_t bufferViewCount = info.bufferViewCount;
        uint32_t inlineUniformBlockCount = info.inlineUniformBlockCount;
        uint32_t* imageInfoIndices = info.imageInfoIndices;
        uint32_t* bufferInfoIndices = info.bufferInfoIndices;
        uint32_t* bufferViewIndices = info.bufferViewIndices;
        VkDescriptorImageInfo* imageInfos = info.imageInfos;
        VkDescriptorBufferInfo* bufferInfos = info.bufferInfos;
        VkBufferView* bufferViews = info.bufferViews;
        uint8_t* inlineUniformBlockBuffer = info.inlineUniformBlockBuffer.data();
        uint32_t* inlineUniformBlockBytesPerBlocks = info.inlineUniformBlockBytesPerBlocks.data();

        lock.unlock();

        size_t currImageInfoOffset = 0;
        size_t currBufferInfoOffset = 0;
        size_t currBufferViewOffset = 0;
        size_t inlineUniformBlockOffset = 0;
        size_t inlineUniformBlockIdx = 0;

        struct goldfish_VkDescriptorSet* ds = as_goldfish_VkDescriptorSet(descriptorSet);
        ReifiedDescriptorSet* reified = ds->reified;

        bool batched = mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate;

        for (uint32_t i = 0; i < templateEntryCount; ++i) {
            const auto& entry = templateEntries[i];
            VkDescriptorType descType = entry.descriptorType;
            uint32_t dstBinding = entry.dstBinding;

            auto offset = entry.offset;
            auto stride = entry.stride;
            auto dstArrayElement = entry.dstArrayElement;

            uint32_t descCount = entry.descriptorCount;

            if (isDescriptorTypeImageInfo(descType)) {

                if (!stride) stride = sizeof(VkDescriptorImageInfo);

                const VkDescriptorImageInfo* currImageInfoBegin =
                    (const VkDescriptorImageInfo*)((uint8_t*)imageInfos + currImageInfoOffset);

                for (uint32_t j = 0; j < descCount; ++j) {
                    const VkDescriptorImageInfo* user =
                        (const VkDescriptorImageInfo*)(userBuffer + offset + j * stride);

                    memcpy(((uint8_t*)imageInfos) + currImageInfoOffset,
                           user, sizeof(VkDescriptorImageInfo));
                    currImageInfoOffset += sizeof(VkDescriptorImageInfo);
                }

                if (batched) {
                    doEmulatedDescriptorImageInfoWriteFromTemplate(
                        descType,
                        dstBinding,
                        dstArrayElement,
                        descCount,
                        currImageInfoBegin,
                        reified);
                }
            } else if (isDescriptorTypeBufferInfo(descType)) {

                if (!stride) stride = sizeof(VkDescriptorBufferInfo);

                const VkDescriptorBufferInfo* currBufferInfoBegin =
                    (const VkDescriptorBufferInfo*)((uint8_t*)bufferInfos + currBufferInfoOffset);

                for (uint32_t j = 0; j < descCount; ++j) {
                    const VkDescriptorBufferInfo* user =
                        (const VkDescriptorBufferInfo*)(userBuffer + offset + j * stride);

                    memcpy(((uint8_t*)bufferInfos) + currBufferInfoOffset,
                           user, sizeof(VkDescriptorBufferInfo));
                    currBufferInfoOffset += sizeof(VkDescriptorBufferInfo);
                }

                if (batched) {
                    doEmulatedDescriptorBufferInfoWriteFromTemplate(
                        descType,
                        dstBinding,
                        dstArrayElement,
                        descCount,
                        currBufferInfoBegin,
                        reified);
                }

            } else if (isDescriptorTypeBufferView(descType)) {
                if (!stride) stride = sizeof(VkBufferView);

                const VkBufferView* currBufferViewBegin =
                    (const VkBufferView*)((uint8_t*)bufferViews + currBufferViewOffset);

                for (uint32_t j = 0; j < descCount; ++j) {
                    const VkBufferView* user =
                        (const VkBufferView*)(userBuffer + offset + j * stride);

                    memcpy(((uint8_t*)bufferViews) + currBufferViewOffset, user,
                           sizeof(VkBufferView));
                    currBufferViewOffset += sizeof(VkBufferView);
                }

                if (batched) {
                    doEmulatedDescriptorBufferViewWriteFromTemplate(descType, dstBinding,
                                                                    dstArrayElement, descCount,
                                                                    currBufferViewBegin, reified);
                }
            } else if (isDescriptorTypeInlineUniformBlock(descType)) {
                uint32_t inlineUniformBlockBytesPerBlock =
                    inlineUniformBlockBytesPerBlocks[inlineUniformBlockIdx];
                uint8_t* currInlineUniformBlockBufferBegin =
                    inlineUniformBlockBuffer + inlineUniformBlockOffset;
                memcpy(currInlineUniformBlockBufferBegin, userBuffer + offset,
                       inlineUniformBlockBytesPerBlock);
                inlineUniformBlockIdx++;
                inlineUniformBlockOffset += inlineUniformBlockBytesPerBlock;

                if (batched) {
                    doEmulatedDescriptorInlineUniformBlockFromTemplate(
                        descType, dstBinding, dstArrayElement, descCount,
                        currInlineUniformBlockBufferBegin, reified);
                }
            } else {
                ALOGE("%s: FATAL: Unknown descriptor type %d\n", __func__, descType);
                abort();
            }
        }

        if (batched) return;

        enc->vkUpdateDescriptorSetWithTemplateSized2GOOGLE(
            device, descriptorSet, descriptorUpdateTemplate, imageInfoCount, bufferInfoCount,
            bufferViewCount, static_cast<uint32_t>(info.inlineUniformBlockBuffer.size()),
            imageInfoIndices, bufferInfoIndices, bufferViewIndices, imageInfos, bufferInfos,
            bufferViews, inlineUniformBlockBuffer, true /* do lock */);
    }
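
    // Illustrative sketch (hypothetical layout, not part of this driver): per the
    // Vulkan spec, entry.offset/entry.stride describe where each descriptor's data
    // lives in the application's pData blob, so a two-element image-info entry with
    // offset 8 and stride 24 is read by the loop above as
    //
    //     userBuffer + 8       -> VkDescriptorImageInfo for array element 0
    //     userBuffer + 8 + 24  -> VkDescriptorImageInfo for array element 1
    //
    // The "if (!stride)" fallbacks treat a zero stride as tightly packed structs of
    // the corresponding info type.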

    VkResult on_vkGetPhysicalDeviceImageFormatProperties2_common(
        bool isKhr,
        void* context, VkResult input_result,
        VkPhysicalDevice physicalDevice,
        const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo,
        VkImageFormatProperties2* pImageFormatProperties) {

        VkEncoder* enc = (VkEncoder*)context;
        (void)input_result;

        uint32_t supportedHandleType = 0;
        VkExternalImageFormatProperties* ext_img_properties =
            vk_find_struct<VkExternalImageFormatProperties>(pImageFormatProperties);
#ifdef VK_USE_PLATFORM_FUCHSIA

        constexpr VkFormat kExternalImageSupportedFormats[] = {
            VK_FORMAT_B8G8R8A8_SINT,
            VK_FORMAT_B8G8R8A8_UNORM,
            VK_FORMAT_B8G8R8A8_SRGB,
            VK_FORMAT_B8G8R8A8_SNORM,
            VK_FORMAT_B8G8R8A8_SSCALED,
            VK_FORMAT_B8G8R8A8_USCALED,
            VK_FORMAT_R8G8B8A8_SINT,
            VK_FORMAT_R8G8B8A8_UNORM,
            VK_FORMAT_R8G8B8A8_SRGB,
            VK_FORMAT_R8G8B8A8_SNORM,
            VK_FORMAT_R8G8B8A8_SSCALED,
            VK_FORMAT_R8G8B8A8_USCALED,
            VK_FORMAT_R8_UNORM,
            VK_FORMAT_R8_UINT,
            VK_FORMAT_R8_USCALED,
            VK_FORMAT_R8_SNORM,
            VK_FORMAT_R8_SINT,
            VK_FORMAT_R8_SSCALED,
            VK_FORMAT_R8_SRGB,
            VK_FORMAT_R8G8_UNORM,
            VK_FORMAT_R8G8_UINT,
            VK_FORMAT_R8G8_USCALED,
            VK_FORMAT_R8G8_SNORM,
            VK_FORMAT_R8G8_SINT,
            VK_FORMAT_R8G8_SSCALED,
            VK_FORMAT_R8G8_SRGB,
        };

        if (ext_img_properties) {
            if (std::find(std::begin(kExternalImageSupportedFormats),
                          std::end(kExternalImageSupportedFormats),
                          pImageFormatInfo->format) == std::end(kExternalImageSupportedFormats)) {
                return VK_ERROR_FORMAT_NOT_SUPPORTED;
            }
        }
        supportedHandleType |= VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA;
#endif

#ifdef VK_USE_PLATFORM_ANDROID_KHR
        VkAndroidHardwareBufferUsageANDROID* output_ahw_usage =
            vk_find_struct<VkAndroidHardwareBufferUsageANDROID>(pImageFormatProperties);
        supportedHandleType |= VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
            VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
#endif
        const VkPhysicalDeviceExternalImageFormatInfo* ext_img_info =
            vk_find_struct<VkPhysicalDeviceExternalImageFormatInfo>(pImageFormatInfo);

        if (supportedHandleType && ext_img_info) {
            // 0 is a valid handleType so we don't check against 0
            if (ext_img_info->handleType != (ext_img_info->handleType & supportedHandleType)) {
                return VK_ERROR_FORMAT_NOT_SUPPORTED;
            }
        }
        VkResult hostRes;

        if (isKhr) {
            hostRes = enc->vkGetPhysicalDeviceImageFormatProperties2KHR(
                physicalDevice, pImageFormatInfo,
                pImageFormatProperties, true /* do lock */);
        } else {
            hostRes = enc->vkGetPhysicalDeviceImageFormatProperties2(
                physicalDevice, pImageFormatInfo,
                pImageFormatProperties, true /* do lock */);
        }

        if (hostRes != VK_SUCCESS) return hostRes;

#ifdef VK_USE_PLATFORM_FUCHSIA
        if (ext_img_properties) {
            if (ext_img_info) {
                if (static_cast<uint32_t>(ext_img_info->handleType) ==
                    VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA) {
                    ext_img_properties->externalMemoryProperties = {
                            .externalMemoryFeatures =
                                    VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT |
                                    VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT,
                            .exportFromImportedHandleTypes =
                                    VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA,
                            .compatibleHandleTypes =
                                    VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA,
                    };
                }
            }
        }
#endif

#ifdef VK_USE_PLATFORM_ANDROID_KHR
        if (output_ahw_usage) {
            output_ahw_usage->androidHardwareBufferUsage =
                getAndroidHardwareBufferUsageFromVkUsage(
                    pImageFormatInfo->flags,
                    pImageFormatInfo->usage);
        }
#endif
        if (ext_img_properties) {
            transformImpl_VkExternalMemoryProperties_fromhost(
                &ext_img_properties->externalMemoryProperties, 0);
        }
        return hostRes;
    }

    VkResult on_vkGetPhysicalDeviceImageFormatProperties2(
        void* context, VkResult input_result,
        VkPhysicalDevice physicalDevice,
        const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo,
        VkImageFormatProperties2* pImageFormatProperties) {
        return on_vkGetPhysicalDeviceImageFormatProperties2_common(
            false /* not KHR */, context, input_result,
            physicalDevice, pImageFormatInfo, pImageFormatProperties);
    }

    VkResult on_vkGetPhysicalDeviceImageFormatProperties2KHR(
        void* context, VkResult input_result,
        VkPhysicalDevice physicalDevice,
        const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo,
        VkImageFormatProperties2* pImageFormatProperties) {
        return on_vkGetPhysicalDeviceImageFormatProperties2_common(
            true /* is KHR */, context, input_result,
            physicalDevice, pImageFormatInfo, pImageFormatProperties);
    }

    void on_vkGetPhysicalDeviceExternalSemaphoreProperties(
        void*,
        VkPhysicalDevice,
        const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo,
        VkExternalSemaphoreProperties* pExternalSemaphoreProperties) {
        (void)pExternalSemaphoreInfo;
        (void)pExternalSemaphoreProperties;
#ifdef VK_USE_PLATFORM_FUCHSIA
        if (pExternalSemaphoreInfo->handleType ==
            static_cast<uint32_t>(VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA)) {
            pExternalSemaphoreProperties->compatibleHandleTypes |=
                VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA;
            pExternalSemaphoreProperties->exportFromImportedHandleTypes |=
                VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA;
            pExternalSemaphoreProperties->externalSemaphoreFeatures |=
                VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
                VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
        }
#else
        const VkSemaphoreTypeCreateInfo* semaphoreTypeCi =
            vk_find_struct<VkSemaphoreTypeCreateInfo>(pExternalSemaphoreInfo);
        bool isSemaphoreTimeline =
            semaphoreTypeCi != nullptr &&
            semaphoreTypeCi->semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE;
        if (isSemaphoreTimeline) {
            // b/304373623
            // dEQP-VK.api.external.semaphore.sync_fd#info_timeline
            pExternalSemaphoreProperties->compatibleHandleTypes = 0;
            pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
            pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
        } else if (pExternalSemaphoreInfo->handleType ==
                   VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT) {
            pExternalSemaphoreProperties->compatibleHandleTypes |=
                VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
            pExternalSemaphoreProperties->exportFromImportedHandleTypes |=
                VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
            pExternalSemaphoreProperties->externalSemaphoreFeatures |=
                VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
                VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
        }
#endif  // VK_USE_PLATFORM_FUCHSIA
    }

    void registerEncoderCleanupCallback(const VkEncoder* encoder, void* object,
                                        CleanupCallback callback) {
        AutoLock<RecursiveLock> lock(mLock);
        auto& callbacks = mEncoderCleanupCallbacks[encoder];
        callbacks[object] = callback;
    }

    void unregisterEncoderCleanupCallback(const VkEncoder* encoder, void* object) {
        AutoLock<RecursiveLock> lock(mLock);
        mEncoderCleanupCallbacks[encoder].erase(object);
    }

    void onEncoderDeleted(const VkEncoder* encoder) {
        AutoLock<RecursiveLock> lock(mLock);
        if (mEncoderCleanupCallbacks.find(encoder) == mEncoderCleanupCallbacks.end()) return;

        std::unordered_map<void*, CleanupCallback> callbackCopies =
            mEncoderCleanupCallbacks[encoder];

        mEncoderCleanupCallbacks.erase(encoder);
        lock.unlock();

        for (auto it : callbackCopies) {
            it.second();
        }
    }

    uint32_t syncEncodersForCommandBuffer(VkCommandBuffer commandBuffer, VkEncoder* currentEncoder) {
        struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
        if (!cb) return 0;

        auto lastEncoder = cb->lastUsedEncoder;

        if (lastEncoder == currentEncoder) return 0;

        currentEncoder->incRef();

        cb->lastUsedEncoder = currentEncoder;

        if (!lastEncoder) return 0;

        auto oldSeq = cb->sequenceNumber;
        cb->sequenceNumber += 2;
        lastEncoder->vkCommandBufferHostSyncGOOGLE(commandBuffer, false, oldSeq + 1, true /* do lock */);
        lastEncoder->flush();
        currentEncoder->vkCommandBufferHostSyncGOOGLE(commandBuffer, true, oldSeq + 2, true /* do lock */);

        if (lastEncoder->decRef()) {
            cb->lastUsedEncoder = nullptr;
        }
        return 0;
    }
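
    // Illustrative sketch (not part of this driver): with oldSeq == N, the handshake
    // above funnels both encoders through the host's per-command-buffer sequence
    // number:
    //
    //     lastEncoder:    vkCommandBufferHostSyncGOOGLE(..., N + 1), then flush()
    //     currentEncoder: vkCommandBufferHostSyncGOOGLE(..., N + 2)
    //
    // The host consumes N + 1 before N + 2, so work already recorded through the old
    // encoder is ordered before anything recorded through the new one.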

    uint32_t syncEncodersForQueue(VkQueue queue, VkEncoder* currentEncoder) {
        if (!supportsAsyncQueueSubmit()) {
            return 0;
        }

        struct goldfish_VkQueue* q = as_goldfish_VkQueue(queue);
        if (!q) return 0;

        auto lastEncoder = q->lastUsedEncoder;

        if (lastEncoder == currentEncoder) return 0;

        currentEncoder->incRef();

        q->lastUsedEncoder = currentEncoder;

        if (!lastEncoder) return 0;

        auto oldSeq = q->sequenceNumber;
        q->sequenceNumber += 2;
        lastEncoder->vkQueueHostSyncGOOGLE(queue, false, oldSeq + 1, true /* do lock */);
        lastEncoder->flush();
        currentEncoder->vkQueueHostSyncGOOGLE(queue, true, oldSeq + 2, true /* do lock */);

        if (lastEncoder->decRef()) {
            q->lastUsedEncoder = nullptr;
        }

        return 0;
    }

    CommandBufferStagingStream::Alloc getAlloc() {
        if (mFeatureInfo->hasVulkanAuxCommandMemory) {
            return [this](size_t size) -> CommandBufferStagingStream::Memory {
                VkMemoryAllocateInfo info{
                    .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
                    .pNext = nullptr,
                    .allocationSize = size,
                    .memoryTypeIndex = VK_MAX_MEMORY_TYPES  // indicates auxiliary memory
                };

                auto enc = ResourceTracker::getThreadLocalEncoder();
                VkDevice device = VK_NULL_HANDLE;
                VkDeviceMemory vkDeviceMem = VK_NULL_HANDLE;
                VkResult result = getCoherentMemory(&info, enc, device, &vkDeviceMem);
                if (result != VK_SUCCESS) {
                    ALOGE("Failed to get coherent memory %d", result);
                    return {.deviceMemory = VK_NULL_HANDLE, .ptr = nullptr};
                }

                // getCoherentMemory() uses suballocations.
                // To retrieve the suballocated memory address, look up the
                // VkDeviceMemory filled in by getCoherentMemory().
                // Scope of mLock:
                {
                    AutoLock<RecursiveLock> lock(mLock);
                    const auto it = info_VkDeviceMemory.find(vkDeviceMem);
                    if (it == info_VkDeviceMemory.end()) {
                        ALOGE("Allocated coherent memory not found");
                        return {.deviceMemory = VK_NULL_HANDLE, .ptr = nullptr};
                    }

                    const auto& info = it->second;
                    return {.deviceMemory = vkDeviceMem, .ptr = info.ptr};
                }
            };
        }
        return nullptr;
    }

    CommandBufferStagingStream::Free getFree() {
        if (mFeatureInfo->hasVulkanAuxCommandMemory) {
            return [this](const CommandBufferStagingStream::Memory& memory) {
                // deviceMemory may not be the actual backing auxiliary VkDeviceMemory:
                // for suballocations, deviceMemory is an alias VkDeviceMemory handle;
                // freeCoherentMemoryLocked maps the alias to the backing VkDeviceMemory.
                VkDeviceMemory deviceMemory = memory.deviceMemory;
                AutoLock<RecursiveLock> lock(mLock);
                auto it = info_VkDeviceMemory.find(deviceMemory);
                if (it == info_VkDeviceMemory.end()) {
                    ALOGE("Device memory to free not found");
                    return;
                }
                auto coherentMemory = freeCoherentMemoryLocked(deviceMemory, it->second);
                // We have to release the lock before we could possibly free a
                // CoherentMemory, because that will call into VkEncoder, which
                // shouldn't be called when the lock is held.
                lock.unlock();
                coherentMemory = nullptr;
            };
        }
        return nullptr;
    }
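
    // Illustrative sketch (hypothetical wiring, not part of this driver): when the
    // feature is enabled, the two callbacks above give a staging stream coherent,
    // host-visible backing instead of guest heap memory; the constructor shape is
    // assumed here for illustration.
#if 0
    CommandBufferStagingStream::Alloc allocFn = getAlloc();
    CommandBufferStagingStream::Free freeFn = getFree();
    if (allocFn && freeFn) {
        // The stream obtains its buffer through allocFn() and returns it via freeFn().
        CommandBufferStagingStream stream(allocFn, freeFn);
    }
#endif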

    VkResult on_vkBeginCommandBuffer(
        void* context, VkResult input_result,
        VkCommandBuffer commandBuffer,
        const VkCommandBufferBeginInfo* pBeginInfo) {

        (void)context;

        resetCommandBufferStagingInfo(commandBuffer,
                                      true /* also reset primaries */,
                                      true /* also clear pending descriptor sets */);

        VkEncoder* enc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
        (void)input_result;

        struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
        cb->flags = pBeginInfo->flags;

        VkCommandBufferBeginInfo modifiedBeginInfo;

        if (pBeginInfo->pInheritanceInfo && !cb->isSecondary) {
            modifiedBeginInfo = *pBeginInfo;
            modifiedBeginInfo.pInheritanceInfo = nullptr;
            pBeginInfo = &modifiedBeginInfo;
        }

        if (!supportsDeferredCommands()) {
            return enc->vkBeginCommandBuffer(commandBuffer, pBeginInfo, true /* do lock */);
        }

        enc->vkBeginCommandBufferAsyncGOOGLE(commandBuffer, pBeginInfo, true /* do lock */);

        return VK_SUCCESS;
    }

    VkResult on_vkEndCommandBuffer(
        void* context, VkResult input_result,
        VkCommandBuffer commandBuffer) {

        VkEncoder* enc = (VkEncoder*)context;
        (void)input_result;

        if (!supportsDeferredCommands()) {
            return enc->vkEndCommandBuffer(commandBuffer, true /* do lock */);
        }

        enc->vkEndCommandBufferAsyncGOOGLE(commandBuffer, true /* do lock */);

        return VK_SUCCESS;
    }

    VkResult on_vkResetCommandBuffer(
        void* context, VkResult input_result,
        VkCommandBuffer commandBuffer,
        VkCommandBufferResetFlags flags) {

        resetCommandBufferStagingInfo(commandBuffer,
                                      true /* also reset primaries */,
                                      true /* also clear pending descriptor sets */);

        VkEncoder* enc = (VkEncoder*)context;
        (void)input_result;

        if (!supportsDeferredCommands()) {
            return enc->vkResetCommandBuffer(commandBuffer, flags, true /* do lock */);
        }

        enc->vkResetCommandBufferAsyncGOOGLE(commandBuffer, flags, true /* do lock */);
        return VK_SUCCESS;
    }

    VkResult on_vkCreateImageView(
        void* context, VkResult input_result,
        VkDevice device,
        const VkImageViewCreateInfo* pCreateInfo,
        const VkAllocationCallbacks* pAllocator,
        VkImageView* pView) {

        VkEncoder* enc = (VkEncoder*)context;
        (void)input_result;

        VkImageViewCreateInfo localCreateInfo = vk_make_orphan_copy(*pCreateInfo);
        vk_struct_chain_iterator structChainIter = vk_make_chain_iterator(&localCreateInfo);

#if defined(VK_USE_PLATFORM_ANDROID_KHR)
        if (pCreateInfo->format == VK_FORMAT_UNDEFINED) {
            AutoLock<RecursiveLock> lock(mLock);

            auto it = info_VkImage.find(pCreateInfo->image);
            if (it != info_VkImage.end() && it->second.hasExternalFormat) {
                localCreateInfo.format = vk_format_from_android(it->second.androidFormat);
            }
        }
        VkSamplerYcbcrConversionInfo localVkSamplerYcbcrConversionInfo;
        const VkSamplerYcbcrConversionInfo* samplerYcbcrConversionInfo =
            vk_find_struct<VkSamplerYcbcrConversionInfo>(pCreateInfo);
        if (samplerYcbcrConversionInfo) {
            if (samplerYcbcrConversionInfo->conversion != VK_YCBCR_CONVERSION_DO_NOTHING) {
                localVkSamplerYcbcrConversionInfo = vk_make_orphan_copy(*samplerYcbcrConversionInfo);
                vk_append_struct(&structChainIter, &localVkSamplerYcbcrConversionInfo);
            }
        }
#endif

        return enc->vkCreateImageView(device, &localCreateInfo, pAllocator, pView, true /* do lock */);
    }

    void on_vkCmdExecuteCommands(
        void* context,
        VkCommandBuffer commandBuffer,
        uint32_t commandBufferCount,
        const VkCommandBuffer* pCommandBuffers) {

        VkEncoder* enc = (VkEncoder*)context;

        if (!mFeatureInfo->hasVulkanQueueSubmitWithCommands) {
            enc->vkCmdExecuteCommands(commandBuffer, commandBufferCount, pCommandBuffers,
                                      true /* do lock */);
            return;
        }

        struct goldfish_VkCommandBuffer* primary = as_goldfish_VkCommandBuffer(commandBuffer);
        for (uint32_t i = 0; i < commandBufferCount; ++i) {
            struct goldfish_VkCommandBuffer* secondary =
                as_goldfish_VkCommandBuffer(pCommandBuffers[i]);
            appendObject(&secondary->superObjects, primary);
            appendObject(&primary->subObjects, secondary);
        }

        enc->vkCmdExecuteCommands(commandBuffer, commandBufferCount, pCommandBuffers,
                                  true /* do lock */);
    }

    void addPendingDescriptorSets(VkCommandBuffer commandBuffer, uint32_t descriptorSetCount,
                                  const VkDescriptorSet* pDescriptorSets) {
        struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);

        if (!cb->userPtr) {
            CommandBufferPendingDescriptorSets* newPendingSets =
                new CommandBufferPendingDescriptorSets;
            cb->userPtr = newPendingSets;
        }

        CommandBufferPendingDescriptorSets* pendingSets =
            (CommandBufferPendingDescriptorSets*)cb->userPtr;

        for (uint32_t i = 0; i < descriptorSetCount; ++i) {
            pendingSets->sets.insert(pDescriptorSets[i]);
        }
    }

    void on_vkCmdBindDescriptorSets(
        void* context,
        VkCommandBuffer commandBuffer,
        VkPipelineBindPoint pipelineBindPoint,
        VkPipelineLayout layout,
        uint32_t firstSet,
        uint32_t descriptorSetCount,
        const VkDescriptorSet* pDescriptorSets,
        uint32_t dynamicOffsetCount,
        const uint32_t* pDynamicOffsets) {

        VkEncoder* enc = (VkEncoder*)context;

        if (mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate)
            addPendingDescriptorSets(commandBuffer, descriptorSetCount, pDescriptorSets);

        enc->vkCmdBindDescriptorSets(
            commandBuffer,
            pipelineBindPoint,
            layout,
            firstSet,
            descriptorSetCount,
            pDescriptorSets,
            dynamicOffsetCount,
            pDynamicOffsets,
            true /* do lock */);
    }

7216     void on_vkCmdPipelineBarrier(
7217         void* context,
7218         VkCommandBuffer commandBuffer,
7219         VkPipelineStageFlags srcStageMask,
7220         VkPipelineStageFlags dstStageMask,
7221         VkDependencyFlags dependencyFlags,
7222         uint32_t memoryBarrierCount,
7223         const VkMemoryBarrier* pMemoryBarriers,
7224         uint32_t bufferMemoryBarrierCount,
7225         const VkBufferMemoryBarrier* pBufferMemoryBarriers,
7226         uint32_t imageMemoryBarrierCount,
7227         const VkImageMemoryBarrier* pImageMemoryBarriers) {
7228 
7229         VkEncoder* enc = (VkEncoder*)context;
7230 
7231         std::vector<VkImageMemoryBarrier> updatedImageMemoryBarriers;
7232         updatedImageMemoryBarriers.reserve(imageMemoryBarrierCount);
7233         for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
7234             VkImageMemoryBarrier barrier = pImageMemoryBarriers[i];
7235 
7236 #ifdef VK_USE_PLATFORM_ANDROID_KHR
7237             // Unfortunately, Android does not yet have a mechanism for sharing the expected
7238             // VkImageLayout when passing around AHardwareBuffer-s, so many existing users
7239             // that import AHardwareBuffer-s into VkImage-s/VkDeviceMemory-s simply use
7240             // VK_IMAGE_LAYOUT_UNDEFINED. However, the Vulkan spec's image layout transition
7241             // section says "If the old layout is VK_IMAGE_LAYOUT_UNDEFINED, the contents of
7242             // that range may be discarded." Some Vulkan drivers have been observed to actually
7243             // perform the discard, which leads to AHardwareBuffer-s being unintentionally
7244             // cleared. See go/ahb-vkimagelayout for more information.
7245             if (barrier.srcQueueFamilyIndex != barrier.dstQueueFamilyIndex &&
7246                 (barrier.srcQueueFamilyIndex == VK_QUEUE_FAMILY_EXTERNAL ||
7247                  barrier.srcQueueFamilyIndex == VK_QUEUE_FAMILY_FOREIGN_EXT) &&
7248                 barrier.oldLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
7249                 // This is not a complete solution, as the Vulkan spec does not require
7250                 // drivers to perform a no-op when oldLayout equals newLayout, but in
7251                 // practice this workaround has been observed to be enough to avoid
7252                 // clearing out images.
7253                 // TODO(b/236179843): figure out long term solution.
7254                 barrier.oldLayout = barrier.newLayout;
7255             }
7256 #endif
7257 
7258             updatedImageMemoryBarriers.push_back(barrier);
7259         }
7260 
7261         enc->vkCmdPipelineBarrier(
7262             commandBuffer,
7263             srcStageMask,
7264             dstStageMask,
7265             dependencyFlags,
7266             memoryBarrierCount,
7267             pMemoryBarriers,
7268             bufferMemoryBarrierCount,
7269             pBufferMemoryBarriers,
7270             (uint32_t)updatedImageMemoryBarriers.size(),
7271             updatedImageMemoryBarriers.data(),
7272             true /* do lock */);
7273     }
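    // Example of the rewrite above (illustrative values): an acquire barrier
    //
    //   { srcQueueFamilyIndex = VK_QUEUE_FAMILY_FOREIGN_EXT,
    //     dstQueueFamilyIndex = 0,
    //     oldLayout = VK_IMAGE_LAYOUT_UNDEFINED,
    //     newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL }
    //
    // is encoded with oldLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
    // so drivers that honor the "contents may be discarded" wording do not
    // wipe the imported AHardwareBuffer contents.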
7274 
7275     void decDescriptorSetLayoutRef(
7276         void* context,
7277         VkDevice device,
7278         VkDescriptorSetLayout descriptorSetLayout,
7279         const VkAllocationCallbacks* pAllocator) {
7280 
7281         if (!descriptorSetLayout) return;
7282 
7283         struct goldfish_VkDescriptorSetLayout* setLayout = as_goldfish_VkDescriptorSetLayout(descriptorSetLayout);
7284 
7285         if (0 == --setLayout->layoutInfo->refcount) {
7286             VkEncoder* enc = (VkEncoder*)context;
7287             enc->vkDestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator, true /* do lock */);
7288         }
7289     }
7290 
7291     void on_vkDestroyDescriptorSetLayout(
7292         void* context,
7293         VkDevice device,
7294         VkDescriptorSetLayout descriptorSetLayout,
7295         const VkAllocationCallbacks* pAllocator) {
7296         decDescriptorSetLayoutRef(context, device, descriptorSetLayout, pAllocator);
7297     }
7298 
7299     VkResult on_vkAllocateCommandBuffers(
7300         void* context,
7301         VkResult input_result,
7302         VkDevice device,
7303         const VkCommandBufferAllocateInfo* pAllocateInfo,
7304         VkCommandBuffer* pCommandBuffers) {
7305 
7306         (void)input_result;
7307 
7308         VkEncoder* enc = (VkEncoder*)context;
7309         VkResult res = enc->vkAllocateCommandBuffers(device, pAllocateInfo, pCommandBuffers, true /* do lock */);
7310         if (VK_SUCCESS != res) return res;
7311 
7312         for (uint32_t i = 0; i < pAllocateInfo->commandBufferCount; ++i) {
7313             struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(pCommandBuffers[i]);
7314             cb->isSecondary = pAllocateInfo->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY;
7315             cb->device = device;
7316         }
7317 
7318         return res;
7319     }
7320 
7321 #if defined(VK_USE_PLATFORM_ANDROID_KHR)
7322     VkResult exportSyncFdForQSRILocked(VkImage image, int *fd) {
7323 
7324         ALOGV("%s: call for image %p host image handle 0x%llx\n", __func__, (void*)image,
7325               (unsigned long long)get_host_u64_VkImage(image));
7326 
7327         if (mFeatureInfo->hasVirtioGpuNativeSync) {
7328             struct VirtGpuExecBuffer exec = { };
7329             struct gfxstreamCreateQSRIExportVK exportQSRI = { };
7330             VirtGpuDevice& instance = VirtGpuDevice::getInstance();
7331 
7332             uint64_t hostImageHandle = get_host_u64_VkImage(image);
7333 
7334             exportQSRI.hdr.opCode = GFXSTREAM_CREATE_QSRI_EXPORT_VK;
7335             exportQSRI.imageHandleLo = (uint32_t)hostImageHandle;
7336             exportQSRI.imageHandleHi = (uint32_t)(hostImageHandle >> 32);
7337 
7338             exec.command = static_cast<void*>(&exportQSRI);
7339             exec.command_size = sizeof(exportQSRI);
7340             exec.flags = kFenceOut | kRingIdx;
7341             if (instance.execBuffer(exec, nullptr))
7342                 return VK_ERROR_OUT_OF_HOST_MEMORY;
7343 
7344             *fd = exec.handle.osHandle;
7345         } else {
7346             ensureSyncDeviceFd();
7347             goldfish_sync_queue_work(
7348                     mSyncDeviceFd,
7349                     get_host_u64_VkImage(image) /* the handle */,
7350                     GOLDFISH_SYNC_VULKAN_QSRI /* thread handle (doubling as type field) */,
7351                     fd);
7352         }
7353 
7354         ALOGV("%s: got fd: %d\n", __func__, *fd);
7355         auto imageInfoIt = info_VkImage.find(image);
7356         if (imageInfoIt != info_VkImage.end()) {
7357             auto& imageInfo = imageInfoIt->second;
7358 
7359             // Remove any pending QSRI sync fds that are already signaled.
7360             auto syncFdIt = imageInfo.pendingQsriSyncFds.begin();
7361             while (syncFdIt != imageInfo.pendingQsriSyncFds.end()) {
7362                 int syncFd = *syncFdIt;
7363                 int syncWaitRet = sync_wait(syncFd, /*timeout msecs*/0);
7364                 if (syncWaitRet == 0) {
7365                     // Sync fd is signaled.
7366                     syncFdIt = imageInfo.pendingQsriSyncFds.erase(syncFdIt);
7367                     close(syncFd);
7368                 } else {
7369                     if (errno != ETIME) {
7370                         ALOGE("%s: Failed to wait for pending QSRI sync: strerror: %s errno: %d",
7371                               __func__, strerror(errno), errno);
7372                     }
7373                     break;
7374                 }
7375             }
7376 
7377             int syncFdDup = dup(*fd);
7378             if (syncFdDup < 0) {
7379                 ALOGE("%s: Failed to dup() QSRI sync fd: strerror: %s errno: %d",
7380                       __func__, strerror(errno), errno);
7381             } else {
7382                 imageInfo.pendingQsriSyncFds.push_back(syncFdDup);
7383             }
7384         }
7385 
7386         return VK_SUCCESS;
7387     }
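    // Wire-format sketch of the virtio-gpu path above: the 64-bit host image
    // handle is split into two 32-bit words of a gfxstreamCreateQSRIExportVK
    // command, and kFenceOut asks virtio-gpu to hand back a fence fd that
    // signals when the host-side QSRI completes. With an illustrative handle:
    //
    //   hostImageHandle          = 0x1122334455667788
    //   exportQSRI.imageHandleLo = 0x55667788
    //   exportQSRI.imageHandleHi = 0x11223344
    //   instance.execBuffer(exec, nullptr);
    //   *fd = exec.handle.osHandle;   // the QSRI sync fd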
7388 
7389     VkResult on_vkQueueSignalReleaseImageANDROID(
7390         void* context,
7391         VkResult input_result,
7392         VkQueue queue,
7393         uint32_t waitSemaphoreCount,
7394         const VkSemaphore* pWaitSemaphores,
7395         VkImage image,
7396         int* pNativeFenceFd) {
7397 
7398         (void)input_result;
7399 
7400         VkEncoder* enc = (VkEncoder*)context;
7401 
7402         if (!mFeatureInfo->hasVulkanAsyncQsri) {
7403             return enc->vkQueueSignalReleaseImageANDROID(queue, waitSemaphoreCount, pWaitSemaphores, image, pNativeFenceFd, true /* lock */);
7404         }
7405 
7406         {
7407             AutoLock<RecursiveLock> lock(mLock);
7408             auto it = info_VkImage.find(image);
7409             if (it == info_VkImage.end()) {
7410                 if (pNativeFenceFd) *pNativeFenceFd = -1;
7411                 return VK_ERROR_INITIALIZATION_FAILED;
7412             }
7413         }
7414 
7415         enc->vkQueueSignalReleaseImageANDROIDAsyncGOOGLE(queue, waitSemaphoreCount, pWaitSemaphores, image, true /* lock */);
7416 
7417         AutoLock<RecursiveLock> lock(mLock);
7418         VkResult result;
7419         if (pNativeFenceFd) {
7420             result =
7421                 exportSyncFdForQSRILocked(image, pNativeFenceFd);
7422         } else {
7423             int syncFd = -1;
7424             result = exportSyncFdForQSRILocked(image, &syncFd);
7425 
7426             if (syncFd >= 0)
7427                 close(syncFd);
7428         }
7429 
7430         return result;
7431     }
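    // Caller-side sketch (assumed consumer, e.g. the swapchain/queueBuffer
    // glue -- not shown in this file): the returned native fence fd is what
    // the consumer waits on before reading the image:
    //
    //   int fenceFd = -1;
    //   vkQueueSignalReleaseImageANDROID(queue, n, waitSems, image, &fenceFd);
    //   // sync_wait(fenceFd, timeoutMs), or pass fenceFd along with the buffer.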
7432 #endif
7433 
7434     VkResult on_vkCreateGraphicsPipelines(
7435         void* context,
7436         VkResult input_result,
7437         VkDevice device,
7438         VkPipelineCache pipelineCache,
7439         uint32_t createInfoCount,
7440         const VkGraphicsPipelineCreateInfo* pCreateInfos,
7441         const VkAllocationCallbacks* pAllocator,
7442         VkPipeline* pPipelines) {
7443         (void)input_result;
7444         VkEncoder* enc = (VkEncoder*)context;
7445         std::vector<VkGraphicsPipelineCreateInfo> localCreateInfos(
7446                 pCreateInfos, pCreateInfos + createInfoCount);
7447         for (VkGraphicsPipelineCreateInfo& graphicsPipelineCreateInfo : localCreateInfos) {
7448             // dEQP-VK.api.pipeline.pipeline_invalid_pointers_unused_structs#graphics
7449             bool requireViewportState = false;
7450             // VUID-VkGraphicsPipelineCreateInfo-rasterizerDiscardEnable-00750
7451             requireViewportState |= graphicsPipelineCreateInfo.pRasterizationState != nullptr &&
7452                     graphicsPipelineCreateInfo.pRasterizationState->rasterizerDiscardEnable
7453                         == VK_FALSE;
7454             // VUID-VkGraphicsPipelineCreateInfo-pViewportState-04892
7455 #ifdef VK_EXT_extended_dynamic_state2
7456             if (!requireViewportState && graphicsPipelineCreateInfo.pDynamicState) {
7457                 for (uint32_t i = 0; i <
7458                             graphicsPipelineCreateInfo.pDynamicState->dynamicStateCount; i++) {
7459                     if (VK_DYNAMIC_STATE_RASTERIZER_DISCARD_ENABLE_EXT ==
7460                                 graphicsPipelineCreateInfo.pDynamicState->pDynamicStates[i]) {
7461                         requireViewportState = true;
7462                         break;
7463                     }
7464                 }
7465             }
7466 #endif // VK_EXT_extended_dynamic_state2
7467             if (!requireViewportState) {
7468                 graphicsPipelineCreateInfo.pViewportState = nullptr;
7469             }
7470 
7471             // pMultisampleState has the same requirement as pViewportState.
7472             bool shouldIncludeFragmentShaderState = requireViewportState;
7473 
7474             // VUID-VkGraphicsPipelineCreateInfo-rasterizerDiscardEnable-00751
7475             if (!shouldIncludeFragmentShaderState) {
7476                 graphicsPipelineCreateInfo.pMultisampleState = nullptr;
7477             }
7478 
7479             bool forceDepthStencilState = false;
7480             bool forceColorBlendState = false;
7481 
7482             const VkPipelineRenderingCreateInfo* pipelineRenderingInfo =
7483                 vk_find_struct<VkPipelineRenderingCreateInfo>(&graphicsPipelineCreateInfo);
7484             if (pipelineRenderingInfo) {
7485                 forceDepthStencilState |= pipelineRenderingInfo->depthAttachmentFormat != VK_FORMAT_UNDEFINED;
7486                 forceDepthStencilState |= pipelineRenderingInfo->stencilAttachmentFormat != VK_FORMAT_UNDEFINED;
7487                 forceColorBlendState |= pipelineRenderingInfo->colorAttachmentCount != 0;
7488             }
7489             // VUID-VkGraphicsPipelineCreateInfo-renderPass-06043
7490             // VUID-VkGraphicsPipelineCreateInfo-renderPass-06044
7491             if (graphicsPipelineCreateInfo.renderPass == VK_NULL_HANDLE
7492                     || !shouldIncludeFragmentShaderState) {
7493                 // VUID-VkGraphicsPipelineCreateInfo-renderPass-06053
7494                 if (!forceDepthStencilState) {
7495                     graphicsPipelineCreateInfo.pDepthStencilState = nullptr;
7496                 }
7497                 if (!forceColorBlendState) {
7498                     graphicsPipelineCreateInfo.pColorBlendState = nullptr;
7499                 }
7500             }
7501         }
7502         return enc->vkCreateGraphicsPipelines(device, pipelineCache, localCreateInfos.size(),
7503                 localCreateInfos.data(), pAllocator, pPipelines, true /* do lock */);
7504     }
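    // Example of the sanitization above (illustrative, mirroring the dEQP
    // case): with rasterizer discard statically enabled and no dynamic
    // rasterizer-discard state, pViewportState/pMultisampleState may legally
    // be garbage pointers (VUID-00750/00751), so they are nulled before
    // encoding:
    //
    //   rasterState.rasterizerDiscardEnable = VK_TRUE;
    //   createInfo.pViewportState    = (VkPipelineViewportStateCreateInfo*)0x1234;
    //   createInfo.pMultisampleState = (VkPipelineMultisampleStateCreateInfo*)0x5678;
    //   // after the loop: both pointers are nullptr in localCreateInfos[i].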
7505 
7506     uint32_t getApiVersionFromInstance(VkInstance instance) const {
7507         AutoLock<RecursiveLock> lock(mLock);
7508         uint32_t api = kDefaultApiVersion;
7509 
7510         auto it = info_VkInstance.find(instance);
7511         if (it == info_VkInstance.end()) return api;
7512 
7513         api = it->second.highestApiVersion;
7514 
7515         return api;
7516     }
7517 
7518     uint32_t getApiVersionFromDevice(VkDevice device) const {
7519         AutoLock<RecursiveLock> lock(mLock);
7520 
7521         uint32_t api = kDefaultApiVersion;
7522 
7523         auto it = info_VkDevice.find(device);
7524         if (it == info_VkDevice.end()) return api;
7525 
7526         api = it->second.apiVersion;
7527 
7528         return api;
7529     }
7530 
7531     bool hasInstanceExtension(VkInstance instance, const std::string& name) const {
7532         AutoLock<RecursiveLock> lock(mLock);
7533 
7534         auto it = info_VkInstance.find(instance);
7535         if (it == info_VkInstance.end()) return false;
7536 
7537         return it->second.enabledExtensions.find(name) !=
7538                it->second.enabledExtensions.end();
7539     }
7540 
7541     bool hasDeviceExtension(VkDevice device, const std::string& name) const {
7542         AutoLock<RecursiveLock> lock(mLock);
7543 
7544         auto it = info_VkDevice.find(device);
7545         if (it == info_VkDevice.end()) return false;
7546 
7547         return it->second.enabledExtensions.find(name) !=
7548                it->second.enabledExtensions.end();
7549     }
7550 
7551     VkDevice getDevice(VkCommandBuffer commandBuffer) const {
7552         struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
7553         if (!cb) {
7554             return nullptr;
7555         }
7556         return cb->device;
7557     }
7558 
7559     // Resets staging stream for this command buffer and primary command buffers
7560     // where this command buffer has been recorded. If requested, also clears the pending
7561     // descriptor sets.
7562     void resetCommandBufferStagingInfo(VkCommandBuffer commandBuffer, bool alsoResetPrimaries,
7563                                        bool alsoClearPendingDescriptorSets) {
7564         struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
7565         if (!cb) {
7566             return;
7567         }
7568         if (cb->privateEncoder) {
7569             sStaging.pushStaging((CommandBufferStagingStream*)cb->privateStream, cb->privateEncoder);
7570             cb->privateEncoder = nullptr;
7571             cb->privateStream = nullptr;
7572         }
7573 
7574         if (alsoClearPendingDescriptorSets && cb->userPtr) {
7575             CommandBufferPendingDescriptorSets* pendingSets = (CommandBufferPendingDescriptorSets*)cb->userPtr;
7576             pendingSets->sets.clear();
7577         }
7578 
7579         if (alsoResetPrimaries) {
7580             forAllObjects(cb->superObjects, [this, alsoResetPrimaries, alsoClearPendingDescriptorSets](void* obj) {
7581                 VkCommandBuffer superCommandBuffer = (VkCommandBuffer)obj;
7582                 struct goldfish_VkCommandBuffer* superCb = as_goldfish_VkCommandBuffer(superCommandBuffer);
7583                 this->resetCommandBufferStagingInfo(superCommandBuffer, alsoResetPrimaries, alsoClearPendingDescriptorSets);
7584             });
7585             eraseObjects(&cb->superObjects);
7586         }
7587 
7588         forAllObjects(cb->subObjects, [cb](void* obj) {
7589             VkCommandBuffer subCommandBuffer = (VkCommandBuffer)obj;
7590             struct goldfish_VkCommandBuffer* subCb = as_goldfish_VkCommandBuffer(subCommandBuffer);
7591             // We don't do resetCommandBufferStagingInfo(subCommandBuffer)
7592             // since the user still might have submittable stuff pending there.
7593             eraseObject(&subCb->superObjects, (void*)cb);
7594         });
7595 
7596         eraseObjects(&cb->subObjects);
7597     }
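    // Teardown sketch for the super/sub graph maintained above: resetting a
    // secondary S that was executed into primaries P0 and P1 (with
    // alsoResetPrimaries) also resets P0 and P1, since their recorded streams
    // reference S, then unlinks S from its remaining secondaries:
    //
    //   S.superObjects = { P0, P1 }  ->  reset P0, P1; superObjects erased
    //   S.subObjects   = { ... }     ->  each sub drops its back-pointer to S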
7598 
7599     void resetCommandPoolStagingInfo(VkCommandPool commandPool) {
7600         struct goldfish_VkCommandPool* p = as_goldfish_VkCommandPool(commandPool);
7601 
7602         if (!p) return;
7603 
7604         forAllObjects(p->subObjects, [this](void* commandBuffer) {
7605             this->resetCommandBufferStagingInfo((VkCommandBuffer)commandBuffer, true /* also reset primaries */, true /* also clear pending descriptor sets */);
7606         });
7607     }
7608 
7609     void addToCommandPool(VkCommandPool commandPool,
7610                           uint32_t commandBufferCount,
7611                           VkCommandBuffer* pCommandBuffers) {
7612         for (uint32_t i = 0; i < commandBufferCount; ++i) {
7613             struct goldfish_VkCommandPool* p = as_goldfish_VkCommandPool(commandPool);
7614             struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(pCommandBuffers[i]);
7615             appendObject(&p->subObjects, (void*)(pCommandBuffers[i]));
7616             appendObject(&cb->poolObjects, (void*)commandPool);
7617         }
7618     }
7619 
7620     void clearCommandPool(VkCommandPool commandPool) {
7621         resetCommandPoolStagingInfo(commandPool);
7622         struct goldfish_VkCommandPool* p = as_goldfish_VkCommandPool(commandPool);
7623         forAllObjects(p->subObjects, [this](void* commandBuffer) {
7624             this->unregister_VkCommandBuffer((VkCommandBuffer)commandBuffer);
7625         });
7626         eraseObjects(&p->subObjects);
7627     }
7628 
7629 private:
7630     mutable RecursiveLock mLock;
7631 
7632     const VkPhysicalDeviceMemoryProperties& getPhysicalDeviceMemoryProperties(
7633             void* context,
7634             VkDevice device = VK_NULL_HANDLE,
7635             VkPhysicalDevice physicalDevice = VK_NULL_HANDLE) {
7636         if (!mCachedPhysicalDeviceMemoryProps) {
7637             if (physicalDevice == VK_NULL_HANDLE) {
7638                 AutoLock<RecursiveLock> lock(mLock);
7639 
7640                 auto deviceInfoIt = info_VkDevice.find(device);
7641                 if (deviceInfoIt == info_VkDevice.end()) {
7642                     ALOGE("Failed to pass device or physical device.");
7643                     abort();
7644                 }
7645                 const auto& deviceInfo = deviceInfoIt->second;
7646                 physicalDevice = deviceInfo.physdev;
7647             }
7648 
7649             VkEncoder* enc = (VkEncoder*)context;
7650 
7651             VkPhysicalDeviceMemoryProperties properties;
7652             enc->vkGetPhysicalDeviceMemoryProperties(physicalDevice, &properties, true /* do lock */);
7653 
7654             mCachedPhysicalDeviceMemoryProps.emplace(std::move(properties));
7655         }
7656         return *mCachedPhysicalDeviceMemoryProps;
7657     }
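    // Caching assumption (inferred from the code above): the emulator exposes
    // a single VkPhysicalDevice whose memory properties never change, so the
    // first query's result is cached for the process lifetime and later calls
    // skip the host round trip entirely.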
7658 
7659     std::optional<const VkPhysicalDeviceMemoryProperties> mCachedPhysicalDeviceMemoryProps;
7660     std::unique_ptr<EmulatorFeatureInfo> mFeatureInfo;
7661     std::unique_ptr<GoldfishAddressSpaceBlockProvider> mGoldfishAddressSpaceBlockProvider;
7662 
7663     struct VirtGpuCaps mCaps;
7664     std::vector<VkExtensionProperties> mHostInstanceExtensions;
7665     std::vector<VkExtensionProperties> mHostDeviceExtensions;
7666 
7667     // 32 bits only for now, upper bits may be used later.
7668     std::atomic<uint32_t> mBlobId = 0;
7669 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
7670     int mSyncDeviceFd = -1;
7671 #endif
7672 
7673 #ifdef VK_USE_PLATFORM_FUCHSIA
7674     fidl::WireSyncClient<fuchsia_hardware_goldfish::ControlDevice>
7675         mControlDevice;
7676     fidl::WireSyncClient<fuchsia_sysmem::Allocator>
7677         mSysmemAllocator;
7678 #endif
7679 
7680     WorkPool mWorkPool { 4 };
7681     std::unordered_map<VkQueue, std::vector<WorkPool::WaitGroupHandle>>
7682         mQueueSensitiveWorkPoolItems;
7683 
7684     std::unordered_map<const VkEncoder*, std::unordered_map<void*, CleanupCallback>> mEncoderCleanupCallbacks;
7685 
7686 };
7687 
7688 ResourceTracker::ResourceTracker() : mImpl(new ResourceTracker::Impl()) { }
7689 ResourceTracker::~ResourceTracker() { }
7690 VulkanHandleMapping* ResourceTracker::createMapping() {
7691     return &mImpl->createMapping;
7692 }
7693 VulkanHandleMapping* ResourceTracker::unwrapMapping() {
7694     return &mImpl->unwrapMapping;
7695 }
7696 VulkanHandleMapping* ResourceTracker::destroyMapping() {
7697     return &mImpl->destroyMapping;
7698 }
7699 VulkanHandleMapping* ResourceTracker::defaultMapping() {
7700     return &mImpl->defaultMapping;
7701 }
7702 static ResourceTracker* sTracker = nullptr;
7703 // static
7704 ResourceTracker* ResourceTracker::get() {
7705     if (!sTracker) {
7706         // To be initialized once on vulkan device open.
7707         sTracker = new ResourceTracker;
7708     }
7709     return sTracker;
7710 }
7711 
7712 #define HANDLE_REGISTER_IMPL(type) \
7713     void ResourceTracker::register_##type(type obj) { \
7714         mImpl->register_##type(obj); \
7715     } \
7716     void ResourceTracker::unregister_##type(type obj) { \
7717         mImpl->unregister_##type(obj); \
7718     } \
7719 
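// Expansion sketch: for each handle type T in GOLDFISH_VK_LIST_HANDLE_TYPES,
// HANDLE_REGISTER_IMPL generates a forwarding pair, e.g. for VkDevice:
//
//   void ResourceTracker::register_VkDevice(VkDevice obj) {
//       mImpl->register_VkDevice(obj);
//   }
//   void ResourceTracker::unregister_VkDevice(VkDevice obj) {
//       mImpl->unregister_VkDevice(obj);
//   }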
7720 GOLDFISH_VK_LIST_HANDLE_TYPES(HANDLE_REGISTER_IMPL)
7721 
7722 uint8_t* ResourceTracker::getMappedPointer(VkDeviceMemory memory) {
7723     return mImpl->getMappedPointer(memory);
7724 }
7725 
7726 VkDeviceSize ResourceTracker::getMappedSize(VkDeviceMemory memory) {
7727     return mImpl->getMappedSize(memory);
7728 }
7729 
7730 bool ResourceTracker::isValidMemoryRange(const VkMappedMemoryRange& range) const {
7731     return mImpl->isValidMemoryRange(range);
7732 }
7733 
7734 void ResourceTracker::setupFeatures(const EmulatorFeatureInfo* features) {
7735     mImpl->setupFeatures(features);
7736 }
7737 
7738 void ResourceTracker::setupCaps(void) { mImpl->setupCaps(); }
7739 
7740 void ResourceTracker::setThreadingCallbacks(const ResourceTracker::ThreadingCallbacks& callbacks) {
7741     mImpl->setThreadingCallbacks(callbacks);
7742 }
7743 
7744 bool ResourceTracker::hostSupportsVulkan() const {
7745     return mImpl->hostSupportsVulkan();
7746 }
7747 
7748 bool ResourceTracker::usingDirectMapping() const {
7749     return mImpl->usingDirectMapping();
7750 }
7751 
7752 uint32_t ResourceTracker::getStreamFeatures() const {
7753     return mImpl->getStreamFeatures();
7754 }
7755 
7756 uint32_t ResourceTracker::getApiVersionFromInstance(VkInstance instance) const {
7757     return mImpl->getApiVersionFromInstance(instance);
7758 }
7759 
7760 uint32_t ResourceTracker::getApiVersionFromDevice(VkDevice device) const {
7761     return mImpl->getApiVersionFromDevice(device);
7762 }
7763 bool ResourceTracker::hasInstanceExtension(VkInstance instance, const std::string &name) const {
7764     return mImpl->hasInstanceExtension(instance, name);
7765 }
7766 bool ResourceTracker::hasDeviceExtension(VkDevice device, const std::string &name) const {
7767     return mImpl->hasDeviceExtension(device, name);
7768 }
7769 VkDevice ResourceTracker::getDevice(VkCommandBuffer commandBuffer) const {
7770     return mImpl->getDevice(commandBuffer);
7771 }
7772 void ResourceTracker::addToCommandPool(VkCommandPool commandPool,
7773                       uint32_t commandBufferCount,
7774                       VkCommandBuffer* pCommandBuffers) {
7775     mImpl->addToCommandPool(commandPool, commandBufferCount, pCommandBuffers);
7776 }
7777 void ResourceTracker::resetCommandPoolStagingInfo(VkCommandPool commandPool) {
7778     mImpl->resetCommandPoolStagingInfo(commandPool);
7779 }
7780 
7781 
7782 // static
7783 ALWAYS_INLINE VkEncoder* ResourceTracker::getCommandBufferEncoder(VkCommandBuffer commandBuffer) {
7784     if (!(ResourceTracker::streamFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT)) {
7785         auto enc = ResourceTracker::getThreadLocalEncoder();
7786         ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, enc);
7787         return enc;
7788     }
7789 
7790     struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
7791     if (!cb->privateEncoder) {
7792         sStaging.setAllocFree(ResourceTracker::get()->getAlloc(),
7793                               ResourceTracker::get()->getFree());
7794         sStaging.popStaging((CommandBufferStagingStream**)&cb->privateStream, &cb->privateEncoder);
7795     }
7796     uint8_t* writtenPtr; size_t written;
7797     ((CommandBufferStagingStream*)cb->privateStream)->getWritten(&writtenPtr, &written);
7798     return cb->privateEncoder;
7799 }
7800 
7801 // static
7802 ALWAYS_INLINE VkEncoder* ResourceTracker::getQueueEncoder(VkQueue queue) {
7803     auto enc = ResourceTracker::getThreadLocalEncoder();
7804     if (!(ResourceTracker::streamFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT)) {
7805         ResourceTracker::get()->syncEncodersForQueue(queue, enc);
7806     }
7807     return enc;
7808 }
7809 
7810 // static
7811 ALWAYS_INLINE VkEncoder* ResourceTracker::getThreadLocalEncoder() {
7812     auto hostConn = ResourceTracker::threadingCallbacks.hostConnectionGetFunc();
7813     auto vkEncoder = ResourceTracker::threadingCallbacks.vkEncoderGetFunc(hostConn);
7814     return vkEncoder;
7815 }
7816 
7817 // static
7818 void ResourceTracker::setSeqnoPtr(uint32_t* seqnoptr) {
7819     sSeqnoPtr = seqnoptr;
7820 }
7821 
7822 // static
7823 ALWAYS_INLINE uint32_t ResourceTracker::nextSeqno() {
7824     uint32_t res = __atomic_add_fetch(sSeqnoPtr, 1, __ATOMIC_SEQ_CST);
7825     return res;
7826 }
7827 
7828 // static
7829 ALWAYS_INLINE uint32_t ResourceTracker::getSeqno() {
7830     uint32_t res = __atomic_load_n(sSeqnoPtr, __ATOMIC_SEQ_CST);
7831     return res;
7832 }
7833 
7834 VkResult ResourceTracker::on_vkEnumerateInstanceExtensionProperties(
7835     void* context,
7836     VkResult input_result,
7837     const char* pLayerName,
7838     uint32_t* pPropertyCount,
7839     VkExtensionProperties* pProperties) {
7840     return mImpl->on_vkEnumerateInstanceExtensionProperties(
7841         context, input_result, pLayerName, pPropertyCount, pProperties);
7842 }
7843 
7844 VkResult ResourceTracker::on_vkEnumerateDeviceExtensionProperties(
7845     void* context,
7846     VkResult input_result,
7847     VkPhysicalDevice physicalDevice,
7848     const char* pLayerName,
7849     uint32_t* pPropertyCount,
7850     VkExtensionProperties* pProperties) {
7851     return mImpl->on_vkEnumerateDeviceExtensionProperties(
7852         context, input_result, physicalDevice, pLayerName, pPropertyCount, pProperties);
7853 }
7854 
7855 VkResult ResourceTracker::on_vkEnumeratePhysicalDevices(
7856     void* context, VkResult input_result,
7857     VkInstance instance, uint32_t* pPhysicalDeviceCount,
7858     VkPhysicalDevice* pPhysicalDevices) {
7859     return mImpl->on_vkEnumeratePhysicalDevices(
7860         context, input_result, instance, pPhysicalDeviceCount,
7861         pPhysicalDevices);
7862 }
7863 
7864 void ResourceTracker::on_vkGetPhysicalDeviceProperties(
7865     void* context,
7866     VkPhysicalDevice physicalDevice,
7867     VkPhysicalDeviceProperties* pProperties) {
7868     mImpl->on_vkGetPhysicalDeviceProperties(context, physicalDevice,
7869         pProperties);
7870 }
7871 
7872 void ResourceTracker::on_vkGetPhysicalDeviceFeatures2(
7873     void* context,
7874     VkPhysicalDevice physicalDevice,
7875     VkPhysicalDeviceFeatures2* pFeatures) {
7876     mImpl->on_vkGetPhysicalDeviceFeatures2(context, physicalDevice,
7877         pFeatures);
7878 }
7879 
7880 void ResourceTracker::on_vkGetPhysicalDeviceFeatures2KHR(
7881     void* context,
7882     VkPhysicalDevice physicalDevice,
7883     VkPhysicalDeviceFeatures2* pFeatures) {
7884     mImpl->on_vkGetPhysicalDeviceFeatures2(context, physicalDevice,
7885         pFeatures);
7886 }
7887 
7888 void ResourceTracker::on_vkGetPhysicalDeviceProperties2(
7889     void* context,
7890     VkPhysicalDevice physicalDevice,
7891     VkPhysicalDeviceProperties2* pProperties) {
7892     mImpl->on_vkGetPhysicalDeviceProperties2(context, physicalDevice,
7893         pProperties);
7894 }
7895 
7896 void ResourceTracker::on_vkGetPhysicalDeviceProperties2KHR(
7897     void* context,
7898     VkPhysicalDevice physicalDevice,
7899     VkPhysicalDeviceProperties2* pProperties) {
7900     mImpl->on_vkGetPhysicalDeviceProperties2(context, physicalDevice,
7901         pProperties);
7902 }
7903 
7904 void ResourceTracker::on_vkGetPhysicalDeviceMemoryProperties(
7905     void* context,
7906     VkPhysicalDevice physicalDevice,
7907     VkPhysicalDeviceMemoryProperties* pMemoryProperties) {
7908     mImpl->on_vkGetPhysicalDeviceMemoryProperties(
7909         context, physicalDevice, pMemoryProperties);
7910 }
7911 
7912 void ResourceTracker::on_vkGetPhysicalDeviceMemoryProperties2(
7913     void* context,
7914     VkPhysicalDevice physicalDevice,
7915     VkPhysicalDeviceMemoryProperties2* pMemoryProperties) {
7916     mImpl->on_vkGetPhysicalDeviceMemoryProperties2(
7917         context, physicalDevice, pMemoryProperties);
7918 }
7919 
7920 void ResourceTracker::on_vkGetPhysicalDeviceMemoryProperties2KHR(
7921     void* context,
7922     VkPhysicalDevice physicalDevice,
7923     VkPhysicalDeviceMemoryProperties2* pMemoryProperties) {
7924     mImpl->on_vkGetPhysicalDeviceMemoryProperties2(
7925         context, physicalDevice, pMemoryProperties);
7926 }
7927 
7928 void ResourceTracker::on_vkGetDeviceQueue(void* context,
7929                                           VkDevice device,
7930                                           uint32_t queueFamilyIndex,
7931                                           uint32_t queueIndex,
7932                                           VkQueue* pQueue) {
7933     mImpl->on_vkGetDeviceQueue(context, device, queueFamilyIndex, queueIndex,
7934                                pQueue);
7935 }
7936 
7937 void ResourceTracker::on_vkGetDeviceQueue2(void* context,
7938                                            VkDevice device,
7939                                            const VkDeviceQueueInfo2* pQueueInfo,
7940                                            VkQueue* pQueue) {
7941     mImpl->on_vkGetDeviceQueue2(context, device, pQueueInfo, pQueue);
7942 }
7943 
7944 VkResult ResourceTracker::on_vkCreateInstance(
7945     void* context,
7946     VkResult input_result,
7947     const VkInstanceCreateInfo* pCreateInfo,
7948     const VkAllocationCallbacks* pAllocator,
7949     VkInstance* pInstance) {
7950     return mImpl->on_vkCreateInstance(
7951         context, input_result, pCreateInfo, pAllocator, pInstance);
7952 }
7953 
7954 VkResult ResourceTracker::on_vkCreateDevice(
7955     void* context,
7956     VkResult input_result,
7957     VkPhysicalDevice physicalDevice,
7958     const VkDeviceCreateInfo* pCreateInfo,
7959     const VkAllocationCallbacks* pAllocator,
7960     VkDevice* pDevice) {
7961     return mImpl->on_vkCreateDevice(
7962         context, input_result, physicalDevice, pCreateInfo, pAllocator, pDevice);
7963 }
7964 
7965 void ResourceTracker::on_vkDestroyDevice_pre(
7966     void* context,
7967     VkDevice device,
7968     const VkAllocationCallbacks* pAllocator) {
7969     mImpl->on_vkDestroyDevice_pre(context, device, pAllocator);
7970 }
7971 
7972 VkResult ResourceTracker::on_vkAllocateMemory(
7973     void* context,
7974     VkResult input_result,
7975     VkDevice device,
7976     const VkMemoryAllocateInfo* pAllocateInfo,
7977     const VkAllocationCallbacks* pAllocator,
7978     VkDeviceMemory* pMemory) {
7979     return mImpl->on_vkAllocateMemory(
7980         context, input_result, device, pAllocateInfo, pAllocator, pMemory);
7981 }
7982 
7983 void ResourceTracker::on_vkFreeMemory(
7984     void* context,
7985     VkDevice device,
7986     VkDeviceMemory memory,
7987     const VkAllocationCallbacks* pAllocator) {
7988     return mImpl->on_vkFreeMemory(
7989         context, device, memory, pAllocator);
7990 }
7991 
7992 VkResult ResourceTracker::on_vkMapMemory(
7993     void* context,
7994     VkResult input_result,
7995     VkDevice device,
7996     VkDeviceMemory memory,
7997     VkDeviceSize offset,
7998     VkDeviceSize size,
7999     VkMemoryMapFlags flags,
8000     void** ppData) {
8001     return mImpl->on_vkMapMemory(
8002         context, input_result, device, memory, offset, size, flags, ppData);
8003 }
8004 
8005 void ResourceTracker::on_vkUnmapMemory(
8006     void* context,
8007     VkDevice device,
8008     VkDeviceMemory memory) {
8009     mImpl->on_vkUnmapMemory(context, device, memory);
8010 }
8011 
8012 VkResult ResourceTracker::on_vkCreateImage(
8013     void* context, VkResult input_result,
8014     VkDevice device, const VkImageCreateInfo *pCreateInfo,
8015     const VkAllocationCallbacks *pAllocator,
8016     VkImage *pImage) {
8017     return mImpl->on_vkCreateImage(
8018         context, input_result,
8019         device, pCreateInfo, pAllocator, pImage);
8020 }
8021 
8022 void ResourceTracker::on_vkDestroyImage(
8023     void* context,
8024     VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
8025     mImpl->on_vkDestroyImage(context,
8026         device, image, pAllocator);
8027 }
8028 
8029 void ResourceTracker::on_vkGetImageMemoryRequirements(
8030     void *context, VkDevice device, VkImage image,
8031     VkMemoryRequirements *pMemoryRequirements) {
8032     mImpl->on_vkGetImageMemoryRequirements(
8033         context, device, image, pMemoryRequirements);
8034 }
8035 
8036 void ResourceTracker::on_vkGetImageMemoryRequirements2(
8037     void *context, VkDevice device, const VkImageMemoryRequirementsInfo2 *pInfo,
8038     VkMemoryRequirements2 *pMemoryRequirements) {
8039     mImpl->on_vkGetImageMemoryRequirements2(
8040         context, device, pInfo, pMemoryRequirements);
8041 }
8042 
8043 void ResourceTracker::on_vkGetImageMemoryRequirements2KHR(
8044     void *context, VkDevice device, const VkImageMemoryRequirementsInfo2 *pInfo,
8045     VkMemoryRequirements2 *pMemoryRequirements) {
8046     mImpl->on_vkGetImageMemoryRequirements2KHR(
8047         context, device, pInfo, pMemoryRequirements);
8048 }
8049 
8050 VkResult ResourceTracker::on_vkBindImageMemory(
8051     void* context, VkResult input_result,
8052     VkDevice device, VkImage image, VkDeviceMemory memory,
8053     VkDeviceSize memoryOffset) {
8054     return mImpl->on_vkBindImageMemory(
8055         context, input_result, device, image, memory, memoryOffset);
8056 }
8057 
8058 VkResult ResourceTracker::on_vkBindImageMemory2(
8059     void* context, VkResult input_result,
8060     VkDevice device, uint32_t bindingCount, const VkBindImageMemoryInfo* pBindInfos) {
8061     return mImpl->on_vkBindImageMemory2(
8062         context, input_result, device, bindingCount, pBindInfos);
8063 }
8064 
8065 VkResult ResourceTracker::on_vkBindImageMemory2KHR(
8066     void* context, VkResult input_result,
8067     VkDevice device, uint32_t bindingCount, const VkBindImageMemoryInfo* pBindInfos) {
8068     return mImpl->on_vkBindImageMemory2KHR(
8069         context, input_result, device, bindingCount, pBindInfos);
8070 }
8071 
8072 VkResult ResourceTracker::on_vkCreateBuffer(
8073     void* context, VkResult input_result,
8074     VkDevice device, const VkBufferCreateInfo *pCreateInfo,
8075     const VkAllocationCallbacks *pAllocator,
8076     VkBuffer *pBuffer) {
8077     return mImpl->on_vkCreateBuffer(
8078         context, input_result,
8079         device, pCreateInfo, pAllocator, pBuffer);
8080 }
8081 
8082 void ResourceTracker::on_vkDestroyBuffer(
8083     void* context,
8084     VkDevice device, VkBuffer buffer, const VkAllocationCallbacks *pAllocator) {
8085     mImpl->on_vkDestroyBuffer(context, device, buffer, pAllocator);
8086 }
8087 
8088 void ResourceTracker::on_vkGetBufferMemoryRequirements(
8089     void* context, VkDevice device, VkBuffer buffer, VkMemoryRequirements *pMemoryRequirements) {
8090     mImpl->on_vkGetBufferMemoryRequirements(context, device, buffer, pMemoryRequirements);
8091 }
8092 
8093 void ResourceTracker::on_vkGetBufferMemoryRequirements2(
8094     void* context, VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo,
8095     VkMemoryRequirements2* pMemoryRequirements) {
8096     mImpl->on_vkGetBufferMemoryRequirements2(
8097         context, device, pInfo, pMemoryRequirements);
8098 }
8099 
8100 void ResourceTracker::on_vkGetBufferMemoryRequirements2KHR(
8101     void* context, VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo,
8102     VkMemoryRequirements2* pMemoryRequirements) {
8103     mImpl->on_vkGetBufferMemoryRequirements2KHR(
8104         context, device, pInfo, pMemoryRequirements);
8105 }
8106 
8107 VkResult ResourceTracker::on_vkBindBufferMemory(
8108     void* context, VkResult input_result,
8109     VkDevice device, VkBuffer buffer, VkDeviceMemory memory, VkDeviceSize memoryOffset) {
8110     return mImpl->on_vkBindBufferMemory(
8111         context, input_result,
8112         device, buffer, memory, memoryOffset);
8113 }
8114 
8115 VkResult ResourceTracker::on_vkBindBufferMemory2(
8116     void* context, VkResult input_result,
8117     VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo *pBindInfos) {
8118     return mImpl->on_vkBindBufferMemory2(
8119         context, input_result,
8120         device, bindInfoCount, pBindInfos);
8121 }
8122 
8123 VkResult ResourceTracker::on_vkBindBufferMemory2KHR(
8124     void* context, VkResult input_result,
8125     VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo *pBindInfos) {
8126     return mImpl->on_vkBindBufferMemory2KHR(
8127         context, input_result,
8128         device, bindInfoCount, pBindInfos);
8129 }
8130 
8131 VkResult ResourceTracker::on_vkCreateSemaphore(
8132     void* context, VkResult input_result,
8133     VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
8134     const VkAllocationCallbacks *pAllocator,
8135     VkSemaphore *pSemaphore) {
8136     return mImpl->on_vkCreateSemaphore(
8137         context, input_result,
8138         device, pCreateInfo, pAllocator, pSemaphore);
8139 }
8140 
8141 void ResourceTracker::on_vkDestroySemaphore(
8142     void* context,
8143     VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
8144     mImpl->on_vkDestroySemaphore(context, device, semaphore, pAllocator);
8145 }
8146 
8147 VkResult ResourceTracker::on_vkQueueSubmit(
8148     void* context, VkResult input_result,
8149     VkQueue queue, uint32_t submitCount, const VkSubmitInfo* pSubmits, VkFence fence) {
8150     return mImpl->on_vkQueueSubmit(
8151         context, input_result, queue, submitCount, pSubmits, fence);
8152 }
8153 
8154 VkResult ResourceTracker::on_vkQueueSubmit2(void* context, VkResult input_result, VkQueue queue,
8155                                             uint32_t submitCount, const VkSubmitInfo2* pSubmits,
8156                                             VkFence fence) {
8157     return mImpl->on_vkQueueSubmit2(context, input_result, queue, submitCount, pSubmits, fence);
8158 }
8159 
8160 VkResult ResourceTracker::on_vkQueueWaitIdle(
8161     void* context, VkResult input_result,
8162     VkQueue queue) {
8163     return mImpl->on_vkQueueWaitIdle(context, input_result, queue);
8164 }
8165 
8166 VkResult ResourceTracker::on_vkGetSemaphoreFdKHR(
8167     void* context, VkResult input_result,
8168     VkDevice device,
8169     const VkSemaphoreGetFdInfoKHR* pGetFdInfo,
8170     int* pFd) {
8171     return mImpl->on_vkGetSemaphoreFdKHR(context, input_result, device, pGetFdInfo, pFd);
8172 }
8173 
8174 VkResult ResourceTracker::on_vkImportSemaphoreFdKHR(
8175     void* context, VkResult input_result,
8176     VkDevice device,
8177     const VkImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo) {
8178     return mImpl->on_vkImportSemaphoreFdKHR(context, input_result, device, pImportSemaphoreFdInfo);
8179 }
8180 
8181 void ResourceTracker::unwrap_vkCreateImage_pCreateInfo(
8182     const VkImageCreateInfo* pCreateInfo,
8183     VkImageCreateInfo* local_pCreateInfo) {
8184 #ifdef VK_USE_PLATFORM_ANDROID_KHR
8185     mImpl->unwrap_vkCreateImage_pCreateInfo(pCreateInfo, local_pCreateInfo);
8186 #endif
8187 }
8188 
8189 void ResourceTracker::unwrap_vkAcquireImageANDROID_nativeFenceFd(int fd, int* fd_out) {
8190 #ifdef VK_USE_PLATFORM_ANDROID_KHR
8191     mImpl->unwrap_vkAcquireImageANDROID_nativeFenceFd(fd, fd_out);
8192 #endif
8193 }
8194 
8195 void ResourceTracker::unwrap_VkBindImageMemory2_pBindInfos(
8196         uint32_t bindInfoCount,
8197         const VkBindImageMemoryInfo* inputBindInfos,
8198         VkBindImageMemoryInfo* outputBindInfos) {
8199 #ifdef VK_USE_PLATFORM_ANDROID_KHR
8200     mImpl->unwrap_VkBindImageMemory2_pBindInfos(bindInfoCount, inputBindInfos, outputBindInfos);
8201 #endif
8202 }
8203 
8204 #ifdef VK_USE_PLATFORM_FUCHSIA
8205 VkResult ResourceTracker::on_vkGetMemoryZirconHandleFUCHSIA(
8206     void* context, VkResult input_result,
8207     VkDevice device,
8208     const VkMemoryGetZirconHandleInfoFUCHSIA* pInfo,
8209     uint32_t* pHandle) {
8210     return mImpl->on_vkGetMemoryZirconHandleFUCHSIA(
8211         context, input_result, device, pInfo, pHandle);
8212 }
8213 
8214 VkResult ResourceTracker::on_vkGetMemoryZirconHandlePropertiesFUCHSIA(
8215     void* context, VkResult input_result,
8216     VkDevice device,
8217     VkExternalMemoryHandleTypeFlagBits handleType,
8218     uint32_t handle,
8219     VkMemoryZirconHandlePropertiesFUCHSIA* pProperties) {
8220     return mImpl->on_vkGetMemoryZirconHandlePropertiesFUCHSIA(
8221         context, input_result, device, handleType, handle, pProperties);
8222 }
8223 
8224 VkResult ResourceTracker::on_vkGetSemaphoreZirconHandleFUCHSIA(
8225     void* context, VkResult input_result,
8226     VkDevice device,
8227     const VkSemaphoreGetZirconHandleInfoFUCHSIA* pInfo,
8228     uint32_t* pHandle) {
8229     return mImpl->on_vkGetSemaphoreZirconHandleFUCHSIA(
8230         context, input_result, device, pInfo, pHandle);
8231 }
8232 
8233 VkResult ResourceTracker::on_vkImportSemaphoreZirconHandleFUCHSIA(
8234     void* context, VkResult input_result,
8235     VkDevice device,
8236     const VkImportSemaphoreZirconHandleInfoFUCHSIA* pInfo) {
8237     return mImpl->on_vkImportSemaphoreZirconHandleFUCHSIA(
8238         context, input_result, device, pInfo);
8239 }
8240 
8241 VkResult ResourceTracker::on_vkCreateBufferCollectionFUCHSIA(
8242     void* context,
8243     VkResult input_result,
8244     VkDevice device,
8245     const VkBufferCollectionCreateInfoFUCHSIA* pInfo,
8246     const VkAllocationCallbacks* pAllocator,
8247     VkBufferCollectionFUCHSIA* pCollection) {
8248     return mImpl->on_vkCreateBufferCollectionFUCHSIA(
8249         context, input_result, device, pInfo, pAllocator, pCollection);
8250 }
8251 
8252 void ResourceTracker::on_vkDestroyBufferCollectionFUCHSIA(
8253     void* context,
8254     VkResult input_result,
8255     VkDevice device,
8256     VkBufferCollectionFUCHSIA collection,
8257     const VkAllocationCallbacks* pAllocator) {
8258     return mImpl->on_vkDestroyBufferCollectionFUCHSIA(
8259         context, input_result, device, collection, pAllocator);
8260 }
8261 
8262 VkResult ResourceTracker::on_vkSetBufferCollectionBufferConstraintsFUCHSIA(
8263     void* context,
8264     VkResult input_result,
8265     VkDevice device,
8266     VkBufferCollectionFUCHSIA collection,
8267     const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo) {
8268     return mImpl->on_vkSetBufferCollectionBufferConstraintsFUCHSIA(
8269         context, input_result, device, collection, pBufferConstraintsInfo);
8270 }
8271 
VkResult ResourceTracker::on_vkSetBufferCollectionImageConstraintsFUCHSIA(
    void* context,
    VkResult input_result,
    VkDevice device,
    VkBufferCollectionFUCHSIA collection,
    const VkImageConstraintsInfoFUCHSIA* pImageConstraintsInfo) {
    return mImpl->on_vkSetBufferCollectionImageConstraintsFUCHSIA(
        context, input_result, device, collection, pImageConstraintsInfo);
}

VkResult ResourceTracker::on_vkGetBufferCollectionPropertiesFUCHSIA(
    void* context,
    VkResult input_result,
    VkDevice device,
    VkBufferCollectionFUCHSIA collection,
    VkBufferCollectionPropertiesFUCHSIA* pProperties) {
    return mImpl->on_vkGetBufferCollectionPropertiesFUCHSIA(
        context, input_result, device, collection, pProperties);
}
#endif  // VK_USE_PLATFORM_FUCHSIA

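// Android-only: AHardwareBuffer interop. Property and export queries are
// forwarded to the Impl, which owns the AHardwareBuffer bookkeeping.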
#ifdef VK_USE_PLATFORM_ANDROID_KHR
VkResult ResourceTracker::on_vkGetAndroidHardwareBufferPropertiesANDROID(
    void* context, VkResult input_result,
    VkDevice device,
    const AHardwareBuffer* buffer,
    VkAndroidHardwareBufferPropertiesANDROID* pProperties) {
    return mImpl->on_vkGetAndroidHardwareBufferPropertiesANDROID(
        context, input_result, device, buffer, pProperties);
}

VkResult ResourceTracker::on_vkGetMemoryAndroidHardwareBufferANDROID(
    void* context, VkResult input_result,
    VkDevice device,
    const VkMemoryGetAndroidHardwareBufferInfoANDROID* pInfo,
    struct AHardwareBuffer** pBuffer) {
    return mImpl->on_vkGetMemoryAndroidHardwareBufferANDROID(
        context, input_result,
        device, pInfo, pBuffer);
}
#endif  // VK_USE_PLATFORM_ANDROID_KHR

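// Sampler YCbCr conversion create/destroy, in both the core (Vulkan 1.1)
// and KHR-suffixed forms; each form forwards to its matching Impl hook.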
VkResult ResourceTracker::on_vkCreateSamplerYcbcrConversion(
    void* context, VkResult input_result,
    VkDevice device,
    const VkSamplerYcbcrConversionCreateInfo* pCreateInfo,
    const VkAllocationCallbacks* pAllocator,
    VkSamplerYcbcrConversion* pYcbcrConversion) {
    return mImpl->on_vkCreateSamplerYcbcrConversion(
        context, input_result, device, pCreateInfo, pAllocator, pYcbcrConversion);
}

void ResourceTracker::on_vkDestroySamplerYcbcrConversion(
    void* context,
    VkDevice device,
    VkSamplerYcbcrConversion ycbcrConversion,
    const VkAllocationCallbacks* pAllocator) {
    mImpl->on_vkDestroySamplerYcbcrConversion(
        context, device, ycbcrConversion, pAllocator);
}

VkResult ResourceTracker::on_vkCreateSamplerYcbcrConversionKHR(
    void* context, VkResult input_result,
    VkDevice device,
    const VkSamplerYcbcrConversionCreateInfo* pCreateInfo,
    const VkAllocationCallbacks* pAllocator,
    VkSamplerYcbcrConversion* pYcbcrConversion) {
    return mImpl->on_vkCreateSamplerYcbcrConversionKHR(
        context, input_result, device, pCreateInfo, pAllocator, pYcbcrConversion);
}

void ResourceTracker::on_vkDestroySamplerYcbcrConversionKHR(
    void* context,
    VkDevice device,
    VkSamplerYcbcrConversion ycbcrConversion,
    const VkAllocationCallbacks* pAllocator) {
    mImpl->on_vkDestroySamplerYcbcrConversionKHR(
        context, device, ycbcrConversion, pAllocator);
}

VkResult ResourceTracker::on_vkCreateSampler(
    void* context, VkResult input_result,
    VkDevice device,
    const VkSamplerCreateInfo* pCreateInfo,
    const VkAllocationCallbacks* pAllocator,
    VkSampler* pSampler) {
    return mImpl->on_vkCreateSampler(
        context, input_result, device, pCreateInfo, pAllocator, pSampler);
}

void ResourceTracker::on_vkGetPhysicalDeviceExternalBufferProperties(
    void* context,
    VkPhysicalDevice physicalDevice,
    const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo,
    VkExternalBufferProperties* pExternalBufferProperties) {
    mImpl->on_vkGetPhysicalDeviceExternalBufferProperties(
        context, physicalDevice, pExternalBufferInfo, pExternalBufferProperties);
}

void ResourceTracker::on_vkGetPhysicalDeviceExternalFenceProperties(
    void* context,
    VkPhysicalDevice physicalDevice,
    const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo,
    VkExternalFenceProperties* pExternalFenceProperties) {
    mImpl->on_vkGetPhysicalDeviceExternalFenceProperties(
        context, physicalDevice, pExternalFenceInfo, pExternalFenceProperties);
}

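// The KHR-suffixed variant below is an alias of the core query, so it
// reuses the core Impl hook rather than a separate one.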
void ResourceTracker::on_vkGetPhysicalDeviceExternalFencePropertiesKHR(
    void* context,
    VkPhysicalDevice physicalDevice,
    const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo,
    VkExternalFenceProperties* pExternalFenceProperties) {
    mImpl->on_vkGetPhysicalDeviceExternalFenceProperties(
        context, physicalDevice, pExternalFenceInfo, pExternalFenceProperties);
}

VkResult ResourceTracker::on_vkCreateFence(
    void* context,
    VkResult input_result,
    VkDevice device,
    const VkFenceCreateInfo* pCreateInfo,
    const VkAllocationCallbacks* pAllocator,
    VkFence* pFence) {
    return mImpl->on_vkCreateFence(
        context, input_result, device, pCreateInfo, pAllocator, pFence);
}

void ResourceTracker::on_vkDestroyFence(
    void* context,
    VkDevice device,
    VkFence fence,
    const VkAllocationCallbacks* pAllocator) {
    mImpl->on_vkDestroyFence(
        context, device, fence, pAllocator);
}

VkResult ResourceTracker::on_vkResetFences(
    void* context,
    VkResult input_result,
    VkDevice device,
    uint32_t fenceCount,
    const VkFence* pFences) {
    return mImpl->on_vkResetFences(
        context, input_result, device, fenceCount, pFences);
}

VkResult ResourceTracker::on_vkImportFenceFdKHR(
    void* context,
    VkResult input_result,
    VkDevice device,
    const VkImportFenceFdInfoKHR* pImportFenceFdInfo) {
    return mImpl->on_vkImportFenceFdKHR(
        context, input_result, device, pImportFenceFdInfo);
}

VkResult ResourceTracker::on_vkGetFenceFdKHR(
    void* context,
    VkResult input_result,
    VkDevice device,
    const VkFenceGetFdInfoKHR* pGetFdInfo,
    int* pFd) {
    return mImpl->on_vkGetFenceFdKHR(
        context, input_result, device, pGetFdInfo, pFd);
}

VkResult ResourceTracker::on_vkWaitForFences(
    void* context,
    VkResult input_result,
    VkDevice device,
    uint32_t fenceCount,
    const VkFence* pFences,
    VkBool32 waitAll,
    uint64_t timeout) {
    return mImpl->on_vkWaitForFences(
        context, input_result, device, fenceCount, pFences, waitAll, timeout);
}

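// Descriptor pool / descriptor set bookkeeping. These shims presumably
// feed the descriptor-set virtualization layer the Impl maintains (see
// DescriptorSetVirtualization.h).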
VkResult ResourceTracker::on_vkCreateDescriptorPool(
    void* context,
    VkResult input_result,
    VkDevice device,
    const VkDescriptorPoolCreateInfo* pCreateInfo,
    const VkAllocationCallbacks* pAllocator,
    VkDescriptorPool* pDescriptorPool) {
    return mImpl->on_vkCreateDescriptorPool(
        context, input_result, device, pCreateInfo, pAllocator, pDescriptorPool);
}

void ResourceTracker::on_vkDestroyDescriptorPool(
    void* context,
    VkDevice device,
    VkDescriptorPool descriptorPool,
    const VkAllocationCallbacks* pAllocator) {
    mImpl->on_vkDestroyDescriptorPool(context, device, descriptorPool, pAllocator);
}

VkResult ResourceTracker::on_vkResetDescriptorPool(
    void* context,
    VkResult input_result,
    VkDevice device,
    VkDescriptorPool descriptorPool,
    VkDescriptorPoolResetFlags flags) {
    return mImpl->on_vkResetDescriptorPool(
        context, input_result, device, descriptorPool, flags);
}

VkResult ResourceTracker::on_vkAllocateDescriptorSets(
    void* context,
    VkResult input_result,
    VkDevice device,
    const VkDescriptorSetAllocateInfo* pAllocateInfo,
    VkDescriptorSet* pDescriptorSets) {
    return mImpl->on_vkAllocateDescriptorSets(
        context, input_result, device, pAllocateInfo, pDescriptorSets);
}

VkResult ResourceTracker::on_vkFreeDescriptorSets(
    void* context,
    VkResult input_result,
    VkDevice device,
    VkDescriptorPool descriptorPool,
    uint32_t descriptorSetCount,
    const VkDescriptorSet* pDescriptorSets) {
    return mImpl->on_vkFreeDescriptorSets(
        context, input_result, device, descriptorPool, descriptorSetCount, pDescriptorSets);
}

VkResult ResourceTracker::on_vkCreateDescriptorSetLayout(
    void* context,
    VkResult input_result,
    VkDevice device,
    const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
    const VkAllocationCallbacks* pAllocator,
    VkDescriptorSetLayout* pSetLayout) {
    return mImpl->on_vkCreateDescriptorSetLayout(
        context, input_result, device, pCreateInfo, pAllocator, pSetLayout);
}

void ResourceTracker::on_vkUpdateDescriptorSets(
    void* context,
    VkDevice device,
    uint32_t descriptorWriteCount,
    const VkWriteDescriptorSet* pDescriptorWrites,
    uint32_t descriptorCopyCount,
    const VkCopyDescriptorSet* pDescriptorCopies) {
    mImpl->on_vkUpdateDescriptorSets(
        context, device, descriptorWriteCount, pDescriptorWrites,
        descriptorCopyCount, pDescriptorCopies);
}

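// Memory mapping into the guest address space (VK_GOOGLE extension).
// The _pre variant is, going by its name, the step taken before the host
// call; both simply forward to the Impl.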
VkResult ResourceTracker::on_vkMapMemoryIntoAddressSpaceGOOGLE_pre(
    void* context,
    VkResult input_result,
    VkDevice device,
    VkDeviceMemory memory,
    uint64_t* pAddress) {
    return mImpl->on_vkMapMemoryIntoAddressSpaceGOOGLE_pre(
        context, input_result, device, memory, pAddress);
}

VkResult ResourceTracker::on_vkMapMemoryIntoAddressSpaceGOOGLE(
    void* context,
    VkResult input_result,
    VkDevice device,
    VkDeviceMemory memory,
    uint64_t* pAddress) {
    return mImpl->on_vkMapMemoryIntoAddressSpaceGOOGLE(
        context, input_result, device, memory, pAddress);
}

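// Descriptor update templates, in core and KHR forms, plus templated
// descriptor set updates.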
VkResult ResourceTracker::on_vkCreateDescriptorUpdateTemplate(
    void* context, VkResult input_result,
    VkDevice device,
    const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo,
    const VkAllocationCallbacks* pAllocator,
    VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate) {
    return mImpl->on_vkCreateDescriptorUpdateTemplate(
        context, input_result,
        device, pCreateInfo, pAllocator, pDescriptorUpdateTemplate);
}

VkResult ResourceTracker::on_vkCreateDescriptorUpdateTemplateKHR(
    void* context, VkResult input_result,
    VkDevice device,
    const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo,
    const VkAllocationCallbacks* pAllocator,
    VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate) {
    return mImpl->on_vkCreateDescriptorUpdateTemplateKHR(
        context, input_result,
        device, pCreateInfo, pAllocator, pDescriptorUpdateTemplate);
}

void ResourceTracker::on_vkUpdateDescriptorSetWithTemplate(
    void* context,
    VkDevice device,
    VkDescriptorSet descriptorSet,
    VkDescriptorUpdateTemplate descriptorUpdateTemplate,
    const void* pData) {
    mImpl->on_vkUpdateDescriptorSetWithTemplate(
        context, device, descriptorSet,
        descriptorUpdateTemplate, pData);
}

VkResult ResourceTracker::on_vkGetPhysicalDeviceImageFormatProperties2(
    void* context, VkResult input_result,
    VkPhysicalDevice physicalDevice,
    const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo,
    VkImageFormatProperties2* pImageFormatProperties) {
    return mImpl->on_vkGetPhysicalDeviceImageFormatProperties2(
        context, input_result, physicalDevice, pImageFormatInfo,
        pImageFormatProperties);
}

VkResult ResourceTracker::on_vkGetPhysicalDeviceImageFormatProperties2KHR(
    void* context, VkResult input_result,
    VkPhysicalDevice physicalDevice,
    const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo,
    VkImageFormatProperties2* pImageFormatProperties) {
    return mImpl->on_vkGetPhysicalDeviceImageFormatProperties2KHR(
        context, input_result, physicalDevice, pImageFormatInfo,
        pImageFormatProperties);
}

void ResourceTracker::on_vkGetPhysicalDeviceExternalSemaphoreProperties(
    void* context,
    VkPhysicalDevice physicalDevice,
    const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo,
    VkExternalSemaphoreProperties* pExternalSemaphoreProperties) {
    mImpl->on_vkGetPhysicalDeviceExternalSemaphoreProperties(
        context, physicalDevice, pExternalSemaphoreInfo,
        pExternalSemaphoreProperties);
}

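// As with external fence properties above, the KHR alias shares the core
// Impl hook.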
void ResourceTracker::on_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR(
    void* context,
    VkPhysicalDevice physicalDevice,
    const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo,
    VkExternalSemaphoreProperties* pExternalSemaphoreProperties) {
    mImpl->on_vkGetPhysicalDeviceExternalSemaphoreProperties(
        context, physicalDevice, pExternalSemaphoreInfo,
        pExternalSemaphoreProperties);
}

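// Encoder lifecycle plumbing: cleanup callbacks are keyed on
// (encoder, handle), and the syncEncodersFor* helpers reconcile the
// encoder recorded for a command buffer or queue with the caller's
// current one. The actual bookkeeping lives in the Impl.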
void ResourceTracker::registerEncoderCleanupCallback(
    const VkEncoder* encoder, void* handle, ResourceTracker::CleanupCallback callback) {
    mImpl->registerEncoderCleanupCallback(encoder, handle, callback);
}

void ResourceTracker::unregisterEncoderCleanupCallback(
    const VkEncoder* encoder, void* handle) {
    mImpl->unregisterEncoderCleanupCallback(encoder, handle);
}

void ResourceTracker::onEncoderDeleted(const VkEncoder* encoder) {
    mImpl->onEncoderDeleted(encoder);
}

uint32_t ResourceTracker::syncEncodersForCommandBuffer(
    VkCommandBuffer commandBuffer, VkEncoder* current) {
    return mImpl->syncEncodersForCommandBuffer(commandBuffer, current);
}

uint32_t ResourceTracker::syncEncodersForQueue(VkQueue queue, VkEncoder* current) {
    return mImpl->syncEncodersForQueue(queue, current);
}

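// Allocator/deleter pair for CommandBufferStagingStream storage, supplied
// by the Impl.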
CommandBufferStagingStream::Alloc ResourceTracker::getAlloc() { return mImpl->getAlloc(); }

CommandBufferStagingStream::Free ResourceTracker::getFree() { return mImpl->getFree(); }

VkResult ResourceTracker::on_vkBeginCommandBuffer(
    void* context, VkResult input_result,
    VkCommandBuffer commandBuffer,
    const VkCommandBufferBeginInfo* pBeginInfo) {
    return mImpl->on_vkBeginCommandBuffer(
        context, input_result, commandBuffer, pBeginInfo);
}

VkResult ResourceTracker::on_vkEndCommandBuffer(
    void* context, VkResult input_result,
    VkCommandBuffer commandBuffer) {
    return mImpl->on_vkEndCommandBuffer(
        context, input_result, commandBuffer);
}

VkResult ResourceTracker::on_vkResetCommandBuffer(
    void* context, VkResult input_result,
    VkCommandBuffer commandBuffer,
    VkCommandBufferResetFlags flags) {
    return mImpl->on_vkResetCommandBuffer(
        context, input_result, commandBuffer, flags);
}

VkResult ResourceTracker::on_vkCreateImageView(
    void* context, VkResult input_result,
    VkDevice device,
    const VkImageViewCreateInfo* pCreateInfo,
    const VkAllocationCallbacks* pAllocator,
    VkImageView* pView) {
    return mImpl->on_vkCreateImageView(
        context, input_result, device, pCreateInfo, pAllocator, pView);
}

void ResourceTracker::on_vkCmdExecuteCommands(
    void* context,
    VkCommandBuffer commandBuffer,
    uint32_t commandBufferCount,
    const VkCommandBuffer* pCommandBuffers) {
    mImpl->on_vkCmdExecuteCommands(
        context, commandBuffer, commandBufferCount, pCommandBuffers);
}

void ResourceTracker::on_vkCmdBindDescriptorSets(
    void* context,
    VkCommandBuffer commandBuffer,
    VkPipelineBindPoint pipelineBindPoint,
    VkPipelineLayout layout,
    uint32_t firstSet,
    uint32_t descriptorSetCount,
    const VkDescriptorSet* pDescriptorSets,
    uint32_t dynamicOffsetCount,
    const uint32_t* pDynamicOffsets) {
    mImpl->on_vkCmdBindDescriptorSets(
        context,
        commandBuffer,
        pipelineBindPoint,
        layout,
        firstSet,
        descriptorSetCount,
        pDescriptorSets,
        dynamicOffsetCount,
        pDynamicOffsets);
}

void ResourceTracker::on_vkCmdPipelineBarrier(
    void* context,
    VkCommandBuffer commandBuffer,
    VkPipelineStageFlags srcStageMask,
    VkPipelineStageFlags dstStageMask,
    VkDependencyFlags dependencyFlags,
    uint32_t memoryBarrierCount,
    const VkMemoryBarrier* pMemoryBarriers,
    uint32_t bufferMemoryBarrierCount,
    const VkBufferMemoryBarrier* pBufferMemoryBarriers,
    uint32_t imageMemoryBarrierCount,
    const VkImageMemoryBarrier* pImageMemoryBarriers) {
    mImpl->on_vkCmdPipelineBarrier(
        context,
        commandBuffer,
        srcStageMask,
        dstStageMask,
        dependencyFlags,
        memoryBarrierCount,
        pMemoryBarriers,
        bufferMemoryBarrierCount,
        pBufferMemoryBarriers,
        imageMemoryBarrierCount,
        pImageMemoryBarriers);
}

void ResourceTracker::on_vkDestroyDescriptorSetLayout(
    void* context,
    VkDevice device,
    VkDescriptorSetLayout descriptorSetLayout,
    const VkAllocationCallbacks* pAllocator) {
    mImpl->on_vkDestroyDescriptorSetLayout(context, device, descriptorSetLayout, pAllocator);
}

VkResult ResourceTracker::on_vkAllocateCommandBuffers(
    void* context,
    VkResult input_result,
    VkDevice device,
    const VkCommandBufferAllocateInfo* pAllocateInfo,
    VkCommandBuffer* pCommandBuffers) {
    return mImpl->on_vkAllocateCommandBuffers(
        context, input_result, device, pAllocateInfo, pCommandBuffers);
}

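// Android swapchain interop: queue signal release image (QSRI) returns a
// native fence fd for the given wait semaphores.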
#ifdef VK_USE_PLATFORM_ANDROID_KHR
VkResult ResourceTracker::on_vkQueueSignalReleaseImageANDROID(
    void* context,
    VkResult input_result,
    VkQueue queue,
    uint32_t waitSemaphoreCount,
    const VkSemaphore* pWaitSemaphores,
    VkImage image,
    int* pNativeFenceFd) {
    return mImpl->on_vkQueueSignalReleaseImageANDROID(
        context, input_result, queue, waitSemaphoreCount, pWaitSemaphores,
        image, pNativeFenceFd);
}
#endif  // VK_USE_PLATFORM_ANDROID_KHR

VkResult ResourceTracker::on_vkCreateGraphicsPipelines(
    void* context,
    VkResult input_result,
    VkDevice device,
    VkPipelineCache pipelineCache,
    uint32_t createInfoCount,
    const VkGraphicsPipelineCreateInfo* pCreateInfos,
    const VkAllocationCallbacks* pAllocator,
    VkPipeline* pPipelines) {
    return mImpl->on_vkCreateGraphicsPipelines(
        context, input_result, device, pipelineCache, createInfoCount,
        pCreateInfos, pAllocator, pPipelines);
}

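// Guest<->host translation of device memory handles, offsets, sizes, and
// memory type indices/bits; each array is paired with its element count.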
void ResourceTracker::deviceMemoryTransform_tohost(
    VkDeviceMemory* memory, uint32_t memoryCount,
    VkDeviceSize* offset, uint32_t offsetCount,
    VkDeviceSize* size, uint32_t sizeCount,
    uint32_t* typeIndex, uint32_t typeIndexCount,
    uint32_t* typeBits, uint32_t typeBitsCount) {
    mImpl->deviceMemoryTransform_tohost(
        memory, memoryCount,
        offset, offsetCount,
        size, sizeCount,
        typeIndex, typeIndexCount,
        typeBits, typeBitsCount);
}

void ResourceTracker::deviceMemoryTransform_fromhost(
    VkDeviceMemory* memory, uint32_t memoryCount,
    VkDeviceSize* offset, uint32_t offsetCount,
    VkDeviceSize* size, uint32_t sizeCount,
    uint32_t* typeIndex, uint32_t typeIndexCount,
    uint32_t* typeBits, uint32_t typeBitsCount) {
    mImpl->deviceMemoryTransform_fromhost(
        memory, memoryCount,
        offset, offsetCount,
        size, sizeCount,
        typeIndex, typeIndexCount,
        typeBits, typeBitsCount);
}

void ResourceTracker::transformImpl_VkExternalMemoryProperties_fromhost(
    VkExternalMemoryProperties* pProperties,
    uint32_t lenAccess) {
    mImpl->transformImpl_VkExternalMemoryProperties_fromhost(pProperties,
                                                             lenAccess);
}

void ResourceTracker::transformImpl_VkExternalMemoryProperties_tohost(
    VkExternalMemoryProperties*, uint32_t) {}

void ResourceTracker::transformImpl_VkImageCreateInfo_fromhost(const VkImageCreateInfo*,
                                                               uint32_t) {}
void ResourceTracker::transformImpl_VkImageCreateInfo_tohost(const VkImageCreateInfo*,
                                                             uint32_t) {}

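// Types that require no guest<->host translation get empty transform
// stubs, stamped out by the macro below. For each listed type T, the
// expansion is simply:
//
//   void ResourceTracker::transformImpl_T_tohost(T*, uint32_t) {}
//   void ResourceTracker::transformImpl_T_fromhost(T*, uint32_t) {}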
#define DEFINE_TRANSFORMED_TYPE_IMPL(type)                                  \
    void ResourceTracker::transformImpl_##type##_tohost(type*, uint32_t) {} \
    void ResourceTracker::transformImpl_##type##_fromhost(type*, uint32_t) {}

LIST_TRIVIAL_TRANSFORMED_TYPES(DEFINE_TRANSFORMED_TYPE_IMPL)

}  // namespace vk
}  // namespace gfxstream