// Copyright (C) 2018 The Android Open Source Project
// Copyright (C) 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "ResourceTracker.h"

#include "../OpenglSystemCommon/EmulatorFeatureInfo.h"
#include "../OpenglSystemCommon/HostConnection.h"
#include "CommandBufferStagingStream.h"
#include "DescriptorSetVirtualization.h"
#include "HostVisibleMemoryVirtualization.h"
#include "Resources.h"
#include "VkEncoder.h"
#include "aemu/base/AlignedBuf.h"
#include "gfxstream_vk_private.h"
#include "goldfish_address_space.h"
#include "goldfish_vk_private_defs.h"
#include "util.h"
#include "virtgpu_gfxstream_protocol.h"
#include "vulkan/vulkan_core.h"
#ifdef VK_USE_PLATFORM_ANDROID_KHR
#include "vk_format_info.h"
#endif
#include <log/log.h>
#include <stdlib.h>
#include <vndk/hardware_buffer.h>

#include <algorithm>
#include <set>
#include <string>
#include <unordered_map>
#include <unordered_set>

#include "vk_struct_id.h"
#include "vk_util.h"

#if defined(__ANDROID__) || defined(__linux__) || defined(__APPLE__)

#include <sys/mman.h>
#include <sys/syscall.h>

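// memfd_create is invoked via syscall(2) because a libc wrapper may not be
// available at every API level this file targets. On non-Android builds this
// stub always fails with -1, so callers must tolerate failure.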
static inline int inline_memfd_create(const char* name, unsigned int flags) {
#if defined(__ANDROID__)
    return syscall(SYS_memfd_create, name, flags);
#else
    return -1;
#endif
}

#define memfd_create inline_memfd_create
#endif

#ifndef VK_USE_PLATFORM_FUCHSIA
void zx_handle_close(zx_handle_t) {}
void zx_event_create(int, zx_handle_t*) {}
#endif

static constexpr uint32_t kDefaultApiVersion = VK_MAKE_VERSION(1, 1, 0);

namespace gfxstream {
namespace vk {

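// Expands to the three mapHandles_* overloads that VulkanHandleMapping requires
// for a given handle type: in-place mapping, handle -> uint64_t, and
// uint64_t -> handle. The per-element statement for each direction is supplied
// by the macro arguments.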
#define MAKE_HANDLE_MAPPING_FOREACH(type_name, map_impl, map_to_u64_impl, map_from_u64_impl)       \
    void mapHandles_##type_name(type_name* handles, size_t count) override {                       \
        for (size_t i = 0; i < count; ++i) {                                                       \
            map_impl;                                                                              \
        }                                                                                          \
    }                                                                                              \
    void mapHandles_##type_name##_u64(const type_name* handles, uint64_t* handle_u64s,             \
                                      size_t count) override {                                     \
        for (size_t i = 0; i < count; ++i) {                                                       \
            map_to_u64_impl;                                                                       \
        }                                                                                          \
    }                                                                                              \
    void mapHandles_u64_##type_name(const uint64_t* handle_u64s, type_name* handles, size_t count) \
        override {                                                                                 \
        for (size_t i = 0; i < count; ++i) {                                                       \
            map_from_u64_impl;                                                                     \
        }                                                                                          \
    }

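// Per-type behaviors plugged into the macro above: CREATE wraps freshly
// returned host handles in goldfish objects and registers them with the
// ResourceTracker, UNWRAP translates guest handles back into host handles, and
// DESTROY unregisters and deletes the goldfish wrappers.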
#define DEFINE_RESOURCE_TRACKING_CLASS(class_name, impl) \
    class class_name : public VulkanHandleMapping {      \
       public:                                           \
        virtual ~class_name() {}                         \
        GOLDFISH_VK_LIST_HANDLE_TYPES(impl)              \
    };

#define CREATE_MAPPING_IMPL_FOR_TYPE(type_name)                                \
    MAKE_HANDLE_MAPPING_FOREACH(                                               \
        type_name, handles[i] = new_from_host_##type_name(handles[i]);         \
        ResourceTracker::get()->register_##type_name(handles[i]);              \
        , handle_u64s[i] = (uint64_t)new_from_host_##type_name(handles[i]),    \
        handles[i] = (type_name)new_from_host_u64_##type_name(handle_u64s[i]); \
        ResourceTracker::get()->register_##type_name(handles[i]);)

#define UNWRAP_MAPPING_IMPL_FOR_TYPE(type_name)                          \
    MAKE_HANDLE_MAPPING_FOREACH(                                         \
        type_name, handles[i] = get_host_##type_name(handles[i]),        \
        handle_u64s[i] = (uint64_t)get_host_u64_##type_name(handles[i]), \
        handles[i] = (type_name)get_host_##type_name((type_name)handle_u64s[i]))

#define DESTROY_MAPPING_IMPL_FOR_TYPE(type_name)                                               \
    MAKE_HANDLE_MAPPING_FOREACH(type_name,                                                     \
                                ResourceTracker::get()->unregister_##type_name(handles[i]);    \
                                delete_goldfish_##type_name(handles[i]), (void)handle_u64s[i]; \
                                delete_goldfish_##type_name(handles[i]), (void)handles[i];     \
                                delete_goldfish_##type_name((type_name)handle_u64s[i]))

DEFINE_RESOURCE_TRACKING_CLASS(CreateMapping, CREATE_MAPPING_IMPL_FOR_TYPE)
DEFINE_RESOURCE_TRACKING_CLASS(DestroyMapping, DESTROY_MAPPING_IMPL_FOR_TYPE)

static uint32_t* sSeqnoPtr = nullptr;

// static
uint32_t ResourceTracker::streamFeatureBits = 0;
ResourceTracker::ThreadingCallbacks ResourceTracker::threadingCallbacks;

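// Pool of CommandBufferStagingStream/VkEncoder pairs used while recording
// command buffers. Pairs are recycled through pushStaging()/popStaging() under
// mLock rather than being allocated per command buffer.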
struct StagingInfo {
    Lock mLock;
    std::vector<CommandBufferStagingStream*> streams;
    std::vector<VkEncoder*> encoders;
    /// \brief sets alloc and free callbacks for memory allocation for CommandBufferStagingStream(s)
    /// \param allocFn is the callback to allocate memory
    /// \param freeFn is the callback to free memory
    void setAllocFree(CommandBufferStagingStream::Alloc&& allocFn,
                      CommandBufferStagingStream::Free&& freeFn) {
        mAlloc = allocFn;
        mFree = freeFn;
    }

    ~StagingInfo() {
        for (auto stream : streams) {
            delete stream;
        }

        for (auto encoder : encoders) {
            delete encoder;
        }
    }

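    // pushStaging() resets a stream and returns the pair to the pool;
    // popStaging() hands out a recycled pair, or builds a new one honoring any
    // custom alloc/free callbacks installed via setAllocFree().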
    void pushStaging(CommandBufferStagingStream* stream, VkEncoder* encoder) {
        AutoLock<Lock> lock(mLock);
        stream->reset();
        streams.push_back(stream);
        encoders.push_back(encoder);
    }

    void popStaging(CommandBufferStagingStream** streamOut, VkEncoder** encoderOut) {
        AutoLock<Lock> lock(mLock);
        CommandBufferStagingStream* stream;
        VkEncoder* encoder;
        if (streams.empty()) {
            if (mAlloc && mFree) {
                // if custom allocators are provided, forward them to CommandBufferStagingStream
                stream = new CommandBufferStagingStream(mAlloc, mFree);
            } else {
                stream = new CommandBufferStagingStream;
            }
            encoder = new VkEncoder(stream);
        } else {
            stream = streams.back();
            encoder = encoders.back();
            streams.pop_back();
            encoders.pop_back();
        }
        *streamOut = stream;
        *encoderOut = encoder;
    }

   private:
    CommandBufferStagingStream::Alloc mAlloc = nullptr;
    CommandBufferStagingStream::Free mFree = nullptr;
};

static StagingInfo sStaging;

struct CommandBufferPendingDescriptorSets {
    std::unordered_set<VkDescriptorSet> sets;
};

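// Generate ResourceTracker::register_*/unregister_* for every tracked handle
// type; each one just inserts or erases the per-handle info entry under mLock.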
#define HANDLE_REGISTER_IMPL_IMPL(type)               \
    void ResourceTracker::register_##type(type obj) { \
        AutoLock<RecursiveLock> lock(mLock);          \
        info_##type[obj] = type##_Info();             \
    }

#define HANDLE_UNREGISTER_IMPL_IMPL(type)               \
    void ResourceTracker::unregister_##type(type obj) { \
        AutoLock<RecursiveLock> lock(mLock);            \
        info_##type.erase(obj);                         \
    }

GOLDFISH_VK_LIST_HANDLE_TYPES(HANDLE_REGISTER_IMPL_IMPL)
GOLDFISH_VK_LIST_TRIVIAL_HANDLE_TYPES(HANDLE_UNREGISTER_IMPL_IMPL)

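// Uniform accessors over VkSubmitInfo and VkSubmitInfo2 so queue submission
// logic can be written once against both structures.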
uint32_t getWaitSemaphoreCount(const VkSubmitInfo& pSubmit) { return pSubmit.waitSemaphoreCount; }

uint32_t getWaitSemaphoreCount(const VkSubmitInfo2& pSubmit) {
    return pSubmit.waitSemaphoreInfoCount;
}

uint32_t getCommandBufferCount(const VkSubmitInfo& pSubmit) { return pSubmit.commandBufferCount; }

uint32_t getCommandBufferCount(const VkSubmitInfo2& pSubmit) {
    return pSubmit.commandBufferInfoCount;
}

uint32_t getSignalSemaphoreCount(const VkSubmitInfo& pSubmit) {
    return pSubmit.signalSemaphoreCount;
}

uint32_t getSignalSemaphoreCount(const VkSubmitInfo2& pSubmit) {
    return pSubmit.signalSemaphoreInfoCount;
}

VkSemaphore getWaitSemaphore(const VkSubmitInfo& pSubmit, int i) {
    return pSubmit.pWaitSemaphores[i];
}

VkSemaphore getWaitSemaphore(const VkSubmitInfo2& pSubmit, int i) {
    return pSubmit.pWaitSemaphoreInfos[i].semaphore;
}

VkSemaphore getSignalSemaphore(const VkSubmitInfo& pSubmit, int i) {
    return pSubmit.pSignalSemaphores[i];
}

VkSemaphore getSignalSemaphore(const VkSubmitInfo2& pSubmit, int i) {
    return pSubmit.pSignalSemaphoreInfos[i].semaphore;
}

VkCommandBuffer getCommandBuffer(const VkSubmitInfo& pSubmit, int i) {
    return pSubmit.pCommandBuffers[i];
}

VkCommandBuffer getCommandBuffer(const VkSubmitInfo2& pSubmit, int i) {
    return pSubmit.pCommandBufferInfos[i].commandBuffer;
}

bool descriptorPoolSupportsIndividualFreeLocked(VkDescriptorPool pool) {
    return as_goldfish_VkDescriptorPool(pool)->allocInfo->createFlags &
           VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
}

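// Descriptor writes against a binding declared with immutable samplers must not
// carry a sampler handle, so the sampler field is cleared before the write is
// forwarded to the host.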
VkDescriptorImageInfo createImmutableSamplersFilteredImageInfo(
    VkDescriptorType descType, VkDescriptorSet descSet, uint32_t binding,
    const VkDescriptorImageInfo* pImageInfo) {
    VkDescriptorImageInfo res = *pImageInfo;

    if (descType != VK_DESCRIPTOR_TYPE_SAMPLER &&
        descType != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
        return res;

    bool immutableSampler =
        as_goldfish_VkDescriptorSet(descSet)->reified->bindingIsImmutableSampler[binding];

    if (!immutableSampler) return res;

    res.sampler = 0;

    return res;
}

bool descriptorBindingIsImmutableSampler(VkDescriptorSet dstSet, uint32_t dstBinding) {
    return as_goldfish_VkDescriptorSet(dstSet)->reified->bindingIsImmutableSampler[dstBinding];
}

VkDescriptorImageInfo ResourceTracker::filterNonexistentSampler(
    const VkDescriptorImageInfo& inputInfo) {
    VkSampler sampler = inputInfo.sampler;

    VkDescriptorImageInfo res = inputInfo;

    if (sampler) {
        auto it = info_VkSampler.find(sampler);
        bool samplerExists = it != info_VkSampler.end();
        if (!samplerExists) res.sampler = 0;
    }

    return res;
}

void ResourceTracker::emitDeviceMemoryReport(VkDevice_Info info,
                                             VkDeviceMemoryReportEventTypeEXT type,
                                             uint64_t memoryObjectId, VkDeviceSize size,
                                             VkObjectType objectType, uint64_t objectHandle,
                                             uint32_t heapIndex) {
    if (info.deviceMemoryReportCallbacks.empty()) return;

    const VkDeviceMemoryReportCallbackDataEXT callbackData = {
        VK_STRUCTURE_TYPE_DEVICE_MEMORY_REPORT_CALLBACK_DATA_EXT,  // sType
        nullptr,                                                   // pNext
        0,                                                         // flags
        type,                                                      // type
        memoryObjectId,                                            // memoryObjectId
        size,                                                      // size
        objectType,                                                // objectType
        objectHandle,                                              // objectHandle
        heapIndex,                                                 // heapIndex
    };
    for (const auto& callback : info.deviceMemoryReportCallbacks) {
        callback.first(&callbackData, callback.second);
    }
}

#ifdef VK_USE_PLATFORM_FUCHSIA
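// Baseline sysmem buffer collection constraints: caller-specified size and
// count limits, no coherency-domain restrictions, and both goldfish heaps
// (device-local and host-visible) permitted.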
inline fuchsia_sysmem::wire::BufferCollectionConstraints defaultBufferCollectionConstraints(
    size_t minSizeBytes, size_t minBufferCount, size_t maxBufferCount = 0u,
    size_t minBufferCountForCamping = 0u, size_t minBufferCountForDedicatedSlack = 0u,
    size_t minBufferCountForSharedSlack = 0u) {
    fuchsia_sysmem::wire::BufferCollectionConstraints constraints = {};
    constraints.min_buffer_count = minBufferCount;
    if (maxBufferCount > 0) {
        constraints.max_buffer_count = maxBufferCount;
    }
    if (minBufferCountForCamping) {
        constraints.min_buffer_count_for_camping = minBufferCountForCamping;
    }
    if (minBufferCountForDedicatedSlack) {
        constraints.min_buffer_count_for_dedicated_slack = minBufferCountForDedicatedSlack;
    }
    if (minBufferCountForSharedSlack) {
        constraints.min_buffer_count_for_shared_slack = minBufferCountForSharedSlack;
    }
    constraints.has_buffer_memory_constraints = true;
    fuchsia_sysmem::wire::BufferMemoryConstraints& buffer_constraints =
        constraints.buffer_memory_constraints;

    buffer_constraints.min_size_bytes = minSizeBytes;
    buffer_constraints.max_size_bytes = 0xffffffff;
    buffer_constraints.physically_contiguous_required = false;
    buffer_constraints.secure_required = false;

    // No restrictions on coherency domain or Heaps.
    buffer_constraints.ram_domain_supported = true;
    buffer_constraints.cpu_domain_supported = true;
    buffer_constraints.inaccessible_domain_supported = true;
    buffer_constraints.heap_permitted_count = 2;
    buffer_constraints.heap_permitted[0] = fuchsia_sysmem::wire::HeapType::kGoldfishDeviceLocal;
    buffer_constraints.heap_permitted[1] = fuchsia_sysmem::wire::HeapType::kGoldfishHostVisible;

    return constraints;
}

uint32_t getBufferCollectionConstraintsVulkanImageUsage(const VkImageCreateInfo* pImageInfo) {
    uint32_t usage = 0u;
    VkImageUsageFlags imageUsage = pImageInfo->usage;

#define SetUsageBit(BIT, VALUE)                                  \
    if (imageUsage & VK_IMAGE_USAGE_##BIT##_BIT) {               \
        usage |= fuchsia_sysmem::wire::kVulkanImageUsage##VALUE; \
    }

    SetUsageBit(COLOR_ATTACHMENT, ColorAttachment);
    SetUsageBit(TRANSFER_SRC, TransferSrc);
    SetUsageBit(TRANSFER_DST, TransferDst);
    SetUsageBit(SAMPLED, Sampled);

#undef SetUsageBit
    return usage;
}

uint32_t getBufferCollectionConstraintsVulkanBufferUsage(VkBufferUsageFlags bufferUsage) {
    uint32_t usage = 0u;

#define SetUsageBit(BIT, VALUE)                                   \
    if (bufferUsage & VK_BUFFER_USAGE_##BIT##_BIT) {              \
        usage |= fuchsia_sysmem::wire::kVulkanBufferUsage##VALUE; \
    }

    SetUsageBit(TRANSFER_SRC, TransferSrc);
    SetUsageBit(TRANSFER_DST, TransferDst);
    SetUsageBit(UNIFORM_TEXEL_BUFFER, UniformTexelBuffer);
    SetUsageBit(STORAGE_TEXEL_BUFFER, StorageTexelBuffer);
    SetUsageBit(UNIFORM_BUFFER, UniformBuffer);
    SetUsageBit(STORAGE_BUFFER, StorageBuffer);
    SetUsageBit(INDEX_BUFFER, IndexBuffer);
    SetUsageBit(VERTEX_BUFFER, VertexBuffer);
    SetUsageBit(INDIRECT_BUFFER, IndirectBuffer);

#undef SetUsageBit
    return usage;
}

uint32_t getBufferCollectionConstraintsVulkanBufferUsage(
    const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo) {
    VkBufferUsageFlags bufferUsage = pBufferConstraintsInfo->createInfo.usage;
    return getBufferCollectionConstraintsVulkanBufferUsage(bufferUsage);
}

static fuchsia_sysmem::wire::PixelFormatType vkFormatTypeToSysmem(VkFormat format) {
    switch (format) {
        case VK_FORMAT_B8G8R8A8_SINT:
        case VK_FORMAT_B8G8R8A8_UNORM:
        case VK_FORMAT_B8G8R8A8_SRGB:
        case VK_FORMAT_B8G8R8A8_SNORM:
        case VK_FORMAT_B8G8R8A8_SSCALED:
        case VK_FORMAT_B8G8R8A8_USCALED:
            return fuchsia_sysmem::wire::PixelFormatType::kBgra32;
        case VK_FORMAT_R8G8B8A8_SINT:
        case VK_FORMAT_R8G8B8A8_UNORM:
        case VK_FORMAT_R8G8B8A8_SRGB:
        case VK_FORMAT_R8G8B8A8_SNORM:
        case VK_FORMAT_R8G8B8A8_SSCALED:
        case VK_FORMAT_R8G8B8A8_USCALED:
            return fuchsia_sysmem::wire::PixelFormatType::kR8G8B8A8;
        case VK_FORMAT_R8_UNORM:
        case VK_FORMAT_R8_UINT:
        case VK_FORMAT_R8_USCALED:
        case VK_FORMAT_R8_SNORM:
        case VK_FORMAT_R8_SINT:
        case VK_FORMAT_R8_SSCALED:
        case VK_FORMAT_R8_SRGB:
            return fuchsia_sysmem::wire::PixelFormatType::kR8;
        case VK_FORMAT_R8G8_UNORM:
        case VK_FORMAT_R8G8_UINT:
        case VK_FORMAT_R8G8_USCALED:
        case VK_FORMAT_R8G8_SNORM:
        case VK_FORMAT_R8G8_SINT:
        case VK_FORMAT_R8G8_SSCALED:
        case VK_FORMAT_R8G8_SRGB:
            return fuchsia_sysmem::wire::PixelFormatType::kR8G8;
        default:
            return fuchsia_sysmem::wire::PixelFormatType::kInvalid;
    }
}

static bool vkFormatMatchesSysmemFormat(VkFormat vkFormat,
                                        fuchsia_sysmem::wire::PixelFormatType sysmemFormat) {
    switch (vkFormat) {
        case VK_FORMAT_B8G8R8A8_SINT:
        case VK_FORMAT_B8G8R8A8_UNORM:
        case VK_FORMAT_B8G8R8A8_SRGB:
        case VK_FORMAT_B8G8R8A8_SNORM:
        case VK_FORMAT_B8G8R8A8_SSCALED:
        case VK_FORMAT_B8G8R8A8_USCALED:
            return sysmemFormat == fuchsia_sysmem::wire::PixelFormatType::kBgra32;
        case VK_FORMAT_R8G8B8A8_SINT:
        case VK_FORMAT_R8G8B8A8_UNORM:
        case VK_FORMAT_R8G8B8A8_SRGB:
        case VK_FORMAT_R8G8B8A8_SNORM:
        case VK_FORMAT_R8G8B8A8_SSCALED:
        case VK_FORMAT_R8G8B8A8_USCALED:
            return sysmemFormat == fuchsia_sysmem::wire::PixelFormatType::kR8G8B8A8;
        case VK_FORMAT_R8_UNORM:
        case VK_FORMAT_R8_UINT:
        case VK_FORMAT_R8_USCALED:
        case VK_FORMAT_R8_SNORM:
        case VK_FORMAT_R8_SINT:
        case VK_FORMAT_R8_SSCALED:
        case VK_FORMAT_R8_SRGB:
            return sysmemFormat == fuchsia_sysmem::wire::PixelFormatType::kR8 ||
                   sysmemFormat == fuchsia_sysmem::wire::PixelFormatType::kL8;
        case VK_FORMAT_R8G8_UNORM:
        case VK_FORMAT_R8G8_UINT:
        case VK_FORMAT_R8G8_USCALED:
        case VK_FORMAT_R8G8_SNORM:
        case VK_FORMAT_R8G8_SINT:
        case VK_FORMAT_R8G8_SSCALED:
        case VK_FORMAT_R8G8_SRGB:
            return sysmemFormat == fuchsia_sysmem::wire::PixelFormatType::kR8G8;
        default:
            return false;
    }
}

static VkFormat sysmemPixelFormatTypeToVk(fuchsia_sysmem::wire::PixelFormatType format) {
    switch (format) {
        case fuchsia_sysmem::wire::PixelFormatType::kBgra32:
            return VK_FORMAT_B8G8R8A8_SRGB;
        case fuchsia_sysmem::wire::PixelFormatType::kR8G8B8A8:
            return VK_FORMAT_R8G8B8A8_SRGB;
        case fuchsia_sysmem::wire::PixelFormatType::kL8:
        case fuchsia_sysmem::wire::PixelFormatType::kR8:
            return VK_FORMAT_R8_UNORM;
        case fuchsia_sysmem::wire::PixelFormatType::kR8G8:
            return VK_FORMAT_R8G8_UNORM;
        default:
            return VK_FORMAT_UNDEFINED;
    }
}

// TODO(fxbug.dev/90856): This is currently only used for allocating
// memory for dedicated external images. It should be migrated to use
// SetBufferCollectionImageConstraintsFUCHSIA.
VkResult ResourceTracker::setBufferCollectionConstraintsFUCHSIA(
    VkEncoder* enc, VkDevice device,
    fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>* collection,
    const VkImageCreateInfo* pImageInfo) {
    if (pImageInfo == nullptr) {
        ALOGE("setBufferCollectionConstraints: pImageInfo cannot be null.");
        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    }

    const VkSysmemColorSpaceFUCHSIA kDefaultColorSpace = {
        .sType = VK_STRUCTURE_TYPE_SYSMEM_COLOR_SPACE_FUCHSIA,
        .pNext = nullptr,
        .colorSpace = static_cast<uint32_t>(fuchsia_sysmem::wire::ColorSpaceType::kSrgb),
    };

    std::vector<VkImageFormatConstraintsInfoFUCHSIA> formatInfos;
    if (pImageInfo->format == VK_FORMAT_UNDEFINED) {
        const auto kFormats = {
            VK_FORMAT_B8G8R8A8_SRGB,
            VK_FORMAT_R8G8B8A8_SRGB,
        };
        for (auto format : kFormats) {
            // shallow copy, using pNext from pImageInfo directly.
            auto createInfo = *pImageInfo;
            createInfo.format = format;
            formatInfos.push_back(VkImageFormatConstraintsInfoFUCHSIA{
                .sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_CONSTRAINTS_INFO_FUCHSIA,
                .pNext = nullptr,
                .imageCreateInfo = createInfo,
                .colorSpaceCount = 1,
                .pColorSpaces = &kDefaultColorSpace,
            });
        }
    } else {
        formatInfos.push_back(VkImageFormatConstraintsInfoFUCHSIA{
            .sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_CONSTRAINTS_INFO_FUCHSIA,
            .pNext = nullptr,
            .imageCreateInfo = *pImageInfo,
            .colorSpaceCount = 1,
            .pColorSpaces = &kDefaultColorSpace,
        });
    }

    VkImageConstraintsInfoFUCHSIA imageConstraints = {
        .sType = VK_STRUCTURE_TYPE_IMAGE_CONSTRAINTS_INFO_FUCHSIA,
        .pNext = nullptr,
        .formatConstraintsCount = static_cast<uint32_t>(formatInfos.size()),
        .pFormatConstraints = formatInfos.data(),
        .bufferCollectionConstraints =
            VkBufferCollectionConstraintsInfoFUCHSIA{
                .sType = VK_STRUCTURE_TYPE_BUFFER_COLLECTION_CONSTRAINTS_INFO_FUCHSIA,
                .pNext = nullptr,
                .minBufferCount = 1,
                .maxBufferCount = 0,
                .minBufferCountForCamping = 0,
                .minBufferCountForDedicatedSlack = 0,
                .minBufferCountForSharedSlack = 0,
            },
        .flags = 0u,
    };

    return setBufferCollectionImageConstraintsFUCHSIA(enc, device, collection, &imageConstraints);
}

VkResult addImageBufferCollectionConstraintsFUCHSIA(
    VkEncoder* enc, VkDevice device, VkPhysicalDevice physicalDevice,
    const VkImageFormatConstraintsInfoFUCHSIA* formatConstraints,  // always non-zero
    VkImageTiling tiling, fuchsia_sysmem::wire::BufferCollectionConstraints* constraints) {
    // First check if the format, tiling and usage is supported on host.
    VkImageFormatProperties imageFormatProperties;
    auto createInfo = &formatConstraints->imageCreateInfo;
    auto result = enc->vkGetPhysicalDeviceImageFormatProperties(
        physicalDevice, createInfo->format, createInfo->imageType, tiling, createInfo->usage,
        createInfo->flags, &imageFormatProperties, true /* do lock */);
    if (result != VK_SUCCESS) {
        ALOGD(
            "%s: Image format (%u) type (%u) tiling (%u) "
            "usage (%u) flags (%u) not supported by physical "
            "device",
            __func__, static_cast<uint32_t>(createInfo->format),
            static_cast<uint32_t>(createInfo->imageType), static_cast<uint32_t>(tiling),
            static_cast<uint32_t>(createInfo->usage), static_cast<uint32_t>(createInfo->flags));
        return VK_ERROR_FORMAT_NOT_SUPPORTED;
    }

    // Check if format constraints contains unsupported format features.
    {
        VkFormatProperties formatProperties;
        enc->vkGetPhysicalDeviceFormatProperties(physicalDevice, createInfo->format,
                                                 &formatProperties, true /* do lock */);

        auto supportedFeatures = (tiling == VK_IMAGE_TILING_LINEAR)
                                     ? formatProperties.linearTilingFeatures
                                     : formatProperties.optimalTilingFeatures;
        auto requiredFeatures = formatConstraints->requiredFormatFeatures;
        if ((~supportedFeatures) & requiredFeatures) {
            ALOGD(
                "%s: Host device support features for %s tiling: %08x, "
                "required features: %08x, feature bits %08x missing",
                __func__, tiling == VK_IMAGE_TILING_LINEAR ? "LINEAR" : "OPTIMAL",
                static_cast<uint32_t>(supportedFeatures), static_cast<uint32_t>(requiredFeatures),
                static_cast<uint32_t>((~supportedFeatures) & requiredFeatures));
            return VK_ERROR_FORMAT_NOT_SUPPORTED;
        }
    }

    fuchsia_sysmem::wire::ImageFormatConstraints imageConstraints;
    if (formatConstraints->sysmemPixelFormat != 0) {
        auto pixelFormat = static_cast<fuchsia_sysmem::wire::PixelFormatType>(
            formatConstraints->sysmemPixelFormat);
        if (createInfo->format != VK_FORMAT_UNDEFINED &&
            !vkFormatMatchesSysmemFormat(createInfo->format, pixelFormat)) {
            ALOGD("%s: VkFormat %u doesn't match sysmem pixelFormat %lu", __func__,
                  static_cast<uint32_t>(createInfo->format), formatConstraints->sysmemPixelFormat);
            return VK_ERROR_FORMAT_NOT_SUPPORTED;
        }
        imageConstraints.pixel_format.type = pixelFormat;
    } else {
        auto pixel_format = vkFormatTypeToSysmem(createInfo->format);
        if (pixel_format == fuchsia_sysmem::wire::PixelFormatType::kInvalid) {
            ALOGD("%s: Unsupported VkFormat %u", __func__,
                  static_cast<uint32_t>(createInfo->format));
            return VK_ERROR_FORMAT_NOT_SUPPORTED;
        }
        imageConstraints.pixel_format.type = pixel_format;
    }

    imageConstraints.color_spaces_count = formatConstraints->colorSpaceCount;
    for (size_t i = 0; i < formatConstraints->colorSpaceCount; i++) {
        imageConstraints.color_space[i].type = static_cast<fuchsia_sysmem::wire::ColorSpaceType>(
            formatConstraints->pColorSpaces[i].colorSpace);
    }

    // Get row alignment from host GPU.
    VkDeviceSize offset = 0;
    VkDeviceSize rowPitchAlignment = 1u;

    if (tiling == VK_IMAGE_TILING_LINEAR) {
        VkImageCreateInfo createInfoDup = *createInfo;
        createInfoDup.pNext = nullptr;
        enc->vkGetLinearImageLayout2GOOGLE(device, &createInfoDup, &offset, &rowPitchAlignment,
                                           true /* do lock */);
        ALOGD(
            "vkGetLinearImageLayout2GOOGLE: format %d offset %lu "
            "rowPitchAlignment = %lu",
            (int)createInfo->format, offset, rowPitchAlignment);
    }

    imageConstraints.min_coded_width = createInfo->extent.width;
    imageConstraints.max_coded_width = 0xfffffff;
    imageConstraints.min_coded_height = createInfo->extent.height;
    imageConstraints.max_coded_height = 0xffffffff;
    // The min_bytes_per_row can be calculated by sysmem using
    // |min_coded_width|, |bytes_per_row_divisor| and color format.
    imageConstraints.min_bytes_per_row = 0;
    imageConstraints.max_bytes_per_row = 0xffffffff;
    imageConstraints.max_coded_width_times_coded_height = 0xffffffff;

    imageConstraints.layers = 1;
    imageConstraints.coded_width_divisor = 1;
    imageConstraints.coded_height_divisor = 1;
    imageConstraints.bytes_per_row_divisor = rowPitchAlignment;
    imageConstraints.start_offset_divisor = 1;
    imageConstraints.display_width_divisor = 1;
    imageConstraints.display_height_divisor = 1;
    imageConstraints.pixel_format.has_format_modifier = true;
    imageConstraints.pixel_format.format_modifier.value =
        (tiling == VK_IMAGE_TILING_LINEAR)
            ? fuchsia_sysmem::wire::kFormatModifierLinear
            : fuchsia_sysmem::wire::kFormatModifierGoogleGoldfishOptimal;

    constraints->image_format_constraints[constraints->image_format_constraints_count++] =
        imageConstraints;
    return VK_SUCCESS;
}

SetBufferCollectionBufferConstraintsResult setBufferCollectionBufferConstraintsImpl(
    fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>* pCollection,
    const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo) {
    const auto& collection = *pCollection;
    if (pBufferConstraintsInfo == nullptr) {
        ALOGE(
            "setBufferCollectionBufferConstraints: "
            "pBufferConstraintsInfo cannot be null.");
        return {VK_ERROR_OUT_OF_DEVICE_MEMORY};
    }

    fuchsia_sysmem::wire::BufferCollectionConstraints constraints =
        defaultBufferCollectionConstraints(
            /* min_size_bytes */ pBufferConstraintsInfo->createInfo.size,
            /* buffer_count */ pBufferConstraintsInfo->bufferCollectionConstraints.minBufferCount);
    constraints.usage.vulkan =
        getBufferCollectionConstraintsVulkanBufferUsage(pBufferConstraintsInfo);

    constexpr uint32_t kVulkanPriority = 5;
    const char kName[] = "GoldfishBufferSysmemShared";
    collection->SetName(kVulkanPriority, fidl::StringView(kName));

    auto result = collection->SetConstraints(true, constraints);
    if (!result.ok()) {
        ALOGE("setBufferCollectionConstraints: SetConstraints failed: %d", result.status());
        return {VK_ERROR_OUT_OF_DEVICE_MEMORY};
    }

    return {VK_SUCCESS, constraints};
}
#endif

uint64_t getAHardwareBufferId(AHardwareBuffer* ahw) {
    uint64_t id = 0;
#if defined(__ANDROID__) && ANDROID_API_LEVEL >= 31
    AHardwareBuffer_getId(ahw, &id);
#else
    (void)ahw;
#endif
    return id;
}

void transformExternalResourceMemoryDedicatedRequirementsForGuest(
    VkMemoryDedicatedRequirements* dedicatedReqs) {
    dedicatedReqs->prefersDedicatedAllocation = VK_TRUE;
    dedicatedReqs->requiresDedicatedAllocation = VK_TRUE;
}

void ResourceTracker::transformImageMemoryRequirementsForGuestLocked(VkImage image,
                                                                     VkMemoryRequirements* reqs) {
#ifdef VK_USE_PLATFORM_FUCHSIA
    auto it = info_VkImage.find(image);
    if (it == info_VkImage.end()) return;
    auto& info = it->second;
    if (info.isSysmemBackedMemory) {
        auto width = info.createInfo.extent.width;
        auto height = info.createInfo.extent.height;
        reqs->size = width * height * 4;
    }
#elif defined(__linux__) && !defined(VK_USE_PLATFORM_ANDROID_KHR)
    auto it = info_VkImage.find(image);
    if (it == info_VkImage.end()) return;
    auto& info = it->second;
    if (info.isWsiImage) {
        static const uint32_t kColorBufferBpp = 4;
        reqs->size = kColorBufferBpp * info.createInfo.extent.width * info.createInfo.extent.height;
    }
#else
    // Bypass "unused parameter" checks.
    (void)image;
    (void)reqs;
#endif
}

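// Releases the guest mapping that a VkDeviceMemory suballocated from a shared
// CoherentMemory block and returns the block to the caller so it can be
// destroyed outside the lock; returns nullptr if the memory was not
// coherent-backed.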
CoherentMemoryPtr ResourceTracker::freeCoherentMemoryLocked(VkDeviceMemory memory,
                                                            VkDeviceMemory_Info& info) {
    if (info.coherentMemory && info.ptr) {
        if (info.coherentMemory->getDeviceMemory() != memory) {
            delete_goldfish_VkDeviceMemory(memory);
        }

        if (info.ptr) {
            info.coherentMemory->release(info.ptr);
            info.ptr = nullptr;
        }

        return std::move(info.coherentMemory);
    }

    return nullptr;
}

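// Asks virtio-gpu to export the given host fence as a guest sync object by
// submitting a GFXSTREAM_CREATE_EXPORT_SYNC_VK execbuffer with kFenceOut; the
// resulting OS handle is returned through osHandle.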
VkResult createFence(VkDevice device, uint64_t hostFenceHandle, int64_t& osHandle) {
    struct VirtGpuExecBuffer exec = {};
    struct gfxstreamCreateExportSyncVK exportSync = {};
    VirtGpuDevice* instance = VirtGpuDevice::getInstance();

    uint64_t hostDeviceHandle = get_host_u64_VkDevice(device);

    exportSync.hdr.opCode = GFXSTREAM_CREATE_EXPORT_SYNC_VK;
    exportSync.deviceHandleLo = (uint32_t)hostDeviceHandle;
    exportSync.deviceHandleHi = (uint32_t)(hostDeviceHandle >> 32);
    exportSync.fenceHandleLo = (uint32_t)hostFenceHandle;
    exportSync.fenceHandleHi = (uint32_t)(hostFenceHandle >> 32);

    exec.command = static_cast<void*>(&exportSync);
    exec.command_size = sizeof(exportSync);
    exec.flags = kFenceOut | kRingIdx;
    if (instance->execBuffer(exec, nullptr)) return VK_ERROR_OUT_OF_HOST_MEMORY;

    osHandle = exec.handle.osHandle;
    return VK_SUCCESS;
}

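// Recurses through secondary command buffers first, then gathers the pending
// descriptor sets recorded on each command buffer in workingSet into allDs, so
// the deepest dependencies are collected before their parents.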
void collectAllPendingDescriptorSetsBottomUp(const std::vector<VkCommandBuffer>& workingSet,
                                             std::unordered_set<VkDescriptorSet>& allDs) {
    if (workingSet.empty()) return;

    std::vector<VkCommandBuffer> nextLevel;
    for (auto commandBuffer : workingSet) {
        struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
        forAllObjects(cb->subObjects, [&nextLevel](void* secondary) {
            nextLevel.push_back((VkCommandBuffer)secondary);
        });
    }

    collectAllPendingDescriptorSetsBottomUp(nextLevel, allDs);

    for (auto cmdbuf : workingSet) {
        struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(cmdbuf);

        if (!cb->userPtr) {
            continue;  // No descriptors to update.
        }

        CommandBufferPendingDescriptorSets* pendingDescriptorSets =
            (CommandBufferPendingDescriptorSets*)(cb->userPtr);

        if (pendingDescriptorSets->sets.empty()) {
            continue;  // No descriptors to update.
        }

        allDs.insert(pendingDescriptorSets->sets.begin(), pendingDescriptorSets->sets.end());
    }
}

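// Flushes all descriptor writes batched on the guest side with a single
// vkQueueCommitDescriptorSetUpdatesGOOGLE call: pools are deduplicated, each
// set's pending writes are replayed, and allocation-pending flags are cleared
// once the host has serviced the allocations.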
void commitDescriptorSetUpdates(void* context, VkQueue queue,
                                const std::unordered_set<VkDescriptorSet>& sets) {
    VkEncoder* enc = (VkEncoder*)context;

    std::unordered_map<VkDescriptorPool, uint32_t> poolSet;
    std::vector<VkDescriptorPool> pools;
    std::vector<VkDescriptorSetLayout> setLayouts;
    std::vector<uint64_t> poolIds;
    std::vector<uint32_t> descriptorSetWhichPool;
    std::vector<uint32_t> pendingAllocations;
    std::vector<uint32_t> writeStartingIndices;
    std::vector<VkWriteDescriptorSet> writesForHost;

    uint32_t poolIndex = 0;
    uint32_t currentWriteIndex = 0;
    for (auto set : sets) {
        ReifiedDescriptorSet* reified = as_goldfish_VkDescriptorSet(set)->reified;
        VkDescriptorPool pool = reified->pool;
        VkDescriptorSetLayout setLayout = reified->setLayout;

        auto it = poolSet.find(pool);
        if (it == poolSet.end()) {
            poolSet[pool] = poolIndex;
            descriptorSetWhichPool.push_back(poolIndex);
            pools.push_back(pool);
            ++poolIndex;
        } else {
            uint32_t savedPoolIndex = it->second;
            descriptorSetWhichPool.push_back(savedPoolIndex);
        }

        poolIds.push_back(reified->poolId);
        setLayouts.push_back(setLayout);
        pendingAllocations.push_back(reified->allocationPending ? 1 : 0);
        writeStartingIndices.push_back(currentWriteIndex);

        auto& writes = reified->allWrites;

        for (size_t i = 0; i < writes.size(); ++i) {
            uint32_t binding = i;

            for (size_t j = 0; j < writes[i].size(); ++j) {
                auto& write = writes[i][j];

                if (write.type == DescriptorWriteType::Empty) continue;

                uint32_t dstArrayElement = 0;

                VkDescriptorImageInfo* imageInfo = nullptr;
                VkDescriptorBufferInfo* bufferInfo = nullptr;
                VkBufferView* bufferView = nullptr;

                switch (write.type) {
                    case DescriptorWriteType::Empty:
                        break;
                    case DescriptorWriteType::ImageInfo:
                        dstArrayElement = j;
                        imageInfo = &write.imageInfo;
                        break;
                    case DescriptorWriteType::BufferInfo:
                        dstArrayElement = j;
                        bufferInfo = &write.bufferInfo;
                        break;
                    case DescriptorWriteType::BufferView:
                        dstArrayElement = j;
                        bufferView = &write.bufferView;
                        break;
                    case DescriptorWriteType::InlineUniformBlock:
                    case DescriptorWriteType::AccelerationStructure:
                        // TODO
                        ALOGE(
                            "Encountered pending inline uniform block or acceleration structure "
                            "desc write, abort (NYI)\n");
                        abort();
                    default:
                        break;
                }

                // TODO: Combine multiple writes into one VkWriteDescriptorSet.
                VkWriteDescriptorSet forHost = {
                    VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
                    0 /* TODO: inline uniform block */,
                    set,
                    binding,
                    dstArrayElement,
                    1,
                    write.descriptorType,
                    imageInfo,
                    bufferInfo,
                    bufferView,
                };

                writesForHost.push_back(forHost);
                ++currentWriteIndex;

                // Set it back to empty.
                write.type = DescriptorWriteType::Empty;
            }
        }
    }

    // Skip out if there's nothing to VkWriteDescriptorSet home about.
    if (writesForHost.empty()) {
        return;
    }

    enc->vkQueueCommitDescriptorSetUpdatesGOOGLE(
        queue, (uint32_t)pools.size(), pools.data(), (uint32_t)sets.size(), setLayouts.data(),
        poolIds.data(), descriptorSetWhichPool.data(), pendingAllocations.data(),
        writeStartingIndices.data(), (uint32_t)writesForHost.size(), writesForHost.data(),
        false /* no lock */);

    // If we got here, then we definitely serviced the allocations.
    for (auto set : sets) {
        ReifiedDescriptorSet* reified = as_goldfish_VkDescriptorSet(set)->reified;
        reified->allocationPending = false;
    }
}

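// When a command buffer migrates from one encoder (thread) to another, emit a
// sequence-number handshake so the host processes the two streams in order:
// the old encoder signals oldSeq + 1 and flushes, then the new encoder picks
// up at oldSeq + 2.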
uint32_t ResourceTracker::syncEncodersForCommandBuffer(VkCommandBuffer commandBuffer,
                                                       VkEncoder* currentEncoder) {
    struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
    if (!cb) return 0;

    auto lastEncoder = cb->lastUsedEncoder;

    if (lastEncoder == currentEncoder) return 0;

    currentEncoder->incRef();

    cb->lastUsedEncoder = currentEncoder;

    if (!lastEncoder) return 0;

    auto oldSeq = cb->sequenceNumber;
    cb->sequenceNumber += 2;
    lastEncoder->vkCommandBufferHostSyncGOOGLE(commandBuffer, false, oldSeq + 1,
                                               true /* do lock */);
    lastEncoder->flush();
    currentEncoder->vkCommandBufferHostSyncGOOGLE(commandBuffer, true, oldSeq + 2,
                                                  true /* do lock */);

    if (lastEncoder->decRef()) {
        cb->lastUsedEncoder = nullptr;
    }
    return 0;
}

void addPendingDescriptorSets(VkCommandBuffer commandBuffer, uint32_t descriptorSetCount,
                              const VkDescriptorSet* pDescriptorSets) {
    struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);

    if (!cb->userPtr) {
        CommandBufferPendingDescriptorSets* newPendingSets = new CommandBufferPendingDescriptorSets;
        cb->userPtr = newPendingSets;
    }

    CommandBufferPendingDescriptorSets* pendingSets =
        (CommandBufferPendingDescriptorSets*)cb->userPtr;

    for (uint32_t i = 0; i < descriptorSetCount; ++i) {
        pendingSets->sets.insert(pDescriptorSets[i]);
    }
}

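// Descriptor set layout info is reference-counted on the guest; the host
// object is destroyed only when the last reference goes away.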
void decDescriptorSetLayoutRef(void* context, VkDevice device,
                               VkDescriptorSetLayout descriptorSetLayout,
                               const VkAllocationCallbacks* pAllocator) {
    if (!descriptorSetLayout) return;

    struct goldfish_VkDescriptorSetLayout* setLayout =
        as_goldfish_VkDescriptorSetLayout(descriptorSetLayout);

    if (0 == --setLayout->layoutInfo->refcount) {
        VkEncoder* enc = (VkEncoder*)context;
        enc->vkDestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator,
                                          true /* do lock */);
    }
}

void ResourceTracker::ensureSyncDeviceFd() {
#if GFXSTREAM_ENABLE_GUEST_GOLDFISH
    if (mSyncDeviceFd >= 0) return;
    mSyncDeviceFd = goldfish_sync_open();
    if (mSyncDeviceFd >= 0) {
        ALOGD("%s: created sync device for current Vulkan process: %d\n", __func__, mSyncDeviceFd);
    } else {
        ALOGD("%s: failed to create sync device for current Vulkan process\n", __func__);
    }
#endif
}

void ResourceTracker::unregister_VkInstance(VkInstance instance) {
    AutoLock<RecursiveLock> lock(mLock);

    auto it = info_VkInstance.find(instance);
    if (it == info_VkInstance.end()) return;
    auto info = it->second;
    info_VkInstance.erase(instance);
    lock.unlock();
}

void ResourceTracker::unregister_VkDevice(VkDevice device) {
    AutoLock<RecursiveLock> lock(mLock);

    auto it = info_VkDevice.find(device);
    if (it == info_VkDevice.end()) return;
    auto info = it->second;
    info_VkDevice.erase(device);
    lock.unlock();
}

void ResourceTracker::unregister_VkCommandPool(VkCommandPool pool) {
    if (!pool) return;

    clearCommandPool(pool);

    AutoLock<RecursiveLock> lock(mLock);
    info_VkCommandPool.erase(pool);
}

void ResourceTracker::unregister_VkSampler(VkSampler sampler) {
    if (!sampler) return;

    AutoLock<RecursiveLock> lock(mLock);
    info_VkSampler.erase(sampler);
}

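// Tears down everything hanging off a command buffer: staging stream/encoder
// state, links to secondary command buffers and owning pools, any pending
// descriptor sets, and finally the tracker entry itself.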
void ResourceTracker::unregister_VkCommandBuffer(VkCommandBuffer commandBuffer) {
    resetCommandBufferStagingInfo(commandBuffer, true /* also reset primaries */,
                                  true /* also clear pending descriptor sets */);

    struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
    if (!cb) return;
    if (cb->lastUsedEncoder) {
        cb->lastUsedEncoder->decRef();
    }
    eraseObjects(&cb->subObjects);
    forAllObjects(cb->poolObjects, [cb](void* commandPool) {
        struct goldfish_VkCommandPool* p = as_goldfish_VkCommandPool((VkCommandPool)commandPool);
        eraseObject(&p->subObjects, (void*)cb);
    });
    eraseObjects(&cb->poolObjects);

    if (cb->userPtr) {
        CommandBufferPendingDescriptorSets* pendingSets =
            (CommandBufferPendingDescriptorSets*)cb->userPtr;
        delete pendingSets;
    }

    AutoLock<RecursiveLock> lock(mLock);
    info_VkCommandBuffer.erase(commandBuffer);
}

void ResourceTracker::unregister_VkQueue(VkQueue queue) {
    struct goldfish_VkQueue* q = as_goldfish_VkQueue(queue);
    if (!q) return;
    if (q->lastUsedEncoder) {
        q->lastUsedEncoder->decRef();
    }

    AutoLock<RecursiveLock> lock(mLock);
    info_VkQueue.erase(queue);
}

void ResourceTracker::unregister_VkDeviceMemory(VkDeviceMemory mem) {
    AutoLock<RecursiveLock> lock(mLock);

    auto it = info_VkDeviceMemory.find(mem);
    if (it == info_VkDeviceMemory.end()) return;

    auto& memInfo = it->second;

#ifdef VK_USE_PLATFORM_ANDROID_KHR
    if (memInfo.ahw) {
        auto* gralloc =
            ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->grallocHelper();
        gralloc->release(memInfo.ahw);
    }
#endif

    if (memInfo.vmoHandle != ZX_HANDLE_INVALID) {
        zx_handle_close(memInfo.vmoHandle);
    }

    info_VkDeviceMemory.erase(mem);
}

void ResourceTracker::unregister_VkImage(VkImage img) {
    AutoLock<RecursiveLock> lock(mLock);

    auto it = info_VkImage.find(img);
    if (it == info_VkImage.end()) return;

    auto& imageInfo = it->second;
    (void)imageInfo;

    info_VkImage.erase(img);
}

void ResourceTracker::unregister_VkBuffer(VkBuffer buf) {
    AutoLock<RecursiveLock> lock(mLock);

    auto it = info_VkBuffer.find(buf);
    if (it == info_VkBuffer.end()) return;

    info_VkBuffer.erase(buf);
}

void ResourceTracker::unregister_VkSemaphore(VkSemaphore sem) {
    AutoLock<RecursiveLock> lock(mLock);

    auto it = info_VkSemaphore.find(sem);
    if (it == info_VkSemaphore.end()) return;

    auto& semInfo = it->second;

    if (semInfo.eventHandle != ZX_HANDLE_INVALID) {
        zx_handle_close(semInfo.eventHandle);
    }

#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
    if (semInfo.syncFd.value_or(-1) >= 0) {
        auto* syncHelper =
            ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->syncHelper();
        syncHelper->close(semInfo.syncFd.value());
    }
#endif

    info_VkSemaphore.erase(sem);
}

void ResourceTracker::unregister_VkDescriptorUpdateTemplate(VkDescriptorUpdateTemplate templ) {
    AutoLock<RecursiveLock> lock(mLock);
    auto it = info_VkDescriptorUpdateTemplate.find(templ);
    if (it == info_VkDescriptorUpdateTemplate.end()) return;

    auto& info = it->second;
    if (info.templateEntryCount) delete[] info.templateEntries;
    if (info.imageInfoCount) {
        delete[] info.imageInfoIndices;
        delete[] info.imageInfos;
    }
    if (info.bufferInfoCount) {
        delete[] info.bufferInfoIndices;
        delete[] info.bufferInfos;
    }
    if (info.bufferViewCount) {
        delete[] info.bufferViewIndices;
        delete[] info.bufferViews;
    }
    info_VkDescriptorUpdateTemplate.erase(it);
}

void ResourceTracker::unregister_VkFence(VkFence fence) {
    AutoLock<RecursiveLock> lock(mLock);
    auto it = info_VkFence.find(fence);
    if (it == info_VkFence.end()) return;

    auto& fenceInfo = it->second;
    (void)fenceInfo;

#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
    if (fenceInfo.syncFd >= 0) {
        auto* syncHelper =
            ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->syncHelper();
        syncHelper->close(fenceInfo.syncFd);
    }
#endif

    info_VkFence.erase(fence);
}

#ifdef VK_USE_PLATFORM_FUCHSIA
void ResourceTracker::unregister_VkBufferCollectionFUCHSIA(VkBufferCollectionFUCHSIA collection) {
    AutoLock<RecursiveLock> lock(mLock);
    info_VkBufferCollectionFUCHSIA.erase(collection);
}
#endif

void ResourceTracker::unregister_VkDescriptorSet_locked(VkDescriptorSet set) {
    struct goldfish_VkDescriptorSet* ds = as_goldfish_VkDescriptorSet(set);
    delete ds->reified;
    info_VkDescriptorSet.erase(set);
}

void ResourceTracker::unregister_VkDescriptorSet(VkDescriptorSet set) {
    if (!set) return;

    AutoLock<RecursiveLock> lock(mLock);
    unregister_VkDescriptorSet_locked(set);
}

void ResourceTracker::unregister_VkDescriptorSetLayout(VkDescriptorSetLayout setLayout) {
    if (!setLayout) return;

    AutoLock<RecursiveLock> lock(mLock);
    delete as_goldfish_VkDescriptorSetLayout(setLayout)->layoutInfo;
    info_VkDescriptorSetLayout.erase(setLayout);
}

void ResourceTracker::freeDescriptorSetsIfHostAllocated(VkEncoder* enc, VkDevice device,
                                                        uint32_t descriptorSetCount,
                                                        const VkDescriptorSet* sets) {
    for (uint32_t i = 0; i < descriptorSetCount; ++i) {
        struct goldfish_VkDescriptorSet* ds = as_goldfish_VkDescriptorSet(sets[i]);
        if (ds->reified->allocationPending) {
            unregister_VkDescriptorSet(sets[i]);
            delete_goldfish_VkDescriptorSet(sets[i]);
        } else {
            enc->vkFreeDescriptorSets(device, ds->reified->pool, 1, &sets[i], false /* no lock */);
        }
    }
}

clearDescriptorPoolAndUnregisterDescriptorSets(void * context,VkDevice device,VkDescriptorPool pool)1231 void ResourceTracker::clearDescriptorPoolAndUnregisterDescriptorSets(void* context, VkDevice device,
1232                                                                      VkDescriptorPool pool) {
1233     std::vector<VkDescriptorSet> toClear =
1234         clearDescriptorPool(pool, mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate);
1235 
1236     for (auto set : toClear) {
1237         if (mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate) {
1238             VkDescriptorSetLayout setLayout = as_goldfish_VkDescriptorSet(set)->reified->setLayout;
1239             decDescriptorSetLayoutRef(context, device, setLayout, nullptr);
1240         }
1241         unregister_VkDescriptorSet(set);
1242         delete_goldfish_VkDescriptorSet(set);
1243     }
1244 }
1245 
unregister_VkDescriptorPool(VkDescriptorPool pool)1246 void ResourceTracker::unregister_VkDescriptorPool(VkDescriptorPool pool) {
1247     if (!pool) return;
1248 
1249     AutoLock<RecursiveLock> lock(mLock);
1250 
1251     struct goldfish_VkDescriptorPool* dp = as_goldfish_VkDescriptorPool(pool);
1252     delete dp->allocInfo;
1253 
1254     info_VkDescriptorPool.erase(pool);
1255 }
1256 
deviceMemoryTransform_fromhost(VkDeviceMemory * memory,uint32_t memoryCount,VkDeviceSize * offset,uint32_t offsetCount,VkDeviceSize * size,uint32_t sizeCount,uint32_t * typeIndex,uint32_t typeIndexCount,uint32_t * typeBits,uint32_t typeBitsCount)1257 void ResourceTracker::deviceMemoryTransform_fromhost(VkDeviceMemory* memory, uint32_t memoryCount,
1258                                                      VkDeviceSize* offset, uint32_t offsetCount,
1259                                                      VkDeviceSize* size, uint32_t sizeCount,
1260                                                      uint32_t* typeIndex, uint32_t typeIndexCount,
1261                                                      uint32_t* typeBits, uint32_t typeBitsCount) {
1262     (void)memory;
1263     (void)memoryCount;
1264     (void)offset;
1265     (void)offsetCount;
1266     (void)size;
1267     (void)sizeCount;
1268     (void)typeIndex;
1269     (void)typeIndexCount;
1270     (void)typeBits;
1271     (void)typeBitsCount;
1272 }
1273 
transformImpl_VkExternalMemoryProperties_fromhost(VkExternalMemoryProperties * pProperties,uint32_t)1274 void ResourceTracker::transformImpl_VkExternalMemoryProperties_fromhost(
1275     VkExternalMemoryProperties* pProperties, uint32_t) {
1276     VkExternalMemoryHandleTypeFlags supportedHandleType = 0u;
1277 #ifdef VK_USE_PLATFORM_FUCHSIA
1278     supportedHandleType |= VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA;
1279 #endif  // VK_USE_PLATFORM_FUCHSIA
1280 #ifdef VK_USE_PLATFORM_ANDROID_KHR
1281     supportedHandleType |= VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
1282                            VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
1283 #endif  // VK_USE_PLATFORM_ANDROID_KHR
1284     if (supportedHandleType) {
1285         pProperties->compatibleHandleTypes &= supportedHandleType;
1286         pProperties->exportFromImportedHandleTypes &= supportedHandleType;
1287     }
1288 }
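
// A minimal illustration of the masking above (hypothetical host values):
// if the host reports
//     compatibleHandleTypes = OPAQUE_FD_BIT | OPAQUE_WIN32_BIT
// and this guest build only supports OPAQUE_FD_BIT, the `&=` leaves only
// OPAQUE_FD_BIT, so handle types the guest cannot use are never advertised
// to the application.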

void ResourceTracker::setInstanceInfo(VkInstance instance, uint32_t enabledExtensionCount,
                                      const char* const* ppEnabledExtensionNames,
                                      uint32_t apiVersion) {
    AutoLock<RecursiveLock> lock(mLock);
    auto& info = info_VkInstance[instance];
    info.highestApiVersion = apiVersion;

    if (!ppEnabledExtensionNames) return;

    for (uint32_t i = 0; i < enabledExtensionCount; ++i) {
        info.enabledExtensions.insert(ppEnabledExtensionNames[i]);
    }
}

void ResourceTracker::setDeviceInfo(VkDevice device, VkPhysicalDevice physdev,
                                    VkPhysicalDeviceProperties props,
                                    VkPhysicalDeviceMemoryProperties memProps,
                                    uint32_t enabledExtensionCount,
                                    const char* const* ppEnabledExtensionNames, const void* pNext) {
    AutoLock<RecursiveLock> lock(mLock);
    auto& info = info_VkDevice[device];
    info.physdev = physdev;
    info.props = props;
    info.memProps = memProps;
    info.apiVersion = props.apiVersion;

    const VkBaseInStructure* extensionCreateInfo =
        reinterpret_cast<const VkBaseInStructure*>(pNext);
    while (extensionCreateInfo) {
        if (extensionCreateInfo->sType ==
            VK_STRUCTURE_TYPE_DEVICE_DEVICE_MEMORY_REPORT_CREATE_INFO_EXT) {
            auto deviceMemoryReportCreateInfo =
                reinterpret_cast<const VkDeviceDeviceMemoryReportCreateInfoEXT*>(
                    extensionCreateInfo);
            if (deviceMemoryReportCreateInfo->pfnUserCallback != nullptr) {
                info.deviceMemoryReportCallbacks.emplace_back(
                    deviceMemoryReportCreateInfo->pfnUserCallback,
                    deviceMemoryReportCreateInfo->pUserData);
            }
        }
        extensionCreateInfo = extensionCreateInfo->pNext;
    }

    if (!ppEnabledExtensionNames) return;

    for (uint32_t i = 0; i < enabledExtensionCount; ++i) {
        info.enabledExtensions.insert(ppEnabledExtensionNames[i]);
    }
}
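
// Note on the pNext walk in setDeviceInfo() above: the chain is scanned to
// the end, so if more than one VkDeviceDeviceMemoryReportCreateInfoEXT is
// attached (which the Vulkan spec allows for this particular struct), every
// non-null pfnUserCallback/pUserData pair gets recorded in
// deviceMemoryReportCallbacks.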

void ResourceTracker::setDeviceMemoryInfo(VkDevice device, VkDeviceMemory memory,
                                          VkDeviceSize allocationSize, uint8_t* ptr,
                                          uint32_t memoryTypeIndex, AHardwareBuffer* ahw,
                                          bool imported, zx_handle_t vmoHandle,
                                          VirtGpuBlobPtr blobPtr) {
    AutoLock<RecursiveLock> lock(mLock);
    auto& info = info_VkDeviceMemory[memory];

    info.device = device;
    info.allocationSize = allocationSize;
    info.ptr = ptr;
    info.memoryTypeIndex = memoryTypeIndex;
#ifdef VK_USE_PLATFORM_ANDROID_KHR
    info.ahw = ahw;
#endif
    info.imported = imported;
    info.vmoHandle = vmoHandle;
    info.blobPtr = blobPtr;
}

void ResourceTracker::setImageInfo(VkImage image, VkDevice device,
                                   const VkImageCreateInfo* pCreateInfo) {
    AutoLock<RecursiveLock> lock(mLock);
    auto& info = info_VkImage[image];

    info.device = device;
    info.createInfo = *pCreateInfo;
}

uint8_t* ResourceTracker::getMappedPointer(VkDeviceMemory memory) {
    AutoLock<RecursiveLock> lock(mLock);
    const auto it = info_VkDeviceMemory.find(memory);
    if (it == info_VkDeviceMemory.end()) return nullptr;

    const auto& info = it->second;
    return info.ptr;
}

VkDeviceSize ResourceTracker::getMappedSize(VkDeviceMemory memory) {
    AutoLock<RecursiveLock> lock(mLock);
    const auto it = info_VkDeviceMemory.find(memory);
    if (it == info_VkDeviceMemory.end()) return 0;

    const auto& info = it->second;
    return info.allocationSize;
}

bool ResourceTracker::isValidMemoryRange(const VkMappedMemoryRange& range) const {
    AutoLock<RecursiveLock> lock(mLock);
    const auto it = info_VkDeviceMemory.find(range.memory);
    if (it == info_VkDeviceMemory.end()) return false;
    const auto& info = it->second;

    if (!info.ptr) return false;

    VkDeviceSize offset = range.offset;
    VkDeviceSize size = range.size;

    if (size == VK_WHOLE_SIZE) {
        return offset <= info.allocationSize;
    }

    return offset + size <= info.allocationSize;
}
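
// Worked example for the range check above (hypothetical numbers), with an
// allocation of size 4096:
//   offset = 0,    size = VK_WHOLE_SIZE -> valid   (0 <= 4096)
//   offset = 1024, size = 3072          -> valid   (1024 + 3072 <= 4096)
//   offset = 1024, size = 4096          -> invalid (5120 > 4096)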

void ResourceTracker::setupCaps(uint32_t& noRenderControlEnc) {
    VirtGpuDevice* instance = VirtGpuDevice::getInstance(kCapsetGfxStreamVulkan);
    mCaps = instance->getCaps();

    // Delete once goldfish Linux drivers are gone
    if (mCaps.vulkanCapset.protocolVersion == 0) {
        mCaps.vulkanCapset.colorBufferMemoryIndex = 0xFFFFFFFF;
    } else {
        // Don't query the render control encoder for features, since for virtio-gpu the
        // capabilities provide versioning. Set features to be unconditionally true, since
        // using virtio-gpu encompasses all prior goldfish features. mFeatureInfo should be
        // deprecated in favor of caps.

        mFeatureInfo.reset(new EmulatorFeatureInfo);

        mFeatureInfo->hasVulkanNullOptionalStrings = true;
        mFeatureInfo->hasVulkanIgnoredHandles = true;
        mFeatureInfo->hasVulkanShaderFloat16Int8 = true;
        mFeatureInfo->hasVulkanQueueSubmitWithCommands = true;
        mFeatureInfo->hasDeferredVulkanCommands = true;
        mFeatureInfo->hasVulkanAsyncQueueSubmit = true;
        mFeatureInfo->hasVulkanCreateResourcesWithRequirements = true;
        mFeatureInfo->hasVirtioGpuNext = true;
        mFeatureInfo->hasVirtioGpuNativeSync = true;
        mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate = true;
        mFeatureInfo->hasVulkanAsyncQsri = true;

        ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_NULL_OPTIONAL_STRINGS_BIT;
        ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT;
        ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_SHADER_FLOAT16_INT8_BIT;
        ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
    }

    noRenderControlEnc = mCaps.vulkanCapset.noRenderControlEnc;
}
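
// Note: 0xFFFFFFFF serves as a "not yet known" sentinel for
// colorBufferMemoryIndex; it is resolved lazily via getColorBufferMemoryIndex()
// on first use (see on_vkGetAndroidHardwareBufferPropertiesANDROID below).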

void ResourceTracker::setupFeatures(const EmulatorFeatureInfo* features) {
    if (!features || mFeatureInfo) return;
    mFeatureInfo.reset(new EmulatorFeatureInfo);
    *mFeatureInfo = *features;

#if defined(__ANDROID__)
    if (mFeatureInfo->hasDirectMem) {
        mGoldfishAddressSpaceBlockProvider.reset(
            new GoldfishAddressSpaceBlockProvider(GoldfishAddressSpaceSubdeviceType::NoSubdevice));
    }
#endif  // defined(__ANDROID__)

#ifdef VK_USE_PLATFORM_FUCHSIA
    if (mFeatureInfo->hasVulkan) {
        fidl::ClientEnd<fuchsia_hardware_goldfish::ControlDevice> channel{zx::channel(
            GetConnectToServiceFunction()("/loader-gpu-devices/class/goldfish-control/000"))};
        if (!channel) {
            ALOGE("failed to open control device");
            abort();
        }
        mControlDevice =
            fidl::WireSyncClient<fuchsia_hardware_goldfish::ControlDevice>(std::move(channel));

        fidl::ClientEnd<fuchsia_sysmem::Allocator> sysmem_channel{
            zx::channel(GetConnectToServiceFunction()("/svc/fuchsia.sysmem.Allocator"))};
        if (!sysmem_channel) {
            ALOGE("failed to open sysmem connection");
        }
        mSysmemAllocator =
            fidl::WireSyncClient<fuchsia_sysmem::Allocator>(std::move(sysmem_channel));
        char name[ZX_MAX_NAME_LEN] = {};
        zx_object_get_property(zx_process_self(), ZX_PROP_NAME, name, sizeof(name));
        std::string client_name(name);
        client_name += "-goldfish";
        zx_info_handle_basic_t info;
        zx_object_get_info(zx_process_self(), ZX_INFO_HANDLE_BASIC, &info, sizeof(info), nullptr,
                           nullptr);
        mSysmemAllocator->SetDebugClientInfo(fidl::StringView::FromExternal(client_name),
                                             info.koid);
    }
#endif

    if (mFeatureInfo->hasVulkanNullOptionalStrings) {
        ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_NULL_OPTIONAL_STRINGS_BIT;
    }
    if (mFeatureInfo->hasVulkanIgnoredHandles) {
        ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT;
    }
    if (mFeatureInfo->hasVulkanShaderFloat16Int8) {
        ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_SHADER_FLOAT16_INT8_BIT;
    }
    if (mFeatureInfo->hasVulkanQueueSubmitWithCommands) {
        ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
    }
}

void ResourceTracker::setThreadingCallbacks(const ResourceTracker::ThreadingCallbacks& callbacks) {
    ResourceTracker::threadingCallbacks = callbacks;
}

bool ResourceTracker::hostSupportsVulkan() const {
    if (!mFeatureInfo) return false;

    return mFeatureInfo->hasVulkan;
}

bool ResourceTracker::usingDirectMapping() const { return true; }

uint32_t ResourceTracker::getStreamFeatures() const { return ResourceTracker::streamFeatureBits; }

bool ResourceTracker::supportsDeferredCommands() const {
    if (!mFeatureInfo) return false;
    return mFeatureInfo->hasDeferredVulkanCommands;
}

bool ResourceTracker::supportsAsyncQueueSubmit() const {
    if (!mFeatureInfo) return false;
    return mFeatureInfo->hasVulkanAsyncQueueSubmit;
}

bool ResourceTracker::supportsCreateResourcesWithRequirements() const {
    if (!mFeatureInfo) return false;
    return mFeatureInfo->hasVulkanCreateResourcesWithRequirements;
}

int ResourceTracker::getHostInstanceExtensionIndex(const std::string& extName) const {
    int i = 0;
    for (const auto& prop : mHostInstanceExtensions) {
        if (extName == std::string(prop.extensionName)) {
            return i;
        }
        ++i;
    }
    return -1;
}

int ResourceTracker::getHostDeviceExtensionIndex(const std::string& extName) const {
    int i = 0;
    for (const auto& prop : mHostDeviceExtensions) {
        if (extName == std::string(prop.extensionName)) {
            return i;
        }
        ++i;
    }
    return -1;
}

void ResourceTracker::deviceMemoryTransform_tohost(VkDeviceMemory* memory, uint32_t memoryCount,
                                                   VkDeviceSize* offset, uint32_t offsetCount,
                                                   VkDeviceSize* size, uint32_t sizeCount,
                                                   uint32_t* typeIndex, uint32_t typeIndexCount,
                                                   uint32_t* typeBits, uint32_t typeBitsCount) {
    (void)memoryCount;
    (void)offsetCount;
    (void)sizeCount;
    (void)typeIndex;
    (void)typeIndexCount;
    (void)typeBits;
    (void)typeBitsCount;

    if (memory) {
        AutoLock<RecursiveLock> lock(mLock);

        for (uint32_t i = 0; i < memoryCount; ++i) {
            VkDeviceMemory mem = memory[i];

            auto it = info_VkDeviceMemory.find(mem);
            if (it == info_VkDeviceMemory.end()) return;

            const auto& info = it->second;

            if (!info.coherentMemory) continue;

            memory[i] = info.coherentMemory->getDeviceMemory();

            if (offset) {
                offset[i] = info.coherentMemoryOffset + offset[i];
            }

            if (size && size[i] == VK_WHOLE_SIZE) {
                size[i] = info.allocationSize;
            }

            // TODO
            (void)memory;
            (void)offset;
            (void)size;
        }
    }
}
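
// Sketch of the offset rebasing above (hypothetical numbers): if a guest
// VkDeviceMemory is suballocated at coherentMemoryOffset 0x1000 inside a
// host coherent block, a guest offset of 0x20 is rewritten to 0x1020 before
// being sent to the host, and VK_WHOLE_SIZE is expanded to the guest
// allocation's size rather than the size of the whole host block.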

uint32_t ResourceTracker::getColorBufferMemoryIndex(void* context, VkDevice device) {
    // Create a test image to get the memory requirements
    VkEncoder* enc = (VkEncoder*)context;
    VkImageCreateInfo createInfo = {
        .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
        .imageType = VK_IMAGE_TYPE_2D,
        .format = VK_FORMAT_R8G8B8A8_UNORM,
        .extent = {64, 64, 1},
        .mipLevels = 1,
        .arrayLayers = 1,
        .samples = VK_SAMPLE_COUNT_1_BIT,
        .tiling = VK_IMAGE_TILING_OPTIMAL,
        .usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT |
                 VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
                 VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT,
        .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
    };
    VkImage image = VK_NULL_HANDLE;
    VkResult res = enc->vkCreateImage(device, &createInfo, nullptr, &image, true /* do lock */);

    if (res != VK_SUCCESS) {
        return 0;
    }

    VkMemoryRequirements memReqs;
    enc->vkGetImageMemoryRequirements(device, image, &memReqs, true /* do lock */);
    enc->vkDestroyImage(device, image, nullptr, true /* do lock */);

    const VkPhysicalDeviceMemoryProperties& memProps =
        getPhysicalDeviceMemoryProperties(context, device, VK_NULL_HANDLE);

    // Currently, the host looks for the last index whose memory property
    // flags include VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT
    VkMemoryPropertyFlags memoryProperty = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    for (int i = VK_MAX_MEMORY_TYPES - 1; i >= 0; --i) {
        if ((memReqs.memoryTypeBits & (1u << i)) &&
            (memProps.memoryTypes[i].propertyFlags & memoryProperty)) {
            return i;
        }
    }

    return 0;
}
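
// Example of the scan above (hypothetical values): with
// memReqs.memoryTypeBits == 0b0101 and DEVICE_LOCAL set on types {0, 2},
// the top-down loop returns 2, matching the host's preference for the
// highest qualifying index.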

VkResult ResourceTracker::on_vkEnumerateInstanceExtensionProperties(
    void* context, VkResult, const char*, uint32_t* pPropertyCount,
    VkExtensionProperties* pProperties) {
    std::vector<const char*> allowedExtensionNames = {
        "VK_KHR_get_physical_device_properties2",
        "VK_KHR_sampler_ycbcr_conversion",
#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
        "VK_KHR_external_semaphore_capabilities",
        "VK_KHR_external_memory_capabilities",
        "VK_KHR_external_fence_capabilities",
        "VK_EXT_debug_utils",
#endif
    };

    VkEncoder* enc = (VkEncoder*)context;

    // Only advertise a select set of extensions.
    if (mHostInstanceExtensions.empty()) {
        uint32_t hostPropCount = 0;
        enc->vkEnumerateInstanceExtensionProperties(nullptr, &hostPropCount, nullptr,
                                                    true /* do lock */);
        mHostInstanceExtensions.resize(hostPropCount);

        VkResult hostRes = enc->vkEnumerateInstanceExtensionProperties(
            nullptr, &hostPropCount, mHostInstanceExtensions.data(), true /* do lock */);

        if (hostRes != VK_SUCCESS) {
            return hostRes;
        }
    }

    std::vector<VkExtensionProperties> filteredExts;

    for (size_t i = 0; i < allowedExtensionNames.size(); ++i) {
        auto extIndex = getHostInstanceExtensionIndex(allowedExtensionNames[i]);
        if (extIndex != -1) {
            filteredExts.push_back(mHostInstanceExtensions[extIndex]);
        }
    }

    VkExtensionProperties anbExtProps[] = {
#ifdef VK_USE_PLATFORM_FUCHSIA
        {"VK_KHR_external_memory_capabilities", 1},
        {"VK_KHR_external_semaphore_capabilities", 1},
#endif
    };

    for (auto& anbExtProp : anbExtProps) {
        filteredExts.push_back(anbExtProp);
    }

    // Spec:
    //
    // https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/vkEnumerateInstanceExtensionProperties.html
    //
    // If pProperties is NULL, then the number of extensions properties
    // available is returned in pPropertyCount. Otherwise, pPropertyCount
    // must point to a variable set by the user to the number of elements
    // in the pProperties array, and on return the variable is overwritten
    // with the number of structures actually written to pProperties. If
    // pPropertyCount is less than the number of extension properties
    // available, at most pPropertyCount structures will be written. If
    // pPropertyCount is smaller than the number of extensions available,
    // VK_INCOMPLETE will be returned instead of VK_SUCCESS, to indicate
    // that not all the available properties were returned.
    //
    // pPropertyCount must be a valid pointer to a uint32_t value
    if (!pPropertyCount) return VK_ERROR_INITIALIZATION_FAILED;

    if (!pProperties) {
        *pPropertyCount = (uint32_t)filteredExts.size();
        return VK_SUCCESS;
    } else {
        auto actualExtensionCount = (uint32_t)filteredExts.size();
        if (*pPropertyCount > actualExtensionCount) {
            *pPropertyCount = actualExtensionCount;
        }

        for (uint32_t i = 0; i < *pPropertyCount; ++i) {
            pProperties[i] = filteredExts[i];
        }

        if (actualExtensionCount > *pPropertyCount) {
            return VK_INCOMPLETE;
        }

        return VK_SUCCESS;
    }
}
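
// A minimal sketch of the standard two-call pattern served above, from a
// hypothetical application's point of view:
//
//   uint32_t count = 0;
//   vkEnumerateInstanceExtensionProperties(nullptr, &count, nullptr);
//   std::vector<VkExtensionProperties> exts(count);
//   vkEnumerateInstanceExtensionProperties(nullptr, &count, exts.data());
//
// VK_INCOMPLETE is returned only when the caller-provided count is smaller
// than the number of filtered extensions.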

VkResult ResourceTracker::on_vkEnumerateDeviceExtensionProperties(
    void* context, VkResult, VkPhysicalDevice physdev, const char*, uint32_t* pPropertyCount,
    VkExtensionProperties* pProperties) {
    std::vector<const char*> allowedExtensionNames = {
        "VK_KHR_vulkan_memory_model",
        "VK_KHR_buffer_device_address",
        "VK_KHR_maintenance1",
        "VK_KHR_maintenance2",
        "VK_KHR_maintenance3",
        "VK_KHR_bind_memory2",
        "VK_KHR_dedicated_allocation",
        "VK_KHR_get_memory_requirements2",
        "VK_KHR_sampler_ycbcr_conversion",
        "VK_KHR_shader_float16_int8",
    // Timeline semaphores are buggy in newer NVIDIA drivers
    // (vkWaitSemaphoresKHR causes further vkCommandBuffer dispatches to deadlock)
#ifndef VK_USE_PLATFORM_ANDROID_KHR
        "VK_KHR_timeline_semaphore",
#endif
        "VK_AMD_gpu_shader_half_float",
        "VK_NV_shader_subgroup_partitioned",
        "VK_KHR_shader_subgroup_extended_types",
        "VK_EXT_subgroup_size_control",
        "VK_EXT_provoking_vertex",
        "VK_EXT_line_rasterization",
        "VK_KHR_shader_terminate_invocation",
        "VK_EXT_transform_feedback",
        "VK_EXT_primitive_topology_list_restart",
        "VK_EXT_index_type_uint8",
        "VK_EXT_load_store_op_none",
        "VK_EXT_swapchain_colorspace",
        "VK_EXT_image_robustness",
        "VK_EXT_custom_border_color",
        "VK_EXT_shader_stencil_export",
        "VK_KHR_image_format_list",
        "VK_KHR_incremental_present",
        "VK_KHR_pipeline_executable_properties",
        "VK_EXT_queue_family_foreign",
        "VK_EXT_scalar_block_layout",
        "VK_KHR_descriptor_update_template",
        "VK_KHR_storage_buffer_storage_class",
        "VK_EXT_depth_clip_enable",
        "VK_KHR_create_renderpass2",
#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
        "VK_KHR_external_semaphore",
        "VK_KHR_external_semaphore_fd",
        // "VK_KHR_external_semaphore_win32", not exposed because it's translated to fd
        "VK_KHR_external_memory",
        "VK_KHR_external_fence",
        "VK_KHR_external_fence_fd",
        "VK_EXT_device_memory_report",
#endif
#if defined(__linux__) && !defined(VK_USE_PLATFORM_ANDROID_KHR)
        "VK_KHR_imageless_framebuffer",
#endif
        // Vulkan 1.3
        "VK_KHR_synchronization2",
        "VK_EXT_private_data",
    };

    VkEncoder* enc = (VkEncoder*)context;

    if (mHostDeviceExtensions.empty()) {
        uint32_t hostPropCount = 0;
        enc->vkEnumerateDeviceExtensionProperties(physdev, nullptr, &hostPropCount, nullptr,
                                                  true /* do lock */);
        mHostDeviceExtensions.resize(hostPropCount);

        VkResult hostRes = enc->vkEnumerateDeviceExtensionProperties(
            physdev, nullptr, &hostPropCount, mHostDeviceExtensions.data(), true /* do lock */);

        if (hostRes != VK_SUCCESS) {
            return hostRes;
        }
    }

    std::vector<VkExtensionProperties> filteredExts;

    for (size_t i = 0; i < allowedExtensionNames.size(); ++i) {
        auto extIndex = getHostDeviceExtensionIndex(allowedExtensionNames[i]);
        if (extIndex != -1) {
            filteredExts.push_back(mHostDeviceExtensions[extIndex]);
        }
    }

    VkExtensionProperties anbExtProps[] = {
#ifdef VK_USE_PLATFORM_ANDROID_KHR
        {"VK_ANDROID_native_buffer", 7},
#endif
#ifdef VK_USE_PLATFORM_FUCHSIA
        {"VK_KHR_external_memory", 1},
        {"VK_KHR_external_semaphore", 1},
        {"VK_FUCHSIA_external_semaphore", 1},
#endif
    };

    for (auto& anbExtProp : anbExtProps) {
        filteredExts.push_back(anbExtProp);
    }

#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
    bool hostSupportsExternalFenceFd =
        getHostDeviceExtensionIndex("VK_KHR_external_fence_fd") != -1;
    if (!hostSupportsExternalFenceFd) {
        filteredExts.push_back(VkExtensionProperties{"VK_KHR_external_fence_fd", 1});
    }
#endif

#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
    bool hostHasPosixExternalSemaphore =
        getHostDeviceExtensionIndex("VK_KHR_external_semaphore_fd") != -1;
    if (!hostHasPosixExternalSemaphore) {
        // Always advertise posix external semaphore capabilities on Android/Linux.
        // SYNC_FD handles will always work, regardless of host support. Support
        // for non-sync, opaque FDs depends on host driver support, but will
        // be handled accordingly by the host.
        filteredExts.push_back(VkExtensionProperties{"VK_KHR_external_semaphore_fd", 1});
    }
#endif

    bool win32ExtMemAvailable = getHostDeviceExtensionIndex("VK_KHR_external_memory_win32") != -1;
    bool posixExtMemAvailable = getHostDeviceExtensionIndex("VK_KHR_external_memory_fd") != -1;
    bool moltenVkExtAvailable = getHostDeviceExtensionIndex("VK_MVK_moltenvk") != -1;
    bool qnxExtMemAvailable =
        getHostDeviceExtensionIndex("VK_QNX_external_memory_screen_buffer") != -1;

    bool hostHasExternalMemorySupport =
        win32ExtMemAvailable || posixExtMemAvailable || moltenVkExtAvailable || qnxExtMemAvailable;

    if (hostHasExternalMemorySupport) {
#ifdef VK_USE_PLATFORM_ANDROID_KHR
        filteredExts.push_back(
            VkExtensionProperties{"VK_ANDROID_external_memory_android_hardware_buffer", 7});
        filteredExts.push_back(VkExtensionProperties{"VK_EXT_queue_family_foreign", 1});
#endif
#ifdef VK_USE_PLATFORM_FUCHSIA
        filteredExts.push_back(VkExtensionProperties{"VK_FUCHSIA_external_memory", 1});
        filteredExts.push_back(VkExtensionProperties{"VK_FUCHSIA_buffer_collection", 1});
#endif
#if !defined(VK_USE_PLATFORM_ANDROID_KHR) && defined(__linux__)
        filteredExts.push_back(VkExtensionProperties{"VK_KHR_external_memory_fd", 1});
        filteredExts.push_back(VkExtensionProperties{"VK_EXT_external_memory_dma_buf", 1});
#endif
    }

    // NOTE: the Vulkan Loader's trampoline functions will remove duplicates. This can lead
    // to errors if this function returns VK_SUCCESS with N elements (including a duplicate)
    // but the Vulkan Loader's trampoline function returns VK_INCOMPLETE with N-1 elements
    // (without the duplicate).
    std::sort(filteredExts.begin(),
              filteredExts.end(),
              [](const VkExtensionProperties& a,
                 const VkExtensionProperties& b) {
                  return strcmp(a.extensionName, b.extensionName) < 0;
              });
    filteredExts.erase(std::unique(filteredExts.begin(),
                                   filteredExts.end(),
                                   [](const VkExtensionProperties& a,
                                      const VkExtensionProperties& b) {
                                       return strcmp(a.extensionName, b.extensionName) == 0;
                                   }),
                       filteredExts.end());
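
    // The sort + std::unique + erase sequence above is the standard C++
    // dedupe idiom: std::unique only collapses adjacent equal elements, so
    // sorting by extensionName first is what makes the dedupe complete.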

    // Spec:
    //
    // https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/vkEnumerateDeviceExtensionProperties.html
    //
    // pPropertyCount is a pointer to an integer related to the number of
    // extension properties available or queried, and is treated in the
    // same fashion as the
    // vkEnumerateInstanceExtensionProperties::pPropertyCount parameter.
    //
    // https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/vkEnumerateInstanceExtensionProperties.html
    //
    // If pProperties is NULL, then the number of extensions properties
    // available is returned in pPropertyCount. Otherwise, pPropertyCount
    // must point to a variable set by the user to the number of elements
    // in the pProperties array, and on return the variable is overwritten
    // with the number of structures actually written to pProperties. If
    // pPropertyCount is less than the number of extension properties
    // available, at most pPropertyCount structures will be written. If
    // pPropertyCount is smaller than the number of extensions available,
    // VK_INCOMPLETE will be returned instead of VK_SUCCESS, to indicate
    // that not all the available properties were returned.
    //
    // pPropertyCount must be a valid pointer to a uint32_t value

    if (!pPropertyCount) return VK_ERROR_INITIALIZATION_FAILED;

    if (!pProperties) {
        *pPropertyCount = (uint32_t)filteredExts.size();
        return VK_SUCCESS;
    } else {
        auto actualExtensionCount = (uint32_t)filteredExts.size();
        if (*pPropertyCount > actualExtensionCount) {
            *pPropertyCount = actualExtensionCount;
        }

        for (uint32_t i = 0; i < *pPropertyCount; ++i) {
            pProperties[i] = filteredExts[i];
        }

        if (actualExtensionCount > *pPropertyCount) {
            return VK_INCOMPLETE;
        }

        return VK_SUCCESS;
    }
}

VkResult ResourceTracker::on_vkEnumeratePhysicalDevices(void* context, VkResult,
                                                        VkInstance instance,
                                                        uint32_t* pPhysicalDeviceCount,
                                                        VkPhysicalDevice* pPhysicalDevices) {
    VkEncoder* enc = (VkEncoder*)context;

    if (!instance) return VK_ERROR_INITIALIZATION_FAILED;

    if (!pPhysicalDeviceCount) return VK_ERROR_INITIALIZATION_FAILED;

    AutoLock<RecursiveLock> lock(mLock);

    // When this function is called, we actually need to do two things:
    // - Get full information about physical devices from the host,
    // even if the guest did not ask for it
    // - Serve the guest query according to the spec:
    //
    // https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/vkEnumeratePhysicalDevices.html

    auto it = info_VkInstance.find(instance);

    if (it == info_VkInstance.end()) return VK_ERROR_INITIALIZATION_FAILED;

    auto& info = it->second;

    // Get the full host information here if it doesn't exist already.
    if (info.physicalDevices.empty()) {
        uint32_t hostPhysicalDeviceCount = 0;

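        // Drop mLock while calling into the encoder and re-acquire it
        // afterwards (the calls below also pass /* no lock */), presumably to
        // avoid holding the tracker lock across a host round-trip.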
        lock.unlock();
        VkResult countRes = enc->vkEnumeratePhysicalDevices(instance, &hostPhysicalDeviceCount,
                                                            nullptr, false /* no lock */);
        lock.lock();

        if (countRes != VK_SUCCESS) {
            ALOGE(
                "%s: failed: could not count host physical devices. "
                "Error %d\n",
                __func__, countRes);
            return countRes;
        }

        info.physicalDevices.resize(hostPhysicalDeviceCount);

        lock.unlock();
        VkResult enumRes = enc->vkEnumeratePhysicalDevices(
            instance, &hostPhysicalDeviceCount, info.physicalDevices.data(), false /* no lock */);
        lock.lock();

        if (enumRes != VK_SUCCESS) {
            ALOGE(
                "%s: failed: could not retrieve host physical devices. "
                "Error %d\n",
                __func__, enumRes);
            return enumRes;
        }
    }

    // Serve the guest query according to the spec.
    //
    // https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/vkEnumeratePhysicalDevices.html
    //
    // If pPhysicalDevices is NULL, then the number of physical devices
    // available is returned in pPhysicalDeviceCount. Otherwise,
    // pPhysicalDeviceCount must point to a variable set by the user to the
    // number of elements in the pPhysicalDevices array, and on return the
    // variable is overwritten with the number of handles actually written
    // to pPhysicalDevices. If pPhysicalDeviceCount is less than the number
    // of physical devices available, at most pPhysicalDeviceCount
    // structures will be written. If pPhysicalDeviceCount is smaller than
    // the number of physical devices available, VK_INCOMPLETE will be
    // returned instead of VK_SUCCESS, to indicate that not all the
    // available physical devices were returned.

    if (!pPhysicalDevices) {
        *pPhysicalDeviceCount = (uint32_t)info.physicalDevices.size();
        return VK_SUCCESS;
    } else {
        uint32_t actualDeviceCount = (uint32_t)info.physicalDevices.size();
        uint32_t toWrite =
            actualDeviceCount < *pPhysicalDeviceCount ? actualDeviceCount : *pPhysicalDeviceCount;

        for (uint32_t i = 0; i < toWrite; ++i) {
            pPhysicalDevices[i] = info.physicalDevices[i];
        }

        *pPhysicalDeviceCount = toWrite;

        if (actualDeviceCount > *pPhysicalDeviceCount) {
            return VK_INCOMPLETE;
        }

        return VK_SUCCESS;
    }
}

void ResourceTracker::on_vkGetPhysicalDeviceProperties(void*, VkPhysicalDevice,
                                                       VkPhysicalDeviceProperties* pProperties) {
#if defined(__linux__) && !defined(VK_USE_PLATFORM_ANDROID_KHR)
    if (pProperties) {
        if (VK_PHYSICAL_DEVICE_TYPE_CPU == pProperties->deviceType) {
            /* For Linux guest: Even if the host driver reports DEVICE_TYPE_CPU,
             * override this to VIRTUAL_GPU; otherwise Linux DRM interfaces
             * will take unexpected code paths to deal with a "software" driver.
             */
            pProperties->deviceType = VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU;
        }
    }
#endif
}

void ResourceTracker::on_vkGetPhysicalDeviceFeatures2(void*, VkPhysicalDevice,
                                                      VkPhysicalDeviceFeatures2* pFeatures) {
    if (pFeatures) {
        VkPhysicalDeviceDeviceMemoryReportFeaturesEXT* memoryReportFeaturesEXT =
            vk_find_struct<VkPhysicalDeviceDeviceMemoryReportFeaturesEXT>(pFeatures);
        if (memoryReportFeaturesEXT) {
            memoryReportFeaturesEXT->deviceMemoryReport = VK_TRUE;
        }
    }
}

void ResourceTracker::on_vkGetPhysicalDeviceFeatures2KHR(void* context,
                                                         VkPhysicalDevice physicalDevice,
                                                         VkPhysicalDeviceFeatures2* pFeatures) {
    on_vkGetPhysicalDeviceFeatures2(context, physicalDevice, pFeatures);
}

void ResourceTracker::on_vkGetPhysicalDeviceProperties2(void* context,
                                                        VkPhysicalDevice physicalDevice,
                                                        VkPhysicalDeviceProperties2* pProperties) {
    if (pProperties) {
        VkPhysicalDeviceDeviceMemoryReportFeaturesEXT* memoryReportFeaturesEXT =
            vk_find_struct<VkPhysicalDeviceDeviceMemoryReportFeaturesEXT>(pProperties);
        if (memoryReportFeaturesEXT) {
            memoryReportFeaturesEXT->deviceMemoryReport = VK_TRUE;
        }
        on_vkGetPhysicalDeviceProperties(context, physicalDevice, &pProperties->properties);
    }
}

void ResourceTracker::on_vkGetPhysicalDeviceProperties2KHR(
    void* context, VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties2* pProperties) {
    on_vkGetPhysicalDeviceProperties2(context, physicalDevice, pProperties);
}

void ResourceTracker::on_vkGetPhysicalDeviceMemoryProperties(
    void* context, VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties* out) {
    // gfxstream decides which physical device to expose to the guest on startup.
    // Otherwise, we would need a physical-device-to-properties mapping.
    *out = getPhysicalDeviceMemoryProperties(context, VK_NULL_HANDLE, physicalDevice);
}

void ResourceTracker::on_vkGetPhysicalDeviceMemoryProperties2(
    void*, VkPhysicalDevice physdev, VkPhysicalDeviceMemoryProperties2* out) {
    on_vkGetPhysicalDeviceMemoryProperties(nullptr, physdev, &out->memoryProperties);
}

void ResourceTracker::on_vkGetDeviceQueue(void*, VkDevice device, uint32_t, uint32_t,
                                          VkQueue* pQueue) {
    AutoLock<RecursiveLock> lock(mLock);
    info_VkQueue[*pQueue].device = device;
}

void ResourceTracker::on_vkGetDeviceQueue2(void*, VkDevice device, const VkDeviceQueueInfo2*,
                                           VkQueue* pQueue) {
    AutoLock<RecursiveLock> lock(mLock);
    info_VkQueue[*pQueue].device = device;
}

VkResult ResourceTracker::on_vkCreateInstance(void* context, VkResult input_result,
                                              const VkInstanceCreateInfo* createInfo,
                                              const VkAllocationCallbacks*, VkInstance* pInstance) {
    if (input_result != VK_SUCCESS) return input_result;

    VkEncoder* enc = (VkEncoder*)context;

    uint32_t apiVersion;
    VkResult enumInstanceVersionRes =
        enc->vkEnumerateInstanceVersion(&apiVersion, false /* no lock */);
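    // Note: the result of vkEnumerateInstanceVersion is not checked here;
    // apiVersion is recorded as returned by the encoder.
    (void)enumInstanceVersionRes;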

    setInstanceInfo(*pInstance, createInfo->enabledExtensionCount,
                    createInfo->ppEnabledExtensionNames, apiVersion);

    return input_result;
}

VkResult ResourceTracker::on_vkCreateDevice(void* context, VkResult input_result,
                                            VkPhysicalDevice physicalDevice,
                                            const VkDeviceCreateInfo* pCreateInfo,
                                            const VkAllocationCallbacks*, VkDevice* pDevice) {
    if (input_result != VK_SUCCESS) return input_result;

    VkEncoder* enc = (VkEncoder*)context;

    VkPhysicalDeviceProperties props;
    VkPhysicalDeviceMemoryProperties memProps;
    enc->vkGetPhysicalDeviceProperties(physicalDevice, &props, false /* no lock */);
    enc->vkGetPhysicalDeviceMemoryProperties(physicalDevice, &memProps, false /* no lock */);

    setDeviceInfo(*pDevice, physicalDevice, props, memProps, pCreateInfo->enabledExtensionCount,
                  pCreateInfo->ppEnabledExtensionNames, pCreateInfo->pNext);

    return input_result;
}

void ResourceTracker::on_vkDestroyDevice_pre(void* context, VkDevice device,
                                             const VkAllocationCallbacks*) {
    (void)context;
    AutoLock<RecursiveLock> lock(mLock);

    auto it = info_VkDevice.find(device);
    if (it == info_VkDevice.end()) return;

    for (auto itr = info_VkDeviceMemory.cbegin(); itr != info_VkDeviceMemory.cend();) {
        auto& memInfo = itr->second;
        if (memInfo.device == device) {
            itr = info_VkDeviceMemory.erase(itr);
        } else {
            itr++;
        }
    }
}

#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
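// Collapses the reported mask to exactly the one host-selected memory
// index; note this overwrites, rather than filters, the caller's mask.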
void updateMemoryTypeBits(uint32_t* memoryTypeBits, uint32_t memoryIndex) {
    *memoryTypeBits = 1u << memoryIndex;
}
#endif

#ifdef VK_USE_PLATFORM_ANDROID_KHR

VkResult ResourceTracker::on_vkGetAndroidHardwareBufferPropertiesANDROID(
    void* context, VkResult, VkDevice device, const AHardwareBuffer* buffer,
    VkAndroidHardwareBufferPropertiesANDROID* pProperties) {
    auto grallocHelper =
        ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->grallocHelper();

    // Delete once goldfish Linux drivers are gone
    if (mCaps.vulkanCapset.colorBufferMemoryIndex == 0xFFFFFFFF) {
        mCaps.vulkanCapset.colorBufferMemoryIndex = getColorBufferMemoryIndex(context, device);
    }

    updateMemoryTypeBits(&pProperties->memoryTypeBits, mCaps.vulkanCapset.colorBufferMemoryIndex);

    return getAndroidHardwareBufferPropertiesANDROID(grallocHelper, buffer, pProperties);
}

VkResult ResourceTracker::on_vkGetMemoryAndroidHardwareBufferANDROID(
    void*, VkResult, VkDevice device, const VkMemoryGetAndroidHardwareBufferInfoANDROID* pInfo,
    struct AHardwareBuffer** pBuffer) {
    if (!pInfo) return VK_ERROR_INITIALIZATION_FAILED;
    if (!pInfo->memory) return VK_ERROR_INITIALIZATION_FAILED;

    AutoLock<RecursiveLock> lock(mLock);

    auto deviceIt = info_VkDevice.find(device);

    if (deviceIt == info_VkDevice.end()) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    auto memoryIt = info_VkDeviceMemory.find(pInfo->memory);

    if (memoryIt == info_VkDeviceMemory.end()) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    auto& info = memoryIt->second;

    auto* gralloc = ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->grallocHelper();
    VkResult queryRes = getMemoryAndroidHardwareBufferANDROID(gralloc, &info.ahw);

    if (queryRes != VK_SUCCESS) return queryRes;

    *pBuffer = info.ahw;

    return queryRes;
}
#endif

#ifdef VK_USE_PLATFORM_FUCHSIA
VkResult ResourceTracker::on_vkGetMemoryZirconHandleFUCHSIA(
    void*, VkResult, VkDevice device, const VkMemoryGetZirconHandleInfoFUCHSIA* pInfo,
    uint32_t* pHandle) {
    if (!pInfo) return VK_ERROR_INITIALIZATION_FAILED;
    if (!pInfo->memory) return VK_ERROR_INITIALIZATION_FAILED;

    AutoLock<RecursiveLock> lock(mLock);

    auto deviceIt = info_VkDevice.find(device);

    if (deviceIt == info_VkDevice.end()) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    auto memoryIt = info_VkDeviceMemory.find(pInfo->memory);

    if (memoryIt == info_VkDeviceMemory.end()) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    auto& info = memoryIt->second;

    if (info.vmoHandle == ZX_HANDLE_INVALID) {
        ALOGE("%s: memory cannot be exported", __func__);
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    *pHandle = ZX_HANDLE_INVALID;
    zx_handle_duplicate(info.vmoHandle, ZX_RIGHT_SAME_RIGHTS, pHandle);
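    // Note: the duplicate status is not checked here; on failure *pHandle
    // simply remains ZX_HANDLE_INVALID from the assignment above.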
    return VK_SUCCESS;
}

VkResult ResourceTracker::on_vkGetMemoryZirconHandlePropertiesFUCHSIA(
    void*, VkResult, VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType,
    uint32_t handle, VkMemoryZirconHandlePropertiesFUCHSIA* pProperties) {
    using fuchsia_hardware_goldfish::wire::kMemoryPropertyDeviceLocal;
    using fuchsia_hardware_goldfish::wire::kMemoryPropertyHostVisible;

    if (handleType != VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    zx_info_handle_basic_t handleInfo;
    zx_status_t status = zx::unowned_vmo(handle)->get_info(ZX_INFO_HANDLE_BASIC, &handleInfo,
                                                           sizeof(handleInfo), nullptr, nullptr);
    if (status != ZX_OK || handleInfo.type != ZX_OBJ_TYPE_VMO) {
        return VK_ERROR_INVALID_EXTERNAL_HANDLE;
    }

    AutoLock<RecursiveLock> lock(mLock);

    auto deviceIt = info_VkDevice.find(device);

    if (deviceIt == info_VkDevice.end()) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    auto& info = deviceIt->second;

    zx::vmo vmo_dup;
    status = zx::unowned_vmo(handle)->duplicate(ZX_RIGHT_SAME_RIGHTS, &vmo_dup);
    if (status != ZX_OK) {
        ALOGE("zx_handle_duplicate() error: %d", status);
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    uint32_t memoryProperty = 0u;

    auto result = mControlDevice->GetBufferHandleInfo(std::move(vmo_dup));
    if (!result.ok()) {
        ALOGE("mControlDevice->GetBufferHandleInfo fatal error: epitaph: %d", result.status());
        return VK_ERROR_INITIALIZATION_FAILED;
    }
    if (result.value().is_ok()) {
        memoryProperty = result.value().value()->info.memory_property();
    } else if (result.value().error_value() == ZX_ERR_NOT_FOUND) {
        // If a VMO is allocated while ColorBuffer/Buffer is not created,
        // it must be a device-local buffer, since for host-visible buffers,
        // ColorBuffer/Buffer is created at sysmem allocation time.
        memoryProperty = kMemoryPropertyDeviceLocal;
    } else {
        // Importing read-only host memory into the Vulkan driver should not
        // work, but it is not an error to try to do so. Returning a
        // VkMemoryZirconHandlePropertiesFUCHSIA with no available
        // memoryType bits should be enough for clients. See fxbug.dev/24225
        // for other issues with this flow.
        ALOGW("GetBufferHandleInfo failed: %d", result.value().error_value());
        pProperties->memoryTypeBits = 0;
        return VK_SUCCESS;
    }

    pProperties->memoryTypeBits = 0;
    for (uint32_t i = 0; i < info.memProps.memoryTypeCount; ++i) {
        if (((memoryProperty & kMemoryPropertyDeviceLocal) &&
             (info.memProps.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)) ||
            ((memoryProperty & kMemoryPropertyHostVisible) &&
             (info.memProps.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT))) {
            pProperties->memoryTypeBits |= 1ull << i;
        }
    }
    return VK_SUCCESS;
}

zx_koid_t getEventKoid(zx_handle_t eventHandle) {
    if (eventHandle == ZX_HANDLE_INVALID) {
        return ZX_KOID_INVALID;
    }

    zx_info_handle_basic_t info;
    zx_status_t status = zx_object_get_info(eventHandle, ZX_INFO_HANDLE_BASIC, &info, sizeof(info),
                                            nullptr, nullptr);
    if (status != ZX_OK) {
        ALOGE("Cannot get object info of handle %u: %d", eventHandle, status);
        return ZX_KOID_INVALID;
    }
    return info.koid;
}
2335 
on_vkImportSemaphoreZirconHandleFUCHSIA(void *,VkResult,VkDevice device,const VkImportSemaphoreZirconHandleInfoFUCHSIA * pInfo)2336 VkResult ResourceTracker::on_vkImportSemaphoreZirconHandleFUCHSIA(
2337     void*, VkResult, VkDevice device, const VkImportSemaphoreZirconHandleInfoFUCHSIA* pInfo) {
2338     if (!pInfo) return VK_ERROR_INITIALIZATION_FAILED;
2339     if (!pInfo->semaphore) return VK_ERROR_INITIALIZATION_FAILED;
2340 
2341     AutoLock<RecursiveLock> lock(mLock);
2342 
2343     auto deviceIt = info_VkDevice.find(device);
2344 
2345     if (deviceIt == info_VkDevice.end()) {
2346         return VK_ERROR_INITIALIZATION_FAILED;
2347     }
2348 
2349     auto semaphoreIt = info_VkSemaphore.find(pInfo->semaphore);
2350 
2351     if (semaphoreIt == info_VkSemaphore.end()) {
2352         return VK_ERROR_INITIALIZATION_FAILED;
2353     }
2354 
2355     auto& info = semaphoreIt->second;
2356 
2357     if (info.eventHandle != ZX_HANDLE_INVALID) {
2358         zx_handle_close(info.eventHandle);
2359     }
2360 #if VK_HEADER_VERSION < 174
2361     info.eventHandle = pInfo->handle;
2362 #else   // VK_HEADER_VERSION >= 174
2363     info.eventHandle = pInfo->zirconHandle;
2364 #endif  // VK_HEADER_VERSION < 174
2365     if (info.eventHandle != ZX_HANDLE_INVALID) {
2366         info.eventKoid = getEventKoid(info.eventHandle);
2367     }
2368 
2369     return VK_SUCCESS;
2370 }
2371 
on_vkGetSemaphoreZirconHandleFUCHSIA(void *,VkResult,VkDevice device,const VkSemaphoreGetZirconHandleInfoFUCHSIA * pInfo,uint32_t * pHandle)2372 VkResult ResourceTracker::on_vkGetSemaphoreZirconHandleFUCHSIA(
2373     void*, VkResult, VkDevice device, const VkSemaphoreGetZirconHandleInfoFUCHSIA* pInfo,
2374     uint32_t* pHandle) {
2375     if (!pInfo) return VK_ERROR_INITIALIZATION_FAILED;
2376     if (!pInfo->semaphore) return VK_ERROR_INITIALIZATION_FAILED;
2377 
2378     AutoLock<RecursiveLock> lock(mLock);
2379 
2380     auto deviceIt = info_VkDevice.find(device);
2381 
2382     if (deviceIt == info_VkDevice.end()) {
2383         return VK_ERROR_INITIALIZATION_FAILED;
2384     }
2385 
2386     auto semaphoreIt = info_VkSemaphore.find(pInfo->semaphore);
2387 
2388     if (semaphoreIt == info_VkSemaphore.end()) {
2389         return VK_ERROR_INITIALIZATION_FAILED;
2390     }
2391 
2392     auto& info = semaphoreIt->second;
2393 
2394     if (info.eventHandle == ZX_HANDLE_INVALID) {
2395         return VK_ERROR_INITIALIZATION_FAILED;
2396     }
2397 
2398     *pHandle = ZX_HANDLE_INVALID;
2399     zx_status_t status = zx_handle_duplicate(info.eventHandle, ZX_RIGHT_SAME_RIGHTS, pHandle);
2400     return status == ZX_OK ? VK_SUCCESS : VK_ERROR_INITIALIZATION_FAILED;
2401 }
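// Hedged usage sketch (application-side; srcSemaphore/dstSemaphore and the
// surrounding setup are assumptions, not taken from this file): the export
// duplicates the payload event, and the import closes any previously imported
// event before adopting the new one. Note the payload field is named `handle`
// before VK_HEADER_VERSION 174 and `zirconHandle` afterwards, as handled above.
#if 0
zx_handle_t payload = ZX_HANDLE_INVALID;
VkSemaphoreGetZirconHandleInfoFUCHSIA getInfo = {
    VK_STRUCTURE_TYPE_SEMAPHORE_GET_ZIRCON_HANDLE_INFO_FUCHSIA, nullptr, srcSemaphore,
    VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA};
vkGetSemaphoreZirconHandleFUCHSIA(device, &getInfo, &payload);

VkImportSemaphoreZirconHandleInfoFUCHSIA importInfo = {
    VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_ZIRCON_HANDLE_INFO_FUCHSIA, nullptr, dstSemaphore,
    0 /* flags */, VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA, payload};
vkImportSemaphoreZirconHandleFUCHSIA(device, &importInfo);
#endif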
2402 
2403 VkResult ResourceTracker::on_vkCreateBufferCollectionFUCHSIA(
2404     void*, VkResult, VkDevice, const VkBufferCollectionCreateInfoFUCHSIA* pInfo,
2405     const VkAllocationCallbacks*, VkBufferCollectionFUCHSIA* pCollection) {
2406     fidl::ClientEnd<::fuchsia_sysmem::BufferCollectionToken> token_client;
2407 
2408     if (pInfo->collectionToken) {
2409         token_client = fidl::ClientEnd<::fuchsia_sysmem::BufferCollectionToken>(
2410             zx::channel(pInfo->collectionToken));
2411     } else {
2412         auto endpoints = fidl::CreateEndpoints<::fuchsia_sysmem::BufferCollectionToken>();
2413         if (!endpoints.is_ok()) {
2414             ALOGE("fidl::CreateEndpoints failed: %d", endpoints.status_value());
2415             return VK_ERROR_INITIALIZATION_FAILED;
2416         }
2417 
2418         auto result = mSysmemAllocator->AllocateSharedCollection(std::move(endpoints->server));
2419         if (!result.ok()) {
2420             ALOGE("AllocateSharedCollection failed: %d", result.status());
2421             return VK_ERROR_INITIALIZATION_FAILED;
2422         }
2423         token_client = std::move(endpoints->client);
2424     }
2425 
2426     auto endpoints = fidl::CreateEndpoints<::fuchsia_sysmem::BufferCollection>();
2427     if (!endpoints.is_ok()) {
2428         ALOGE("fidl::CreateEndpoints failed: %d", endpoints.status_value());
2429         return VK_ERROR_INITIALIZATION_FAILED;
2430     }
2431     auto [collection_client, collection_server] = std::move(endpoints.value());
2432 
2433     auto result = mSysmemAllocator->BindSharedCollection(std::move(token_client),
2434                                                          std::move(collection_server));
2435     if (!result.ok()) {
2436         ALOGE("BindSharedCollection failed: %d", result.status());
2437         return VK_ERROR_INITIALIZATION_FAILED;
2438     }
2439 
2440     auto* sysmem_collection =
2441         new fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>(std::move(collection_client));
2442     *pCollection = reinterpret_cast<VkBufferCollectionFUCHSIA>(sysmem_collection);
2443 
2444     register_VkBufferCollectionFUCHSIA(*pCollection);
2445     return VK_SUCCESS;
2446 }
2447 
2448 void ResourceTracker::on_vkDestroyBufferCollectionFUCHSIA(void*, VkResult, VkDevice,
2449                                                           VkBufferCollectionFUCHSIA collection,
2450                                                           const VkAllocationCallbacks*) {
2451     auto sysmem_collection =
2452         reinterpret_cast<fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(collection);
2453     if (sysmem_collection) {
2454         (*sysmem_collection)->Close();
2455     }
2456     delete sysmem_collection;
2457 
2458     unregister_VkBufferCollectionFUCHSIA(collection);
2459 }
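// Hedged usage sketch for the create/destroy pair above (names assumed).
// Passing ZX_HANDLE_INVALID as the token asks this layer to allocate a fresh
// sysmem collection token via AllocateSharedCollection.
#if 0
VkBufferCollectionCreateInfoFUCHSIA createInfo = {
    VK_STRUCTURE_TYPE_BUFFER_COLLECTION_CREATE_INFO_FUCHSIA, nullptr,
    ZX_HANDLE_INVALID /* collectionToken */};
VkBufferCollectionFUCHSIA collection;
vkCreateBufferCollectionFUCHSIA(device, &createInfo, nullptr, &collection);
// ... set constraints, allocate, query properties ...
vkDestroyBufferCollectionFUCHSIA(device, collection, nullptr);
#endif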
2460 
2461 SetBufferCollectionImageConstraintsResult ResourceTracker::setBufferCollectionImageConstraintsImpl(
2462     VkEncoder* enc, VkDevice device,
2463     fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>* pCollection,
2464     const VkImageConstraintsInfoFUCHSIA* pImageConstraintsInfo) {
2465     const auto& collection = *pCollection;
2466     if (!pImageConstraintsInfo ||
2467         pImageConstraintsInfo->sType != VK_STRUCTURE_TYPE_IMAGE_CONSTRAINTS_INFO_FUCHSIA) {
2468         ALOGE("%s: invalid pImageConstraintsInfo", __func__);
2469         return {VK_ERROR_INITIALIZATION_FAILED};
2470     }
2471 
2472     if (pImageConstraintsInfo->formatConstraintsCount == 0) {
2473         ALOGE("%s: formatConstraintsCount must be greater than 0", __func__);
2474         abort();
2475     }
2476 
2477     fuchsia_sysmem::wire::BufferCollectionConstraints constraints =
2478         defaultBufferCollectionConstraints(
2479             /* min_size_bytes */ 0,
2480             pImageConstraintsInfo->bufferCollectionConstraints.minBufferCount,
2481             pImageConstraintsInfo->bufferCollectionConstraints.maxBufferCount,
2482             pImageConstraintsInfo->bufferCollectionConstraints.minBufferCountForCamping,
2483             pImageConstraintsInfo->bufferCollectionConstraints.minBufferCountForDedicatedSlack,
2484             pImageConstraintsInfo->bufferCollectionConstraints.minBufferCountForSharedSlack);
2485 
2486     std::vector<fuchsia_sysmem::wire::ImageFormatConstraints> format_constraints;
2487 
2488     VkPhysicalDevice physicalDevice;
2489     {
2490         AutoLock<RecursiveLock> lock(mLock);
2491         auto deviceIt = info_VkDevice.find(device);
2492         if (deviceIt == info_VkDevice.end()) {
2493             return {VK_ERROR_INITIALIZATION_FAILED};
2494         }
2495         physicalDevice = deviceIt->second.physdev;
2496     }
2497 
2498     std::vector<uint32_t> createInfoIndex;
2499 
2500     bool hasOptimalTiling = false;
2501     for (uint32_t i = 0; i < pImageConstraintsInfo->formatConstraintsCount; i++) {
2502         const VkImageCreateInfo* createInfo =
2503             &pImageConstraintsInfo->pFormatConstraints[i].imageCreateInfo;
2504         const VkImageFormatConstraintsInfoFUCHSIA* formatConstraints =
2505             &pImageConstraintsInfo->pFormatConstraints[i];
2506 
2507         // Add ImageFormatConstraints for *optimal* tiling
2508         VkResult optimalResult = VK_ERROR_FORMAT_NOT_SUPPORTED;
2509         if (createInfo->tiling == VK_IMAGE_TILING_OPTIMAL) {
2510             optimalResult = addImageBufferCollectionConstraintsFUCHSIA(
2511                 enc, device, physicalDevice, formatConstraints, VK_IMAGE_TILING_OPTIMAL,
2512                 &constraints);
2513             if (optimalResult == VK_SUCCESS) {
2514                 createInfoIndex.push_back(i);
2515                 hasOptimalTiling = true;
2516             }
2517         }
2518 
2519         // Add ImageFormatConstraints for *linear* tiling
2520         VkResult linearResult = addImageBufferCollectionConstraintsFUCHSIA(
2521             enc, device, physicalDevice, formatConstraints, VK_IMAGE_TILING_LINEAR, &constraints);
2522         if (linearResult == VK_SUCCESS) {
2523             createInfoIndex.push_back(i);
2524         }
2525 
2526         // Update usage and BufferMemoryConstraints
2527         if (linearResult == VK_SUCCESS || optimalResult == VK_SUCCESS) {
2528             constraints.usage.vulkan |= getBufferCollectionConstraintsVulkanImageUsage(createInfo);
2529 
2530             if (formatConstraints && formatConstraints->flags) {
2531                 ALOGW(
2532                     "%s: Non-zero flags (%08x) in image format "
2533                     "constraints; this is currently not supported, see "
2534                     "fxbug.dev/68833.",
2535                     __func__, formatConstraints->flags);
2536             }
2537         }
2538     }
2539 
2540     // Set buffer memory constraints based on optimal/linear tiling support
2541     // and flags.
2542     VkImageConstraintsInfoFlagsFUCHSIA flags = pImageConstraintsInfo->flags;
2543     if (flags & VK_IMAGE_CONSTRAINTS_INFO_CPU_READ_RARELY_FUCHSIA)
2544         constraints.usage.cpu |= fuchsia_sysmem::wire::kCpuUsageRead;
2545     if (flags & VK_IMAGE_CONSTRAINTS_INFO_CPU_READ_OFTEN_FUCHSIA)
2546         constraints.usage.cpu |= fuchsia_sysmem::wire::kCpuUsageReadOften;
2547     if (flags & VK_IMAGE_CONSTRAINTS_INFO_CPU_WRITE_RARELY_FUCHSIA)
2548         constraints.usage.cpu |= fuchsia_sysmem::wire::kCpuUsageWrite;
2549     if (flags & VK_IMAGE_CONSTRAINTS_INFO_CPU_WRITE_OFTEN_FUCHSIA)
2550         constraints.usage.cpu |= fuchsia_sysmem::wire::kCpuUsageWriteOften;
2551 
2552     constraints.has_buffer_memory_constraints = true;
2553     auto& memory_constraints = constraints.buffer_memory_constraints;
2554     memory_constraints.cpu_domain_supported = true;
2555     memory_constraints.ram_domain_supported = true;
2556     memory_constraints.inaccessible_domain_supported =
2557         hasOptimalTiling && !(flags & (VK_IMAGE_CONSTRAINTS_INFO_CPU_READ_RARELY_FUCHSIA |
2558                                        VK_IMAGE_CONSTRAINTS_INFO_CPU_READ_OFTEN_FUCHSIA |
2559                                        VK_IMAGE_CONSTRAINTS_INFO_CPU_WRITE_RARELY_FUCHSIA |
2560                                        VK_IMAGE_CONSTRAINTS_INFO_CPU_WRITE_OFTEN_FUCHSIA));
2561 
2562     if (memory_constraints.inaccessible_domain_supported) {
2563         memory_constraints.heap_permitted_count = 2;
2564         memory_constraints.heap_permitted[0] = fuchsia_sysmem::wire::HeapType::kGoldfishDeviceLocal;
2565         memory_constraints.heap_permitted[1] = fuchsia_sysmem::wire::HeapType::kGoldfishHostVisible;
2566     } else {
2567         memory_constraints.heap_permitted_count = 1;
2568         memory_constraints.heap_permitted[0] = fuchsia_sysmem::wire::HeapType::kGoldfishHostVisible;
2569     }
2570 
2571     if (constraints.image_format_constraints_count == 0) {
2572         ALOGE("%s: none of the specified formats is supported by device", __func__);
2573         return {VK_ERROR_FORMAT_NOT_SUPPORTED};
2574     }
2575 
2576     constexpr uint32_t kVulkanPriority = 5;
2577     const char kName[] = "GoldfishSysmemShared";
2578     collection->SetName(kVulkanPriority, fidl::StringView(kName));
2579 
2580     auto result = collection->SetConstraints(true, constraints);
2581     if (!result.ok()) {
2582         ALOGE("setBufferCollectionConstraints: SetConstraints failed: %d", result.status());
2583         return {VK_ERROR_INITIALIZATION_FAILED};
2584     }
2585 
2586     return {VK_SUCCESS, constraints, std::move(createInfoIndex)};
2587 }
2588 
2589 VkResult ResourceTracker::setBufferCollectionImageConstraintsFUCHSIA(
2590     VkEncoder* enc, VkDevice device,
2591     fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>* pCollection,
2592     const VkImageConstraintsInfoFUCHSIA* pImageConstraintsInfo) {
2593     const auto& collection = *pCollection;
2594 
2595     auto setConstraintsResult =
2596         setBufferCollectionImageConstraintsImpl(enc, device, pCollection, pImageConstraintsInfo);
2597     if (setConstraintsResult.result != VK_SUCCESS) {
2598         return setConstraintsResult.result;
2599     }
2600 
2601     // copy constraints to info_VkBufferCollectionFUCHSIA if
2602     // |collection| is a valid VkBufferCollectionFUCHSIA handle.
2603     AutoLock<RecursiveLock> lock(mLock);
2604     VkBufferCollectionFUCHSIA buffer_collection =
2605         reinterpret_cast<VkBufferCollectionFUCHSIA>(pCollection);
2606     if (info_VkBufferCollectionFUCHSIA.find(buffer_collection) !=
2607         info_VkBufferCollectionFUCHSIA.end()) {
2608         info_VkBufferCollectionFUCHSIA[buffer_collection].constraints =
2609             gfxstream::guest::makeOptional(std::move(setConstraintsResult.constraints));
2610         info_VkBufferCollectionFUCHSIA[buffer_collection].createInfoIndex =
2611             std::move(setConstraintsResult.createInfoIndex);
2612     }
2613 
2614     return VK_SUCCESS;
2615 }
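// Hedged sketch of the input this path consumes (all names/values below are
// assumptions for illustration): one VkImageCreateInfo per candidate format,
// wrapped in VkImageFormatConstraintsInfoFUCHSIA; the tiling in each
// imageCreateInfo drives the optimal/linear branches above.
#if 0
VkSysmemColorSpaceFUCHSIA colorSpace = {
    VK_STRUCTURE_TYPE_SYSMEM_COLOR_SPACE_FUCHSIA, nullptr,
    kAssumedSysmemSrgbValue /* colorSpace; sysmem ColorSpaceType value, assumed */};
VkImageFormatConstraintsInfoFUCHSIA formatInfo = {
    VK_STRUCTURE_TYPE_IMAGE_FORMAT_CONSTRAINTS_INFO_FUCHSIA, nullptr,
    imageCreateInfo, VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT /* requiredFormatFeatures */,
    0 /* flags */, 0 /* sysmemPixelFormat */, 1 /* colorSpaceCount */, &colorSpace};
VkImageConstraintsInfoFUCHSIA constraintsInfo = {
    VK_STRUCTURE_TYPE_IMAGE_CONSTRAINTS_INFO_FUCHSIA, nullptr,
    1 /* formatConstraintsCount */, &formatInfo, bufferCollectionConstraints, 0 /* flags */};
vkSetBufferCollectionImageConstraintsFUCHSIA(device, collection, &constraintsInfo);
#endif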
2616 
2617 VkResult ResourceTracker::setBufferCollectionBufferConstraintsFUCHSIA(
2618     fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>* pCollection,
2619     const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo) {
2620     auto setConstraintsResult =
2621         setBufferCollectionBufferConstraintsImpl(pCollection, pBufferConstraintsInfo);
2622     if (setConstraintsResult.result != VK_SUCCESS) {
2623         return setConstraintsResult.result;
2624     }
2625 
2626     // copy constraints to info_VkBufferCollectionFUCHSIA if
2627     // |collection| is a valid VkBufferCollectionFUCHSIA handle.
2628     AutoLock<RecursiveLock> lock(mLock);
2629     VkBufferCollectionFUCHSIA buffer_collection =
2630         reinterpret_cast<VkBufferCollectionFUCHSIA>(pCollection);
2631     if (info_VkBufferCollectionFUCHSIA.find(buffer_collection) !=
2632         info_VkBufferCollectionFUCHSIA.end()) {
2633         info_VkBufferCollectionFUCHSIA[buffer_collection].constraints =
2634             gfxstream::guest::makeOptional(setConstraintsResult.constraints);
2635     }
2636 
2637     return VK_SUCCESS;
2638 }
2639 
2640 VkResult ResourceTracker::on_vkSetBufferCollectionImageConstraintsFUCHSIA(
2641     void* context, VkResult, VkDevice device, VkBufferCollectionFUCHSIA collection,
2642     const VkImageConstraintsInfoFUCHSIA* pImageConstraintsInfo) {
2643     VkEncoder* enc = (VkEncoder*)context;
2644     auto sysmem_collection =
2645         reinterpret_cast<fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(collection);
2646     return setBufferCollectionImageConstraintsFUCHSIA(enc, device, sysmem_collection,
2647                                                       pImageConstraintsInfo);
2648 }
2649 
2650 VkResult ResourceTracker::on_vkSetBufferCollectionBufferConstraintsFUCHSIA(
2651     void*, VkResult, VkDevice, VkBufferCollectionFUCHSIA collection,
2652     const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo) {
2653     auto sysmem_collection =
2654         reinterpret_cast<fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(collection);
2655     return setBufferCollectionBufferConstraintsFUCHSIA(sysmem_collection, pBufferConstraintsInfo);
2656 }
2657 
2658 VkResult ResourceTracker::getBufferCollectionImageCreateInfoIndexLocked(
2659     VkBufferCollectionFUCHSIA collection, fuchsia_sysmem::wire::BufferCollectionInfo2& info,
2660     uint32_t* outCreateInfoIndex) {
2661     if (!info_VkBufferCollectionFUCHSIA[collection].constraints.hasValue()) {
2662         ALOGE("%s: constraints not set", __func__);
2663         return VK_ERROR_OUT_OF_DEVICE_MEMORY;
2664     }
2665 
2666     if (!info.settings.has_image_format_constraints) {
2667         // no image format constraints, skip getting createInfoIndex.
2668         return VK_SUCCESS;
2669     }
2670 
2671     const auto& constraints = *info_VkBufferCollectionFUCHSIA[collection].constraints;
2672     const auto& createInfoIndices = info_VkBufferCollectionFUCHSIA[collection].createInfoIndex;
2673     const auto& out = info.settings.image_format_constraints;
2674     bool foundCreateInfo = false;
2675 
2676     for (size_t imageFormatIndex = 0; imageFormatIndex < constraints.image_format_constraints_count;
2677          imageFormatIndex++) {
2678         const auto& in = constraints.image_format_constraints[imageFormatIndex];
2679         // These checks are sorted in order of how often they're expected to
2680         // mismatch, from most likely to least likely. They aren't always
2681         // equality comparisons, since sysmem may change some values in
2682         // compatible ways on behalf of the other participants.
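        // Worked example (illustrative): if sysmem settled on
        // bytes_per_row_divisor == 64 while this constraint requested 32,
        // 64 % 32 == 0 and the entry still matches; a settled divisor of 48
        // against a requested 32 would not.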
2683         if ((out.pixel_format.type != in.pixel_format.type) ||
2684             (out.pixel_format.has_format_modifier != in.pixel_format.has_format_modifier) ||
2685             (out.pixel_format.format_modifier.value != in.pixel_format.format_modifier.value) ||
2686             (out.min_bytes_per_row < in.min_bytes_per_row) ||
2687             (out.required_max_coded_width < in.required_max_coded_width) ||
2688             (out.required_max_coded_height < in.required_max_coded_height) ||
2689             (in.bytes_per_row_divisor != 0 &&
2690              out.bytes_per_row_divisor % in.bytes_per_row_divisor != 0)) {
2691             continue;
2692         }
2693         // Check if the out colorspaces are a subset of the in color spaces.
2694         bool all_color_spaces_found = true;
2695         for (uint32_t j = 0; j < out.color_spaces_count; j++) {
2696             bool found_matching_color_space = false;
2697             for (uint32_t k = 0; k < in.color_spaces_count; k++) {
2698                 if (out.color_space[j].type == in.color_space[k].type) {
2699                     found_matching_color_space = true;
2700                     break;
2701                 }
2702             }
2703             if (!found_matching_color_space) {
2704                 all_color_spaces_found = false;
2705                 break;
2706             }
2707         }
2708         if (!all_color_spaces_found) {
2709             continue;
2710         }
2711 
2712         // Choose the first valid format for now.
2713         *outCreateInfoIndex = createInfoIndices[imageFormatIndex];
2714         return VK_SUCCESS;
2715     }
2716 
2717     ALOGE("%s: cannot find a valid image format in constraints", __func__);
2718     return VK_ERROR_OUT_OF_DEVICE_MEMORY;
2719 }
2720 
2721 VkResult ResourceTracker::on_vkGetBufferCollectionPropertiesFUCHSIA(
2722     void* context, VkResult, VkDevice device, VkBufferCollectionFUCHSIA collection,
2723     VkBufferCollectionPropertiesFUCHSIA* pProperties) {
2724     VkEncoder* enc = (VkEncoder*)context;
2725     const auto& sysmem_collection =
2726         *reinterpret_cast<fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(collection);
2727 
2728     auto result = sysmem_collection->WaitForBuffersAllocated();
2729     if (!result.ok() || result->status != ZX_OK) {
2730         ALOGE("Failed wait for allocation: %d %d", result.status(),
2731               GET_STATUS_SAFE(result, status));
2732         return VK_ERROR_INITIALIZATION_FAILED;
2733     }
2734     fuchsia_sysmem::wire::BufferCollectionInfo2 info = std::move(result->buffer_collection_info);
2735 
2736     bool is_host_visible =
2737         info.settings.buffer_settings.heap == fuchsia_sysmem::wire::HeapType::kGoldfishHostVisible;
2738     bool is_device_local =
2739         info.settings.buffer_settings.heap == fuchsia_sysmem::wire::HeapType::kGoldfishDeviceLocal;
2740     if (!is_host_visible && !is_device_local) {
2741         ALOGE("buffer collection uses a non-goldfish heap (type 0x%llx)",
2742               static_cast<unsigned long long>(info.settings.buffer_settings.heap));
2743         return VK_ERROR_INITIALIZATION_FAILED;
2744     }
2745 
2746     // memoryTypeBits
2747     // ====================================================================
2748     {
2749         AutoLock<RecursiveLock> lock(mLock);
2750         auto deviceIt = info_VkDevice.find(device);
2751         if (deviceIt == info_VkDevice.end()) {
2752             return VK_ERROR_INITIALIZATION_FAILED;
2753         }
2754         auto& deviceInfo = deviceIt->second;
2755 
2756         // Device local memory type supported.
2757         pProperties->memoryTypeBits = 0;
2758         for (uint32_t i = 0; i < deviceInfo.memProps.memoryTypeCount; ++i) {
2759             if ((is_device_local && (deviceInfo.memProps.memoryTypes[i].propertyFlags &
2760                                      VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)) ||
2761                 (is_host_visible && (deviceInfo.memProps.memoryTypes[i].propertyFlags &
2762                                      VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT))) {
2763                 pProperties->memoryTypeBits |= 1ull << i;
2764             }
2765         }
2766     }
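    // Worked example (illustrative): with memory type 0 = DEVICE_LOCAL and
    // memory type 1 = HOST_VISIBLE | HOST_COHERENT, a collection allocated on
    // the host-visible heap yields memoryTypeBits == 0b10, so only type 1 is
    // valid when allocating memory for these buffers.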
2767 
2768     // bufferCount
2769     // ====================================================================
2770     pProperties->bufferCount = info.buffer_count;
2771 
2772     auto storeProperties = [this, collection, pProperties]() -> VkResult {
2773         // store properties to storage
2774         AutoLock<RecursiveLock> lock(mLock);
2775         if (info_VkBufferCollectionFUCHSIA.find(collection) ==
2776             info_VkBufferCollectionFUCHSIA.end()) {
2777             return VK_ERROR_OUT_OF_DEVICE_MEMORY;
2778         }
2779 
2780         info_VkBufferCollectionFUCHSIA[collection].properties =
2781             gfxstream::guest::makeOptional(*pProperties);
2782 
2783         // We only do a shallow copy so we should remove all pNext pointers.
2784         info_VkBufferCollectionFUCHSIA[collection].properties->pNext = nullptr;
2785         info_VkBufferCollectionFUCHSIA[collection].properties->sysmemColorSpaceIndex.pNext =
2786             nullptr;
2787         return VK_SUCCESS;
2788     };
2789 
2790     // The fields below only apply to buffer collections with image formats.
2791     if (!info.settings.has_image_format_constraints) {
2792         ALOGD("%s: buffer collection doesn't have image format constraints", __func__);
2793         return storeProperties();
2794     }
2795 
2796     // sysmemPixelFormat
2797     // ====================================================================
2798 
2799     pProperties->sysmemPixelFormat =
2800         static_cast<uint64_t>(info.settings.image_format_constraints.pixel_format.type);
2801 
2802     // colorSpace
2803     // ====================================================================
2804     if (info.settings.image_format_constraints.color_spaces_count == 0) {
2805         ALOGE(
2806             "%s: color space missing from allocated buffer collection "
2807             "constraints",
2808             __func__);
2809         return VK_ERROR_OUT_OF_DEVICE_MEMORY;
2810     }
2811     // Only report first colorspace for now.
2812     pProperties->sysmemColorSpaceIndex.colorSpace =
2813         static_cast<uint32_t>(info.settings.image_format_constraints.color_space[0].type);
2814 
2815     // createInfoIndex
2816     // ====================================================================
2817     {
2818         AutoLock<RecursiveLock> lock(mLock);
2819         auto getIndexResult = getBufferCollectionImageCreateInfoIndexLocked(
2820             collection, info, &pProperties->createInfoIndex);
2821         if (getIndexResult != VK_SUCCESS) {
2822             return getIndexResult;
2823         }
2824     }
2825 
2826     // formatFeatures
2827     // ====================================================================
2828     VkPhysicalDevice physicalDevice;
2829     {
2830         AutoLock<RecursiveLock> lock(mLock);
2831         auto deviceIt = info_VkDevice.find(device);
2832         if (deviceIt == info_VkDevice.end()) {
2833             return VK_ERROR_INITIALIZATION_FAILED;
2834         }
2835         physicalDevice = deviceIt->second.physdev;
2836     }
2837 
2838     VkFormat vkFormat =
2839         sysmemPixelFormatTypeToVk(info.settings.image_format_constraints.pixel_format.type);
2840     VkFormatProperties formatProperties;
2841     enc->vkGetPhysicalDeviceFormatProperties(physicalDevice, vkFormat, &formatProperties,
2842                                              true /* do lock */);
2843     if (is_device_local) {
2844         pProperties->formatFeatures = formatProperties.optimalTilingFeatures;
2845     }
2846     if (is_host_visible) {
2847         pProperties->formatFeatures = formatProperties.linearTilingFeatures;
2848     }
2849 
2850     // YCbCr properties
2851     // ====================================================================
2852     // TODO(59804): Implement this correctly when we support YUV pixel
2853     // formats in goldfish ICD.
2854     pProperties->samplerYcbcrConversionComponents.r = VK_COMPONENT_SWIZZLE_IDENTITY;
2855     pProperties->samplerYcbcrConversionComponents.g = VK_COMPONENT_SWIZZLE_IDENTITY;
2856     pProperties->samplerYcbcrConversionComponents.b = VK_COMPONENT_SWIZZLE_IDENTITY;
2857     pProperties->samplerYcbcrConversionComponents.a = VK_COMPONENT_SWIZZLE_IDENTITY;
2858     pProperties->suggestedYcbcrModel = VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY;
2859     pProperties->suggestedYcbcrRange = VK_SAMPLER_YCBCR_RANGE_ITU_FULL;
2860     pProperties->suggestedXChromaOffset = VK_CHROMA_LOCATION_MIDPOINT;
2861     pProperties->suggestedYChromaOffset = VK_CHROMA_LOCATION_MIDPOINT;
2862 
2863     return storeProperties();
2864 }
2865 #endif
2866 
2867 static uint32_t getVirglFormat(VkFormat vkFormat) {
2868     uint32_t virglFormat = 0;
2869 
2870     switch (vkFormat) {
2871         case VK_FORMAT_R8G8B8A8_SINT:
2872         case VK_FORMAT_R8G8B8A8_UNORM:
2873         case VK_FORMAT_R8G8B8A8_SRGB:
2874         case VK_FORMAT_R8G8B8A8_SNORM:
2875         case VK_FORMAT_R8G8B8A8_SSCALED:
2876         case VK_FORMAT_R8G8B8A8_USCALED:
2877             virglFormat = VIRGL_FORMAT_R8G8B8A8_UNORM;
2878             break;
2879         case VK_FORMAT_B8G8R8A8_SINT:
2880         case VK_FORMAT_B8G8R8A8_UNORM:
2881         case VK_FORMAT_B8G8R8A8_SRGB:
2882         case VK_FORMAT_B8G8R8A8_SNORM:
2883         case VK_FORMAT_B8G8R8A8_SSCALED:
2884         case VK_FORMAT_B8G8R8A8_USCALED:
2885             virglFormat = VIRGL_FORMAT_B8G8R8A8_UNORM;
2886             break;
2887         default:
2888             break;
2889     }
2890 
2891     return virglFormat;
2892 }
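// Illustrative note: all of the 8-bit RGBA/BGRA variants above intentionally
// collapse to the matching *_UNORM virgl format, e.g.
//     getVirglFormat(VK_FORMAT_R8G8B8A8_SRGB) == VIRGL_FORMAT_R8G8B8A8_UNORM
// The virtio-gpu resource only needs the byte layout; the numeric
// interpretation (SRGB/SNORM/scaled) remains a Vulkan-side property.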
2893 
2894 CoherentMemoryPtr ResourceTracker::createCoherentMemory(
2895     VkDevice device, VkDeviceMemory mem, const VkMemoryAllocateInfo& hostAllocationInfo,
2896     VkEncoder* enc, VkResult& res) {
2897     CoherentMemoryPtr coherentMemory = nullptr;
2898 
2899 #if defined(__ANDROID__)
2900     if (mFeatureInfo->hasDirectMem) {
2901         uint64_t gpuAddr = 0;
2902         GoldfishAddressSpaceBlockPtr block = nullptr;
2903         res = enc->vkMapMemoryIntoAddressSpaceGOOGLE(device, mem, &gpuAddr, true);
2904         if (res != VK_SUCCESS) {
2905             ALOGE(
2906                 "Failed to create coherent memory: vkMapMemoryIntoAddressSpaceGOOGLE "
2907                 "returned:%d.",
2908                 res);
2909             return coherentMemory;
2910         }
2911         {
2912             AutoLock<RecursiveLock> lock(mLock);
2913             auto it = info_VkDeviceMemory.find(mem);
2914             if (it == info_VkDeviceMemory.end()) {
2915                 ALOGE("Failed to create coherent memory: failed to find device memory.");
2916                 res = VK_ERROR_OUT_OF_HOST_MEMORY;
2917                 return coherentMemory;
2918             }
2919             auto& info = it->second;
2920             block = info.goldfishBlock;
2921             info.goldfishBlock = nullptr;
2922 
2923             coherentMemory = std::make_shared<CoherentMemory>(
2924                 block, gpuAddr, hostAllocationInfo.allocationSize, device, mem);
2925         }
2926     } else
2927 #endif  // defined(__ANDROID__)
2928         if (mFeatureInfo->hasVirtioGpuNext) {
2929             struct VirtGpuCreateBlob createBlob = {0};
2930             uint64_t hvaSizeId[3];
2931             res = enc->vkGetMemoryHostAddressInfoGOOGLE(device, mem, &hvaSizeId[0], &hvaSizeId[1],
2932                                                         &hvaSizeId[2], true /* do lock */);
2933             if (res != VK_SUCCESS) {
2934                 ALOGE(
2935                     "Failed to create coherent memory: vkGetMemoryHostAddressInfoGOOGLE "
2936                     "returned:%d.",
2937                     res);
2938                 return coherentMemory;
2939             }
2940             {
2941                 AutoLock<RecursiveLock> lock(mLock);
2942                 VirtGpuDevice* instance = VirtGpuDevice::getInstance((enum VirtGpuCapset)3);
2943                 createBlob.blobMem = kBlobMemHost3d;
2944                 createBlob.flags = kBlobFlagMappable;
2945                 createBlob.blobId = hvaSizeId[2];
2946                 createBlob.size = hostAllocationInfo.allocationSize;
2947 
2948                 auto blob = instance->createBlob(createBlob);
2949                 if (!blob) {
2950                     ALOGE("Failed to create coherent memory: failed to create blob.");
2951                     res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
2952                     return coherentMemory;
2953                 }
2954 
2955                 VirtGpuBlobMappingPtr mapping = blob->createMapping();
2956                 if (!mapping) {
2957                     ALOGE("Failed to create coherent memory: failed to create blob mapping.");
2958                     res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
2959                     return coherentMemory;
2960                 }
2961 
2962                 coherentMemory =
2963                     std::make_shared<CoherentMemory>(mapping, createBlob.size, device, mem);
2964             }
2965         } else {
2966             ALOGE("FATAL: Unsupported virtual memory feature");
2967             abort();
2968         }
2969     return coherentMemory;
2970 }
2971 
2972 VkResult ResourceTracker::allocateCoherentMemory(VkDevice device,
2973                                                  const VkMemoryAllocateInfo* pAllocateInfo,
2974                                                  VkEncoder* enc, VkDeviceMemory* pMemory) {
2975     uint64_t blobId = 0;
2976     uint64_t offset = 0;
2977     uint8_t* ptr = nullptr;
2978     VkMemoryAllocateFlagsInfo allocFlagsInfo;
2979     VkMemoryOpaqueCaptureAddressAllocateInfo opaqueCaptureAddressAllocInfo;
2980     VkCreateBlobGOOGLE createBlobInfo;
2981     VirtGpuBlobPtr guestBlob = nullptr;
2982 
2983     memset(&createBlobInfo, 0, sizeof(struct VkCreateBlobGOOGLE));
2984     createBlobInfo.sType = VK_STRUCTURE_TYPE_CREATE_BLOB_GOOGLE;
2985 
2986     const VkMemoryAllocateFlagsInfo* allocFlagsInfoPtr =
2987         vk_find_struct<VkMemoryAllocateFlagsInfo>(pAllocateInfo);
2988     const VkMemoryOpaqueCaptureAddressAllocateInfo* opaqueCaptureAddressAllocInfoPtr =
2989         vk_find_struct<VkMemoryOpaqueCaptureAddressAllocateInfo>(pAllocateInfo);
2990 
2991     bool deviceAddressMemoryAllocation =
2992         allocFlagsInfoPtr &&
2993         ((allocFlagsInfoPtr->flags & VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT) ||
2994          (allocFlagsInfoPtr->flags & VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT));
2995 
2996     bool dedicated = deviceAddressMemoryAllocation;
2997 
2998     if (mCaps.vulkanCapset.deferredMapping || mCaps.params[kParamCreateGuestHandle])
2999         dedicated = true;
3000 
3001     VkMemoryAllocateInfo hostAllocationInfo = vk_make_orphan_copy(*pAllocateInfo);
3002     vk_struct_chain_iterator structChainIter = vk_make_chain_iterator(&hostAllocationInfo);
3003 
3004     if (mCaps.vulkanCapset.deferredMapping || mCaps.params[kParamCreateGuestHandle]) {
3005         hostAllocationInfo.allocationSize =
3006             ALIGN(pAllocateInfo->allocationSize, mCaps.vulkanCapset.blobAlignment);
3007     } else if (dedicated) {
3008         // Over-align to kLargestPageSize to work around some Windows drivers
3009         // (b:152769369). The host could likely report the desired alignment instead.
3010         hostAllocationInfo.allocationSize = ALIGN(pAllocateInfo->allocationSize, kLargestPageSize);
3011     } else {
3012         VkDeviceSize roundedUpAllocSize = ALIGN(pAllocateInfo->allocationSize, kMegaByte);
3013         hostAllocationInfo.allocationSize = std::max(roundedUpAllocSize, kDefaultHostMemBlockSize);
3014     }
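    // Worked example (illustrative, assuming ALIGN rounds up and
    // blobAlignment == 4096): a 4097-byte request maps to
    // ALIGN(4097, 4096) == 8192 host bytes; in the non-dedicated path a
    // request already a multiple of kMegaByte is unchanged by ALIGN and is
    // then raised to kDefaultHostMemBlockSize if that constant is larger.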
3015 
3016     // Support device address capture/replay allocations
3017     if (deviceAddressMemoryAllocation) {
3018         if (allocFlagsInfoPtr) {
3019             ALOGV("%s: has alloc flags\n", __func__);
3020             allocFlagsInfo = *allocFlagsInfoPtr;
3021             vk_append_struct(&structChainIter, &allocFlagsInfo);
3022         }
3023 
3024         if (opaqueCaptureAddressAllocInfoPtr) {
3025             ALOGV("%s: has opaque capture address\n", __func__);
3026             opaqueCaptureAddressAllocInfo = *opaqueCaptureAddressAllocInfoPtr;
3027             vk_append_struct(&structChainIter, &opaqueCaptureAddressAllocInfo);
3028         }
3029     }
3030 
3031     if (mCaps.params[kParamCreateGuestHandle]) {
3032         struct VirtGpuCreateBlob createBlob = {0};
3033         struct VirtGpuExecBuffer exec = {};
3034         VirtGpuDevice* instance = VirtGpuDevice::getInstance();
3035         struct gfxstreamPlaceholderCommandVk placeholderCmd = {};
3036 
3037         createBlobInfo.blobId = ++mBlobId;
3038         createBlobInfo.blobMem = kBlobMemGuest;
3039         createBlobInfo.blobFlags = kBlobFlagCreateGuestHandle;
3040         vk_append_struct(&structChainIter, &createBlobInfo);
3041 
3042         createBlob.blobMem = kBlobMemGuest;
3043         createBlob.flags = kBlobFlagCreateGuestHandle;
3044         createBlob.blobId = createBlobInfo.blobId;
3045         createBlob.size = hostAllocationInfo.allocationSize;
3046 
3047         guestBlob = instance->createBlob(createBlob);
3048         if (!guestBlob) {
3049             ALOGE("Failed to allocate coherent memory: failed to create blob.");
3050             return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3051         }
3052 
3053         placeholderCmd.hdr.opCode = GFXSTREAM_PLACEHOLDER_COMMAND_VK;
3054         exec.command = static_cast<void*>(&placeholderCmd);
3055         exec.command_size = sizeof(placeholderCmd);
3056         exec.flags = kRingIdx;
3057         exec.ring_idx = 1;
3058         if (instance->execBuffer(exec, guestBlob.get())) {
3059             ALOGE("Failed to allocate coherent memory: failed to execbuffer for wait.");
3060             return VK_ERROR_OUT_OF_HOST_MEMORY;
3061         }
3062 
3063         guestBlob->wait();
3064     } else if (mCaps.vulkanCapset.deferredMapping) {
3065         createBlobInfo.blobId = ++mBlobId;
3066         createBlobInfo.blobMem = kBlobMemHost3d;
3067         vk_append_struct(&structChainIter, &createBlobInfo);
3068     }
3069 
3070     VkDeviceMemory mem = VK_NULL_HANDLE;
3071     VkResult host_res =
3072         enc->vkAllocateMemory(device, &hostAllocationInfo, nullptr, &mem, true /* do lock */);
3073     if (host_res != VK_SUCCESS) {
3074         ALOGE("Failed to allocate coherent memory: failed to allocate on the host: %d.", host_res);
3075         return host_res;
3076     }
3077 
3078     struct VkDeviceMemory_Info info;
3079     if (mCaps.vulkanCapset.deferredMapping || mCaps.params[kParamCreateGuestHandle]) {
3080         info.allocationSize = pAllocateInfo->allocationSize;
3081         info.blobId = createBlobInfo.blobId;
3082     }
3083 
3084     if (guestBlob) {
3085         auto mapping = guestBlob->createMapping();
3086         if (!mapping) {
3087             ALOGE("Failed to allocate coherent memory: failed to create blob mapping.");
3088             return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3089         }
3090 
3091         auto coherentMemory = std::make_shared<CoherentMemory>(
3092             mapping, hostAllocationInfo.allocationSize, device, mem);
3093 
3094         coherentMemory->subAllocate(pAllocateInfo->allocationSize, &ptr, offset);
3095         info.coherentMemoryOffset = offset;
3096         info.coherentMemory = coherentMemory;
3097         info.ptr = ptr;
3098     }
3099 
3100     info.coherentMemorySize = hostAllocationInfo.allocationSize;
3101     info.memoryTypeIndex = hostAllocationInfo.memoryTypeIndex;
3102     info.device = device;
3103     info.dedicated = dedicated;
3104     {
3105         // createCoherentMemory() below needs to access this memory's entry in
3106         // info_VkDeviceMemory, so set it before use.
3107         AutoLock<RecursiveLock> lock(mLock);
3108         info_VkDeviceMemory[mem] = info;
3109     }
3110 
3111     if (mCaps.vulkanCapset.deferredMapping || mCaps.params[kParamCreateGuestHandle]) {
3112         *pMemory = mem;
3113         return host_res;
3114     }
3115 
3116     auto coherentMemory = createCoherentMemory(device, mem, hostAllocationInfo, enc, host_res);
3117     if (coherentMemory) {
3118         AutoLock<RecursiveLock> lock(mLock);
3119         coherentMemory->subAllocate(pAllocateInfo->allocationSize, &ptr, offset);
3120         info.allocationSize = pAllocateInfo->allocationSize;
3121         info.coherentMemoryOffset = offset;
3122         info.coherentMemory = coherentMemory;
3123         info.ptr = ptr;
3124         info_VkDeviceMemory[mem] = info;
3125         *pMemory = mem;
3126     } else {
3127         enc->vkFreeMemory(device, mem, nullptr, true);
3128         AutoLock<RecursiveLock> lock(mLock);
3129         info_VkDeviceMemory.erase(mem);
3130     }
3131     return host_res;
3132 }
3133 
3134 VkResult ResourceTracker::getCoherentMemory(const VkMemoryAllocateInfo* pAllocateInfo,
3135                                             VkEncoder* enc, VkDevice device,
3136                                             VkDeviceMemory* pMemory) {
3137     VkMemoryAllocateFlagsInfo allocFlagsInfo;
3138     VkMemoryOpaqueCaptureAddressAllocateInfo opaqueCaptureAddressAllocInfo;
3139 
3140     // Add buffer device address capture structs
3141     const VkMemoryAllocateFlagsInfo* allocFlagsInfoPtr =
3142         vk_find_struct<VkMemoryAllocateFlagsInfo>(pAllocateInfo);
3143 
3144     bool dedicated =
3145         allocFlagsInfoPtr &&
3146         ((allocFlagsInfoPtr->flags & VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT) ||
3147          (allocFlagsInfoPtr->flags & VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT));
3148 
3149     if (mCaps.vulkanCapset.deferredMapping || mCaps.params[kParamCreateGuestHandle])
3150         dedicated = true;
3151 
3152     CoherentMemoryPtr coherentMemory = nullptr;
3153     uint8_t* ptr = nullptr;
3154     uint64_t offset = 0;
3155     {
3156         AutoLock<RecursiveLock> lock(mLock);
3157         for (const auto& [memory, info] : info_VkDeviceMemory) {
3158             if (info.memoryTypeIndex != pAllocateInfo->memoryTypeIndex) continue;
3159 
3160             if (info.dedicated || dedicated) continue;
3161 
3162             if (!info.coherentMemory) continue;
3163 
3164             if (!info.coherentMemory->subAllocate(pAllocateInfo->allocationSize, &ptr, offset))
3165                 continue;
3166 
3167             coherentMemory = info.coherentMemory;
3168             break;
3169         }
3170         if (coherentMemory) {
3171             struct VkDeviceMemory_Info info;
3172             info.coherentMemoryOffset = offset;
3173             info.ptr = ptr;
3174             info.memoryTypeIndex = pAllocateInfo->memoryTypeIndex;
3175             info.allocationSize = pAllocateInfo->allocationSize;
3176             info.coherentMemory = coherentMemory;
3177             info.device = device;
3178 
3179             // For suballocated memory, create an alias VkDeviceMemory handle for the
3180             // application; the memory backing the suballocation remains the VkDeviceMemory
3181             // associated with the CoherentMemory block.
3182             auto mem = new_from_host_VkDeviceMemory(VK_NULL_HANDLE);
3183             info_VkDeviceMemory[mem] = info;
3184             *pMemory = mem;
3185             return VK_SUCCESS;
3186         }
3187     }
3188     return allocateCoherentMemory(device, pAllocateInfo, enc, pMemory);
3189 }
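// Hedged behavioral sketch (names assumed): two non-dedicated allocations of
// the same memoryTypeIndex may be served from one host block.
#if 0
VkDeviceMemory a = VK_NULL_HANDLE, b = VK_NULL_HANDLE;
tracker->getCoherentMemory(&allocInfo, enc, device, &a);
tracker->getCoherentMemory(&allocInfo, enc, device, &b);
// a and b can be distinct guest-side alias handles whose suballocations share
// a single CoherentMemory block (and thus a single host VkDeviceMemory).
#endif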
3190 
3191 VkResult ResourceTracker::on_vkAllocateMemory(void* context, VkResult input_result, VkDevice device,
3192                                               const VkMemoryAllocateInfo* pAllocateInfo,
3193                                               const VkAllocationCallbacks* pAllocator,
3194                                               VkDeviceMemory* pMemory) {
3195 #define _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(result)                                      \
3196     {                                                                                          \
3197         auto it = info_VkDevice.find(device);                                                  \
3198         if (it == info_VkDevice.end()) return result;                                          \
3199         emitDeviceMemoryReport(it->second,                                                     \
3200                                VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATION_FAILED_EXT, 0,    \
3201                                pAllocateInfo->allocationSize, VK_OBJECT_TYPE_DEVICE_MEMORY, 0, \
3202                                pAllocateInfo->memoryTypeIndex);                                \
3203         return result;                                                                         \
3204     }
3205 
3206 #define _RETURN_SCUCCESS_WITH_DEVICE_MEMORY_REPORT                                         \
3207     {                                                                                      \
3208         uint64_t memoryObjectId = (uint64_t)(void*)*pMemory;                               \
3209         if (ahw) {                                                                         \
3210             memoryObjectId = getAHardwareBufferId(ahw);                                    \
3211         }                                                                                  \
3212         emitDeviceMemoryReport(info_VkDevice[device],                                      \
3213                                isImport ? VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_IMPORT_EXT    \
3214                                         : VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATE_EXT, \
3215                                memoryObjectId, pAllocateInfo->allocationSize,              \
3216                                VK_OBJECT_TYPE_DEVICE_MEMORY, (uint64_t)(void*)*pMemory,    \
3217                                pAllocateInfo->memoryTypeIndex);                            \
3218         return VK_SUCCESS;                                                                 \
3219     }
3220 
3221     if (input_result != VK_SUCCESS) _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(input_result);
3222 
3223     VkEncoder* enc = (VkEncoder*)context;
3224 
3225     VkMemoryAllocateInfo finalAllocInfo = vk_make_orphan_copy(*pAllocateInfo);
3226     vk_struct_chain_iterator structChainIter = vk_make_chain_iterator(&finalAllocInfo);
3227 
3228     VkMemoryAllocateFlagsInfo allocFlagsInfo;
3229     VkMemoryOpaqueCaptureAddressAllocateInfo opaqueCaptureAddressAllocInfo;
3230 
3231     // Add buffer device address capture structs
3232     const VkMemoryAllocateFlagsInfo* allocFlagsInfoPtr =
3233         vk_find_struct<VkMemoryAllocateFlagsInfo>(pAllocateInfo);
3234     const VkMemoryOpaqueCaptureAddressAllocateInfo* opaqueCaptureAddressAllocInfoPtr =
3235         vk_find_struct<VkMemoryOpaqueCaptureAddressAllocateInfo>(pAllocateInfo);
3236 
3237     if (allocFlagsInfoPtr) {
3238         ALOGV("%s: has alloc flags\n", __func__);
3239         allocFlagsInfo = *allocFlagsInfoPtr;
3240         vk_append_struct(&structChainIter, &allocFlagsInfo);
3241     }
3242 
3243     if (opaqueCaptureAddressAllocInfoPtr) {
3244         ALOGV("%s: has opaque capture address\n", __func__);
3245         opaqueCaptureAddressAllocInfo = *opaqueCaptureAddressAllocInfoPtr;
3246         vk_append_struct(&structChainIter, &opaqueCaptureAddressAllocInfo);
3247     }
3248 
3249     VkMemoryDedicatedAllocateInfo dedicatedAllocInfo;
3250     VkImportColorBufferGOOGLE importCbInfo = {
3251         VK_STRUCTURE_TYPE_IMPORT_COLOR_BUFFER_GOOGLE,
3252         0,
3253     };
3254     VkImportBufferGOOGLE importBufferInfo = {
3255         VK_STRUCTURE_TYPE_IMPORT_BUFFER_GOOGLE,
3256         0,
3257     };
3258     // VkImportPhysicalAddressGOOGLE importPhysAddrInfo = {
3259     //     VK_STRUCTURE_TYPE_IMPORT_PHYSICAL_ADDRESS_GOOGLE, 0,
3260     // };
3261 
3262     const VkExportMemoryAllocateInfo* exportAllocateInfoPtr =
3263         vk_find_struct<VkExportMemoryAllocateInfo>(pAllocateInfo);
3264 
3265 #ifdef VK_USE_PLATFORM_ANDROID_KHR
3266     const VkImportAndroidHardwareBufferInfoANDROID* importAhbInfoPtr =
3267         vk_find_struct<VkImportAndroidHardwareBufferInfoANDROID>(pAllocateInfo);
3268 #else
3269     const void* importAhbInfoPtr = nullptr;
3270 #endif
3271 
3272 #if defined(__linux__) && !defined(VK_USE_PLATFORM_ANDROID_KHR)
3273     const VkImportMemoryFdInfoKHR* importFdInfoPtr =
3274         vk_find_struct<VkImportMemoryFdInfoKHR>(pAllocateInfo);
3275 #else
3276     const VkImportMemoryFdInfoKHR* importFdInfoPtr = nullptr;
3277 #endif
3278 
3279 #ifdef VK_USE_PLATFORM_FUCHSIA
3280     const VkImportMemoryBufferCollectionFUCHSIA* importBufferCollectionInfoPtr =
3281         vk_find_struct<VkImportMemoryBufferCollectionFUCHSIA>(pAllocateInfo);
3282 
3283     const VkImportMemoryZirconHandleInfoFUCHSIA* importVmoInfoPtr =
3284         vk_find_struct<VkImportMemoryZirconHandleInfoFUCHSIA>(pAllocateInfo);
3285 #else
3286     const void* importBufferCollectionInfoPtr = nullptr;
3287     const void* importVmoInfoPtr = nullptr;
3288 #endif  // VK_USE_PLATFORM_FUCHSIA
3289 
3290     const VkMemoryDedicatedAllocateInfo* dedicatedAllocInfoPtr =
3291         vk_find_struct<VkMemoryDedicatedAllocateInfo>(pAllocateInfo);
3292 
3293     // Note for AHardwareBuffers, the Vulkan spec states:
3294     //
3295     //     Android hardware buffers have intrinsic width, height, format, and usage
3296     //     properties, so Vulkan images bound to memory imported from an Android
3297     //     hardware buffer must use dedicated allocations
3298     //
3299     // so any allocation requests with a VkImportAndroidHardwareBufferInfoANDROID
3300     // will necessarily have a VkMemoryDedicatedAllocateInfo. However, the host
3301     // may or may not actually use a dedicated allocation to emulate
3302     // AHardwareBuffers. As such, the VkMemoryDedicatedAllocateInfo is passed to the
3303     // host and the host will decide whether or not to use it.
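    // Hedged sketch of such an import request as the app builds it (ahb,
    // image, allocationSize, memoryTypeIndex are assumed values):
    //
    //   VkImportAndroidHardwareBufferInfoANDROID importAhbInfo = {
    //       VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID, nullptr, ahb};
    //   VkMemoryDedicatedAllocateInfo dedicated = {
    //       VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO, &importAhbInfo,
    //       image, VK_NULL_HANDLE /* buffer */};
    //   VkMemoryAllocateInfo ai = {VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, &dedicated,
    //                              allocationSize, memoryTypeIndex};
    //   vkAllocateMemory(device, &ai, nullptr, &memory);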
3304 
3305     bool shouldPassThroughDedicatedAllocInfo =
3306         !exportAllocateInfoPtr && !importBufferCollectionInfoPtr && !importVmoInfoPtr;
3307 
3308     const VkPhysicalDeviceMemoryProperties& physicalDeviceMemoryProps =
3309         getPhysicalDeviceMemoryProperties(context, device, VK_NULL_HANDLE);
3310 
3311     const bool requestedMemoryIsHostVisible =
3312         isHostVisible(&physicalDeviceMemoryProps, pAllocateInfo->memoryTypeIndex);
3313 
3314 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
3315     shouldPassThroughDedicatedAllocInfo &= !requestedMemoryIsHostVisible;
3316 #endif  // defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
3317 
3318     if (shouldPassThroughDedicatedAllocInfo && dedicatedAllocInfoPtr) {
3319         dedicatedAllocInfo = vk_make_orphan_copy(*dedicatedAllocInfoPtr);
3320         vk_append_struct(&structChainIter, &dedicatedAllocInfo);
3321     }
3322 
3323     // State needed for import/export.
3324     bool exportAhb = false;
3325     bool exportVmo = false;
3326     bool exportDmabuf = false;
3327     bool importAhb = false;
3328     bool importBufferCollection = false;
3329     bool importVmo = false;
3330     bool importDmabuf = false;
3331     (void)exportVmo;
3332 
3333     // Even if we export allocate, the underlying operation
3334     // for the host is always going to be an import operation.
3335     // This is also how Intel's implementation works,
3336     // and is generally simpler;
3337     // even in an export allocation,
3338     // we perform AHardwareBuffer allocation
3339     // on the guest side, at this layer,
3340     // and then we attach a new VkDeviceMemory
3341     // to the AHardwareBuffer on the host via an "import" operation.
3342     AHardwareBuffer* ahw = nullptr;
3343 
3344     if (exportAllocateInfoPtr) {
3345         exportAhb = exportAllocateInfoPtr->handleTypes &
3346                     VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
3347 #ifdef VK_USE_PLATFORM_FUCHSIA
3348         exportVmo = exportAllocateInfoPtr->handleTypes &
3349                     VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA;
3350 #endif  // VK_USE_PLATFORM_FUCHSIA
3351         exportDmabuf =
3352             exportAllocateInfoPtr->handleTypes & (VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
3353                                                   VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
3354     } else if (importAhbInfoPtr) {
3355         importAhb = true;
3356     } else if (importBufferCollectionInfoPtr) {
3357         importBufferCollection = true;
3358     } else if (importVmoInfoPtr) {
3359         importVmo = true;
3360     }
3361 
3362     if (importFdInfoPtr) {
3363         importDmabuf =
3364             (importFdInfoPtr->handleType & (VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
3365                                             VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT));
3366     }
3367     bool isImport = importAhb || importBufferCollection || importVmo || importDmabuf;
3368 
3369 #if defined(VK_USE_PLATFORM_ANDROID_KHR)
3370     if (exportAhb) {
3371         bool hasDedicatedImage =
3372             dedicatedAllocInfoPtr && (dedicatedAllocInfoPtr->image != VK_NULL_HANDLE);
3373         bool hasDedicatedBuffer =
3374             dedicatedAllocInfoPtr && (dedicatedAllocInfoPtr->buffer != VK_NULL_HANDLE);
3375         VkExtent3D imageExtent = {0, 0, 0};
3376         uint32_t imageLayers = 0;
3377         VkFormat imageFormat = VK_FORMAT_UNDEFINED;
3378         VkImageUsageFlags imageUsage = 0;
3379         VkImageCreateFlags imageCreateFlags = 0;
3380         VkDeviceSize bufferSize = 0;
3381         VkDeviceSize allocationInfoAllocSize = finalAllocInfo.allocationSize;
3382 
3383         if (hasDedicatedImage) {
3384             AutoLock<RecursiveLock> lock(mLock);
3385 
3386             auto it = info_VkImage.find(dedicatedAllocInfoPtr->image);
3387             if (it == info_VkImage.end())
3388                 _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(VK_ERROR_INITIALIZATION_FAILED);
3389             const auto& info = it->second;
3390             const auto& imgCi = info.createInfo;
3391 
3392             imageExtent = imgCi.extent;
3393             imageLayers = imgCi.arrayLayers;
3394             imageFormat = imgCi.format;
3395             imageUsage = imgCi.usage;
3396             imageCreateFlags = imgCi.flags;
3397         }
3398 
3399         if (hasDedicatedBuffer) {
3400             AutoLock<RecursiveLock> lock(mLock);
3401 
3402             auto it = info_VkBuffer.find(dedicatedAllocInfoPtr->buffer);
3403             if (it == info_VkBuffer.end())
3404                 _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(VK_ERROR_INITIALIZATION_FAILED);
3405             const auto& info = it->second;
3406             const auto& bufCi = info.createInfo;
3407 
3408             bufferSize = bufCi.size;
3409         }
3410 
3411         VkResult ahbCreateRes = createAndroidHardwareBuffer(
3412             ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->grallocHelper(),
3413             hasDedicatedImage, hasDedicatedBuffer, imageExtent, imageLayers, imageFormat,
3414             imageUsage, imageCreateFlags, bufferSize, allocationInfoAllocSize, &ahw);
3415 
3416         if (ahbCreateRes != VK_SUCCESS) {
3417             _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(ahbCreateRes);
3418         }
3419     }
3420 
3421     if (importAhb) {
3422         ahw = importAhbInfoPtr->buffer;
3423         // We still need to acquire the AHardwareBuffer.
3424         importAndroidHardwareBuffer(
3425             ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->grallocHelper(),
3426             importAhbInfoPtr, nullptr);
3427     }
3428 
3429     if (ahw) {
3430         auto* gralloc =
3431             ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->grallocHelper();
3432 
3433         const uint32_t hostHandle = gralloc->getHostHandle(ahw);
3434         if (gralloc->getFormat(ahw) == AHARDWAREBUFFER_FORMAT_BLOB &&
3435             !gralloc->treatBlobAsImage()) {
3436             importBufferInfo.buffer = hostHandle;
3437             vk_append_struct(&structChainIter, &importBufferInfo);
3438         } else {
3439             importCbInfo.colorBuffer = hostHandle;
3440             vk_append_struct(&structChainIter, &importCbInfo);
3441         }
3442     }
3443 #endif
3444     zx_handle_t vmo_handle = ZX_HANDLE_INVALID;
3445 
3446 #ifdef VK_USE_PLATFORM_FUCHSIA
3447     if (importBufferCollection) {
3448         const auto& collection =
3449             *reinterpret_cast<fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(
3450                 importBufferCollectionInfoPtr->collection);
3451         auto result = collection->WaitForBuffersAllocated();
3452         if (!result.ok() || result->status != ZX_OK) {
3453             ALOGE("WaitForBuffersAllocated failed: %d %d", result.status(),
3454                   GET_STATUS_SAFE(result, status));
3455             _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(VK_ERROR_INITIALIZATION_FAILED);
3456         }
3457         fuchsia_sysmem::wire::BufferCollectionInfo2& info = result->buffer_collection_info;
3458         uint32_t index = importBufferCollectionInfoPtr->index;
3459         if (index >= info.buffer_count) {
3460             ALOGE("Invalid buffer index: %u (buffer count %u)", index, info.buffer_count);
3461             _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(VK_ERROR_INITIALIZATION_FAILED);
3462         }
3463         vmo_handle = info.buffers[index].vmo.release();
3464     }
3465 
3466     if (importVmo) {
3467         vmo_handle = importVmoInfoPtr->handle;
3468     }
3469 
3470     if (exportVmo) {
3471         bool hasDedicatedImage =
3472             dedicatedAllocInfoPtr && (dedicatedAllocInfoPtr->image != VK_NULL_HANDLE);
3473         bool hasDedicatedBuffer =
3474             dedicatedAllocInfoPtr && (dedicatedAllocInfoPtr->buffer != VK_NULL_HANDLE);
3475 
3476         if (hasDedicatedImage && hasDedicatedBuffer) {
3477             ALOGE(
3478                 "Invalid VkMemoryDedicatedAllocateInfo: At least one "
3479                 "of image and buffer must be VK_NULL_HANDLE.");
3480             return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3481         }
3482 
3483         const VkImageCreateInfo* pImageCreateInfo = nullptr;
3484 
3485         VkBufferConstraintsInfoFUCHSIA bufferConstraintsInfo = {
3486             .sType = VK_STRUCTURE_TYPE_BUFFER_CONSTRAINTS_INFO_FUCHSIA,
3487             .pNext = nullptr,
3488             .createInfo = {},
3489             .requiredFormatFeatures = 0,
3490             .bufferCollectionConstraints =
3491                 VkBufferCollectionConstraintsInfoFUCHSIA{
3492                     .sType = VK_STRUCTURE_TYPE_BUFFER_COLLECTION_CONSTRAINTS_INFO_FUCHSIA,
3493                     .pNext = nullptr,
3494                     .minBufferCount = 1,
3495                     .maxBufferCount = 0,
3496                     .minBufferCountForCamping = 0,
3497                     .minBufferCountForDedicatedSlack = 0,
3498                     .minBufferCountForSharedSlack = 0,
3499                 },
3500         };
3501         const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo = nullptr;

        if (hasDedicatedImage) {
            AutoLock<RecursiveLock> lock(mLock);

            auto it = info_VkImage.find(dedicatedAllocInfoPtr->image);
            if (it == info_VkImage.end()) return VK_ERROR_INITIALIZATION_FAILED;
            const auto& imageInfo = it->second;

            pImageCreateInfo = &imageInfo.createInfo;
        }

        if (hasDedicatedBuffer) {
            AutoLock<RecursiveLock> lock(mLock);

            auto it = info_VkBuffer.find(dedicatedAllocInfoPtr->buffer);
            if (it == info_VkBuffer.end()) return VK_ERROR_INITIALIZATION_FAILED;
            const auto& bufferInfo = it->second;

            bufferConstraintsInfo.createInfo = bufferInfo.createInfo;
            pBufferConstraintsInfo = &bufferConstraintsInfo;
        }

        hasDedicatedImage =
            hasDedicatedImage && getBufferCollectionConstraintsVulkanImageUsage(pImageCreateInfo);
        hasDedicatedBuffer = hasDedicatedBuffer && getBufferCollectionConstraintsVulkanBufferUsage(
                                                       pBufferConstraintsInfo);

        if (hasDedicatedImage || hasDedicatedBuffer) {
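            // Standard sysmem negotiation: allocate a shared collection
            // token, bind it to a BufferCollection channel, apply the Vulkan
            // constraints, then wait for sysmem to allocate the backing VMOs.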
            auto token_ends = fidl::CreateEndpoints<::fuchsia_sysmem::BufferCollectionToken>();
            if (!token_ends.is_ok()) {
                ALOGE("zx_channel_create failed: %d", token_ends.status_value());
                abort();
            }

            {
                auto result =
                    mSysmemAllocator->AllocateSharedCollection(std::move(token_ends->server));
                if (!result.ok()) {
                    ALOGE("AllocateSharedCollection failed: %d", result.status());
                    abort();
                }
            }

            auto collection_ends = fidl::CreateEndpoints<::fuchsia_sysmem::BufferCollection>();
            if (!collection_ends.is_ok()) {
                ALOGE("zx_channel_create failed: %d", collection_ends.status_value());
                abort();
            }

            {
                auto result = mSysmemAllocator->BindSharedCollection(
                    std::move(token_ends->client), std::move(collection_ends->server));
                if (!result.ok()) {
                    ALOGE("BindSharedCollection failed: %d", result.status());
                    abort();
                }
            }

            fidl::WireSyncClient<fuchsia_sysmem::BufferCollection> collection(
                std::move(collection_ends->client));
            if (hasDedicatedImage) {
                // TODO(fxbug.dev/90856): Use setBufferCollectionImageConstraintsFUCHSIA.
                VkResult res = setBufferCollectionConstraintsFUCHSIA(enc, device, &collection,
                                                                     pImageCreateInfo);
                if (res == VK_ERROR_FORMAT_NOT_SUPPORTED) {
                    ALOGE("setBufferCollectionConstraints failed: format %u is not supported",
                          pImageCreateInfo->format);
                    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
                }
                if (res != VK_SUCCESS) {
                    ALOGE("setBufferCollectionConstraints failed: %d", res);
                    abort();
                }
            }

            if (hasDedicatedBuffer) {
                VkResult res = setBufferCollectionBufferConstraintsFUCHSIA(&collection,
                                                                           pBufferConstraintsInfo);
                if (res != VK_SUCCESS) {
                    ALOGE("setBufferCollectionBufferConstraints failed: %d", res);
                    abort();
                }
            }

            {
                auto result = collection->WaitForBuffersAllocated();
                if (result.ok() && result->status == ZX_OK) {
                    fuchsia_sysmem::wire::BufferCollectionInfo2& info =
                        result->buffer_collection_info;
                    if (!info.buffer_count) {
                        ALOGE(
                            "WaitForBuffersAllocated returned "
                            "invalid count: %d",
                            info.buffer_count);
                        abort();
                    }
                    vmo_handle = info.buffers[0].vmo.release();
                } else {
                    ALOGE("WaitForBuffersAllocated failed: %d %d", result.status(),
                          GET_STATUS_SAFE(result, status));
                    abort();
                }
            }

            collection->Close();
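            // Closing the collection channel only ends the sysmem
            // negotiation; the VMO handle released above stays valid and
            // owned by us.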

            zx::vmo vmo_copy;
            zx_status_t status = zx_handle_duplicate(vmo_handle, ZX_RIGHT_SAME_RIGHTS,
                                                     vmo_copy.reset_and_get_address());
            if (status != ZX_OK) {
                ALOGE("Failed to duplicate VMO: %d", status);
                abort();
            }

            if (pImageCreateInfo) {
                // Only device-local images need a color buffer created here;
                // for host-visible images, the color buffer is already created
                // when sysmem allocates memory. We use the |tiling| field of
                // the image creation info to determine whether the image uses
                // host-visible memory.
                bool isLinear = pImageCreateInfo->tiling == VK_IMAGE_TILING_LINEAR;
                if (!isLinear) {
                    fuchsia_hardware_goldfish::wire::ColorBufferFormatType format;
                    switch (pImageCreateInfo->format) {
                        case VK_FORMAT_B8G8R8A8_SINT:
                        case VK_FORMAT_B8G8R8A8_UNORM:
                        case VK_FORMAT_B8G8R8A8_SRGB:
                        case VK_FORMAT_B8G8R8A8_SNORM:
                        case VK_FORMAT_B8G8R8A8_SSCALED:
                        case VK_FORMAT_B8G8R8A8_USCALED:
                            format = fuchsia_hardware_goldfish::wire::ColorBufferFormatType::kBgra;
                            break;
                        case VK_FORMAT_R8G8B8A8_SINT:
                        case VK_FORMAT_R8G8B8A8_UNORM:
                        case VK_FORMAT_R8G8B8A8_SRGB:
                        case VK_FORMAT_R8G8B8A8_SNORM:
                        case VK_FORMAT_R8G8B8A8_SSCALED:
                        case VK_FORMAT_R8G8B8A8_USCALED:
                            format = fuchsia_hardware_goldfish::wire::ColorBufferFormatType::kRgba;
                            break;
                        case VK_FORMAT_R8_UNORM:
                        case VK_FORMAT_R8_UINT:
                        case VK_FORMAT_R8_USCALED:
                        case VK_FORMAT_R8_SNORM:
                        case VK_FORMAT_R8_SINT:
                        case VK_FORMAT_R8_SSCALED:
                        case VK_FORMAT_R8_SRGB:
                            format =
                                fuchsia_hardware_goldfish::wire::ColorBufferFormatType::kLuminance;
                            break;
                        case VK_FORMAT_R8G8_UNORM:
                        case VK_FORMAT_R8G8_UINT:
                        case VK_FORMAT_R8G8_USCALED:
                        case VK_FORMAT_R8G8_SNORM:
                        case VK_FORMAT_R8G8_SINT:
                        case VK_FORMAT_R8G8_SSCALED:
                        case VK_FORMAT_R8G8_SRGB:
                            format = fuchsia_hardware_goldfish::wire::ColorBufferFormatType::kRg;
                            break;
                        default:
                            ALOGE("Unsupported format: %d", pImageCreateInfo->format);
                            abort();
                    }

                    fidl::Arena arena;
                    fuchsia_hardware_goldfish::wire::CreateColorBuffer2Params createParams(arena);
                    createParams.set_width(pImageCreateInfo->extent.width)
                        .set_height(pImageCreateInfo->extent.height)
                        .set_format(format)
                        .set_memory_property(
                            fuchsia_hardware_goldfish::wire::kMemoryPropertyDeviceLocal);

                    auto result = mControlDevice->CreateColorBuffer2(std::move(vmo_copy),
                                                                     std::move(createParams));
                    if (!result.ok() || result->res != ZX_OK) {
                        if (result.ok() && result->res == ZX_ERR_ALREADY_EXISTS) {
                            ALOGD(
                                "CreateColorBuffer: color buffer already "
                                "exists\n");
                        } else {
                            ALOGE("CreateColorBuffer failed: %d:%d", result.status(),
                                  GET_STATUS_SAFE(result, res));
                            abort();
                        }
                    }
                }
            }

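            // For a dedicated buffer, register the VMO with the control
            // device as a host Buffer resource instead of a color buffer.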
            if (pBufferConstraintsInfo) {
                fidl::Arena arena;
                fuchsia_hardware_goldfish::wire::CreateBuffer2Params createParams(arena);
                createParams.set_size(arena, pBufferConstraintsInfo->createInfo.size)
                    .set_memory_property(
                        fuchsia_hardware_goldfish::wire::kMemoryPropertyDeviceLocal);

                auto result =
                    mControlDevice->CreateBuffer2(std::move(vmo_copy), std::move(createParams));
                if (!result.ok() || result->is_error()) {
                    ALOGE("CreateBuffer2 failed: %d:%d", result.status(),
                          GET_STATUS_SAFE(result, error_value()));
                    abort();
                }
            }
        } else {
            ALOGW(
                "Dedicated image / buffer not available. Cannot create "
                "BufferCollection to export VMOs.");
            return VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }
    }

    if (vmo_handle != ZX_HANDLE_INVALID) {
        zx::vmo vmo_copy;
        zx_status_t status =
            zx_handle_duplicate(vmo_handle, ZX_RIGHT_SAME_RIGHTS, vmo_copy.reset_and_get_address());
        if (status != ZX_OK) {
            ALOGE("Failed to duplicate VMO: %d", status);
            abort();
        }

        auto result = mControlDevice->GetBufferHandle(std::move(vmo_copy));
        if (!result.ok() || result->res != ZX_OK) {
            ALOGE("GetBufferHandle failed: %d:%d", result.status(), GET_STATUS_SAFE(result, res));
        } else {
            fuchsia_hardware_goldfish::wire::BufferHandleType handle_type = result->type;
            uint32_t buffer_handle = result->id;

            if (handle_type == fuchsia_hardware_goldfish::wire::BufferHandleType::kBuffer) {
                importBufferInfo.buffer = buffer_handle;
                vk_append_struct(&structChainIter, &importBufferInfo);
            } else {
                importCbInfo.colorBuffer = buffer_handle;
                vk_append_struct(&structChainIter, &importCbInfo);
            }
        }
    }
#endif

    VirtGpuBlobPtr colorBufferBlob = nullptr;
#if defined(__linux__) && !defined(VK_USE_PLATFORM_ANDROID_KHR)
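    // On Linux (non-Android), exportable dma-buf memory is backed by a
    // virtio-gpu blob resource that doubles as the host color buffer. An
    // illustrative (hypothetical) client-side allocation that reaches this
    // path might look like:
    //
    //   VkExportMemoryAllocateInfo exportInfo = {
    //       .sType = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO,
    //       .handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT};
    //   VkMemoryDedicatedAllocateInfo dedicated = {
    //       .sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO,
    //       .pNext = &exportInfo,
    //       .image = image};
    //   VkMemoryAllocateInfo allocInfo = {
    //       .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
    //       .pNext = &dedicated,
    //       .allocationSize = memReqs.size,
    //       .memoryTypeIndex = chosenIndex};
    //   vkAllocateMemory(device, &allocInfo, nullptr, &memory);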
    if (exportDmabuf) {
        VirtGpuDevice* instance = VirtGpuDevice::getInstance();
        // TODO: Any special action for VK_STRUCTURE_TYPE_WSI_MEMORY_ALLOCATE_INFO_MESA?
        // Can mark special state if needed, e.g.:
        //   const wsi_memory_allocate_info* wsiAllocateInfoPtr =
        //       vk_find_struct<wsi_memory_allocate_info>(pAllocateInfo);
        bool hasDedicatedImage =
            dedicatedAllocInfoPtr && (dedicatedAllocInfoPtr->image != VK_NULL_HANDLE);
        bool hasDedicatedBuffer =
            dedicatedAllocInfoPtr && (dedicatedAllocInfoPtr->buffer != VK_NULL_HANDLE);
        if (!hasDedicatedImage && !hasDedicatedBuffer) {
            ALOGE("%s: dma-buf exportable memory requires dedicated Image or Buffer information.\n",
                  __func__);
            return VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }

        if (hasDedicatedImage) {
            VkImageCreateInfo imageCreateInfo;
            {
                AutoLock<RecursiveLock> lock(mLock);

                auto it = info_VkImage.find(dedicatedAllocInfoPtr->image);
                if (it == info_VkImage.end()) return VK_ERROR_INITIALIZATION_FAILED;
                const auto& imageInfo = it->second;

                imageCreateInfo = imageInfo.createInfo;
            }
            // getVirglFormat() returns a negative value for unsupported
            // formats, so the result must be kept signed.
            int virglFormat = gfxstream::vk::getVirglFormat(imageCreateInfo.format);
            if (virglFormat < 0) {
                ALOGE("%s: Unsupported VK format for colorBuffer, vkFormat: 0x%x", __func__,
                      imageCreateInfo.format);
                return VK_ERROR_FORMAT_NOT_SUPPORTED;
            }
            colorBufferBlob = instance->createVirglBlob(imageCreateInfo.extent.width,
                                                        imageCreateInfo.extent.height, virglFormat);
            if (!colorBufferBlob) {
                ALOGE("%s: Failed to create colorBuffer resource for Image memory\n", __func__);
                return VK_ERROR_OUT_OF_DEVICE_MEMORY;
            }
            if (0 != colorBufferBlob->wait()) {
                ALOGE("%s: Failed to wait for colorBuffer resource for Image memory\n", __func__);
                return VK_ERROR_OUT_OF_DEVICE_MEMORY;
            }
        }

        if (hasDedicatedBuffer) {
            VkBufferCreateInfo bufferCreateInfo;
            {
                AutoLock<RecursiveLock> lock(mLock);

                auto it = info_VkBuffer.find(dedicatedAllocInfoPtr->buffer);
                if (it == info_VkBuffer.end()) return VK_ERROR_INITIALIZATION_FAILED;
                const auto& bufferInfo = it->second;
                bufferCreateInfo = bufferInfo.createInfo;
            }
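            // There is no 2D layout for plain buffer memory, so the blob is
            // sized as a single row of RGBA8 texels: width = size / 4 texels
            // at 4 bytes per texel (assumes size is a multiple of 4).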
            colorBufferBlob = instance->createVirglBlob(bufferCreateInfo.size / 4, 1,
                                                        VIRGL_FORMAT_R8G8B8A8_UNORM);
            if (!colorBufferBlob) {
                ALOGE("%s: Failed to create colorBuffer resource for Buffer memory\n", __func__);
                return VK_ERROR_OUT_OF_DEVICE_MEMORY;
            }
            if (0 != colorBufferBlob->wait()) {
                ALOGE("%s: Failed to wait for colorBuffer resource for Buffer memory\n", __func__);
                return VK_ERROR_OUT_OF_DEVICE_MEMORY;
            }
        }
    }

    if (importDmabuf) {
        VirtGpuExternalHandle importHandle = {};
        importHandle.osHandle = importFdInfoPtr->fd;
        importHandle.type = kMemHandleDmabuf;

        auto instance = VirtGpuDevice::getInstance();
        colorBufferBlob = instance->importBlob(importHandle);
        if (!colorBufferBlob) {
            ALOGE("%s: Failed to import colorBuffer resource\n", __func__);
            return VK_ERROR_OUT_OF_DEVICE_MEMORY;
        }
    }

    if (colorBufferBlob) {
        importCbInfo.colorBuffer = colorBufferBlob->getResourceHandle();
        vk_append_struct(&structChainIter, &importCbInfo);
    }
#endif

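    // Allocation strategy from here on: memory backed by an external
    // resource (AHB or colorBufferBlob) or that is not host visible is
    // allocated host-side as-is; host-visible Fuchsia VMOs are mapped into
    // the VMAR below; everything else is sub-allocated from a coherent
    // mapping in getCoherentMemory().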
    if (ahw || colorBufferBlob || !requestedMemoryIsHostVisible) {
        input_result =
            enc->vkAllocateMemory(device, &finalAllocInfo, pAllocator, pMemory, true /* do lock */);

        if (input_result != VK_SUCCESS) _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(input_result);

        setDeviceMemoryInfo(device, *pMemory, 0, nullptr, finalAllocInfo.memoryTypeIndex, ahw,
                            isImport, vmo_handle, colorBufferBlob);

        _RETURN_SCUCCESS_WITH_DEVICE_MEMORY_REPORT;
    }

#ifdef VK_USE_PLATFORM_FUCHSIA
    if (vmo_handle != ZX_HANDLE_INVALID) {
        input_result =
            enc->vkAllocateMemory(device, &finalAllocInfo, pAllocator, pMemory, true /* do lock */);
        if (input_result != VK_SUCCESS) _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(input_result);

        // Get VMO handle rights, and only use allowed rights to map the
        // host memory.
        zx_info_handle_basic_t handle_info;
        zx_status_t status = zx_object_get_info(vmo_handle, ZX_INFO_HANDLE_BASIC, &handle_info,
                                                sizeof(handle_info), nullptr, nullptr);
        if (status != ZX_OK) {
            ALOGE("%s: cannot get vmo object info: vmo = %u status: %d.", __func__, vmo_handle,
                  status);
            return VK_ERROR_OUT_OF_HOST_MEMORY;
        }

        zx_vm_option_t vm_permission = 0u;
        vm_permission |= (handle_info.rights & ZX_RIGHT_READ) ? ZX_VM_PERM_READ : 0;
        vm_permission |= (handle_info.rights & ZX_RIGHT_WRITE) ? ZX_VM_PERM_WRITE : 0;

        zx_vaddr_t addr;
        status = zx_vmar_map(zx_vmar_root_self(), vm_permission, 0, vmo_handle, 0,
                             finalAllocInfo.allocationSize, &addr);
        if (status != ZX_OK) {
            ALOGE("%s: cannot map vmar: status %d.", __func__, status);
            return VK_ERROR_OUT_OF_HOST_MEMORY;
        }

        setDeviceMemoryInfo(device, *pMemory, finalAllocInfo.allocationSize,
                            reinterpret_cast<uint8_t*>(addr), finalAllocInfo.memoryTypeIndex,
                            /*ahw=*/nullptr, isImport, vmo_handle, /*blobPtr=*/nullptr);
        return VK_SUCCESS;
    }
#endif

    // Host-visible memory with direct mapping
    VkResult result = getCoherentMemory(&finalAllocInfo, enc, device, pMemory);
    if (result != VK_SUCCESS) return result;

    _RETURN_SCUCCESS_WITH_DEVICE_MEMORY_REPORT;
}

void ResourceTracker::on_vkFreeMemory(void* context, VkDevice device, VkDeviceMemory memory,
                                      const VkAllocationCallbacks* pAllocator) {
    AutoLock<RecursiveLock> lock(mLock);

    auto it = info_VkDeviceMemory.find(memory);
    if (it == info_VkDeviceMemory.end()) return;
    auto& info = it->second;
    uint64_t memoryObjectId = (uint64_t)(void*)memory;
#ifdef VK_USE_PLATFORM_ANDROID_KHR
    if (info.ahw) {
        memoryObjectId = getAHardwareBufferId(info.ahw);
    }
#endif

    emitDeviceMemoryReport(info_VkDevice[device],
                           info.imported ? VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_UNIMPORT_EXT
                                         : VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_FREE_EXT,
                           memoryObjectId, 0 /* size */, VK_OBJECT_TYPE_DEVICE_MEMORY,
                           (uint64_t)(void*)memory);

#ifdef VK_USE_PLATFORM_FUCHSIA
    if (info.vmoHandle && info.ptr) {
        zx_status_t status = zx_vmar_unmap(
            zx_vmar_root_self(), reinterpret_cast<zx_vaddr_t>(info.ptr), info.allocationSize);
        if (status != ZX_OK) {
            ALOGE("%s: Cannot unmap ptr: status %d", __func__, status);
        }
        info.ptr = nullptr;
    }
#endif

    if (!info.coherentMemory) {
        lock.unlock();
        VkEncoder* enc = (VkEncoder*)context;
        enc->vkFreeMemory(device, memory, pAllocator, true /* do lock */);
        return;
    }

    auto coherentMemory = freeCoherentMemoryLocked(memory, info);

    // We have to release the lock before potentially freeing a
    // CoherentMemory, because that call goes into VkEncoder, which
    // must not be entered while the lock is held.
    lock.unlock();
    coherentMemory = nullptr;
}

VkResult ResourceTracker::on_vkMapMemory(void* context, VkResult host_result, VkDevice device,
                                         VkDeviceMemory memory, VkDeviceSize offset,
                                         VkDeviceSize size, VkMemoryMapFlags, void** ppData) {
    if (host_result != VK_SUCCESS) {
        ALOGE("%s: Host failed to map\n", __func__);
        return host_result;
    }

    AutoLock<RecursiveLock> lock(mLock);

    auto it = info_VkDeviceMemory.find(memory);
    if (it == info_VkDeviceMemory.end()) {
        ALOGE("%s: Could not find this device memory\n", __func__);
        return VK_ERROR_MEMORY_MAP_FAILED;
    }

    auto& info = it->second;

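    // Deferred-mapping path: the memory has a host blob id but no guest
    // mapping yet. Fetch the blob, create a mappable virtio-gpu resource
    // for it, and sub-allocate this VkDeviceMemory out of the resulting
    // coherent mapping.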
    if (info.blobId && !info.coherentMemory && !mCaps.params[kParamCreateGuestHandle]) {
        VkEncoder* enc = (VkEncoder*)context;
        VirtGpuBlobMappingPtr mapping;
        VirtGpuDevice* instance = VirtGpuDevice::getInstance();

        uint64_t coherentOffset;
        uint8_t* ptr;

        VkResult vkResult = enc->vkGetBlobGOOGLE(device, memory, false /* do lock */);
        if (vkResult != VK_SUCCESS) return vkResult;

        struct VirtGpuCreateBlob createBlob = {};
        createBlob.blobMem = kBlobMemHost3d;
        createBlob.flags = kBlobFlagMappable;
        createBlob.blobId = info.blobId;
        createBlob.size = info.coherentMemorySize;

        auto blob = instance->createBlob(createBlob);
        if (!blob) return VK_ERROR_OUT_OF_DEVICE_MEMORY;

        mapping = blob->createMapping();
        if (!mapping) return VK_ERROR_OUT_OF_DEVICE_MEMORY;

        auto coherentMemory =
            std::make_shared<CoherentMemory>(mapping, createBlob.size, device, memory);

        coherentMemory->subAllocate(info.allocationSize, &ptr, coherentOffset);

        info.coherentMemoryOffset = coherentOffset;
        info.coherentMemory = coherentMemory;
        info.ptr = ptr;
    }

    if (!info.ptr) {
        ALOGE("%s: ptr null\n", __func__);
        return VK_ERROR_MEMORY_MAP_FAILED;
    }

    if (size != VK_WHOLE_SIZE && (offset + size > info.allocationSize)) {
        ALOGE(
            "%s: size is too big. alloc size 0x%llx while we wanted offset 0x%llx size 0x%llx "
            "total 0x%llx\n",
            __func__, (unsigned long long)info.allocationSize, (unsigned long long)offset,
            (unsigned long long)size, (unsigned long long)(offset + size));
        return VK_ERROR_MEMORY_MAP_FAILED;
    }

    *ppData = info.ptr + offset;

    return host_result;
}

void ResourceTracker::on_vkUnmapMemory(void*, VkDevice, VkDeviceMemory) {
    // no-op
}

void ResourceTracker::transformImageMemoryRequirements2ForGuest(VkImage image,
                                                                VkMemoryRequirements2* reqs2) {
    AutoLock<RecursiveLock> lock(mLock);

    auto it = info_VkImage.find(image);
    if (it == info_VkImage.end()) return;

    auto& info = it->second;

    if (!info.external || !info.externalCreateInfo.handleTypes) {
        transformImageMemoryRequirementsForGuestLocked(image, &reqs2->memoryRequirements);
        return;
    }

    transformImageMemoryRequirementsForGuestLocked(image, &reqs2->memoryRequirements);

    VkMemoryDedicatedRequirements* dedicatedReqs =
        vk_find_struct<VkMemoryDedicatedRequirements>(reqs2);

    if (!dedicatedReqs) return;

    transformExternalResourceMemoryDedicatedRequirementsForGuest(dedicatedReqs);
}

void ResourceTracker::transformBufferMemoryRequirements2ForGuest(VkBuffer buffer,
                                                                 VkMemoryRequirements2* reqs2) {
    AutoLock<RecursiveLock> lock(mLock);

    auto it = info_VkBuffer.find(buffer);
    if (it == info_VkBuffer.end()) return;

    auto& info = it->second;

    if (!info.external || !info.externalCreateInfo.handleTypes) {
        return;
    }

    VkMemoryDedicatedRequirements* dedicatedReqs =
        vk_find_struct<VkMemoryDedicatedRequirements>(reqs2);

    if (!dedicatedReqs) return;

    transformExternalResourceMemoryDedicatedRequirementsForGuest(dedicatedReqs);
}

VkResult ResourceTracker::on_vkCreateImage(void* context, VkResult, VkDevice device,
                                           const VkImageCreateInfo* pCreateInfo,
                                           const VkAllocationCallbacks* pAllocator,
                                           VkImage* pImage) {
    VkEncoder* enc = (VkEncoder*)context;

    VkImageCreateInfo localCreateInfo = vk_make_orphan_copy(*pCreateInfo);
    if (localCreateInfo.sharingMode != VK_SHARING_MODE_CONCURRENT) {
        localCreateInfo.queueFamilyIndexCount = 0;
        localCreateInfo.pQueueFamilyIndices = nullptr;
    }

    vk_struct_chain_iterator structChainIter = vk_make_chain_iterator(&localCreateInfo);
    VkExternalMemoryImageCreateInfo localExtImgCi;

    const VkExternalMemoryImageCreateInfo* extImgCiPtr =
        vk_find_struct<VkExternalMemoryImageCreateInfo>(pCreateInfo);

    if (extImgCiPtr) {
        localExtImgCi = vk_make_orphan_copy(*extImgCiPtr);
        vk_append_struct(&structChainIter, &localExtImgCi);
    }

    bool isWsiImage = false;

#if defined(__linux__) && !defined(VK_USE_PLATFORM_ANDROID_KHR)
    if (extImgCiPtr &&
        (extImgCiPtr->handleTypes & VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT)) {
        // Assume that a handleType including DMA_BUF_BIT indicates creation
        // of an image for WSI use; no other external dma-buf usage is
        // supported.
        isWsiImage = true;
        // Must be linear. Otherwise querying stride and other properties
        // can be implementation-dependent.
        localCreateInfo.tiling = VK_IMAGE_TILING_LINEAR;
        if (gfxstream::vk::getVirglFormat(localCreateInfo.format) < 0) {
            localCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
        }
    }
#endif

#ifdef VK_USE_PLATFORM_ANDROID_KHR
    VkNativeBufferANDROID localAnb;
    const VkNativeBufferANDROID* anbInfoPtr = vk_find_struct<VkNativeBufferANDROID>(pCreateInfo);
    if (anbInfoPtr) {
        localAnb = vk_make_orphan_copy(*anbInfoPtr);
        vk_append_struct(&structChainIter, &localAnb);
    }

    VkExternalFormatANDROID localExtFormatAndroid;
    const VkExternalFormatANDROID* extFormatAndroidPtr =
        vk_find_struct<VkExternalFormatANDROID>(pCreateInfo);
    if (extFormatAndroidPtr) {
        localExtFormatAndroid = vk_make_orphan_copy(*extFormatAndroidPtr);

        // Do not append VkExternalFormatANDROID; instead, replace the local
        // image create info's format with the corresponding Vulkan format.
        if (extFormatAndroidPtr->externalFormat) {
            localCreateInfo.format = vk_format_from_android(extFormatAndroidPtr->externalFormat);
            if (localCreateInfo.format == VK_FORMAT_UNDEFINED)
                return VK_ERROR_VALIDATION_FAILED_EXT;
        }
    }
#endif

#ifdef VK_USE_PLATFORM_FUCHSIA
    const VkBufferCollectionImageCreateInfoFUCHSIA* extBufferCollectionPtr =
        vk_find_struct<VkBufferCollectionImageCreateInfoFUCHSIA>(pCreateInfo);

    bool isSysmemBackedMemory = false;

    if (extImgCiPtr &&
        (extImgCiPtr->handleTypes & VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA)) {
        isSysmemBackedMemory = true;
    }

    if (extBufferCollectionPtr) {
        const auto& collection =
            *reinterpret_cast<fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(
                extBufferCollectionPtr->collection);
        uint32_t index = extBufferCollectionPtr->index;
        zx::vmo vmo;

        fuchsia_sysmem::wire::BufferCollectionInfo2 info;

        auto result = collection->WaitForBuffersAllocated();
        if (result.ok() && result->status == ZX_OK) {
            info = std::move(result->buffer_collection_info);
            if (index < info.buffer_count && info.settings.has_image_format_constraints) {
                vmo = std::move(info.buffers[index].vmo);
            }
        } else {
            ALOGE("WaitForBuffersAllocated failed: %d %d", result.status(),
                  GET_STATUS_SAFE(result, status));
        }

        if (vmo.is_valid()) {
            zx::vmo vmo_dup;
            if (zx_status_t status = vmo.duplicate(ZX_RIGHT_SAME_RIGHTS, &vmo_dup);
                status != ZX_OK) {
                ALOGE("%s: zx_vmo_duplicate failed: %d", __func__, status);
                abort();
            }

            auto buffer_handle_result = mControlDevice->GetBufferHandle(std::move(vmo_dup));
            if (!buffer_handle_result.ok()) {
                ALOGE("%s: GetBufferHandle FIDL error: %d", __func__,
                      buffer_handle_result.status());
                abort();
            }
            if (buffer_handle_result.value().res == ZX_OK) {
                // Buffer handle already exists.
                // If it is a ColorBuffer, no-op; otherwise return an error.
                if (buffer_handle_result.value().type !=
                    fuchsia_hardware_goldfish::wire::BufferHandleType::kColorBuffer) {
                    ALOGE("%s: BufferHandle %u is not a ColorBuffer", __func__,
                          buffer_handle_result.value().id);
                    return VK_ERROR_OUT_OF_HOST_MEMORY;
                }
            } else if (buffer_handle_result.value().res == ZX_ERR_NOT_FOUND) {
                // Buffer handle not found. Create a ColorBuffer based on the
                // buffer settings.
                auto format = info.settings.image_format_constraints.pixel_format.type ==
                                      fuchsia_sysmem::wire::PixelFormatType::kR8G8B8A8
                                  ? fuchsia_hardware_goldfish::wire::ColorBufferFormatType::kRgba
                                  : fuchsia_hardware_goldfish::wire::ColorBufferFormatType::kBgra;

                uint32_t memory_property =
                    info.settings.buffer_settings.heap ==
                            fuchsia_sysmem::wire::HeapType::kGoldfishDeviceLocal
                        ? fuchsia_hardware_goldfish::wire::kMemoryPropertyDeviceLocal
                        : fuchsia_hardware_goldfish::wire::kMemoryPropertyHostVisible;

                fidl::Arena arena;
                fuchsia_hardware_goldfish::wire::CreateColorBuffer2Params createParams(arena);
                createParams.set_width(info.settings.image_format_constraints.min_coded_width)
                    .set_height(info.settings.image_format_constraints.min_coded_height)
                    .set_format(format)
                    .set_memory_property(memory_property);

                auto result =
                    mControlDevice->CreateColorBuffer2(std::move(vmo), std::move(createParams));
                if (result.ok() && result->res == ZX_ERR_ALREADY_EXISTS) {
                    ALOGD("CreateColorBuffer: color buffer already exists\n");
                } else if (!result.ok() || result->res != ZX_OK) {
                    ALOGE("CreateColorBuffer failed: %d:%d", result.status(),
                          GET_STATUS_SAFE(result, res));
                }
            }

            if (info.settings.buffer_settings.heap ==
                fuchsia_sysmem::wire::HeapType::kGoldfishHostVisible) {
                ALOGD(
                    "%s: Image uses host visible memory heap; set tiling "
                    "to linear to match host ImageCreateInfo",
                    __func__);
                localCreateInfo.tiling = VK_IMAGE_TILING_LINEAR;
            }
        }
        isSysmemBackedMemory = true;
    }

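    // Sysmem may pick a pixel format other than the one originally
    // requested, so mark sysmem-backed images mutable to keep views with a
    // different (compatible) format legal.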
    if (isSysmemBackedMemory) {
        localCreateInfo.flags |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
    }
#endif

    VkResult res;
    VkMemoryRequirements memReqs = {};

    if (supportsCreateResourcesWithRequirements()) {
        res = enc->vkCreateImageWithRequirementsGOOGLE(device, &localCreateInfo, pAllocator, pImage,
                                                       &memReqs, true /* do lock */);
    } else {
        res = enc->vkCreateImage(device, &localCreateInfo, pAllocator, pImage, true /* do lock */);
    }

    if (res != VK_SUCCESS) return res;

    AutoLock<RecursiveLock> lock(mLock);

    auto it = info_VkImage.find(*pImage);
    if (it == info_VkImage.end()) return VK_ERROR_INITIALIZATION_FAILED;

    auto& info = it->second;

    info.device = device;
    info.createInfo = *pCreateInfo;
    info.createInfo.pNext = nullptr;

#ifdef VK_USE_PLATFORM_ANDROID_KHR
    if (extFormatAndroidPtr && extFormatAndroidPtr->externalFormat) {
        info.hasExternalFormat = true;
        info.androidFormat = extFormatAndroidPtr->externalFormat;
    }
#endif  // VK_USE_PLATFORM_ANDROID_KHR

    if (supportsCreateResourcesWithRequirements()) {
        info.baseRequirementsKnown = true;
    }

    if (extImgCiPtr) {
        info.external = true;
        info.externalCreateInfo = *extImgCiPtr;
    }

#ifdef VK_USE_PLATFORM_FUCHSIA
    if (isSysmemBackedMemory) {
        info.isSysmemBackedMemory = true;
    }
#endif

    info.isWsiImage = isWsiImage;

// TODO: Delete the `protocolVersion` check once goldfish drivers are gone.
#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
    if (mCaps.vulkanCapset.colorBufferMemoryIndex == 0xFFFFFFFF) {
        mCaps.vulkanCapset.colorBufferMemoryIndex = getColorBufferMemoryIndex(context, device);
    }
    if (isWsiImage ||
        (extImgCiPtr && (extImgCiPtr->handleTypes &
                         VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID))) {
        updateMemoryTypeBits(&memReqs.memoryTypeBits, mCaps.vulkanCapset.colorBufferMemoryIndex);
    }
#endif

    if (info.baseRequirementsKnown) {
        transformImageMemoryRequirementsForGuestLocked(*pImage, &memReqs);
        info.baseRequirements = memReqs;
    }
    return res;
}

VkResult ResourceTracker::on_vkCreateSamplerYcbcrConversion(
    void* context, VkResult, VkDevice device, const VkSamplerYcbcrConversionCreateInfo* pCreateInfo,
    const VkAllocationCallbacks* pAllocator, VkSamplerYcbcrConversion* pYcbcrConversion) {
    VkSamplerYcbcrConversionCreateInfo localCreateInfo = vk_make_orphan_copy(*pCreateInfo);

#ifdef VK_USE_PLATFORM_ANDROID_KHR
    const VkExternalFormatANDROID* extFormatAndroidPtr =
        vk_find_struct<VkExternalFormatANDROID>(pCreateInfo);
    if (extFormatAndroidPtr) {
        if (extFormatAndroidPtr->externalFormat == AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM) {
            // We don't support external formats on the host, and RGB565 fails
            // in the CtsGraphicsTestCases
            // android.graphics.cts.BasicVulkanGpuTest when passed as an
            // external format. We may consider doing this for all external
            // formats. See b/134771579.
            *pYcbcrConversion = VK_YCBCR_CONVERSION_DO_NOTHING;
            return VK_SUCCESS;
        } else if (extFormatAndroidPtr->externalFormat) {
            localCreateInfo.format = vk_format_from_android(extFormatAndroidPtr->externalFormat);
        }
    }
#endif

    VkEncoder* enc = (VkEncoder*)context;
    VkResult res = enc->vkCreateSamplerYcbcrConversion(device, &localCreateInfo, pAllocator,
                                                       pYcbcrConversion, true /* do lock */);

    if (*pYcbcrConversion == VK_YCBCR_CONVERSION_DO_NOTHING) {
        ALOGE(
            "FATAL: vkCreateSamplerYcbcrConversion returned a reserved value "
            "(VK_YCBCR_CONVERSION_DO_NOTHING)");
        abort();
    }
    return res;
}

void ResourceTracker::on_vkDestroySamplerYcbcrConversion(void* context, VkDevice device,
                                                         VkSamplerYcbcrConversion ycbcrConversion,
                                                         const VkAllocationCallbacks* pAllocator) {
    VkEncoder* enc = (VkEncoder*)context;
    if (ycbcrConversion != VK_YCBCR_CONVERSION_DO_NOTHING) {
        enc->vkDestroySamplerYcbcrConversion(device, ycbcrConversion, pAllocator,
                                             true /* do lock */);
    }
}

VkResult ResourceTracker::on_vkCreateSamplerYcbcrConversionKHR(
    void* context, VkResult, VkDevice device, const VkSamplerYcbcrConversionCreateInfo* pCreateInfo,
    const VkAllocationCallbacks* pAllocator, VkSamplerYcbcrConversion* pYcbcrConversion) {
    VkSamplerYcbcrConversionCreateInfo localCreateInfo = vk_make_orphan_copy(*pCreateInfo);

#if defined(VK_USE_PLATFORM_ANDROID_KHR)
    const VkExternalFormatANDROID* extFormatAndroidPtr =
        vk_find_struct<VkExternalFormatANDROID>(pCreateInfo);
    if (extFormatAndroidPtr) {
        if (extFormatAndroidPtr->externalFormat == AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM) {
            // We don't support external formats on the host, and RGB565 fails
            // in the CtsGraphicsTestCases
            // android.graphics.cts.BasicVulkanGpuTest when passed as an
            // external format. We may consider doing this for all external
            // formats. See b/134771579.
            *pYcbcrConversion = VK_YCBCR_CONVERSION_DO_NOTHING;
            return VK_SUCCESS;
        } else if (extFormatAndroidPtr->externalFormat) {
            localCreateInfo.format = vk_format_from_android(extFormatAndroidPtr->externalFormat);
        }
    }
#endif

    VkEncoder* enc = (VkEncoder*)context;
    VkResult res = enc->vkCreateSamplerYcbcrConversionKHR(device, &localCreateInfo, pAllocator,
                                                          pYcbcrConversion, true /* do lock */);

    if (*pYcbcrConversion == VK_YCBCR_CONVERSION_DO_NOTHING) {
        ALOGE(
            "FATAL: vkCreateSamplerYcbcrConversionKHR returned a reserved value "
            "(VK_YCBCR_CONVERSION_DO_NOTHING)");
        abort();
    }
    return res;
}

void ResourceTracker::on_vkDestroySamplerYcbcrConversionKHR(
    void* context, VkDevice device, VkSamplerYcbcrConversion ycbcrConversion,
    const VkAllocationCallbacks* pAllocator) {
    VkEncoder* enc = (VkEncoder*)context;
    if (ycbcrConversion != VK_YCBCR_CONVERSION_DO_NOTHING) {
        enc->vkDestroySamplerYcbcrConversionKHR(device, ycbcrConversion, pAllocator,
                                                true /* do lock */);
    }
}

VkResult ResourceTracker::on_vkCreateSampler(void* context, VkResult, VkDevice device,
                                             const VkSamplerCreateInfo* pCreateInfo,
                                             const VkAllocationCallbacks* pAllocator,
                                             VkSampler* pSampler) {
    VkSamplerCreateInfo localCreateInfo = vk_make_orphan_copy(*pCreateInfo);
    vk_struct_chain_iterator structChainIter = vk_make_chain_iterator(&localCreateInfo);

#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(VK_USE_PLATFORM_FUCHSIA)
    VkSamplerYcbcrConversionInfo localVkSamplerYcbcrConversionInfo;
    const VkSamplerYcbcrConversionInfo* samplerYcbcrConversionInfo =
        vk_find_struct<VkSamplerYcbcrConversionInfo>(pCreateInfo);
    if (samplerYcbcrConversionInfo) {
        if (samplerYcbcrConversionInfo->conversion != VK_YCBCR_CONVERSION_DO_NOTHING) {
            localVkSamplerYcbcrConversionInfo = vk_make_orphan_copy(*samplerYcbcrConversionInfo);
            vk_append_struct(&structChainIter, &localVkSamplerYcbcrConversionInfo);
        }
    }

    VkSamplerCustomBorderColorCreateInfoEXT localVkSamplerCustomBorderColorCreateInfo;
    const VkSamplerCustomBorderColorCreateInfoEXT* samplerCustomBorderColorCreateInfo =
        vk_find_struct<VkSamplerCustomBorderColorCreateInfoEXT>(pCreateInfo);
    if (samplerCustomBorderColorCreateInfo) {
        localVkSamplerCustomBorderColorCreateInfo =
            vk_make_orphan_copy(*samplerCustomBorderColorCreateInfo);
        vk_append_struct(&structChainIter, &localVkSamplerCustomBorderColorCreateInfo);
    }
#endif

    VkEncoder* enc = (VkEncoder*)context;
    return enc->vkCreateSampler(device, &localCreateInfo, pAllocator, pSampler, true /* do lock */);
}

void ResourceTracker::on_vkGetPhysicalDeviceExternalFenceProperties(
    void* context, VkPhysicalDevice physicalDevice,
    const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo,
    VkExternalFenceProperties* pExternalFenceProperties) {
    (void)context;
    (void)physicalDevice;

    pExternalFenceProperties->exportFromImportedHandleTypes = 0;
    pExternalFenceProperties->compatibleHandleTypes = 0;
    pExternalFenceProperties->externalFenceFeatures = 0;

    bool syncFd = pExternalFenceInfo->handleType & VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;

    if (!syncFd) {
        return;
    }

#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
    pExternalFenceProperties->exportFromImportedHandleTypes =
        VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
    pExternalFenceProperties->compatibleHandleTypes = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
    pExternalFenceProperties->externalFenceFeatures =
        VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT | VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT;
#endif
}

void ResourceTracker::on_vkGetPhysicalDeviceExternalFencePropertiesKHR(
    void* context, VkPhysicalDevice physicalDevice,
    const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo,
    VkExternalFenceProperties* pExternalFenceProperties) {
    on_vkGetPhysicalDeviceExternalFenceProperties(context, physicalDevice, pExternalFenceInfo,
                                                  pExternalFenceProperties);
}

VkResult ResourceTracker::on_vkCreateFence(void* context, VkResult input_result, VkDevice device,
                                           const VkFenceCreateInfo* pCreateInfo,
                                           const VkAllocationCallbacks* pAllocator,
                                           VkFence* pFence) {
    VkEncoder* enc = (VkEncoder*)context;
    VkFenceCreateInfo finalCreateInfo = *pCreateInfo;

    const VkExportFenceCreateInfo* exportFenceInfoPtr =
        vk_find_struct<VkExportFenceCreateInfo>(pCreateInfo);

#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
    bool exportSyncFd = exportFenceInfoPtr && (exportFenceInfoPtr->handleTypes &
                                               VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT);
#endif
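    // An illustrative (hypothetical) client-side sequence that exercises the
    // sync-fd export path below:
    //
    //   VkExportFenceCreateInfo exportInfo = {
    //       .sType = VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO,
    //       .handleTypes = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT};
    //   VkFenceCreateInfo fenceCi = {.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
    //                                .pNext = &exportInfo};
    //   vkCreateFence(device, &fenceCi, nullptr, &fence);
    //   vkQueueSubmit(queue, 1, &submitInfo, fence);
    //   VkFenceGetFdInfoKHR getFdInfo = {
    //       .sType = VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR,
    //       .fence = fence,
    //       .handleType = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT};
    //   int fd = -1;
    //   vkGetFenceFdKHR(device, &getFdInfo, &fd);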

    input_result =
        enc->vkCreateFence(device, &finalCreateInfo, pAllocator, pFence, true /* do lock */);

    if (input_result != VK_SUCCESS) return input_result;

#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
    if (exportSyncFd) {
        if (!mFeatureInfo->hasVirtioGpuNativeSync) {
            ALOGV("%s: ensure sync device\n", __func__);
            ensureSyncDeviceFd();
        }

        ALOGV("%s: getting fence info\n", __func__);
        AutoLock<RecursiveLock> lock(mLock);
        auto it = info_VkFence.find(*pFence);

        if (it == info_VkFence.end()) return VK_ERROR_INITIALIZATION_FAILED;

        auto& info = it->second;

        info.external = true;
        info.exportFenceCreateInfo = *exportFenceInfoPtr;
        ALOGV("%s: info set (fence still -1). fence: %p\n", __func__, (void*)(*pFence));
        // syncFd is still -1 because we expect the user to explicitly
        // export it via vkGetFenceFdKHR.
    }
#endif

    return input_result;
}

void ResourceTracker::on_vkDestroyFence(void* context, VkDevice device, VkFence fence,
                                        const VkAllocationCallbacks* pAllocator) {
    VkEncoder* enc = (VkEncoder*)context;
    enc->vkDestroyFence(device, fence, pAllocator, true /* do lock */);
}

VkResult ResourceTracker::on_vkResetFences(void* context, VkResult, VkDevice device,
                                           uint32_t fenceCount, const VkFence* pFences) {
    VkEncoder* enc = (VkEncoder*)context;
    VkResult res = enc->vkResetFences(device, fenceCount, pFences, true /* do lock */);

    if (res != VK_SUCCESS) return res;

    if (!fenceCount) return res;

    // Permanence: temporary.
    // On fence reset, close the fence fd and act like we need to
    // vkGetFenceFdKHR/vkImportFenceFdKHR again.
    AutoLock<RecursiveLock> lock(mLock);
    for (uint32_t i = 0; i < fenceCount; ++i) {
        VkFence fence = pFences[i];
        auto it = info_VkFence.find(fence);
        if (it == info_VkFence.end()) continue;
        auto& info = it->second;
        if (!info.external) continue;

#if GFXSTREAM_ENABLE_GUEST_GOLDFISH
        if (info.syncFd >= 0) {
            ALOGV("%s: resetting fence. make fd -1\n", __func__);
            goldfish_sync_signal(info.syncFd);
            auto* syncHelper =
                ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->syncHelper();
            syncHelper->close(info.syncFd);
            info.syncFd = -1;
        }
#endif
    }

    return res;
}
VkResult ResourceTracker::on_vkImportFenceFdKHR(void* context, VkResult, VkDevice device,
                                                const VkImportFenceFdInfoKHR* pImportFenceFdInfo) {
    (void)context;
    (void)device;
    (void)pImportFenceFdInfo;

    // Transference: copy,
    // meaning dup() the incoming fd.

    bool hasFence = pImportFenceFdInfo->fence != VK_NULL_HANDLE;

    if (!hasFence) return VK_ERROR_OUT_OF_HOST_MEMORY;

#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)

    bool syncFdImport = pImportFenceFdInfo->handleType & VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;

    if (!syncFdImport) {
        ALOGV("%s: VK_ERROR_OUT_OF_HOST_MEMORY: no sync fd import\n", __func__);
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }

    AutoLock<RecursiveLock> lock(mLock);
    auto it = info_VkFence.find(pImportFenceFdInfo->fence);
    if (it == info_VkFence.end()) {
        ALOGV("%s: VK_ERROR_OUT_OF_HOST_MEMORY: no fence info\n", __func__);
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }

    auto& info = it->second;

    auto* syncHelper = ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->syncHelper();
#if GFXSTREAM_ENABLE_GUEST_GOLDFISH
    if (info.syncFd >= 0) {
        ALOGV("%s: previous sync fd exists, close it\n", __func__);
        goldfish_sync_signal(info.syncFd);
        syncHelper->close(info.syncFd);
    }
#endif

    if (pImportFenceFdInfo->fd < 0) {
        ALOGV("%s: import -1, set to -1 and exit\n", __func__);
        info.syncFd = -1;
    } else {
        ALOGV("%s: import actual fd, dup and close()\n", __func__);
        info.syncFd = syncHelper->dup(pImportFenceFdInfo->fd);
        syncHelper->close(pImportFenceFdInfo->fd);
    }
    return VK_SUCCESS;
#else
    return VK_ERROR_OUT_OF_HOST_MEMORY;
#endif
}

VkResult ResourceTracker::on_vkGetFenceFdKHR(void* context, VkResult, VkDevice device,
                                             const VkFenceGetFdInfoKHR* pGetFdInfo, int* pFd) {
    // Export operation: check the fence status first; bail out on device
    // loss, otherwise create (or queue) a sync fd for the fence, even if
    // the fence is already signaled (see below).

    VkEncoder* enc = (VkEncoder*)context;

    bool hasFence = pGetFdInfo->fence != VK_NULL_HANDLE;

    if (!hasFence) {
        ALOGV("%s: VK_ERROR_OUT_OF_HOST_MEMORY: no fence\n", __func__);
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }

#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
    bool syncFdExport = pGetFdInfo->handleType & VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;

    if (!syncFdExport) {
        ALOGV("%s: VK_ERROR_OUT_OF_HOST_MEMORY: no sync fd fence\n", __func__);
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }

    VkResult currentFenceStatus =
        enc->vkGetFenceStatus(device, pGetFdInfo->fence, true /* do lock */);

    if (VK_ERROR_DEVICE_LOST == currentFenceStatus) {  // Other error
        ALOGV("%s: VK_ERROR_DEVICE_LOST: Other error\n", __func__);
        *pFd = -1;
        return VK_ERROR_DEVICE_LOST;
    }

    if (VK_NOT_READY == currentFenceStatus || VK_SUCCESS == currentFenceStatus) {
        // Fence is valid. We also create a new sync fd for a signaled
        // fence, because ANGLE will use the returned fd directly to
        // implement eglDupNativeFenceFDANDROID, where -1 is only returned
        // when an error occurs.
        AutoLock<RecursiveLock> lock(mLock);

        auto it = info_VkFence.find(pGetFdInfo->fence);
        if (it == info_VkFence.end()) {
            ALOGV("%s: VK_ERROR_OUT_OF_HOST_MEMORY: no fence info\n", __func__);
            return VK_ERROR_OUT_OF_HOST_MEMORY;
        }

        auto& info = it->second;

        bool syncFdCreated = info.external && (info.exportFenceCreateInfo.handleTypes &
                                               VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT);

        if (!syncFdCreated) {
            ALOGV("%s: VK_ERROR_OUT_OF_HOST_MEMORY: no sync fd created\n", __func__);
            return VK_ERROR_OUT_OF_HOST_MEMORY;
        }

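        // Two export backends: with virtio-gpu native sync, ask the host to
        // create a fence object for the host-side VkFence handle and return
        // its fd; otherwise fall back to the goldfish_sync device, which
        // queues work that signals a fresh sync fd.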
        if (mFeatureInfo->hasVirtioGpuNativeSync) {
            VkResult result;
            int64_t osHandle;
            uint64_t hostFenceHandle = get_host_u64_VkFence(pGetFdInfo->fence);

            result = createFence(device, hostFenceHandle, osHandle);
            if (result != VK_SUCCESS) return result;

            *pFd = osHandle;
        } else {
#if GFXSTREAM_ENABLE_GUEST_GOLDFISH
            goldfish_sync_queue_work(
                mSyncDeviceFd, get_host_u64_VkFence(pGetFdInfo->fence) /* the handle */,
                GOLDFISH_SYNC_VULKAN_SEMAPHORE_SYNC /* thread handle (doubling as type field) */,
                pFd);
#endif
        }

        // relinquish ownership
        info.syncFd = -1;
        ALOGV("%s: got fd: %d\n", __func__, *pFd);
        return VK_SUCCESS;
    }
    return VK_ERROR_DEVICE_LOST;
#else
    return VK_ERROR_OUT_OF_HOST_MEMORY;
#endif
}

VkResult ResourceTracker::on_vkWaitForFences(void* context, VkResult, VkDevice device,
                                             uint32_t fenceCount, const VkFence* pFences,
                                             VkBool32 waitAll, uint64_t timeout) {
    VkEncoder* enc = (VkEncoder*)context;

#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
    std::vector<VkFence> fencesExternal;
    std::vector<int> fencesExternalWaitFds;
    std::vector<VkFence> fencesNonExternal;

4678     AutoLock<RecursiveLock> lock(mLock);
4679 
4680     for (uint32_t i = 0; i < fenceCount; ++i) {
4681         auto it = info_VkFence.find(pFences[i]);
4682         if (it == info_VkFence.end()) continue;
4683         const auto& info = it->second;
4684         if (info.syncFd >= 0) {
4685             fencesExternal.push_back(pFences[i]);
4686             fencesExternalWaitFds.push_back(info.syncFd);
4687         } else {
4688             fencesNonExternal.push_back(pFences[i]);
4689         }
4690     }
4691 
4692     lock.unlock();
4693 
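    // Strategy: fences backed by an external sync fd are waited on through the
    // guest sync helper, while host-only fences are waited on with a single
    // vkWaitForFences on the host. When both kinds appear together, the waits
    // run as work pool tasks so waitAll/waitAny semantics apply across them.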
4694     if (fencesExternal.empty()) {
4695         // No need for work pool, just wait with host driver.
4696         return enc->vkWaitForFences(device, fenceCount, pFences, waitAll, timeout,
4697                                     true /* do lock */);
4698     } else {
4699         // Depending on wait any or wait all,
4700         // schedule a wait group with waitAny/waitAll
4701         std::vector<WorkPool::Task> tasks;
4702 
4703         ALOGV("%s: scheduling ext waits\n", __func__);
4704 
4705         for (auto fd : fencesExternalWaitFds) {
4706             ALOGV("%s: wait on %d\n", __func__, fd);
4707             tasks.push_back([fd] {
4708                 auto* syncHelper =
4709                     ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->syncHelper();
4710                 syncHelper->wait(fd, 3000);
4711                 ALOGV("done waiting on fd %d\n", fd);
4712             });
4713         }
4714 
4715         if (!fencesNonExternal.empty()) {
4716             tasks.push_back(
4717                 [this, fencesNonExternal /* copy of vector */, device, waitAll, timeout] {
4718                     auto hostConn = ResourceTracker::threadingCallbacks.hostConnectionGetFunc();
4719                     auto vkEncoder = ResourceTracker::threadingCallbacks.vkEncoderGetFunc(hostConn);
4720                     ALOGV("%s: vkWaitForFences to host\n", __func__);
4721                     vkEncoder->vkWaitForFences(device, fencesNonExternal.size(),
4722                                                fencesNonExternal.data(), waitAll, timeout,
4723                                                true /* do lock */);
4724                 });
4725         }
4726 
4727         auto waitGroupHandle = mWorkPool.schedule(tasks);
4728 
4729         // Convert timeout to microseconds from nanoseconds
4730         bool waitRes = false;
4731         if (waitAll) {
4732             waitRes = mWorkPool.waitAll(waitGroupHandle, timeout / 1000);
4733         } else {
4734             waitRes = mWorkPool.waitAny(waitGroupHandle, timeout / 1000);
4735         }
4736 
4737         if (waitRes) {
4738             ALOGV("%s: VK_SUCCESS\n", __func__);
4739             return VK_SUCCESS;
4740         } else {
4741             ALOGV("%s: VK_TIMEOUT\n", __func__);
4742             return VK_TIMEOUT;
4743         }
4744     }
4745 #else
4746     return enc->vkWaitForFences(device, fenceCount, pFences, waitAll, timeout, true /* do lock */);
4747 #endif
4748 }
4749 
4750 VkResult ResourceTracker::on_vkCreateDescriptorPool(void* context, VkResult, VkDevice device,
4751                                                     const VkDescriptorPoolCreateInfo* pCreateInfo,
4752                                                     const VkAllocationCallbacks* pAllocator,
4753                                                     VkDescriptorPool* pDescriptorPool) {
4754     VkEncoder* enc = (VkEncoder*)context;
4755 
4756     VkResult res = enc->vkCreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool,
4757                                                true /* do lock */);
4758 
4759     if (res != VK_SUCCESS) return res;
4760 
4761     VkDescriptorPool pool = *pDescriptorPool;
4762 
4763     struct goldfish_VkDescriptorPool* dp = as_goldfish_VkDescriptorPool(pool);
4764     dp->allocInfo = new DescriptorPoolAllocationInfo;
4765     dp->allocInfo->device = device;
4766     dp->allocInfo->createFlags = pCreateInfo->flags;
4767     dp->allocInfo->maxSets = pCreateInfo->maxSets;
4768     dp->allocInfo->usedSets = 0;
4769 
4770     for (uint32_t i = 0; i < pCreateInfo->poolSizeCount; ++i) {
4771         dp->allocInfo->descriptorCountInfo.push_back({
4772             pCreateInfo->pPoolSizes[i].type, pCreateInfo->pPoolSizes[i].descriptorCount,
4773             0, /* used */
4774         });
4775     }
4776 
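    // With batched descriptor set updates, sets are virtualized guest-side:
    // pre-collect the host pool IDs now so later allocations from this pool
    // can be satisfied without a host round trip.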
4777     if (mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate) {
4778         std::vector<uint64_t> poolIds(pCreateInfo->maxSets);
4779 
4780         uint32_t count = pCreateInfo->maxSets;
4781         enc->vkCollectDescriptorPoolIdsGOOGLE(device, pool, &count, poolIds.data(),
4782                                               true /* do lock */);
4783 
4784         dp->allocInfo->freePoolIds = poolIds;
4785     }
4786 
4787     return res;
4788 }
4789 
4790 void ResourceTracker::on_vkDestroyDescriptorPool(void* context, VkDevice device,
4791                                                  VkDescriptorPool descriptorPool,
4792                                                  const VkAllocationCallbacks* pAllocator) {
4793     if (!descriptorPool) return;
4794 
4795     VkEncoder* enc = (VkEncoder*)context;
4796 
4797     clearDescriptorPoolAndUnregisterDescriptorSets(context, device, descriptorPool);
4798 
4799     enc->vkDestroyDescriptorPool(device, descriptorPool, pAllocator, true /* do lock */);
4800 }
4801 
4802 VkResult ResourceTracker::on_vkResetDescriptorPool(void* context, VkResult, VkDevice device,
4803                                                    VkDescriptorPool descriptorPool,
4804                                                    VkDescriptorPoolResetFlags flags) {
4805     if (!descriptorPool) return VK_ERROR_INITIALIZATION_FAILED;
4806 
4807     VkEncoder* enc = (VkEncoder*)context;
4808 
4809     VkResult res = enc->vkResetDescriptorPool(device, descriptorPool, flags, true /* do lock */);
4810 
4811     if (res != VK_SUCCESS) return res;
4812 
4813     clearDescriptorPoolAndUnregisterDescriptorSets(context, device, descriptorPool);
4814     return res;
4815 }
4816 
4817 VkResult ResourceTracker::on_vkAllocateDescriptorSets(
4818     void* context, VkResult, VkDevice device, const VkDescriptorSetAllocateInfo* pAllocateInfo,
4819     VkDescriptorSet* pDescriptorSets) {
4820     VkEncoder* enc = (VkEncoder*)context;
4821     auto ci = pAllocateInfo;
4822     auto sets = pDescriptorSets;
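    // Two allocation paths: the batched-update path allocates sets guest-side
    // from the pool IDs collected at pool creation time; otherwise the
    // allocation is forwarded to the host as usual.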
4823     if (mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate) {
4824         // Use the pool IDs we collected earlier from the host.
4825         VkResult poolAllocResult = validateAndApplyVirtualDescriptorSetAllocation(ci, sets);
4826 
4827         if (poolAllocResult != VK_SUCCESS) return poolAllocResult;
4828 
4829         for (uint32_t i = 0; i < ci->descriptorSetCount; ++i) {
4830             register_VkDescriptorSet(sets[i]);
4831             VkDescriptorSetLayout setLayout =
4832                 as_goldfish_VkDescriptorSet(sets[i])->reified->setLayout;
4833 
4834             // Need to add ref to the set layout in the virtual case
4835             // because the set itself might not be realized on host at the
4836             // same time
4837             struct goldfish_VkDescriptorSetLayout* dsl =
4838                 as_goldfish_VkDescriptorSetLayout(setLayout);
4839             ++dsl->layoutInfo->refcount;
4840         }
4841     } else {
4842         VkResult allocRes = enc->vkAllocateDescriptorSets(device, ci, sets, true /* do lock */);
4843 
4844         if (allocRes != VK_SUCCESS) return allocRes;
4845 
4846         for (uint32_t i = 0; i < ci->descriptorSetCount; ++i) {
4847             applyDescriptorSetAllocation(ci->descriptorPool, ci->pSetLayouts[i]);
4848             fillDescriptorSetInfoForPool(ci->descriptorPool, ci->pSetLayouts[i], sets[i]);
4849         }
4850     }
4851 
4852     return VK_SUCCESS;
4853 }
4854 
4855 VkResult ResourceTracker::on_vkFreeDescriptorSets(void* context, VkResult, VkDevice device,
4856                                                   VkDescriptorPool descriptorPool,
4857                                                   uint32_t descriptorSetCount,
4858                                                   const VkDescriptorSet* pDescriptorSets) {
4859     VkEncoder* enc = (VkEncoder*)context;
4860 
4861     // A bit of robustness so that double frees of descriptor sets and other
4862     // invalid usage are tolerated:
4863     // https://github.com/KhronosGroup/Vulkan-Docs/issues/1070
4864     // (people expect VK_SUCCESS to always be returned by vkFreeDescriptorSets)
4865     std::vector<VkDescriptorSet> toActuallyFree;
4866     {
4867         AutoLock<RecursiveLock> lock(mLock);
4868 
4869         // Pool was destroyed
4870         if (info_VkDescriptorPool.find(descriptorPool) == info_VkDescriptorPool.end()) {
4871             return VK_SUCCESS;
4872         }
4873 
4874         if (!descriptorPoolSupportsIndividualFreeLocked(descriptorPool)) return VK_SUCCESS;
4875 
4876         std::vector<VkDescriptorSet> existingDescriptorSets;
4878 
4879         // Check if this descriptor set was in the pool's set of allocated descriptor sets,
4880         // to guard against double free (Double free is allowed by the client)
4881         {
4882             auto allocedSets = as_goldfish_VkDescriptorPool(descriptorPool)->allocInfo->allocedSets;
4883 
4884             for (uint32_t i = 0; i < descriptorSetCount; ++i) {
4885                 if (allocedSets.end() == allocedSets.find(pDescriptorSets[i])) {
4886                     ALOGV(
4887                         "%s: Warning: descriptor set %p not found in pool. Was this "
4888                         "double-freed?\n",
4889                         __func__, (void*)pDescriptorSets[i]);
4890                     continue;
4891                 }
4892 
4893                 auto it = info_VkDescriptorSet.find(pDescriptorSets[i]);
4894                 if (it == info_VkDescriptorSet.end()) continue;
4895 
4896                 existingDescriptorSets.push_back(pDescriptorSets[i]);
4897             }
4898         }
4899 
4900         for (auto set : existingDescriptorSets) {
4901             if (removeDescriptorSetFromPool(set,
4902                                             mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate)) {
4903                 toActuallyFree.push_back(set);
4904             }
4905         }
4906 
4907         if (toActuallyFree.empty()) return VK_SUCCESS;
4908     }
4909 
4910     if (mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate) {
4911         // In the batched set update case, decrement refcount on the set layout
4912         // and only free on host if we satisfied a pending allocation on the
4913         // host.
4914         for (uint32_t i = 0; i < toActuallyFree.size(); ++i) {
4915             VkDescriptorSetLayout setLayout =
4916                 as_goldfish_VkDescriptorSet(toActuallyFree[i])->reified->setLayout;
4917             decDescriptorSetLayoutRef(context, device, setLayout, nullptr);
4918         }
4919         freeDescriptorSetsIfHostAllocated(enc, device, (uint32_t)toActuallyFree.size(),
4920                                           toActuallyFree.data());
4921     } else {
4922         // In the non-batched set update case, just free them directly.
4923         enc->vkFreeDescriptorSets(device, descriptorPool, (uint32_t)toActuallyFree.size(),
4924                                   toActuallyFree.data(), true /* do lock */);
4925     }
4926     return VK_SUCCESS;
4927 }
4928 
4929 VkResult ResourceTracker::on_vkCreateDescriptorSetLayout(
4930     void* context, VkResult, VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
4931     const VkAllocationCallbacks* pAllocator, VkDescriptorSetLayout* pSetLayout) {
4932     VkEncoder* enc = (VkEncoder*)context;
4933 
4934     VkResult res = enc->vkCreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout,
4935                                                     true /* do lock */);
4936 
4937     if (res != VK_SUCCESS) return res;
4938 
4939     struct goldfish_VkDescriptorSetLayout* dsl = as_goldfish_VkDescriptorSetLayout(*pSetLayout);
4940     dsl->layoutInfo = new DescriptorSetLayoutInfo;
4941     for (uint32_t i = 0; i < pCreateInfo->bindingCount; ++i) {
4942         dsl->layoutInfo->bindings.push_back(pCreateInfo->pBindings[i]);
4943     }
4944     dsl->layoutInfo->refcount = 1;
4945 
4946     return res;
4947 }
4948 
4949 void ResourceTracker::on_vkUpdateDescriptorSets(void* context, VkDevice device,
4950                                                 uint32_t descriptorWriteCount,
4951                                                 const VkWriteDescriptorSet* pDescriptorWrites,
4952                                                 uint32_t descriptorCopyCount,
4953                                                 const VkCopyDescriptorSet* pDescriptorCopies) {
4954     VkEncoder* enc = (VkEncoder*)context;
4955 
4956     std::vector<VkDescriptorImageInfo> transformedImageInfos;
4957     std::vector<VkWriteDescriptorSet> transformedWrites(descriptorWriteCount);
4958 
4959     memcpy(transformedWrites.data(), pDescriptorWrites,
4960            sizeof(VkWriteDescriptorSet) * descriptorWriteCount);
4961 
4962     size_t imageInfosNeeded = 0;
4963     for (uint32_t i = 0; i < descriptorWriteCount; ++i) {
4964         if (!isDescriptorTypeImageInfo(transformedWrites[i].descriptorType)) continue;
4965         if (!transformedWrites[i].pImageInfo) continue;
4966 
4967         imageInfosNeeded += transformedWrites[i].descriptorCount;
4968     }
4969 
4970     transformedImageInfos.resize(imageInfosNeeded);
4971 
4972     size_t imageInfoIndex = 0;
4973     for (uint32_t i = 0; i < descriptorWriteCount; ++i) {
4974         if (!isDescriptorTypeImageInfo(transformedWrites[i].descriptorType)) continue;
4975         if (!transformedWrites[i].pImageInfo) continue;
4976 
4977         for (uint32_t j = 0; j < transformedWrites[i].descriptorCount; ++j) {
4978             transformedImageInfos[imageInfoIndex] = transformedWrites[i].pImageInfo[j];
4979             ++imageInfoIndex;
4980         }
4981         transformedWrites[i].pImageInfo =
4982             &transformedImageInfos[imageInfoIndex - transformedWrites[i].descriptorCount];
4983     }
4984 
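    // The writes and their image infos were deep-copied above so they can be
    // transformed below (immutable-sampler and nonexistent-sampler filtering)
    // without mutating the caller's arrays.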
4985     {
4986         // Validate and filter samplers
4987         AutoLock<RecursiveLock> lock(mLock);
4988         size_t imageInfoIndex = 0;
4989         for (uint32_t i = 0; i < descriptorWriteCount; ++i) {
4990             if (!isDescriptorTypeImageInfo(transformedWrites[i].descriptorType)) continue;
4991             if (!transformedWrites[i].pImageInfo) continue;
4992 
4993             bool isImmutableSampler = descriptorBindingIsImmutableSampler(
4994                 transformedWrites[i].dstSet, transformedWrites[i].dstBinding);
4995 
4996             for (uint32_t j = 0; j < transformedWrites[i].descriptorCount; ++j) {
4997                 if (isImmutableSampler) {
4998                     transformedImageInfos[imageInfoIndex].sampler = 0;
4999                 }
5000                 transformedImageInfos[imageInfoIndex] =
5001                     filterNonexistentSampler(transformedImageInfos[imageInfoIndex]);
5002                 ++imageInfoIndex;
5003             }
5004         }
5005     }
5006 
5007     if (mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate) {
5008         for (uint32_t i = 0; i < descriptorWriteCount; ++i) {
5009             VkDescriptorSet set = transformedWrites[i].dstSet;
5010             doEmulatedDescriptorWrite(&transformedWrites[i],
5011                                       as_goldfish_VkDescriptorSet(set)->reified);
5012         }
5013 
5014         for (uint32_t i = 0; i < descriptorCopyCount; ++i) {
5015             doEmulatedDescriptorCopy(
5016                 &pDescriptorCopies[i],
5017                 as_goldfish_VkDescriptorSet(pDescriptorCopies[i].srcSet)->reified,
5018                 as_goldfish_VkDescriptorSet(pDescriptorCopies[i].dstSet)->reified);
5019         }
5020     } else {
5021         enc->vkUpdateDescriptorSets(device, descriptorWriteCount, transformedWrites.data(),
5022                                     descriptorCopyCount, pDescriptorCopies, true /* do lock */);
5023     }
5024 }
5025 
5026 void ResourceTracker::on_vkDestroyImage(void* context, VkDevice device, VkImage image,
5027                                         const VkAllocationCallbacks* pAllocator) {
5028 #ifdef VK_USE_PLATFORM_ANDROID_KHR
5029     auto* syncHelper = ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->syncHelper();
5030     {
5031         AutoLock<RecursiveLock> lock(mLock);  // Do not guard the encoder here;
5032                                               // doing so may deadlock (b/243339973).
5033 
5034         // Wait for any pending QSRIs to prevent a race between the Gfxstream host
5035         // potentially processing the below `vkDestroyImage()` from the VK encoder
5036         // command stream before processing a previously submitted
5037         // `VIRTIO_GPU_NATIVE_SYNC_VULKAN_QSRI_EXPORT` from the virtio-gpu command
5038         // stream which relies on the image existing.
5039         auto imageInfoIt = info_VkImage.find(image);
5040         if (imageInfoIt != info_VkImage.end()) {
5041             auto& imageInfo = imageInfoIt->second;
5042             for (int syncFd : imageInfo.pendingQsriSyncFds) {
5043                 int syncWaitRet = syncHelper->wait(syncFd, 3000);
5044                 if (syncWaitRet < 0) {
5045                 ALOGE("%s: Failed to wait for pending QSRI sync: strerror: %s errno: %d",
5046                           __func__, strerror(errno), errno);
5047                 }
5048                 syncHelper->close(syncFd);
5049             }
5050             imageInfo.pendingQsriSyncFds.clear();
5051         }
5052     }
5053 #endif
5054     VkEncoder* enc = (VkEncoder*)context;
5055     enc->vkDestroyImage(device, image, pAllocator, true /* do lock */);
5056 }
5057 
5058 void ResourceTracker::on_vkGetImageMemoryRequirements(void* context, VkDevice device, VkImage image,
5059                                                       VkMemoryRequirements* pMemoryRequirements) {
5060     AutoLock<RecursiveLock> lock(mLock);
5061 
5062     auto it = info_VkImage.find(image);
5063     if (it == info_VkImage.end()) return;
5064 
5065     auto& info = it->second;
5066 
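    // Requirements are fetched from the host once per image and then cached,
    // so repeat queries are answered entirely guest-side.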
5067     if (info.baseRequirementsKnown) {
5068         *pMemoryRequirements = info.baseRequirements;
5069         return;
5070     }
5071 
5072     lock.unlock();
5073 
5074     VkEncoder* enc = (VkEncoder*)context;
5075 
5076     enc->vkGetImageMemoryRequirements(device, image, pMemoryRequirements, true /* do lock */);
5077 
5078     lock.lock();
5079 
5080     transformImageMemoryRequirementsForGuestLocked(image, pMemoryRequirements);
5081 
5082     info.baseRequirementsKnown = true;
5083     info.baseRequirements = *pMemoryRequirements;
5084 }
5085 
5086 void ResourceTracker::on_vkGetImageMemoryRequirements2(void* context, VkDevice device,
5087                                                        const VkImageMemoryRequirementsInfo2* pInfo,
5088                                                        VkMemoryRequirements2* pMemoryRequirements) {
5089     VkEncoder* enc = (VkEncoder*)context;
5090     enc->vkGetImageMemoryRequirements2(device, pInfo, pMemoryRequirements, true /* do lock */);
5091     transformImageMemoryRequirements2ForGuest(pInfo->image, pMemoryRequirements);
5092 }
5093 
5094 void ResourceTracker::on_vkGetImageMemoryRequirements2KHR(
5095     void* context, VkDevice device, const VkImageMemoryRequirementsInfo2* pInfo,
5096     VkMemoryRequirements2* pMemoryRequirements) {
5097     VkEncoder* enc = (VkEncoder*)context;
5098     enc->vkGetImageMemoryRequirements2KHR(device, pInfo, pMemoryRequirements, true /* do lock */);
5099     transformImageMemoryRequirements2ForGuest(pInfo->image, pMemoryRequirements);
5100 }
5101 
5102 VkResult ResourceTracker::on_vkBindImageMemory(void* context, VkResult, VkDevice device,
5103                                                VkImage image, VkDeviceMemory memory,
5104                                                VkDeviceSize memoryOffset) {
5105     VkEncoder* enc = (VkEncoder*)context;
5106     // Do not forward calls with invalid handles to host.
5107     if (info_VkDeviceMemory.find(memory) == info_VkDeviceMemory.end() ||
5108         info_VkImage.find(image) == info_VkImage.end()) {
5109         return VK_ERROR_OUT_OF_DEVICE_MEMORY;
5110     }
5111     return enc->vkBindImageMemory(device, image, memory, memoryOffset, true /* do lock */);
5112 }
5113 
5114 VkResult ResourceTracker::on_vkBindImageMemory2(void* context, VkResult, VkDevice device,
5115                                                 uint32_t bindingCount,
5116                                                 const VkBindImageMemoryInfo* pBindInfos) {
5117     VkEncoder* enc = (VkEncoder*)context;
5118 
5119     if (bindingCount < 1 || !pBindInfos) {
5120         return VK_ERROR_OUT_OF_DEVICE_MEMORY;
5121     }
5122 
5123     for (uint32_t i = 0; i < bindingCount; i++) {
5124         const VkBindImageMemoryInfo& bimi = pBindInfos[i];
5125 
5126         auto imageIt = info_VkImage.find(bimi.image);
5127         if (imageIt == info_VkImage.end()) {
5128             return VK_ERROR_OUT_OF_DEVICE_MEMORY;
5129         }
5130 
5131         if (bimi.memory != VK_NULL_HANDLE) {
5132             auto memoryIt = info_VkDeviceMemory.find(bimi.memory);
5133             if (memoryIt == info_VkDeviceMemory.end()) {
5134                 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
5135             }
5136         }
5137     }
5138 
5139     return enc->vkBindImageMemory2(device, bindingCount, pBindInfos, true /* do lock */);
5140 }
5141 
5142 VkResult ResourceTracker::on_vkBindImageMemory2KHR(void* context, VkResult result, VkDevice device,
5143                                                    uint32_t bindingCount,
5144                                                    const VkBindImageMemoryInfo* pBindInfos) {
5145     return on_vkBindImageMemory2(context, result, device, bindingCount, pBindInfos);
5146 }
5147 
5148 VkResult ResourceTracker::on_vkCreateBuffer(void* context, VkResult, VkDevice device,
5149                                             const VkBufferCreateInfo* pCreateInfo,
5150                                             const VkAllocationCallbacks* pAllocator,
5151                                             VkBuffer* pBuffer) {
5152     VkEncoder* enc = (VkEncoder*)context;
5153 
5154     VkBufferCreateInfo localCreateInfo = vk_make_orphan_copy(*pCreateInfo);
5155     vk_struct_chain_iterator structChainIter = vk_make_chain_iterator(&localCreateInfo);
5156     VkExternalMemoryBufferCreateInfo localExtBufCi;
5157 
5158     const VkExternalMemoryBufferCreateInfo* extBufCiPtr =
5159         vk_find_struct<VkExternalMemoryBufferCreateInfo>(pCreateInfo);
5160     if (extBufCiPtr) {
5161         localExtBufCi = vk_make_orphan_copy(*extBufCiPtr);
5162         vk_append_struct(&structChainIter, &localExtBufCi);
5163     }
5164 
5165     VkBufferOpaqueCaptureAddressCreateInfo localCapAddrCi;
5166     const VkBufferOpaqueCaptureAddressCreateInfo* pCapAddrCi =
5167         vk_find_struct<VkBufferOpaqueCaptureAddressCreateInfo>(pCreateInfo);
5168     if (pCapAddrCi) {
5169         localCapAddrCi = vk_make_orphan_copy(*pCapAddrCi);
5170         vk_append_struct(&structChainIter, &localCapAddrCi);
5171     }
5172 
5173     VkBufferDeviceAddressCreateInfoEXT localDevAddrCi;
5174     const VkBufferDeviceAddressCreateInfoEXT* pDevAddrCi =
5175         vk_find_struct<VkBufferDeviceAddressCreateInfoEXT>(pCreateInfo);
5176     if (pDevAddrCi) {
5177         localDevAddrCi = vk_make_orphan_copy(*pDevAddrCi);
5178         vk_append_struct(&structChainIter, &localDevAddrCi);
5179     }
5180 
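    // Note: localCreateInfo's pNext chain was rebuilt above from orphan
    // copies, so only the extension structs recognized here reach the host.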
5181 #ifdef VK_USE_PLATFORM_FUCHSIA
5182     Optional<zx::vmo> vmo;
5183     bool isSysmemBackedMemory = false;
5184 
5185     if (extBufCiPtr &&
5186         (extBufCiPtr->handleTypes & VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA)) {
5187         isSysmemBackedMemory = true;
5188     }
5189 
5190     const auto* extBufferCollectionPtr =
5191         vk_find_struct<VkBufferCollectionBufferCreateInfoFUCHSIA>(pCreateInfo);
5192 
5193     if (extBufferCollectionPtr) {
5194         const auto& collection =
5195             *reinterpret_cast<fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(
5196                 extBufferCollectionPtr->collection);
5197         uint32_t index = extBufferCollectionPtr->index;
5198 
5199         auto result = collection->WaitForBuffersAllocated();
5200         if (result.ok() && result->status == ZX_OK) {
5201             auto& info = result->buffer_collection_info;
5202             if (index < info.buffer_count) {
5203                 vmo = gfxstream::guest::makeOptional(std::move(info.buffers[index].vmo));
5204             }
5205         } else {
5206             ALOGE("WaitForBuffersAllocated failed: %d %d", result.status(),
5207                   GET_STATUS_SAFE(result, status));
5208         }
5209 
5210         if (vmo && vmo->is_valid()) {
5211             fidl::Arena arena;
5212             fuchsia_hardware_goldfish::wire::CreateBuffer2Params createParams(arena);
5213             createParams.set_size(arena, pCreateInfo->size)
5214                 .set_memory_property(fuchsia_hardware_goldfish::wire::kMemoryPropertyDeviceLocal);
5215 
5216             auto result = mControlDevice->CreateBuffer2(std::move(*vmo), createParams);
5217             if (!result.ok() ||
5218                 (result->is_error() && result->error_value() != ZX_ERR_ALREADY_EXISTS)) {
5219                 ALOGE("CreateBuffer2 failed: %d:%d", result.status(),
5220                       GET_STATUS_SAFE(result, error_value()));
5221             }
5222             isSysmemBackedMemory = true;
5223         }
5224     }
5225 #endif  // VK_USE_PLATFORM_FUCHSIA
5226 
5227     VkResult res;
5228     VkMemoryRequirements memReqs;
5229 
5230     if (supportsCreateResourcesWithRequirements()) {
5231         res = enc->vkCreateBufferWithRequirementsGOOGLE(device, &localCreateInfo, pAllocator,
5232                                                         pBuffer, &memReqs, true /* do lock */);
5233     } else {
5234         res =
5235             enc->vkCreateBuffer(device, &localCreateInfo, pAllocator, pBuffer, true /* do lock */);
5236     }
5237 
5238     if (res != VK_SUCCESS) return res;
5239 
5240 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
5241     if (mCaps.vulkanCapset.colorBufferMemoryIndex == 0xFFFFFFFF) {
5242         mCaps.vulkanCapset.colorBufferMemoryIndex = getColorBufferMemoryIndex(context, device);
5243     }
5244     if (extBufCiPtr &&
5245         ((extBufCiPtr->handleTypes &
5246           VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID) ||
5247          (extBufCiPtr->handleTypes & VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT))) {
5248         updateMemoryTypeBits(&memReqs.memoryTypeBits, mCaps.vulkanCapset.colorBufferMemoryIndex);
5249     }
5250 #endif
5251 
5252     AutoLock<RecursiveLock> lock(mLock);
5253 
5254     auto it = info_VkBuffer.find(*pBuffer);
5255     if (it == info_VkBuffer.end()) return VK_ERROR_INITIALIZATION_FAILED;
5256 
5257     auto& info = it->second;
5258 
5259     info.createInfo = localCreateInfo;
5260     info.createInfo.pNext = nullptr;
5261 
5262     if (supportsCreateResourcesWithRequirements()) {
5263         info.baseRequirementsKnown = true;
5264         info.baseRequirements = memReqs;
5265     }
5266 
5267     if (extBufCiPtr) {
5268         info.external = true;
5269         info.externalCreateInfo = *extBufCiPtr;
5270     }
5271 
5272 #ifdef VK_USE_PLATFORM_FUCHSIA
5273     if (isSysmemBackedMemory) {
5274         info.isSysmemBackedMemory = true;
5275     }
5276 #endif
5277 
5278     return res;
5279 }
5280 
5281 void ResourceTracker::on_vkDestroyBuffer(void* context, VkDevice device, VkBuffer buffer,
5282                                          const VkAllocationCallbacks* pAllocator) {
5283     VkEncoder* enc = (VkEncoder*)context;
5284     enc->vkDestroyBuffer(device, buffer, pAllocator, true /* do lock */);
5285 }
5286 
5287 void ResourceTracker::on_vkGetBufferMemoryRequirements(void* context, VkDevice device,
5288                                                        VkBuffer buffer,
5289                                                        VkMemoryRequirements* pMemoryRequirements) {
5290     AutoLock<RecursiveLock> lock(mLock);
5291 
5292     auto it = info_VkBuffer.find(buffer);
5293     if (it == info_VkBuffer.end()) return;
5294 
5295     auto& info = it->second;
5296 
5297     if (info.baseRequirementsKnown) {
5298         *pMemoryRequirements = info.baseRequirements;
5299         return;
5300     }
5301 
5302     lock.unlock();
5303 
5304     VkEncoder* enc = (VkEncoder*)context;
5305     enc->vkGetBufferMemoryRequirements(device, buffer, pMemoryRequirements, true /* do lock */);
5306 
5307     lock.lock();
5308 
5309     info.baseRequirementsKnown = true;
5310     info.baseRequirements = *pMemoryRequirements;
5311 }
5312 
5313 void ResourceTracker::on_vkGetBufferMemoryRequirements2(
5314     void* context, VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo,
5315     VkMemoryRequirements2* pMemoryRequirements) {
5316     VkEncoder* enc = (VkEncoder*)context;
5317     enc->vkGetBufferMemoryRequirements2(device, pInfo, pMemoryRequirements, true /* do lock */);
5318     transformBufferMemoryRequirements2ForGuest(pInfo->buffer, pMemoryRequirements);
5319 }
5320 
5321 void ResourceTracker::on_vkGetBufferMemoryRequirements2KHR(
5322     void* context, VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo,
5323     VkMemoryRequirements2* pMemoryRequirements) {
5324     VkEncoder* enc = (VkEncoder*)context;
5325     enc->vkGetBufferMemoryRequirements2KHR(device, pInfo, pMemoryRequirements, true /* do lock */);
5326     transformBufferMemoryRequirements2ForGuest(pInfo->buffer, pMemoryRequirements);
5327 }
5328 
5329 VkResult ResourceTracker::on_vkBindBufferMemory(void* context, VkResult, VkDevice device,
5330                                                 VkBuffer buffer, VkDeviceMemory memory,
5331                                                 VkDeviceSize memoryOffset) {
5332     VkEncoder* enc = (VkEncoder*)context;
5333     return enc->vkBindBufferMemory(device, buffer, memory, memoryOffset, true /* do lock */);
5334 }
5335 
5336 VkResult ResourceTracker::on_vkBindBufferMemory2(void* context, VkResult, VkDevice device,
5337                                                  uint32_t bindInfoCount,
5338                                                  const VkBindBufferMemoryInfo* pBindInfos) {
5339     VkEncoder* enc = (VkEncoder*)context;
5340     return enc->vkBindBufferMemory2(device, bindInfoCount, pBindInfos, true /* do lock */);
5341 }
5342 
5343 VkResult ResourceTracker::on_vkBindBufferMemory2KHR(void* context, VkResult, VkDevice device,
5344                                                     uint32_t bindInfoCount,
5345                                                     const VkBindBufferMemoryInfo* pBindInfos) {
5346     VkEncoder* enc = (VkEncoder*)context;
5347     return enc->vkBindBufferMemory2KHR(device, bindInfoCount, pBindInfos, true /* do lock */);
5348 }
5349 
5350 VkResult ResourceTracker::on_vkCreateSemaphore(void* context, VkResult input_result,
5351                                                VkDevice device,
5352                                                const VkSemaphoreCreateInfo* pCreateInfo,
5353                                                const VkAllocationCallbacks* pAllocator,
5354                                                VkSemaphore* pSemaphore) {
5355     (void)input_result;
5356     VkEncoder* enc = (VkEncoder*)context;
5357 
5358     VkSemaphoreCreateInfo finalCreateInfo = *pCreateInfo;
5359 
5360     const VkExportSemaphoreCreateInfoKHR* exportSemaphoreInfoPtr =
5361         vk_find_struct<VkExportSemaphoreCreateInfoKHR>(pCreateInfo);
5362 
5363 #ifdef VK_USE_PLATFORM_FUCHSIA
5364     bool exportEvent =
5365         exportSemaphoreInfoPtr && (exportSemaphoreInfoPtr->handleTypes &
5366                                    VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA);
5367 
5368     if (exportEvent) {
5369         finalCreateInfo.pNext = nullptr;
5370         // If we have timeline semaphores externally, leave it there.
5371         const VkSemaphoreTypeCreateInfo* typeCi =
5372             vk_find_struct<VkSemaphoreTypeCreateInfo>(pCreateInfo);
5373         if (typeCi) finalCreateInfo.pNext = typeCi;
5374     }
5375 #endif
5376 
5377 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
5378     bool exportSyncFd = exportSemaphoreInfoPtr && (exportSemaphoreInfoPtr->handleTypes &
5379                                                    VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT);
5380 
5381     if (exportSyncFd) {
5382         finalCreateInfo.pNext = nullptr;
5383         // If we have timeline semaphores externally, leave it there.
5384         const VkSemaphoreTypeCreateInfo* typeCi =
5385             vk_find_struct<VkSemaphoreTypeCreateInfo>(pCreateInfo);
5386         if (typeCi) finalCreateInfo.pNext = typeCi;
5387     }
5388 #endif
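    // For external exports (zircon event / sync fd), the pNext chain has been
    // stripped down to at most the VkSemaphoreTypeCreateInfo before the create
    // call reaches the host.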
5389     input_result = enc->vkCreateSemaphore(device, &finalCreateInfo, pAllocator, pSemaphore,
5390                                           true /* do lock */);
5391 
5392     zx_handle_t event_handle = ZX_HANDLE_INVALID;
5393 
5394 #ifdef VK_USE_PLATFORM_FUCHSIA
5395     if (exportEvent) {
5396         zx_event_create(0, &event_handle);
5397     }
5398 #endif
5399 
5400     AutoLock<RecursiveLock> lock(mLock);
5401 
5402     auto it = info_VkSemaphore.find(*pSemaphore);
5403     if (it == info_VkSemaphore.end()) return VK_ERROR_INITIALIZATION_FAILED;
5404 
5405     auto& info = it->second;
5406 
5407     info.device = device;
5408     info.eventHandle = event_handle;
5409 #ifdef VK_USE_PLATFORM_FUCHSIA
5410     info.eventKoid = getEventKoid(info.eventHandle);
5411 #endif
5412 
5413 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
5414     if (exportSyncFd) {
5415         if (mFeatureInfo->hasVirtioGpuNativeSync) {
5416             VkResult result;
5417             int64_t osHandle;
5418             uint64_t hostFenceHandle = get_host_u64_VkSemaphore(*pSemaphore);
5419 
5420             result = createFence(device, hostFenceHandle, osHandle);
5421             if (result != VK_SUCCESS) return result;
5422 
5423             info.syncFd.emplace(osHandle);
5424         } else {
5425 #if GFXSTREAM_ENABLE_GUEST_GOLDFISH
5426             ensureSyncDeviceFd();
5427 
5428             // exportSyncFd is already known to be true on this path.
5429             int syncFd = -1;
5430             goldfish_sync_queue_work(
5431                 mSyncDeviceFd, get_host_u64_VkSemaphore(*pSemaphore) /* the handle */,
5432                 GOLDFISH_SYNC_VULKAN_SEMAPHORE_SYNC /* thread handle (doubling as type field) */,
5433                 &syncFd);
5434             info.syncFd.emplace(syncFd);
5437 #endif
5438         }
5439     }
5440 #endif
5441 
5442     return VK_SUCCESS;
5443 }
5444 
5445 void ResourceTracker::on_vkDestroySemaphore(void* context, VkDevice device, VkSemaphore semaphore,
5446                                             const VkAllocationCallbacks* pAllocator) {
5447     VkEncoder* enc = (VkEncoder*)context;
5448     enc->vkDestroySemaphore(device, semaphore, pAllocator, true /* do lock */);
5449 }
5450 
5451 // https://www.khronos.org/registry/vulkan/specs/1.0-extensions/html/vkspec.html#vkGetSemaphoreFdKHR
5452 // Each call to vkGetSemaphoreFdKHR must create a new file descriptor and transfer ownership
5453 // of it to the application. To avoid leaking resources, the application must release ownership
5454 // of the file descriptor when it is no longer needed.
5455 VkResult ResourceTracker::on_vkGetSemaphoreFdKHR(void* context, VkResult, VkDevice device,
5456                                                  const VkSemaphoreGetFdInfoKHR* pGetFdInfo,
5457                                                  int* pFd) {
5458 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
5459     VkEncoder* enc = (VkEncoder*)context;
5460     bool getSyncFd = pGetFdInfo->handleType & VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
5461 
5462     if (getSyncFd) {
5463         AutoLock<RecursiveLock> lock(mLock);
5464         auto it = info_VkSemaphore.find(pGetFdInfo->semaphore);
5465         if (it == info_VkSemaphore.end()) return VK_ERROR_OUT_OF_HOST_MEMORY;
5466         auto& semInfo = it->second;
5467         // syncFd is expected to have a value here.
5468         auto* syncHelper =
5469             ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->syncHelper();
5470         *pFd = syncHelper->dup(semInfo.syncFd.value_or(-1));
5471         return VK_SUCCESS;
5472     } else {
5473         // opaque fd
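        // A host-side opaque fd cannot cross the guest/host boundary directly,
        // so the host fd's value is stashed inside a guest memfd; the matching
        // import path reads it back out. For reference, a typical
        // application-side export looks roughly like this (illustrative
        // sketch, not code from this file):
        //
        //   VkSemaphoreGetFdInfoKHR getFdInfo = {
        //       .sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR,
        //       .semaphore = sem,
        //       .handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT};
        //   int fd = -1;
        //   vkGetSemaphoreFdKHR(device, &getFdInfo, &fd);  // caller owns fd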
5474         int hostFd = 0;
5475         VkResult result = enc->vkGetSemaphoreFdKHR(device, pGetFdInfo, &hostFd, true /* do lock */);
5476         if (result != VK_SUCCESS) {
5477             return result;
5478         }
5479         *pFd = memfd_create("vk_opaque_fd", 0);
5480         write(*pFd, &hostFd, sizeof(hostFd));
5481         return VK_SUCCESS;
5482     }
5483 #else
5484     (void)context;
5485     (void)device;
5486     (void)pGetFdInfo;
5487     (void)pFd;
5488     return VK_ERROR_INCOMPATIBLE_DRIVER;
5489 #endif
5490 }
5491 
5492 VkResult ResourceTracker::on_vkImportSemaphoreFdKHR(
5493     void* context, VkResult input_result, VkDevice device,
5494     const VkImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo) {
5495 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
5496     VkEncoder* enc = (VkEncoder*)context;
5497     if (input_result != VK_SUCCESS) {
5498         return input_result;
5499     }
5500 
5501     auto* syncHelper = ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->syncHelper();
5502 
5503     if (pImportSemaphoreFdInfo->handleType & VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT) {
5504         VkImportSemaphoreFdInfoKHR tmpInfo = *pImportSemaphoreFdInfo;
5505 
5506         AutoLock<RecursiveLock> lock(mLock);
5507 
5508         auto semaphoreIt = info_VkSemaphore.find(pImportSemaphoreFdInfo->semaphore);
             // Guard against unknown semaphore handles before dereferencing.
             if (semaphoreIt == info_VkSemaphore.end()) return VK_ERROR_OUT_OF_HOST_MEMORY;
5509         auto& info = semaphoreIt->second;
5510 
5511         if (info.syncFd.value_or(-1) >= 0) {
5512             syncHelper->close(info.syncFd.value());
5513         }
5514 
5515         info.syncFd.emplace(pImportSemaphoreFdInfo->fd);
5516 
5517         return VK_SUCCESS;
5518     } else {
5519         int fd = pImportSemaphoreFdInfo->fd;
5520         int err = lseek(fd, 0, SEEK_SET);
5521         if (err == -1) {
5522             ALOGE("lseek failed on import semaphore");
5523         }
5524         int hostFd = 0;
5525         read(fd, &hostFd, sizeof(hostFd));
5526         VkImportSemaphoreFdInfoKHR tmpInfo = *pImportSemaphoreFdInfo;
5527         tmpInfo.fd = hostFd;
5528         VkResult result = enc->vkImportSemaphoreFdKHR(device, &tmpInfo, true /* do lock */);
5529         syncHelper->close(fd);
5530         return result;
5531     }
5532 #else
5533     (void)context;
5534     (void)input_result;
5535     (void)device;
5536     (void)pImportSemaphoreFdInfo;
5537     return VK_ERROR_INCOMPATIBLE_DRIVER;
5538 #endif
5539 }
5540 
5541 VkResult ResourceTracker::on_vkGetMemoryFdKHR(void* context, VkResult, VkDevice device,
5542                                               const VkMemoryGetFdInfoKHR* pGetFdInfo, int* pFd) {
5543 #if defined(__linux__) && !defined(VK_USE_PLATFORM_ANDROID_KHR)
5544     if (!pGetFdInfo) return VK_ERROR_OUT_OF_HOST_MEMORY;
5545     if (!pGetFdInfo->memory) return VK_ERROR_OUT_OF_HOST_MEMORY;
5546 
5547     if (!(pGetFdInfo->handleType & (VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
5548                                     VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT))) {
5549         ALOGE("%s: Export operation not defined for handleType: 0x%x\n", __func__,
5550               pGetFdInfo->handleType);
5551         return VK_ERROR_OUT_OF_HOST_MEMORY;
5552     }
5553     // Sanity-check device
5554     AutoLock<RecursiveLock> lock(mLock);
5555     auto deviceIt = info_VkDevice.find(device);
5556     if (deviceIt == info_VkDevice.end()) {
5557         return VK_ERROR_OUT_OF_HOST_MEMORY;
5558     }
5559 
5560     auto deviceMemIt = info_VkDeviceMemory.find(pGetFdInfo->memory);
5561     if (deviceMemIt == info_VkDeviceMemory.end()) {
5562         return VK_ERROR_OUT_OF_HOST_MEMORY;
5563     }
5564     auto& info = deviceMemIt->second;
5565 
5566     if (!info.blobPtr) {
5567         ALOGE("%s: VkDeviceMemory does not have a resource available for export.\n", __func__);
5568         return VK_ERROR_OUT_OF_HOST_MEMORY;
5569     }
5570 
5571     VirtGpuExternalHandle handle{};
5572     int ret = info.blobPtr->exportBlob(handle);
5573     if (ret != 0 || handle.osHandle < 0) {
5574         ALOGE("%s: Failed to export host resource to FD.\n", __func__);
5575         return VK_ERROR_OUT_OF_HOST_MEMORY;
5576     }
5577     *pFd = handle.osHandle;
5578     return VK_SUCCESS;
5579 #else
5580     (void)context;
5581     (void)device;
5582     (void)pGetFdInfo;
5583     (void)pFd;
5584     return VK_ERROR_INCOMPATIBLE_DRIVER;
5585 #endif
5586 }
5587 
5588 void ResourceTracker::flushCommandBufferPendingCommandsBottomUp(
5589     void* context, VkQueue queue, const std::vector<VkCommandBuffer>& workingSet) {
5590     if (workingSet.empty()) return;
5591 
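    // Recurse into secondary command buffers first, so that by the time a
    // primary is flushed everything it references is already on the host.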
5592     std::vector<VkCommandBuffer> nextLevel;
5593     for (auto commandBuffer : workingSet) {
5594         struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
5595         forAllObjects(cb->subObjects, [&nextLevel](void* secondary) {
5596             nextLevel.push_back((VkCommandBuffer)secondary);
5597         });
5598     }
5599 
5600     flushCommandBufferPendingCommandsBottomUp(context, queue, nextLevel);
5601 
5602     // After this point, everyone at the previous level has been flushed
5603     for (auto cmdbuf : workingSet) {
5604         struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(cmdbuf);
5605 
5606         // There's no pending commands here, skip. (case 1)
5607         if (!cb->privateStream) continue;
5608 
5609         unsigned char* writtenPtr = 0;
5610         size_t written = 0;
5611         CommandBufferStagingStream* cmdBufStream =
5612             static_cast<CommandBufferStagingStream*>(cb->privateStream);
5613         cmdBufStream->getWritten(&writtenPtr, &written);
5614 
5615         // There's no pending commands here, skip. (case 2, stream created but no new recordings)
5616         if (!written) continue;
5617 
5618         // There are pending commands to flush.
5619         VkEncoder* enc = (VkEncoder*)context;
5620         VkDeviceMemory deviceMemory = cmdBufStream->getDeviceMemory();
5621         VkDeviceSize dataOffset = 0;
5622         if (mFeatureInfo->hasVulkanAuxCommandMemory) {
5623             // For suballocations, deviceMemory is an alias VkDeviceMemory;
5624             // get the underlying VkDeviceMemory for the given alias.
5625             deviceMemoryTransform_tohost(&deviceMemory, 1 /*memoryCount*/, &dataOffset,
5626                                          1 /*offsetCount*/, nullptr /*size*/, 0 /*sizeCount*/,
5627                                          nullptr /*typeIndex*/, 0 /*typeIndexCount*/,
5628                                          nullptr /*typeBits*/, 0 /*typeBitCounts*/);
5629 
5630             // mark stream as flushing before flushing commands
5631             cmdBufStream->markFlushing();
5632             enc->vkQueueFlushCommandsFromAuxMemoryGOOGLE(queue, cmdbuf, deviceMemory, dataOffset,
5633                                                          written, true /*do lock*/);
5634         } else {
5635             enc->vkQueueFlushCommandsGOOGLE(queue, cmdbuf, written, (const void*)writtenPtr,
5636                                             true /* do lock */);
5637         }
5638         // Reset this stream. Flushing happens on vkQueueSubmit, and the
5639         // Vulkan API states that at queue submit time applications MUST NOT
5640         // attempt to modify the command buffer in any way, as the device
5641         // may be processing the commands recorded to it. It is therefore
5642         // safe to call reset() here. The command buffer associated with
5643         // this stream only leaves the pending state after queue submit is
5644         // complete and the host has read the data.
5646         cmdBufStream->reset();
5647     }
5648 }
5649 
5650 uint32_t ResourceTracker::syncEncodersForQueue(VkQueue queue, VkEncoder* currentEncoder) {
5651     if (!supportsAsyncQueueSubmit()) {
5652         return 0;
5653     }
5654 
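    // A queue may be driven from multiple encoders (threads). When the encoder
    // changes, emit a vkQueueHostSyncGOOGLE pair with bumped sequence numbers
    // so the host can order the old encoder's stream before the new one's.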
5655     struct goldfish_VkQueue* q = as_goldfish_VkQueue(queue);
5656     if (!q) return 0;
5657 
5658     auto lastEncoder = q->lastUsedEncoder;
5659 
5660     if (lastEncoder == currentEncoder) return 0;
5661 
5662     currentEncoder->incRef();
5663 
5664     q->lastUsedEncoder = currentEncoder;
5665 
5666     if (!lastEncoder) return 0;
5667 
5668     auto oldSeq = q->sequenceNumber;
5669     q->sequenceNumber += 2;
5670     lastEncoder->vkQueueHostSyncGOOGLE(queue, false, oldSeq + 1, true /* do lock */);
5671     lastEncoder->flush();
5672     currentEncoder->vkQueueHostSyncGOOGLE(queue, true, oldSeq + 2, true /* do lock */);
5673 
5674     if (lastEncoder->decRef()) {
5675         q->lastUsedEncoder = nullptr;
5676     }
5677 
5678     return 0;
5679 }
5680 
5681 template <class VkSubmitInfoType>
5682 void ResourceTracker::flushStagingStreams(void* context, VkQueue queue, uint32_t submitCount,
5683                                           const VkSubmitInfoType* pSubmits) {
5684     std::vector<VkCommandBuffer> toFlush;
5685     for (uint32_t i = 0; i < submitCount; ++i) {
5686         for (uint32_t j = 0; j < getCommandBufferCount(pSubmits[i]); ++j) {
5687             toFlush.push_back(getCommandBuffer(pSubmits[i], j));
5688         }
5689     }
5690 
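    // With every command buffer for this submit gathered, first commit any
    // pending emulated descriptor set updates, then flush each command
    // buffer's staged command stream to the host.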
5691     std::unordered_set<VkDescriptorSet> pendingSets;
5692     collectAllPendingDescriptorSetsBottomUp(toFlush, pendingSets);
5693     commitDescriptorSetUpdates(context, queue, pendingSets);
5694 
5695     flushCommandBufferPendingCommandsBottomUp(context, queue, toFlush);
5696 
5697     for (auto cb : toFlush) {
5698         resetCommandBufferPendingTopology(cb);
5699     }
5700 }
5701 
5702 VkResult ResourceTracker::on_vkQueueSubmit(void* context, VkResult input_result, VkQueue queue,
5703                                            uint32_t submitCount, const VkSubmitInfo* pSubmits,
5704                                            VkFence fence) {
5705     AEMU_SCOPED_TRACE("on_vkQueueSubmit");
5706     return on_vkQueueSubmitTemplate<VkSubmitInfo>(context, input_result, queue, submitCount,
5707                                                   pSubmits, fence);
5708 }
5709 
5710 VkResult ResourceTracker::on_vkQueueSubmit2(void* context, VkResult input_result, VkQueue queue,
5711                                             uint32_t submitCount, const VkSubmitInfo2* pSubmits,
5712                                             VkFence fence) {
5713     AEMU_SCOPED_TRACE("on_vkQueueSubmit2");
5714     return on_vkQueueSubmitTemplate<VkSubmitInfo2>(context, input_result, queue, submitCount,
5715                                                    pSubmits, fence);
5716 }
5717 
5718 VkResult ResourceTracker::vkQueueSubmitEnc(VkEncoder* enc, VkQueue queue, uint32_t submitCount,
5719                                            const VkSubmitInfo* pSubmits, VkFence fence) {
5720     if (supportsAsyncQueueSubmit()) {
5721         enc->vkQueueSubmitAsyncGOOGLE(queue, submitCount, pSubmits, fence, true /* do lock */);
5722         return VK_SUCCESS;
5723     } else {
5724         return enc->vkQueueSubmit(queue, submitCount, pSubmits, fence, true /* do lock */);
5725     }
5726 }
5727 
5728 VkResult ResourceTracker::vkQueueSubmitEnc(VkEncoder* enc, VkQueue queue, uint32_t submitCount,
5729                                            const VkSubmitInfo2* pSubmits, VkFence fence) {
5730     if (supportsAsyncQueueSubmit()) {
5731         enc->vkQueueSubmitAsync2GOOGLE(queue, submitCount, pSubmits, fence, true /* do lock */);
5732         return VK_SUCCESS;
5733     } else {
5734         return enc->vkQueueSubmit2(queue, submitCount, pSubmits, fence, true /* do lock */);
5735     }
5736 }
5737 
5738 template <typename VkSubmitInfoType>
5739 VkResult ResourceTracker::on_vkQueueSubmitTemplate(void* context, VkResult input_result,
5740                                                    VkQueue queue, uint32_t submitCount,
5741                                                    const VkSubmitInfoType* pSubmits,
5742                                                    VkFence fence) {
5743     flushStagingStreams(context, queue, submitCount, pSubmits);
5744 
5745     std::vector<VkSemaphore> pre_signal_semaphores;
5746     std::vector<zx_handle_t> pre_signal_events;
5747     std::vector<int> pre_signal_sync_fds;
5748     std::vector<std::pair<zx_handle_t, zx_koid_t>> post_wait_events;
5749     std::vector<int> post_wait_sync_fds;
5750 
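    // Scan the submit infos for semaphores backed by external objects (zircon
    // events / sync fds): waits on those must be satisfied guest-side before
    // submitting, and signals must be propagated once the queue idles.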
5751     VkEncoder* enc = (VkEncoder*)context;
5752 
5753     AutoLock<RecursiveLock> lock(mLock);
5754 
5755     for (uint32_t i = 0; i < submitCount; ++i) {
5756         for (uint32_t j = 0; j < getWaitSemaphoreCount(pSubmits[i]); ++j) {
5757             VkSemaphore semaphore = getWaitSemaphore(pSubmits[i], j);
5758             auto it = info_VkSemaphore.find(semaphore);
5759             if (it != info_VkSemaphore.end()) {
5760                 auto& semInfo = it->second;
5761 #ifdef VK_USE_PLATFORM_FUCHSIA
5762                 if (semInfo.eventHandle) {
5763                     pre_signal_events.push_back(semInfo.eventHandle);
5764                     pre_signal_semaphores.push_back(semaphore);
5765                 }
5766 #endif
5767 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
5768                 if (semInfo.syncFd.has_value()) {
5769                     pre_signal_sync_fds.push_back(semInfo.syncFd.value());
5770                     pre_signal_semaphores.push_back(semaphore);
5771                 }
5772 #endif
5773             }
5774         }
5775         for (uint32_t j = 0; j < getSignalSemaphoreCount(pSubmits[i]); ++j) {
5776             auto it = info_VkSemaphore.find(getSignalSemaphore(pSubmits[i], j));
5777             if (it != info_VkSemaphore.end()) {
5778                 auto& semInfo = it->second;
5779 #ifdef VK_USE_PLATFORM_FUCHSIA
5780                 if (semInfo.eventHandle) {
5781                     post_wait_events.push_back({semInfo.eventHandle, semInfo.eventKoid});
5782 #ifndef FUCHSIA_NO_TRACE
5783                     if (semInfo.eventKoid != ZX_KOID_INVALID) {
5784                         // TODO(fxbug.dev/66098): Remove the "semaphore"
5785                         // FLOW_END events once it is removed from clients
5786                         // (for example, gfx Engine).
5787                         TRACE_FLOW_END("gfx", "semaphore", semInfo.eventKoid);
5788                         TRACE_FLOW_BEGIN("gfx", "goldfish_post_wait_event", semInfo.eventKoid);
5789                     }
5790 #endif
5791                 }
5792 #endif
5793 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
5794                 if (semInfo.syncFd.value_or(-1) >= 0) {
5795                     post_wait_sync_fds.push_back(semInfo.syncFd.value());
5796                 }
5797 #endif
5798             }
5799         }
5800     }
5801     lock.unlock();
5802 
5803     if (pre_signal_semaphores.empty()) {
5804         input_result = vkQueueSubmitEnc(enc, queue, submitCount, pSubmits, fence);
5805         if (input_result != VK_SUCCESS) return input_result;
5806     } else {
5807         // Schedule waits on the OS external objects and
5808         // signal the wait semaphores
5809         // in a separate thread.
5810         std::vector<WorkPool::Task> preSignalTasks;
5811         std::vector<WorkPool::Task> preSignalQueueSubmitTasks;
5813 #ifdef VK_USE_PLATFORM_FUCHSIA
5814         for (auto event : pre_signal_events) {
5815             preSignalTasks.push_back([event] {
5816                 zx_object_wait_one(event, ZX_EVENT_SIGNALED, ZX_TIME_INFINITE, nullptr);
5817             });
5818         }
5819 #endif
5820 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
5821         for (auto fd : pre_signal_sync_fds) {
5822             // https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/VkImportSemaphoreFdInfoKHR.html
5823             // fd == -1 is treated as already signaled
5824             if (fd != -1) {
5825                 preSignalTasks.push_back([fd] {
5826                     auto* syncHelper =
5827                         ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->syncHelper();
5828                     syncHelper->wait(fd, 3000);
5829                 });
5830             }
5831         }
5832 #endif
5833         if (!preSignalTasks.empty()) {
5834             auto waitGroupHandle = mWorkPool.schedule(preSignalTasks);
5835             mWorkPool.waitAll(waitGroupHandle);
5836         }
5837 
5838         // Use the old version of VkSubmitInfo
5839         VkSubmitInfo submit_info = {
5840             .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
5841             .waitSemaphoreCount = 0,
5842             .pWaitSemaphores = nullptr,
5843             .pWaitDstStageMask = nullptr,
5844             .signalSemaphoreCount = static_cast<uint32_t>(pre_signal_semaphores.size()),
5845             .pSignalSemaphores = pre_signal_semaphores.data()};
5846         vkQueueSubmitEnc(enc, queue, 1, &submit_info, VK_NULL_HANDLE);
5847         input_result = vkQueueSubmitEnc(enc, queue, submitCount, pSubmits, fence);
5848         if (input_result != VK_SUCCESS) return input_result;
5849     }
5850     lock.lock();
5851     int externalFenceFdToSignal = -1;
5852 
5853 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
5854     if (fence != VK_NULL_HANDLE) {
5855         auto it = info_VkFence.find(fence);
5856         if (it != info_VkFence.end()) {
5857             const auto& info = it->second;
5858             if (info.syncFd >= 0) {
5859                 externalFenceFdToSignal = info.syncFd;
5860             }
5861         }
5862     }
5863 #endif
5864     if (externalFenceFdToSignal >= 0 || !post_wait_events.empty() || !post_wait_sync_fds.empty()) {
5865         std::vector<WorkPool::Task> tasks;
5866 
5867         tasks.push_back([queue, externalFenceFdToSignal, post_wait_events /* copy of zx handles */,
5868                          post_wait_sync_fds /* copy of sync fds */] {
5869             auto hostConn = ResourceTracker::threadingCallbacks.hostConnectionGetFunc();
5870             auto vkEncoder = ResourceTracker::threadingCallbacks.vkEncoderGetFunc(hostConn);
5871             auto waitIdleRes = vkEncoder->vkQueueWaitIdle(queue, true /* do lock */);
5872 #ifdef VK_USE_PLATFORM_FUCHSIA
5873             AEMU_SCOPED_TRACE("on_vkQueueSubmit::SignalSemaphores");
5874             (void)externalFenceFdToSignal;
5875             for (auto& [event, koid] : post_wait_events) {
5876 #ifndef FUCHSIA_NO_TRACE
5877                 if (koid != ZX_KOID_INVALID) {
5878                     TRACE_FLOW_END("gfx", "goldfish_post_wait_event", koid);
5879                     TRACE_FLOW_BEGIN("gfx", "event_signal", koid);
5880                 }
5881 #endif
5882                 zx_object_signal(event, 0, ZX_EVENT_SIGNALED);
5883             }
5884 #endif
5885 #if GFXSTREAM_ENABLE_GUEST_GOLDFISH
5886             for (auto& fd : post_wait_sync_fds) {
5887                 goldfish_sync_signal(fd);
5888             }
5889 
5890             if (externalFenceFdToSignal >= 0) {
5891                 ALOGV("%s: external fence real signal: %d\n", __func__, externalFenceFdToSignal);
5892                 goldfish_sync_signal(externalFenceFdToSignal);
5893             }
5894 #endif
5895         });
5896         auto queueAsyncWaitHandle = mWorkPool.schedule(tasks);
5897         auto& queueWorkItems = mQueueSensitiveWorkPoolItems[queue];
5898         queueWorkItems.push_back(queueAsyncWaitHandle);
5899     }
5900     return VK_SUCCESS;
5901 }
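
// Illustrative sketch (not part of the driver): the ordering contract the
// template above implements for an app submit that waits on a semaphore with
// an imported sync fd payload (`sem`, `stage`, `fence` are hypothetical):
//
//   VkPipelineStageFlags stage = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
//   VkSubmitInfo si = {.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
//                      .waitSemaphoreCount = 1,
//                      .pWaitSemaphores = &sem,
//                      .pWaitDstStageMask = &stage};
//   vkQueueSubmit(queue, 1, &si, fence);
//
// Internally: (1) a worker waits on the sync fd, (2) an empty submit signals
// `sem`, (3) the caller's submit is encoded, and (4) a queue-sensitive work
// item waits for vkQueueWaitIdle before signaling post-wait fds/events.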
5902 
5903 VkResult ResourceTracker::on_vkQueueWaitIdle(void* context, VkResult, VkQueue queue) {
5904     VkEncoder* enc = (VkEncoder*)context;
5905 
5906     AutoLock<RecursiveLock> lock(mLock);
5907     std::vector<WorkPool::WaitGroupHandle> toWait = mQueueSensitiveWorkPoolItems[queue];
5908     mQueueSensitiveWorkPoolItems[queue].clear();
5909     lock.unlock();
5910 
5911     if (toWait.empty()) {
5912         ALOGV("%s: No queue-specific work pool items\n", __func__);
5913         return enc->vkQueueWaitIdle(queue, true /* do lock */);
5914     }
5915 
5916     for (auto handle : toWait) {
5917         ALOGV("%s: waiting on work group item: %llu\n", __func__, (unsigned long long)handle);
5918         mWorkPool.waitAll(handle);
5919     }
5920 
5921     // now done waiting, get the host's opinion
5922     return enc->vkQueueWaitIdle(queue, true /* do lock */);
5923 }
5924 
5925 #ifdef VK_USE_PLATFORM_ANDROID_KHR
5926 void ResourceTracker::unwrap_VkNativeBufferANDROID(const VkNativeBufferANDROID* inputNativeInfo,
5927                                                    VkNativeBufferANDROID* outputNativeInfo) {
5928     if (!inputNativeInfo || !inputNativeInfo->handle) {
5929         return;
5930     }
5931 
5932     if (!outputNativeInfo || !outputNativeInfo->handle) {
5933         ALOGE("FATAL: Local native buffer info not properly allocated!");
5934         abort();
5935     }
5936 
5937     auto* gralloc = ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->grallocHelper();
5938     const native_handle_t* nativeHandle = (const native_handle_t*)inputNativeInfo->handle;
5939 
5940 #if defined(END2END_TESTS)
5941     // This is valid since the testing backend creates the handle and we know the layout.
5942     *(uint32_t*)(outputNativeInfo->handle) = (uint32_t)nativeHandle->data[0];
5943 #else
5944     *(uint32_t*)(outputNativeInfo->handle) = gralloc->getHostHandle(nativeHandle);
5945 #endif
5946 }
5947 
5948 void ResourceTracker::unwrap_VkBindImageMemorySwapchainInfoKHR(
5949     const VkBindImageMemorySwapchainInfoKHR* inputBimsi,
5950     VkBindImageMemorySwapchainInfoKHR* outputBimsi) {
5951     if (!inputBimsi || !inputBimsi->swapchain) {
5952         return;
5953     }
5954 
5955     if (!outputBimsi || !outputBimsi->swapchain) {
5956         return;
5957     }
5958 
5959     // Android-based swapchains are implemented by the Android framework's
5960     // libvulkan. They only exist within the guest and should not be sent to
5961     // the host.
5962     outputBimsi->swapchain = VK_NULL_HANDLE;
5963 }
5964 #endif
5965 
5966 void ResourceTracker::unwrap_vkCreateImage_pCreateInfo(const VkImageCreateInfo* pCreateInfo,
5967                                                        VkImageCreateInfo* local_pCreateInfo) {
5968 #ifdef VK_USE_PLATFORM_ANDROID_KHR
5969     const VkNativeBufferANDROID* inputNativeInfo =
5970         vk_find_struct<VkNativeBufferANDROID>(pCreateInfo);
5971 
5972     VkNativeBufferANDROID* outputNativeInfo = const_cast<VkNativeBufferANDROID*>(
5973         vk_find_struct<VkNativeBufferANDROID>(local_pCreateInfo));
5974 
5975     unwrap_VkNativeBufferANDROID(inputNativeInfo, outputNativeInfo);
5976 #endif
5977 }
5978 
5979 void ResourceTracker::unwrap_vkAcquireImageANDROID_nativeFenceFd(int fd, int* fd_out) {
5980 #ifdef VK_USE_PLATFORM_ANDROID_KHR
5981     (void)fd_out;
5982     if (fd != -1) {
5983         AEMU_SCOPED_TRACE("waitNativeFenceInAcquire");
5984         // Implicit Synchronization
5985         auto* syncHelper =
5986             ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->syncHelper();
5987         syncHelper->wait(fd, 3000);
5988         // From libvulkan's swapchain.cpp:
5989         // """
5990         // NOTE: we're relying on AcquireImageANDROID to close fence_clone,
5991         // even if the call fails. We could close it ourselves on failure, but
5992         // that would create a race condition if the driver closes it on a
5993         // failure path: some other thread might create an fd with the same
5994         // number between the time the driver closes it and the time we close
5995         // it. We must assume one of: the driver *always* closes it even on
5996         // failure, or *never* closes it on failure.
5997         // """
5998         // Therefore, assume contract where we need to close fd in this driver
5999         syncHelper->close(fd);
6000     }
6001 #endif
6002 }
6003 
6004 void ResourceTracker::unwrap_VkBindImageMemory2_pBindInfos(
6005     uint32_t bindInfoCount, const VkBindImageMemoryInfo* inputBindInfos,
6006     VkBindImageMemoryInfo* outputBindInfos) {
6007 #ifdef VK_USE_PLATFORM_ANDROID_KHR
6008     for (uint32_t i = 0; i < bindInfoCount; ++i) {
6009         const VkBindImageMemoryInfo* inputBindInfo = &inputBindInfos[i];
6010         VkBindImageMemoryInfo* outputBindInfo = &outputBindInfos[i];
6011 
6012         const VkNativeBufferANDROID* inputNativeInfo =
6013             vk_find_struct<VkNativeBufferANDROID>(inputBindInfo);
6014 
6015         VkNativeBufferANDROID* outputNativeInfo = const_cast<VkNativeBufferANDROID*>(
6016             vk_find_struct<VkNativeBufferANDROID>(outputBindInfo));
6017 
6018         unwrap_VkNativeBufferANDROID(inputNativeInfo, outputNativeInfo);
6019 
6020         const VkBindImageMemorySwapchainInfoKHR* inputBimsi =
6021             vk_find_struct<VkBindImageMemorySwapchainInfoKHR>(inputBindInfo);
6022 
6023         VkBindImageMemorySwapchainInfoKHR* outputBimsi =
6024             const_cast<VkBindImageMemorySwapchainInfoKHR*>(
6025                 vk_find_struct<VkBindImageMemorySwapchainInfoKHR>(outputBindInfo));
6026 
6027         unwrap_VkBindImageMemorySwapchainInfoKHR(inputBimsi, outputBimsi);
6028     }
6029 #endif
6030 }
6031 
6032 // Action of vkMapMemoryIntoAddressSpaceGOOGLE:
6033 // 1. preprocess (on_vkMapMemoryIntoAddressSpaceGOOGLE_pre):
6034 //    uses address space device to reserve the right size of
6035 //    memory.
6036 // 2. the reservation results in a physical address. the physical
6037 //    address is set as |*pAddress|.
6038 // 3. after pre, the API call is encoded to the host, where the
6039 //    value of pAddress is also sent (the physical address).
6040 // 4. the host will obtain the actual gpu pointer and send it
6041 //    back out in |*pAddress|.
6042 // 5. postprocess (on_vkMapMemoryIntoAddressSpaceGOOGLE) will run,
6043 //    using the mmap() method of GoldfishAddressSpaceBlock to obtain
6044 //    a pointer in guest userspace corresponding to the host pointer.
6045 VkResult ResourceTracker::on_vkMapMemoryIntoAddressSpaceGOOGLE_pre(void*, VkResult, VkDevice,
6046                                                                    VkDeviceMemory memory,
6047                                                                    uint64_t* pAddress) {
6048     AutoLock<RecursiveLock> lock(mLock);
6049 
6050     auto it = info_VkDeviceMemory.find(memory);
6051     if (it == info_VkDeviceMemory.end()) {
6052         return VK_ERROR_OUT_OF_HOST_MEMORY;
6053     }
6054 
6055 #if defined(__ANDROID__)
6056     auto& memInfo = it->second;
6057 
6058     GoldfishAddressSpaceBlockPtr block = std::make_shared<GoldfishAddressSpaceBlock>();
6059     block->allocate(mGoldfishAddressSpaceBlockProvider.get(), memInfo.coherentMemorySize);
6060 
6061     memInfo.goldfishBlock = block;
6062     *pAddress = block->physAddr();
6063 
6064     return VK_SUCCESS;
6065 #else
6066     (void)pAddress;
6067     return VK_ERROR_MEMORY_MAP_FAILED;
6068 #endif
6069 }
6070 
6071 VkResult ResourceTracker::on_vkMapMemoryIntoAddressSpaceGOOGLE(void*, VkResult input_result,
6072                                                                VkDevice, VkDeviceMemory memory,
6073                                                                uint64_t* pAddress) {
6074     (void)memory;
6075     (void)pAddress;
6076 
6077     if (input_result != VK_SUCCESS) {
6078         return input_result;
6079     }
6080 
6081     return input_result;
6082 }
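
// Illustrative sketch (not part of the driver): the guest caller's view of
// the five-step flow documented above, assuming the private entry point has
// the shape vkMapMemoryIntoAddressSpaceGOOGLE(device, memory, pAddress):
//
//   uint64_t addr = 0;
//   // pre: |addr| is filled with the reserved guest-physical address.
//   // encode: the host receives |addr| and writes back its GPU pointer.
//   VkResult res = vkMapMemoryIntoAddressSpaceGOOGLE(device, memory, &addr);
//   // post: GoldfishAddressSpaceBlock's mmap() yields a guest userspace
//   // pointer corresponding to the host pointer.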
6083 
6084 VkResult ResourceTracker::initDescriptorUpdateTemplateBuffers(
6085     const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo,
6086     VkDescriptorUpdateTemplate descriptorUpdateTemplate) {
6087     AutoLock<RecursiveLock> lock(mLock);
6088 
6089     auto it = info_VkDescriptorUpdateTemplate.find(descriptorUpdateTemplate);
6090     if (it == info_VkDescriptorUpdateTemplate.end()) {
6091         return VK_ERROR_INITIALIZATION_FAILED;
6092     }
6093 
6094     auto& info = it->second;
6095     uint32_t inlineUniformBlockBufferSize = 0;
6096 
6097     for (uint32_t i = 0; i < pCreateInfo->descriptorUpdateEntryCount; ++i) {
6098         const auto& entry = pCreateInfo->pDescriptorUpdateEntries[i];
6099         uint32_t descCount = entry.descriptorCount;
6100         VkDescriptorType descType = entry.descriptorType;
6101         ++info.templateEntryCount;
6102         if (isDescriptorTypeInlineUniformBlock(descType)) {
6103             inlineUniformBlockBufferSize += descCount;
6104             ++info.inlineUniformBlockCount;
6105         } else {
6106             for (uint32_t j = 0; j < descCount; ++j) {
6107                 if (isDescriptorTypeImageInfo(descType)) {
6108                     ++info.imageInfoCount;
6109                 } else if (isDescriptorTypeBufferInfo(descType)) {
6110                     ++info.bufferInfoCount;
6111                 } else if (isDescriptorTypeBufferView(descType)) {
6112                     ++info.bufferViewCount;
6113                 } else {
6114                     ALOGE("%s: FATAL: Unknown descriptor type %d\n", __func__, descType);
6115                     // abort();
6116                 }
6117             }
6118         }
6119     }
6120 
6121     if (info.templateEntryCount)
6122         info.templateEntries = new VkDescriptorUpdateTemplateEntry[info.templateEntryCount];
6123 
6124     if (info.imageInfoCount) {
6125         info.imageInfoIndices = new uint32_t[info.imageInfoCount];
6126         info.imageInfos = new VkDescriptorImageInfo[info.imageInfoCount];
6127     }
6128 
6129     if (info.bufferInfoCount) {
6130         info.bufferInfoIndices = new uint32_t[info.bufferInfoCount];
6131         info.bufferInfos = new VkDescriptorBufferInfo[info.bufferInfoCount];
6132     }
6133 
6134     if (info.bufferViewCount) {
6135         info.bufferViewIndices = new uint32_t[info.bufferViewCount];
6136         info.bufferViews = new VkBufferView[info.bufferViewCount];
6137     }
6138 
6139     if (info.inlineUniformBlockCount) {
6140         info.inlineUniformBlockBuffer.resize(inlineUniformBlockBufferSize);
6141         info.inlineUniformBlockBytesPerBlocks.resize(info.inlineUniformBlockCount);
6142     }
6143 
6144     uint32_t imageInfoIndex = 0;
6145     uint32_t bufferInfoIndex = 0;
6146     uint32_t bufferViewIndex = 0;
6147     uint32_t inlineUniformBlockIndex = 0;
6148 
6149     for (uint32_t i = 0; i < pCreateInfo->descriptorUpdateEntryCount; ++i) {
6150         const auto& entry = pCreateInfo->pDescriptorUpdateEntries[i];
6151         uint32_t descCount = entry.descriptorCount;
6152         VkDescriptorType descType = entry.descriptorType;
6153 
6154         info.templateEntries[i] = entry;
6155 
6156         if (isDescriptorTypeInlineUniformBlock(descType)) {
6157             info.inlineUniformBlockBytesPerBlocks[inlineUniformBlockIndex] = descCount;
6158             ++inlineUniformBlockIndex;
6159         } else {
6160             for (uint32_t j = 0; j < descCount; ++j) {
6161                 if (isDescriptorTypeImageInfo(descType)) {
6162                     info.imageInfoIndices[imageInfoIndex] = i;
6163                     ++imageInfoIndex;
6164                 } else if (isDescriptorTypeBufferInfo(descType)) {
6165                     info.bufferInfoIndices[bufferInfoIndex] = i;
6166                     ++bufferInfoIndex;
6167                 } else if (isDescriptorTypeBufferView(descType)) {
6168                     info.bufferViewIndices[bufferViewIndex] = i;
6169                     ++bufferViewIndex;
6170                 } else {
6171                     ALOGE("%s: FATAL: Unknown descriptor type %d\n", __func__, descType);
6172                     // abort();
6173                 }
6174             }
6175         }
6176     }
6177 
6178     return VK_SUCCESS;
6179 }
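
// Illustrative sketch (not part of the driver): for a hypothetical template
// with a single entry
//
//   VkDescriptorUpdateTemplateEntry entry = {
//       .dstBinding = 0,
//       .dstArrayElement = 0,
//       .descriptorCount = 2,
//       .descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
//       .offset = 0,
//       .stride = sizeof(VkDescriptorImageInfo)};
//
// the pass above records templateEntryCount = 1 and imageInfoCount = 2, so it
// allocates imageInfoIndices[2] and imageInfos[2], with both index slots
// pointing back at entry 0.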
6180 
6181 VkResult ResourceTracker::on_vkCreateDescriptorUpdateTemplate(
6182     void* context, VkResult input_result, VkDevice device,
6183     const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo,
6184     const VkAllocationCallbacks* pAllocator,
6185     VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate) {
6186     (void)context;
6187     (void)device;
6188     (void)pAllocator;
6189 
6190     if (input_result != VK_SUCCESS) return input_result;
6191 
6192     return initDescriptorUpdateTemplateBuffers(pCreateInfo, *pDescriptorUpdateTemplate);
6193 }
6194 
6195 VkResult ResourceTracker::on_vkCreateDescriptorUpdateTemplateKHR(
6196     void* context, VkResult input_result, VkDevice device,
6197     const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo,
6198     const VkAllocationCallbacks* pAllocator,
6199     VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate) {
6200     (void)context;
6201     (void)device;
6202     (void)pAllocator;
6203 
6204     if (input_result != VK_SUCCESS) return input_result;
6205 
6206     return initDescriptorUpdateTemplateBuffers(pCreateInfo, *pDescriptorUpdateTemplate);
6207 }
6208 
6209 void ResourceTracker::on_vkUpdateDescriptorSetWithTemplate(
6210     void* context, VkDevice device, VkDescriptorSet descriptorSet,
6211     VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData) {
6212     VkEncoder* enc = (VkEncoder*)context;
6213 
6214     uint8_t* userBuffer = (uint8_t*)pData;
6215     if (!userBuffer) return;
6216 
6217     // TODO: Make this thread safe
6218     AutoLock<RecursiveLock> lock(mLock);
6219 
6220     auto it = info_VkDescriptorUpdateTemplate.find(descriptorUpdateTemplate);
6221     if (it == info_VkDescriptorUpdateTemplate.end()) {
6222         return;
6223     }
6224 
6225     auto& info = it->second;
6226 
6227     uint32_t templateEntryCount = info.templateEntryCount;
6228     VkDescriptorUpdateTemplateEntry* templateEntries = info.templateEntries;
6229 
6230     uint32_t imageInfoCount = info.imageInfoCount;
6231     uint32_t bufferInfoCount = info.bufferInfoCount;
6232     uint32_t bufferViewCount = info.bufferViewCount;
6233     uint32_t inlineUniformBlockCount = info.inlineUniformBlockCount;
6234     uint32_t* imageInfoIndices = info.imageInfoIndices;
6235     uint32_t* bufferInfoIndices = info.bufferInfoIndices;
6236     uint32_t* bufferViewIndices = info.bufferViewIndices;
6237     VkDescriptorImageInfo* imageInfos = info.imageInfos;
6238     VkDescriptorBufferInfo* bufferInfos = info.bufferInfos;
6239     VkBufferView* bufferViews = info.bufferViews;
6240     uint8_t* inlineUniformBlockBuffer = info.inlineUniformBlockBuffer.data();
6241     uint32_t* inlineUniformBlockBytesPerBlocks = info.inlineUniformBlockBytesPerBlocks.data();
6242 
6243     lock.unlock();
6244 
6245     size_t currImageInfoOffset = 0;
6246     size_t currBufferInfoOffset = 0;
6247     size_t currBufferViewOffset = 0;
6248     size_t inlineUniformBlockOffset = 0;
6249     size_t inlineUniformBlockIdx = 0;
6250 
6251     struct goldfish_VkDescriptorSet* ds = as_goldfish_VkDescriptorSet(descriptorSet);
6252     ReifiedDescriptorSet* reified = ds->reified;
6253 
6254     bool batched = mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate;
6255 
6256     for (uint32_t i = 0; i < templateEntryCount; ++i) {
6257         const auto& entry = templateEntries[i];
6258         VkDescriptorType descType = entry.descriptorType;
6259         uint32_t dstBinding = entry.dstBinding;
6260 
6261         auto offset = entry.offset;
6262         auto stride = entry.stride;
6263         auto dstArrayElement = entry.dstArrayElement;
6264 
6265         uint32_t descCount = entry.descriptorCount;
6266 
6267         if (isDescriptorTypeImageInfo(descType)) {
6268             if (!stride) stride = sizeof(VkDescriptorImageInfo);
6269 
6270             const VkDescriptorImageInfo* currImageInfoBegin =
6271                 (const VkDescriptorImageInfo*)((uint8_t*)imageInfos + currImageInfoOffset);
6272 
6273             for (uint32_t j = 0; j < descCount; ++j) {
6274                 const VkDescriptorImageInfo* user =
6275                     (const VkDescriptorImageInfo*)(userBuffer + offset + j * stride);
6276 
6277                 memcpy(((uint8_t*)imageInfos) + currImageInfoOffset, user,
6278                        sizeof(VkDescriptorImageInfo));
6279 #if defined(__linux__) && !defined(VK_USE_PLATFORM_ANDROID_KHR)
6280                 // Convert mesa to internal for objects in the user buffer
6281                 VkDescriptorImageInfo* internalImageInfo =
6282                     (VkDescriptorImageInfo*)(((uint8_t*)imageInfos) + currImageInfoOffset);
6283                 VK_FROM_HANDLE(gfxstream_vk_image_view, gfxstream_image_view,
6284                                internalImageInfo->imageView);
6285                 internalImageInfo->imageView = gfxstream_image_view->internal_object;
6286 #endif
6287                 currImageInfoOffset += sizeof(VkDescriptorImageInfo);
6288             }
6289 
6290             if (batched) {
6291                 doEmulatedDescriptorImageInfoWriteFromTemplate(
6292                     descType, dstBinding, dstArrayElement, descCount, currImageInfoBegin, reified);
6293             }
6294         } else if (isDescriptorTypeBufferInfo(descType)) {
6295             if (!stride) stride = sizeof(VkDescriptorBufferInfo);
6296 
6297             const VkDescriptorBufferInfo* currBufferInfoBegin =
6298                 (const VkDescriptorBufferInfo*)((uint8_t*)bufferInfos + currBufferInfoOffset);
6299 
6300             for (uint32_t j = 0; j < descCount; ++j) {
6301                 const VkDescriptorBufferInfo* user =
6302                     (const VkDescriptorBufferInfo*)(userBuffer + offset + j * stride);
6303 
6304                 memcpy(((uint8_t*)bufferInfos) + currBufferInfoOffset, user,
6305                        sizeof(VkDescriptorBufferInfo));
6306 #if defined(__linux__) && !defined(VK_USE_PLATFORM_ANDROID_KHR)
6307                 // Convert mesa to internal for objects in the user buffer
6308                 VkDescriptorBufferInfo* internalBufferInfo =
6309                     (VkDescriptorBufferInfo*)(((uint8_t*)bufferInfos) + currBufferInfoOffset);
6310                 VK_FROM_HANDLE(gfxstream_vk_buffer, gfxstream_buffer, internalBufferInfo->buffer);
6311                 internalBufferInfo->buffer = gfxstream_buffer->internal_object;
6312 #endif
6313                 currBufferInfoOffset += sizeof(VkDescriptorBufferInfo);
6314             }
6315 
6316             if (batched) {
6317                 doEmulatedDescriptorBufferInfoWriteFromTemplate(
6318                     descType, dstBinding, dstArrayElement, descCount, currBufferInfoBegin, reified);
6319             }
6320 
6321         } else if (isDescriptorTypeBufferView(descType)) {
6322             if (!stride) stride = sizeof(VkBufferView);
6323 
6324             const VkBufferView* currBufferViewBegin =
6325                 (const VkBufferView*)((uint8_t*)bufferViews + currBufferViewOffset);
6326 
6327             for (uint32_t j = 0; j < descCount; ++j) {
6328                 const VkBufferView* user = (const VkBufferView*)(userBuffer + offset + j * stride);
6329 
6330                 memcpy(((uint8_t*)bufferViews) + currBufferViewOffset, user, sizeof(VkBufferView));
6331 #if defined(__linux__) && !defined(VK_USE_PLATFORM_ANDROID_KHR)
6332                 // Convert mesa to internal for objects in the user buffer
6333                 VkBufferView* internalBufferView =
6334                     (VkBufferView*)(((uint8_t*)bufferViews) + currBufferViewOffset);
6335                 VK_FROM_HANDLE(gfxstream_vk_buffer_view, gfxstream_buffer_view,
6336                                *internalBufferView);
6337                 *internalBufferView = gfxstream_buffer_view->internal_object;
6338 #endif
6339 
6340                 currBufferViewOffset += sizeof(VkBufferView);
6341             }
6342 
6343             if (batched) {
6344                 doEmulatedDescriptorBufferViewWriteFromTemplate(
6345                     descType, dstBinding, dstArrayElement, descCount, currBufferViewBegin, reified);
6346             }
6347         } else if (isDescriptorTypeInlineUniformBlock(descType)) {
6348             uint32_t inlineUniformBlockBytesPerBlock =
6349                 inlineUniformBlockBytesPerBlocks[inlineUniformBlockIdx];
6350             uint8_t* currInlineUniformBlockBufferBegin =
6351                 inlineUniformBlockBuffer + inlineUniformBlockOffset;
6352             memcpy(currInlineUniformBlockBufferBegin, userBuffer + offset,
6353                    inlineUniformBlockBytesPerBlock);
6354             inlineUniformBlockIdx++;
6355             inlineUniformBlockOffset += inlineUniformBlockBytesPerBlock;
6356 
6357             if (batched) {
6358                 doEmulatedDescriptorInlineUniformBlockFromTemplate(
6359                     descType, dstBinding, dstArrayElement, descCount,
6360                     currInlineUniformBlockBufferBegin, reified);
6361             }
6362         } else {
6363             ALOGE("%s: FATAL: Unknown descriptor type %d\n", __func__, descType);
6364             abort();
6365         }
6366     }
6367 
6368     if (batched) return;
6369 
6370     enc->vkUpdateDescriptorSetWithTemplateSized2GOOGLE(
6371         device, descriptorSet, descriptorUpdateTemplate, imageInfoCount, bufferInfoCount,
6372         bufferViewCount, static_cast<uint32_t>(info.inlineUniformBlockBuffer.size()),
6373         imageInfoIndices, bufferInfoIndices, bufferViewIndices, imageInfos, bufferInfos,
6374         bufferViews, inlineUniformBlockBuffer, true /* do lock */);
6375 }
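
// Illustrative sketch (not part of the driver): the user buffer walked above
// is addressed purely by each entry's offset/stride. For a hypothetical
// image-info entry with descriptorCount = 2, offset = 16 and stride = 64,
// descriptor j is read from:
//
//   const VkDescriptorImageInfo* user =
//       (const VkDescriptorImageInfo*)((const uint8_t*)pData + 16 + j * 64);
//
// A stride of 0 is patched to sizeof(VkDescriptorImageInfo) above, so tightly
// packed buffers also work.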
6376 
6377 VkResult ResourceTracker::on_vkGetPhysicalDeviceImageFormatProperties2_common(
6378     bool isKhr, void* context, VkResult input_result, VkPhysicalDevice physicalDevice,
6379     const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo,
6380     VkImageFormatProperties2* pImageFormatProperties) {
6381     VkEncoder* enc = (VkEncoder*)context;
6382     (void)input_result;
6383 
6384     uint32_t supportedHandleType = 0;
6385     VkExternalImageFormatProperties* ext_img_properties =
6386         vk_find_struct<VkExternalImageFormatProperties>(pImageFormatProperties);
6387 
6388 #ifdef VK_USE_PLATFORM_FUCHSIA
6389 
6390     constexpr VkFormat kExternalImageSupportedFormats[] = {
6391         VK_FORMAT_B8G8R8A8_SINT,  VK_FORMAT_B8G8R8A8_UNORM,   VK_FORMAT_B8G8R8A8_SRGB,
6392         VK_FORMAT_B8G8R8A8_SNORM, VK_FORMAT_B8G8R8A8_SSCALED, VK_FORMAT_B8G8R8A8_USCALED,
6393         VK_FORMAT_R8G8B8A8_SINT,  VK_FORMAT_R8G8B8A8_UNORM,   VK_FORMAT_R8G8B8A8_SRGB,
6394         VK_FORMAT_R8G8B8A8_SNORM, VK_FORMAT_R8G8B8A8_SSCALED, VK_FORMAT_R8G8B8A8_USCALED,
6395         VK_FORMAT_R8_UNORM,       VK_FORMAT_R8_UINT,          VK_FORMAT_R8_USCALED,
6396         VK_FORMAT_R8_SNORM,       VK_FORMAT_R8_SINT,          VK_FORMAT_R8_SSCALED,
6397         VK_FORMAT_R8_SRGB,        VK_FORMAT_R8G8_UNORM,       VK_FORMAT_R8G8_UINT,
6398         VK_FORMAT_R8G8_USCALED,   VK_FORMAT_R8G8_SNORM,       VK_FORMAT_R8G8_SINT,
6399         VK_FORMAT_R8G8_SSCALED,   VK_FORMAT_R8G8_SRGB,
6400     };
6401 
6402     if (ext_img_properties) {
6403         if (std::find(std::begin(kExternalImageSupportedFormats),
6404                       std::end(kExternalImageSupportedFormats),
6405                       pImageFormatInfo->format) == std::end(kExternalImageSupportedFormats)) {
6406             return VK_ERROR_FORMAT_NOT_SUPPORTED;
6407         }
6408     }
6409     supportedHandleType |= VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA;
6410 #endif
6411 
6412 #ifdef VK_USE_PLATFORM_ANDROID_KHR
6413     VkAndroidHardwareBufferUsageANDROID* output_ahw_usage =
6414         vk_find_struct<VkAndroidHardwareBufferUsageANDROID>(pImageFormatProperties);
6415     supportedHandleType |= VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
6416                            VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
6417 #endif
6418     const VkPhysicalDeviceExternalImageFormatInfo* ext_img_info =
6419         vk_find_struct<VkPhysicalDeviceExternalImageFormatInfo>(pImageFormatInfo);
6420     if (supportedHandleType && ext_img_info) {
6421         // 0 is a valid handleType so we don't check against 0
6422         if (ext_img_info->handleType != (ext_img_info->handleType & supportedHandleType)) {
6423             return VK_ERROR_FORMAT_NOT_SUPPORTED;
6424         }
6425     }
6426 
6427     VkResult hostRes;
6428 
6429     if (isKhr) {
6430         hostRes = enc->vkGetPhysicalDeviceImageFormatProperties2KHR(
6431             physicalDevice, pImageFormatInfo, pImageFormatProperties, true /* do lock */);
6432     } else {
6433         hostRes = enc->vkGetPhysicalDeviceImageFormatProperties2(
6434             physicalDevice, pImageFormatInfo, pImageFormatProperties, true /* do lock */);
6435     }
6436 
6437     if (hostRes != VK_SUCCESS) return hostRes;
6438 
6439 #ifdef VK_USE_PLATFORM_FUCHSIA
6440     if (ext_img_properties) {
6441         if (ext_img_info) {
6442             if (static_cast<uint32_t>(ext_img_info->handleType) ==
6443                 VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA) {
6444                 ext_img_properties->externalMemoryProperties = {
6445                     .externalMemoryFeatures = VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT |
6446                                               VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT,
6447                     .exportFromImportedHandleTypes =
6448                         VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA,
6449                     .compatibleHandleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA,
6450                 };
6451             }
6452         }
6453     }
6454 #endif
6455 
6456 #ifdef VK_USE_PLATFORM_ANDROID_KHR
6457     if (output_ahw_usage) {
6458         output_ahw_usage->androidHardwareBufferUsage = getAndroidHardwareBufferUsageFromVkUsage(
6459             pImageFormatInfo->flags, pImageFormatInfo->usage);
6460     }
6461 #endif
6462     if (ext_img_properties) {
6463         transformImpl_VkExternalMemoryProperties_fromhost(
6464             &ext_img_properties->externalMemoryProperties, 0);
6465     }
6466     return hostRes;
6467 }
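
// Illustrative sketch (not part of the driver): an Android-side query that
// the common path above services (`physDev` is hypothetical):
//
//   VkPhysicalDeviceExternalImageFormatInfo extInfo = {
//       .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO,
//       .handleType =
//           VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID};
//   VkPhysicalDeviceImageFormatInfo2 info = {
//       .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2,
//       .pNext = &extInfo,
//       .format = VK_FORMAT_R8G8B8A8_UNORM,
//       .type = VK_IMAGE_TYPE_2D,
//       .tiling = VK_IMAGE_TILING_OPTIMAL,
//       .usage = VK_IMAGE_USAGE_SAMPLED_BIT};
//   VkImageFormatProperties2 props = {
//       .sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2};
//   vkGetPhysicalDeviceImageFormatProperties2(physDev, &info, &props);
//   // An unsupported handleType is rejected here in the guest, before the
//   // query ever reaches the host.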
6468 
6469 VkResult ResourceTracker::on_vkGetPhysicalDeviceImageFormatProperties2(
6470     void* context, VkResult input_result, VkPhysicalDevice physicalDevice,
6471     const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo,
6472     VkImageFormatProperties2* pImageFormatProperties) {
6473     return on_vkGetPhysicalDeviceImageFormatProperties2_common(
6474         false /* not KHR */, context, input_result, physicalDevice, pImageFormatInfo,
6475         pImageFormatProperties);
6476 }
6477 
6478 VkResult ResourceTracker::on_vkGetPhysicalDeviceImageFormatProperties2KHR(
6479     void* context, VkResult input_result, VkPhysicalDevice physicalDevice,
6480     const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo,
6481     VkImageFormatProperties2* pImageFormatProperties) {
6482     return on_vkGetPhysicalDeviceImageFormatProperties2_common(
6483         true /* is KHR */, context, input_result, physicalDevice, pImageFormatInfo,
6484         pImageFormatProperties);
6485 }
6486 
6487 void ResourceTracker::on_vkGetPhysicalDeviceExternalBufferProperties_common(
6488     bool isKhr, void* context, VkPhysicalDevice physicalDevice,
6489     const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo,
6490     VkExternalBufferProperties* pExternalBufferProperties) {
6491     VkEncoder* enc = (VkEncoder*)context;
6492 
6493     // Older versions of Goldfish's Gralloc did not support allocating AHARDWAREBUFFER_FORMAT_BLOB
6494     // with GPU usage (b/299520213).
6495     if (ResourceTracker::threadingCallbacks.hostConnectionGetFunc()
6496             ->grallocHelper()
6497             ->treatBlobAsImage() &&
6498         pExternalBufferInfo->handleType ==
6499             VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID) {
6500         pExternalBufferProperties->externalMemoryProperties.externalMemoryFeatures = 0;
6501         pExternalBufferProperties->externalMemoryProperties.exportFromImportedHandleTypes = 0;
6502         pExternalBufferProperties->externalMemoryProperties.compatibleHandleTypes = 0;
6503         return;
6504     }
6505 
6506     uint32_t supportedHandleType = 0;
6507 #ifdef VK_USE_PLATFORM_FUCHSIA
6508     supportedHandleType |= VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA;
6509 #endif
6510 #ifdef VK_USE_PLATFORM_ANDROID_KHR
6511     supportedHandleType |= VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
6512                            VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
6513 #endif
6514     if (supportedHandleType) {
6515         // 0 is a valid handleType so we can't check against 0
6516         if (pExternalBufferInfo->handleType !=
6517             (pExternalBufferInfo->handleType & supportedHandleType)) {
6518             return;
6519         }
6520     }
6521 
6522     if (isKhr) {
6523         enc->vkGetPhysicalDeviceExternalBufferPropertiesKHR(
6524             physicalDevice, pExternalBufferInfo, pExternalBufferProperties, true /* do lock */);
6525     } else {
6526         enc->vkGetPhysicalDeviceExternalBufferProperties(
6527             physicalDevice, pExternalBufferInfo, pExternalBufferProperties, true /* do lock */);
6528     }
6529     transformImpl_VkExternalMemoryProperties_fromhost(
6530         &pExternalBufferProperties->externalMemoryProperties, 0);
6531 }
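
// Illustrative sketch (not part of the driver): with the gralloc quirk above
// active, an AHB external buffer query reports no supported features
// (`physDev` is hypothetical):
//
//   VkPhysicalDeviceExternalBufferInfo info = {
//       .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO,
//       .usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT,
//       .handleType =
//           VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID};
//   VkExternalBufferProperties props = {
//       .sType = VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES};
//   vkGetPhysicalDeviceExternalBufferProperties(physDev, &info, &props);
//   // props.externalMemoryProperties comes back all-zero: exportable AHB
//   // buffers are unsupported with this gralloc (b/299520213).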
6532 
6533 void ResourceTracker::on_vkGetPhysicalDeviceExternalBufferProperties(
6534     void* context, VkPhysicalDevice physicalDevice,
6535     const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo,
6536     VkExternalBufferProperties* pExternalBufferProperties) {
6537     return on_vkGetPhysicalDeviceExternalBufferProperties_common(
6538         false /* not KHR */, context, physicalDevice, pExternalBufferInfo,
6539         pExternalBufferProperties);
6540 }
6541 
6542 void ResourceTracker::on_vkGetPhysicalDeviceExternalBufferPropertiesKHR(
6543     void* context, VkPhysicalDevice physicalDevice,
6544     const VkPhysicalDeviceExternalBufferInfoKHR* pExternalBufferInfo,
6545     VkExternalBufferPropertiesKHR* pExternalBufferProperties) {
6546     return on_vkGetPhysicalDeviceExternalBufferProperties_common(
6547         true /* is KHR */, context, physicalDevice, pExternalBufferInfo, pExternalBufferProperties);
6548 }
6549 
6550 void ResourceTracker::on_vkGetPhysicalDeviceExternalSemaphoreProperties(
6551     void*, VkPhysicalDevice, const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo,
6552     VkExternalSemaphoreProperties* pExternalSemaphoreProperties) {
6553     (void)pExternalSemaphoreInfo;
6554     (void)pExternalSemaphoreProperties;
6555 #ifdef VK_USE_PLATFORM_FUCHSIA
6556     if (pExternalSemaphoreInfo->handleType ==
6557         static_cast<uint32_t>(VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA)) {
6558         pExternalSemaphoreProperties->compatibleHandleTypes |=
6559             VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA;
6560         pExternalSemaphoreProperties->exportFromImportedHandleTypes |=
6561             VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA;
6562         pExternalSemaphoreProperties->externalSemaphoreFeatures |=
6563             VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
6564             VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
6565     }
6566 #else
6567     const VkSemaphoreTypeCreateInfo* semaphoreTypeCi =
6568         vk_find_struct<VkSemaphoreTypeCreateInfo>(pExternalSemaphoreInfo);
6569     bool isSemaphoreTimeline =
6570         semaphoreTypeCi != nullptr && semaphoreTypeCi->semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE;
6571     if (isSemaphoreTimeline) {
6572         // b/304373623
6573         // dEQP-VK.api.external.semaphore.sync_fd#info_timeline
6574         pExternalSemaphoreProperties->compatibleHandleTypes = 0;
6575         pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
6576         pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
6577     } else if (pExternalSemaphoreInfo->handleType ==
6578                VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT) {
6579         pExternalSemaphoreProperties->compatibleHandleTypes |=
6580             VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
6581         pExternalSemaphoreProperties->exportFromImportedHandleTypes |=
6582             VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
6583         pExternalSemaphoreProperties->externalSemaphoreFeatures |=
6584             VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
6585             VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
6586     }
6587 #endif  // VK_USE_PLATFORM_FUCHSIA
6588 }
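
// Illustrative sketch (not part of the driver): a query the non-Fuchsia
// branch above answers for a binary (non-timeline) semaphore (`physDev` is
// hypothetical):
//
//   VkPhysicalDeviceExternalSemaphoreInfo info = {
//       .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO,
//       .handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT};
//   VkExternalSemaphoreProperties props = {
//       .sType = VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES};
//   vkGetPhysicalDeviceExternalSemaphoreProperties(physDev, &info, &props);
//   // props now advertises exportable | importable sync fd support; a
//   // VkSemaphoreTypeCreateInfo with VK_SEMAPHORE_TYPE_TIMELINE chained to
//   // info.pNext would instead zero out all three fields (b/304373623).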
6589 
6590 void ResourceTracker::on_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR(
6591     void* context, VkPhysicalDevice physicalDevice,
6592     const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo,
6593     VkExternalSemaphoreProperties* pExternalSemaphoreProperties) {
6594     on_vkGetPhysicalDeviceExternalSemaphoreProperties(
6595         context, physicalDevice, pExternalSemaphoreInfo, pExternalSemaphoreProperties);
6596 }
6597 
6598 void ResourceTracker::registerEncoderCleanupCallback(const VkEncoder* encoder, void* object,
6599                                                      CleanupCallback callback) {
6600     AutoLock<RecursiveLock> lock(mLock);
6601     auto& callbacks = mEncoderCleanupCallbacks[encoder];
6602     callbacks[object] = callback;
6603 }
6604 
6605 void ResourceTracker::unregisterEncoderCleanupCallback(const VkEncoder* encoder, void* object) {
6606     AutoLock<RecursiveLock> lock(mLock);
6607     mEncoderCleanupCallbacks[encoder].erase(object);
6608 }
6609 
6610 void ResourceTracker::onEncoderDeleted(const VkEncoder* encoder) {
6611     AutoLock<RecursiveLock> lock(mLock);
6612     if (mEncoderCleanupCallbacks.find(encoder) == mEncoderCleanupCallbacks.end()) return;
6613 
6614     std::unordered_map<void*, CleanupCallback> callbackCopies = mEncoderCleanupCallbacks[encoder];
6615 
6616     mEncoderCleanupCallbacks.erase(encoder);
6617     lock.unlock();
6618 
6619     for (auto it : callbackCopies) {
6620         it.second();
6621     }
6622 }
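
// Illustrative sketch (not part of the driver): how a hypothetical owner of
// a per-encoder resource might use the registry above:
//
//   tracker->registerEncoderCleanupCallback(enc, resource,
//                                           [resource] { delete resource; });
//   // ... later, either the owner tears down first:
//   tracker->unregisterEncoderCleanupCallback(enc, resource);
//   // or onEncoderDeleted(enc) runs the callback when the encoder dies.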
6623 
6624 CommandBufferStagingStream::Alloc ResourceTracker::getAlloc() {
6625     if (mFeatureInfo->hasVulkanAuxCommandMemory) {
6626         return [this](size_t size) -> CommandBufferStagingStream::Memory {
6627             VkMemoryAllocateInfo info{
6628                 .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
6629                 .pNext = nullptr,
6630                 .allocationSize = size,
6631                 .memoryTypeIndex = VK_MAX_MEMORY_TYPES  // indicates auxiliary memory
6632             };
6633 
6634             auto enc = ResourceTracker::getThreadLocalEncoder();
6635             VkDevice device = VK_NULL_HANDLE;
6636             VkDeviceMemory vkDeviceMem = VK_NULL_HANDLE;
6637             VkResult result = getCoherentMemory(&info, enc, device, &vkDeviceMem);
6638             if (result != VK_SUCCESS) {
6639                 ALOGE("Failed to get coherent memory %u", result);
6640                 return {.deviceMemory = VK_NULL_HANDLE, .ptr = nullptr};
6641             }
6642 
6643             // getCoherentMemory() uses suballocations.
6644             // To retrieve the suballocated memory address, look up
6645             // VkDeviceMemory filled in by getCoherentMemory()
6646             // scope of mLock
6647             {
6648                 AutoLock<RecursiveLock> lock(mLock);
6649                 const auto it = info_VkDeviceMemory.find(vkDeviceMem);
6650                 if (it == info_VkDeviceMemory.end()) {
6651                     ALOGE("Allocated coherent memory not found in info_VkDeviceMemory");
6652                     return {.deviceMemory = VK_NULL_HANDLE, .ptr = nullptr};
6653                 }
6654 
6655                 const auto& info = it->second;
6656                 return {.deviceMemory = vkDeviceMem, .ptr = info.ptr};
6657             }
6658         };
6659     }
6660     return nullptr;
6661 }
6662 
6663 CommandBufferStagingStream::Free ResourceTracker::getFree() {
6664     if (mFeatureInfo->hasVulkanAuxCommandMemory) {
6665         return [this](const CommandBufferStagingStream::Memory& memory) {
6666             // deviceMemory may not be the actual backing auxiliary VkDeviceMemory;
6667             // for suballocations, deviceMemory is an alias VkDeviceMemory handle.
6668             // freeCoherentMemoryLocked maps the alias to the backing VkDeviceMemory.
6669             VkDeviceMemory deviceMemory = memory.deviceMemory;
6670             AutoLock<RecursiveLock> lock(mLock);
6671             auto it = info_VkDeviceMemory.find(deviceMemory);
6672             if (it == info_VkDeviceMemory.end()) {
6673                 ALOGE("Device memory to free not found");
6674                 return;
6675             }
6676             auto coherentMemory = freeCoherentMemoryLocked(deviceMemory, it->second);
6677             // We have to release the lock before we could possibly free a
6678             // CoherentMemory, because that will call into VkEncoder, which
6679             // shouldn't be called when the lock is held.
6680             lock.unlock();
6681             coherentMemory = nullptr;
6682         };
6683     }
6684     return nullptr;
6685 }
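
// Illustrative sketch (not part of the driver): how a staging stream might
// pair the two callbacks above (`allocFn`/`freeFn` are hypothetical locals):
//
//   CommandBufferStagingStream::Alloc allocFn = tracker->getAlloc();
//   CommandBufferStagingStream::Free freeFn = tracker->getFree();
//   if (allocFn && freeFn) {
//       CommandBufferStagingStream::Memory mem = allocFn(4096);
//       // ... encode commands into mem.ptr, backed by mem.deviceMemory ...
//       freeFn(mem);
//   }
//   // Null callbacks indicate auxiliary command memory is unsupported.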
6686 
6687 VkResult ResourceTracker::on_vkBeginCommandBuffer(void* context, VkResult input_result,
6688                                                   VkCommandBuffer commandBuffer,
6689                                                   const VkCommandBufferBeginInfo* pBeginInfo) {
6690     (void)context;
6691 
6692     resetCommandBufferStagingInfo(commandBuffer, true /* also reset primaries */,
6693                                   true /* also clear pending descriptor sets */);
6694 
6695     VkEncoder* enc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
6696     (void)input_result;
6697 
6698     struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
6699     cb->flags = pBeginInfo->flags;
6700 
6701     VkCommandBufferBeginInfo modifiedBeginInfo;
6702 
6703     if (pBeginInfo->pInheritanceInfo && !cb->isSecondary) {
6704         modifiedBeginInfo = *pBeginInfo;
6705         modifiedBeginInfo.pInheritanceInfo = nullptr;
6706         pBeginInfo = &modifiedBeginInfo;
6707     }
6708 
6709     if (!supportsDeferredCommands()) {
6710         return enc->vkBeginCommandBuffer(commandBuffer, pBeginInfo, true /* do lock */);
6711     }
6712 
6713     enc->vkBeginCommandBufferAsyncGOOGLE(commandBuffer, pBeginInfo, true /* do lock */);
6714 
6715     return VK_SUCCESS;
6716 }
6717 
6718 VkResult ResourceTracker::on_vkEndCommandBuffer(void* context, VkResult input_result,
6719                                                 VkCommandBuffer commandBuffer) {
6720     VkEncoder* enc = (VkEncoder*)context;
6721     (void)input_result;
6722 
6723     if (!supportsDeferredCommands()) {
6724         return enc->vkEndCommandBuffer(commandBuffer, true /* do lock */);
6725     }
6726 
6727     enc->vkEndCommandBufferAsyncGOOGLE(commandBuffer, true /* do lock */);
6728 
6729     return VK_SUCCESS;
6730 }
6731 
6732 VkResult ResourceTracker::on_vkResetCommandBuffer(void* context, VkResult input_result,
6733                                                   VkCommandBuffer commandBuffer,
6734                                                   VkCommandBufferResetFlags flags) {
6735     resetCommandBufferStagingInfo(commandBuffer, true /* also reset primaries */,
6736                                   true /* also clear pending descriptor sets */);
6737 
6738     VkEncoder* enc = (VkEncoder*)context;
6739     (void)input_result;
6740 
6741     if (!supportsDeferredCommands()) {
6742         return enc->vkResetCommandBuffer(commandBuffer, flags, true /* do lock */);
6743     }
6744 
6745     enc->vkResetCommandBufferAsyncGOOGLE(commandBuffer, flags, true /* do lock */);
6746     return VK_SUCCESS;
6747 }
6748 
6749 VkResult ResourceTracker::on_vkCreateImageView(void* context, VkResult input_result,
6750                                                VkDevice device,
6751                                                const VkImageViewCreateInfo* pCreateInfo,
6752                                                const VkAllocationCallbacks* pAllocator,
6753                                                VkImageView* pView) {
6754     VkEncoder* enc = (VkEncoder*)context;
6755     (void)input_result;
6756 
6757     VkImageViewCreateInfo localCreateInfo = vk_make_orphan_copy(*pCreateInfo);
6758     vk_struct_chain_iterator structChainIter = vk_make_chain_iterator(&localCreateInfo);
6759 
6760 #if defined(VK_USE_PLATFORM_ANDROID_KHR)
6761     if (pCreateInfo->format == VK_FORMAT_UNDEFINED) {
6762         AutoLock<RecursiveLock> lock(mLock);
6763 
6764         auto it = info_VkImage.find(pCreateInfo->image);
6765         if (it != info_VkImage.end() && it->second.hasExternalFormat) {
6766             localCreateInfo.format = vk_format_from_android(it->second.androidFormat);
6767         }
6768     }
6769     VkSamplerYcbcrConversionInfo localVkSamplerYcbcrConversionInfo;
6770     const VkSamplerYcbcrConversionInfo* samplerYcbcrConversionInfo =
6771         vk_find_struct<VkSamplerYcbcrConversionInfo>(pCreateInfo);
6772     if (samplerYcbcrConversionInfo) {
6773         if (samplerYcbcrConversionInfo->conversion != VK_YCBCR_CONVERSION_DO_NOTHING) {
6774             localVkSamplerYcbcrConversionInfo = vk_make_orphan_copy(*samplerYcbcrConversionInfo);
6775             vk_append_struct(&structChainIter, &localVkSamplerYcbcrConversionInfo);
6776         }
6777     }
6778 #endif
6779 
6780     return enc->vkCreateImageView(device, &localCreateInfo, pAllocator, pView, true /* do lock */);
6781 }
6782 
6783 void ResourceTracker::on_vkCmdExecuteCommands(void* context, VkCommandBuffer commandBuffer,
6784                                               uint32_t commandBufferCount,
6785                                               const VkCommandBuffer* pCommandBuffers) {
6786     VkEncoder* enc = (VkEncoder*)context;
6787 
6788     if (!mFeatureInfo->hasVulkanQueueSubmitWithCommands) {
6789         enc->vkCmdExecuteCommands(commandBuffer, commandBufferCount, pCommandBuffers,
6790                                   true /* do lock */);
6791         return;
6792     }
6793 
6794     struct goldfish_VkCommandBuffer* primary = as_goldfish_VkCommandBuffer(commandBuffer);
6795     for (uint32_t i = 0; i < commandBufferCount; ++i) {
6796         struct goldfish_VkCommandBuffer* secondary =
6797             as_goldfish_VkCommandBuffer(pCommandBuffers[i]);
6798         appendObject(&secondary->superObjects, primary);
6799         appendObject(&primary->subObjects, secondary);
6800     }
6801 
6802     enc->vkCmdExecuteCommands(commandBuffer, commandBufferCount, pCommandBuffers,
6803                               true /* do lock */);
6804 }
6805 
6806 void ResourceTracker::on_vkCmdBindDescriptorSets(void* context, VkCommandBuffer commandBuffer,
6807                                                  VkPipelineBindPoint pipelineBindPoint,
6808                                                  VkPipelineLayout layout, uint32_t firstSet,
6809                                                  uint32_t descriptorSetCount,
6810                                                  const VkDescriptorSet* pDescriptorSets,
6811                                                  uint32_t dynamicOffsetCount,
6812                                                  const uint32_t* pDynamicOffsets) {
6813     VkEncoder* enc = (VkEncoder*)context;
6814 
6815     if (mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate)
6816         addPendingDescriptorSets(commandBuffer, descriptorSetCount, pDescriptorSets);
6817 
6818     enc->vkCmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet,
6819                                  descriptorSetCount, pDescriptorSets, dynamicOffsetCount,
6820                                  pDynamicOffsets, true /* do lock */);
6821 }
6822 
6823 void ResourceTracker::on_vkCmdPipelineBarrier(
6824     void* context, VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
6825     VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
6826     uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers,
6827     uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers,
6828     uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers) {
6829     VkEncoder* enc = (VkEncoder*)context;
6830 
6831     std::vector<VkImageMemoryBarrier> updatedImageMemoryBarriers;
6832     updatedImageMemoryBarriers.reserve(imageMemoryBarrierCount);
6833     for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
6834         VkImageMemoryBarrier barrier = pImageMemoryBarriers[i];
6835 
6836 #ifdef VK_USE_PLATFORM_ANDROID_KHR
6837         // Unfortunately, Android does not yet have a mechanism for sharing the expected
6838         // VkImageLayout when passing around AHardwareBuffer-s, so many existing users
6839         // that import AHardwareBuffer-s into VkImage-s/VkDeviceMemory-s simply use
6840         // VK_IMAGE_LAYOUT_UNDEFINED. However, the Vulkan spec's image layout transition
6841         // section says "If the old layout is VK_IMAGE_LAYOUT_UNDEFINED, the contents of
6842         // that range may be discarded." Some Vulkan drivers have been observed to actually
6843         // perform the discard which leads to AHardwareBuffer-s being unintentionally
6844         // cleared. See go/ahb-vkimagelayout for more information.
6845         if (barrier.srcQueueFamilyIndex != barrier.dstQueueFamilyIndex &&
6846             (barrier.srcQueueFamilyIndex == VK_QUEUE_FAMILY_EXTERNAL ||
6847              barrier.srcQueueFamilyIndex == VK_QUEUE_FAMILY_FOREIGN_EXT) &&
6848             barrier.oldLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
6849             // This is not a complete solution as the Vulkan spec does not require that
6850             // Vulkan drivers perform a no-op in the case when oldLayout equals newLayout
6851             // but this has been observed to be enough to work for now to avoid clearing
6852             // out images.
6853             // TODO(b/236179843): figure out long term solution.
6854             barrier.oldLayout = barrier.newLayout;
6855         }
6856 #endif
6857 
6858         updatedImageMemoryBarriers.push_back(barrier);
6859     }
6860 
6861     enc->vkCmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags,
6862                               memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
6863                               pBufferMemoryBarriers, updatedImageMemoryBarriers.size(),
6864                               updatedImageMemoryBarriers.data(), true /* do lock */);
6865 }
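
// Illustrative sketch, not part of this file: the kind of barrier the
// workaround above rewrites. All handles and values below are hypothetical.
// An app importing an AHardwareBuffer-backed VkImage typically records an
// acquire barrier like this, with oldLayout == UNDEFINED and an external
// source queue family, which would otherwise permit a content discard:
//
//     VkImageMemoryBarrier acquireBarrier = {
//         .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
//         .srcAccessMask = 0,
//         .dstAccessMask = VK_ACCESS_SHADER_READ_BIT,
//         .oldLayout = VK_IMAGE_LAYOUT_UNDEFINED,  // rewritten to newLayout above
//         .newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
//         .srcQueueFamilyIndex = VK_QUEUE_FAMILY_FOREIGN_EXT,
//         .dstQueueFamilyIndex = 0,                // hypothetical graphics family
//         .image = importedAhbImage,               // hypothetical AHB-backed image
//         .subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1},
//     };
//     vkCmdPipelineBarrier(cmdBuf, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
//                          VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, 0, nullptr,
//                          0, nullptr, 1, &acquireBarrier);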

void ResourceTracker::on_vkDestroyDescriptorSetLayout(void* context, VkDevice device,
                                                      VkDescriptorSetLayout descriptorSetLayout,
                                                      const VkAllocationCallbacks* pAllocator) {
    decDescriptorSetLayoutRef(context, device, descriptorSetLayout, pAllocator);
}

VkResult ResourceTracker::on_vkAllocateCommandBuffers(
    void* context, VkResult input_result, VkDevice device,
    const VkCommandBufferAllocateInfo* pAllocateInfo, VkCommandBuffer* pCommandBuffers) {
    (void)input_result;

    VkEncoder* enc = (VkEncoder*)context;
    VkResult res =
        enc->vkAllocateCommandBuffers(device, pAllocateInfo, pCommandBuffers, true /* do lock */);
    if (VK_SUCCESS != res) return res;

    for (uint32_t i = 0; i < pAllocateInfo->commandBufferCount; ++i) {
        struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(pCommandBuffers[i]);
        cb->isSecondary = pAllocateInfo->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY;
        cb->device = device;
    }

    return res;
}

#if defined(VK_USE_PLATFORM_ANDROID_KHR)
VkResult ResourceTracker::exportSyncFdForQSRILocked(VkImage image, int* fd) {
    ALOGV("%s: call for image %p host image handle 0x%llx\n", __func__, (void*)image,
          (unsigned long long)get_host_u64_VkImage(image));

    if (mFeatureInfo->hasVirtioGpuNativeSync) {
        struct VirtGpuExecBuffer exec = {};
        struct gfxstreamCreateQSRIExportVK exportQSRI = {};
        VirtGpuDevice* instance = VirtGpuDevice::getInstance();

        uint64_t hostImageHandle = get_host_u64_VkImage(image);

        exportQSRI.hdr.opCode = GFXSTREAM_CREATE_QSRI_EXPORT_VK;
        exportQSRI.imageHandleLo = (uint32_t)hostImageHandle;
        exportQSRI.imageHandleHi = (uint32_t)(hostImageHandle >> 32);

        exec.command = static_cast<void*>(&exportQSRI);
        exec.command_size = sizeof(exportQSRI);
        exec.flags = kFenceOut | kRingIdx;
        if (instance->execBuffer(exec, nullptr)) return VK_ERROR_OUT_OF_HOST_MEMORY;

        *fd = exec.handle.osHandle;
    } else {
#if GFXSTREAM_ENABLE_GUEST_GOLDFISH
        ensureSyncDeviceFd();
        goldfish_sync_queue_work(
            mSyncDeviceFd, get_host_u64_VkImage(image) /* the handle */,
            GOLDFISH_SYNC_VULKAN_QSRI /* thread handle (doubling as type field) */, fd);
#endif
    }

    ALOGV("%s: got fd: %d\n", __func__, *fd);
    auto imageInfoIt = info_VkImage.find(image);
    if (imageInfoIt != info_VkImage.end()) {
        auto& imageInfo = imageInfoIt->second;

        auto* syncHelper =
            ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->syncHelper();

        // Remove any pending QSRI sync fds that are already signaled.
        auto syncFdIt = imageInfo.pendingQsriSyncFds.begin();
        while (syncFdIt != imageInfo.pendingQsriSyncFds.end()) {
            int syncFd = *syncFdIt;
            int syncWaitRet = syncHelper->wait(syncFd, /*timeout msecs*/ 0);
            if (syncWaitRet == 0) {
                // Sync fd is signaled.
                syncFdIt = imageInfo.pendingQsriSyncFds.erase(syncFdIt);
                syncHelper->close(syncFd);
            } else {
                if (errno != ETIME) {
                    ALOGE("%s: Failed to wait for pending QSRI sync: strerror: %s errno: %d",
                          __func__, strerror(errno), errno);
                }
                break;
            }
        }

        int syncFdDup = syncHelper->dup(*fd);
        if (syncFdDup < 0) {
            ALOGE("%s: Failed to dup() QSRI sync fd: strerror: %s errno: %d", __func__,
                  strerror(errno), errno);
        } else {
            imageInfo.pendingQsriSyncFds.push_back(syncFdDup);
        }
    }

    return VK_SUCCESS;
}
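
// Consumer-side sketch, assuming sync_file semantics for the exported fd
// (names are hypothetical, not part of this file). A holder of the fd can
// poll it to find out when the host has finished the QSRI signal:
//
//     #include <poll.h>
//     #include <unistd.h>
//
//     bool qsriFenceSignaled(int fenceFd, int timeoutMs) {
//         struct pollfd p = {fenceFd, POLLIN, 0};
//         // sync_file fds report POLLIN once the underlying fence signals.
//         return poll(&p, 1, timeoutMs) == 1 && (p.revents & POLLIN);
//     }
//
//     if (qsriFenceSignaled(fd, 3000 /* ms */)) close(fd);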

VkResult ResourceTracker::on_vkQueueSignalReleaseImageANDROID(void* context, VkResult input_result,
                                                              VkQueue queue,
                                                              uint32_t waitSemaphoreCount,
                                                              const VkSemaphore* pWaitSemaphores,
                                                              VkImage image, int* pNativeFenceFd) {
    (void)input_result;

    VkEncoder* enc = (VkEncoder*)context;

    if (!mFeatureInfo->hasVulkanAsyncQsri) {
        return enc->vkQueueSignalReleaseImageANDROID(queue, waitSemaphoreCount, pWaitSemaphores,
                                                     image, pNativeFenceFd, true /* lock */);
    }

    {
        AutoLock<RecursiveLock> lock(mLock);
        auto it = info_VkImage.find(image);
        if (it == info_VkImage.end()) {
            if (pNativeFenceFd) *pNativeFenceFd = -1;
            return VK_ERROR_INITIALIZATION_FAILED;
        }
    }

    enc->vkQueueSignalReleaseImageANDROIDAsyncGOOGLE(queue, waitSemaphoreCount, pWaitSemaphores,
                                                     image, true /* lock */);

    AutoLock<RecursiveLock> lock(mLock);
    VkResult result;
    if (pNativeFenceFd) {
        result = exportSyncFdForQSRILocked(image, pNativeFenceFd);
    } else {
        // Initialize to -1 so a failed export does not leave syncFd uninitialized.
        int syncFd = -1;
        result = exportSyncFdForQSRILocked(image, &syncFd);

        if (syncFd >= 0) {
            auto* syncHelper =
                ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->syncHelper();
            syncHelper->close(syncFd);
        }
    }

    return result;
}
#endif
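
// Caller-side sketch (hypothetical): this entry point backs
// vkQueueSignalReleaseImageANDROID from the VK_ANDROID_native_buffer path,
// used roughly as follows by a swapchain implementation:
//
//     int releaseFenceFd = -1;
//     vkQueueSignalReleaseImageANDROID(queue, 1, &renderDoneSemaphore,
//                                      swapchainImage, &releaseFenceFd);
//     // releaseFenceFd is then handed to the window system as the buffer's
//     // release fence; passing a null pNativeFenceFd takes the branch above
//     // that immediately closes the exported fd.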

VkResult ResourceTracker::on_vkCreateGraphicsPipelines(
    void* context, VkResult input_result, VkDevice device, VkPipelineCache pipelineCache,
    uint32_t createInfoCount, const VkGraphicsPipelineCreateInfo* pCreateInfos,
    const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines) {
    (void)input_result;
    VkEncoder* enc = (VkEncoder*)context;
    std::vector<VkGraphicsPipelineCreateInfo> localCreateInfos(pCreateInfos,
                                                               pCreateInfos + createInfoCount);
    for (VkGraphicsPipelineCreateInfo& graphicsPipelineCreateInfo : localCreateInfos) {
        // dEQP-VK.api.pipeline.pipeline_invalid_pointers_unused_structs#graphics
        bool requireViewportState = false;
        // VUID-VkGraphicsPipelineCreateInfo-rasterizerDiscardEnable-00750
        requireViewportState |=
            graphicsPipelineCreateInfo.pRasterizationState != nullptr &&
            graphicsPipelineCreateInfo.pRasterizationState->rasterizerDiscardEnable == VK_FALSE;
        // VUID-VkGraphicsPipelineCreateInfo-pViewportState-04892
#ifdef VK_EXT_extended_dynamic_state2
        if (!requireViewportState && graphicsPipelineCreateInfo.pDynamicState) {
            for (uint32_t i = 0; i < graphicsPipelineCreateInfo.pDynamicState->dynamicStateCount;
                 i++) {
                if (VK_DYNAMIC_STATE_RASTERIZER_DISCARD_ENABLE_EXT ==
                    graphicsPipelineCreateInfo.pDynamicState->pDynamicStates[i]) {
                    requireViewportState = true;
                    break;
                }
            }
        }
#endif  // VK_EXT_extended_dynamic_state2
        if (!requireViewportState) {
            graphicsPipelineCreateInfo.pViewportState = nullptr;
        }

        // Fragment shader state has the same requirement as pViewportState.
        bool shouldIncludeFragmentShaderState = requireViewportState;

        // VUID-VkGraphicsPipelineCreateInfo-rasterizerDiscardEnable-00751
        if (!shouldIncludeFragmentShaderState) {
            graphicsPipelineCreateInfo.pMultisampleState = nullptr;
        }

        bool forceDepthStencilState = false;
        bool forceColorBlendState = false;

        const VkPipelineRenderingCreateInfo* pipelineRenderingInfo =
            vk_find_struct<VkPipelineRenderingCreateInfo>(&graphicsPipelineCreateInfo);

        if (pipelineRenderingInfo) {
            forceDepthStencilState |=
                pipelineRenderingInfo->depthAttachmentFormat != VK_FORMAT_UNDEFINED;
            forceDepthStencilState |=
                pipelineRenderingInfo->stencilAttachmentFormat != VK_FORMAT_UNDEFINED;
            forceColorBlendState |= pipelineRenderingInfo->colorAttachmentCount != 0;
        }

        // VUID-VkGraphicsPipelineCreateInfo-renderPass-06043
        // VUID-VkGraphicsPipelineCreateInfo-renderPass-06044
        if (graphicsPipelineCreateInfo.renderPass == VK_NULL_HANDLE ||
            !shouldIncludeFragmentShaderState) {
            // VUID-VkGraphicsPipelineCreateInfo-renderPass-06053
            if (!forceDepthStencilState) {
                graphicsPipelineCreateInfo.pDepthStencilState = nullptr;
            }
            if (!forceColorBlendState) {
                graphicsPipelineCreateInfo.pColorBlendState = nullptr;
            }
        }
    }
    return enc->vkCreateGraphicsPipelines(device, pipelineCache, localCreateInfos.size(),
                                          localCreateInfos.data(), pAllocator, pPipelines,
                                          true /* do lock */);
}
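
// Illustrative sketch (hypothetical values): a rasterizer-discard pipeline in
// which the app left dangling pointers for state that the VUIDs above say
// must be ignored. The fix-up pass nulls them before encoding, so the host
// driver never dereferences them:
//
//     VkPipelineRasterizationStateCreateInfo raster = {
//         .sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
//         .rasterizerDiscardEnable = VK_TRUE,  // no viewport/fragment state needed
//         .lineWidth = 1.0f,
//     };
//     VkGraphicsPipelineCreateInfo ci = { /* ...stages, layout, etc... */ };
//     ci.pRasterizationState = &raster;
//     ci.pViewportState = (VkPipelineViewportStateCreateInfo*)0x1;        // stale
//     ci.pMultisampleState = (VkPipelineMultisampleStateCreateInfo*)0x1;  // stale
//     // After the loop above: ci.pViewportState == nullptr and
//     // ci.pMultisampleState == nullptr, matching the dEQP expectation.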

uint32_t ResourceTracker::getApiVersionFromInstance(VkInstance instance) const {
    AutoLock<RecursiveLock> lock(mLock);
    uint32_t api = kDefaultApiVersion;

    auto it = info_VkInstance.find(instance);
    if (it == info_VkInstance.end()) return api;

    api = it->second.highestApiVersion;

    return api;
}

uint32_t ResourceTracker::getApiVersionFromDevice(VkDevice device) const {
    AutoLock<RecursiveLock> lock(mLock);

    uint32_t api = kDefaultApiVersion;

    auto it = info_VkDevice.find(device);
    if (it == info_VkDevice.end()) return api;

    api = it->second.apiVersion;

    return api;
}

bool ResourceTracker::hasInstanceExtension(VkInstance instance, const std::string& name) const {
    AutoLock<RecursiveLock> lock(mLock);

    auto it = info_VkInstance.find(instance);
    if (it == info_VkInstance.end()) return false;

    return it->second.enabledExtensions.find(name) != it->second.enabledExtensions.end();
}

bool ResourceTracker::hasDeviceExtension(VkDevice device, const std::string& name) const {
    AutoLock<RecursiveLock> lock(mLock);

    auto it = info_VkDevice.find(device);
    if (it == info_VkDevice.end()) return false;

    return it->second.enabledExtensions.find(name) != it->second.enabledExtensions.end();
}

VkDevice ResourceTracker::getDevice(VkCommandBuffer commandBuffer) const {
    struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
    if (!cb) {
        return nullptr;
    }
    return cb->device;
}

// Resets the staging stream for this command buffer and for any primary command
// buffers into which this command buffer has been recorded. If requested, also
// clears the pending descriptor sets.
void ResourceTracker::resetCommandBufferStagingInfo(VkCommandBuffer commandBuffer,
                                                    bool alsoResetPrimaries,
                                                    bool alsoClearPendingDescriptorSets) {
    struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
    if (!cb) {
        return;
    }
    if (cb->privateEncoder) {
        sStaging.pushStaging((CommandBufferStagingStream*)cb->privateStream, cb->privateEncoder);
        cb->privateEncoder = nullptr;
        cb->privateStream = nullptr;
    }

    if (alsoClearPendingDescriptorSets && cb->userPtr) {
        CommandBufferPendingDescriptorSets* pendingSets =
            (CommandBufferPendingDescriptorSets*)cb->userPtr;
        pendingSets->sets.clear();
    }

    if (alsoResetPrimaries) {
        forAllObjects(cb->superObjects, [this, alsoResetPrimaries,
                                         alsoClearPendingDescriptorSets](void* obj) {
            VkCommandBuffer superCommandBuffer = (VkCommandBuffer)obj;
            this->resetCommandBufferStagingInfo(superCommandBuffer, alsoResetPrimaries,
                                                alsoClearPendingDescriptorSets);
        });
        eraseObjects(&cb->superObjects);
    }

    forAllObjects(cb->subObjects, [cb](void* obj) {
        VkCommandBuffer subCommandBuffer = (VkCommandBuffer)obj;
        struct goldfish_VkCommandBuffer* subCb = as_goldfish_VkCommandBuffer(subCommandBuffer);
        // We don't do resetCommandBufferStagingInfo(subCommandBuffer)
        // since the user still might have submittable stuff pending there.
        eraseObject(&subCb->superObjects, (void*)cb);
    });

    eraseObjects(&cb->subObjects);
}
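
// Relationship sketch (handles hypothetical): recording a secondary command
// buffer into a primary is what populates the sub/superObjects lists that the
// traversals above walk, roughly:
//
//     vkCmdExecuteCommands(primaryCb, 1, &secondaryCb);
//     // goldfish_VkCommandBuffer(primaryCb)->subObjects     gains secondaryCb
//     // goldfish_VkCommandBuffer(secondaryCb)->superObjects gains primaryCb
//     resetCommandBufferStagingInfo(secondaryCb, true /* also reset primaries */,
//                                   true /* also clear pending sets */);
//     // ...resets primaryCb's staging as well, then unlinks both directions.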

// Unlike resetCommandBufferStagingInfo, this does not always erase its
// superObjects pointers because the command buffer has merely been
// submitted, not reset. However, if the command buffer was recorded with
// ONE_TIME_SUBMIT_BIT, then it will also reset its primaries.
//
// Also, we save the set of descriptor sets referenced by this command
// buffer because we only submitted the command buffer, and it's possible to
// update the descriptor sets again and re-submit the same command buffer
// without re-recording it (update-after-bind descriptor sets).
void ResourceTracker::resetCommandBufferPendingTopology(VkCommandBuffer commandBuffer) {
    struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
    if (cb->flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) {
        resetCommandBufferStagingInfo(commandBuffer, true /* reset primaries */,
                                      true /* clear pending descriptor sets */);
    } else {
        resetCommandBufferStagingInfo(commandBuffer, false /* don't reset primaries */,
                                      false /* don't clear pending descriptor sets */);
    }
}

void ResourceTracker::resetCommandPoolStagingInfo(VkCommandPool commandPool) {
    struct goldfish_VkCommandPool* p = as_goldfish_VkCommandPool(commandPool);

    if (!p) return;

    forAllObjects(p->subObjects, [this](void* commandBuffer) {
        this->resetCommandBufferStagingInfo((VkCommandBuffer)commandBuffer,
                                            true /* also reset primaries */,
                                            true /* also clear pending descriptor sets */);
    });
}

void ResourceTracker::addToCommandPool(VkCommandPool commandPool, uint32_t commandBufferCount,
                                       VkCommandBuffer* pCommandBuffers) {
    for (uint32_t i = 0; i < commandBufferCount; ++i) {
        struct goldfish_VkCommandPool* p = as_goldfish_VkCommandPool(commandPool);
        struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(pCommandBuffers[i]);
        appendObject(&p->subObjects, (void*)(pCommandBuffers[i]));
        appendObject(&cb->poolObjects, (void*)commandPool);
    }
}

void ResourceTracker::clearCommandPool(VkCommandPool commandPool) {
    resetCommandPoolStagingInfo(commandPool);
    struct goldfish_VkCommandPool* p = as_goldfish_VkCommandPool(commandPool);
    forAllObjects(p->subObjects, [this](void* commandBuffer) {
        this->unregister_VkCommandBuffer((VkCommandBuffer)commandBuffer);
    });
    eraseObjects(&p->subObjects);
}

const VkPhysicalDeviceMemoryProperties& ResourceTracker::getPhysicalDeviceMemoryProperties(
    void* context, VkDevice device, VkPhysicalDevice physicalDevice) {
    if (!mCachedPhysicalDeviceMemoryProps) {
        if (physicalDevice == VK_NULL_HANDLE) {
            AutoLock<RecursiveLock> lock(mLock);

            auto deviceInfoIt = info_VkDevice.find(device);
            if (deviceInfoIt == info_VkDevice.end()) {
                ALOGE("Failed to pass device or physical device.");
                abort();
            }
            const auto& deviceInfo = deviceInfoIt->second;
            physicalDevice = deviceInfo.physdev;
        }

        VkEncoder* enc = (VkEncoder*)context;

        VkPhysicalDeviceMemoryProperties properties;
        enc->vkGetPhysicalDeviceMemoryProperties(physicalDevice, &properties, true /* do lock */);

        mCachedPhysicalDeviceMemoryProps.emplace(std::move(properties));
    }
    return *mCachedPhysicalDeviceMemoryProps;
}
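
// Usage sketch (hypothetical helper, not part of this file): the cached
// properties are typically consumed for memory-type selection:
//
//     uint32_t findMemoryTypeIndex(const VkPhysicalDeviceMemoryProperties& props,
//                                  uint32_t typeBits, VkMemoryPropertyFlags wanted) {
//         for (uint32_t i = 0; i < props.memoryTypeCount; ++i) {
//             if ((typeBits & (1u << i)) &&
//                 (props.memoryTypes[i].propertyFlags & wanted) == wanted) {
//                 return i;
//             }
//         }
//         return UINT32_MAX;  // no suitable memory type
//     }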

static ResourceTracker* sTracker = nullptr;

ResourceTracker::ResourceTracker() {
    mCreateMapping = new CreateMapping();
    mDestroyMapping = new DestroyMapping();
}

ResourceTracker::~ResourceTracker() {
    delete mCreateMapping;
    delete mDestroyMapping;
}

VulkanHandleMapping* ResourceTracker::createMapping() { return mCreateMapping; }

VulkanHandleMapping* ResourceTracker::destroyMapping() { return mDestroyMapping; }

// static
ResourceTracker* ResourceTracker::get() {
    if (!sTracker) {
        // To be initialized once on vulkan device open.
        sTracker = new ResourceTracker;
    }
    return sTracker;
}

// static
ALWAYS_INLINE_GFXSTREAM VkEncoder* ResourceTracker::getCommandBufferEncoder(
    VkCommandBuffer commandBuffer) {
    if (!(ResourceTracker::streamFeatureBits &
          VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT)) {
        auto enc = ResourceTracker::getThreadLocalEncoder();
        ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, enc);
        return enc;
    }

    struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
    if (!cb->privateEncoder) {
        sStaging.setAllocFree(ResourceTracker::get()->getAlloc(),
                              ResourceTracker::get()->getFree());
        sStaging.popStaging((CommandBufferStagingStream**)&cb->privateStream, &cb->privateEncoder);
    }
    uint8_t* writtenPtr;
    size_t written;
    ((CommandBufferStagingStream*)cb->privateStream)->getWritten(&writtenPtr, &written);
    return cb->privateEncoder;
}
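
// Flow sketch: with QUEUE_SUBMIT_WITH_COMMANDS, each command buffer records
// into a pooled staging stream/encoder pair popped above; the pair is later
// returned via resetCommandBufferStagingInfo() -> sStaging.pushStaging().
// Roughly (encoder entry points mirror Vulkan ones plus a doLock flag):
//
//     VkEncoder* enc = ResourceTracker::getCommandBufferEncoder(cmdBuf);
//     enc->vkCmdDraw(cmdBuf, 3, 1, 0, 0, true /* do lock */);
//     // ...on reset, the stream and encoder return to sStaging for reuse.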

// static
ALWAYS_INLINE_GFXSTREAM VkEncoder* ResourceTracker::getQueueEncoder(VkQueue queue) {
    auto enc = ResourceTracker::getThreadLocalEncoder();
    if (!(ResourceTracker::streamFeatureBits &
          VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT)) {
        ResourceTracker::get()->syncEncodersForQueue(queue, enc);
    }
    return enc;
}

// static
ALWAYS_INLINE_GFXSTREAM VkEncoder* ResourceTracker::getThreadLocalEncoder() {
    auto hostConn = ResourceTracker::threadingCallbacks.hostConnectionGetFunc();
    auto vkEncoder = ResourceTracker::threadingCallbacks.vkEncoderGetFunc(hostConn);
    return vkEncoder;
}

// static
void ResourceTracker::setSeqnoPtr(uint32_t* seqnoptr) { sSeqnoPtr = seqnoptr; }

// static
ALWAYS_INLINE_GFXSTREAM uint32_t ResourceTracker::nextSeqno() {
    uint32_t res = __atomic_add_fetch(sSeqnoPtr, 1, __ATOMIC_SEQ_CST);
    return res;
}

// static
ALWAYS_INLINE_GFXSTREAM uint32_t ResourceTracker::getSeqno() {
    uint32_t res = __atomic_load_n(sSeqnoPtr, __ATOMIC_SEQ_CST);
    return res;
}

void ResourceTracker::transformImpl_VkExternalMemoryProperties_tohost(VkExternalMemoryProperties*,
                                                                      uint32_t) {}

void ResourceTracker::transformImpl_VkImageCreateInfo_fromhost(const VkImageCreateInfo*, uint32_t) {
}
void ResourceTracker::transformImpl_VkImageCreateInfo_tohost(const VkImageCreateInfo*, uint32_t) {}

#define DEFINE_TRANSFORMED_TYPE_IMPL(type)                                  \
    void ResourceTracker::transformImpl_##type##_tohost(type*, uint32_t) {} \
    void ResourceTracker::transformImpl_##type##_fromhost(type*, uint32_t) {}

LIST_TRIVIAL_TRANSFORMED_TYPES(DEFINE_TRANSFORMED_TYPE_IMPL)

}  // namespace vk
}  // namespace gfxstream