/*
 * Copyright 2018 Google
 * SPDX-License-Identifier: MIT
 */

#include "ResourceTracker.h"

#include "CommandBufferStagingStream.h"
#include "DescriptorSetVirtualization.h"
#include "HostVisibleMemoryVirtualization.h"
#include "Resources.h"
#include "VkEncoder.h"
#include "gfxstream_vk_private.h"
#include "goldfish_address_space.h"
#include "goldfish_vk_private_defs.h"
#include "util/anon_file.h"
#include "util/macros.h"
#include "virtgpu_gfxstream_protocol.h"
#include "vulkan/vulkan_core.h"
#include "util/detect_os.h"

#ifdef VK_USE_PLATFORM_ANDROID_KHR
#include "vk_format_info.h"
#include <vndk/hardware_buffer.h>
#endif
#include <stdlib.h>

#include <algorithm>
#include <chrono>
#include <set>
#include <string>
#include <unordered_map>
#include <unordered_set>

#include "vk_util.h"

#if DETECT_OS_LINUX
#include <drm_fourcc.h>
#endif

#ifndef VK_USE_PLATFORM_FUCHSIA
void zx_handle_close(zx_handle_t) {}
void zx_event_create(int, zx_handle_t*) {}
#endif

static constexpr uint32_t kDefaultApiVersion = VK_MAKE_VERSION(1, 1, 0);

struct vk_struct_chain_iterator {
    VkBaseOutStructure* value;
};

template <class T>
static vk_struct_chain_iterator vk_make_chain_iterator(T* vk_struct) {
    vk_struct_chain_iterator result = {reinterpret_cast<VkBaseOutStructure*>(vk_struct)};
    return result;
}

template <class T>
static void vk_append_struct(vk_struct_chain_iterator* i, T* vk_struct) {
    VkBaseOutStructure* p = i->value;
    if (p->pNext) {
        ::abort();
    }

    p->pNext = reinterpret_cast<VkBaseOutStructure*>(vk_struct);
    vk_struct->pNext = NULL;

    *i = vk_make_chain_iterator(vk_struct);
}

template <class T>
static T vk_make_orphan_copy(const T& vk_struct) {
    T copy = vk_struct;
    copy.pNext = NULL;
    return copy;
}
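
// Illustrative sketch (not part of the original file): how the chain helpers
// above are typically combined to rebuild a pNext chain. The struct names are
// examples only; any Vulkan structs with sType/pNext members work.
//
//     VkPhysicalDeviceFeatures2 features2 = vk_make_orphan_copy(inputFeatures2);
//     vk_struct_chain_iterator it = vk_make_chain_iterator(&features2);
//
//     VkPhysicalDeviceProtectedMemoryFeatures protectedMemFeatures =
//         vk_make_orphan_copy(inputProtectedMemFeatures);
//     vk_append_struct(&it, &protectedMemFeatures);  // sets features2.pNext,
//                                                    // advances the iterator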

namespace gfxstream {
namespace vk {

#define MAKE_HANDLE_MAPPING_FOREACH(type_name, map_impl, map_to_u64_impl, map_from_u64_impl)       \
    void mapHandles_##type_name(type_name* handles, size_t count) override {                       \
        for (size_t i = 0; i < count; ++i) {                                                       \
            map_impl;                                                                              \
        }                                                                                          \
    }                                                                                              \
    void mapHandles_##type_name##_u64(const type_name* handles, uint64_t* handle_u64s,             \
                                      size_t count) override {                                     \
        for (size_t i = 0; i < count; ++i) {                                                       \
            map_to_u64_impl;                                                                       \
        }                                                                                          \
    }                                                                                              \
    void mapHandles_u64_##type_name(const uint64_t* handle_u64s, type_name* handles, size_t count) \
        override {                                                                                 \
        for (size_t i = 0; i < count; ++i) {                                                       \
            map_from_u64_impl;                                                                     \
        }                                                                                          \
    }

#define DEFINE_RESOURCE_TRACKING_CLASS(class_name, impl) \
    class class_name : public VulkanHandleMapping {      \
       public:                                           \
        virtual ~class_name() {}                         \
        GOLDFISH_VK_LIST_HANDLE_TYPES(impl)              \
    };

#define CREATE_MAPPING_IMPL_FOR_TYPE(type_name)                                \
    MAKE_HANDLE_MAPPING_FOREACH(                                               \
        type_name, handles[i] = new_from_host_##type_name(handles[i]);         \
        ResourceTracker::get()->register_##type_name(handles[i]);              \
        , handle_u64s[i] = (uint64_t)new_from_host_##type_name(handles[i]),    \
        handles[i] = (type_name)new_from_host_u64_##type_name(handle_u64s[i]); \
        ResourceTracker::get()->register_##type_name(handles[i]);)

#define UNWRAP_MAPPING_IMPL_FOR_TYPE(type_name)                          \
    MAKE_HANDLE_MAPPING_FOREACH(                                         \
        type_name, handles[i] = get_host_##type_name(handles[i]),        \
        handle_u64s[i] = (uint64_t)get_host_u64_##type_name(handles[i]), \
        handles[i] = (type_name)get_host_##type_name((type_name)handle_u64s[i]))

#define DESTROY_MAPPING_IMPL_FOR_TYPE(type_name)                                               \
    MAKE_HANDLE_MAPPING_FOREACH(type_name,                                                     \
                                ResourceTracker::get()->unregister_##type_name(handles[i]);    \
                                delete_goldfish_##type_name(handles[i]), (void)handle_u64s[i]; \
                                delete_goldfish_##type_name(handles[i]), (void)handles[i];     \
                                delete_goldfish_##type_name((type_name)handle_u64s[i]))

DEFINE_RESOURCE_TRACKING_CLASS(CreateMapping, CREATE_MAPPING_IMPL_FOR_TYPE)
DEFINE_RESOURCE_TRACKING_CLASS(DestroyMapping, DESTROY_MAPPING_IMPL_FOR_TYPE)
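
// Illustrative sketch (not in the original file): roughly what
// DEFINE_RESOURCE_TRACKING_CLASS(CreateMapping, CREATE_MAPPING_IMPL_FOR_TYPE)
// expands to for a single handle type such as VkImage:
//
//     class CreateMapping : public VulkanHandleMapping {
//        public:
//         virtual ~CreateMapping() {}
//         void mapHandles_VkImage(VkImage* handles, size_t count) override {
//             for (size_t i = 0; i < count; ++i) {
//                 handles[i] = new_from_host_VkImage(handles[i]);
//                 ResourceTracker::get()->register_VkImage(handles[i]);
//             }
//         }
//         // ...plus the _u64 variants, repeated for every handle type listed
//         // in GOLDFISH_VK_LIST_HANDLE_TYPES.
//     };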

static uint32_t* sSeqnoPtr = nullptr;

// static
uint32_t ResourceTracker::streamFeatureBits = 0;
ResourceTracker::ThreadingCallbacks ResourceTracker::threadingCallbacks;

struct StagingInfo {
    std::mutex mLock;
    std::vector<CommandBufferStagingStream*> streams;
    std::vector<VkEncoder*> encoders;

    /// \brief Sets the allocation and free callbacks used when creating
    /// CommandBufferStagingStream instances.
    /// \param allocFn callback used to allocate memory
    /// \param freeFn callback used to free memory
    void setAllocFree(CommandBufferStagingStream::Alloc&& allocFn,
                      CommandBufferStagingStream::Free&& freeFn) {
        mAlloc = std::move(allocFn);
        mFree = std::move(freeFn);
    }

    ~StagingInfo() {
        for (auto stream : streams) {
            delete stream;
        }

        for (auto encoder : encoders) {
            delete encoder;
        }
    }

    void pushStaging(CommandBufferStagingStream* stream, VkEncoder* encoder) {
        std::lock_guard<std::mutex> lock(mLock);
        stream->reset();
        streams.push_back(stream);
        encoders.push_back(encoder);
    }

    void popStaging(CommandBufferStagingStream** streamOut, VkEncoder** encoderOut) {
        std::lock_guard<std::mutex> lock(mLock);
        CommandBufferStagingStream* stream;
        VkEncoder* encoder;
        if (streams.empty()) {
            if (mAlloc && mFree) {
                // If custom allocators are provided, forward them to CommandBufferStagingStream.
                stream = new CommandBufferStagingStream(mAlloc, mFree);
            } else {
                stream = new CommandBufferStagingStream;
            }
            encoder = new VkEncoder(stream);
        } else {
            stream = streams.back();
            encoder = encoders.back();
            streams.pop_back();
            encoders.pop_back();
        }
        *streamOut = stream;
        *encoderOut = encoder;
    }

   private:
    CommandBufferStagingStream::Alloc mAlloc = nullptr;
    CommandBufferStagingStream::Free mFree = nullptr;
};

static StagingInfo sStaging;
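
// Illustrative sketch (not in the original file): the checkout/recycle pattern
// callers of sStaging follow. A worker pops a stream+encoder pair, records
// through it, and pushes it back (pushStaging resets the stream) for reuse:
//
//     CommandBufferStagingStream* stream = nullptr;
//     VkEncoder* encoder = nullptr;
//     sStaging.popStaging(&stream, &encoder);
//     // ... encode command buffer contents through |encoder| ...
//     sStaging.pushStaging(stream, encoder);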

struct CommandBufferPendingDescriptorSets {
    std::unordered_set<VkDescriptorSet> sets;
};

#define HANDLE_REGISTER_IMPL_IMPL(type)                    \
    void ResourceTracker::register_##type(type obj) {      \
        std::lock_guard<std::recursive_mutex> lock(mLock); \
        info_##type[obj] = type##_Info();                  \
    }

#define HANDLE_UNREGISTER_IMPL_IMPL(type)                  \
    void ResourceTracker::unregister_##type(type obj) {    \
        std::lock_guard<std::recursive_mutex> lock(mLock); \
        info_##type.erase(obj);                            \
    }

GOLDFISH_VK_LIST_HANDLE_TYPES(HANDLE_REGISTER_IMPL_IMPL)
GOLDFISH_VK_LIST_TRIVIAL_HANDLE_TYPES(HANDLE_UNREGISTER_IMPL_IMPL)

uint32_t getWaitSemaphoreCount(const VkSubmitInfo& pSubmit) { return pSubmit.waitSemaphoreCount; }

uint32_t getWaitSemaphoreCount(const VkSubmitInfo2& pSubmit) {
    return pSubmit.waitSemaphoreInfoCount;
}

uint32_t getCommandBufferCount(const VkSubmitInfo& pSubmit) { return pSubmit.commandBufferCount; }

uint32_t getCommandBufferCount(const VkSubmitInfo2& pSubmit) {
    return pSubmit.commandBufferInfoCount;
}

uint32_t getSignalSemaphoreCount(const VkSubmitInfo& pSubmit) {
    return pSubmit.signalSemaphoreCount;
}

uint32_t getSignalSemaphoreCount(const VkSubmitInfo2& pSubmit) {
    return pSubmit.signalSemaphoreInfoCount;
}

VkSemaphore getWaitSemaphore(const VkSubmitInfo& pSubmit, int i) {
    return pSubmit.pWaitSemaphores[i];
}

VkSemaphore getWaitSemaphore(const VkSubmitInfo2& pSubmit, int i) {
    return pSubmit.pWaitSemaphoreInfos[i].semaphore;
}

VkSemaphore getSignalSemaphore(const VkSubmitInfo& pSubmit, int i) {
    return pSubmit.pSignalSemaphores[i];
}

VkSemaphore getSignalSemaphore(const VkSubmitInfo2& pSubmit, int i) {
    return pSubmit.pSignalSemaphoreInfos[i].semaphore;
}

VkCommandBuffer getCommandBuffer(const VkSubmitInfo& pSubmit, int i) {
    return pSubmit.pCommandBuffers[i];
}

VkCommandBuffer getCommandBuffer(const VkSubmitInfo2& pSubmit, int i) {
    return pSubmit.pCommandBufferInfos[i].commandBuffer;
}
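
// Illustrative sketch (not in the original file): the overloads above let
// submit-processing code be written once as a template over both VkSubmitInfo
// and VkSubmitInfo2, e.g.:
//
//     template <class VkSubmitInfoType>
//     void forEachCommandBuffer(uint32_t submitCount, const VkSubmitInfoType* pSubmits) {
//         for (uint32_t s = 0; s < submitCount; ++s) {
//             for (uint32_t i = 0; i < getCommandBufferCount(pSubmits[s]); ++i) {
//                 VkCommandBuffer cb = getCommandBuffer(pSubmits[s], (int)i);
//                 (void)cb;  // ... process the command buffer ...
//             }
//         }
//     }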

bool descriptorPoolSupportsIndividualFreeLocked(VkDescriptorPool pool) {
    return as_goldfish_VkDescriptorPool(pool)->allocInfo->createFlags &
           VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
}

VkDescriptorImageInfo createImmutableSamplersFilteredImageInfo(
    VkDescriptorType descType, VkDescriptorSet descSet, uint32_t binding,
    const VkDescriptorImageInfo* pImageInfo) {
    VkDescriptorImageInfo res = *pImageInfo;

    if (descType != VK_DESCRIPTOR_TYPE_SAMPLER &&
        descType != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
        return res;

    bool immutableSampler =
        as_goldfish_VkDescriptorSet(descSet)->reified->bindingIsImmutableSampler[binding];

    if (!immutableSampler) return res;

    res.sampler = 0;

    return res;
}

bool descriptorBindingIsImmutableSampler(VkDescriptorSet dstSet, uint32_t dstBinding) {
    return as_goldfish_VkDescriptorSet(dstSet)->reified->bindingIsImmutableSampler[dstBinding];
}

static bool isHostVisible(const VkPhysicalDeviceMemoryProperties* memoryProps, uint32_t index) {
    return memoryProps->memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
}

VkDescriptorImageInfo ResourceTracker::filterNonexistentSampler(
    const VkDescriptorImageInfo& inputInfo) {
    VkSampler sampler = inputInfo.sampler;

    VkDescriptorImageInfo res = inputInfo;

    if (sampler) {
        auto it = info_VkSampler.find(sampler);
        bool samplerExists = it != info_VkSampler.end();
        if (!samplerExists) res.sampler = 0;
    }

    return res;
}

void ResourceTracker::emitDeviceMemoryReport(VkDevice_Info info,
                                             VkDeviceMemoryReportEventTypeEXT type,
                                             uint64_t memoryObjectId, VkDeviceSize size,
                                             VkObjectType objectType, uint64_t objectHandle,
                                             uint32_t heapIndex) {
    if (info.deviceMemoryReportCallbacks.empty()) return;

    const VkDeviceMemoryReportCallbackDataEXT callbackData = {
        VK_STRUCTURE_TYPE_DEVICE_MEMORY_REPORT_CALLBACK_DATA_EXT,  // sType
        nullptr,                                                   // pNext
        0,                                                         // flags
        type,                                                      // type
        memoryObjectId,                                            // memoryObjectId
        size,                                                      // size
        objectType,                                                // objectType
        objectHandle,                                              // objectHandle
        heapIndex,                                                 // heapIndex
    };
    for (const auto& callback : info.deviceMemoryReportCallbacks) {
        callback.first(&callbackData, callback.second);
    }
}

#ifdef VK_USE_PLATFORM_FUCHSIA
inline fuchsia_sysmem::wire::BufferCollectionConstraints defaultBufferCollectionConstraints(
    size_t minSizeBytes, size_t minBufferCount, size_t maxBufferCount = 0u,
    size_t minBufferCountForCamping = 0u, size_t minBufferCountForDedicatedSlack = 0u,
    size_t minBufferCountForSharedSlack = 0u) {
    fuchsia_sysmem::wire::BufferCollectionConstraints constraints = {};
    constraints.min_buffer_count = minBufferCount;
    if (maxBufferCount > 0) {
        constraints.max_buffer_count = maxBufferCount;
    }
    if (minBufferCountForCamping) {
        constraints.min_buffer_count_for_camping = minBufferCountForCamping;
    }
    if (minBufferCountForSharedSlack) {
        constraints.min_buffer_count_for_shared_slack = minBufferCountForSharedSlack;
    }
    constraints.has_buffer_memory_constraints = true;
    fuchsia_sysmem::wire::BufferMemoryConstraints& buffer_constraints =
        constraints.buffer_memory_constraints;

    buffer_constraints.min_size_bytes = minSizeBytes;
    buffer_constraints.max_size_bytes = 0xffffffff;
    buffer_constraints.physically_contiguous_required = false;
    buffer_constraints.secure_required = false;

    // No restrictions on coherency domain or heaps.
    buffer_constraints.ram_domain_supported = true;
    buffer_constraints.cpu_domain_supported = true;
    buffer_constraints.inaccessible_domain_supported = true;
    buffer_constraints.heap_permitted_count = 2;
    buffer_constraints.heap_permitted[0] = fuchsia_sysmem::wire::HeapType::kGoldfishDeviceLocal;
    buffer_constraints.heap_permitted[1] = fuchsia_sysmem::wire::HeapType::kGoldfishHostVisible;

    return constraints;
}

uint32_t getBufferCollectionConstraintsVulkanImageUsage(const VkImageCreateInfo* pImageInfo) {
    uint32_t usage = 0u;
    VkImageUsageFlags imageUsage = pImageInfo->usage;

#define SetUsageBit(BIT, VALUE)                                  \
    if (imageUsage & VK_IMAGE_USAGE_##BIT##_BIT) {               \
        usage |= fuchsia_sysmem::wire::kVulkanImageUsage##VALUE; \
    }

    SetUsageBit(COLOR_ATTACHMENT, ColorAttachment);
    SetUsageBit(TRANSFER_SRC, TransferSrc);
    SetUsageBit(TRANSFER_DST, TransferDst);
    SetUsageBit(SAMPLED, Sampled);

#undef SetUsageBit
    return usage;
}

uint32_t getBufferCollectionConstraintsVulkanBufferUsage(VkBufferUsageFlags bufferUsage) {
    uint32_t usage = 0u;

#define SetUsageBit(BIT, VALUE)                                   \
    if (bufferUsage & VK_BUFFER_USAGE_##BIT##_BIT) {              \
        usage |= fuchsia_sysmem::wire::kVulkanBufferUsage##VALUE; \
    }

    SetUsageBit(TRANSFER_SRC, TransferSrc);
    SetUsageBit(TRANSFER_DST, TransferDst);
    SetUsageBit(UNIFORM_TEXEL_BUFFER, UniformTexelBuffer);
    SetUsageBit(STORAGE_TEXEL_BUFFER, StorageTexelBuffer);
    SetUsageBit(UNIFORM_BUFFER, UniformBuffer);
    SetUsageBit(STORAGE_BUFFER, StorageBuffer);
    SetUsageBit(INDEX_BUFFER, IndexBuffer);
    SetUsageBit(VERTEX_BUFFER, VertexBuffer);
    SetUsageBit(INDIRECT_BUFFER, IndirectBuffer);

#undef SetUsageBit
    return usage;
}

uint32_t getBufferCollectionConstraintsVulkanBufferUsage(
    const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo) {
    VkBufferUsageFlags bufferUsage = pBufferConstraintsInfo->createInfo.usage;
    return getBufferCollectionConstraintsVulkanBufferUsage(bufferUsage);
}

static fuchsia_sysmem::wire::PixelFormatType vkFormatTypeToSysmem(VkFormat format) {
    switch (format) {
        case VK_FORMAT_B8G8R8A8_SINT:
        case VK_FORMAT_B8G8R8A8_UNORM:
        case VK_FORMAT_B8G8R8A8_SRGB:
        case VK_FORMAT_B8G8R8A8_SNORM:
        case VK_FORMAT_B8G8R8A8_SSCALED:
        case VK_FORMAT_B8G8R8A8_USCALED:
            return fuchsia_sysmem::wire::PixelFormatType::kBgra32;
        case VK_FORMAT_R8G8B8A8_SINT:
        case VK_FORMAT_R8G8B8A8_UNORM:
        case VK_FORMAT_R8G8B8A8_SRGB:
        case VK_FORMAT_R8G8B8A8_SNORM:
        case VK_FORMAT_R8G8B8A8_SSCALED:
        case VK_FORMAT_R8G8B8A8_USCALED:
            return fuchsia_sysmem::wire::PixelFormatType::kR8G8B8A8;
        case VK_FORMAT_R8_UNORM:
        case VK_FORMAT_R8_UINT:
        case VK_FORMAT_R8_USCALED:
        case VK_FORMAT_R8_SNORM:
        case VK_FORMAT_R8_SINT:
        case VK_FORMAT_R8_SSCALED:
        case VK_FORMAT_R8_SRGB:
            return fuchsia_sysmem::wire::PixelFormatType::kR8;
        case VK_FORMAT_R8G8_UNORM:
        case VK_FORMAT_R8G8_UINT:
        case VK_FORMAT_R8G8_USCALED:
        case VK_FORMAT_R8G8_SNORM:
        case VK_FORMAT_R8G8_SINT:
        case VK_FORMAT_R8G8_SSCALED:
        case VK_FORMAT_R8G8_SRGB:
            return fuchsia_sysmem::wire::PixelFormatType::kR8G8;
        default:
            return fuchsia_sysmem::wire::PixelFormatType::kInvalid;
    }
}

static bool vkFormatMatchesSysmemFormat(VkFormat vkFormat,
                                        fuchsia_sysmem::wire::PixelFormatType sysmemFormat) {
    switch (vkFormat) {
        case VK_FORMAT_B8G8R8A8_SINT:
        case VK_FORMAT_B8G8R8A8_UNORM:
        case VK_FORMAT_B8G8R8A8_SRGB:
        case VK_FORMAT_B8G8R8A8_SNORM:
        case VK_FORMAT_B8G8R8A8_SSCALED:
        case VK_FORMAT_B8G8R8A8_USCALED:
            return sysmemFormat == fuchsia_sysmem::wire::PixelFormatType::kBgra32;
        case VK_FORMAT_R8G8B8A8_SINT:
        case VK_FORMAT_R8G8B8A8_UNORM:
        case VK_FORMAT_R8G8B8A8_SRGB:
        case VK_FORMAT_R8G8B8A8_SNORM:
        case VK_FORMAT_R8G8B8A8_SSCALED:
        case VK_FORMAT_R8G8B8A8_USCALED:
            return sysmemFormat == fuchsia_sysmem::wire::PixelFormatType::kR8G8B8A8;
        case VK_FORMAT_R8_UNORM:
        case VK_FORMAT_R8_UINT:
        case VK_FORMAT_R8_USCALED:
        case VK_FORMAT_R8_SNORM:
        case VK_FORMAT_R8_SINT:
        case VK_FORMAT_R8_SSCALED:
        case VK_FORMAT_R8_SRGB:
            return sysmemFormat == fuchsia_sysmem::wire::PixelFormatType::kR8 ||
                   sysmemFormat == fuchsia_sysmem::wire::PixelFormatType::kL8;
        case VK_FORMAT_R8G8_UNORM:
        case VK_FORMAT_R8G8_UINT:
        case VK_FORMAT_R8G8_USCALED:
        case VK_FORMAT_R8G8_SNORM:
        case VK_FORMAT_R8G8_SINT:
        case VK_FORMAT_R8G8_SSCALED:
        case VK_FORMAT_R8G8_SRGB:
            return sysmemFormat == fuchsia_sysmem::wire::PixelFormatType::kR8G8;
        default:
            return false;
    }
}

static VkFormat sysmemPixelFormatTypeToVk(fuchsia_sysmem::wire::PixelFormatType format) {
    switch (format) {
        case fuchsia_sysmem::wire::PixelFormatType::kBgra32:
            return VK_FORMAT_B8G8R8A8_SRGB;
        case fuchsia_sysmem::wire::PixelFormatType::kR8G8B8A8:
            return VK_FORMAT_R8G8B8A8_SRGB;
        case fuchsia_sysmem::wire::PixelFormatType::kL8:
        case fuchsia_sysmem::wire::PixelFormatType::kR8:
            return VK_FORMAT_R8_UNORM;
        case fuchsia_sysmem::wire::PixelFormatType::kR8G8:
            return VK_FORMAT_R8G8_UNORM;
        default:
            return VK_FORMAT_UNDEFINED;
    }
}

// TODO(fxbug.dev/42172354): This is currently only used for allocating
// memory for dedicated external images. It should be migrated to use
// SetBufferCollectionImageConstraintsFUCHSIA.
VkResult ResourceTracker::setBufferCollectionConstraintsFUCHSIA(
    VkEncoder* enc, VkDevice device,
    fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>* collection,
    const VkImageCreateInfo* pImageInfo) {
    if (pImageInfo == nullptr) {
        mesa_loge("setBufferCollectionConstraints: pImageInfo cannot be null.");
        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    }

    const VkSysmemColorSpaceFUCHSIA kDefaultColorSpace = {
        .sType = VK_STRUCTURE_TYPE_SYSMEM_COLOR_SPACE_FUCHSIA,
        .pNext = nullptr,
        .colorSpace = static_cast<uint32_t>(fuchsia_sysmem::wire::ColorSpaceType::kSrgb),
    };

    std::vector<VkImageFormatConstraintsInfoFUCHSIA> formatInfos;
    if (pImageInfo->format == VK_FORMAT_UNDEFINED) {
        const auto kFormats = {
            VK_FORMAT_B8G8R8A8_SRGB,
            VK_FORMAT_R8G8B8A8_SRGB,
        };
        for (auto format : kFormats) {
            // Shallow copy; uses pNext from pImageInfo directly.
            auto createInfo = *pImageInfo;
            createInfo.format = format;
            formatInfos.push_back(VkImageFormatConstraintsInfoFUCHSIA{
                .sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_CONSTRAINTS_INFO_FUCHSIA,
                .pNext = nullptr,
                .imageCreateInfo = createInfo,
                .colorSpaceCount = 1,
                .pColorSpaces = &kDefaultColorSpace,
            });
        }
    } else {
        formatInfos.push_back(VkImageFormatConstraintsInfoFUCHSIA{
            .sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_CONSTRAINTS_INFO_FUCHSIA,
            .pNext = nullptr,
            .imageCreateInfo = *pImageInfo,
            .colorSpaceCount = 1,
            .pColorSpaces = &kDefaultColorSpace,
        });
    }

    VkImageConstraintsInfoFUCHSIA imageConstraints = {
        .sType = VK_STRUCTURE_TYPE_IMAGE_CONSTRAINTS_INFO_FUCHSIA,
        .pNext = nullptr,
        .formatConstraintsCount = static_cast<uint32_t>(formatInfos.size()),
        .pFormatConstraints = formatInfos.data(),
        .bufferCollectionConstraints =
            VkBufferCollectionConstraintsInfoFUCHSIA{
                .sType = VK_STRUCTURE_TYPE_BUFFER_COLLECTION_CONSTRAINTS_INFO_FUCHSIA,
                .pNext = nullptr,
                .minBufferCount = 1,
                .maxBufferCount = 0,
                .minBufferCountForCamping = 0,
                .minBufferCountForDedicatedSlack = 0,
                .minBufferCountForSharedSlack = 0,
            },
        .flags = 0u,
    };

    return setBufferCollectionImageConstraintsFUCHSIA(enc, device, collection, &imageConstraints);
}

VkResult addImageBufferCollectionConstraintsFUCHSIA(
    VkEncoder* enc, VkDevice device, VkPhysicalDevice physicalDevice,
    const VkImageFormatConstraintsInfoFUCHSIA* formatConstraints,  // always non-null
    VkImageTiling tiling, fuchsia_sysmem::wire::BufferCollectionConstraints* constraints) {
    // First check whether the format, tiling, and usage are supported on the host.
    VkImageFormatProperties imageFormatProperties;
    auto createInfo = &formatConstraints->imageCreateInfo;
    auto result = enc->vkGetPhysicalDeviceImageFormatProperties(
        physicalDevice, createInfo->format, createInfo->imageType, tiling, createInfo->usage,
        createInfo->flags, &imageFormatProperties, true /* do lock */);
    if (result != VK_SUCCESS) {
        mesa_logd(
            "%s: Image format (%u) type (%u) tiling (%u) "
            "usage (%u) flags (%u) not supported by physical "
            "device",
            __func__, static_cast<uint32_t>(createInfo->format),
            static_cast<uint32_t>(createInfo->imageType), static_cast<uint32_t>(tiling),
            static_cast<uint32_t>(createInfo->usage), static_cast<uint32_t>(createInfo->flags));
        return VK_ERROR_FORMAT_NOT_SUPPORTED;
    }

    // Check whether the format constraints contain unsupported format features.
    {
        VkFormatProperties formatProperties;
        enc->vkGetPhysicalDeviceFormatProperties(physicalDevice, createInfo->format,
                                                 &formatProperties, true /* do lock */);

        auto supportedFeatures = (tiling == VK_IMAGE_TILING_LINEAR)
                                     ? formatProperties.linearTilingFeatures
                                     : formatProperties.optimalTilingFeatures;
        auto requiredFeatures = formatConstraints->requiredFormatFeatures;
        if ((~supportedFeatures) & requiredFeatures) {
            mesa_logd(
                "%s: Host device supported features for %s tiling: %08x, "
                "required features: %08x, feature bits %08x missing",
                __func__, tiling == VK_IMAGE_TILING_LINEAR ? "LINEAR" : "OPTIMAL",
                static_cast<uint32_t>(supportedFeatures), static_cast<uint32_t>(requiredFeatures),
                static_cast<uint32_t>((~supportedFeatures) & requiredFeatures));
            return VK_ERROR_FORMAT_NOT_SUPPORTED;
        }
    }
    }

    fuchsia_sysmem::wire::ImageFormatConstraints imageConstraints;
    if (formatConstraints->sysmemPixelFormat != 0) {
        auto pixelFormat = static_cast<fuchsia_sysmem::wire::PixelFormatType>(
            formatConstraints->sysmemPixelFormat);
        if (createInfo->format != VK_FORMAT_UNDEFINED &&
            !vkFormatMatchesSysmemFormat(createInfo->format, pixelFormat)) {
            mesa_logd("%s: VkFormat %u doesn't match sysmem pixelFormat %lu", __func__,
                      static_cast<uint32_t>(createInfo->format),
                      formatConstraints->sysmemPixelFormat);
            return VK_ERROR_FORMAT_NOT_SUPPORTED;
        }
        imageConstraints.pixel_format.type = pixelFormat;
    } else {
        auto pixel_format = vkFormatTypeToSysmem(createInfo->format);
        if (pixel_format == fuchsia_sysmem::wire::PixelFormatType::kInvalid) {
            mesa_logd("%s: Unsupported VkFormat %u", __func__,
                      static_cast<uint32_t>(createInfo->format));
            return VK_ERROR_FORMAT_NOT_SUPPORTED;
        }
        imageConstraints.pixel_format.type = pixel_format;
    }

    imageConstraints.color_spaces_count = formatConstraints->colorSpaceCount;
    for (size_t i = 0; i < formatConstraints->colorSpaceCount; i++) {
        imageConstraints.color_space[i].type = static_cast<fuchsia_sysmem::wire::ColorSpaceType>(
            formatConstraints->pColorSpaces[i].colorSpace);
    }

    // Get row alignment from the host GPU.
    VkDeviceSize offset = 0;
    VkDeviceSize rowPitchAlignment = 1u;

    if (tiling == VK_IMAGE_TILING_LINEAR) {
        VkImageCreateInfo createInfoDup = *createInfo;
        createInfoDup.pNext = nullptr;
        enc->vkGetLinearImageLayout2GOOGLE(device, &createInfoDup, &offset, &rowPitchAlignment,
                                           true /* do lock */);
        mesa_logd(
            "vkGetLinearImageLayout2GOOGLE: format %d offset %lu "
            "rowPitchAlignment = %lu",
            (int)createInfo->format, offset, rowPitchAlignment);
    }

    imageConstraints.min_coded_width = createInfo->extent.width;
    imageConstraints.max_coded_width = 0xfffffff;
    imageConstraints.min_coded_height = createInfo->extent.height;
    imageConstraints.max_coded_height = 0xffffffff;
    // The min_bytes_per_row can be calculated by sysmem using
    // |min_coded_width|, |bytes_per_row_divisor| and color format.
    imageConstraints.min_bytes_per_row = 0;
    imageConstraints.max_bytes_per_row = 0xffffffff;
    imageConstraints.max_coded_width_times_coded_height = 0xffffffff;

    imageConstraints.layers = 1;
    imageConstraints.coded_width_divisor = 1;
    imageConstraints.coded_height_divisor = 1;
    imageConstraints.bytes_per_row_divisor = rowPitchAlignment;
    imageConstraints.start_offset_divisor = 1;
    imageConstraints.display_width_divisor = 1;
    imageConstraints.display_height_divisor = 1;
    imageConstraints.pixel_format.has_format_modifier = true;
    imageConstraints.pixel_format.format_modifier.value =
        (tiling == VK_IMAGE_TILING_LINEAR)
            ? fuchsia_sysmem::wire::kFormatModifierLinear
            : fuchsia_sysmem::wire::kFormatModifierGoogleGoldfishOptimal;

    constraints->image_format_constraints[constraints->image_format_constraints_count++] =
        imageConstraints;
    return VK_SUCCESS;
}

SetBufferCollectionBufferConstraintsResult setBufferCollectionBufferConstraintsImpl(
    fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>* pCollection,
    const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo) {
    const auto& collection = *pCollection;
    if (pBufferConstraintsInfo == nullptr) {
        mesa_loge(
            "setBufferCollectionBufferConstraints: "
            "pBufferConstraintsInfo cannot be null.");
        return {VK_ERROR_OUT_OF_DEVICE_MEMORY};
    }

    fuchsia_sysmem::wire::BufferCollectionConstraints constraints =
        defaultBufferCollectionConstraints(
            /* min_size_bytes */ pBufferConstraintsInfo->createInfo.size,
            /* buffer_count */ pBufferConstraintsInfo->bufferCollectionConstraints.minBufferCount);
    constraints.usage.vulkan =
        getBufferCollectionConstraintsVulkanBufferUsage(pBufferConstraintsInfo);

    constexpr uint32_t kVulkanPriority = 5;
    const char kName[] = "GoldfishBufferSysmemShared";
    collection->SetName(kVulkanPriority, fidl::StringView(kName));

    auto result = collection->SetConstraints(true, constraints);
    if (!result.ok()) {
        mesa_loge("setBufferCollectionConstraints: SetConstraints failed: %d", result.status());
        return {VK_ERROR_OUT_OF_DEVICE_MEMORY};
    }

    return {VK_SUCCESS, constraints};
}
#endif

#ifdef VK_USE_PLATFORM_ANDROID_KHR
uint64_t ResourceTracker::getAHardwareBufferId(AHardwareBuffer* ahw) {
    uint64_t id = 0;
    mGralloc->getId(ahw, &id);
    return id;
}
#endif

void transformExternalResourceMemoryDedicatedRequirementsForGuest(
    VkMemoryDedicatedRequirements* dedicatedReqs) {
    dedicatedReqs->prefersDedicatedAllocation = VK_TRUE;
    dedicatedReqs->requiresDedicatedAllocation = VK_TRUE;
}

void ResourceTracker::transformImageMemoryRequirementsForGuestLocked(VkImage image,
                                                                     VkMemoryRequirements* reqs) {
#ifdef VK_USE_PLATFORM_FUCHSIA
    auto it = info_VkImage.find(image);
    if (it == info_VkImage.end()) return;
    auto& info = it->second;
    if (info.isSysmemBackedMemory) {
        auto width = info.createInfo.extent.width;
        auto height = info.createInfo.extent.height;
        reqs->size = width * height * 4;
    }
#else
    // Bypass "unused parameter" checks.
    (void)image;
    (void)reqs;
#endif
}

CoherentMemoryPtr ResourceTracker::freeCoherentMemoryLocked(VkDeviceMemory memory,
                                                            VkDeviceMemory_Info& info) {
    if (info.coherentMemory && info.ptr) {
        if (info.coherentMemory->getDeviceMemory() != memory) {
            delete_goldfish_VkDeviceMemory(memory);
        }

        if (info.ptr) {
            info.coherentMemory->release(info.ptr);
            info.ptr = nullptr;
        }

        return std::move(info.coherentMemory);
    }

    return nullptr;
}

VkResult acquireSync(uint64_t syncId, int64_t& osHandle) {
    struct VirtGpuExecBuffer exec = {};
    struct gfxstreamAcquireSync acquireSync = {};
    VirtGpuDevice* instance = VirtGpuDevice::getInstance();

    acquireSync.hdr.opCode = GFXSTREAM_ACQUIRE_SYNC;
    acquireSync.syncId = syncId;

    exec.command = static_cast<void*>(&acquireSync);
    exec.command_size = sizeof(acquireSync);
    exec.flags = kFenceOut | kRingIdx | kShareableOut;

    if (instance->execBuffer(exec, nullptr)) return VK_ERROR_OUT_OF_HOST_MEMORY;

    osHandle = exec.handle.osHandle;
    return VK_SUCCESS;
}

VkResult createFence(VkDevice device, uint64_t hostFenceHandle, int64_t& osHandle) {
    struct VirtGpuExecBuffer exec = {};
    struct gfxstreamCreateExportSyncVK exportSync = {};
    VirtGpuDevice* instance = VirtGpuDevice::getInstance();

    uint64_t hostDeviceHandle = get_host_u64_VkDevice(device);

    exportSync.hdr.opCode = GFXSTREAM_CREATE_EXPORT_SYNC_VK;
    exportSync.deviceHandleLo = (uint32_t)hostDeviceHandle;
    exportSync.deviceHandleHi = (uint32_t)(hostDeviceHandle >> 32);
    exportSync.fenceHandleLo = (uint32_t)hostFenceHandle;
    exportSync.fenceHandleHi = (uint32_t)(hostFenceHandle >> 32);

    exec.command = static_cast<void*>(&exportSync);
    exec.command_size = sizeof(exportSync);
    exec.flags = kFenceOut | kRingIdx;
    if (instance->execBuffer(exec, nullptr)) return VK_ERROR_OUT_OF_HOST_MEMORY;

    osHandle = exec.handle.osHandle;
    return VK_SUCCESS;
}

void collectAllPendingDescriptorSetsBottomUp(const std::vector<VkCommandBuffer>& workingSet,
                                             std::unordered_set<VkDescriptorSet>& allDs) {
    if (workingSet.empty()) return;

    std::vector<VkCommandBuffer> nextLevel;
    for (auto commandBuffer : workingSet) {
        struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
        forAllObjects(cb->subObjects, [&nextLevel](void* secondary) {
            nextLevel.push_back((VkCommandBuffer)secondary);
        });
    }

    collectAllPendingDescriptorSetsBottomUp(nextLevel, allDs);

    for (auto cmdbuf : workingSet) {
        struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(cmdbuf);

        if (!cb->userPtr) {
            continue;  // No descriptors to update.
        }

        CommandBufferPendingDescriptorSets* pendingDescriptorSets =
            (CommandBufferPendingDescriptorSets*)(cb->userPtr);

        if (pendingDescriptorSets->sets.empty()) {
            continue;  // No descriptors to update.
        }

        allDs.insert(pendingDescriptorSets->sets.begin(), pendingDescriptorSets->sets.end());
    }
}
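
// Illustrative sketch (not in the original file): how the recursive helper
// above is typically seeded at submit time with the primaries from one
// VkSubmitInfo. Secondaries are recursed into first, then each level's own
// pending sets are merged:
//
//     std::vector<VkCommandBuffer> primaries(
//         pSubmit.pCommandBuffers, pSubmit.pCommandBuffers + pSubmit.commandBufferCount);
//     std::unordered_set<VkDescriptorSet> allPendingSets;
//     collectAllPendingDescriptorSetsBottomUp(primaries, allPendingSets);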

void commitDescriptorSetUpdates(void* context, VkQueue queue,
                                const std::unordered_set<VkDescriptorSet>& sets) {
    VkEncoder* enc = (VkEncoder*)context;

    std::unordered_map<VkDescriptorPool, uint32_t> poolSet;
    std::vector<VkDescriptorPool> pools;
    std::vector<VkDescriptorSetLayout> setLayouts;
    std::vector<uint64_t> poolIds;
    std::vector<uint32_t> descriptorSetWhichPool;
    std::vector<uint32_t> pendingAllocations;
    std::vector<uint32_t> writeStartingIndices;
    std::vector<VkWriteDescriptorSet> writesForHost;

    uint32_t poolIndex = 0;
    uint32_t currentWriteIndex = 0;
    for (auto set : sets) {
        ReifiedDescriptorSet* reified = as_goldfish_VkDescriptorSet(set)->reified;
        VkDescriptorPool pool = reified->pool;
        VkDescriptorSetLayout setLayout = reified->setLayout;

        auto it = poolSet.find(pool);
        if (it == poolSet.end()) {
            poolSet[pool] = poolIndex;
            descriptorSetWhichPool.push_back(poolIndex);
            pools.push_back(pool);
            ++poolIndex;
        } else {
            uint32_t savedPoolIndex = it->second;
            descriptorSetWhichPool.push_back(savedPoolIndex);
        }

        poolIds.push_back(reified->poolId);
        setLayouts.push_back(setLayout);
        pendingAllocations.push_back(reified->allocationPending ? 1 : 0);
        writeStartingIndices.push_back(currentWriteIndex);

        auto& writes = reified->allWrites;

        for (size_t i = 0; i < writes.size(); ++i) {
            uint32_t binding = i;

            for (size_t j = 0; j < writes[i].size(); ++j) {
                auto& write = writes[i][j];

                if (write.type == DescriptorWriteType::Empty) continue;

                uint32_t dstArrayElement = 0;

                VkDescriptorImageInfo* imageInfo = nullptr;
                VkDescriptorBufferInfo* bufferInfo = nullptr;
                VkBufferView* bufferView = nullptr;

                switch (write.type) {
                    case DescriptorWriteType::Empty:
                        break;
                    case DescriptorWriteType::ImageInfo:
                        dstArrayElement = j;
                        imageInfo = &write.imageInfo;
                        break;
                    case DescriptorWriteType::BufferInfo:
                        dstArrayElement = j;
                        bufferInfo = &write.bufferInfo;
                        break;
                    case DescriptorWriteType::BufferView:
                        dstArrayElement = j;
                        bufferView = &write.bufferView;
                        break;
                    case DescriptorWriteType::InlineUniformBlock:
                    case DescriptorWriteType::AccelerationStructure:
                        // TODO
                        mesa_loge(
                            "Encountered pending inline uniform block or acceleration structure "
                            "desc write, abort (NYI)\n");
                        abort();
                    default:
                        break;
                }

                // TODO: Combine multiple writes into one VkWriteDescriptorSet.
                VkWriteDescriptorSet forHost = {
                    VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
                    0 /* TODO: inline uniform block */,
                    set,
                    binding,
                    dstArrayElement,
                    1,
                    write.descriptorType,
                    imageInfo,
                    bufferInfo,
                    bufferView,
                };

                writesForHost.push_back(forHost);
                ++currentWriteIndex;

                // Set it back to empty.
                write.type = DescriptorWriteType::Empty;
            }
        }
    }

    // Skip out if there's nothing to VkWriteDescriptorSet home about.
    if (writesForHost.empty()) {
        return;
    }

    enc->vkQueueCommitDescriptorSetUpdatesGOOGLE(
        queue, (uint32_t)pools.size(), pools.data(), (uint32_t)sets.size(), setLayouts.data(),
        poolIds.data(), descriptorSetWhichPool.data(), pendingAllocations.data(),
        writeStartingIndices.data(), (uint32_t)writesForHost.size(), writesForHost.data(),
        false /* no lock */);

    // If we got here, then we definitely serviced the allocations.
    for (auto set : sets) {
        ReifiedDescriptorSet* reified = as_goldfish_VkDescriptorSet(set)->reified;
        reified->allocationPending = false;
    }
}

uint32_t ResourceTracker::syncEncodersForCommandBuffer(VkCommandBuffer commandBuffer,
                                                       VkEncoder* currentEncoder) {
    struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
    if (!cb) return 0;

    auto lastEncoder = cb->lastUsedEncoder;

    if (lastEncoder == currentEncoder) return 0;

    currentEncoder->incRef();

    cb->lastUsedEncoder = currentEncoder;

    if (!lastEncoder) return 0;

    auto oldSeq = cb->sequenceNumber;
    cb->sequenceNumber += 2;
    lastEncoder->vkCommandBufferHostSyncGOOGLE(commandBuffer, false, oldSeq + 1,
                                               true /* do lock */);
    lastEncoder->flush();
    currentEncoder->vkCommandBufferHostSyncGOOGLE(commandBuffer, true, oldSeq + 2,
                                                  true /* do lock */);

    if (lastEncoder->decRef()) {
        cb->lastUsedEncoder = nullptr;
    }
    return 0;
}
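
// Note on the handoff in syncEncodersForCommandBuffer above: the previous
// encoder is synced at oldSeq + 1 and flushed before the current encoder syncs
// at oldSeq + 2, so the host observes the two encoders' streams for this
// command buffer in a well-defined order before any new work is encoded on
// |currentEncoder|.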
982 
addPendingDescriptorSets(VkCommandBuffer commandBuffer,uint32_t descriptorSetCount,const VkDescriptorSet * pDescriptorSets)983 void addPendingDescriptorSets(VkCommandBuffer commandBuffer, uint32_t descriptorSetCount,
984                               const VkDescriptorSet* pDescriptorSets) {
985     struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
986 
987     if (!cb->userPtr) {
988         CommandBufferPendingDescriptorSets* newPendingSets = new CommandBufferPendingDescriptorSets;
989         cb->userPtr = newPendingSets;
990     }
991 
992     CommandBufferPendingDescriptorSets* pendingSets =
993         (CommandBufferPendingDescriptorSets*)cb->userPtr;
994 
995     for (uint32_t i = 0; i < descriptorSetCount; ++i) {
996         pendingSets->sets.insert(pDescriptorSets[i]);
997     }
998 }
999 
decDescriptorSetLayoutRef(void * context,VkDevice device,VkDescriptorSetLayout descriptorSetLayout,const VkAllocationCallbacks * pAllocator)1000 void decDescriptorSetLayoutRef(void* context, VkDevice device,
1001                                VkDescriptorSetLayout descriptorSetLayout,
1002                                const VkAllocationCallbacks* pAllocator) {
1003     if (!descriptorSetLayout) return;
1004 
1005     struct goldfish_VkDescriptorSetLayout* setLayout =
1006         as_goldfish_VkDescriptorSetLayout(descriptorSetLayout);
1007 
1008     if (0 == --setLayout->layoutInfo->refcount) {
1009         VkEncoder* enc = (VkEncoder*)context;
1010         enc->vkDestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator,
1011                                           true /* do lock */);
1012     }
1013 }
1014 
ensureSyncDeviceFd()1015 void ResourceTracker::ensureSyncDeviceFd() {
1016 #if GFXSTREAM_ENABLE_GUEST_GOLDFISH
1017     if (mSyncDeviceFd >= 0) return;
1018     mSyncDeviceFd = goldfish_sync_open();
1019     if (mSyncDeviceFd >= 0) {
1020         mesa_logd("%s: created sync device for current Vulkan process: %d\n", __func__, mSyncDeviceFd);
1021     } else {
1022         mesa_logd("%s: failed to create sync device for current Vulkan process\n", __func__);
1023     }
1024 #endif
1025 }
1026 
unregister_VkInstance(VkInstance instance)1027 void ResourceTracker::unregister_VkInstance(VkInstance instance) {
1028     std::lock_guard<std::recursive_mutex> lock(mLock);
1029 
1030     auto it = info_VkInstance.find(instance);
1031     if (it == info_VkInstance.end()) return;
1032     auto info = it->second;
1033     info_VkInstance.erase(instance);
1034 }
1035 
unregister_VkDevice(VkDevice device)1036 void ResourceTracker::unregister_VkDevice(VkDevice device) {
1037     std::lock_guard<std::recursive_mutex> lock(mLock);
1038 
1039     auto it = info_VkDevice.find(device);
1040     if (it == info_VkDevice.end()) return;
1041     auto info = it->second;
1042     info_VkDevice.erase(device);
1043 }
1044 
unregister_VkCommandPool(VkCommandPool pool)1045 void ResourceTracker::unregister_VkCommandPool(VkCommandPool pool) {
1046     if (!pool) return;
1047 
1048     clearCommandPool(pool);
1049 
1050     std::lock_guard<std::recursive_mutex> lock(mLock);
1051     info_VkCommandPool.erase(pool);
1052 }
1053 
unregister_VkSampler(VkSampler sampler)1054 void ResourceTracker::unregister_VkSampler(VkSampler sampler) {
1055     if (!sampler) return;
1056 
1057     std::lock_guard<std::recursive_mutex> lock(mLock);
1058     info_VkSampler.erase(sampler);
1059 }
1060 
unregister_VkCommandBuffer(VkCommandBuffer commandBuffer)1061 void ResourceTracker::unregister_VkCommandBuffer(VkCommandBuffer commandBuffer) {
1062     resetCommandBufferStagingInfo(commandBuffer, true /* also reset primaries */,
1063                                   true /* also clear pending descriptor sets */);
1064 
1065     struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
1066     if (!cb) return;
1067     if (cb->lastUsedEncoder) {
1068         cb->lastUsedEncoder->decRef();
1069     }
1070     eraseObjects(&cb->subObjects);
1071     forAllObjects(cb->poolObjects, [cb](void* commandPool) {
1072         struct goldfish_VkCommandPool* p = as_goldfish_VkCommandPool((VkCommandPool)commandPool);
1073         eraseObject(&p->subObjects, (void*)cb);
1074     });
1075     eraseObjects(&cb->poolObjects);
1076 
1077     if (cb->userPtr) {
1078         CommandBufferPendingDescriptorSets* pendingSets =
1079             (CommandBufferPendingDescriptorSets*)cb->userPtr;
1080         delete pendingSets;
1081     }
1082 
1083     std::lock_guard<std::recursive_mutex> lock(mLock);
1084     info_VkCommandBuffer.erase(commandBuffer);
1085 }
1086 
unregister_VkQueue(VkQueue queue)1087 void ResourceTracker::unregister_VkQueue(VkQueue queue) {
1088     struct goldfish_VkQueue* q = as_goldfish_VkQueue(queue);
1089     if (!q) return;
1090     if (q->lastUsedEncoder) {
1091         q->lastUsedEncoder->decRef();
1092     }
1093 
1094     std::lock_guard<std::recursive_mutex> lock(mLock);
1095     info_VkQueue.erase(queue);
1096 }
1097 
unregister_VkDeviceMemory(VkDeviceMemory mem)1098 void ResourceTracker::unregister_VkDeviceMemory(VkDeviceMemory mem) {
1099     std::lock_guard<std::recursive_mutex> lock(mLock);
1100 
1101     auto it = info_VkDeviceMemory.find(mem);
1102     if (it == info_VkDeviceMemory.end()) return;
1103 
1104     auto& memInfo = it->second;
1105 
1106 #ifdef VK_USE_PLATFORM_ANDROID_KHR
1107     if (memInfo.ahw) {
1108         mGralloc->release(memInfo.ahw);
1109     }
1110 #endif
1111 
1112     if (memInfo.vmoHandle != ZX_HANDLE_INVALID) {
1113         zx_handle_close(memInfo.vmoHandle);
1114     }
1115 
1116     info_VkDeviceMemory.erase(mem);
1117 }
1118 
unregister_VkImage(VkImage img)1119 void ResourceTracker::unregister_VkImage(VkImage img) {
1120     std::lock_guard<std::recursive_mutex> lock(mLock);
1121 
1122     auto it = info_VkImage.find(img);
1123     if (it == info_VkImage.end()) return;
1124 
1125     info_VkImage.erase(img);
1126 }
1127 
unregister_VkBuffer(VkBuffer buf)1128 void ResourceTracker::unregister_VkBuffer(VkBuffer buf) {
1129     std::lock_guard<std::recursive_mutex> lock(mLock);
1130 
1131     auto it = info_VkBuffer.find(buf);
1132     if (it == info_VkBuffer.end()) return;
1133 
1134     info_VkBuffer.erase(buf);
1135 }
1136 
unregister_VkSemaphore(VkSemaphore sem)1137 void ResourceTracker::unregister_VkSemaphore(VkSemaphore sem) {
1138     std::lock_guard<std::recursive_mutex> lock(mLock);
1139 
1140     auto it = info_VkSemaphore.find(sem);
1141     if (it == info_VkSemaphore.end()) return;
1142 
1143     auto& semInfo = it->second;
1144 
1145     if (semInfo.eventHandle != ZX_HANDLE_INVALID) {
1146         zx_handle_close(semInfo.eventHandle);
1147     }
1148 
1149 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || DETECT_OS_LINUX
1150     if (semInfo.syncFd.value_or(-1) >= 0) {
1151         mSyncHelper->close(semInfo.syncFd.value());
1152     }
1153 #endif
1154 
1155     info_VkSemaphore.erase(sem);
1156 }
1157 
unregister_VkDescriptorUpdateTemplate(VkDescriptorUpdateTemplate templ)1158 void ResourceTracker::unregister_VkDescriptorUpdateTemplate(VkDescriptorUpdateTemplate templ) {
1159     std::lock_guard<std::recursive_mutex> lock(mLock);
1160     auto it = info_VkDescriptorUpdateTemplate.find(templ);
1161     if (it == info_VkDescriptorUpdateTemplate.end()) return;
1162 
1163     auto& info = it->second;
1164     if (info.templateEntryCount) delete[] info.templateEntries;
1165     if (info.imageInfoCount) {
1166         delete[] info.imageInfoIndices;
1167         delete[] info.imageInfos;
1168     }
1169     if (info.bufferInfoCount) {
1170         delete[] info.bufferInfoIndices;
1171         delete[] info.bufferInfos;
1172     }
1173     if (info.bufferViewCount) {
1174         delete[] info.bufferViewIndices;
1175         delete[] info.bufferViews;
1176     }
1177     info_VkDescriptorUpdateTemplate.erase(it);
1178 }
1179 
unregister_VkFence(VkFence fence)1180 void ResourceTracker::unregister_VkFence(VkFence fence) {
1181     std::lock_guard<std::recursive_mutex> lock(mLock);
1182     auto it = info_VkFence.find(fence);
1183     if (it == info_VkFence.end()) return;
1184 
1185     auto& fenceInfo = it->second;
1186     (void)fenceInfo;
1187 
1188 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || DETECT_OS_LINUX
1189     if (fenceInfo.syncFd && *fenceInfo.syncFd >= 0) {
1190         mSyncHelper->close(*fenceInfo.syncFd);
1191     }
1192 #endif
1193 
1194     info_VkFence.erase(fence);
1195 }
1196 
1197 #ifdef VK_USE_PLATFORM_FUCHSIA
unregister_VkBufferCollectionFUCHSIA(VkBufferCollectionFUCHSIA collection)1198 void ResourceTracker::unregister_VkBufferCollectionFUCHSIA(VkBufferCollectionFUCHSIA collection) {
1199     std::lock_guard<std::recursive_mutex> lock(mLock);
1200     info_VkBufferCollectionFUCHSIA.erase(collection);
1201 }
1202 #endif
1203 
unregister_VkDescriptorSet_locked(VkDescriptorSet set)1204 void ResourceTracker::unregister_VkDescriptorSet_locked(VkDescriptorSet set) {
1205     struct goldfish_VkDescriptorSet* ds = as_goldfish_VkDescriptorSet(set);
1206     delete ds->reified;
1207     info_VkDescriptorSet.erase(set);
1208 }
1209 
unregister_VkDescriptorSet(VkDescriptorSet set)1210 void ResourceTracker::unregister_VkDescriptorSet(VkDescriptorSet set) {
1211     if (!set) return;
1212 
1213     std::lock_guard<std::recursive_mutex> lock(mLock);
1214     unregister_VkDescriptorSet_locked(set);
1215 }
1216 
unregister_VkDescriptorSetLayout(VkDescriptorSetLayout setLayout)1217 void ResourceTracker::unregister_VkDescriptorSetLayout(VkDescriptorSetLayout setLayout) {
1218     if (!setLayout) return;
1219 
1220     std::lock_guard<std::recursive_mutex> lock(mLock);
1221     delete as_goldfish_VkDescriptorSetLayout(setLayout)->layoutInfo;
1222     info_VkDescriptorSetLayout.erase(setLayout);
1223 }
1224 
void ResourceTracker::freeDescriptorSetsIfHostAllocated(VkEncoder* enc, VkDevice device,
                                                        uint32_t descriptorSetCount,
                                                        const VkDescriptorSet* sets) {
    for (uint32_t i = 0; i < descriptorSetCount; ++i) {
        struct goldfish_VkDescriptorSet* ds = as_goldfish_VkDescriptorSet(sets[i]);
        if (ds->reified->allocationPending) {
            unregister_VkDescriptorSet(sets[i]);
            delete_goldfish_VkDescriptorSet(sets[i]);
        } else {
            enc->vkFreeDescriptorSets(device, ds->reified->pool, 1, &sets[i], false /* no lock */);
        }
    }
}

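// With batched descriptor set updates, each reified set holds a reference on
// its set layout, so that reference is released here before the set itself is
// unregistered and deleted.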
void ResourceTracker::clearDescriptorPoolAndUnregisterDescriptorSets(void* context, VkDevice device,
                                                                     VkDescriptorPool pool) {
    std::vector<VkDescriptorSet> toClear =
        clearDescriptorPool(pool, mFeatureInfo.hasVulkanBatchedDescriptorSetUpdate);

    for (auto set : toClear) {
        if (mFeatureInfo.hasVulkanBatchedDescriptorSetUpdate) {
            VkDescriptorSetLayout setLayout = as_goldfish_VkDescriptorSet(set)->reified->setLayout;
            decDescriptorSetLayoutRef(context, device, setLayout, nullptr);
        }
        unregister_VkDescriptorSet(set);
        delete_goldfish_VkDescriptorSet(set);
    }
}

void ResourceTracker::unregister_VkDescriptorPool(VkDescriptorPool pool) {
    if (!pool) return;

    std::lock_guard<std::recursive_mutex> lock(mLock);

    struct goldfish_VkDescriptorPool* dp = as_goldfish_VkDescriptorPool(pool);
    delete dp->allocInfo;

    info_VkDescriptorPool.erase(pool);
}

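// The from-host direction is currently a no-op: guest-visible memory handles
// and ranges are already in guest terms. The parameters are consumed only to
// keep the transform interface uniform with the to-host direction.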
void ResourceTracker::deviceMemoryTransform_fromhost(VkDeviceMemory* memory, uint32_t memoryCount,
                                                     VkDeviceSize* offset, uint32_t offsetCount,
                                                     VkDeviceSize* size, uint32_t sizeCount,
                                                     uint32_t* typeIndex, uint32_t typeIndexCount,
                                                     uint32_t* typeBits, uint32_t typeBitsCount) {
    (void)memory;
    (void)memoryCount;
    (void)offset;
    (void)offsetCount;
    (void)size;
    (void)sizeCount;
    (void)typeIndex;
    (void)typeIndexCount;
    (void)typeBits;
    (void)typeBitsCount;
}

void ResourceTracker::transformImpl_VkExternalMemoryProperties_fromhost(
    VkExternalMemoryProperties* pProperties, uint32_t) {
    VkExternalMemoryHandleTypeFlags supportedHandleType = 0u;
#ifdef VK_USE_PLATFORM_FUCHSIA
    supportedHandleType |= VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA;
#endif  // VK_USE_PLATFORM_FUCHSIA
#ifdef VK_USE_PLATFORM_ANDROID_KHR
    supportedHandleType |= VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
                           VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
#endif  // VK_USE_PLATFORM_ANDROID_KHR
    if (supportedHandleType) {
        pProperties->compatibleHandleTypes &= supportedHandleType;
        pProperties->exportFromImportedHandleTypes &= supportedHandleType;
    }
}

void ResourceTracker::setInstanceInfo(VkInstance instance, uint32_t enabledExtensionCount,
                                      const char* const* ppEnabledExtensionNames,
                                      uint32_t apiVersion) {
    std::lock_guard<std::recursive_mutex> lock(mLock);
    auto& info = info_VkInstance[instance];
    info.highestApiVersion = apiVersion;

    if (!ppEnabledExtensionNames) return;

    for (uint32_t i = 0; i < enabledExtensionCount; ++i) {
        info.enabledExtensions.insert(ppEnabledExtensionNames[i]);
    }
}

void ResourceTracker::setDeviceInfo(VkDevice device, VkPhysicalDevice physdev,
                                    VkPhysicalDeviceProperties props,
                                    VkPhysicalDeviceMemoryProperties memProps,
                                    uint32_t enabledExtensionCount,
                                    const char* const* ppEnabledExtensionNames, const void* pNext) {
    std::lock_guard<std::recursive_mutex> lock(mLock);
    auto& info = info_VkDevice[device];
    info.physdev = physdev;
    info.props = props;
    info.memProps = memProps;
    info.apiVersion = props.apiVersion;

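    // Walk the create-info pNext chain and record any device memory report
    // callbacks so they can be invoked when memory report events are emitted.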
    const VkBaseInStructure* extensionCreateInfo =
        reinterpret_cast<const VkBaseInStructure*>(pNext);
    while (extensionCreateInfo) {
        if (extensionCreateInfo->sType ==
            VK_STRUCTURE_TYPE_DEVICE_DEVICE_MEMORY_REPORT_CREATE_INFO_EXT) {
            auto deviceMemoryReportCreateInfo =
                reinterpret_cast<const VkDeviceDeviceMemoryReportCreateInfoEXT*>(
                    extensionCreateInfo);
            if (deviceMemoryReportCreateInfo->pfnUserCallback != nullptr) {
                info.deviceMemoryReportCallbacks.emplace_back(
                    deviceMemoryReportCreateInfo->pfnUserCallback,
                    deviceMemoryReportCreateInfo->pUserData);
            }
        }
        extensionCreateInfo = extensionCreateInfo->pNext;
    }

    if (!ppEnabledExtensionNames) return;

    for (uint32_t i = 0; i < enabledExtensionCount; ++i) {
        info.enabledExtensions.insert(ppEnabledExtensionNames[i]);
    }
}

void ResourceTracker::setDeviceMemoryInfo(VkDevice device, VkDeviceMemory memory,
                                          VkDeviceSize allocationSize, uint8_t* ptr,
                                          uint32_t memoryTypeIndex, void* ahw, bool imported,
                                          zx_handle_t vmoHandle, VirtGpuResourcePtr blobPtr) {
    std::lock_guard<std::recursive_mutex> lock(mLock);
    auto& info = info_VkDeviceMemory[memory];

    info.device = device;
    info.allocationSize = allocationSize;
    info.ptr = ptr;
    info.memoryTypeIndex = memoryTypeIndex;
#ifdef VK_USE_PLATFORM_ANDROID_KHR
    info.ahw = (AHardwareBuffer*)ahw;
#endif
    info.imported = imported;
    info.vmoHandle = vmoHandle;
    info.blobPtr = blobPtr;
}

void ResourceTracker::setImageInfo(VkImage image, VkDevice device,
                                   const VkImageCreateInfo* pCreateInfo) {
    std::lock_guard<std::recursive_mutex> lock(mLock);
    auto& info = info_VkImage[image];

    info.device = device;
    info.createInfo = *pCreateInfo;
}

uint8_t* ResourceTracker::getMappedPointer(VkDeviceMemory memory) {
    std::lock_guard<std::recursive_mutex> lock(mLock);
    const auto it = info_VkDeviceMemory.find(memory);
    if (it == info_VkDeviceMemory.end()) return nullptr;

    const auto& info = it->second;
    return info.ptr;
}

VkDeviceSize ResourceTracker::getMappedSize(VkDeviceMemory memory) {
    std::lock_guard<std::recursive_mutex> lock(mLock);
    const auto it = info_VkDeviceMemory.find(memory);
    if (it == info_VkDeviceMemory.end()) return 0;

    const auto& info = it->second;
    return info.allocationSize;
}

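// A range is valid only if it lies entirely within the mapped allocation.
// For example, for a 4096-byte allocation, {offset = 0, size = VK_WHOLE_SIZE}
// and {offset = 4095, size = 1} are valid, while {offset = 0, size = 8192}
// is not.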
bool ResourceTracker::isValidMemoryRange(const VkMappedMemoryRange& range) {
    std::lock_guard<std::recursive_mutex> lock(mLock);
    const auto it = info_VkDeviceMemory.find(range.memory);
    if (it == info_VkDeviceMemory.end()) return false;
    const auto& info = it->second;

    if (!info.ptr) return false;

    VkDeviceSize offset = range.offset;
    VkDeviceSize size = range.size;

    if (size == VK_WHOLE_SIZE) {
        return offset <= info.allocationSize;
    }

    return offset + size <= info.allocationSize;
}

void ResourceTracker::setupCaps(uint32_t& noRenderControlEnc) {
    VirtGpuDevice* instance = VirtGpuDevice::getInstance(kCapsetGfxStreamVulkan);
    mCaps = instance->getCaps();

    // Delete once goldfish Linux drivers are gone
    if (mCaps.vulkanCapset.protocolVersion == 0) {
        mCaps.vulkanCapset.colorBufferMemoryIndex = 0xFFFFFFFF;
    } else {
        // Don't query the render control encoder for features, since for virtio-gpu the
        // capabilities provide versioning. Set features to be unconditionally true, since
        // using virtio-gpu encompasses all prior goldfish features.  mFeatureInfo should be
        // deprecated in favor of caps.
        mFeatureInfo.hasVulkanNullOptionalStrings = true;
        mFeatureInfo.hasVulkanIgnoredHandles = true;
        mFeatureInfo.hasVulkanShaderFloat16Int8 = true;
        mFeatureInfo.hasVulkanQueueSubmitWithCommands = true;
        mFeatureInfo.hasDeferredVulkanCommands = true;
        mFeatureInfo.hasVulkanAsyncQueueSubmit = true;
        mFeatureInfo.hasVulkanCreateResourcesWithRequirements = true;
        mFeatureInfo.hasVirtioGpuNext = true;
        mFeatureInfo.hasVirtioGpuNativeSync = true;
        mFeatureInfo.hasVulkanBatchedDescriptorSetUpdate =
            mCaps.vulkanCapset.vulkanBatchedDescriptorSetUpdate;
        mFeatureInfo.hasVulkanAsyncQsri = true;

        ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_NULL_OPTIONAL_STRINGS_BIT;
        ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT;
        ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_SHADER_FLOAT16_INT8_BIT;
        ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
    }

    noRenderControlEnc = mCaps.vulkanCapset.noRenderControlEnc;
}

void ResourceTracker::setupFeatures(const struct GfxStreamVkFeatureInfo* features) {
    if (mFeatureInfo.setupComplete) {
        return;
    }

    mFeatureInfo = *features;
#if DETECT_OS_ANDROID
    if (mFeatureInfo.hasDirectMem) {
        mGoldfishAddressSpaceBlockProvider.reset(
            new GoldfishAddressSpaceBlockProvider(GoldfishAddressSpaceSubdeviceType::NoSubdevice));
    }
#endif  // DETECT_OS_ANDROID

#ifdef VK_USE_PLATFORM_FUCHSIA
    if (mFeatureInfo.hasVulkan) {
        fidl::ClientEnd<fuchsia_hardware_goldfish::ControlDevice> channel{zx::channel(
            GetConnectToServiceFunction()("/loader-gpu-devices/class/goldfish-control/000"))};
        if (!channel) {
            mesa_loge("failed to open control device");
            abort();
        }
        mControlDevice =
            fidl::WireSyncClient<fuchsia_hardware_goldfish::ControlDevice>(std::move(channel));

        fidl::ClientEnd<fuchsia_sysmem::Allocator> sysmem_channel{
            zx::channel(GetConnectToServiceFunction()("/svc/fuchsia.sysmem.Allocator"))};
        if (!sysmem_channel) {
            mesa_loge("failed to open sysmem connection");
        }
        mSysmemAllocator =
            fidl::WireSyncClient<fuchsia_sysmem::Allocator>(std::move(sysmem_channel));
        char name[ZX_MAX_NAME_LEN] = {};
        zx_object_get_property(zx_process_self(), ZX_PROP_NAME, name, sizeof(name));
        std::string client_name(name);
        client_name += "-goldfish";
        zx_info_handle_basic_t info;
        zx_object_get_info(zx_process_self(), ZX_INFO_HANDLE_BASIC, &info, sizeof(info), nullptr,
                           nullptr);
        mSysmemAllocator->SetDebugClientInfo(fidl::StringView::FromExternal(client_name),
                                             info.koid);
    }
#endif

    if (mFeatureInfo.hasVulkanNullOptionalStrings) {
        ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_NULL_OPTIONAL_STRINGS_BIT;
    }
    if (mFeatureInfo.hasVulkanIgnoredHandles) {
        ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT;
    }
    if (mFeatureInfo.hasVulkanShaderFloat16Int8) {
        ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_SHADER_FLOAT16_INT8_BIT;
    }
    if (mFeatureInfo.hasVulkanQueueSubmitWithCommands) {
        ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
    }

    mFeatureInfo.setupComplete = true;
}

void ResourceTracker::setupPlatformHelpers() {
#if defined(VK_USE_PLATFORM_ANDROID_KHR)
    VirtGpuDevice* instance = VirtGpuDevice::getInstance(kCapsetGfxStreamVulkan);
    auto deviceHandle = instance->getDeviceHandle();
    if (mGralloc == nullptr) {
        mGralloc.reset(gfxstream::createPlatformGralloc(deviceHandle));
    }
#endif

    if (mSyncHelper == nullptr) {
        mSyncHelper.reset(gfxstream::createPlatformSyncHelper());
    }
}

void ResourceTracker::setThreadingCallbacks(const ResourceTracker::ThreadingCallbacks& callbacks) {
    ResourceTracker::threadingCallbacks = callbacks;
}

bool ResourceTracker::usingDirectMapping() const { return true; }

uint32_t ResourceTracker::getStreamFeatures() const { return ResourceTracker::streamFeatureBits; }

bool ResourceTracker::supportsDeferredCommands() const {
    return mFeatureInfo.hasDeferredVulkanCommands;
}

bool ResourceTracker::supportsAsyncQueueSubmit() const {
    return mFeatureInfo.hasVulkanAsyncQueueSubmit;
}

bool ResourceTracker::supportsCreateResourcesWithRequirements() const {
    return mFeatureInfo.hasVulkanCreateResourcesWithRequirements;
}

int ResourceTracker::getHostInstanceExtensionIndex(const std::string& extName) const {
    int i = 0;
    for (const auto& prop : mHostInstanceExtensions) {
        if (extName == std::string(prop.extensionName)) {
            return i;
        }
        ++i;
    }
    return -1;
}

int ResourceTracker::getHostDeviceExtensionIndex(const std::string& extName) const {
    int i = 0;
    for (const auto& prop : mHostDeviceExtensions) {
        if (extName == std::string(prop.extensionName)) {
            return i;
        }
        ++i;
    }
    return -1;
}

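// Guest-to-host translation: a guest VkDeviceMemory that was sub-allocated
// from a host-owned coherent memory block is rewritten to the backing host
// memory handle, with the sub-allocation's base added to any offsets and
// VK_WHOLE_SIZE expanded to the actual allocation size.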
void ResourceTracker::deviceMemoryTransform_tohost(VkDeviceMemory* memory, uint32_t memoryCount,
                                                   VkDeviceSize* offset, uint32_t offsetCount,
                                                   VkDeviceSize* size, uint32_t sizeCount,
                                                   uint32_t* typeIndex, uint32_t typeIndexCount,
                                                   uint32_t* typeBits, uint32_t typeBitsCount) {
    (void)memoryCount;
    (void)offsetCount;
    (void)sizeCount;
    (void)typeIndex;
    (void)typeIndexCount;
    (void)typeBits;
    (void)typeBitsCount;

    if (memory) {
        std::lock_guard<std::recursive_mutex> lock(mLock);

        for (uint32_t i = 0; i < memoryCount; ++i) {
            VkDeviceMemory mem = memory[i];

            auto it = info_VkDeviceMemory.find(mem);
            if (it == info_VkDeviceMemory.end()) return;

            const auto& info = it->second;

            if (!info.coherentMemory) continue;

            memory[i] = info.coherentMemory->getDeviceMemory();

            if (offset) {
                offset[i] = info.coherentMemoryOffset + offset[i];
            }

            if (size && size[i] == VK_WHOLE_SIZE) {
                size[i] = info.allocationSize;
            }

            // TODO
            (void)memory;
            (void)offset;
            (void)size;
        }
    }
}

uint32_t ResourceTracker::getColorBufferMemoryIndex(void* context, VkDevice device) {
    // Create a test image to get the memory requirements
    VkEncoder* enc = (VkEncoder*)context;
    VkImageCreateInfo createInfo = {
        .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
        .imageType = VK_IMAGE_TYPE_2D,
        .format = VK_FORMAT_R8G8B8A8_UNORM,
        .extent = {64, 64, 1},
        .mipLevels = 1,
        .arrayLayers = 1,
        .samples = VK_SAMPLE_COUNT_1_BIT,
        .tiling = VK_IMAGE_TILING_OPTIMAL,
        .usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT |
                 VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
                 VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT,
        .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
    };
    VkImage image = VK_NULL_HANDLE;
    VkResult res = enc->vkCreateImage(device, &createInfo, nullptr, &image, true /* do lock */);

    if (res != VK_SUCCESS) {
        return 0;
    }

    VkMemoryRequirements memReqs;
    enc->vkGetImageMemoryRequirements(device, image, &memReqs, true /* do lock */);
    enc->vkDestroyImage(device, image, nullptr, true /* do lock */);

    const VkPhysicalDeviceMemoryProperties& memProps =
        getPhysicalDeviceMemoryProperties(context, device, VK_NULL_HANDLE);

    // Currently, the host looks for the last index that has memory
    // property type VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT
    VkMemoryPropertyFlags memoryProperty = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    for (int i = VK_MAX_MEMORY_TYPES - 1; i >= 0; --i) {
        if ((memReqs.memoryTypeBits & (1u << i)) &&
            (memProps.memoryTypes[i].propertyFlags & memoryProperty)) {
            return i;
        }
    }

    return 0;
}

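// Guest callers use the standard Vulkan two-call idiom, e.g.:
//   uint32_t count = 0;
//   vkEnumerateInstanceExtensionProperties(nullptr, &count, nullptr);
//   std::vector<VkExtensionProperties> props(count);
//   vkEnumerateInstanceExtensionProperties(nullptr, &count, props.data());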
VkResult ResourceTracker::on_vkEnumerateInstanceExtensionProperties(
    void* context, VkResult, const char*, uint32_t* pPropertyCount,
    VkExtensionProperties* pProperties) {
    std::vector<const char*> allowedExtensionNames = {
        "VK_KHR_get_physical_device_properties2",
        "VK_KHR_sampler_ycbcr_conversion",
#if defined(VK_USE_PLATFORM_ANDROID_KHR) || DETECT_OS_LINUX
        "VK_KHR_external_semaphore_capabilities",
        "VK_KHR_external_memory_capabilities",
        "VK_KHR_external_fence_capabilities",
        "VK_EXT_debug_utils",
#endif
    };

    VkEncoder* enc = (VkEncoder*)context;

    // Only advertise a select set of extensions.
    if (mHostInstanceExtensions.empty()) {
        uint32_t hostPropCount = 0;
        enc->vkEnumerateInstanceExtensionProperties(nullptr, &hostPropCount, nullptr,
                                                    true /* do lock */);
        mHostInstanceExtensions.resize(hostPropCount);

        VkResult hostRes = enc->vkEnumerateInstanceExtensionProperties(
            nullptr, &hostPropCount, mHostInstanceExtensions.data(), true /* do lock */);

        if (hostRes != VK_SUCCESS) {
            return hostRes;
        }
    }

    std::vector<VkExtensionProperties> filteredExts;

    for (size_t i = 0; i < allowedExtensionNames.size(); ++i) {
        auto extIndex = getHostInstanceExtensionIndex(allowedExtensionNames[i]);
        if (extIndex != -1) {
            filteredExts.push_back(mHostInstanceExtensions[extIndex]);
        }
    }

    VkExtensionProperties anbExtProps[] = {
#ifdef VK_USE_PLATFORM_FUCHSIA
        {"VK_KHR_external_memory_capabilities", 1},
        {"VK_KHR_external_semaphore_capabilities", 1},
#endif
    };

    for (auto& anbExtProp : anbExtProps) {
        filteredExts.push_back(anbExtProp);
    }

    // Spec:
    //
    // https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/vkEnumerateInstanceExtensionProperties.html
    //
    // If pProperties is NULL, then the number of extension properties
    // available is returned in pPropertyCount. Otherwise, pPropertyCount
    // must point to a variable set by the user to the number of elements
    // in the pProperties array, and on return the variable is overwritten
    // with the number of structures actually written to pProperties. If
    // pPropertyCount is less than the number of extension properties
    // available, at most pPropertyCount structures will be written. If
    // pPropertyCount is smaller than the number of extensions available,
    // VK_INCOMPLETE will be returned instead of VK_SUCCESS, to indicate
    // that not all the available properties were returned.
    //
    // pPropertyCount must be a valid pointer to a uint32_t value
    if (!pPropertyCount) return VK_ERROR_INITIALIZATION_FAILED;

    if (!pProperties) {
        *pPropertyCount = (uint32_t)filteredExts.size();
        return VK_SUCCESS;
    } else {
        auto actualExtensionCount = (uint32_t)filteredExts.size();
        if (*pPropertyCount > actualExtensionCount) {
            *pPropertyCount = actualExtensionCount;
        }

        for (uint32_t i = 0; i < *pPropertyCount; ++i) {
            pProperties[i] = filteredExts[i];
        }

        if (actualExtensionCount > *pPropertyCount) {
            return VK_INCOMPLETE;
        }

        return VK_SUCCESS;
    }
}

VkResult ResourceTracker::on_vkEnumerateDeviceExtensionProperties(
    void* context, VkResult, VkPhysicalDevice physdev, const char*, uint32_t* pPropertyCount,
    VkExtensionProperties* pProperties) {
    std::vector<const char*> allowedExtensionNames = {
        "VK_KHR_vulkan_memory_model",
        "VK_KHR_buffer_device_address",
        "VK_KHR_maintenance1",
        "VK_KHR_maintenance2",
        "VK_KHR_maintenance3",
        "VK_KHR_bind_memory2",
        "VK_KHR_dedicated_allocation",
        "VK_KHR_get_memory_requirements2",
        "VK_KHR_sampler_ycbcr_conversion",
        "VK_KHR_shader_float16_int8",
    // Timeline semaphores buggy in newer NVIDIA drivers
    // (vkWaitSemaphoresKHR causes further vkCommandBuffer dispatches to deadlock)
#ifndef VK_USE_PLATFORM_ANDROID_KHR
        "VK_KHR_timeline_semaphore",
#endif
        "VK_AMD_gpu_shader_half_float",
        "VK_NV_shader_subgroup_partitioned",
        "VK_KHR_shader_subgroup_extended_types",
        "VK_EXT_subgroup_size_control",
        "VK_EXT_provoking_vertex",
        "VK_KHR_line_rasterization",
        "VK_EXT_line_rasterization",
        "VK_KHR_shader_terminate_invocation",
        "VK_EXT_transform_feedback",
        "VK_EXT_primitive_topology_list_restart",
        "VK_EXT_index_type_uint8",
        "VK_EXT_load_store_op_none",
        "VK_EXT_swapchain_colorspace",
        "VK_EXT_image_robustness",
        "VK_EXT_custom_border_color",
        "VK_EXT_shader_stencil_export",
        "VK_KHR_image_format_list",
        "VK_KHR_incremental_present",
        "VK_KHR_pipeline_executable_properties",
        "VK_EXT_queue_family_foreign",
        "VK_EXT_scalar_block_layout",
        "VK_KHR_descriptor_update_template",
        "VK_KHR_storage_buffer_storage_class",
        "VK_EXT_depth_clip_enable",
        "VK_KHR_create_renderpass2",
        "VK_EXT_vertex_attribute_divisor",
        "VK_EXT_host_query_reset",
#if defined(VK_USE_PLATFORM_ANDROID_KHR) || DETECT_OS_LINUX
        "VK_KHR_external_semaphore",
        "VK_KHR_external_semaphore_fd",
        // "VK_KHR_external_semaphore_win32", not exposed because it's translated to fd
        "VK_KHR_external_memory",
        "VK_KHR_external_fence",
        "VK_KHR_external_fence_fd",
        "VK_EXT_device_memory_report",
#endif
#if DETECT_OS_LINUX && !defined(VK_USE_PLATFORM_ANDROID_KHR)
        "VK_KHR_imageless_framebuffer",
#endif
        "VK_KHR_multiview",
        // Vulkan 1.3
        "VK_KHR_synchronization2",
        "VK_EXT_private_data",
        "VK_EXT_color_write_enable",
    };

    VkEncoder* enc = (VkEncoder*)context;

    if (mHostDeviceExtensions.empty()) {
        uint32_t hostPropCount = 0;
        enc->vkEnumerateDeviceExtensionProperties(physdev, nullptr, &hostPropCount, nullptr,
                                                  true /* do lock */);
        mHostDeviceExtensions.resize(hostPropCount);

        VkResult hostRes = enc->vkEnumerateDeviceExtensionProperties(
            physdev, nullptr, &hostPropCount, mHostDeviceExtensions.data(), true /* do lock */);

        if (hostRes != VK_SUCCESS) {
            return hostRes;
        }
    }

    std::vector<VkExtensionProperties> filteredExts;

    for (size_t i = 0; i < allowedExtensionNames.size(); ++i) {
        auto extIndex = getHostDeviceExtensionIndex(allowedExtensionNames[i]);
        if (extIndex != -1) {
            filteredExts.push_back(mHostDeviceExtensions[extIndex]);
        }
    }

    VkExtensionProperties anbExtProps[] = {
#ifdef VK_USE_PLATFORM_ANDROID_KHR
        {"VK_ANDROID_native_buffer", 7},
#endif
#ifdef VK_USE_PLATFORM_FUCHSIA
        {"VK_KHR_external_memory", 1},
        {"VK_KHR_external_semaphore", 1},
        {"VK_FUCHSIA_external_semaphore", 1},
#endif
    };

    for (auto& anbExtProp : anbExtProps) {
        filteredExts.push_back(anbExtProp);
    }

    /*
     * GfxstreamEnd2EndVkTest::DeviceMemoryReport always assumes the memory report
     * extension is present.  It is filtered out when sent host side, since for a
     * virtual GPU this is quite difficult to implement.
     *
     * Mesa runtime checks physical device features.  So if the test tries to enable
     * a device level extension without it definitely existing, the test will fail.
     *
     * The test can also be modified to check VkPhysicalDeviceDeviceMemoryReportFeaturesEXT,
     * but that's more involved.  Work around this by always advertising the extension.
     * Tracking bug: b/338270042
     */
    filteredExts.push_back(VkExtensionProperties{"VK_EXT_device_memory_report", 1});

#if defined(VK_USE_PLATFORM_ANDROID_KHR) || DETECT_OS_LINUX
    bool hostSupportsExternalFenceFd =
        getHostDeviceExtensionIndex("VK_KHR_external_fence_fd") != -1;
    if (!hostSupportsExternalFenceFd) {
        filteredExts.push_back(VkExtensionProperties{"VK_KHR_external_fence_fd", 1});
    }
#endif

#if defined(VK_USE_PLATFORM_ANDROID_KHR) || DETECT_OS_LINUX
    bool hostHasPosixExternalSemaphore =
        getHostDeviceExtensionIndex("VK_KHR_external_semaphore_fd") != -1;
    if (!hostHasPosixExternalSemaphore) {
        // Always advertise posix external semaphore capabilities on Android/Linux.
        // SYNC_FD handles will always work, regardless of host support. Support
        // for non-sync, opaque FDs depends on host driver support, but will
        // be handled accordingly by the host.
        filteredExts.push_back(VkExtensionProperties{"VK_KHR_external_semaphore_fd", 1});
    }
#endif

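    // Advertise guest-side external memory extensions only when the host
    // exposes at least one external memory path (win32, posix fd, metal, or
    // QNX screen buffer).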
    bool win32ExtMemAvailable = getHostDeviceExtensionIndex("VK_KHR_external_memory_win32") != -1;
    bool posixExtMemAvailable = getHostDeviceExtensionIndex("VK_KHR_external_memory_fd") != -1;
    bool metalExtMemAvailable = getHostDeviceExtensionIndex("VK_EXT_external_memory_metal") != -1 ||
                                getHostDeviceExtensionIndex("VK_MVK_moltenvk") != -1;
    bool qnxExtMemAvailable =
        getHostDeviceExtensionIndex("VK_QNX_external_memory_screen_buffer") != -1;

    bool hostHasExternalMemorySupport =
        win32ExtMemAvailable || posixExtMemAvailable || metalExtMemAvailable || qnxExtMemAvailable;

    if (hostHasExternalMemorySupport) {
#ifdef VK_USE_PLATFORM_ANDROID_KHR
        filteredExts.push_back(
            VkExtensionProperties{"VK_ANDROID_external_memory_android_hardware_buffer", 7});
        filteredExts.push_back(VkExtensionProperties{"VK_EXT_queue_family_foreign", 1});
#endif
#ifdef VK_USE_PLATFORM_FUCHSIA
        filteredExts.push_back(VkExtensionProperties{"VK_FUCHSIA_external_memory", 1});
        filteredExts.push_back(VkExtensionProperties{"VK_FUCHSIA_buffer_collection", 1});
#endif
#if !defined(VK_USE_PLATFORM_ANDROID_KHR) && DETECT_OS_LINUX
        filteredExts.push_back(VkExtensionProperties{"VK_KHR_external_memory_fd", 1});
        filteredExts.push_back(VkExtensionProperties{"VK_EXT_external_memory_dma_buf", 1});
        // In case the host doesn't support format modifiers, they are emulated
        // on the guest side.
        filteredExts.push_back(VkExtensionProperties{"VK_EXT_image_drm_format_modifier", 1});
#endif
    }

    // NOTE: the Vulkan Loader's trampoline functions will remove duplicates. This can lead
    // to errors if this function returns VK_SUCCESS with N elements (including a duplicate)
    // but the Vulkan Loader's trampoline function returns VK_INCOMPLETE with N-1 elements
    // (without the duplicate).
    std::sort(filteredExts.begin(),
              filteredExts.end(),
              [](const VkExtensionProperties& a,
                 const VkExtensionProperties& b) {
                  return strcmp(a.extensionName, b.extensionName) < 0;
              });
    filteredExts.erase(std::unique(filteredExts.begin(),
                                   filteredExts.end(),
                                   [](const VkExtensionProperties& a,
                                      const VkExtensionProperties& b) {
                                       return strcmp(a.extensionName, b.extensionName) == 0;
                                   }),
                       filteredExts.end());

    // Spec:
    //
    // https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/vkEnumerateDeviceExtensionProperties.html
    //
    // pPropertyCount is a pointer to an integer related to the number of
    // extension properties available or queried, and is treated in the
    // same fashion as the
    // vkEnumerateInstanceExtensionProperties::pPropertyCount parameter.
    //
    // https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/vkEnumerateInstanceExtensionProperties.html
    //
    // If pProperties is NULL, then the number of extension properties
    // available is returned in pPropertyCount. Otherwise, pPropertyCount
    // must point to a variable set by the user to the number of elements
    // in the pProperties array, and on return the variable is overwritten
    // with the number of structures actually written to pProperties. If
    // pPropertyCount is less than the number of extension properties
    // available, at most pPropertyCount structures will be written. If
    // pPropertyCount is smaller than the number of extensions available,
    // VK_INCOMPLETE will be returned instead of VK_SUCCESS, to indicate
    // that not all the available properties were returned.
    //
    // pPropertyCount must be a valid pointer to a uint32_t value

    if (!pPropertyCount) return VK_ERROR_INITIALIZATION_FAILED;

    if (!pProperties) {
        *pPropertyCount = (uint32_t)filteredExts.size();
        return VK_SUCCESS;
    } else {
        auto actualExtensionCount = (uint32_t)filteredExts.size();
        if (*pPropertyCount > actualExtensionCount) {
            *pPropertyCount = actualExtensionCount;
        }

        for (uint32_t i = 0; i < *pPropertyCount; ++i) {
            pProperties[i] = filteredExts[i];
        }

        if (actualExtensionCount > *pPropertyCount) {
            return VK_INCOMPLETE;
        }

        return VK_SUCCESS;
    }
}

VkResult ResourceTracker::on_vkEnumeratePhysicalDevices(void* context, VkResult,
                                                        VkInstance instance,
                                                        uint32_t* pPhysicalDeviceCount,
                                                        VkPhysicalDevice* pPhysicalDevices) {
    VkEncoder* enc = (VkEncoder*)context;

    if (!instance) return VK_ERROR_INITIALIZATION_FAILED;

    if (!pPhysicalDeviceCount) return VK_ERROR_INITIALIZATION_FAILED;

    std::unique_lock<std::recursive_mutex> lock(mLock);

    // When this function is called, we actually need to do two things:
    // - Get full information about physical devices from the host,
    //   even if the guest did not ask for it
    // - Serve the guest query according to the spec:
    //
    // https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/vkEnumeratePhysicalDevices.html

    auto it = info_VkInstance.find(instance);

    if (it == info_VkInstance.end()) return VK_ERROR_INITIALIZATION_FAILED;

    auto& info = it->second;

    // Get the full host information here if it doesn't exist already.
    if (info.physicalDevices.empty()) {
        uint32_t hostPhysicalDeviceCount = 0;

        lock.unlock();
        VkResult countRes = enc->vkEnumeratePhysicalDevices(instance, &hostPhysicalDeviceCount,
                                                            nullptr, false /* no lock */);
        lock.lock();

        if (countRes != VK_SUCCESS) {
            mesa_loge(
                "%s: failed: could not count host physical devices. "
                "Error %d\n",
                __func__, countRes);
            return countRes;
        }

        info.physicalDevices.resize(hostPhysicalDeviceCount);

        lock.unlock();
        VkResult enumRes = enc->vkEnumeratePhysicalDevices(
            instance, &hostPhysicalDeviceCount, info.physicalDevices.data(), false /* no lock */);
        lock.lock();

        if (enumRes != VK_SUCCESS) {
            mesa_loge(
                "%s: failed: could not retrieve host physical devices. "
                "Error %d\n",
                __func__, enumRes);
            return enumRes;
        }
    }

    // Serve the guest query according to the spec.
    //
    // https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/vkEnumeratePhysicalDevices.html
    //
    // If pPhysicalDevices is NULL, then the number of physical devices
    // available is returned in pPhysicalDeviceCount. Otherwise,
    // pPhysicalDeviceCount must point to a variable set by the user to the
    // number of elements in the pPhysicalDevices array, and on return the
    // variable is overwritten with the number of handles actually written
    // to pPhysicalDevices. If pPhysicalDeviceCount is less than the number
    // of physical devices available, at most pPhysicalDeviceCount
    // structures will be written.  If pPhysicalDeviceCount is smaller than
    // the number of physical devices available, VK_INCOMPLETE will be
    // returned instead of VK_SUCCESS, to indicate that not all the
    // available physical devices were returned.

    if (!pPhysicalDevices) {
        *pPhysicalDeviceCount = (uint32_t)info.physicalDevices.size();
        return VK_SUCCESS;
    } else {
        uint32_t actualDeviceCount = (uint32_t)info.physicalDevices.size();
        uint32_t toWrite =
            actualDeviceCount < *pPhysicalDeviceCount ? actualDeviceCount : *pPhysicalDeviceCount;

        for (uint32_t i = 0; i < toWrite; ++i) {
            pPhysicalDevices[i] = info.physicalDevices[i];
        }

        *pPhysicalDeviceCount = toWrite;

        if (actualDeviceCount > *pPhysicalDeviceCount) {
            return VK_INCOMPLETE;
        }

        return VK_SUCCESS;
    }
}

void ResourceTracker::on_vkGetPhysicalDeviceProperties(void*, VkPhysicalDevice,
                                                       VkPhysicalDeviceProperties* pProperties) {
#if DETECT_OS_LINUX && !defined(VK_USE_PLATFORM_ANDROID_KHR)
    if (pProperties) {
        if (VK_PHYSICAL_DEVICE_TYPE_CPU == pProperties->deviceType) {
            /* For a Linux guest: even if the host driver reports DEVICE_TYPE_CPU,
             * override this to VIRTUAL_GPU, otherwise Linux DRM interfaces
             * will take unexpected code paths to deal with the "software" driver.
             */
            pProperties->deviceType = VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU;
        }
    }
#endif
}

void ResourceTracker::on_vkGetPhysicalDeviceFeatures2(void*, VkPhysicalDevice,
                                                      VkPhysicalDeviceFeatures2* pFeatures) {
    if (pFeatures) {
        VkPhysicalDeviceDeviceMemoryReportFeaturesEXT* memoryReportFeaturesEXT =
            vk_find_struct(pFeatures, PHYSICAL_DEVICE_DEVICE_MEMORY_REPORT_FEATURES_EXT);
        if (memoryReportFeaturesEXT) {
            memoryReportFeaturesEXT->deviceMemoryReport = VK_TRUE;
        }
    }
}

void ResourceTracker::on_vkGetPhysicalDeviceFeatures2KHR(void* context,
                                                         VkPhysicalDevice physicalDevice,
                                                         VkPhysicalDeviceFeatures2* pFeatures) {
    on_vkGetPhysicalDeviceFeatures2(context, physicalDevice, pFeatures);
}

void ResourceTracker::on_vkGetPhysicalDeviceProperties2(void* context,
                                                        VkPhysicalDevice physicalDevice,
                                                        VkPhysicalDeviceProperties2* pProperties) {
    if (pProperties) {
        on_vkGetPhysicalDeviceProperties(context, physicalDevice, &pProperties->properties);
    }
}

void ResourceTracker::on_vkGetPhysicalDeviceProperties2KHR(
    void* context, VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties2* pProperties) {
    on_vkGetPhysicalDeviceProperties2(context, physicalDevice, pProperties);
}

void ResourceTracker::on_vkGetPhysicalDeviceMemoryProperties(
    void* context, VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties* out) {
    // gfxstream decides which physical device to expose to the guest on startup.
    // Otherwise, we would need a physical-device-to-properties mapping.
    *out = getPhysicalDeviceMemoryProperties(context, VK_NULL_HANDLE, physicalDevice);
}

void ResourceTracker::on_vkGetPhysicalDeviceMemoryProperties2(
    void*, VkPhysicalDevice physdev, VkPhysicalDeviceMemoryProperties2* out) {
    on_vkGetPhysicalDeviceMemoryProperties(nullptr, physdev, &out->memoryProperties);
}

void ResourceTracker::on_vkGetDeviceQueue(void*, VkDevice device, uint32_t, uint32_t,
                                          VkQueue* pQueue) {
    std::lock_guard<std::recursive_mutex> lock(mLock);
    info_VkQueue[*pQueue].device = device;
}

void ResourceTracker::on_vkGetDeviceQueue2(void*, VkDevice device, const VkDeviceQueueInfo2*,
                                           VkQueue* pQueue) {
    std::lock_guard<std::recursive_mutex> lock(mLock);
    info_VkQueue[*pQueue].device = device;
}

VkResult ResourceTracker::on_vkCreateInstance(void* context, VkResult input_result,
                                              const VkInstanceCreateInfo* createInfo,
                                              const VkAllocationCallbacks*, VkInstance* pInstance) {
    if (input_result != VK_SUCCESS) return input_result;

    VkEncoder* enc = (VkEncoder*)context;

    uint32_t apiVersion;
    input_result = enc->vkEnumerateInstanceVersion(&apiVersion, false /* no lock */);

    setInstanceInfo(*pInstance, createInfo->enabledExtensionCount,
                    createInfo->ppEnabledExtensionNames, apiVersion);

    return input_result;
}

VkResult ResourceTracker::on_vkCreateDevice(void* context, VkResult input_result,
                                            VkPhysicalDevice physicalDevice,
                                            const VkDeviceCreateInfo* pCreateInfo,
                                            const VkAllocationCallbacks*, VkDevice* pDevice) {
    if (input_result != VK_SUCCESS) return input_result;

    VkEncoder* enc = (VkEncoder*)context;

    VkPhysicalDeviceProperties props;
    VkPhysicalDeviceMemoryProperties memProps;
    enc->vkGetPhysicalDeviceProperties(physicalDevice, &props, false /* no lock */);
    enc->vkGetPhysicalDeviceMemoryProperties(physicalDevice, &memProps, false /* no lock */);

    setDeviceInfo(*pDevice, physicalDevice, props, memProps, pCreateInfo->enabledExtensionCount,
                  pCreateInfo->ppEnabledExtensionNames, pCreateInfo->pNext);

    return input_result;
}

void ResourceTracker::on_vkDestroyDevice_pre(void* context, VkDevice device,
                                             const VkAllocationCallbacks*) {
    (void)context;
    std::lock_guard<std::recursive_mutex> lock(mLock);

    auto it = info_VkDevice.find(device);
    if (it == info_VkDevice.end()) return;

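    // Drop any device memory records that belong to the device being
    // destroyed so later lookups don't see stale entries.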
    for (auto itr = info_VkDeviceMemory.cbegin(); itr != info_VkDeviceMemory.cend();) {
        auto& memInfo = itr->second;
        if (memInfo.device == device) {
            itr = info_VkDeviceMemory.erase(itr);
        } else {
            itr++;
        }
    }
}

#if defined(VK_USE_PLATFORM_ANDROID_KHR) || DETECT_OS_LINUX
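// Restrict the reported memoryTypeBits to exactly one memory index (the one
// gfxstream selected for color buffers).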
void updateMemoryTypeBits(uint32_t* memoryTypeBits, uint32_t memoryIndex) {
    *memoryTypeBits = 1u << memoryIndex;
}
#endif

#ifdef VK_USE_PLATFORM_ANDROID_KHR

VkResult ResourceTracker::on_vkGetAndroidHardwareBufferPropertiesANDROID(
    void* context, VkResult, VkDevice device, const AHardwareBuffer* buffer,
    VkAndroidHardwareBufferPropertiesANDROID* pProperties) {
    // Delete once goldfish Linux drivers are gone
    if (mCaps.vulkanCapset.colorBufferMemoryIndex == 0xFFFFFFFF) {
        mCaps.vulkanCapset.colorBufferMemoryIndex = getColorBufferMemoryIndex(context, device);
    }

    updateMemoryTypeBits(&pProperties->memoryTypeBits, mCaps.vulkanCapset.colorBufferMemoryIndex);

    return getAndroidHardwareBufferPropertiesANDROID(mGralloc.get(), buffer, pProperties);
}

VkResult ResourceTracker::on_vkGetMemoryAndroidHardwareBufferANDROID(
    void*, VkResult, VkDevice device, const VkMemoryGetAndroidHardwareBufferInfoANDROID* pInfo,
    struct AHardwareBuffer** pBuffer) {
    if (!pInfo) return VK_ERROR_INITIALIZATION_FAILED;
    if (!pInfo->memory) return VK_ERROR_INITIALIZATION_FAILED;

    std::lock_guard<std::recursive_mutex> lock(mLock);

    auto deviceIt = info_VkDevice.find(device);

    if (deviceIt == info_VkDevice.end()) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    auto memoryIt = info_VkDeviceMemory.find(pInfo->memory);

    if (memoryIt == info_VkDeviceMemory.end()) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    auto& info = memoryIt->second;
    VkResult queryRes = getMemoryAndroidHardwareBufferANDROID(mGralloc.get(), &info.ahw);

    if (queryRes != VK_SUCCESS) return queryRes;

    *pBuffer = info.ahw;

    return queryRes;
}
#endif

#ifdef VK_USE_PLATFORM_FUCHSIA
VkResult ResourceTracker::on_vkGetMemoryZirconHandleFUCHSIA(
    void*, VkResult, VkDevice device, const VkMemoryGetZirconHandleInfoFUCHSIA* pInfo,
    uint32_t* pHandle) {
    if (!pInfo) return VK_ERROR_INITIALIZATION_FAILED;
    if (!pInfo->memory) return VK_ERROR_INITIALIZATION_FAILED;

    std::lock_guard<std::recursive_mutex> lock(mLock);

    auto deviceIt = info_VkDevice.find(device);

    if (deviceIt == info_VkDevice.end()) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    auto memoryIt = info_VkDeviceMemory.find(pInfo->memory);

    if (memoryIt == info_VkDeviceMemory.end()) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    auto& info = memoryIt->second;

    if (info.vmoHandle == ZX_HANDLE_INVALID) {
        mesa_loge("%s: memory cannot be exported", __func__);
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    *pHandle = ZX_HANDLE_INVALID;
    zx_handle_duplicate(info.vmoHandle, ZX_RIGHT_SAME_RIGHTS, pHandle);
    return VK_SUCCESS;
}

VkResult ResourceTracker::on_vkGetMemoryZirconHandlePropertiesFUCHSIA(
    void*, VkResult, VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType,
    uint32_t handle, VkMemoryZirconHandlePropertiesFUCHSIA* pProperties) {
    using fuchsia_hardware_goldfish::wire::kMemoryPropertyDeviceLocal;
    using fuchsia_hardware_goldfish::wire::kMemoryPropertyHostVisible;

    if (handleType != VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    zx_info_handle_basic_t handleInfo;
    zx_status_t status = zx::unowned_vmo(handle)->get_info(ZX_INFO_HANDLE_BASIC, &handleInfo,
                                                           sizeof(handleInfo), nullptr, nullptr);
    if (status != ZX_OK || handleInfo.type != ZX_OBJ_TYPE_VMO) {
        return VK_ERROR_INVALID_EXTERNAL_HANDLE;
    }

    std::lock_guard<std::recursive_mutex> lock(mLock);

    auto deviceIt = info_VkDevice.find(device);

    if (deviceIt == info_VkDevice.end()) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    auto& info = deviceIt->second;

    zx::vmo vmo_dup;
    status = zx::unowned_vmo(handle)->duplicate(ZX_RIGHT_SAME_RIGHTS, &vmo_dup);
    if (status != ZX_OK) {
        mesa_loge("zx_handle_duplicate() error: %d", status);
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    uint32_t memoryProperty = 0u;

    auto result = mControlDevice->GetBufferHandleInfo(std::move(vmo_dup));
    if (!result.ok()) {
        mesa_loge("mControlDevice->GetBufferHandleInfo fatal error: epitaph: %d", result.status());
        return VK_ERROR_INITIALIZATION_FAILED;
    }
    if (result.value().is_ok()) {
        memoryProperty = result.value().value()->info.memory_property();
    } else if (result.value().error_value() == ZX_ERR_NOT_FOUND) {
        // If a VMO is allocated while ColorBuffer/Buffer is not created,
        // it must be a device-local buffer, since for host-visible buffers,
        // ColorBuffer/Buffer is created at sysmem allocation time.
        memoryProperty = kMemoryPropertyDeviceLocal;
    } else {
        // Importing read-only host memory into the Vulkan driver should not
        // work, but it is not an error to try to do so. Returning a
        // VkMemoryZirconHandlePropertiesFUCHSIA with no available
        // memoryType bits should be enough for clients. See fxbug.dev/42098398
        // for other issues with this flow.
        mesa_logw("GetBufferHandleInfo failed: %d", result.value().error_value());
        pProperties->memoryTypeBits = 0;
        return VK_SUCCESS;
    }

    pProperties->memoryTypeBits = 0;
    for (uint32_t i = 0; i < info.memProps.memoryTypeCount; ++i) {
        if (((memoryProperty & kMemoryPropertyDeviceLocal) &&
             (info.memProps.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)) ||
            ((memoryProperty & kMemoryPropertyHostVisible) &&
             (info.memProps.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT))) {
            pProperties->memoryTypeBits |= 1ull << i;
        }
    }
    return VK_SUCCESS;
}

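// Look up the kernel object id (koid) of an event handle; koids uniquely
// identify the underlying object across handle duplications.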
zx_koid_t getEventKoid(zx_handle_t eventHandle) {
    if (eventHandle == ZX_HANDLE_INVALID) {
        return ZX_KOID_INVALID;
    }

    zx_info_handle_basic_t info;
    zx_status_t status = zx_object_get_info(eventHandle, ZX_INFO_HANDLE_BASIC, &info, sizeof(info),
                                            nullptr, nullptr);
    if (status != ZX_OK) {
        mesa_loge("Cannot get object info of handle %u: %d", eventHandle, status);
        return ZX_KOID_INVALID;
    }
    return info.koid;
}

VkResult ResourceTracker::on_vkImportSemaphoreZirconHandleFUCHSIA(
    void*, VkResult, VkDevice device, const VkImportSemaphoreZirconHandleInfoFUCHSIA* pInfo) {
    if (!pInfo) return VK_ERROR_INITIALIZATION_FAILED;
    if (!pInfo->semaphore) return VK_ERROR_INITIALIZATION_FAILED;

    std::lock_guard<std::recursive_mutex> lock(mLock);

    auto deviceIt = info_VkDevice.find(device);

    if (deviceIt == info_VkDevice.end()) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    auto semaphoreIt = info_VkSemaphore.find(pInfo->semaphore);

    if (semaphoreIt == info_VkSemaphore.end()) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    auto& info = semaphoreIt->second;

    if (info.eventHandle != ZX_HANDLE_INVALID) {
        zx_handle_close(info.eventHandle);
    }
#if VK_HEADER_VERSION < 174
    info.eventHandle = pInfo->handle;
#else   // VK_HEADER_VERSION >= 174
    info.eventHandle = pInfo->zirconHandle;
#endif  // VK_HEADER_VERSION < 174
    if (info.eventHandle != ZX_HANDLE_INVALID) {
        info.eventKoid = getEventKoid(info.eventHandle);
    }

    return VK_SUCCESS;
}
2396 
2397 VkResult ResourceTracker::on_vkGetSemaphoreZirconHandleFUCHSIA(
2398     void*, VkResult, VkDevice device, const VkSemaphoreGetZirconHandleInfoFUCHSIA* pInfo,
2399     uint32_t* pHandle) {
2400     if (!pInfo) return VK_ERROR_INITIALIZATION_FAILED;
2401     if (!pInfo->semaphore) return VK_ERROR_INITIALIZATION_FAILED;
2402 
2403     std::lock_guard<std::recursive_mutex> lock(mLock);
2404 
2405     auto deviceIt = info_VkDevice.find(device);
2406 
2407     if (deviceIt == info_VkDevice.end()) {
2408         return VK_ERROR_INITIALIZATION_FAILED;
2409     }
2410 
2411     auto semaphoreIt = info_VkSemaphore.find(pInfo->semaphore);
2412 
2413     if (semaphoreIt == info_VkSemaphore.end()) {
2414         return VK_ERROR_INITIALIZATION_FAILED;
2415     }
2416 
2417     auto& info = semaphoreIt->second;
2418 
2419     if (info.eventHandle == ZX_HANDLE_INVALID) {
2420         return VK_ERROR_INITIALIZATION_FAILED;
2421     }
2422 
2423     *pHandle = ZX_HANDLE_INVALID;
2424     zx_handle_duplicate(info.eventHandle, ZX_RIGHT_SAME_RIGHTS, pHandle);
2425     return VK_SUCCESS;
2426 }
2427 
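// A VkBufferCollectionFUCHSIA handle is just a pointer to a heap-allocated
// fidl::WireSyncClient bound to a sysmem BufferCollection channel. If the
// caller supplies a BufferCollectionToken it is consumed; otherwise a fresh
// shared collection is allocated. Illustrative caller-side sketch (not part
// of this file; |tokenChannel| is a hypothetical zx_handle_t and may be
// ZX_HANDLE_INVALID):
//
//   VkBufferCollectionCreateInfoFUCHSIA createInfo = {
//       .sType = VK_STRUCTURE_TYPE_BUFFER_COLLECTION_CREATE_INFO_FUCHSIA,
//       .pNext = nullptr,
//       .collectionToken = tokenChannel,
//   };
//   VkBufferCollectionFUCHSIA collection;
//   vkCreateBufferCollectionFUCHSIA(device, &createInfo, nullptr, &collection);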
2428 VkResult ResourceTracker::on_vkCreateBufferCollectionFUCHSIA(
2429     void*, VkResult, VkDevice, const VkBufferCollectionCreateInfoFUCHSIA* pInfo,
2430     const VkAllocationCallbacks*, VkBufferCollectionFUCHSIA* pCollection) {
2431     fidl::ClientEnd<::fuchsia_sysmem::BufferCollectionToken> token_client;
2432 
2433     if (pInfo->collectionToken) {
2434         token_client = fidl::ClientEnd<::fuchsia_sysmem::BufferCollectionToken>(
2435             zx::channel(pInfo->collectionToken));
2436     } else {
2437         auto endpoints = fidl::CreateEndpoints<::fuchsia_sysmem::BufferCollectionToken>();
2438         if (!endpoints.is_ok()) {
2439             mesa_loge("zx_channel_create failed: %d", endpoints.status_value());
2440             return VK_ERROR_INITIALIZATION_FAILED;
2441         }
2442 
2443         auto result = mSysmemAllocator->AllocateSharedCollection(std::move(endpoints->server));
2444         if (!result.ok()) {
2445             mesa_loge("AllocateSharedCollection failed: %d", result.status());
2446             return VK_ERROR_INITIALIZATION_FAILED;
2447         }
2448         token_client = std::move(endpoints->client);
2449     }
2450 
2451     auto endpoints = fidl::CreateEndpoints<::fuchsia_sysmem::BufferCollection>();
2452     if (!endpoints.is_ok()) {
2453         mesa_loge("zx_channel_create failed: %d", endpoints.status_value());
2454         return VK_ERROR_INITIALIZATION_FAILED;
2455     }
2456     auto [collection_client, collection_server] = std::move(endpoints.value());
2457 
2458     auto result = mSysmemAllocator->BindSharedCollection(std::move(token_client),
2459                                                          std::move(collection_server));
2460     if (!result.ok()) {
2461         mesa_loge("BindSharedCollection failed: %d", result.status());
2462         return VK_ERROR_INITIALIZATION_FAILED;
2463     }
2464 
2465     auto* sysmem_collection =
2466         new fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>(std::move(collection_client));
2467     *pCollection = reinterpret_cast<VkBufferCollectionFUCHSIA>(sysmem_collection);
2468 
2469     register_VkBufferCollectionFUCHSIA(*pCollection);
2470     return VK_SUCCESS;
2471 }
2472 
2473 void ResourceTracker::on_vkDestroyBufferCollectionFUCHSIA(void*, VkResult, VkDevice,
2474                                                           VkBufferCollectionFUCHSIA collection,
2475                                                           const VkAllocationCallbacks*) {
2476     auto sysmem_collection =
2477         reinterpret_cast<fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(collection);
2478     if (sysmem_collection) {
2479         (*sysmem_collection)->Close();
2480     }
2481     delete sysmem_collection;
2482 
2483     unregister_VkBufferCollectionFUCHSIA(collection);
2484 }
2485 
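// setBufferCollectionImageConstraintsImpl() translates Vulkan image
// constraints into sysmem BufferCollectionConstraints:
//   1. Start from the default buffer-count constraints.
//   2. For each format constraint, try to add sysmem image format constraints
//      for OPTIMAL tiling (when requested) and always for LINEAR tiling; each
//      success appends the VkImageCreateInfo index to createInfoIndex so the
//      allocated format can be mapped back later.
//   3. Fold CPU read/write flags into sysmem usage and pick permitted heaps;
//      the device-local heap is only permitted when some format supports
//      optimal tiling and no CPU access was requested.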
2486 SetBufferCollectionImageConstraintsResult ResourceTracker::setBufferCollectionImageConstraintsImpl(
2487     VkEncoder* enc, VkDevice device,
2488     fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>* pCollection,
2489     const VkImageConstraintsInfoFUCHSIA* pImageConstraintsInfo) {
2490     const auto& collection = *pCollection;
2491     if (!pImageConstraintsInfo ||
2492         pImageConstraintsInfo->sType != VK_STRUCTURE_TYPE_IMAGE_CONSTRAINTS_INFO_FUCHSIA) {
2493         mesa_loge("%s: invalid pImageConstraintsInfo", __func__);
2494         return {VK_ERROR_INITIALIZATION_FAILED};
2495     }
2496 
2497     if (pImageConstraintsInfo->formatConstraintsCount == 0) {
2498         mesa_loge("%s: formatConstraintsCount must be greater than 0", __func__);
2499         abort();
2500     }
2501 
2502     fuchsia_sysmem::wire::BufferCollectionConstraints constraints =
2503         defaultBufferCollectionConstraints(
2504             /* min_size_bytes */ 0,
2505             pImageConstraintsInfo->bufferCollectionConstraints.minBufferCount,
2506             pImageConstraintsInfo->bufferCollectionConstraints.maxBufferCount,
2507             pImageConstraintsInfo->bufferCollectionConstraints.minBufferCountForCamping,
2508             pImageConstraintsInfo->bufferCollectionConstraints.minBufferCountForDedicatedSlack,
2509             pImageConstraintsInfo->bufferCollectionConstraints.minBufferCountForSharedSlack);
2510 
2511     std::vector<fuchsia_sysmem::wire::ImageFormatConstraints> format_constraints;
2512 
2513     VkPhysicalDevice physicalDevice;
2514     {
2515         std::lock_guard<std::recursive_mutex> lock(mLock);
2516         auto deviceIt = info_VkDevice.find(device);
2517         if (deviceIt == info_VkDevice.end()) {
2518             return {VK_ERROR_INITIALIZATION_FAILED};
2519         }
2520         physicalDevice = deviceIt->second.physdev;
2521     }
2522 
2523     std::vector<uint32_t> createInfoIndex;
2524 
2525     bool hasOptimalTiling = false;
2526     for (uint32_t i = 0; i < pImageConstraintsInfo->formatConstraintsCount; i++) {
2527         const VkImageCreateInfo* createInfo =
2528             &pImageConstraintsInfo->pFormatConstraints[i].imageCreateInfo;
2529         const VkImageFormatConstraintsInfoFUCHSIA* formatConstraints =
2530             &pImageConstraintsInfo->pFormatConstraints[i];
2531 
2532         // Add ImageFormatConstraints for *optimal* tiling
2533         VkResult optimalResult = VK_ERROR_FORMAT_NOT_SUPPORTED;
2534         if (createInfo->tiling == VK_IMAGE_TILING_OPTIMAL) {
2535             optimalResult = addImageBufferCollectionConstraintsFUCHSIA(
2536                 enc, device, physicalDevice, formatConstraints, VK_IMAGE_TILING_OPTIMAL,
2537                 &constraints);
2538             if (optimalResult == VK_SUCCESS) {
2539                 createInfoIndex.push_back(i);
2540                 hasOptimalTiling = true;
2541             }
2542         }
2543 
2544         // Add ImageFormatConstraints for *linear* tiling
2545         VkResult linearResult = addImageBufferCollectionConstraintsFUCHSIA(
2546             enc, device, physicalDevice, formatConstraints, VK_IMAGE_TILING_LINEAR, &constraints);
2547         if (linearResult == VK_SUCCESS) {
2548             createInfoIndex.push_back(i);
2549         }
2550 
2551         // Update usage and BufferMemoryConstraints
2552         if (linearResult == VK_SUCCESS || optimalResult == VK_SUCCESS) {
2553             constraints.usage.vulkan |= getBufferCollectionConstraintsVulkanImageUsage(createInfo);
2554 
2555             if (formatConstraints && formatConstraints->flags) {
2556                 mesa_logw(
2557                     "%s: Non-zero flags (%08x) in image format "
2558                     "constraints; this is currently not supported, see "
2559                     "fxbug.dev/42147900.",
2560                     __func__, formatConstraints->flags);
2561             }
2562         }
2563     }
2564 
2565     // Set buffer memory constraints based on optimal/linear tiling support
2566     // and flags.
2567     VkImageConstraintsInfoFlagsFUCHSIA flags = pImageConstraintsInfo->flags;
2568     if (flags & VK_IMAGE_CONSTRAINTS_INFO_CPU_READ_RARELY_FUCHSIA)
2569         constraints.usage.cpu |= fuchsia_sysmem::wire::kCpuUsageRead;
2570     if (flags & VK_IMAGE_CONSTRAINTS_INFO_CPU_READ_OFTEN_FUCHSIA)
2571         constraints.usage.cpu |= fuchsia_sysmem::wire::kCpuUsageReadOften;
2572     if (flags & VK_IMAGE_CONSTRAINTS_INFO_CPU_WRITE_RARELY_FUCHSIA)
2573         constraints.usage.cpu |= fuchsia_sysmem::wire::kCpuUsageWrite;
2574     if (flags & VK_IMAGE_CONSTRAINTS_INFO_CPU_WRITE_OFTEN_FUCHSIA)
2575         constraints.usage.cpu |= fuchsia_sysmem::wire::kCpuUsageWriteOften;
2576 
2577     constraints.has_buffer_memory_constraints = true;
2578     auto& memory_constraints = constraints.buffer_memory_constraints;
2579     memory_constraints.cpu_domain_supported = true;
2580     memory_constraints.ram_domain_supported = true;
2581     memory_constraints.inaccessible_domain_supported =
2582         hasOptimalTiling && !(flags & (VK_IMAGE_CONSTRAINTS_INFO_CPU_READ_RARELY_FUCHSIA |
2583                                        VK_IMAGE_CONSTRAINTS_INFO_CPU_READ_OFTEN_FUCHSIA |
2584                                        VK_IMAGE_CONSTRAINTS_INFO_CPU_WRITE_RARELY_FUCHSIA |
2585                                        VK_IMAGE_CONSTRAINTS_INFO_CPU_WRITE_OFTEN_FUCHSIA));
2586 
2587     if (memory_constraints.inaccessible_domain_supported) {
2588         memory_constraints.heap_permitted_count = 2;
2589         memory_constraints.heap_permitted[0] = fuchsia_sysmem::wire::HeapType::kGoldfishDeviceLocal;
2590         memory_constraints.heap_permitted[1] = fuchsia_sysmem::wire::HeapType::kGoldfishHostVisible;
2591     } else {
2592         memory_constraints.heap_permitted_count = 1;
2593         memory_constraints.heap_permitted[0] = fuchsia_sysmem::wire::HeapType::kGoldfishHostVisible;
2594     }
2595 
2596     if (constraints.image_format_constraints_count == 0) {
2597         mesa_loge("%s: none of the specified formats is supported by the device", __func__);
2598         return {VK_ERROR_FORMAT_NOT_SUPPORTED};
2599     }
2600 
2601     constexpr uint32_t kVulkanPriority = 5;
2602     const char kName[] = "GoldfishSysmemShared";
2603     collection->SetName(kVulkanPriority, fidl::StringView(kName));
2604 
2605     auto result = collection->SetConstraints(true, constraints);
2606     if (!result.ok()) {
2607         mesa_loge("setBufferCollectionConstraints: SetConstraints failed: %d", result.status());
2608         return {VK_ERROR_INITIALIZATION_FAILED};
2609     }
2610 
2611     return {VK_SUCCESS, constraints, std::move(createInfoIndex)};
2612 }
2613 
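// Thin wrapper around the impl above: on success, caches the sysmem
// constraints and the createInfoIndex mapping on the tracked
// VkBufferCollectionFUCHSIA so vkGetBufferCollectionPropertiesFUCHSIA can
// resolve createInfoIndex without another sysmem round trip.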
2614 VkResult ResourceTracker::setBufferCollectionImageConstraintsFUCHSIA(
2615     VkEncoder* enc, VkDevice device,
2616     fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>* pCollection,
2617     const VkImageConstraintsInfoFUCHSIA* pImageConstraintsInfo) {
2618     const auto& collection = *pCollection;
2619 
2620     auto setConstraintsResult =
2621         setBufferCollectionImageConstraintsImpl(enc, device, pCollection, pImageConstraintsInfo);
2622     if (setConstraintsResult.result != VK_SUCCESS) {
2623         return setConstraintsResult.result;
2624     }
2625 
2626     // copy constraints to info_VkBufferCollectionFUCHSIA if
2627     // |collection| is a valid VkBufferCollectionFUCHSIA handle.
2628     std::lock_guard<std::recursive_mutex> lock(mLock);
2629     VkBufferCollectionFUCHSIA buffer_collection =
2630         reinterpret_cast<VkBufferCollectionFUCHSIA>(pCollection);
2631     if (info_VkBufferCollectionFUCHSIA.find(buffer_collection) !=
2632         info_VkBufferCollectionFUCHSIA.end()) {
2633         info_VkBufferCollectionFUCHSIA[buffer_collection].constraints =
2634             std::make_optional<fuchsia_sysmem::wire::BufferCollectionConstraints>(
2635                 std::move(setConstraintsResult.constraints));
2636         info_VkBufferCollectionFUCHSIA[buffer_collection].createInfoIndex =
2637             std::move(setConstraintsResult.createInfoIndex);
2638     }
2639 
2640     return VK_SUCCESS;
2641 }
2642 
2643 VkResult ResourceTracker::setBufferCollectionBufferConstraintsFUCHSIA(
2644     fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>* pCollection,
2645     const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo) {
2646     auto setConstraintsResult =
2647         setBufferCollectionBufferConstraintsImpl(pCollection, pBufferConstraintsInfo);
2648     if (setConstraintsResult.result != VK_SUCCESS) {
2649         return setConstraintsResult.result;
2650     }
2651 
2652     // copy constraints to info_VkBufferCollectionFUCHSIA if
2653     // |collection| is a valid VkBufferCollectionFUCHSIA handle.
2654     std::lock_guard<std::recursive_mutex> lock(mLock);
2655     VkBufferCollectionFUCHSIA buffer_collection =
2656         reinterpret_cast<VkBufferCollectionFUCHSIA>(pCollection);
2657     if (info_VkBufferCollectionFUCHSIA.find(buffer_collection) !=
2658         info_VkBufferCollectionFUCHSIA.end()) {
2659         info_VkBufferCollectionFUCHSIA[buffer_collection].constraints =
2660             std::make_optional<fuchsia_sysmem::wire::BufferCollectionConstraints>(
2661                 setConstraintsResult.constraints);
2662     }
2663 
2664     return VK_SUCCESS;
2665 }
2666 
2667 VkResult ResourceTracker::on_vkSetBufferCollectionImageConstraintsFUCHSIA(
2668     void* context, VkResult, VkDevice device, VkBufferCollectionFUCHSIA collection,
2669     const VkImageConstraintsInfoFUCHSIA* pImageConstraintsInfo) {
2670     VkEncoder* enc = (VkEncoder*)context;
2671     auto sysmem_collection =
2672         reinterpret_cast<fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(collection);
2673     return setBufferCollectionImageConstraintsFUCHSIA(enc, device, sysmem_collection,
2674                                                       pImageConstraintsInfo);
2675 }
2676 
2677 VkResult ResourceTracker::on_vkSetBufferCollectionBufferConstraintsFUCHSIA(
2678     void*, VkResult, VkDevice, VkBufferCollectionFUCHSIA collection,
2679     const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo) {
2680     auto sysmem_collection =
2681         reinterpret_cast<fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(collection);
2682     return setBufferCollectionBufferConstraintsFUCHSIA(sysmem_collection, pBufferConstraintsInfo);
2683 }
2684 
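// Maps the settings sysmem actually allocated back to the index of the
// VkImageCreateInfo that produced them: walk the constraints we sent, skip
// candidates whose pixel format, stride, or coded size are incompatible with
// the allocation, require the allocated color spaces to be a subset of the
// candidate's, and return the first survivor's index.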
2685 VkResult ResourceTracker::getBufferCollectionImageCreateInfoIndexLocked(
2686     VkBufferCollectionFUCHSIA collection, fuchsia_sysmem::wire::BufferCollectionInfo2& info,
2687     uint32_t* outCreateInfoIndex) {
2688     if (!info_VkBufferCollectionFUCHSIA[collection].constraints.has_value()) {
2689         mesa_loge("%s: constraints not set", __func__);
2690         return VK_ERROR_OUT_OF_DEVICE_MEMORY;
2691     }
2692 
2693     if (!info.settings.has_image_format_constraints) {
2694         // no image format constraints, skip getting createInfoIndex.
2695         return VK_SUCCESS;
2696     }
2697 
2698     const auto& constraints = *info_VkBufferCollectionFUCHSIA[collection].constraints;
2699     const auto& createInfoIndices = info_VkBufferCollectionFUCHSIA[collection].createInfoIndex;
2700     const auto& out = info.settings.image_format_constraints;
2701     bool foundCreateInfo = false;
2702 
2703     for (size_t imageFormatIndex = 0; imageFormatIndex < constraints.image_format_constraints_count;
2704          imageFormatIndex++) {
2705         const auto& in = constraints.image_format_constraints[imageFormatIndex];
2706         // These checks are sorted in order of how often they're expected to
2707         // mismatch, from most likely to least likely. They aren't always
2708         // equality comparisons, since sysmem may change some values in
2709         // compatible ways on behalf of the other participants.
2710         if ((out.pixel_format.type != in.pixel_format.type) ||
2711             (out.pixel_format.has_format_modifier != in.pixel_format.has_format_modifier) ||
2712             (out.pixel_format.format_modifier.value != in.pixel_format.format_modifier.value) ||
2713             (out.min_bytes_per_row < in.min_bytes_per_row) ||
2714             (out.required_max_coded_width < in.required_max_coded_width) ||
2715             (out.required_max_coded_height < in.required_max_coded_height) ||
2716             (in.bytes_per_row_divisor != 0 &&
2717              out.bytes_per_row_divisor % in.bytes_per_row_divisor != 0)) {
2718             continue;
2719         }
2720         // Check if the out colorspaces are a subset of the in color spaces.
2721         bool all_color_spaces_found = true;
2722         for (uint32_t j = 0; j < out.color_spaces_count; j++) {
2723             bool found_matching_color_space = false;
2724             for (uint32_t k = 0; k < in.color_spaces_count; k++) {
2725                 if (out.color_space[j].type == in.color_space[k].type) {
2726                     found_matching_color_space = true;
2727                     break;
2728                 }
2729             }
2730             if (!found_matching_color_space) {
2731                 all_color_spaces_found = false;
2732                 break;
2733             }
2734         }
2735         if (!all_color_spaces_found) {
2736             continue;
2737         }
2738 
2739         // Choose the first valid format for now.
2740         *outCreateInfoIndex = createInfoIndices[imageFormatIndex];
2741         return VK_SUCCESS;
2742     }
2743 
2744     mesa_loge("%s: cannot find a valid image format in constraints", __func__);
2745     return VK_ERROR_OUT_OF_DEVICE_MEMORY;
2746 }
2747 
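// Waits for sysmem allocation to complete, then fills the properties:
// memoryTypeBits from the goldfish heap type, bufferCount from the
// allocation, and (only when image format constraints were negotiated) the
// sysmem pixel format, color space, createInfoIndex, and formatFeatures
// keyed on tiling. Results are cached with pNext chains stripped, since only
// a shallow copy is stored.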
2748 VkResult ResourceTracker::on_vkGetBufferCollectionPropertiesFUCHSIA(
2749     void* context, VkResult, VkDevice device, VkBufferCollectionFUCHSIA collection,
2750     VkBufferCollectionPropertiesFUCHSIA* pProperties) {
2751     VkEncoder* enc = (VkEncoder*)context;
2752     const auto& sysmem_collection =
2753         *reinterpret_cast<fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(collection);
2754 
2755     auto result = sysmem_collection->WaitForBuffersAllocated();
2756     if (!result.ok() || result->status != ZX_OK) {
2757         mesa_loge("Failed wait for allocation: %d %d", result.status(),
2758                   GET_STATUS_SAFE(result, status));
2759         return VK_ERROR_INITIALIZATION_FAILED;
2760     }
2761     fuchsia_sysmem::wire::BufferCollectionInfo2 info = std::move(result->buffer_collection_info);
2762 
2763     bool is_host_visible =
2764         info.settings.buffer_settings.heap == fuchsia_sysmem::wire::HeapType::kGoldfishHostVisible;
2765     bool is_device_local =
2766         info.settings.buffer_settings.heap == fuchsia_sysmem::wire::HeapType::kGoldfishDeviceLocal;
2767     if (!is_host_visible && !is_device_local) {
2768         mesa_loge("buffer collection uses a non-goldfish heap (type 0x%lx)",
2769                   static_cast<uint64_t>(info.settings.buffer_settings.heap));
2770         return VK_ERROR_INITIALIZATION_FAILED;
2771     }
2772 
2773     // memoryTypeBits
2774     // ====================================================================
2775     {
2776         std::lock_guard<std::recursive_mutex> lock(mLock);
2777         auto deviceIt = info_VkDevice.find(device);
2778         if (deviceIt == info_VkDevice.end()) {
2779             return VK_ERROR_INITIALIZATION_FAILED;
2780         }
2781         auto& deviceInfo = deviceIt->second;
2782 
2783         // Device local memory type supported.
2784         pProperties->memoryTypeBits = 0;
2785         for (uint32_t i = 0; i < deviceInfo.memProps.memoryTypeCount; ++i) {
2786             if ((is_device_local && (deviceInfo.memProps.memoryTypes[i].propertyFlags &
2787                                      VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)) ||
2788                 (is_host_visible && (deviceInfo.memProps.memoryTypes[i].propertyFlags &
2789                                      VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT))) {
2790                 pProperties->memoryTypeBits |= 1ull << i;
2791             }
2792         }
2793     }
2794 
2795     // bufferCount
2796     // ====================================================================
2797     pProperties->bufferCount = info.buffer_count;
2798 
2799     auto storeProperties = [this, collection, pProperties]() -> VkResult {
2800         // store properties to storage
2801         std::lock_guard<std::recursive_mutex> lock(mLock);
2802         if (info_VkBufferCollectionFUCHSIA.find(collection) ==
2803             info_VkBufferCollectionFUCHSIA.end()) {
2804             return VK_ERROR_OUT_OF_DEVICE_MEMORY;
2805         }
2806 
2807         info_VkBufferCollectionFUCHSIA[collection].properties =
2808             std::make_optional<VkBufferCollectionPropertiesFUCHSIA>(*pProperties);
2809 
2810         // We only do a shallow copy so we should remove all pNext pointers.
2811         info_VkBufferCollectionFUCHSIA[collection].properties->pNext = nullptr;
2812         info_VkBufferCollectionFUCHSIA[collection].properties->sysmemColorSpaceIndex.pNext =
2813             nullptr;
2814         return VK_SUCCESS;
2815     };
2816 
2817     // The fields below only apply to buffer collections with image formats.
2818     if (!info.settings.has_image_format_constraints) {
2819         mesa_logd("%s: buffer collection doesn't have image format constraints", __func__);
2820         return storeProperties();
2821     }
2822 
2823     // sysmemFormat
2824     // ====================================================================
2825 
2826     pProperties->sysmemPixelFormat =
2827         static_cast<uint64_t>(info.settings.image_format_constraints.pixel_format.type);
2828 
2829     // colorSpace
2830     // ====================================================================
2831     if (info.settings.image_format_constraints.color_spaces_count == 0) {
2832         mesa_loge(
2833             "%s: color space missing from allocated buffer collection "
2834             "constraints",
2835             __func__);
2836         return VK_ERROR_OUT_OF_DEVICE_MEMORY;
2837     }
2838     // Only report first colorspace for now.
2839     pProperties->sysmemColorSpaceIndex.colorSpace =
2840         static_cast<uint32_t>(info.settings.image_format_constraints.color_space[0].type);
2841 
2842     // createInfoIndex
2843     // ====================================================================
2844     {
2845         std::lock_guard<std::recursive_mutex> lock(mLock);
2846         auto getIndexResult = getBufferCollectionImageCreateInfoIndexLocked(
2847             collection, info, &pProperties->createInfoIndex);
2848         if (getIndexResult != VK_SUCCESS) {
2849             return getIndexResult;
2850         }
2851     }
2852 
2853     // formatFeatures
2854     // ====================================================================
2855     VkPhysicalDevice physicalDevice;
2856     {
2857         std::lock_guard<std::recursive_mutex> lock(mLock);
2858         auto deviceIt = info_VkDevice.find(device);
2859         if (deviceIt == info_VkDevice.end()) {
2860             return VK_ERROR_INITIALIZATION_FAILED;
2861         }
2862         physicalDevice = deviceIt->second.physdev;
2863     }
2864 
2865     VkFormat vkFormat =
2866         sysmemPixelFormatTypeToVk(info.settings.image_format_constraints.pixel_format.type);
2867     VkFormatProperties formatProperties;
2868     enc->vkGetPhysicalDeviceFormatProperties(physicalDevice, vkFormat, &formatProperties,
2869                                              true /* do lock */);
2870     if (is_device_local) {
2871         pProperties->formatFeatures = formatProperties.optimalTilingFeatures;
2872     }
2873     if (is_host_visible) {
2874         pProperties->formatFeatures = formatProperties.linearTilingFeatures;
2875     }
2876 
2877     // YCbCr properties
2878     // ====================================================================
2879     // TODO(59804): Implement this correctly when we support YUV pixel
2880     // formats in goldfish ICD.
2881     pProperties->samplerYcbcrConversionComponents.r = VK_COMPONENT_SWIZZLE_IDENTITY;
2882     pProperties->samplerYcbcrConversionComponents.g = VK_COMPONENT_SWIZZLE_IDENTITY;
2883     pProperties->samplerYcbcrConversionComponents.b = VK_COMPONENT_SWIZZLE_IDENTITY;
2884     pProperties->samplerYcbcrConversionComponents.a = VK_COMPONENT_SWIZZLE_IDENTITY;
2885     pProperties->suggestedYcbcrModel = VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY;
2886     pProperties->suggestedYcbcrRange = VK_SAMPLER_YCBCR_RANGE_ITU_FULL;
2887     pProperties->suggestedXChromaOffset = VK_CHROMA_LOCATION_MIDPOINT;
2888     pProperties->suggestedYChromaOffset = VK_CHROMA_LOCATION_MIDPOINT;
2889 
2890     return storeProperties();
2891 }
2892 #endif
2893 
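// Collapses Vulkan formats that share a memory layout onto a single virgl
// format; the numeric interpretation (UNORM/SRGB/SINT/...) stays with the
// Vulkan format on the host. For example, both VK_FORMAT_R8G8B8A8_UNORM and
// VK_FORMAT_R8G8B8A8_SRGB map to VIRGL_FORMAT_R8G8B8A8_UNORM. Unhandled
// formats return 0, which callers can treat as "no virgl equivalent".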
2894 static uint32_t getVirglFormat(VkFormat vkFormat) {
2895     uint32_t virglFormat = 0;
2896 
2897     switch (vkFormat) {
2898         case VK_FORMAT_R8G8B8A8_SINT:
2899         case VK_FORMAT_R8G8B8A8_UNORM:
2900         case VK_FORMAT_R8G8B8A8_SRGB:
2901         case VK_FORMAT_R8G8B8A8_SNORM:
2902         case VK_FORMAT_R8G8B8A8_SSCALED:
2903         case VK_FORMAT_R8G8B8A8_USCALED:
2904             virglFormat = VIRGL_FORMAT_R8G8B8A8_UNORM;
2905             break;
2906         case VK_FORMAT_B8G8R8A8_SINT:
2907         case VK_FORMAT_B8G8R8A8_UNORM:
2908         case VK_FORMAT_B8G8R8A8_SRGB:
2909         case VK_FORMAT_B8G8R8A8_SNORM:
2910         case VK_FORMAT_B8G8R8A8_SSCALED:
2911         case VK_FORMAT_B8G8R8A8_USCALED:
2912             virglFormat = VIRGL_FORMAT_B8G8R8A8_UNORM;
2913             break;
2914         case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
2915             virglFormat = VIRGL_FORMAT_R10G10B10A2_UNORM;
2916             break;
2917         default:
2918             break;
2919     }
2920 
2921     return virglFormat;
2922 }
2923 
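// Makes host memory directly mappable in the guest via one of two paths:
//   - Android direct mem: vkMapMemoryIntoAddressSpaceGOOGLE returns a GPU
//     address backed by a goldfish address-space block.
//   - virtio-gpu blob: vkGetMemoryHostAddressInfoGOOGLE returns an
//     (hva, size, blob id) triple; the blob id is used to create a mappable
//     host3d blob that is then mapped into the guest.
// On failure, |res| carries the VkResult and nullptr is returned.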
2924 CoherentMemoryPtr ResourceTracker::createCoherentMemory(
2925     VkDevice device, VkDeviceMemory mem, const VkMemoryAllocateInfo& hostAllocationInfo,
2926     VkEncoder* enc, VkResult& res) {
2927     CoherentMemoryPtr coherentMemory = nullptr;
2928 
2929 #if DETECT_OS_ANDROID
2930     if (mFeatureInfo.hasDirectMem) {
2931         uint64_t gpuAddr = 0;
2932         GoldfishAddressSpaceBlockPtr block = nullptr;
2933         res = enc->vkMapMemoryIntoAddressSpaceGOOGLE(device, mem, &gpuAddr, true);
2934         if (res != VK_SUCCESS) {
2935             mesa_loge(
2936                 "Failed to create coherent memory: vkMapMemoryIntoAddressSpaceGOOGLE "
2937                 "returned:%d.",
2938                 res);
2939             return coherentMemory;
2940         }
2941         {
2942             std::lock_guard<std::recursive_mutex> lock(mLock);
2943             auto it = info_VkDeviceMemory.find(mem);
2944             if (it == info_VkDeviceMemory.end()) {
2945                 mesa_loge("Failed to create coherent memory: failed to find device memory.");
2946                 res = VK_ERROR_OUT_OF_HOST_MEMORY;
2947                 return coherentMemory;
2948             }
2949             auto& info = it->second;
2950             block = info.goldfishBlock;
2951             info.goldfishBlock = nullptr;
2952 
2953             coherentMemory = std::make_shared<CoherentMemory>(
2954                 block, gpuAddr, hostAllocationInfo.allocationSize, device, mem);
2955         }
2956     } else
2957 #endif  // DETECT_OS_ANDROID
2958         if (mFeatureInfo.hasVirtioGpuNext) {
2959             struct VirtGpuCreateBlob createBlob = {0};
2960             uint64_t hvaSizeId[3];
2961             res = enc->vkGetMemoryHostAddressInfoGOOGLE(device, mem, &hvaSizeId[0], &hvaSizeId[1],
2962                                                         &hvaSizeId[2], true /* do lock */);
2963             if (res != VK_SUCCESS) {
2964                 mesa_loge(
2965                 mesa_loge(
2966                     "Failed to create coherent memory: vkGetMemoryHostAddressInfoGOOGLE "
2966                     "returned:%d.",
2967                     res);
2968                 return coherentMemory;
2969             }
2970             {
2971                 std::lock_guard<std::recursive_mutex> lock(mLock);
2972                 VirtGpuDevice* instance = VirtGpuDevice::getInstance((enum VirtGpuCapset)3);
2973                 createBlob.blobMem = kBlobMemHost3d;
2974                 createBlob.flags = kBlobFlagMappable;
2975                 createBlob.blobId = hvaSizeId[2];
2976                 createBlob.size = hostAllocationInfo.allocationSize;
2977 
2978                 auto blob = instance->createBlob(createBlob);
2979                 if (!blob) {
2980                     mesa_loge("Failed to create coherent memory: failed to create blob.");
2981                     res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
2982                     return coherentMemory;
2983                 }
2984 
2985                 VirtGpuResourceMappingPtr mapping = blob->createMapping();
2986                 if (!mapping) {
2987                     mesa_loge("Failed to create coherent memory: failed to create blob mapping.");
2988                     res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
2989                     return coherentMemory;
2990                 }
2991 
2992                 coherentMemory =
2993                     std::make_shared<CoherentMemory>(mapping, createBlob.size, device, mem);
2994             }
2995         } else {
2996             mesa_loge("FATAL: Unsupported virtual memory feature");
2997             abort();
2998         }
2999     return coherentMemory;
3000 }
3001 
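// Sizing policy for the host-side allocation:
//   - deferred mapping / guest handles: round up to the capset's blob
//     alignment (the allocation corresponds 1:1 to a blob).
//   - dedicated allocations (e.g. device-address memory): over-align to
//     kLargestPageSize (see the comment below).
//   - everything else: round up to kMegaByte and allocate at least
//     kDefaultHostMemBlockSize, so the block can serve later suballocations.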
3002 VkResult ResourceTracker::allocateCoherentMemory(VkDevice device,
3003                                                  const VkMemoryAllocateInfo* pAllocateInfo,
3004                                                  VkEncoder* enc, VkDeviceMemory* pMemory) {
3005     uint64_t offset = 0;
3006     uint8_t* ptr = nullptr;
3007     VkMemoryAllocateFlagsInfo allocFlagsInfo;
3008     VkMemoryOpaqueCaptureAddressAllocateInfo opaqueCaptureAddressAllocInfo;
3009     VkCreateBlobGOOGLE createBlobInfo;
3010     VirtGpuResourcePtr guestBlob = nullptr;
3011 
3012     memset(&createBlobInfo, 0, sizeof(struct VkCreateBlobGOOGLE));
3013     createBlobInfo.sType = VK_STRUCTURE_TYPE_CREATE_BLOB_GOOGLE;
3014 
3015     const VkMemoryAllocateFlagsInfo* allocFlagsInfoPtr =
3016         vk_find_struct_const(pAllocateInfo, MEMORY_ALLOCATE_FLAGS_INFO);
3017     const VkMemoryOpaqueCaptureAddressAllocateInfo* opaqueCaptureAddressAllocInfoPtr =
3018         vk_find_struct_const(pAllocateInfo, MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO);
3019 
3020     bool deviceAddressMemoryAllocation =
3021         allocFlagsInfoPtr &&
3022         ((allocFlagsInfoPtr->flags & VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT) ||
3023          (allocFlagsInfoPtr->flags & VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT));
3024 
3025     bool dedicated = deviceAddressMemoryAllocation;
3026 
3027     if (mCaps.vulkanCapset.deferredMapping || mCaps.params[kParamCreateGuestHandle])
3028         dedicated = true;
3029 
3030     VkMemoryAllocateInfo hostAllocationInfo = vk_make_orphan_copy(*pAllocateInfo);
3031     vk_struct_chain_iterator structChainIter = vk_make_chain_iterator(&hostAllocationInfo);
3032 
3033     if (mCaps.vulkanCapset.deferredMapping || mCaps.params[kParamCreateGuestHandle]) {
3034         hostAllocationInfo.allocationSize =
3035             ALIGN_POT(pAllocateInfo->allocationSize, mCaps.vulkanCapset.blobAlignment);
3036     } else if (dedicated) {
3037         // Over-align to kLargestPageSize to work around some Windows drivers
3038         // (b:152769369). We can likely have the host report the desired alignment instead.
3039         hostAllocationInfo.allocationSize =
3040             ALIGN_POT(pAllocateInfo->allocationSize, kLargestPageSize);
3041     } else {
3042         VkDeviceSize roundedUpAllocSize = ALIGN_POT(pAllocateInfo->allocationSize, kMegaByte);
3043         hostAllocationInfo.allocationSize = std::max(roundedUpAllocSize, kDefaultHostMemBlockSize);
3044     }
3045 
3046     // Support device address capture/replay allocations
3047     if (deviceAddressMemoryAllocation) {
3048         if (allocFlagsInfoPtr) {
3049             mesa_logd("%s: has alloc flags\n", __func__);
3050             allocFlagsInfo = *allocFlagsInfoPtr;
3051             vk_append_struct(&structChainIter, &allocFlagsInfo);
3052         }
3053 
3054         if (opaqueCaptureAddressAllocInfoPtr) {
3055             mesa_logd("%s: has opaque capture address\n", __func__);
3056             opaqueCaptureAddressAllocInfo = *opaqueCaptureAddressAllocInfoPtr;
3057             vk_append_struct(&structChainIter, &opaqueCaptureAddressAllocInfo);
3058         }
3059     }
3060 
3061     if (mCaps.params[kParamCreateGuestHandle]) {
3062         struct VirtGpuCreateBlob createBlob = {0};
3063         struct VirtGpuExecBuffer exec = {};
3064         VirtGpuDevice* instance = VirtGpuDevice::getInstance();
3065         struct gfxstreamPlaceholderCommandVk placeholderCmd = {};
3066 
3067         createBlobInfo.blobId = ++mAtomicId;
3068         createBlobInfo.blobMem = kBlobMemGuest;
3069         createBlobInfo.blobFlags = kBlobFlagCreateGuestHandle;
3070         vk_append_struct(&structChainIter, &createBlobInfo);
3071 
3072         createBlob.blobMem = kBlobMemGuest;
3073         createBlob.flags = kBlobFlagCreateGuestHandle;
3074         createBlob.blobId = createBlobInfo.blobId;
3075         createBlob.size = hostAllocationInfo.allocationSize;
3076 
3077         guestBlob = instance->createBlob(createBlob);
3078         if (!guestBlob) {
3079             mesa_loge("Failed to allocate coherent memory: failed to create blob.");
3080             return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3081         }
3082 
3083         placeholderCmd.hdr.opCode = GFXSTREAM_PLACEHOLDER_COMMAND_VK;
3084         exec.command = static_cast<void*>(&placeholderCmd);
3085         exec.command_size = sizeof(placeholderCmd);
3086         exec.flags = kRingIdx;
3087         exec.ring_idx = 1;
3088         if (instance->execBuffer(exec, guestBlob.get())) {
3089             mesa_loge("Failed to allocate coherent memory: failed to execbuffer for wait.");
3090             return VK_ERROR_OUT_OF_HOST_MEMORY;
3091         }
3092 
3093         guestBlob->wait();
3094     } else if (mCaps.vulkanCapset.deferredMapping) {
3095         createBlobInfo.blobId = ++mAtomicId;
3096         createBlobInfo.blobMem = kBlobMemHost3d;
3097         vk_append_struct(&structChainIter, &createBlobInfo);
3098     }
3099 
3100     VkDeviceMemory mem = VK_NULL_HANDLE;
3101     VkResult host_res =
3102         enc->vkAllocateMemory(device, &hostAllocationInfo, nullptr, &mem, true /* do lock */);
3103     if (host_res != VK_SUCCESS) {
3104         mesa_loge("Failed to allocate coherent memory: failed to allocate on the host: %d.",
3105                   host_res);
3106         return host_res;
3107     }
3108 
3109     struct VkDeviceMemory_Info info;
3110     if (mCaps.vulkanCapset.deferredMapping || mCaps.params[kParamCreateGuestHandle]) {
3111         info.allocationSize = pAllocateInfo->allocationSize;
3112         info.blobId = createBlobInfo.blobId;
3113     }
3114 
3115     if (guestBlob) {
3116         auto mapping = guestBlob->createMapping();
3117         if (!mapping) {
3118             mesa_loge("Failed to allocate coherent memory: failed to create blob mapping.");
3119             return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3120         }
3121 
3122         auto coherentMemory = std::make_shared<CoherentMemory>(
3123             mapping, hostAllocationInfo.allocationSize, device, mem);
3124 
3125         coherentMemory->subAllocate(pAllocateInfo->allocationSize, &ptr, offset);
3126         info.coherentMemoryOffset = offset;
3127         info.coherentMemory = coherentMemory;
3128         info.ptr = ptr;
3129     }
3130 
3131     info.coherentMemorySize = hostAllocationInfo.allocationSize;
3132     info.memoryTypeIndex = hostAllocationInfo.memoryTypeIndex;
3133     info.device = device;
3134     info.dedicated = dedicated;
3135     {
3136         // createCoherentMemory() below needs to look up this memory in
3137         // info_VkDeviceMemory, so record the info before using it.
3138         std::lock_guard<std::recursive_mutex> lock(mLock);
3139         info_VkDeviceMemory[mem] = info;
3140     }
3141 
3142     if (mCaps.vulkanCapset.deferredMapping || mCaps.params[kParamCreateGuestHandle]) {
3143         *pMemory = mem;
3144         return host_res;
3145     }
3146 
3147     auto coherentMemory = createCoherentMemory(device, mem, hostAllocationInfo, enc, host_res);
3148     if (coherentMemory) {
3149         std::lock_guard<std::recursive_mutex> lock(mLock);
3150         coherentMemory->subAllocate(pAllocateInfo->allocationSize, &ptr, offset);
3151         info.allocationSize = pAllocateInfo->allocationSize;
3152         info.coherentMemoryOffset = offset;
3153         info.coherentMemory = coherentMemory;
3154         info.ptr = ptr;
3155         info_VkDeviceMemory[mem] = info;
3156         *pMemory = mem;
3157     } else {
3158         enc->vkFreeMemory(device, mem, nullptr, true);
3159         std::lock_guard<std::recursive_mutex> lock(mLock);
3160         info_VkDeviceMemory.erase(mem);
3161     }
3162     return host_res;
3163 }
3164 
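// Suballocation fast path: before asking the host for new memory, scan the
// existing coherent blocks for one on the same device with the same memory
// type index that is not dedicated and still has room. On a hit, hand the
// application an alias VkDeviceMemory pointing into that block; otherwise
// fall through to allocateCoherentMemory().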
3165 VkResult ResourceTracker::getCoherentMemory(const VkMemoryAllocateInfo* pAllocateInfo,
3166                                             VkEncoder* enc, VkDevice device,
3167                                             VkDeviceMemory* pMemory) {
3168     // Add buffer device address capture structs
3169     const VkMemoryAllocateFlagsInfo* allocFlagsInfoPtr =
3170         vk_find_struct_const(pAllocateInfo, MEMORY_ALLOCATE_FLAGS_INFO);
3171 
3172     bool dedicated =
3173         allocFlagsInfoPtr &&
3174         ((allocFlagsInfoPtr->flags & VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT) ||
3175          (allocFlagsInfoPtr->flags & VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT));
3176 
3177     if (mCaps.vulkanCapset.deferredMapping || mCaps.params[kParamCreateGuestHandle])
3178         dedicated = true;
3179 
3180     CoherentMemoryPtr coherentMemory = nullptr;
3181     uint8_t* ptr = nullptr;
3182     uint64_t offset = 0;
3183     {
3184         std::lock_guard<std::recursive_mutex> lock(mLock);
3185         for (const auto& [memory, info] : info_VkDeviceMemory) {
3186             if (info.device != device) continue;
3187 
3188             if (info.memoryTypeIndex != pAllocateInfo->memoryTypeIndex) continue;
3189 
3190             if (info.dedicated || dedicated) continue;
3191 
3192             if (!info.coherentMemory) continue;
3193 
3194             if (!info.coherentMemory->subAllocate(pAllocateInfo->allocationSize, &ptr, offset))
3195                 continue;
3196 
3197             coherentMemory = info.coherentMemory;
3198             break;
3199         }
3200         if (coherentMemory) {
3201             struct VkDeviceMemory_Info info;
3202             info.coherentMemoryOffset = offset;
3203             info.ptr = ptr;
3204             info.memoryTypeIndex = pAllocateInfo->memoryTypeIndex;
3205             info.allocationSize = pAllocateInfo->allocationSize;
3206             info.coherentMemory = coherentMemory;
3207             info.device = device;
3208 
3209             // For suballocated memory, create an alias VkDeviceMemory handle for the
3210             // application; the memory backing the suballocation is still the
3211             // VkDeviceMemory associated with the CoherentMemory block.
3212             auto mem = new_from_host_VkDeviceMemory(VK_NULL_HANDLE);
3213             info_VkDeviceMemory[mem] = info;
3214             *pMemory = mem;
3215             return VK_SUCCESS;
3216         }
3217     }
3218     return allocateCoherentMemory(device, pAllocateInfo, enc, pMemory);
3219 }
3220 
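// vkAllocateMemory is where every external-memory flavor meets. The pNext
// chain is classified into export requests (AHardwareBuffer, VMO, dmabuf)
// and import requests (AHB, sysmem buffer collection, VMO, dmabuf); in all
// cases the host-side operation ultimately imports a host-owned resource
// (color buffer, buffer, or blob). The
// _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT macro below routes early
// failures through the VK_EXT_device_memory_report callback.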
3221 VkResult ResourceTracker::on_vkAllocateMemory(void* context, VkResult input_result, VkDevice device,
3222                                               const VkMemoryAllocateInfo* pAllocateInfo,
3223                                               const VkAllocationCallbacks* pAllocator,
3224                                               VkDeviceMemory* pMemory) {
3225 #define _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(result)                                      \
3226     {                                                                                          \
3227         auto it = info_VkDevice.find(device);                                                  \
3228         if (it == info_VkDevice.end()) return result;                                          \
3229         emitDeviceMemoryReport(it->second,                                                     \
3230                                VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATION_FAILED_EXT, 0,    \
3231                                pAllocateInfo->allocationSize, VK_OBJECT_TYPE_DEVICE_MEMORY, 0, \
3232                                pAllocateInfo->memoryTypeIndex);                                \
3233         return result;                                                                         \
3234     }
3235 
3236     if (input_result != VK_SUCCESS) _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(input_result);
3237 
3238     VkEncoder* enc = (VkEncoder*)context;
3239 
3240     bool hasDedicatedImage = false;
3241     bool hasDedicatedBuffer = false;
3242 
3243     VkMemoryAllocateInfo finalAllocInfo = vk_make_orphan_copy(*pAllocateInfo);
3244     vk_struct_chain_iterator structChainIter = vk_make_chain_iterator(&finalAllocInfo);
3245 
3246     VkMemoryAllocateFlagsInfo allocFlagsInfo;
3247     VkMemoryOpaqueCaptureAddressAllocateInfo opaqueCaptureAddressAllocInfo;
3248 
3249     // Add buffer device address capture structs
3250     const VkMemoryAllocateFlagsInfo* allocFlagsInfoPtr =
3251         vk_find_struct_const(pAllocateInfo, MEMORY_ALLOCATE_FLAGS_INFO);
3252     const VkMemoryOpaqueCaptureAddressAllocateInfo* opaqueCaptureAddressAllocInfoPtr =
3253         vk_find_struct_const(pAllocateInfo, MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO);
3254 
3255     if (allocFlagsInfoPtr) {
3256         mesa_logd("%s: has alloc flags\n", __func__);
3257         allocFlagsInfo = *allocFlagsInfoPtr;
3258         vk_append_struct(&structChainIter, &allocFlagsInfo);
3259     }
3260 
3261     if (opaqueCaptureAddressAllocInfoPtr) {
3262         mesa_logd("%s: has opaque capture address\n", __func__);
3263         opaqueCaptureAddressAllocInfo = *opaqueCaptureAddressAllocInfoPtr;
3264         vk_append_struct(&structChainIter, &opaqueCaptureAddressAllocInfo);
3265     }
3266 
3267     VkMemoryDedicatedAllocateInfo dedicatedAllocInfo;
3268     VkImportColorBufferGOOGLE importCbInfo = {
3269         VK_STRUCTURE_TYPE_IMPORT_COLOR_BUFFER_GOOGLE,
3270         0,
3271     };
3272     VkImportBufferGOOGLE importBufferInfo = {
3273         VK_STRUCTURE_TYPE_IMPORT_BUFFER_GOOGLE,
3274         0,
3275     };
3276     // VkImportPhysicalAddressGOOGLE importPhysAddrInfo = {
3277     //     VK_STRUCTURE_TYPE_IMPORT_PHYSICAL_ADDRESS_GOOGLE, 0,
3278     // };
3279 
3280     const VkExportMemoryAllocateInfo* exportAllocateInfoPtr =
3281         vk_find_struct_const(pAllocateInfo, EXPORT_MEMORY_ALLOCATE_INFO);
3282 
3283 #ifdef VK_USE_PLATFORM_ANDROID_KHR
3284     const VkImportAndroidHardwareBufferInfoANDROID* importAhbInfoPtr =
3285         vk_find_struct_const(pAllocateInfo, IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID);
3286     // Even if we export allocate, the underlying operation
3287     // for the host is always going to be an import operation.
3288     // This is also how Intel's implementation works,
3289     // and is generally simpler;
3290     // even in an export allocation,
3291     // we perform AHardwareBuffer allocation
3292     // on the guest side, at this layer,
3293     // and then we attach a new VkDeviceMemory
3294     // to the AHardwareBuffer on the host via an "import" operation.
3295     AHardwareBuffer* ahw = nullptr;
3296 #else
3297     const void* importAhbInfoPtr = nullptr;
3298     void* ahw = nullptr;
3299 #endif
3300 
3301 #if DETECT_OS_LINUX && !defined(VK_USE_PLATFORM_ANDROID_KHR)
3302     const VkImportMemoryFdInfoKHR* importFdInfoPtr =
3303         vk_find_struct_const(pAllocateInfo, IMPORT_MEMORY_FD_INFO_KHR);
3304 #else
3305     const VkImportMemoryFdInfoKHR* importFdInfoPtr = nullptr;
3306 #endif
3307 
3308 #ifdef VK_USE_PLATFORM_FUCHSIA
3309     const VkImportMemoryBufferCollectionFUCHSIA* importBufferCollectionInfoPtr =
3310         vk_find_struct_const(pAllocateInfo, IMPORT_MEMORY_BUFFER_COLLECTION_FUCHSIA);
3311 
3312     const VkImportMemoryZirconHandleInfoFUCHSIA* importVmoInfoPtr =
3313         vk_find_struct_const(pAllocateInfo, IMPORT_MEMORY_ZIRCON_HANDLE_INFO_FUCHSIA);
3314 #else
3315     const void* importBufferCollectionInfoPtr = nullptr;
3316     const void* importVmoInfoPtr = nullptr;
3317 #endif  // VK_USE_PLATFORM_FUCHSIA
3318 
3319     const VkMemoryDedicatedAllocateInfo* dedicatedAllocInfoPtr =
3320         vk_find_struct_const(pAllocateInfo, MEMORY_DEDICATED_ALLOCATE_INFO);
3321 
3322     // Note for AHardwareBuffers, the Vulkan spec states:
3323     //
3324     //     Android hardware buffers have intrinsic width, height, format, and usage
3325     //     properties, so Vulkan images bound to memory imported from an Android
3326     //     hardware buffer must use dedicated allocations
3327     //
3328     // so any allocation requests with a VkImportAndroidHardwareBufferInfoANDROID
3329     // will necessarily have a VkMemoryDedicatedAllocateInfo. However, the host
3330     // may or may not actually use a dedicated allocation to emulate
3331     // AHardwareBuffers. As such, the VkMemoryDedicatedAllocateInfo is passed to the
3332     // host and the host will decide whether or not to use it.
3333 
3334     bool shouldPassThroughDedicatedAllocInfo =
3335         !exportAllocateInfoPtr && !importBufferCollectionInfoPtr && !importVmoInfoPtr;
3336 
3337     const VkPhysicalDeviceMemoryProperties& physicalDeviceMemoryProps =
3338         getPhysicalDeviceMemoryProperties(context, device, VK_NULL_HANDLE);
3339 
3340     const bool requestedMemoryIsHostVisible =
3341         isHostVisible(&physicalDeviceMemoryProps, pAllocateInfo->memoryTypeIndex);
3342 
3343 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || DETECT_OS_LINUX
3344     shouldPassThroughDedicatedAllocInfo &= !requestedMemoryIsHostVisible;
3345 #endif  // VK_USE_PLATFORM_ANDROID_KHR || DETECT_OS_LINUX
3346 
3347     if (shouldPassThroughDedicatedAllocInfo && dedicatedAllocInfoPtr) {
3348         dedicatedAllocInfo = vk_make_orphan_copy(*dedicatedAllocInfoPtr);
3349         vk_append_struct(&structChainIter, &dedicatedAllocInfo);
3350     }
3351 
3352     // State needed for import/export.
3353     bool exportAhb = false;
3354     bool exportVmo = false;
3355     bool exportDmabuf = false;
3356     bool importAhb = false;
3357     bool importBufferCollection = false;
3358     bool importVmo = false;
3359     bool importDmabuf = false;
3360     (void)exportVmo;
3361     (void)exportAhb;
3362 
3363     if (exportAllocateInfoPtr) {
3364         exportAhb = exportAllocateInfoPtr->handleTypes &
3365                     VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
3366 #ifdef VK_USE_PLATFORM_FUCHSIA
3367         exportVmo = exportAllocateInfoPtr->handleTypes &
3368                     VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA;
3369 #endif  // VK_USE_PLATFORM_FUCHSIA
3370         exportDmabuf =
3371             exportAllocateInfoPtr->handleTypes & (VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
3372                                                   VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
3373     } else if (importAhbInfoPtr) {
3374         importAhb = true;
3375     } else if (importBufferCollectionInfoPtr) {
3376         importBufferCollection = true;
3377     } else if (importVmoInfoPtr) {
3378         importVmo = true;
3379     }
3380 
3381     if (importFdInfoPtr) {
3382         importDmabuf =
3383             (importFdInfoPtr->handleType & (VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
3384                                             VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT));
3385     }
3386     bool isImport = importAhb || importBufferCollection || importVmo || importDmabuf;
3387 
3388 #if defined(VK_USE_PLATFORM_ANDROID_KHR)
3389     if (exportAhb) {
3390         hasDedicatedImage =
3391             dedicatedAllocInfoPtr && (dedicatedAllocInfoPtr->image != VK_NULL_HANDLE);
3392         hasDedicatedBuffer =
3393             dedicatedAllocInfoPtr && (dedicatedAllocInfoPtr->buffer != VK_NULL_HANDLE);
3394         VkExtent3D imageExtent = {0, 0, 0};
3395         uint32_t imageLayers = 0;
3396         VkFormat imageFormat = VK_FORMAT_UNDEFINED;
3397         VkImageUsageFlags imageUsage = 0;
3398         VkImageCreateFlags imageCreateFlags = 0;
3399         VkDeviceSize bufferSize = 0;
3400         VkDeviceSize allocationInfoAllocSize = finalAllocInfo.allocationSize;
3401 
3402         if (hasDedicatedImage) {
3403             std::lock_guard<std::recursive_mutex> lock(mLock);
3404 
3405             auto it = info_VkImage.find(dedicatedAllocInfoPtr->image);
3406             if (it == info_VkImage.end())
3407                 _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(VK_ERROR_INITIALIZATION_FAILED);
3408             const auto& info = it->second;
3409             const auto& imgCi = info.createInfo;
3410 
3411             imageExtent = imgCi.extent;
3412             imageLayers = imgCi.arrayLayers;
3413             imageFormat = imgCi.format;
3414             imageUsage = imgCi.usage;
3415             imageCreateFlags = imgCi.flags;
3416         }
3417 
3418         if (hasDedicatedBuffer) {
3419             std::lock_guard<std::recursive_mutex> lock(mLock);
3420 
3421             auto it = info_VkBuffer.find(dedicatedAllocInfoPtr->buffer);
3422             if (it == info_VkBuffer.end())
3423                 _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(VK_ERROR_INITIALIZATION_FAILED);
3424             const auto& info = it->second;
3425             const auto& bufCi = info.createInfo;
3426 
3427             bufferSize = bufCi.size;
3428         }
3429 
3430         VkResult ahbCreateRes = createAndroidHardwareBuffer(
3431             mGralloc.get(), hasDedicatedImage, hasDedicatedBuffer, imageExtent, imageLayers,
3432             imageFormat, imageUsage, imageCreateFlags, bufferSize, allocationInfoAllocSize, &ahw);
3433 
3434         if (ahbCreateRes != VK_SUCCESS) {
3435             _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(ahbCreateRes);
3436         }
3437     }
3438 
3439     if (importAhb) {
3440         ahw = importAhbInfoPtr->buffer;
3441         // We still need to acquire the AHardwareBuffer.
3442         importAndroidHardwareBuffer(mGralloc.get(), importAhbInfoPtr, nullptr);
3443     }
3444 
3445     if (ahw) {
3446         const uint32_t hostHandle = mGralloc->getHostHandle(ahw);
3447         if (mGralloc->getFormat(ahw) == AHARDWAREBUFFER_FORMAT_BLOB &&
3448             !mGralloc->treatBlobAsImage()) {
3449             importBufferInfo.buffer = hostHandle;
3450             vk_append_struct(&structChainIter, &importBufferInfo);
3451         } else {
3452             importCbInfo.colorBuffer = hostHandle;
3453             vk_append_struct(&structChainIter, &importCbInfo);
3454         }
3455     }
3456 #endif
3457     zx_handle_t vmo_handle = ZX_HANDLE_INVALID;
3458 
3459 #ifdef VK_USE_PLATFORM_FUCHSIA
3460     if (importBufferCollection) {
3461         const auto& collection =
3462             *reinterpret_cast<fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(
3463                 importBufferCollectionInfoPtr->collection);
3464         auto result = collection->WaitForBuffersAllocated();
3465         if (!result.ok() || result->status != ZX_OK) {
3466             mesa_loge("WaitForBuffersAllocated failed: %d %d", result.status(),
3467                       GET_STATUS_SAFE(result, status));
3468             _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(VK_ERROR_INITIALIZATION_FAILED);
3469         }
3470         fuchsia_sysmem::wire::BufferCollectionInfo2& info = result->buffer_collection_info;
3471         uint32_t index = importBufferCollectionInfoPtr->index;
3472         if (index >= info.buffer_count) {
3473             mesa_loge("Invalid buffer index: %d", index);
3474             _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(VK_ERROR_INITIALIZATION_FAILED);
3475         }
3476         vmo_handle = info.buffers[index].vmo.release();
3477     }
3478 
3479     if (importVmo) {
3480         vmo_handle = importVmoInfoPtr->handle;
3481     }
3482 
3483     if (exportVmo) {
3484         hasDedicatedImage =
3485             dedicatedAllocInfoPtr && (dedicatedAllocInfoPtr->image != VK_NULL_HANDLE);
3486         hasDedicatedBuffer =
3487             dedicatedAllocInfoPtr && (dedicatedAllocInfoPtr->buffer != VK_NULL_HANDLE);
3488 
3489         if (hasDedicatedImage && hasDedicatedBuffer) {
3490             mesa_loge(
3491                 "Invalid VkMemoryDedicatedAllocateInfo: At least one "
3492                 "of image and buffer must be VK_NULL_HANDLE.");
3493             return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3494         }
3495 
3496         const VkImageCreateInfo* pImageCreateInfo = nullptr;
3497 
3498         VkBufferConstraintsInfoFUCHSIA bufferConstraintsInfo = {
3499             .sType = VK_STRUCTURE_TYPE_BUFFER_CONSTRAINTS_INFO_FUCHSIA,
3500             .pNext = nullptr,
3501             .createInfo = {},
3502             .requiredFormatFeatures = 0,
3503             .bufferCollectionConstraints =
3504                 VkBufferCollectionConstraintsInfoFUCHSIA{
3505                     .sType = VK_STRUCTURE_TYPE_BUFFER_COLLECTION_CONSTRAINTS_INFO_FUCHSIA,
3506                     .pNext = nullptr,
3507                     .minBufferCount = 1,
3508                     .maxBufferCount = 0,
3509                     .minBufferCountForCamping = 0,
3510                     .minBufferCountForDedicatedSlack = 0,
3511                     .minBufferCountForSharedSlack = 0,
3512                 },
3513         };
3514         const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo = nullptr;
3515 
3516         if (hasDedicatedImage) {
3517             std::lock_guard<std::recursive_mutex> lock(mLock);
3518 
3519             auto it = info_VkImage.find(dedicatedAllocInfoPtr->image);
3520             if (it == info_VkImage.end()) return VK_ERROR_INITIALIZATION_FAILED;
3521             const auto& imageInfo = it->second;
3522 
3523             pImageCreateInfo = &imageInfo.createInfo;
3524         }
3525 
3526         if (hasDedicatedBuffer) {
3527             std::lock_guard<std::recursive_mutex> lock(mLock);
3528 
3529             auto it = info_VkBuffer.find(dedicatedAllocInfoPtr->buffer);
3530             if (it == info_VkBuffer.end()) return VK_ERROR_INITIALIZATION_FAILED;
3531             const auto& bufferInfo = it->second;
3532 
3533             bufferConstraintsInfo.createInfo = bufferInfo.createInfo;
3534             pBufferConstraintsInfo = &bufferConstraintsInfo;
3535         }
3536 
3537         hasDedicatedImage =
3538             hasDedicatedImage && getBufferCollectionConstraintsVulkanImageUsage(pImageCreateInfo);
3539         hasDedicatedBuffer = hasDedicatedBuffer && getBufferCollectionConstraintsVulkanBufferUsage(
3540                                                        pBufferConstraintsInfo);
3541 
3542         if (hasDedicatedImage || hasDedicatedBuffer) {
3543             auto token_ends = fidl::CreateEndpoints<::fuchsia_sysmem::BufferCollectionToken>();
3544             if (!token_ends.is_ok()) {
3545                 mesa_loge("CreateEndpoints failed: %d", token_ends.status_value());
3546                 abort();
3547             }
3548 
3549             {
3550                 auto result =
3551                     mSysmemAllocator->AllocateSharedCollection(std::move(token_ends->server));
3552                 if (!result.ok()) {
3553                     mesa_loge("AllocateSharedCollection failed: %d", result.status());
3554                     abort();
3555                 }
3556             }
3557 
3558             auto collection_ends = fidl::CreateEndpoints<::fuchsia_sysmem::BufferCollection>();
3559             if (!collection_ends.is_ok()) {
3560                 mesa_loge("CreateEndpoints failed: %d", collection_ends.status_value());
3561                 abort();
3562             }
3563 
3564             {
3565                 auto result = mSysmemAllocator->BindSharedCollection(
3566                     std::move(token_ends->client), std::move(collection_ends->server));
3567                 if (!result.ok()) {
3568                     mesa_loge("BindSharedCollection failed: %d", result.status());
3569                     abort();
3570                 }
3571             }
3572 
3573             fidl::WireSyncClient<fuchsia_sysmem::BufferCollection> collection(
3574                 std::move(collection_ends->client));
3575             if (hasDedicatedImage) {
3576                 // TODO(fxbug.dev/42172354): Use setBufferCollectionImageConstraintsFUCHSIA.
3577                 VkResult res = setBufferCollectionConstraintsFUCHSIA(enc, device, &collection,
3578                                                                      pImageCreateInfo);
3579                 if (res == VK_ERROR_FORMAT_NOT_SUPPORTED) {
3580                     mesa_loge("setBufferCollectionConstraints failed: format %u is not supported",
3581                               pImageCreateInfo->format);
3582                     return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3583                 }
3584                 if (res != VK_SUCCESS) {
3585                     mesa_loge("setBufferCollectionConstraints failed: %d", res);
3586                     abort();
3587                 }
3588             }
3589 
3590             if (hasDedicatedBuffer) {
3591                 VkResult res = setBufferCollectionBufferConstraintsFUCHSIA(&collection,
3592                                                                            pBufferConstraintsInfo);
3593                 if (res != VK_SUCCESS) {
3594                     mesa_loge("setBufferCollectionBufferConstraints failed: %d", res);
3595                     abort();
3596                 }
3597             }
3598 
3599             {
3600                 auto result = collection->WaitForBuffersAllocated();
3601                 if (result.ok() && result->status == ZX_OK) {
3602                     fuchsia_sysmem::wire::BufferCollectionInfo2& info =
3603                         result->buffer_collection_info;
3604                     if (!info.buffer_count) {
3605                         mesa_loge(
3606                             "WaitForBuffersAllocated returned "
3607                             "invalid count: %d",
3608                             info.buffer_count);
3609                         abort();
3610                     }
3611                     vmo_handle = info.buffers[0].vmo.release();
3612                 } else {
3613                     mesa_loge("WaitForBuffersAllocated failed: %d %d", result.status(),
3614                               GET_STATUS_SAFE(result, status));
3615                     abort();
3616                 }
3617             }
3618 
3619             collection->Close();
3620 
3621             zx::vmo vmo_copy;
3622             zx_status_t status = zx_handle_duplicate(vmo_handle, ZX_RIGHT_SAME_RIGHTS,
3623                                                      vmo_copy.reset_and_get_address());
3624             if (status != ZX_OK) {
3625                 mesa_loge("Failed to duplicate VMO: %d", status);
3626                 abort();
3627             }
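            // zx_handle_duplicate() is needed because the FIDL calls below
            // take the zx::vmo by value and consume the handle; |vmo_handle|
            // itself stays owned by this function for the later
            // GetBufferHandle() lookup and setDeviceMemoryInfo().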
3628 
3629             if (pImageCreateInfo) {
3630                 // Only device-local images need to create color buffer; for
3631                 // host-visible images, the color buffer is already created
3632                 // when sysmem allocates memory. Here we use the |tiling|
3633                 // field of image creation info to determine if it uses
3634                 // host-visible memory.
3635                 bool isLinear = pImageCreateInfo->tiling == VK_IMAGE_TILING_LINEAR;
3636                 if (!isLinear) {
3637                     fuchsia_hardware_goldfish::wire::ColorBufferFormatType format;
3638                     switch (pImageCreateInfo->format) {
3639                         case VK_FORMAT_B8G8R8A8_SINT:
3640                         case VK_FORMAT_B8G8R8A8_UNORM:
3641                         case VK_FORMAT_B8G8R8A8_SRGB:
3642                         case VK_FORMAT_B8G8R8A8_SNORM:
3643                         case VK_FORMAT_B8G8R8A8_SSCALED:
3644                         case VK_FORMAT_B8G8R8A8_USCALED:
3645                             format = fuchsia_hardware_goldfish::wire::ColorBufferFormatType::kBgra;
3646                             break;
3647                         case VK_FORMAT_R8G8B8A8_SINT:
3648                         case VK_FORMAT_R8G8B8A8_UNORM:
3649                         case VK_FORMAT_R8G8B8A8_SRGB:
3650                         case VK_FORMAT_R8G8B8A8_SNORM:
3651                         case VK_FORMAT_R8G8B8A8_SSCALED:
3652                         case VK_FORMAT_R8G8B8A8_USCALED:
3653                             format = fuchsia_hardware_goldfish::wire::ColorBufferFormatType::kRgba;
3654                             break;
3655                         case VK_FORMAT_R8_UNORM:
3656                         case VK_FORMAT_R8_UINT:
3657                         case VK_FORMAT_R8_USCALED:
3658                         case VK_FORMAT_R8_SNORM:
3659                         case VK_FORMAT_R8_SINT:
3660                         case VK_FORMAT_R8_SSCALED:
3661                         case VK_FORMAT_R8_SRGB:
3662                             format =
3663                                 fuchsia_hardware_goldfish::wire::ColorBufferFormatType::kLuminance;
3664                             break;
3665                         case VK_FORMAT_R8G8_UNORM:
3666                         case VK_FORMAT_R8G8_UINT:
3667                         case VK_FORMAT_R8G8_USCALED:
3668                         case VK_FORMAT_R8G8_SNORM:
3669                         case VK_FORMAT_R8G8_SINT:
3670                         case VK_FORMAT_R8G8_SSCALED:
3671                         case VK_FORMAT_R8G8_SRGB:
3672                             format = fuchsia_hardware_goldfish::wire::ColorBufferFormatType::kRg;
3673                             break;
3674                         default:
3675                             mesa_loge("Unsupported format: %d", pImageCreateInfo->format);
3676                             abort();
3677                     }
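                    // Only the channel layout matters when choosing the host
                    // color-buffer format; the numeric encoding
                    // (UNORM/SRGB/SCALED/...) remains a property of the
                    // VkImage itself.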
3678 
3679                     fidl::Arena arena;
3680                     fuchsia_hardware_goldfish::wire::CreateColorBuffer2Params createParams(arena);
3681                     createParams.set_width(pImageCreateInfo->extent.width)
3682                         .set_height(pImageCreateInfo->extent.height)
3683                         .set_format(format)
3684                         .set_memory_property(
3685                             fuchsia_hardware_goldfish::wire::kMemoryPropertyDeviceLocal);
3686 
3687                     auto result = mControlDevice->CreateColorBuffer2(std::move(vmo_copy),
3688                                                                      std::move(createParams));
3689                     if (!result.ok() || result->res != ZX_OK) {
3690                         if (result.ok() && result->res == ZX_ERR_ALREADY_EXISTS) {
3691                             mesa_logd(
3692                                 "CreateColorBuffer: color buffer already "
3693                                 "exists\n");
3694                         } else {
3695                             mesa_loge("CreateColorBuffer failed: %d:%d", result.status(),
3696                                       GET_STATUS_SAFE(result, res));
3697                             abort();
3698                         }
3699                     }
3700                 }
3701             }
3702 
3703             if (pBufferConstraintsInfo) {
3704                 fidl::Arena arena;
3705                 fuchsia_hardware_goldfish::wire::CreateBuffer2Params createParams(arena);
3706                 createParams.set_size(arena, pBufferConstraintsInfo->createInfo.size)
3707                     .set_memory_property(
3708                         fuchsia_hardware_goldfish::wire::kMemoryPropertyDeviceLocal);
3709 
3710                 auto result =
3711                     mControlDevice->CreateBuffer2(std::move(vmo_copy), std::move(createParams));
3712                 if (!result.ok() || result->is_error()) {
3713                     mesa_loge("CreateBuffer2 failed: %d:%d", result.status(),
3714                               GET_STATUS_SAFE(result, error_value()));
3715                     abort();
3716                 }
3717             }
3718         } else {
3719             mesa_logw(
3720                 "Dedicated image / buffer not available. Cannot create "
3721                 "BufferCollection to export VMOs.");
3722             return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3723         }
3724     }
3725 
3726     if (vmo_handle != ZX_HANDLE_INVALID) {
3727         zx::vmo vmo_copy;
3728         zx_status_t status =
3729             zx_handle_duplicate(vmo_handle, ZX_RIGHT_SAME_RIGHTS, vmo_copy.reset_and_get_address());
3730         if (status != ZX_OK) {
3731             mesa_loge("Failed to duplicate VMO: %d", status);
3732             abort();
3733         }
3735 
3736         auto result = mControlDevice->GetBufferHandle(std::move(vmo_copy));
3737         if (!result.ok() || result->res != ZX_OK) {
3738             mesa_loge("GetBufferHandle failed: %d:%d", result.status(),
3739                       GET_STATUS_SAFE(result, res));
3740         } else {
3741             fuchsia_hardware_goldfish::wire::BufferHandleType handle_type = result->type;
3742             uint32_t buffer_handle = result->id;
3743 
3744             if (handle_type == fuchsia_hardware_goldfish::wire::BufferHandleType::kBuffer) {
3745                 importBufferInfo.buffer = buffer_handle;
3746                 vk_append_struct(&structChainIter, &importBufferInfo);
3747             } else {
3748                 importCbInfo.colorBuffer = buffer_handle;
3749                 vk_append_struct(&structChainIter, &importCbInfo);
3750             }
3751         }
3752     }
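    // GetBufferHandle() reports whether the VMO is registered on the host as a
    // raw buffer or as a color buffer, and the matching import struct is
    // chained accordingly, mirroring the AHB path above.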
3753 #endif
3754 
3755     VirtGpuResourcePtr bufferBlob = nullptr;
3756 #if defined(LINUX_GUEST_BUILD)
3757     if (exportDmabuf) {
3758         VirtGpuDevice* instance = VirtGpuDevice::getInstance();
3759         hasDedicatedImage =
3760             dedicatedAllocInfoPtr && (dedicatedAllocInfoPtr->image != VK_NULL_HANDLE);
3761         hasDedicatedBuffer =
3762             dedicatedAllocInfoPtr && (dedicatedAllocInfoPtr->buffer != VK_NULL_HANDLE);
3763 
3764         if (hasDedicatedImage) {
3765             VkImageCreateInfo imageCreateInfo;
3766             bool isDmaBufImage = false;
3767             {
3768                 std::lock_guard<std::recursive_mutex> lock(mLock);
3769 
3770                 auto it = info_VkImage.find(dedicatedAllocInfoPtr->image);
3771                 if (it == info_VkImage.end()) return VK_ERROR_INITIALIZATION_FAILED;
3772                 const auto& imageInfo = it->second;
3773 
3774                 imageCreateInfo = imageInfo.createInfo;
3775                 isDmaBufImage = imageInfo.isDmaBufImage;
3776             }
3777 
3778             if (isDmaBufImage) {
3779                 const VkImageSubresource imageSubresource = {
3780                     .aspectMask = exportAllocateInfoPtr->handleTypes &
3781                                           VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT
3782                                       ? VK_IMAGE_ASPECT_MEMORY_PLANE_0_BIT_EXT
3783                                       : VK_IMAGE_ASPECT_COLOR_BIT,
3784                     .mipLevel = 0,
3785                     .arrayLayer = 0,
3786                 };
3787                 VkSubresourceLayout subResourceLayout;
3788                 on_vkGetImageSubresourceLayout(context, device, dedicatedAllocInfoPtr->image,
3789                                                &imageSubresource, &subResourceLayout);
3790                 if (!subResourceLayout.rowPitch) {
3791                     mesa_loge("Failed to query stride for VirtGpu resource creation.");
3792                     return VK_ERROR_INITIALIZATION_FAILED;
3793                 }
3794 
3795                 uint32_t virglFormat = gfxstream::vk::getVirglFormat(imageCreateInfo.format);
3796                 if (!virglFormat) {
3797                     mesa_loge("Unsupported VK format for VirtGpu resource, vkFormat: 0x%x",
3798                               imageCreateInfo.format);
3799                     return VK_ERROR_FORMAT_NOT_SUPPORTED;
3800                 }
3801                 const uint32_t target = PIPE_TEXTURE_2D;
3802                 uint32_t bind = VIRGL_BIND_RENDER_TARGET;
3803                 if (VK_IMAGE_TILING_LINEAR == imageCreateInfo.tiling) {
3804                     bind |= VIRGL_BIND_LINEAR;
3805                 }
3806 
3807                 if (mCaps.vulkanCapset.alwaysBlob) {
3808                     struct gfxstreamResourceCreate3d create3d = {};
3809                     struct VirtGpuExecBuffer exec = {};
3810                     struct gfxstreamPlaceholderCommandVk placeholderCmd = {};
3811                     struct VirtGpuCreateBlob createBlob = {};
3812 
3813                     create3d.hdr.opCode = GFXSTREAM_RESOURCE_CREATE_3D;
3814                     create3d.bind = bind;
3815                     create3d.target = target;
3816                     create3d.format = virglFormat;
3817                     create3d.width = imageCreateInfo.extent.width;
3818                     create3d.height = imageCreateInfo.extent.height;
3819                     create3d.blobId = ++mAtomicId;
3820 
3821                     createBlob.blobCmd = reinterpret_cast<uint8_t*>(&create3d);
3822                     createBlob.blobCmdSize = sizeof(create3d);
3823                     createBlob.blobMem = kBlobMemHost3d;
3824                     createBlob.flags = kBlobFlagShareable | kBlobFlagCrossDevice;
3825                     createBlob.blobId = create3d.blobId;
3826                     createBlob.size = finalAllocInfo.allocationSize;
3827 
3828                     bufferBlob = instance->createBlob(createBlob);
3829                     if (!bufferBlob) return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3830 
3831                     placeholderCmd.hdr.opCode = GFXSTREAM_PLACEHOLDER_COMMAND_VK;
3832                     exec.command = static_cast<void*>(&placeholderCmd);
3833                     exec.command_size = sizeof(placeholderCmd);
3834                     exec.flags = kRingIdx;
3835                     exec.ring_idx = 1;
3836                     if (instance->execBuffer(exec, bufferBlob.get())) {
3837                         mesa_loge("Failed to execbuffer placeholder command.");
3838                         return VK_ERROR_OUT_OF_HOST_MEMORY;
3839                     }
3840 
3841                     if (bufferBlob->wait()) {
3842                         mesa_loge("Failed to wait for blob.");
3843                         return VK_ERROR_OUT_OF_HOST_MEMORY;
3844                     }
3845                 } else {
3846                     bufferBlob = instance->createResource(
3847                         imageCreateInfo.extent.width, imageCreateInfo.extent.height,
3848                         subResourceLayout.rowPitch,
3849                         subResourceLayout.rowPitch * imageCreateInfo.extent.height, virglFormat,
3850                         target, bind);
3851                     if (!bufferBlob) {
3852                         mesa_loge("Failed to create colorBuffer resource for Image memory");
3853                         return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3854                     }
3855                     if (bufferBlob->wait()) {
3856                         mesa_loge("Failed to wait for colorBuffer resource for Image memory");
3857                         return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3858                     }
3859                 }
3860             } else {
3861                 mesa_logw(
3862                     "The VkMemoryDedicatedAllocateInfo::image associated with VkDeviceMemory "
3863                     "allocation cannot be used to create exportable resource "
3864                     "(VkExportMemoryAllocateInfo).\n");
3865             }
3866         } else if (hasDedicatedBuffer) {
3867             uint32_t virglFormat = VIRGL_FORMAT_R8_UNORM;
3868             const uint32_t target = PIPE_BUFFER;
3869             uint32_t bind = VIRGL_BIND_LINEAR;
3870             uint32_t width = finalAllocInfo.allocationSize;
3871             uint32_t height = 1;
3872 
3873             if (mCaps.vulkanCapset.alwaysBlob) {
3874                 struct gfxstreamResourceCreate3d create3d = {};
3875                 struct VirtGpuExecBuffer exec = {};
3876                 struct gfxstreamPlaceholderCommandVk placeholderCmd = {};
3877                 struct VirtGpuCreateBlob createBlob = {};
3878 
3879                 create3d.hdr.opCode = GFXSTREAM_RESOURCE_CREATE_3D;
3880                 create3d.bind = bind;
3881                 create3d.target = target;
3882                 create3d.format = virglFormat;
3883                 create3d.width = width;
3884                 create3d.height = height;
3885                 create3d.blobId = ++mAtomicId;
3886 
3887                 createBlob.blobCmd = reinterpret_cast<uint8_t*>(&create3d);
3888                 createBlob.blobCmdSize = sizeof(create3d);
3889                 createBlob.blobMem = kBlobMemHost3d;
3890                 createBlob.flags = kBlobFlagShareable | kBlobFlagCrossDevice;
3891                 createBlob.blobId = create3d.blobId;
3892                 createBlob.size = width;
3893 
3894                 bufferBlob = instance->createBlob(createBlob);
3895                 if (!bufferBlob) return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3896 
3897                 placeholderCmd.hdr.opCode = GFXSTREAM_PLACEHOLDER_COMMAND_VK;
3898                 exec.command = static_cast<void*>(&placeholderCmd);
3899                 exec.command_size = sizeof(placeholderCmd);
3900                 exec.flags = kRingIdx;
3901                 exec.ring_idx = 1;
3902                 if (instance->execBuffer(exec, bufferBlob.get())) {
3903                     mesa_loge("Failed to execbuffer placeholder command.");
3904                     return VK_ERROR_OUT_OF_HOST_MEMORY;
3905                 }
3906 
3907                 if (bufferBlob->wait()) {
                         mesa_loge("Failed to wait for blob.");
                         return VK_ERROR_OUT_OF_HOST_MEMORY;
                     }
3908             } else {
3909                 bufferBlob = instance->createResource(width, height, width, width * height,
3910                                                       virglFormat, target, bind);
3911                 if (!bufferBlob) {
3912                     mesa_loge("Failed to create resource for Buffer memory");
3913                     return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3914                 }
3915                 if (bufferBlob->wait()) {
3916                     mesa_loge("Failed to wait for resource for Buffer memory");
3917                     return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3918                 }
3919             }
3920         } else {
3921             mesa_logw(
3922                 "VkDeviceMemory is not exportable (VkExportMemoryAllocateInfo). Requires "
3923                 "VkMemoryDedicatedAllocateInfo::image to create external resource.");
3924         }
3925     }
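    // A minimal sketch of the blob handshake used by both export paths above
    // (structs from virtgpu_gfxstream_protocol.h): the guest picks a fresh
    // blobId, embeds a RESOURCE_CREATE_3D command in the blob creation, then
    // round-trips a placeholder execbuffer on ring 1 and wait()s so the host
    // has created the resource before the blob is used:
    //
    //   struct gfxstreamResourceCreate3d create3d = {};
    //   create3d.hdr.opCode = GFXSTREAM_RESOURCE_CREATE_3D;
    //   create3d.blobId = ++mAtomicId;
    //   struct VirtGpuCreateBlob createBlob = {};
    //   createBlob.blobCmd = reinterpret_cast<uint8_t*>(&create3d);
    //   createBlob.blobCmdSize = sizeof(create3d);
    //   createBlob.blobMem = kBlobMemHost3d;
    //   createBlob.blobId = create3d.blobId;
    //   bufferBlob = instance->createBlob(createBlob);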
3926 
3927     if (importDmabuf) {
3928         VirtGpuExternalHandle importHandle = {};
3929         importHandle.osHandle = importFdInfoPtr->fd;
3930         importHandle.type = kMemHandleDmabuf;
3931 
3932         auto instance = VirtGpuDevice::getInstance();
3933         bufferBlob = instance->importBlob(importHandle);
3934         if (!bufferBlob) {
3935             mesa_loge("%s: Failed to import colorBuffer resource\n", __func__);
3936             return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3937         }
3938     }
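    // Importing wraps the existing dma-buf fd in a VirtGpuResource; no new
    // host resource is created, and the resource handle is chained into the
    // allocation below.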
3939 
3940     if (bufferBlob) {
3941         if (hasDedicatedBuffer) {
3942             importBufferInfo.buffer = bufferBlob->getResourceHandle();
3943             vk_append_struct(&structChainIter, &importBufferInfo);
3944         } else {
3945             importCbInfo.colorBuffer = bufferBlob->getResourceHandle();
3946             vk_append_struct(&structChainIter, &importCbInfo);
3947         }
3948     }
3949 #endif
3950 
3951     if (ahw || bufferBlob || !requestedMemoryIsHostVisible) {
3952         input_result =
3953             enc->vkAllocateMemory(device, &finalAllocInfo, pAllocator, pMemory, true /* do lock */);
3954 
3955         if (input_result != VK_SUCCESS) _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(input_result);
3956 
3957         setDeviceMemoryInfo(device, *pMemory, 0, nullptr, finalAllocInfo.memoryTypeIndex, ahw,
3958                             isImport, vmo_handle, bufferBlob);
3959 
3960         uint64_t memoryObjectId = (uint64_t)(void*)*pMemory;
3961 #ifdef VK_USE_PLATFORM_ANDROID_KHR
3962         if (ahw) {
3963             memoryObjectId = getAHardwareBufferId(ahw);
3964         }
3965 #endif
3966         emitDeviceMemoryReport(info_VkDevice[device],
3967                                isImport ? VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_IMPORT_EXT
3968                                         : VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATE_EXT,
3969                                memoryObjectId, pAllocateInfo->allocationSize,
3970                                VK_OBJECT_TYPE_DEVICE_MEMORY, (uint64_t)(void*)*pMemory,
3971                                pAllocateInfo->memoryTypeIndex);
3972         return VK_SUCCESS;
3973     }
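    // Past this point the allocation must be host visible: either a VMO-backed
    // mapping (Fuchsia) or direct-mapped coherent memory.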
3974 
3975 #ifdef VK_USE_PLATFORM_FUCHSIA
3976     if (vmo_handle != ZX_HANDLE_INVALID) {
3977         input_result =
3978             enc->vkAllocateMemory(device, &finalAllocInfo, pAllocator, pMemory, true /* do lock */);
3979 
3980         // Get VMO handle rights, and only use allowed rights to map the
3981         // host memory.
3982         zx_info_handle_basic handle_info;
3983         zx_status_t status = zx_object_get_info(vmo_handle, ZX_INFO_HANDLE_BASIC, &handle_info,
3984                                                 sizeof(handle_info), nullptr, nullptr);
3985         if (status != ZX_OK) {
3986             mesa_loge("%s: cannot get vmo object info: vmo = %u status: %d.", __func__, vmo_handle,
3987                       status);
3988             return VK_ERROR_OUT_OF_HOST_MEMORY;
3989         }
3990 
3991         zx_vm_option_t vm_permission = 0u;
3992         vm_permission |= (handle_info.rights & ZX_RIGHT_READ) ? ZX_VM_PERM_READ : 0;
3993         vm_permission |= (handle_info.rights & ZX_RIGHT_WRITE) ? ZX_VM_PERM_WRITE : 0;
3994 
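        // Zircon refuses mappings whose permissions exceed the handle rights,
        // so, e.g., a read-only VMO (rights contain only ZX_RIGHT_READ) is
        // mapped with ZX_VM_PERM_READ alone instead of failing with
        // ZX_ERR_ACCESS_DENIED.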
3995         zx_vaddr_t addr;
3996         status = zx_vmar_map(zx_vmar_root_self(), vm_permission, 0, vmo_handle, 0,
3997                              finalAllocInfo.allocationSize, &addr);
3998         if (status != ZX_OK) {
3999             mesa_loge("%s: cannot map vmar: status %d.", __func__, status);
4000             return VK_ERROR_OUT_OF_HOST_MEMORY;
4001         }
4002 
4003         setDeviceMemoryInfo(device, *pMemory, finalAllocInfo.allocationSize,
4004                             reinterpret_cast<uint8_t*>(addr), finalAllocInfo.memoryTypeIndex,
4005                             /*ahw=*/nullptr, isImport, vmo_handle, /*blobPtr=*/nullptr);
4006         return VK_SUCCESS;
4007     }
4008 #endif
4009 
4010     // Host visible memory with direct mapping
4011     VkResult result = getCoherentMemory(&finalAllocInfo, enc, device, pMemory);
4012     if (result != VK_SUCCESS) return result;
4013 
4014     uint64_t memoryObjectId = (uint64_t)(void*)*pMemory;
4015 
4016 #ifdef VK_USE_PLATFORM_ANDROID_KHR
4017     if (ahw) {
4018         memoryObjectId = getAHardwareBufferId(ahw);
4019     }
4020 #endif
4021 
4022     emitDeviceMemoryReport(info_VkDevice[device],
4023                            isImport ? VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_IMPORT_EXT
4024                                     : VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATE_EXT,
4025                            memoryObjectId, pAllocateInfo->allocationSize,
4026                            VK_OBJECT_TYPE_DEVICE_MEMORY, (uint64_t)(void*)*pMemory,
4027                            pAllocateInfo->memoryTypeIndex);
4028     return VK_SUCCESS;
4029 }
4030 
4031 void ResourceTracker::on_vkFreeMemory(void* context, VkDevice device, VkDeviceMemory memory,
4032                                       const VkAllocationCallbacks* pAllocator) {
4033     std::unique_lock<std::recursive_mutex> lock(mLock);
4034 
4035     auto it = info_VkDeviceMemory.find(memory);
4036     if (it == info_VkDeviceMemory.end()) return;
4037     auto& info = it->second;
4038     uint64_t memoryObjectId = (uint64_t)(void*)memory;
4039 #ifdef VK_USE_PLATFORM_ANDROID_KHR
4040     if (info.ahw) {
4041         memoryObjectId = getAHardwareBufferId(info.ahw);
4042     }
4043 #endif
4044 
4045     emitDeviceMemoryReport(info_VkDevice[device],
4046                            info.imported ? VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_UNIMPORT_EXT
4047                                          : VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_FREE_EXT,
4048                            memoryObjectId, 0 /* size */, VK_OBJECT_TYPE_DEVICE_MEMORY,
4049                            (uint64_t)(void*)memory);
4050 
4051 #ifdef VK_USE_PLATFORM_FUCHSIA
4052     if (info.vmoHandle && info.ptr) {
4053         zx_status_t status = zx_vmar_unmap(
4054             zx_vmar_root_self(), reinterpret_cast<zx_vaddr_t>(info.ptr), info.allocationSize);
4055         if (status != ZX_OK) {
4056             mesa_loge("%s: Cannot unmap ptr: status %d", __func__, status);
4057         }
4058         info.ptr = nullptr;
4059     }
4060 #endif
4061 
4062     if (!info.coherentMemory) {
4063         lock.unlock();
4064         VkEncoder* enc = (VkEncoder*)context;
4065         enc->vkFreeMemory(device, memory, pAllocator, true /* do lock */);
4066         return;
4067     }
4068 
4069     auto coherentMemory = freeCoherentMemoryLocked(memory, info);
4070 
4071     // We have to release the lock before we could possibly free a
4072     // CoherentMemory, because that will call into VkEncoder, which
4073     // shouldn't be called when the lock is held.
4074     lock.unlock();
4075     coherentMemory = nullptr;
4076 }
4077 
4078 VkResult ResourceTracker::on_vkMapMemory(void* context, VkResult host_result, VkDevice device,
4079                                          VkDeviceMemory memory, VkDeviceSize offset,
4080                                          VkDeviceSize size, VkMemoryMapFlags, void** ppData) {
4081     if (host_result != VK_SUCCESS) {
4082         mesa_loge("%s: Host failed to map", __func__);
4083         return host_result;
4084     }
4085 
4086     std::unique_lock<std::recursive_mutex> lock(mLock);
4087 
4088     auto deviceMemoryInfoIt = info_VkDeviceMemory.find(memory);
4089     if (deviceMemoryInfoIt == info_VkDeviceMemory.end()) {
4090         mesa_loge("%s: Failed to find VkDeviceMemory.", __func__);
4091         return VK_ERROR_MEMORY_MAP_FAILED;
4092     }
4093     auto& deviceMemoryInfo = deviceMemoryInfoIt->second;
4094 
4095     if (deviceMemoryInfo.blobId && !deviceMemoryInfo.coherentMemory &&
4096         !mCaps.params[kParamCreateGuestHandle]) {
4097         // NOTE: must not hold lock while calling into the encoder.
4098         lock.unlock();
4099         VkEncoder* enc = (VkEncoder*)context;
4100         VkResult vkResult = enc->vkGetBlobGOOGLE(device, memory, /*doLock*/ false);
4101         if (vkResult != VK_SUCCESS) {
4102             mesa_loge("%s: Failed to vkGetBlobGOOGLE().", __func__);
4103             return vkResult;
4104         }
4105         lock.lock();
4106 
4107         // NOTE: deviceMemoryInfoIt potentially invalidated but deviceMemoryInfo still okay.
4108 
4109         struct VirtGpuCreateBlob createBlob = {};
4110         createBlob.blobMem = kBlobMemHost3d;
4111         createBlob.flags = kBlobFlagMappable;
4112         createBlob.blobId = deviceMemoryInfo.blobId;
4113         createBlob.size = deviceMemoryInfo.coherentMemorySize;
4114 
4115         auto blob = VirtGpuDevice::getInstance()->createBlob(createBlob);
4116         if (!blob) return VK_ERROR_OUT_OF_DEVICE_MEMORY;
4117 
4118         VirtGpuResourceMappingPtr mapping = blob->createMapping();
4119         if (!mapping) return VK_ERROR_OUT_OF_DEVICE_MEMORY;
4120 
4121         auto coherentMemory =
4122             std::make_shared<CoherentMemory>(mapping, createBlob.size, device, memory);
4123 
4124         uint8_t* ptr;
4125         uint64_t blobOffset;  // renamed to avoid shadowing the |offset| parameter
4126         coherentMemory->subAllocate(deviceMemoryInfo.allocationSize, &ptr, blobOffset);
4127 
4128         deviceMemoryInfo.coherentMemoryOffset = blobOffset;
4129         deviceMemoryInfo.coherentMemory = coherentMemory;
4130         deviceMemoryInfo.ptr = ptr;
4131     }
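    // The block above lazily materializes a guest mapping for allocations that
    // were created host-only: vkGetBlobGOOGLE() asks the host to expose the
    // allocation as a blob, after which it maps like any other coherent
    // memory. Sketch of the flow (names from this function):
    //
    //   enc->vkGetBlobGOOGLE(device, memory, /*doLock*/ false);
    //   blob = VirtGpuDevice::getInstance()->createBlob(createBlob);
    //   mapping = blob->createMapping();
    //   coherentMemory->subAllocate(allocationSize, &ptr, blobOffset);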
4132 
4133     if (!deviceMemoryInfo.ptr) {
4134         mesa_loge("%s: VkDeviceMemory has nullptr.", __func__);
4135         return VK_ERROR_MEMORY_MAP_FAILED;
4136     }
4137 
4138     if (size != VK_WHOLE_SIZE &&
4139         (offset + size > deviceMemoryInfo.allocationSize)) {
4140         mesa_loge(
4141             "%s: size is too big. alloc size 0x%llx while we wanted offset 0x%llx size 0x%llx "
4142             "total 0x%llx",
4143             __func__, (unsigned long long)deviceMemoryInfo.allocationSize,
4144             (unsigned long long)offset, (unsigned long long)size, (unsigned long long)(offset + size));
4145         return VK_ERROR_MEMORY_MAP_FAILED;
4146     }
4147 
4148     *ppData = deviceMemoryInfo.ptr + offset;
4149 
4150     return host_result;
4151 }
4152 
4153 void ResourceTracker::on_vkUnmapMemory(void*, VkDevice, VkDeviceMemory) {
4154     // no-op
4155 }
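// Unmap is a no-op on purpose: guest pointers come from persistent direct
// mappings (coherent-memory suballocations or VMO mappings), which stay valid
// until on_vkFreeMemory() tears them down.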
4156 
4157 void ResourceTracker::transformImageMemoryRequirements2ForGuest(VkImage image,
4158                                                                 VkMemoryRequirements2* reqs2) {
4159     std::lock_guard<std::recursive_mutex> lock(mLock);
4160 
4161     auto it = info_VkImage.find(image);
4162     if (it == info_VkImage.end()) return;
4163 
4164     auto& info = it->second;
4165 
4166     transformImageMemoryRequirementsForGuestLocked(image, &reqs2->memoryRequirements);
4167 
4168     if (!info.external || !info.externalCreateInfo.handleTypes) {
4169         return;
4170     }
4172 
4173     VkMemoryDedicatedRequirements* dedicatedReqs =
4174         vk_find_struct(reqs2, MEMORY_DEDICATED_REQUIREMENTS);
4175 
4176     if (!dedicatedReqs) return;
4177 
4178     transformExternalResourceMemoryDedicatedRequirementsForGuest(dedicatedReqs);
4179 }
4180 
4181 void ResourceTracker::transformBufferMemoryRequirements2ForGuest(VkBuffer buffer,
4182                                                                  VkMemoryRequirements2* reqs2) {
4183     std::lock_guard<std::recursive_mutex> lock(mLock);
4184 
4185     auto it = info_VkBuffer.find(buffer);
4186     if (it == info_VkBuffer.end()) return;
4187 
4188     auto& info = it->second;
4189 
4190     if (!info.external || !info.externalCreateInfo.handleTypes) {
4191         return;
4192     }
4193 
4194     VkMemoryDedicatedRequirements* dedicatedReqs =
4195         vk_find_struct(reqs2, MEMORY_DEDICATED_REQUIREMENTS);
4196 
4197     if (!dedicatedReqs) return;
4198 
4199     transformExternalResourceMemoryDedicatedRequirementsForGuest(dedicatedReqs);
4200 }
4201 
4202 VkResult ResourceTracker::on_vkCreateImage(void* context, VkResult, VkDevice device,
4203                                            const VkImageCreateInfo* pCreateInfo,
4204                                            const VkAllocationCallbacks* pAllocator,
4205                                            VkImage* pImage) {
4206     VkEncoder* enc = (VkEncoder*)context;
4207 
4208     VkImageCreateInfo localCreateInfo = vk_make_orphan_copy(*pCreateInfo);
4209     if (localCreateInfo.sharingMode != VK_SHARING_MODE_CONCURRENT) {
4210         localCreateInfo.queueFamilyIndexCount = 0;
4211         localCreateInfo.pQueueFamilyIndices = nullptr;
4212     }
4213 
4214     vk_struct_chain_iterator structChainIter = vk_make_chain_iterator(&localCreateInfo);
4215     VkExternalMemoryImageCreateInfo localExtImgCi;
4216 
4217     const VkExternalMemoryImageCreateInfo* extImgCiPtr =
4218         vk_find_struct_const(pCreateInfo, EXTERNAL_MEMORY_IMAGE_CREATE_INFO);
4219 
4220     if (extImgCiPtr) {
4221         localExtImgCi = vk_make_orphan_copy(*extImgCiPtr);
4222         vk_append_struct(&structChainIter, &localExtImgCi);
4223     }
4224 
4225 #if defined(LINUX_GUEST_BUILD)
4226     bool isDmaBufImage = false;
4227     VkImageDrmFormatModifierExplicitCreateInfoEXT localDrmFormatModifierInfo;
4228     VkImageDrmFormatModifierListCreateInfoEXT localDrmFormatModifierList;
4229 
4230     if (extImgCiPtr &&
4231         (extImgCiPtr->handleTypes & VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT)) {
4232         const wsi_image_create_info* wsiImageCi =
4233             vk_find_struct_const(pCreateInfo, WSI_IMAGE_CREATE_INFO_MESA);
4234         if (wsiImageCi && wsiImageCi->scanout) {
4235             // Linux WSI creates swapchain images with VK_IMAGE_CREATE_ALIAS_BIT. Vulkan spec
4236             // states: "If the pNext chain includes a VkExternalMemoryImageCreateInfo or
4237             // VkExternalMemoryImageCreateInfoNV structure whose handleTypes member is not 0, it is
4238             // as if VK_IMAGE_CREATE_ALIAS_BIT is set." To avoid flag mismatches on host driver,
4239             // remove the VK_IMAGE_CREATE_ALIAS_BIT here.
4240             localCreateInfo.flags &= ~VK_IMAGE_CREATE_ALIAS_BIT;
4241         }
4242 
4243         const VkImageDrmFormatModifierExplicitCreateInfoEXT* drmFmtMod =
4244             vk_find_struct_const(pCreateInfo, IMAGE_DRM_FORMAT_MODIFIER_EXPLICIT_CREATE_INFO_EXT);
4245         const VkImageDrmFormatModifierListCreateInfoEXT* drmFmtModList =
4246             vk_find_struct_const(pCreateInfo, IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT);
4247         if (drmFmtMod || drmFmtModList) {
4248             if (getHostDeviceExtensionIndex(VK_EXT_IMAGE_DRM_FORMAT_MODIFIER_EXTENSION_NAME) !=
4249                 -1) {
4250                 // host supports DRM format modifiers => forward the struct
4251                 if (drmFmtMod) {
4252                     localDrmFormatModifierInfo = vk_make_orphan_copy(*drmFmtMod);
4253                     vk_append_struct(&structChainIter, &localDrmFormatModifierInfo);
4254                 }
4255                 if (drmFmtModList) {
4256                     localDrmFormatModifierList = vk_make_orphan_copy(*drmFmtModList);
4257                     vk_append_struct(&structChainIter, &localDrmFormatModifierList);
4258                 }
4259             } else {
4260                 // drmFmtModList may be null when only the explicit-modifier struct is chained.
4261                 bool canUseLinearModifier =
4262                     (drmFmtMod && drmFmtMod->drmFormatModifier == DRM_FORMAT_MOD_LINEAR) ||
4263                     (drmFmtModList &&
4264                      std::any_of(drmFmtModList->pDrmFormatModifiers,
4265                                  drmFmtModList->pDrmFormatModifiers +
                                         drmFmtModList->drmFormatModifierCount,
                                     [](const uint64_t mod) { return mod == DRM_FORMAT_MOD_LINEAR; }));
4266                 // host doesn't support DRM format modifiers, try emulating
4267                 if (canUseLinearModifier) {
4268                     mesa_logd("emulating DRM_FORMAT_MOD_LINEAR with VK_IMAGE_TILING_LINEAR");
4269                     localCreateInfo.tiling = VK_IMAGE_TILING_LINEAR;
4270                 } else {
4271                     return VK_ERROR_VALIDATION_FAILED_EXT;
4272                 }
4273             }
4274         }
4275 
4276         isDmaBufImage = true;
4277     }
4278 #endif
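// With the fallback above, a caller that only needs a linear dma-buf image
// still works against hosts lacking VK_EXT_image_drm_format_modifier.
// Caller-side sketch (standard Vulkan structs, nothing gfxstream-specific):
//
//   const uint64_t mods[] = {DRM_FORMAT_MOD_LINEAR};
//   VkImageDrmFormatModifierListCreateInfoEXT modList = {
//       .sType = VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT,
//       .drmFormatModifierCount = 1,
//       .pDrmFormatModifiers = mods,
//   };
//   // chained into VkImageCreateInfo::pNext with
//   // tiling = VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT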
4279 
4280 #ifdef VK_USE_PLATFORM_ANDROID_KHR
4281     VkNativeBufferANDROID localAnb;
4282     const VkNativeBufferANDROID* anbInfoPtr = vk_find_struct_const(pCreateInfo, NATIVE_BUFFER_ANDROID);
4283     if (anbInfoPtr) {
4284         localAnb = vk_make_orphan_copy(*anbInfoPtr);
4285         vk_append_struct(&structChainIter, &localAnb);
4286     }
4287 
4288     VkExternalFormatANDROID localExtFormatAndroid;
4289     const VkExternalFormatANDROID* extFormatAndroidPtr =
4290         vk_find_struct_const(pCreateInfo, EXTERNAL_FORMAT_ANDROID);
4291     if (extFormatAndroidPtr) {
4292         localExtFormatAndroid = vk_make_orphan_copy(*extFormatAndroidPtr);
4293 
4294         // Do not append external format android;
4295         // instead, replace the local image localCreateInfo format
4296         // with the corresponding Vulkan format
4297         if (extFormatAndroidPtr->externalFormat) {
4298             localCreateInfo.format = vk_format_from_fourcc(extFormatAndroidPtr->externalFormat);
4299             if (localCreateInfo.format == VK_FORMAT_UNDEFINED)
4300                 return VK_ERROR_VALIDATION_FAILED_EXT;
4301         }
4302     }
4303 #endif
4304 
4305 #ifdef VK_USE_PLATFORM_FUCHSIA
4306     const VkBufferCollectionImageCreateInfoFUCHSIA* extBufferCollectionPtr =
4307         vk_find_struct_const(pCreateInfo, BUFFER_COLLECTION_IMAGE_CREATE_INFO_FUCHSIA);
4308 
4309     bool isSysmemBackedMemory = false;
4310 
4311     if (extImgCiPtr &&
4312         (extImgCiPtr->handleTypes & VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA)) {
4313         isSysmemBackedMemory = true;
4314     }
4315 
4316     if (extBufferCollectionPtr) {
4317         const auto& collection =
4318             *reinterpret_cast<fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(
4319                 extBufferCollectionPtr->collection);
4320         uint32_t index = extBufferCollectionPtr->index;
4321         zx::vmo vmo;
4322 
4323         fuchsia_sysmem::wire::BufferCollectionInfo2 info;
4324 
4325         auto result = collection->WaitForBuffersAllocated();
4326         if (result.ok() && result->status == ZX_OK) {
4327             info = std::move(result->buffer_collection_info);
4328             if (index < info.buffer_count && info.settings.has_image_format_constraints) {
4329                 vmo = std::move(info.buffers[index].vmo);
4330             }
4331         } else {
4332             mesa_loge("WaitForBuffersAllocated failed: %d %d", result.status(),
4333                       GET_STATUS_SAFE(result, status));
4334         }
4335 
4336         if (vmo.is_valid()) {
4337             zx::vmo vmo_dup;
4338             if (zx_status_t status = vmo.duplicate(ZX_RIGHT_SAME_RIGHTS, &vmo_dup);
4339                 status != ZX_OK) {
4340                 mesa_loge("%s: zx_vmo_duplicate failed: %d", __func__, status);
4341                 abort();
4342             }
4343 
4344             auto buffer_handle_result = mControlDevice->GetBufferHandle(std::move(vmo_dup));
4345             if (!buffer_handle_result.ok()) {
4346                 mesa_loge("%s: GetBufferHandle FIDL error: %d", __func__,
4347                           buffer_handle_result.status());
4348                 abort();
4349             }
4350             if (buffer_handle_result.value().res == ZX_OK) {
4351                 // Buffer handle already exists.
4352                 // If it is a ColorBuffer, no-op; Otherwise return error.
4353                 if (buffer_handle_result.value().type !=
4354                     fuchsia_hardware_goldfish::wire::BufferHandleType::kColorBuffer) {
4355                     mesa_loge("%s: BufferHandle %u is not a ColorBuffer", __func__,
4356                               buffer_handle_result.value().id);
4357                     return VK_ERROR_OUT_OF_HOST_MEMORY;
4358                 }
4359             } else if (buffer_handle_result.value().res == ZX_ERR_NOT_FOUND) {
4360                 // Buffer handle not found. Create ColorBuffer based on buffer settings.
4361                 auto format = info.settings.image_format_constraints.pixel_format.type ==
4362                                       fuchsia_sysmem::wire::PixelFormatType::kR8G8B8A8
4363                                   ? fuchsia_hardware_goldfish::wire::ColorBufferFormatType::kRgba
4364                                   : fuchsia_hardware_goldfish::wire::ColorBufferFormatType::kBgra;
4365 
4366                 uint32_t memory_property =
4367                     info.settings.buffer_settings.heap ==
4368                             fuchsia_sysmem::wire::HeapType::kGoldfishDeviceLocal
4369                         ? fuchsia_hardware_goldfish::wire::kMemoryPropertyDeviceLocal
4370                         : fuchsia_hardware_goldfish::wire::kMemoryPropertyHostVisible;
4371 
4372                 fidl::Arena arena;
4373                 fuchsia_hardware_goldfish::wire::CreateColorBuffer2Params createParams(arena);
4374                 createParams.set_width(info.settings.image_format_constraints.min_coded_width)
4375                     .set_height(info.settings.image_format_constraints.min_coded_height)
4376                     .set_format(format)
4377                     .set_memory_property(memory_property);
4378 
4379                 auto result =
4380                     mControlDevice->CreateColorBuffer2(std::move(vmo), std::move(createParams));
4381                 if (result.ok() && result->res == ZX_ERR_ALREADY_EXISTS) {
4382                     mesa_logd("CreateColorBuffer: color buffer already exists\n");
4383                 } else if (!result.ok() || result->res != ZX_OK) {
4384                     mesa_loge("CreateColorBuffer failed: %d:%d", result.status(),
4385                               GET_STATUS_SAFE(result, res));
4386                 }
4387             }
4388 
4389             if (info.settings.buffer_settings.heap ==
4390                 fuchsia_sysmem::wire::HeapType::kGoldfishHostVisible) {
4391                 mesa_logd(
4392                     "%s: Image uses host visible memory heap; set tiling "
4393                     "to linear to match host ImageCreateInfo",
4394                     __func__);
4395                 localCreateInfo.tiling = VK_IMAGE_TILING_LINEAR;
4396             }
4397         }
4398         isSysmemBackedMemory = true;
4399     }
4400 
4401     if (isSysmemBackedMemory) {
4402         localCreateInfo.flags |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
4403     }
4404 #endif
4405 
4406     VkResult res;
4407     VkMemoryRequirements memReqs = {};
4408 
4409     if (supportsCreateResourcesWithRequirements()) {
4410         res = enc->vkCreateImageWithRequirementsGOOGLE(device, &localCreateInfo, pAllocator, pImage,
4411                                                        &memReqs, true /* do lock */);
4412     } else {
4413         res = enc->vkCreateImage(device, &localCreateInfo, pAllocator, pImage, true /* do lock */);
4414     }
4415 
4416     if (res != VK_SUCCESS) return res;
4417 
4418     std::lock_guard<std::recursive_mutex> lock(mLock);
4419 
4420     auto it = info_VkImage.find(*pImage);
4421     if (it == info_VkImage.end()) return VK_ERROR_INITIALIZATION_FAILED;
4422 
4423     auto& info = it->second;
4424 
4425     info.device = device;
4426     info.createInfo = *pCreateInfo;
4427     info.createInfo.pNext = nullptr;
4428 
4429 #ifdef VK_USE_PLATFORM_ANDROID_KHR
4430     if (extFormatAndroidPtr && extFormatAndroidPtr->externalFormat) {
4431         info.hasExternalFormat = true;
4432         info.externalFourccFormat = extFormatAndroidPtr->externalFormat;
4433     }
4434 #endif  // VK_USE_PLATFORM_ANDROID_KHR
4435 
4436     if (supportsCreateResourcesWithRequirements()) {
4437         info.baseRequirementsKnown = true;
4438     }
4439 
4440     if (extImgCiPtr) {
4441         info.external = true;
4442         info.externalCreateInfo = *extImgCiPtr;
4443     }
4444 
4445 #ifdef VK_USE_PLATFORM_FUCHSIA
4446     if (isSysmemBackedMemory) {
4447         info.isSysmemBackedMemory = true;
4448     }
4449 #endif
4450 
// Delete `protocolVersion` check once the goldfish drivers are gone.
4452 #if defined(VK_USE_PLATFORM_ANDROID_KHR)
4453     if (mCaps.vulkanCapset.colorBufferMemoryIndex == 0xFFFFFFFF) {
4454         mCaps.vulkanCapset.colorBufferMemoryIndex = getColorBufferMemoryIndex(context, device);
4455     }
4456     if ((extImgCiPtr && (extImgCiPtr->handleTypes &
4457                          VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID))) {
4458         updateMemoryTypeBits(&memReqs.memoryTypeBits, mCaps.vulkanCapset.colorBufferMemoryIndex);
4459     }
4460 #endif
4461 #if defined(LINUX_GUEST_BUILD)
4462     if (mCaps.vulkanCapset.colorBufferMemoryIndex == 0xFFFFFFFF) {
4463         mCaps.vulkanCapset.colorBufferMemoryIndex = getColorBufferMemoryIndex(context, device);
4464     }
4465     info.isDmaBufImage = isDmaBufImage;
4466     if (info.isDmaBufImage) {
4467         updateMemoryTypeBits(&memReqs.memoryTypeBits, mCaps.vulkanCapset.colorBufferMemoryIndex);
4468         if (localCreateInfo.tiling == VK_IMAGE_TILING_OPTIMAL) {
4469             // Linux WSI calls vkGetImageSubresourceLayout() to query the stride for swapchain
4470             // support. Similarly, stride is also queried from vkGetImageSubresourceLayout() to
4471             // determine the stride for colorBuffer resource creation (guest-side dmabuf resource).
4472             // To satisfy valid usage of this API, must call on the linearPeerImage for the VkImage
4473             // in question. As long as these two use cases match, the rowPitch won't actually be
4474             // used by WSI.
4475             VkImageCreateInfo linearPeerImageCreateInfo = {
4476                 .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
4477                 .pNext = nullptr,
4478                 .flags = {},
4479                 .imageType = VK_IMAGE_TYPE_2D,
4480                 .format = localCreateInfo.format,
4481                 .extent = localCreateInfo.extent,
4482                 .mipLevels = 1,
4483                 .arrayLayers = 1,
4484                 .samples = VK_SAMPLE_COUNT_1_BIT,
4485                 .tiling = VK_IMAGE_TILING_LINEAR,
4486                 .usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT,
4487                 .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
4488                 .queueFamilyIndexCount = 0,
4489                 .pQueueFamilyIndices = nullptr,
4490                 .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
4491             };
4492             res = enc->vkCreateImage(device, &linearPeerImageCreateInfo, pAllocator,
4493                                      &info.linearPeerImage, true /* do lock */);
4494             if (res != VK_SUCCESS) return res;
4495         }
4496     }
4497 #endif
4498 
4499     if (info.baseRequirementsKnown) {
4500         transformImageMemoryRequirementsForGuestLocked(*pImage, &memReqs);
4501         info.baseRequirements = memReqs;
4502     }
4503     return res;
4504 }
4505 
4506 VkResult ResourceTracker::on_vkCreateSamplerYcbcrConversion(
4507     void* context, VkResult, VkDevice device, const VkSamplerYcbcrConversionCreateInfo* pCreateInfo,
4508     const VkAllocationCallbacks* pAllocator, VkSamplerYcbcrConversion* pYcbcrConversion) {
4509     VkSamplerYcbcrConversionCreateInfo localCreateInfo = vk_make_orphan_copy(*pCreateInfo);
4510 
4511 #ifdef VK_USE_PLATFORM_ANDROID_KHR
4512     const VkExternalFormatANDROID* extFormatAndroidPtr =
4513         vk_find_struct_const(pCreateInfo, EXTERNAL_FORMAT_ANDROID);
4514     if (extFormatAndroidPtr) {
4515         if (extFormatAndroidPtr->externalFormat == DRM_FORMAT_RGB565) {
4516             // We don't support external formats on host and it causes RGB565
4517             // to fail in CtsGraphicsTestCases android.graphics.cts.BasicVulkanGpuTest
4518             // when passed as an external format.
4519             // We may consider doing this for all external formats.
4520             // See b/134771579.
4521             *pYcbcrConversion = VK_YCBCR_CONVERSION_DO_NOTHING;
4522             return VK_SUCCESS;
4523         } else if (extFormatAndroidPtr->externalFormat) {
4524             localCreateInfo.format = vk_format_from_fourcc(extFormatAndroidPtr->externalFormat);
4525         }
4526     }
4527 #endif
4528 
4529     VkEncoder* enc = (VkEncoder*)context;
4530     VkResult res = enc->vkCreateSamplerYcbcrConversion(device, &localCreateInfo, pAllocator,
4531                                                        pYcbcrConversion, true /* do lock */);
4532 
4533     if (*pYcbcrConversion == VK_YCBCR_CONVERSION_DO_NOTHING) {
4534         mesa_loge(
4535             "FATAL: vkCreateSamplerYcbcrConversion returned a reserved value "
4536             "(VK_YCBCR_CONVERSION_DO_NOTHING)");
4537         abort();
4538     }
4539     return res;
4540 }
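// Caller-side sketch for the external-format path above (standard Vulkan
// structs; the external format value would come from
// vkGetAndroidHardwareBufferPropertiesANDROID):
//
//   VkExternalFormatANDROID extFormat = {
//       .sType = VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID,
//       .externalFormat = formatProperties.externalFormat,
//   };
//   VkSamplerYcbcrConversionCreateInfo conv = {
//       .sType = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO,
//       .pNext = &extFormat,
//       .format = VK_FORMAT_UNDEFINED,  // required when an external format is used
//   };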
4541 
4542 void ResourceTracker::on_vkDestroySamplerYcbcrConversion(void* context, VkDevice device,
4543                                                          VkSamplerYcbcrConversion ycbcrConversion,
4544                                                          const VkAllocationCallbacks* pAllocator) {
4545     VkEncoder* enc = (VkEncoder*)context;
4546     if (ycbcrConversion != VK_YCBCR_CONVERSION_DO_NOTHING) {
4547         enc->vkDestroySamplerYcbcrConversion(device, ycbcrConversion, pAllocator,
4548                                              true /* do lock */);
4549     }
4550 }
4551 
4552 VkResult ResourceTracker::on_vkCreateSamplerYcbcrConversionKHR(
4553     void* context, VkResult, VkDevice device, const VkSamplerYcbcrConversionCreateInfo* pCreateInfo,
4554     const VkAllocationCallbacks* pAllocator, VkSamplerYcbcrConversion* pYcbcrConversion) {
4555     VkSamplerYcbcrConversionCreateInfo localCreateInfo = vk_make_orphan_copy(*pCreateInfo);
4556 
4557 #if defined(VK_USE_PLATFORM_ANDROID_KHR)
4558     const VkExternalFormatANDROID* extFormatAndroidPtr =
4559         vk_find_struct_const(pCreateInfo, EXTERNAL_FORMAT_ANDROID);
4560     if (extFormatAndroidPtr) {
4561         if (extFormatAndroidPtr->externalFormat == DRM_FORMAT_RGB565) {
4562             // We don't support external formats on host and it causes RGB565
4563             // to fail in CtsGraphicsTestCases android.graphics.cts.BasicVulkanGpuTest
4564             // when passed as an external format.
4565             // We may consider doing this for all external formats.
4566             // See b/134771579.
4567             *pYcbcrConversion = VK_YCBCR_CONVERSION_DO_NOTHING;
4568             return VK_SUCCESS;
4569         } else if (extFormatAndroidPtr->externalFormat) {
4570             localCreateInfo.format = vk_format_from_fourcc(extFormatAndroidPtr->externalFormat);
4571         }
4572     }
4573 #endif
4574 
4575     VkEncoder* enc = (VkEncoder*)context;
4576     VkResult res = enc->vkCreateSamplerYcbcrConversionKHR(device, &localCreateInfo, pAllocator,
4577                                                           pYcbcrConversion, true /* do lock */);
4578 
4579     if (*pYcbcrConversion == VK_YCBCR_CONVERSION_DO_NOTHING) {
4580         mesa_loge(
4581             "FATAL: vkCreateSamplerYcbcrConversionKHR returned a reserved value "
4582             "(VK_YCBCR_CONVERSION_DO_NOTHING)");
4583         abort();
4584     }
4585     return res;
4586 }
4587 
4588 void ResourceTracker::on_vkDestroySamplerYcbcrConversionKHR(
4589     void* context, VkDevice device, VkSamplerYcbcrConversion ycbcrConversion,
4590     const VkAllocationCallbacks* pAllocator) {
4591     VkEncoder* enc = (VkEncoder*)context;
4592     if (ycbcrConversion != VK_YCBCR_CONVERSION_DO_NOTHING) {
4593         enc->vkDestroySamplerYcbcrConversionKHR(device, ycbcrConversion, pAllocator,
4594                                                 true /* do lock */);
4595     }
4596 }
4597 
4598 VkResult ResourceTracker::on_vkCreateSampler(void* context, VkResult, VkDevice device,
4599                                              const VkSamplerCreateInfo* pCreateInfo,
4600                                              const VkAllocationCallbacks* pAllocator,
4601                                              VkSampler* pSampler) {
4602     VkSamplerCreateInfo localCreateInfo = vk_make_orphan_copy(*pCreateInfo);
4603 
4604 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(VK_USE_PLATFORM_FUCHSIA)
4605     vk_struct_chain_iterator structChainIter = vk_make_chain_iterator(&localCreateInfo);
4606     VkSamplerYcbcrConversionInfo localVkSamplerYcbcrConversionInfo;
4607     const VkSamplerYcbcrConversionInfo* samplerYcbcrConversionInfo =
4608         vk_find_struct_const(pCreateInfo, SAMPLER_YCBCR_CONVERSION_INFO);
4609     if (samplerYcbcrConversionInfo) {
4610         if (samplerYcbcrConversionInfo->conversion != VK_YCBCR_CONVERSION_DO_NOTHING) {
4611             localVkSamplerYcbcrConversionInfo = vk_make_orphan_copy(*samplerYcbcrConversionInfo);
4612             vk_append_struct(&structChainIter, &localVkSamplerYcbcrConversionInfo);
4613         }
4614     }
4615 
4616     VkSamplerCustomBorderColorCreateInfoEXT localVkSamplerCustomBorderColorCreateInfo;
4617     const VkSamplerCustomBorderColorCreateInfoEXT* samplerCustomBorderColorCreateInfo =
4618         vk_find_struct_const(pCreateInfo, SAMPLER_CUSTOM_BORDER_COLOR_CREATE_INFO_EXT);
4619     if (samplerCustomBorderColorCreateInfo) {
4620         localVkSamplerCustomBorderColorCreateInfo =
4621             vk_make_orphan_copy(*samplerCustomBorderColorCreateInfo);
4622         vk_append_struct(&structChainIter, &localVkSamplerCustomBorderColorCreateInfo);
4623     }
4624 #endif
4625 
4626     VkEncoder* enc = (VkEncoder*)context;
4627     return enc->vkCreateSampler(device, &localCreateInfo, pAllocator, pSampler, true /* do lock */);
4628 }
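// A usage sketch (illustrative only) of the chain that exercises the
// VkSamplerYcbcrConversionInfo handling above; names are hypothetical.
#if 0  // example only, not compiled
static VkResult exampleCreateYcbcrSampler(VkDevice device, VkSamplerYcbcrConversion conversion,
                                          VkSampler* outSampler) {
    VkSamplerYcbcrConversionInfo conversionInfo = {};
    conversionInfo.sType = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO;
    conversionInfo.conversion = conversion;

    VkSamplerCreateInfo samplerCi = {};
    samplerCi.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
    samplerCi.pNext = &conversionInfo;  // dropped by the tracker if the conversion is DO_NOTHING
    samplerCi.magFilter = VK_FILTER_LINEAR;
    samplerCi.minFilter = VK_FILTER_LINEAR;
    samplerCi.addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
    samplerCi.addressModeV = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
    samplerCi.addressModeW = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
    return vkCreateSampler(device, &samplerCi, nullptr, outSampler);
}
#endif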
4629 
4630 void ResourceTracker::on_vkGetPhysicalDeviceExternalFenceProperties(
4631     void* context, VkPhysicalDevice physicalDevice,
4632     const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo,
4633     VkExternalFenceProperties* pExternalFenceProperties) {
4634     (void)context;
4635     (void)physicalDevice;
4636 
4637     pExternalFenceProperties->exportFromImportedHandleTypes = 0;
4638     pExternalFenceProperties->compatibleHandleTypes = 0;
4639     pExternalFenceProperties->externalFenceFeatures = 0;
4640 
4641     bool syncFd = pExternalFenceInfo->handleType & VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
4642 
4643     if (!syncFd) {
4644         return;
4645     }
4646 
4647 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || DETECT_OS_LINUX
4648     pExternalFenceProperties->exportFromImportedHandleTypes =
4649         VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
4650     pExternalFenceProperties->compatibleHandleTypes = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
4651     pExternalFenceProperties->externalFenceFeatures =
4652         VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT | VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT;
4653 #endif
4654 }
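// A query sketch (illustrative only) showing the struct pair this entry point
// fills in; names are hypothetical.
#if 0  // example only, not compiled
static bool exampleSupportsSyncFdFences(VkPhysicalDevice physicalDevice) {
    VkPhysicalDeviceExternalFenceInfo info = {};
    info.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO;
    info.handleType = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;

    VkExternalFenceProperties props = {};
    props.sType = VK_STRUCTURE_TYPE_EXTERNAL_FENCE_PROPERTIES;

    vkGetPhysicalDeviceExternalFenceProperties(physicalDevice, &info, &props);
    // Non-zero only on Android/Linux builds, per the implementation above.
    return (props.externalFenceFeatures & VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT) != 0;
}
#endif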
4655 
4656 void ResourceTracker::on_vkGetPhysicalDeviceExternalFencePropertiesKHR(
4657     void* context, VkPhysicalDevice physicalDevice,
4658     const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo,
4659     VkExternalFenceProperties* pExternalFenceProperties) {
4660     on_vkGetPhysicalDeviceExternalFenceProperties(context, physicalDevice, pExternalFenceInfo,
4661                                                   pExternalFenceProperties);
4662 }
4663 
4664 VkResult ResourceTracker::on_vkCreateFence(void* context, VkResult input_result, VkDevice device,
4665                                            const VkFenceCreateInfo* pCreateInfo,
4666                                            const VkAllocationCallbacks* pAllocator,
4667                                            VkFence* pFence) {
4668     VkEncoder* enc = (VkEncoder*)context;
4669     VkFenceCreateInfo finalCreateInfo = *pCreateInfo;
4670 
4671     const VkExportFenceCreateInfo* exportFenceInfoPtr =
4672         vk_find_struct_const(pCreateInfo, EXPORT_FENCE_CREATE_INFO);
4673 
4674 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || DETECT_OS_LINUX
4675     bool exportSyncFd = exportFenceInfoPtr && (exportFenceInfoPtr->handleTypes &
4676                                                VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT);
4677 #endif
4678 
4679     input_result =
4680         enc->vkCreateFence(device, &finalCreateInfo, pAllocator, pFence, true /* do lock */);
4681 
4682     if (input_result != VK_SUCCESS) return input_result;
4683 
4684 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || DETECT_OS_LINUX
4685     if (exportSyncFd) {
4686         if (!mFeatureInfo.hasVirtioGpuNativeSync) {
4687             mesa_logd("%s: ensure sync device\n", __func__);
4688             ensureSyncDeviceFd();
4689         }
4690 
4691         mesa_logd("%s: getting fence info\n", __func__);
4692         std::lock_guard<std::recursive_mutex> lock(mLock);
4693         auto it = info_VkFence.find(*pFence);
4694 
4695         if (it == info_VkFence.end()) return VK_ERROR_INITIALIZATION_FAILED;
4696 
4697         auto& info = it->second;
4698 
4699         info.external = true;
4700         info.exportFenceCreateInfo = *exportFenceInfoPtr;
4701         mesa_logd("%s: info set (fence still -1). fence: %p\n", __func__, (void*)(*pFence));
4702         // syncFd is still -1 because we expect the user to explicitly
4703         // export it via vkGetFenceFdKHR.
4704     }
4705 #endif
4706 
4707     return input_result;
4708 }
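// A usage sketch (illustrative only) of creating the kind of exportable fence
// tracked above; names are hypothetical.
#if 0  // example only, not compiled
static VkResult exampleCreateExportableFence(VkDevice device, VkFence* outFence) {
    VkExportFenceCreateInfo exportInfo = {};
    exportInfo.sType = VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO;
    exportInfo.handleTypes = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;

    VkFenceCreateInfo fenceCi = {};
    fenceCi.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
    fenceCi.pNext = &exportInfo;

    // The tracker records info.external and the export info; no sync fd
    // exists until the app calls vkGetFenceFdKHR.
    return vkCreateFence(device, &fenceCi, nullptr, outFence);
}
#endif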
4709 
4710 void ResourceTracker::on_vkDestroyFence(void* context, VkDevice device, VkFence fence,
4711                                         const VkAllocationCallbacks* pAllocator) {
4712     VkEncoder* enc = (VkEncoder*)context;
4713     enc->vkDestroyFence(device, fence, pAllocator, true /* do lock */);
4714 }
4715 
4716 VkResult ResourceTracker::on_vkResetFences(void* context, VkResult, VkDevice device,
4717                                            uint32_t fenceCount, const VkFence* pFences) {
4718     VkEncoder* enc = (VkEncoder*)context;
4719     VkResult res = enc->vkResetFences(device, fenceCount, pFences, true /* do lock */);
4720 
4721     if (res != VK_SUCCESS) return res;
4722 
4723     if (!fenceCount) return res;
4724 
4725     // Permanence: temporary.
4726     // On fence reset, close the fence fd and act as if
4727     // vkGetFenceFdKHR/vkImportFenceFdKHR must be called again.
4728     std::lock_guard<std::recursive_mutex> lock(mLock);
4729     for (uint32_t i = 0; i < fenceCount; ++i) {
4730         VkFence fence = pFences[i];
4731         auto it = info_VkFence.find(fence);
4732         auto& info = it->second;
4733         if (!info.external) continue;
4734 
4735 #if GFXSTREAM_ENABLE_GUEST_GOLDFISH
4736         if (info.syncFd && *info.syncFd >= 0) {
4737             mesa_logd("%s: resetting fence. make fd -1\n", __func__);
4738             goldfish_sync_signal(*info.syncFd);
4739             mSyncHelper->close(*info.syncFd);
4740         }
4741         info.syncFd.reset();
4742 #endif
4743     }
4744 
4745     return res;
4746 }
4747 
4748 VkResult ResourceTracker::on_vkImportFenceFdKHR(void* context, VkResult, VkDevice device,
4749                                                 const VkImportFenceFdInfoKHR* pImportFenceFdInfo) {
4750     (void)context;
4751     (void)device;
4753 
4754     // Transference: copy
4755     // meaning dup() the incoming fd
4756 
4757     bool hasFence = pImportFenceFdInfo->fence != VK_NULL_HANDLE;
4758 
4759     if (!hasFence) return VK_ERROR_OUT_OF_HOST_MEMORY;
4760 
4761 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || DETECT_OS_LINUX
4762 
4763     bool syncFdImport = pImportFenceFdInfo->handleType & VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
4764 
4765     if (!syncFdImport) {
4766         mesa_loge("%s: VK_ERROR_OUT_OF_HOST_MEMORY: no sync fd import\n", __func__);
4767         return VK_ERROR_OUT_OF_HOST_MEMORY;
4768     }
4769 
4770     std::lock_guard<std::recursive_mutex> lock(mLock);
4771     auto it = info_VkFence.find(pImportFenceFdInfo->fence);
4772     if (it == info_VkFence.end()) {
4773         mesa_loge("%s: VK_ERROR_OUT_OF_HOST_MEMORY: no fence info\n", __func__);
4774         return VK_ERROR_OUT_OF_HOST_MEMORY;
4775     }
4776 
4777     auto& info = it->second;
4778 
4779 #if GFXSTREAM_ENABLE_GUEST_GOLDFISH
4780     if (info.syncFd && *info.syncFd >= 0) {
4781         mesa_logd("%s: previous sync fd exists, close it\n", __func__);
4782         goldfish_sync_signal(*info.syncFd);
4783         mSyncHelper->close(*info.syncFd);
4784     }
4785 #endif
4786 
4787     if (pImportFenceFdInfo->fd < 0) {
4788         mesa_logd("%s: import -1, set to -1 and exit\n", __func__);
4789         info.syncFd = -1;
4790     } else {
4791         mesa_logd("%s: import actual fd, dup and close()\n", __func__);
4792 
4793         int fenceCopy = mSyncHelper->dup(pImportFenceFdInfo->fd);
4794         if (fenceCopy < 0) {
4795             mesa_loge("Failed to dup() import sync fd.");
4796             return VK_ERROR_OUT_OF_HOST_MEMORY;
4797         }
4798 
4799         info.syncFd = fenceCopy;
4800 
4801         mSyncHelper->close(pImportFenceFdInfo->fd);
4802     }
4803     return VK_SUCCESS;
4804 #else
4805     return VK_ERROR_OUT_OF_HOST_MEMORY;
4806 #endif
4807 }
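// An import sketch (illustrative only) matching the copy-transference behavior
// above: the tracker dup()s the payload and closes the incoming fd, so the
// caller must not reuse it afterwards. Names are hypothetical.
#if 0  // example only, not compiled
static VkResult exampleImportSyncFd(VkDevice device, VkFence fence, int syncFd) {
    VkImportFenceFdInfoKHR importInfo = {};
    importInfo.sType = VK_STRUCTURE_TYPE_IMPORT_FENCE_FD_INFO_KHR;
    importInfo.fence = fence;
    importInfo.flags = VK_FENCE_IMPORT_TEMPORARY_BIT;  // required for sync fds
    importInfo.handleType = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
    importInfo.fd = syncFd;  // ownership passes to the driver
    return vkImportFenceFdKHR(device, &importInfo);
}
#endif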
4808 
4809 VkResult ResourceTracker::on_vkGetFenceFdKHR(void* context, VkResult, VkDevice device,
4810                                              const VkFenceGetFdInfoKHR* pGetFdInfo, int* pFd) {
4811     // Export operation: verify that the fence supports sync fd export,
4812     // then create a new sync fd for it. A fresh fd is created even when
4813     // the fence has already signaled (see below); -1 is only used for errors.
4815 
4816     VkEncoder* enc = (VkEncoder*)context;
4817 
4818     bool hasFence = pGetFdInfo->fence != VK_NULL_HANDLE;
4819 
4820     if (!hasFence) {
4821         mesa_loge("%s: VK_ERROR_OUT_OF_HOST_MEMORY: no fence\n", __func__);
4822         return VK_ERROR_OUT_OF_HOST_MEMORY;
4823     }
4824 
4825 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || DETECT_OS_LINUX
4826     bool syncFdExport = pGetFdInfo->handleType & VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
4827 
4828     if (!syncFdExport) {
4829         mesa_loge("%s: VK_ERROR_OUT_OF_HOST_MEMORY: no sync fd fence\n", __func__);
4830         return VK_ERROR_OUT_OF_HOST_MEMORY;
4831     }
4832 
4833     VkResult currentFenceStatus =
4834         enc->vkGetFenceStatus(device, pGetFdInfo->fence, true /* do lock */);
4835 
4836     if (VK_ERROR_DEVICE_LOST == currentFenceStatus) {  // Unrecoverable error
4837         mesa_loge("%s: VK_ERROR_DEVICE_LOST: Other error\n", __func__);
4838         *pFd = -1;
4839         return VK_ERROR_DEVICE_LOST;
4840     }
4841 
4842     if (VK_NOT_READY == currentFenceStatus || VK_SUCCESS == currentFenceStatus) {
4843         // Fence is valid. We also create a new sync fd for a signaled
4844         // fence, because ANGLE will use the returned fd directly to
4845         // implement eglDupNativeFenceFDANDROID, where -1 is only returned
4846         // when error occurs.
4847         std::lock_guard<std::recursive_mutex> lock(mLock);
4848 
4849         auto it = info_VkFence.find(pGetFdInfo->fence);
4850         if (it == info_VkFence.end()) {
4851             mesa_loge("%s: VK_ERROR_OUT_OF_HOST_MEMORY: no fence info\n", __func__);
4852             return VK_ERROR_OUT_OF_HOST_MEMORY;
4853         }
4854 
4855         auto& info = it->second;
4856 
4857         bool syncFdCreated = info.external && (info.exportFenceCreateInfo.handleTypes &
4858                                                VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT);
4859 
4860         if (!syncFdCreated) {
4861             mesa_loge("%s: VK_ERROR_OUT_OF_HOST_MEMORY: no sync fd created\n", __func__);
4862             return VK_ERROR_OUT_OF_HOST_MEMORY;
4863         }
4864 
4865         if (mFeatureInfo.hasVirtioGpuNativeSync) {
4866             VkResult result;
4867             int64_t osHandle;
4868             uint64_t hostFenceHandle = get_host_u64_VkFence(pGetFdInfo->fence);
4869 
4870             result = createFence(device, hostFenceHandle, osHandle);
4871             if (result != VK_SUCCESS) return result;
4872 
4873             *pFd = osHandle;
4874         } else {
4875 #if GFXSTREAM_ENABLE_GUEST_GOLDFISH
4876             goldfish_sync_queue_work(
4877                 mSyncDeviceFd, get_host_u64_VkFence(pGetFdInfo->fence) /* the handle */,
4878                 GOLDFISH_SYNC_VULKAN_SEMAPHORE_SYNC /* thread handle (doubling as type field) */,
4879                 pFd);
4880 #endif
4881         }
4882 
4883         // relinquish ownership
4884         info.syncFd.reset();
4885 
4886         mesa_logd("%s: got fd: %d\n", __func__, *pFd);
4887         return VK_SUCCESS;
4888     }
4889     return VK_ERROR_DEVICE_LOST;
4890 #else
4891     return VK_ERROR_OUT_OF_HOST_MEMORY;
4892 #endif
4893 }
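// An export sketch (illustrative only): every successful call yields a fresh
// fd that the application owns and must close, even for a fence that has
// already signaled. Names are hypothetical.
#if 0  // example only, not compiled
static int exampleExportSyncFd(VkDevice device, VkFence fence) {
    VkFenceGetFdInfoKHR getFdInfo = {};
    getFdInfo.sType = VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR;
    getFdInfo.fence = fence;
    getFdInfo.handleType = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;

    int fd = -1;
    if (vkGetFenceFdKHR(device, &getFdInfo, &fd) != VK_SUCCESS) return -1;
    return fd;  // caller must close(fd) when done with it
}
#endif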
4894 
4895 VkResult ResourceTracker::on_vkGetFenceStatus(void* context, VkResult input_result, VkDevice device,
4896                                               VkFence fence) {
4897     VkEncoder* enc = (VkEncoder*)context;
4898 
4899 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || DETECT_OS_LINUX
4900     {
4901         std::unique_lock<std::recursive_mutex> lock(mLock);
4902 
4903         auto fenceInfoIt = info_VkFence.find(fence);
4904         if (fenceInfoIt == info_VkFence.end()) {
4905             mesa_loge("Failed to find VkFence:%p", fence);
4906             return VK_NOT_READY;
4907         }
4908         auto& fenceInfo = fenceInfoIt->second;
4909 
4910         if (fenceInfo.syncFd) {
4911             if (*fenceInfo.syncFd == -1) {
4912                 return VK_SUCCESS;
4913             }
4914 
4915             int syncFdSignaled = mSyncHelper->wait(*fenceInfo.syncFd, /*timeout=*/0) == 0;
4916             return syncFdSignaled ? VK_SUCCESS : VK_NOT_READY;
4917         }
4918     }
4919 #endif
4920 
4921     return enc->vkGetFenceStatus(device, fence, /*doLock=*/true);
4922 }
4923 
4924 VkResult ResourceTracker::on_vkWaitForFences(void* context, VkResult, VkDevice device,
4925                                              uint32_t fenceCount, const VkFence* pFences,
4926                                              VkBool32 waitAll, uint64_t timeout) {
4927 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || DETECT_OS_LINUX
4928     (void)context;
4929     std::vector<int> fencesExternalSyncFds;
4930     std::vector<VkFence> fencesNonExternal;
4931 
4932     std::unique_lock<std::recursive_mutex> lock(mLock);
4933 
4934     for (uint32_t i = 0; i < fenceCount; ++i) {
4935         auto it = info_VkFence.find(pFences[i]);
4936         if (it == info_VkFence.end()) continue;
4937         const auto& info = it->second;
4938         if (info.syncFd) {
4939             if (*info.syncFd >= 0) {
4940                 fencesExternalSyncFds.push_back(*info.syncFd);
4941             }
4942         } else {
4943             fencesNonExternal.push_back(pFences[i]);
4944         }
4945     }
4946 
4947     lock.unlock();
4948 
4949     for (auto fd : fencesExternalSyncFds) {
4950         mesa_logd("Waiting on sync fd: %d", fd);
4951 
4952         std::chrono::steady_clock::time_point begin = std::chrono::steady_clock::now();
4953         // syncHelper works in milliseconds; timeout is in nanoseconds
4954         mSyncHelper->wait(fd, DIV_ROUND_UP(timeout, 1000000));
4955         std::chrono::steady_clock::time_point end = std::chrono::steady_clock::now();
4956 
4957         uint64_t timeTaken =
4958             std::chrono::duration_cast<std::chrono::nanoseconds>(end - begin).count();
4959         if (timeTaken >= timeout) {
4960             return VK_TIMEOUT;
4961         }
4962 
4963         timeout -= timeTaken;
4964         mesa_logd("Done waiting on sync fd: %d", fd);
4965 
4966 #if GFXSTREAM_SYNC_DEBUG
4967         mSyncHelper->debugPrint(fd);
4968 #endif
4969     }
4970 
4971     if (!fencesNonExternal.empty()) {
4972         auto hostConn = ResourceTracker::threadingCallbacks.hostConnectionGetFunc();
4973         auto vkEncoder = ResourceTracker::threadingCallbacks.vkEncoderGetFunc(hostConn);
4974         mesa_logd("vkWaitForFences to host");
4975         return vkEncoder->vkWaitForFences(device, fencesNonExternal.size(),
4976                                           fencesNonExternal.data(), waitAll, timeout,
4977                                           true /* do lock */);
4978     }
4979 
4980     return VK_SUCCESS;
4981 
4982 #else
4983     VkEncoder* enc = (VkEncoder*)context;
4984     return enc->vkWaitForFences(device, fenceCount, pFences, waitAll, timeout, true /* do lock */);
4985 #endif
4986 }
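// Worked example of the timeout handling above (illustrative): a Vulkan
// timeout of 1,500,000 ns must not expire early, so it is rounded up via
// DIV_ROUND_UP(1500000, 1000000) = 2 ms rather than truncated down to 1 ms.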
4987 
4988 VkResult ResourceTracker::on_vkCreateDescriptorPool(void* context, VkResult, VkDevice device,
4989                                                     const VkDescriptorPoolCreateInfo* pCreateInfo,
4990                                                     const VkAllocationCallbacks* pAllocator,
4991                                                     VkDescriptorPool* pDescriptorPool) {
4992     VkEncoder* enc = (VkEncoder*)context;
4993 
4994     VkResult res = enc->vkCreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool,
4995                                                true /* do lock */);
4996 
4997     if (res != VK_SUCCESS) return res;
4998 
4999     VkDescriptorPool pool = *pDescriptorPool;
5000 
5001     struct goldfish_VkDescriptorPool* dp = as_goldfish_VkDescriptorPool(pool);
5002     dp->allocInfo = new DescriptorPoolAllocationInfo;
5003     dp->allocInfo->device = device;
5004     dp->allocInfo->createFlags = pCreateInfo->flags;
5005     dp->allocInfo->maxSets = pCreateInfo->maxSets;
5006     dp->allocInfo->usedSets = 0;
5007 
5008     for (uint32_t i = 0; i < pCreateInfo->poolSizeCount; ++i) {
5009         dp->allocInfo->descriptorCountInfo.push_back({
5010             pCreateInfo->pPoolSizes[i].type, pCreateInfo->pPoolSizes[i].descriptorCount,
5011             0, /* used */
5012         });
5013     }
5014 
5015     if (mFeatureInfo.hasVulkanBatchedDescriptorSetUpdate) {
5016         std::vector<uint64_t> poolIds(pCreateInfo->maxSets);
5017 
5018         uint32_t count = pCreateInfo->maxSets;
5019         enc->vkCollectDescriptorPoolIdsGOOGLE(device, pool, &count, poolIds.data(),
5020                                               true /* do lock */);
5021 
5022         dp->allocInfo->freePoolIds = poolIds;
5023     }
5024 
5025     return res;
5026 }
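// A creation sketch (illustrative only) of the inputs mirrored into
// DescriptorPoolAllocationInfo above; names and counts are hypothetical.
#if 0  // example only, not compiled
static VkResult exampleCreatePool(VkDevice device, VkDescriptorPool* outPool) {
    VkDescriptorPoolSize poolSize = {};
    poolSize.type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
    poolSize.descriptorCount = 64;  // tracked per-type in descriptorCountInfo

    VkDescriptorPoolCreateInfo poolCi = {};
    poolCi.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
    poolCi.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
    poolCi.maxSets = 16;  // also the number of host pool ids collected above
    poolCi.poolSizeCount = 1;
    poolCi.pPoolSizes = &poolSize;
    return vkCreateDescriptorPool(device, &poolCi, nullptr, outPool);
}
#endif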
5027 
5028 void ResourceTracker::on_vkDestroyDescriptorPool(void* context, VkDevice device,
5029                                                  VkDescriptorPool descriptorPool,
5030                                                  const VkAllocationCallbacks* pAllocator) {
5031     if (!descriptorPool) return;
5032 
5033     VkEncoder* enc = (VkEncoder*)context;
5034 
5035     clearDescriptorPoolAndUnregisterDescriptorSets(context, device, descriptorPool);
5036 
5037     enc->vkDestroyDescriptorPool(device, descriptorPool, pAllocator, true /* do lock */);
5038 }
5039 
5040 VkResult ResourceTracker::on_vkResetDescriptorPool(void* context, VkResult, VkDevice device,
5041                                                    VkDescriptorPool descriptorPool,
5042                                                    VkDescriptorPoolResetFlags flags) {
5043     if (!descriptorPool) return VK_ERROR_INITIALIZATION_FAILED;
5044 
5045     VkEncoder* enc = (VkEncoder*)context;
5046 
5047     VkResult res = enc->vkResetDescriptorPool(device, descriptorPool, flags, true /* do lock */);
5048 
5049     if (res != VK_SUCCESS) return res;
5050 
5051     clearDescriptorPoolAndUnregisterDescriptorSets(context, device, descriptorPool);
5052     return res;
5053 }
5054 
5055 VkResult ResourceTracker::on_vkAllocateDescriptorSets(
5056     void* context, VkResult, VkDevice device, const VkDescriptorSetAllocateInfo* pAllocateInfo,
5057     VkDescriptorSet* pDescriptorSets) {
5058     VkEncoder* enc = (VkEncoder*)context;
5059     auto ci = pAllocateInfo;
5060     auto sets = pDescriptorSets;
5061     if (mFeatureInfo.hasVulkanBatchedDescriptorSetUpdate) {
5062         // Using the pool IDs we collected earlier from the host.
5063         VkResult poolAllocResult = validateAndApplyVirtualDescriptorSetAllocation(ci, sets);
5064 
5065         if (poolAllocResult != VK_SUCCESS) return poolAllocResult;
5066 
5067         for (uint32_t i = 0; i < ci->descriptorSetCount; ++i) {
5068             register_VkDescriptorSet(sets[i]);
5069             VkDescriptorSetLayout setLayout =
5070                 as_goldfish_VkDescriptorSet(sets[i])->reified->setLayout;
5071 
5072             // Need to add ref to the set layout in the virtual case
5073             // because the set itself might not be realized on host at the
5074             // same time
5075             struct goldfish_VkDescriptorSetLayout* dsl =
5076                 as_goldfish_VkDescriptorSetLayout(setLayout);
5077             ++dsl->layoutInfo->refcount;
5078         }
5079     } else {
5080         VkResult allocRes = enc->vkAllocateDescriptorSets(device, ci, sets, true /* do lock */);
5081 
5082         if (allocRes != VK_SUCCESS) return allocRes;
5083 
5084         for (uint32_t i = 0; i < ci->descriptorSetCount; ++i) {
5085             applyDescriptorSetAllocation(ci->descriptorPool, ci->pSetLayouts[i]);
5086             fillDescriptorSetInfoForPool(ci->descriptorPool, ci->pSetLayouts[i], sets[i]);
5087         }
5088     }
5089 
5090     return VK_SUCCESS;
5091 }
5092 
5093 VkResult ResourceTracker::on_vkFreeDescriptorSets(void* context, VkResult, VkDevice device,
5094                                                   VkDescriptorPool descriptorPool,
5095                                                   uint32_t descriptorSetCount,
5096                                                   const VkDescriptorSet* pDescriptorSets) {
5097     VkEncoder* enc = (VkEncoder*)context;
5098 
5099     // A bit of robustness so that double-freeing descriptor sets and
5100     // other invalid usage is tolerated.
5101     // https://github.com/KhronosGroup/Vulkan-Docs/issues/1070
5102     // (people expect vkFreeDescriptorSets to always return VK_SUCCESS)
5103     std::vector<VkDescriptorSet> toActuallyFree;
5104     {
5105         std::lock_guard<std::recursive_mutex> lock(mLock);
5106 
5107         // Pool was destroyed
5108         if (info_VkDescriptorPool.find(descriptorPool) == info_VkDescriptorPool.end()) {
5109             return VK_SUCCESS;
5110         }
5111 
5112         if (!descriptorPoolSupportsIndividualFreeLocked(descriptorPool)) return VK_SUCCESS;
5113 
5114         std::vector<VkDescriptorSet> existingDescriptorSets;
5116 
5117         // Check if this descriptor set was in the pool's set of allocated descriptor sets,
5118         // to guard against double free (Double free is allowed by the client)
5119         {
5120             auto allocedSets = as_goldfish_VkDescriptorPool(descriptorPool)->allocInfo->allocedSets;
5121 
5122             for (uint32_t i = 0; i < descriptorSetCount; ++i) {
5123                 if (allocedSets.end() == allocedSets.find(pDescriptorSets[i])) {
5124                     mesa_loge(
5125                         "%s: Warning: descriptor set %p not found in pool. Was this "
5126                         "double-freed?\n",
5127                         __func__, (void*)pDescriptorSets[i]);
5128                     continue;
5129                 }
5130 
5131                 auto it = info_VkDescriptorSet.find(pDescriptorSets[i]);
5132                 if (it == info_VkDescriptorSet.end()) continue;
5133 
5134                 existingDescriptorSets.push_back(pDescriptorSets[i]);
5135             }
5136         }
5137 
5138         for (auto set : existingDescriptorSets) {
5139             if (removeDescriptorSetFromPool(set,
5140                                             mFeatureInfo.hasVulkanBatchedDescriptorSetUpdate)) {
5141                 toActuallyFree.push_back(set);
5142             }
5143         }
5144 
5145         if (toActuallyFree.empty()) return VK_SUCCESS;
5146     }
5147 
5148     if (mFeatureInfo.hasVulkanBatchedDescriptorSetUpdate) {
5149         // In the batched set update case, decrement refcount on the set layout
5150         // and only free on host if we satisfied a pending allocation on the
5151         // host.
5152         for (uint32_t i = 0; i < toActuallyFree.size(); ++i) {
5153             VkDescriptorSetLayout setLayout =
5154                 as_goldfish_VkDescriptorSet(toActuallyFree[i])->reified->setLayout;
5155             decDescriptorSetLayoutRef(context, device, setLayout, nullptr);
5156         }
5157         freeDescriptorSetsIfHostAllocated(enc, device, (uint32_t)toActuallyFree.size(),
5158                                           toActuallyFree.data());
5159     } else {
5160         // In the non-batched set update case, just free them directly.
5161         enc->vkFreeDescriptorSets(device, descriptorPool, (uint32_t)toActuallyFree.size(),
5162                                   toActuallyFree.data(), true /* do lock */);
5163     }
5164     return VK_SUCCESS;
5165 }
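// A robustness sketch (illustrative only): freeing the same set twice is
// tolerated by the logic above; the second call logs a warning and still
// returns VK_SUCCESS. Names are hypothetical.
#if 0  // example only, not compiled
static void exampleDoubleFree(VkDevice device, VkDescriptorPool pool, VkDescriptorSet set) {
    vkFreeDescriptorSets(device, pool, 1, &set);  // actually frees the set
    vkFreeDescriptorSets(device, pool, 1, &set);  // warns, returns VK_SUCCESS
}
#endif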
5166 
5167 VkResult ResourceTracker::on_vkCreateDescriptorSetLayout(
5168     void* context, VkResult, VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
5169     const VkAllocationCallbacks* pAllocator, VkDescriptorSetLayout* pSetLayout) {
5170     VkEncoder* enc = (VkEncoder*)context;
5171 
5172     VkResult res = enc->vkCreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout,
5173                                                     true /* do lock */);
5174 
5175     if (res != VK_SUCCESS) return res;
5176 
5177     struct goldfish_VkDescriptorSetLayout* dsl = as_goldfish_VkDescriptorSetLayout(*pSetLayout);
5178     dsl->layoutInfo = new DescriptorSetLayoutInfo;
5179     for (uint32_t i = 0; i < pCreateInfo->bindingCount; ++i) {
5180         dsl->layoutInfo->bindings.push_back(pCreateInfo->pBindings[i]);
5181     }
5182     dsl->layoutInfo->refcount = 1;
5183 
5184     return res;
5185 }
5186 
5187 void ResourceTracker::on_vkUpdateDescriptorSets(void* context, VkDevice device,
5188                                                 uint32_t descriptorWriteCount,
5189                                                 const VkWriteDescriptorSet* pDescriptorWrites,
5190                                                 uint32_t descriptorCopyCount,
5191                                                 const VkCopyDescriptorSet* pDescriptorCopies) {
5192     VkEncoder* enc = (VkEncoder*)context;
5193 
5194     std::vector<VkDescriptorImageInfo> transformedImageInfos;
5195     std::vector<VkWriteDescriptorSet> transformedWrites(descriptorWriteCount);
5196 
5197     memcpy(transformedWrites.data(), pDescriptorWrites,
5198            sizeof(VkWriteDescriptorSet) * descriptorWriteCount);
5199 
5200     size_t imageInfosNeeded = 0;
5201     for (uint32_t i = 0; i < descriptorWriteCount; ++i) {
5202         if (!isDescriptorTypeImageInfo(transformedWrites[i].descriptorType)) continue;
5203         if (!transformedWrites[i].pImageInfo) continue;
5204 
5205         imageInfosNeeded += transformedWrites[i].descriptorCount;
5206     }
5207 
5208     transformedImageInfos.resize(imageInfosNeeded);
5209 
5210     size_t imageInfoIndex = 0;
5211     for (uint32_t i = 0; i < descriptorWriteCount; ++i) {
5212         if (!isDescriptorTypeImageInfo(transformedWrites[i].descriptorType)) continue;
5213         if (!transformedWrites[i].pImageInfo) continue;
5214 
5215         for (uint32_t j = 0; j < transformedWrites[i].descriptorCount; ++j) {
5216             transformedImageInfos[imageInfoIndex] = transformedWrites[i].pImageInfo[j];
5217             ++imageInfoIndex;
5218         }
5219         transformedWrites[i].pImageInfo =
5220             &transformedImageInfos[imageInfoIndex - transformedWrites[i].descriptorCount];
5221     }
5222 
5223     {
5224         // Validate and filter samplers
5225         std::lock_guard<std::recursive_mutex> lock(mLock);
5226         size_t imageInfoIndex = 0;
5227         for (uint32_t i = 0; i < descriptorWriteCount; ++i) {
5228             if (!isDescriptorTypeImageInfo(transformedWrites[i].descriptorType)) continue;
5229             if (!transformedWrites[i].pImageInfo) continue;
5230 
5231             bool isImmutableSampler = descriptorBindingIsImmutableSampler(
5232                 transformedWrites[i].dstSet, transformedWrites[i].dstBinding);
5233 
5234             for (uint32_t j = 0; j < transformedWrites[i].descriptorCount; ++j) {
5235                 if (isImmutableSampler) {
5236                     transformedImageInfos[imageInfoIndex].sampler = 0;
5237                 }
5238                 transformedImageInfos[imageInfoIndex] =
5239                     filterNonexistentSampler(transformedImageInfos[imageInfoIndex]);
5240                 ++imageInfoIndex;
5241             }
5242         }
5243     }
5244 
5245     if (mFeatureInfo.hasVulkanBatchedDescriptorSetUpdate) {
5246         for (uint32_t i = 0; i < descriptorWriteCount; ++i) {
5247             VkDescriptorSet set = transformedWrites[i].dstSet;
5248             doEmulatedDescriptorWrite(&transformedWrites[i],
5249                                       as_goldfish_VkDescriptorSet(set)->reified);
5250         }
5251 
5252         for (uint32_t i = 0; i < descriptorCopyCount; ++i) {
5253             doEmulatedDescriptorCopy(
5254                 &pDescriptorCopies[i],
5255                 as_goldfish_VkDescriptorSet(pDescriptorCopies[i].srcSet)->reified,
5256                 as_goldfish_VkDescriptorSet(pDescriptorCopies[i].dstSet)->reified);
5257         }
5258     } else {
5259         enc->vkUpdateDescriptorSets(device, descriptorWriteCount, transformedWrites.data(),
5260                                     descriptorCopyCount, pDescriptorCopies, true /* do lock */);
5261     }
5262 }
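// A write sketch (illustrative only) of the case the filtering above handles:
// on a binding with immutable samplers, the sampler member of the image info
// is ignored, so the tracker zeroes it before validation. Names are hypothetical.
#if 0  // example only, not compiled
static void exampleWriteCombinedImageSampler(VkDevice device, VkDescriptorSet set,
                                             VkImageView view) {
    VkDescriptorImageInfo imageInfo = {};
    imageInfo.sampler = VK_NULL_HANDLE;  // ignored for immutable-sampler bindings
    imageInfo.imageView = view;
    imageInfo.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;

    VkWriteDescriptorSet write = {};
    write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    write.dstSet = set;
    write.dstBinding = 0;  // assumed to use immutable samplers in its layout
    write.descriptorCount = 1;
    write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
    write.pImageInfo = &imageInfo;
    vkUpdateDescriptorSets(device, 1, &write, 0, nullptr);
}
#endif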
5263 
5264 void ResourceTracker::on_vkDestroyImage(void* context, VkDevice device, VkImage image,
5265                                         const VkAllocationCallbacks* pAllocator) {
5266 #ifdef VK_USE_PLATFORM_ANDROID_KHR
5267     {
5268         std::lock_guard<std::recursive_mutex> lock(mLock);  // do not guard the encoder;
5269                                                             // doing so may deadlock, b/243339973
5270 
5271         // Wait for any pending QSRIs to prevent a race between the Gfxstream host
5272         // potentially processing the below `vkDestroyImage()` from the VK encoder
5273         // command stream before processing a previously submitted
5274         // `VIRTIO_GPU_NATIVE_SYNC_VULKAN_QSRI_EXPORT` from the virtio-gpu command
5275         // stream which relies on the image existing.
5276         auto imageInfoIt = info_VkImage.find(image);
5277         if (imageInfoIt != info_VkImage.end()) {
5278             auto& imageInfo = imageInfoIt->second;
5279             for (int syncFd : imageInfo.pendingQsriSyncFds) {
5280                 int syncWaitRet = mSyncHelper->wait(syncFd, 3000);
5281                 if (syncWaitRet < 0) {
5282                     mesa_loge("%s: Failed to wait for pending QSRI sync: strerror: %s errno: %d",
5283                               __func__, strerror(errno), errno);
5284                 }
5285 
5286 #if GFXSTREAM_SYNC_DEBUG
5287                 mSyncHelper->debugPrint(syncFd);
5288 #endif
5289                 mSyncHelper->close(syncFd);
5290             }
5291             imageInfo.pendingQsriSyncFds.clear();
5292         }
5293     }
5294 #endif
5295     VkEncoder* enc = (VkEncoder*)context;
5296 #if defined(LINUX_GUEST_BUILD)
5297     auto imageInfoIt = info_VkImage.find(image);
5298     if (imageInfoIt != info_VkImage.end()) {
5299         auto& imageInfo = imageInfoIt->second;
5300         if (imageInfo.linearPeerImage) {
5301             enc->vkDestroyImage(device, imageInfo.linearPeerImage, pAllocator, true /* do lock */);
5302         }
5303     }
5304 #endif
5305     enc->vkDestroyImage(device, image, pAllocator, true /* do lock */);
5306 }
5307 
5308 void ResourceTracker::on_vkGetImageMemoryRequirements(void* context, VkDevice device, VkImage image,
5309                                                       VkMemoryRequirements* pMemoryRequirements) {
5310     std::unique_lock<std::recursive_mutex> lock(mLock);
5311 
5312     auto it = info_VkImage.find(image);
5313     if (it == info_VkImage.end()) return;
5314 
5315     auto& info = it->second;
5316 
5317     if (info.baseRequirementsKnown) {
5318         *pMemoryRequirements = info.baseRequirements;
5319         return;
5320     }
5321 
5322     lock.unlock();
5323 
5324     VkEncoder* enc = (VkEncoder*)context;
5325 
5326     enc->vkGetImageMemoryRequirements(device, image, pMemoryRequirements, true /* do lock */);
5327 
5328     lock.lock();
5329 
5330     transformImageMemoryRequirementsForGuestLocked(image, pMemoryRequirements);
5331 
5332     info.baseRequirementsKnown = true;
5333     info.baseRequirements = *pMemoryRequirements;
5334 }
5335 
5336 void ResourceTracker::on_vkGetImageMemoryRequirements2(void* context, VkDevice device,
5337                                                        const VkImageMemoryRequirementsInfo2* pInfo,
5338                                                        VkMemoryRequirements2* pMemoryRequirements) {
5339     VkEncoder* enc = (VkEncoder*)context;
5340     enc->vkGetImageMemoryRequirements2(device, pInfo, pMemoryRequirements, true /* do lock */);
5341     transformImageMemoryRequirements2ForGuest(pInfo->image, pMemoryRequirements);
5342 }
5343 
5344 void ResourceTracker::on_vkGetImageMemoryRequirements2KHR(
5345     void* context, VkDevice device, const VkImageMemoryRequirementsInfo2* pInfo,
5346     VkMemoryRequirements2* pMemoryRequirements) {
5347     VkEncoder* enc = (VkEncoder*)context;
5348     enc->vkGetImageMemoryRequirements2KHR(device, pInfo, pMemoryRequirements, true /* do lock */);
5349     transformImageMemoryRequirements2ForGuest(pInfo->image, pMemoryRequirements);
5350 }
5351 
5352 void ResourceTracker::on_vkGetImageSubresourceLayout(void* context, VkDevice device, VkImage image,
5353                                                      const VkImageSubresource* pSubresource,
5354                                                      VkSubresourceLayout* pLayout) {
5355     VkEncoder* enc = (VkEncoder*)context;
5356     VkImage targetImage = image;
5357 #if defined(LINUX_GUEST_BUILD)
5358     auto it = info_VkImage.find(image);
5359     if (it == info_VkImage.end()) return;
5360     const auto& info = it->second;
5361     if (info.linearPeerImage) {
5362         targetImage = info.linearPeerImage;
5363     }
5364 #endif
5365     enc->vkGetImageSubresourceLayout(device, targetImage, pSubresource, pLayout,
5366                                      true /* do lock */);
5367 }
5368 
5369 VkResult ResourceTracker::on_vkBindImageMemory(void* context, VkResult, VkDevice device,
5370                                                VkImage image, VkDeviceMemory memory,
5371                                                VkDeviceSize memoryOffset) {
5372     VkEncoder* enc = (VkEncoder*)context;
5373     // Do not forward calls with invalid handles to host.
5374     if (info_VkDeviceMemory.find(memory) == info_VkDeviceMemory.end() ||
5375         info_VkImage.find(image) == info_VkImage.end()) {
5376         return VK_ERROR_OUT_OF_DEVICE_MEMORY;
5377     }
5378     return enc->vkBindImageMemory(device, image, memory, memoryOffset, true /* do lock */);
5379 }
5380 
5381 VkResult ResourceTracker::on_vkBindImageMemory2(void* context, VkResult, VkDevice device,
5382                                                 uint32_t bindingCount,
5383                                                 const VkBindImageMemoryInfo* pBindInfos) {
5384     VkEncoder* enc = (VkEncoder*)context;
5385 
5386     if (bindingCount < 1 || !pBindInfos) {
5387         return VK_ERROR_OUT_OF_DEVICE_MEMORY;
5388     }
5389 
5390     for (uint32_t i = 0; i < bindingCount; i++) {
5391         const VkBindImageMemoryInfo& bimi = pBindInfos[i];
5392 
5393         auto imageIt = info_VkImage.find(bimi.image);
5394         if (imageIt == info_VkImage.end()) {
5395             return VK_ERROR_OUT_OF_DEVICE_MEMORY;
5396         }
5397 
5398         if (bimi.memory != VK_NULL_HANDLE) {
5399             auto memoryIt = info_VkDeviceMemory.find(bimi.memory);
5400             if (memoryIt == info_VkDeviceMemory.end()) {
5401                 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
5402             }
5403         }
5404     }
5405 
5406     return enc->vkBindImageMemory2(device, bindingCount, pBindInfos, true /* do lock */);
5407 }
5408 
5409 VkResult ResourceTracker::on_vkBindImageMemory2KHR(void* context, VkResult result, VkDevice device,
5410                                                    uint32_t bindingCount,
5411                                                    const VkBindImageMemoryInfo* pBindInfos) {
5412     return on_vkBindImageMemory2(context, result, device, bindingCount, pBindInfos);
5413 }
5414 
5415 VkResult ResourceTracker::on_vkCreateBuffer(void* context, VkResult, VkDevice device,
5416                                             const VkBufferCreateInfo* pCreateInfo,
5417                                             const VkAllocationCallbacks* pAllocator,
5418                                             VkBuffer* pBuffer) {
5419     VkEncoder* enc = (VkEncoder*)context;
5420 
5421     VkBufferCreateInfo localCreateInfo = vk_make_orphan_copy(*pCreateInfo);
5422     vk_struct_chain_iterator structChainIter = vk_make_chain_iterator(&localCreateInfo);
5423     VkExternalMemoryBufferCreateInfo localExtBufCi;
5424 
5425     const VkExternalMemoryBufferCreateInfo* extBufCiPtr =
5426         vk_find_struct_const(pCreateInfo, EXTERNAL_MEMORY_BUFFER_CREATE_INFO);
5427     if (extBufCiPtr) {
5428         localExtBufCi = vk_make_orphan_copy(*extBufCiPtr);
5429         vk_append_struct(&structChainIter, &localExtBufCi);
5430     }
5431 
5432     VkBufferOpaqueCaptureAddressCreateInfo localCapAddrCi;
5433     const VkBufferOpaqueCaptureAddressCreateInfo* pCapAddrCi =
5434         vk_find_struct_const(pCreateInfo, BUFFER_OPAQUE_CAPTURE_ADDRESS_CREATE_INFO);
5435     if (pCapAddrCi) {
5436         localCapAddrCi = vk_make_orphan_copy(*pCapAddrCi);
5437         vk_append_struct(&structChainIter, &localCapAddrCi);
5438     }
5439 
5440     VkBufferDeviceAddressCreateInfoEXT localDevAddrCi;
5441     const VkBufferDeviceAddressCreateInfoEXT* pDevAddrCi =
5442         vk_find_struct_const(pCreateInfo, BUFFER_DEVICE_ADDRESS_CREATE_INFO_EXT);
5443     if (pDevAddrCi) {
5444         localDevAddrCi = vk_make_orphan_copy(*pDevAddrCi);
5445         vk_append_struct(&structChainIter, &localDevAddrCi);
5446     }
5447 
5448 #ifdef VK_USE_PLATFORM_FUCHSIA
5449     std::optional<zx::vmo> vmo;
5450     bool isSysmemBackedMemory = false;
5451 
5452     if (extBufCiPtr &&
5453         (extBufCiPtr->handleTypes & VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA)) {
5454         isSysmemBackedMemory = true;
5455     }
5456 
5457     const VkBufferCollectionBufferCreateInfoFUCHSIA* extBufferCollectionPtr =
5458         vk_find_struct_const(pCreateInfo, BUFFER_COLLECTION_BUFFER_CREATE_INFO_FUCHSIA);
5459 
5460     if (extBufferCollectionPtr) {
5461         const auto& collection =
5462             *reinterpret_cast<fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(
5463                 extBufferCollectionPtr->collection);
5464         uint32_t index = extBufferCollectionPtr->index;
5465 
5466         auto result = collection->WaitForBuffersAllocated();
5467         if (result.ok() && result->status == ZX_OK) {
5468             auto& info = result->buffer_collection_info;
5469             if (index < info.buffer_count) {
5470                 vmo = std::make_optional<zx::vmo>(std::move(info.buffers[index].vmo));
5471             }
5472         } else {
5473             mesa_loge("WaitForBuffersAllocated failed: %d %d", result.status(),
5474                       GET_STATUS_SAFE(result, status));
5475         }
5476 
5477         if (vmo && vmo->is_valid()) {
5478             fidl::Arena arena;
5479             fuchsia_hardware_goldfish::wire::CreateBuffer2Params createParams(arena);
5480             createParams.set_size(arena, pCreateInfo->size)
5481                 .set_memory_property(fuchsia_hardware_goldfish::wire::kMemoryPropertyDeviceLocal);
5482 
5483             auto result = mControlDevice->CreateBuffer2(std::move(*vmo), createParams);
5484             if (!result.ok() ||
5485                 (result->is_error() && result->error_value() != ZX_ERR_ALREADY_EXISTS)) {
5486                 mesa_loge("CreateBuffer2 failed: %d:%d", result.status(),
5487                           GET_STATUS_SAFE(result, error_value()));
5488             }
5489             isSysmemBackedMemory = true;
5490         }
5491     }
5492 #endif  // VK_USE_PLATFORM_FUCHSIA
5493 
5494     VkResult res;
5495     VkMemoryRequirements memReqs;
5496 
5497     if (supportsCreateResourcesWithRequirements()) {
5498         res = enc->vkCreateBufferWithRequirementsGOOGLE(device, &localCreateInfo, pAllocator,
5499                                                         pBuffer, &memReqs, true /* do lock */);
5500     } else {
5501         res =
5502             enc->vkCreateBuffer(device, &localCreateInfo, pAllocator, pBuffer, true /* do lock */);
5503     }
5504 
5505     if (res != VK_SUCCESS) return res;
5506 
5507 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || DETECT_OS_LINUX
5508     if (mCaps.vulkanCapset.colorBufferMemoryIndex == 0xFFFFFFFF) {
5509         mCaps.vulkanCapset.colorBufferMemoryIndex = getColorBufferMemoryIndex(context, device);
5510     }
5511     if (extBufCiPtr &&
5512         ((extBufCiPtr->handleTypes &
5513           VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID) ||
5514          (extBufCiPtr->handleTypes & VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT))) {
5515         updateMemoryTypeBits(&memReqs.memoryTypeBits, mCaps.vulkanCapset.colorBufferMemoryIndex);
5516     }
5517 #endif
5518 
5519     std::lock_guard<std::recursive_mutex> lock(mLock);
5520 
5521     auto it = info_VkBuffer.find(*pBuffer);
5522     if (it == info_VkBuffer.end()) return VK_ERROR_INITIALIZATION_FAILED;
5523 
5524     auto& info = it->second;
5525 
5526     info.createInfo = localCreateInfo;
5527     info.createInfo.pNext = nullptr;
5528 
5529     if (supportsCreateResourcesWithRequirements()) {
5530         info.baseRequirementsKnown = true;
5531         info.baseRequirements = memReqs;
5532     }
5533 
5534     if (extBufCiPtr) {
5535         info.external = true;
5536         info.externalCreateInfo = *extBufCiPtr;
5537     }
5538 
5539 #ifdef VK_USE_PLATFORM_FUCHSIA
5540     if (isSysmemBackedMemory) {
5541         info.isSysmemBackedMemory = true;
5542     }
5543 #endif
5544 
5545     return res;
5546 }
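// A creation sketch (illustrative only) of an external-memory buffer that
// takes the extBufCiPtr path above; names are hypothetical.
#if 0  // example only, not compiled
static VkResult exampleCreateDmaBufBuffer(VkDevice device, VkDeviceSize size,
                                          VkBuffer* outBuffer) {
    VkExternalMemoryBufferCreateInfo extMemCi = {};
    extMemCi.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO;
    extMemCi.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT;

    VkBufferCreateInfo bufferCi = {};
    bufferCi.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    bufferCi.pNext = &extMemCi;
    bufferCi.size = size;
    bufferCi.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    bufferCi.sharingMode = VK_SHARING_MODE_EXCLUSIVE;

    // The tracker widens memoryTypeBits with the color-buffer memory index
    // for this handle type (see above).
    return vkCreateBuffer(device, &bufferCi, nullptr, outBuffer);
}
#endif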
5547 
5548 void ResourceTracker::on_vkDestroyBuffer(void* context, VkDevice device, VkBuffer buffer,
5549                                          const VkAllocationCallbacks* pAllocator) {
5550     VkEncoder* enc = (VkEncoder*)context;
5551     enc->vkDestroyBuffer(device, buffer, pAllocator, true /* do lock */);
5552 }
5553 
5554 void ResourceTracker::on_vkGetBufferMemoryRequirements(void* context, VkDevice device,
5555                                                        VkBuffer buffer,
5556                                                        VkMemoryRequirements* pMemoryRequirements) {
5557     std::unique_lock<std::recursive_mutex> lock(mLock);
5558 
5559     auto it = info_VkBuffer.find(buffer);
5560     if (it == info_VkBuffer.end()) return;
5561 
5562     auto& info = it->second;
5563 
5564     if (info.baseRequirementsKnown) {
5565         *pMemoryRequirements = info.baseRequirements;
5566         return;
5567     }
5568 
5569     lock.unlock();
5570 
5571     VkEncoder* enc = (VkEncoder*)context;
5572     enc->vkGetBufferMemoryRequirements(device, buffer, pMemoryRequirements, true /* do lock */);
5573 
5574     lock.lock();
5575 
5576     info.baseRequirementsKnown = true;
5577     info.baseRequirements = *pMemoryRequirements;
5578 }
5579 
5580 void ResourceTracker::on_vkGetBufferMemoryRequirements2(
5581     void* context, VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo,
5582     VkMemoryRequirements2* pMemoryRequirements) {
5583     VkEncoder* enc = (VkEncoder*)context;
5584     enc->vkGetBufferMemoryRequirements2(device, pInfo, pMemoryRequirements, true /* do lock */);
5585     transformBufferMemoryRequirements2ForGuest(pInfo->buffer, pMemoryRequirements);
5586 }
5587 
5588 void ResourceTracker::on_vkGetBufferMemoryRequirements2KHR(
5589     void* context, VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo,
5590     VkMemoryRequirements2* pMemoryRequirements) {
5591     VkEncoder* enc = (VkEncoder*)context;
5592     enc->vkGetBufferMemoryRequirements2KHR(device, pInfo, pMemoryRequirements, true /* do lock */);
5593     transformBufferMemoryRequirements2ForGuest(pInfo->buffer, pMemoryRequirements);
5594 }
5595 
5596 VkResult ResourceTracker::on_vkBindBufferMemory(void* context, VkResult, VkDevice device,
5597                                                 VkBuffer buffer, VkDeviceMemory memory,
5598                                                 VkDeviceSize memoryOffset) {
5599     VkEncoder* enc = (VkEncoder*)context;
5600     return enc->vkBindBufferMemory(device, buffer, memory, memoryOffset, true /* do lock */);
5601 }
5602 
5603 VkResult ResourceTracker::on_vkBindBufferMemory2(void* context, VkResult, VkDevice device,
5604                                                  uint32_t bindInfoCount,
5605                                                  const VkBindBufferMemoryInfo* pBindInfos) {
5606     VkEncoder* enc = (VkEncoder*)context;
5607     return enc->vkBindBufferMemory2(device, bindInfoCount, pBindInfos, true /* do lock */);
5608 }
5609 
5610 VkResult ResourceTracker::on_vkBindBufferMemory2KHR(void* context, VkResult, VkDevice device,
5611                                                     uint32_t bindInfoCount,
5612                                                     const VkBindBufferMemoryInfo* pBindInfos) {
5613     VkEncoder* enc = (VkEncoder*)context;
5614     return enc->vkBindBufferMemory2KHR(device, bindInfoCount, pBindInfos, true /* do lock */);
5615 }
5616 
5617 VkResult ResourceTracker::on_vkCreateSemaphore(void* context, VkResult input_result,
5618                                                VkDevice device,
5619                                                const VkSemaphoreCreateInfo* pCreateInfo,
5620                                                const VkAllocationCallbacks* pAllocator,
5621                                                VkSemaphore* pSemaphore) {
5622     (void)input_result;
5623     VkEncoder* enc = (VkEncoder*)context;
5624 
5625     VkSemaphoreCreateInfo finalCreateInfo = *pCreateInfo;
5626 
5627     const VkExportSemaphoreCreateInfoKHR* exportSemaphoreInfoPtr =
5628         vk_find_struct_const(pCreateInfo, EXPORT_SEMAPHORE_CREATE_INFO);
5629 
5630 #ifdef VK_USE_PLATFORM_FUCHSIA
5631     bool exportEvent =
5632         exportSemaphoreInfoPtr && (exportSemaphoreInfoPtr->handleTypes &
5633                                    VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA);
5634 
5635     if (exportEvent) {
5636         finalCreateInfo.pNext = nullptr;
5637         // If we have timeline semaphores externally, leave it there.
5638         const VkSemaphoreTypeCreateInfo* typeCi =
5639             vk_find_struct_const(pCreateInfo, SEMAPHORE_TYPE_CREATE_INFO);
5640         if (typeCi) finalCreateInfo.pNext = typeCi;
5641     }
5642 #endif
5643 
5644 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || DETECT_OS_LINUX
5645     bool exportSyncFd = exportSemaphoreInfoPtr && (exportSemaphoreInfoPtr->handleTypes &
5646                                                    VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT);
5647 
5648     if (exportSyncFd) {
5649         finalCreateInfo.pNext = nullptr;
5650         // If we have timeline semaphores externally, leave it there.
5651         const VkSemaphoreTypeCreateInfo* typeCi =
5652             vk_find_struct_const(pCreateInfo, SEMAPHORE_TYPE_CREATE_INFO);
5653         if (typeCi) finalCreateInfo.pNext = typeCi;
5654     }
5655 #endif
5656     input_result = enc->vkCreateSemaphore(device, &finalCreateInfo, pAllocator, pSemaphore,
5657                                           true /* do lock */);
5658 
5659     zx_handle_t event_handle = ZX_HANDLE_INVALID;
5660 
5661 #ifdef VK_USE_PLATFORM_FUCHSIA
5662     if (exportEvent) {
5663         zx_event_create(0, &event_handle);
5664     }
5665 #endif
5666 
5667     std::lock_guard<std::recursive_mutex> lock(mLock);
5668 
5669     auto it = info_VkSemaphore.find(*pSemaphore);
5670     if (it == info_VkSemaphore.end()) return VK_ERROR_INITIALIZATION_FAILED;
5671 
5672     auto& info = it->second;
5673 
5674     info.device = device;
5675     info.eventHandle = event_handle;
5676 #ifdef VK_USE_PLATFORM_FUCHSIA
5677     info.eventKoid = getEventKoid(info.eventHandle);
5678 #endif
5679 
5680 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || DETECT_OS_LINUX
5681     if (exportSyncFd) {
5682         if (mFeatureInfo.hasVirtioGpuNativeSync &&
5683             !(mCaps.params[kParamFencePassing] && mCaps.vulkanCapset.externalSync)) {
5684             VkResult result;
5685             int64_t osHandle;
5686             uint64_t hostFenceHandle = get_host_u64_VkSemaphore(*pSemaphore);
5687 
5688             result = createFence(device, hostFenceHandle, osHandle);
5689             if (result != VK_SUCCESS) return result;
5690 
5691             info.syncFd.emplace(osHandle);
5692         } else {
5693 #if GFXSTREAM_ENABLE_GUEST_GOLDFISH
5694             ensureSyncDeviceFd();
5695 
5696             if (exportSyncFd) {
5697                 int syncFd = -1;
5698                 goldfish_sync_queue_work(
5699                     mSyncDeviceFd, get_host_u64_VkSemaphore(*pSemaphore) /* the handle */,
5700                     GOLDFISH_SYNC_VULKAN_SEMAPHORE_SYNC /* thread handle (doubling as type field) */,
5701                     &syncFd);
5703                 info.syncFd.emplace(syncFd);
5704             }
5705 #endif
5706         }
5707     }
5708 #endif
5709 
5710     return VK_SUCCESS;
5711 }
5712 
5713 void ResourceTracker::on_vkDestroySemaphore(void* context, VkDevice device, VkSemaphore semaphore,
5714                                             const VkAllocationCallbacks* pAllocator) {
5715     VkEncoder* enc = (VkEncoder*)context;
5716     enc->vkDestroySemaphore(device, semaphore, pAllocator, true /* do lock */);
5717 }
5718 
5719 // https://www.khronos.org/registry/vulkan/specs/1.0-extensions/html/vkspec.html#vkGetSemaphoreFdKHR
5720 // Each call to vkGetSemaphoreFdKHR must create a new file descriptor and transfer ownership
5721 // of it to the application. To avoid leaking resources, the application must release ownership
5722 // of the file descriptor when it is no longer needed.
5723 VkResult ResourceTracker::on_vkGetSemaphoreFdKHR(void* context, VkResult, VkDevice device,
5724                                                  const VkSemaphoreGetFdInfoKHR* pGetFdInfo,
5725                                                  int* pFd) {
5726 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || DETECT_OS_LINUX
5727     VkEncoder* enc = (VkEncoder*)context;
5728     bool getSyncFd = pGetFdInfo->handleType & VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
5729 
5730     if (getSyncFd) {
5731         if (mCaps.params[kParamFencePassing] && mCaps.vulkanCapset.externalSync) {
5732             uint64_t syncId = ++mAtomicId;
5733             int64_t osHandle = -1;
5734 
5735             VkResult result = enc->vkGetSemaphoreGOOGLE(device, pGetFdInfo->semaphore, syncId,
5736                                                         true /* do lock */);
5737             if (result != VK_SUCCESS) {
5738                 mesa_loge("unable to get the semaphore");
5739                 return result;
5740             }
5741 
5742             result = acquireSync(syncId, osHandle);
5743             if (result != VK_SUCCESS) {
5744                 mesa_loge("unable to create host sync object");
5745                 return result;
5746             }
5747 
5748             *pFd = (int)osHandle;
5749             return VK_SUCCESS;
5750         } else {
5751             // Doesn't this assume that the sync file descriptor generated via the
5752             // non-fence-passing path during "on_vkCreateSemaphore" is the same one
5753             // that would be generated via the guest's "vkGetSemaphoreFdKHR" call?
5754             std::lock_guard<std::recursive_mutex> lock(mLock);
5755             auto it = info_VkSemaphore.find(pGetFdInfo->semaphore);
5756             if (it == info_VkSemaphore.end()) return VK_ERROR_OUT_OF_HOST_MEMORY;
5757             auto& semInfo = it->second;
5758             // syncFd is supposed to have value.
5759             *pFd = mSyncHelper->dup(semInfo.syncFd.value_or(-1));
5760             return VK_SUCCESS;
5761         }
5762     } else {
5763         // opaque fd
5764         int hostFd = 0;
5765         int32_t size = 0;
5766         VkResult result = enc->vkGetSemaphoreFdKHR(device, pGetFdInfo, &hostFd, true /* do lock */);
5767         if (result != VK_SUCCESS) {
5768             return result;
5769         }
5770         *pFd = os_create_anonymous_file(size, "vk_opaque_fd");
5771         int write_result = write(*pFd, &hostFd, sizeof(hostFd));
5772         (void)write_result;
5773         return VK_SUCCESS;
5774     }
5775 #else
5776     (void)context;
5777     (void)device;
5778     (void)pGetFdInfo;
5779     (void)pFd;
5780     return VK_ERROR_INCOMPATIBLE_DRIVER;
5781 #endif
5782 }
5783 
VkResult ResourceTracker::on_vkImportSemaphoreFdKHR(
    void* context, VkResult input_result, VkDevice device,
    const VkImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo) {
#if defined(VK_USE_PLATFORM_ANDROID_KHR) || DETECT_OS_LINUX
    VkEncoder* enc = (VkEncoder*)context;
    if (input_result != VK_SUCCESS) {
        return input_result;
    }

    if (pImportSemaphoreFdInfo->handleType & VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT) {
        std::lock_guard<std::recursive_mutex> lock(mLock);

        auto semaphoreIt = info_VkSemaphore.find(pImportSemaphoreFdInfo->semaphore);
        if (semaphoreIt == info_VkSemaphore.end()) return VK_ERROR_OUT_OF_HOST_MEMORY;
        auto& info = semaphoreIt->second;

        if (info.syncFd.value_or(-1) >= 0) {
            mSyncHelper->close(info.syncFd.value());
        }

        info.syncFd.emplace(pImportSemaphoreFdInfo->fd);

        return VK_SUCCESS;
    } else {
        int fd = pImportSemaphoreFdInfo->fd;
        int err = lseek(fd, 0, SEEK_SET);
        if (err == -1) {
            mesa_loge("lseek failed on import semaphore");
        }
        int hostFd = 0;
        int read_result = read(fd, &hostFd, sizeof(hostFd));
        (void)read_result;
        VkImportSemaphoreFdInfoKHR tmpInfo = *pImportSemaphoreFdInfo;
        tmpInfo.fd = hostFd;
        VkResult result = enc->vkImportSemaphoreFdKHR(device, &tmpInfo, true /* do lock */);
        mSyncHelper->close(fd);
        return result;
    }
#else
    (void)context;
    (void)input_result;
    (void)device;
    (void)pImportSemaphoreFdInfo;
    return VK_ERROR_INCOMPATIBLE_DRIVER;
#endif
}
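
// Note how the opaque-fd paths of on_vkGetSemaphoreFdKHR and
// on_vkImportSemaphoreFdKHR above pair up: export writes the host-side fd value
// into a guest-local anonymous file, and import reads it back out before
// forwarding it to the host. Roughly (hypothetical values):
//
//     export: write(anonFd, &hostFd, sizeof(hostFd));  // anonFd handed to the app
//     import: read(appFd, &hostFd, sizeof(hostFd));    // recover hostFd, send to host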

VkResult ResourceTracker::on_vkGetMemoryFdPropertiesKHR(
    void* context, VkResult, VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType, int fd,
    VkMemoryFdPropertiesKHR* pMemoryFdProperties) {
#if DETECT_OS_LINUX && !defined(VK_USE_PLATFORM_ANDROID_KHR)
    if (!(handleType & VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT)) {
        mesa_loge("%s: VK_KHR_external_memory_fd behavior not defined for handleType: 0x%x\n",
                  __func__, handleType);
        return VK_ERROR_INVALID_EXTERNAL_HANDLE;
    }
    // Sanity-check device
    std::lock_guard<std::recursive_mutex> lock(mLock);
    auto deviceIt = info_VkDevice.find(device);
    if (deviceIt == info_VkDevice.end()) {
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }
    // TODO: Verify FD valid ?
    (void)fd;

    if (mCaps.vulkanCapset.colorBufferMemoryIndex == 0xFFFFFFFF) {
        mCaps.vulkanCapset.colorBufferMemoryIndex = getColorBufferMemoryIndex(context, device);
    }

    updateMemoryTypeBits(&pMemoryFdProperties->memoryTypeBits,
                         mCaps.vulkanCapset.colorBufferMemoryIndex);

    return VK_SUCCESS;
#else
    (void)context;
    (void)device;
    (void)handleType;
    (void)fd;
    (void)pMemoryFdProperties;
    return VK_ERROR_INCOMPATIBLE_DRIVER;
#endif
}

VkResult ResourceTracker::on_vkGetMemoryFdKHR(void* context, VkResult, VkDevice device,
                                              const VkMemoryGetFdInfoKHR* pGetFdInfo, int* pFd) {
#if DETECT_OS_LINUX && !defined(VK_USE_PLATFORM_ANDROID_KHR)
    if (!pGetFdInfo) return VK_ERROR_OUT_OF_HOST_MEMORY;
    if (!pGetFdInfo->memory) return VK_ERROR_OUT_OF_HOST_MEMORY;

    if (!(pGetFdInfo->handleType & (VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
                                    VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT))) {
        mesa_loge("%s: Export operation not defined for handleType: 0x%x\n", __func__,
                  pGetFdInfo->handleType);
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }
    // Sanity-check device
    std::lock_guard<std::recursive_mutex> lock(mLock);
    auto deviceIt = info_VkDevice.find(device);
    if (deviceIt == info_VkDevice.end()) {
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }

    auto deviceMemIt = info_VkDeviceMemory.find(pGetFdInfo->memory);
    if (deviceMemIt == info_VkDeviceMemory.end()) {
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }
    auto& info = deviceMemIt->second;

    if (!info.blobPtr) {
        mesa_loge("%s: VkDeviceMemory does not have a resource available for export.\n", __func__);
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }

    VirtGpuExternalHandle handle{};
    int ret = info.blobPtr->exportBlob(handle);
    if (ret != 0 || handle.osHandle < 0) {
        mesa_loge("%s: Failed to export host resource to FD.\n", __func__);
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }
    *pFd = handle.osHandle;
    return VK_SUCCESS;
#else
    (void)context;
    (void)device;
    (void)pGetFdInfo;
    (void)pFd;
    return VK_ERROR_INCOMPATIBLE_DRIVER;
#endif
}
void ResourceTracker::flushCommandBufferPendingCommandsBottomUp(
    void* context, VkQueue queue, const std::vector<VkCommandBuffer>& workingSet) {
    if (workingSet.empty()) return;

    std::vector<VkCommandBuffer> nextLevel;
    for (auto commandBuffer : workingSet) {
        struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
        forAllObjects(cb->subObjects, [&nextLevel](void* secondary) {
            nextLevel.push_back((VkCommandBuffer)secondary);
        });
    }

    flushCommandBufferPendingCommandsBottomUp(context, queue, nextLevel);

    // After this point, everyone at the previous level has been flushed
    for (auto cmdbuf : workingSet) {
        struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(cmdbuf);

        // There are no pending commands here, skip. (case 1)
        if (!cb->privateStream) continue;

        unsigned char* writtenPtr = nullptr;
        size_t written = 0;
        CommandBufferStagingStream* cmdBufStream =
            static_cast<CommandBufferStagingStream*>(cb->privateStream);
        cmdBufStream->getWritten(&writtenPtr, &written);

        // There are no pending commands here, skip. (case 2, stream created but no new recordings)
        if (!written) continue;

        // There are pending commands to flush.
        VkEncoder* enc = (VkEncoder*)context;
        VkDeviceMemory deviceMemory = cmdBufStream->getDeviceMemory();
        VkDeviceSize dataOffset = 0;
        if (mFeatureInfo.hasVulkanAuxCommandMemory) {
            // For suballocations, deviceMemory is an alias VkDeviceMemory;
            // get the underlying VkDeviceMemory for the given alias.
            deviceMemoryTransform_tohost(&deviceMemory, 1 /*memoryCount*/, &dataOffset,
                                         1 /*offsetCount*/, nullptr /*size*/, 0 /*sizeCount*/,
                                         nullptr /*typeIndex*/, 0 /*typeIndexCount*/,
                                         nullptr /*typeBits*/, 0 /*typeBitCounts*/);

            // Mark the stream as flushing before flushing commands.
            cmdBufStream->markFlushing();
            enc->vkQueueFlushCommandsFromAuxMemoryGOOGLE(queue, cmdbuf, deviceMemory, dataOffset,
                                                         written, true /*do lock*/);
        } else {
            enc->vkQueueFlushCommandsGOOGLE(queue, cmdbuf, written, (const void*)writtenPtr,
                                            true /* do lock */);
        }
        // Reset this stream. Flushing happens on vkQueueSubmit, and the Vulkan
        // spec states that on queue submit, applications must not attempt to
        // modify the command buffer in any way, as the device may be processing
        // the commands recorded to it. It is therefore safe to call reset()
        // here; the command buffer associated with this stream only leaves the
        // pending state after queue submit is complete and the host has read
        // the data.
        cmdBufStream->reset();
    }
}
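
// The recursion above flushes leaves first: for a primary P that executes a
// secondary S (via vkCmdExecuteCommands), S's staging stream reaches the host
// before P's, so the host already has S's commands when it replays P. A toy
// trace with one level of nesting (hypothetical buffers):
//
//     workingSet = {P} -> nextLevel = {S} -> flush {S}, then flush {P}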

uint32_t ResourceTracker::syncEncodersForQueue(VkQueue queue, VkEncoder* currentEncoder) {
    if (!supportsAsyncQueueSubmit()) {
        return 0;
    }

    struct goldfish_VkQueue* q = as_goldfish_VkQueue(queue);
    if (!q) return 0;

    auto lastEncoder = q->lastUsedEncoder;

    if (lastEncoder == currentEncoder) return 0;

    currentEncoder->incRef();

    q->lastUsedEncoder = currentEncoder;

    if (!lastEncoder) return 0;

    // The queue is switching encoders: advance the sequence number by two and
    // hand off via host syncs so the host drains the old encoder's stream
    // before taking commands from the new one.
    auto oldSeq = q->sequenceNumber;
    q->sequenceNumber += 2;
    lastEncoder->vkQueueHostSyncGOOGLE(queue, false, oldSeq + 1, true /* do lock */);
    lastEncoder->flush();
    currentEncoder->vkQueueHostSyncGOOGLE(queue, true, oldSeq + 2, true /* do lock */);

    if (lastEncoder->decRef()) {
        q->lastUsedEncoder = nullptr;
    }

    return 0;
}
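
// A sketch of that handoff, assuming the queue's sequence number is S when the
// encoder switch happens (the exact semantics of the bool argument are defined
// by the host protocol; the point here is the ordering):
//
//     lastEncoder:    vkQueueHostSyncGOOGLE(queue, false, S + 1)  // retire old stream
//     lastEncoder:    flush()
//     currentEncoder: vkQueueHostSyncGOOGLE(queue, true,  S + 2)  // resume on new stream
//
// so commands already recorded through the old encoder are observed by the host
// before anything sent through the new one.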

template <class VkSubmitInfoType>
void ResourceTracker::flushStagingStreams(void* context, VkQueue queue, uint32_t submitCount,
                                          const VkSubmitInfoType* pSubmits) {
    std::vector<VkCommandBuffer> toFlush;
    for (uint32_t i = 0; i < submitCount; ++i) {
        for (uint32_t j = 0; j < getCommandBufferCount(pSubmits[i]); ++j) {
            toFlush.push_back(getCommandBuffer(pSubmits[i], j));
        }
    }

    std::unordered_set<VkDescriptorSet> pendingSets;
    collectAllPendingDescriptorSetsBottomUp(toFlush, pendingSets);
    commitDescriptorSetUpdates(context, queue, pendingSets);

    flushCommandBufferPendingCommandsBottomUp(context, queue, toFlush);

    for (auto cb : toFlush) {
        resetCommandBufferPendingTopology(cb);
    }
}

VkResult ResourceTracker::on_vkQueueSubmit(void* context, VkResult input_result, VkQueue queue,
                                           uint32_t submitCount, const VkSubmitInfo* pSubmits,
                                           VkFence fence) {
    MESA_TRACE_SCOPE("on_vkQueueSubmit");

    /* From the Vulkan 1.3.204 spec:
     *
     *    VUID-VkSubmitInfo-pNext-03240
     *
     *    "If the pNext chain of this structure includes a VkTimelineSemaphoreSubmitInfo structure
     *    and any element of pSignalSemaphores was created with a VkSemaphoreType of
     *    VK_SEMAPHORE_TYPE_TIMELINE, then its signalSemaphoreValueCount member must equal
     *    signalSemaphoreCount"
     *
     * Internally, Mesa WSI creates placeholder semaphores/fences (see transformVkSemaphore functions
     * in gfxstream_vk_private.cpp).  We don't want to forward those to the host, since there is
     * no host-side Vulkan object associated with the placeholder sync objects.
     *
     * The way to test this behavior is Zink + glxgears, on Linux hosts.  It should fail without
     * this check.
     */
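    // For example (hypothetical counts): if one placeholder signal semaphore was
    // filtered out, getSignalSemaphoreCount(pSubmits[i]) may be 2 while
    // tssi->signalSemaphoreValueCount still says 3; the loop below patches the
    // count to 2 so the forwarded submit stays consistent with the VUID.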
    for (uint32_t i = 0; i < submitCount; i++) {
        VkTimelineSemaphoreSubmitInfo* tssi =
            vk_find_struct(const_cast<VkSubmitInfo*>(&pSubmits[i]), TIMELINE_SEMAPHORE_SUBMIT_INFO);

        if (tssi) {
            uint32_t count = getSignalSemaphoreCount(pSubmits[i]);
            if (count != tssi->signalSemaphoreValueCount) {
                tssi->signalSemaphoreValueCount = count;
            }
        }
    }

    return on_vkQueueSubmitTemplate<VkSubmitInfo>(context, input_result, queue, submitCount,
                                                  pSubmits, fence);
}

VkResult ResourceTracker::on_vkQueueSubmit2(void* context, VkResult input_result, VkQueue queue,
                                            uint32_t submitCount, const VkSubmitInfo2* pSubmits,
                                            VkFence fence) {
    MESA_TRACE_SCOPE("on_vkQueueSubmit2");
    return on_vkQueueSubmitTemplate<VkSubmitInfo2>(context, input_result, queue, submitCount,
                                                   pSubmits, fence);
}

VkResult ResourceTracker::vkQueueSubmitEnc(VkEncoder* enc, VkQueue queue, uint32_t submitCount,
                                           const VkSubmitInfo* pSubmits, VkFence fence) {
    if (supportsAsyncQueueSubmit()) {
        enc->vkQueueSubmitAsyncGOOGLE(queue, submitCount, pSubmits, fence, true /* do lock */);
        return VK_SUCCESS;
    } else {
        return enc->vkQueueSubmit(queue, submitCount, pSubmits, fence, true /* do lock */);
    }
}

VkResult ResourceTracker::vkQueueSubmitEnc(VkEncoder* enc, VkQueue queue, uint32_t submitCount,
                                           const VkSubmitInfo2* pSubmits, VkFence fence) {
    if (supportsAsyncQueueSubmit()) {
        enc->vkQueueSubmitAsync2GOOGLE(queue, submitCount, pSubmits, fence, true /* do lock */);
        return VK_SUCCESS;
    } else {
        return enc->vkQueueSubmit2(queue, submitCount, pSubmits, fence, true /* do lock */);
    }
}

template <typename VkSubmitInfoType>
VkResult ResourceTracker::on_vkQueueSubmitTemplate(void* context, VkResult input_result,
                                                   VkQueue queue, uint32_t submitCount,
                                                   const VkSubmitInfoType* pSubmits,
                                                   VkFence fence) {
    flushStagingStreams(context, queue, submitCount, pSubmits);

    std::vector<VkSemaphore> pre_signal_semaphores;
    std::vector<zx_handle_t> pre_signal_events;
    std::vector<int> pre_signal_sync_fds;
    std::vector<std::pair<zx_handle_t, zx_koid_t>> post_wait_events;
    std::vector<int> post_wait_sync_fds;

    VkEncoder* enc = (VkEncoder*)context;

    std::unique_lock<std::recursive_mutex> lock(mLock);

    for (uint32_t i = 0; i < submitCount; ++i) {
        for (uint32_t j = 0; j < getWaitSemaphoreCount(pSubmits[i]); ++j) {
            VkSemaphore semaphore = getWaitSemaphore(pSubmits[i], j);
            auto it = info_VkSemaphore.find(semaphore);
            if (it != info_VkSemaphore.end()) {
                auto& semInfo = it->second;
#ifdef VK_USE_PLATFORM_FUCHSIA
                if (semInfo.eventHandle) {
                    pre_signal_events.push_back(semInfo.eventHandle);
                    pre_signal_semaphores.push_back(semaphore);
                }
#endif
#if defined(VK_USE_PLATFORM_ANDROID_KHR) || DETECT_OS_LINUX
                if (semInfo.syncFd.has_value()) {
                    pre_signal_sync_fds.push_back(semInfo.syncFd.value());
                    pre_signal_semaphores.push_back(semaphore);
                }
#endif
            }
        }
        for (uint32_t j = 0; j < getSignalSemaphoreCount(pSubmits[i]); ++j) {
            auto it = info_VkSemaphore.find(getSignalSemaphore(pSubmits[i], j));
            if (it != info_VkSemaphore.end()) {
                auto& semInfo = it->second;
#ifdef VK_USE_PLATFORM_FUCHSIA
                if (semInfo.eventHandle) {
                    post_wait_events.push_back({semInfo.eventHandle, semInfo.eventKoid});
#ifndef FUCHSIA_NO_TRACE
                    if (semInfo.eventKoid != ZX_KOID_INVALID) {
                        // TODO(fxbug.dev/42144867): Remove the "semaphore"
                        // FLOW_END events once it is removed from clients
                        // (for example, gfx Engine).
                        TRACE_FLOW_END("gfx", "semaphore", semInfo.eventKoid);
                        TRACE_FLOW_BEGIN("gfx", "goldfish_post_wait_event", semInfo.eventKoid);
                    }
#endif
                }
#endif
#if defined(VK_USE_PLATFORM_ANDROID_KHR) || DETECT_OS_LINUX
                if (semInfo.syncFd.value_or(-1) >= 0) {
                    post_wait_sync_fds.push_back(semInfo.syncFd.value());
                }
#endif
            }
        }
    }
    lock.unlock();

    if (pre_signal_semaphores.empty()) {
        input_result = vkQueueSubmitEnc(enc, queue, submitCount, pSubmits, fence);
        if (input_result != VK_SUCCESS) return input_result;
    } else {
        // Schedule waits on the OS external objects and signal the wait
        // semaphores in a separate thread.
#ifdef VK_USE_PLATFORM_FUCHSIA
        for (auto event : pre_signal_events) {
            preSignalTasks.push_back([event] {
                zx_object_wait_one(event, ZX_EVENT_SIGNALED, ZX_TIME_INFINITE, nullptr);
            });
        }
#endif
#if defined(VK_USE_PLATFORM_ANDROID_KHR) || DETECT_OS_LINUX
        for (auto fd : pre_signal_sync_fds) {
            // https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/VkImportSemaphoreFdInfoKHR.html
            // fd == -1 is treated as already signaled
            if (fd != -1) {
                mSyncHelper->wait(fd, 3000);
#if GFXSTREAM_SYNC_DEBUG
                mSyncHelper->debugPrint(fd);
#endif
            }
        }
#endif
        // Use the old version of VkSubmitInfo
        VkSubmitInfo submit_info = {
            .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
            .waitSemaphoreCount = 0,
            .pWaitSemaphores = nullptr,
            .pWaitDstStageMask = nullptr,
            .signalSemaphoreCount = static_cast<uint32_t>(pre_signal_semaphores.size()),
            .pSignalSemaphores = pre_signal_semaphores.data()};
        vkQueueSubmitEnc(enc, queue, 1, &submit_info, VK_NULL_HANDLE);
        input_result = vkQueueSubmitEnc(enc, queue, submitCount, pSubmits, fence);
        if (input_result != VK_SUCCESS) return input_result;
    }
    lock.lock();
    int externalFenceFdToSignal = -1;

#if defined(VK_USE_PLATFORM_ANDROID_KHR) || DETECT_OS_LINUX
    if (fence != VK_NULL_HANDLE) {
        auto it = info_VkFence.find(fence);
        if (it != info_VkFence.end()) {
            const auto& info = it->second;
            if (info.syncFd && *info.syncFd >= 0) {
                externalFenceFdToSignal = *info.syncFd;
            }
        }
    }
#endif
    VkResult waitIdleRes = VK_SUCCESS;
    if (externalFenceFdToSignal >= 0 || !post_wait_events.empty() || !post_wait_sync_fds.empty()) {
        auto hostConn = ResourceTracker::threadingCallbacks.hostConnectionGetFunc();
        auto vkEncoder = ResourceTracker::threadingCallbacks.vkEncoderGetFunc(hostConn);
        waitIdleRes = vkEncoder->vkQueueWaitIdle(queue, true /* do lock */);
        if (VK_SUCCESS == waitIdleRes) {
#ifdef VK_USE_PLATFORM_FUCHSIA
            MESA_TRACE_SCOPE("on_vkQueueSubmit::SignalSemaphores");
            (void)externalFenceFdToSignal;
            for (auto& [event, koid] : post_wait_events) {
#ifndef FUCHSIA_NO_TRACE
                if (koid != ZX_KOID_INVALID) {
                    TRACE_FLOW_END("gfx", "goldfish_post_wait_event", koid);
                    TRACE_FLOW_BEGIN("gfx", "event_signal", koid);
                }
#endif
                zx_object_signal(event, 0, ZX_EVENT_SIGNALED);
            }
#endif
#if GFXSTREAM_ENABLE_GUEST_GOLDFISH
            for (auto& fd : post_wait_sync_fds) {
                goldfish_sync_signal(fd);
            }

            if (externalFenceFdToSignal >= 0) {
                mesa_logd("%s: external fence real signal: %d\n", __func__,
                          externalFenceFdToSignal);
                goldfish_sync_signal(externalFenceFdToSignal);
            }
#endif
        }
    }
    return waitIdleRes;
}
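
// In short, on_vkQueueSubmitTemplate layers guest-side external sync around the
// host submit (an ordering sketch, not additional code):
//
//     1. wait on (or schedule waits on) the OS objects backing wait semaphores
//        (sync fds / zx events)
//     2. submit a signal-only VkSubmitInfo for those semaphores, then forward
//        the caller's submits
//     3. if anything must be signaled guest-side, vkQueueWaitIdle, then signal
//        the OS objects backing the signal semaphores and any external fence fd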

VkResult ResourceTracker::on_vkQueueWaitIdle(void* context, VkResult, VkQueue queue) {
    VkEncoder* enc = (VkEncoder*)context;

    // now done waiting, get the host's opinion
    return enc->vkQueueWaitIdle(queue, true /* do lock */);
}

#ifdef VK_USE_PLATFORM_ANDROID_KHR
void ResourceTracker::unwrap_VkNativeBufferANDROID(const VkNativeBufferANDROID* inputNativeInfo,
                                                   VkNativeBufferANDROID* outputNativeInfo) {
    if (!inputNativeInfo || !inputNativeInfo->handle) {
        return;
    }

    if (!outputNativeInfo || !outputNativeInfo->handle) {
        mesa_loge("FATAL: Local native buffer info not properly allocated!");
        abort();
    }

    const native_handle_t* nativeHandle = (const native_handle_t*)inputNativeInfo->handle;
    *(uint32_t*)(outputNativeInfo->handle) = mGralloc->getHostHandle(nativeHandle);
}

void ResourceTracker::unwrap_VkBindImageMemorySwapchainInfoKHR(
    const VkBindImageMemorySwapchainInfoKHR* inputBimsi,
    VkBindImageMemorySwapchainInfoKHR* outputBimsi) {
    if (!inputBimsi || !inputBimsi->swapchain) {
        return;
    }

    if (!outputBimsi || !outputBimsi->swapchain) {
        return;
    }

    // Android-based swapchains are implemented by the Android framework's
    // libvulkan. They only exist within the guest and should not be sent to
    // the host.
    outputBimsi->swapchain = VK_NULL_HANDLE;
}
#endif

void ResourceTracker::unwrap_vkCreateImage_pCreateInfo(const VkImageCreateInfo* pCreateInfo,
                                                       VkImageCreateInfo* local_pCreateInfo) {
#ifdef VK_USE_PLATFORM_ANDROID_KHR
    const VkNativeBufferANDROID* inputNativeInfo =
        vk_find_struct_const(pCreateInfo, NATIVE_BUFFER_ANDROID);

    VkNativeBufferANDROID* outputNativeInfo =
        vk_find_struct(local_pCreateInfo, NATIVE_BUFFER_ANDROID);

    unwrap_VkNativeBufferANDROID(inputNativeInfo, outputNativeInfo);
#endif
}

void ResourceTracker::unwrap_vkAcquireImageANDROID_nativeFenceFd(int fd, int* fd_out) {
#ifdef VK_USE_PLATFORM_ANDROID_KHR
    (void)fd_out;
    if (fd != -1) {
        MESA_TRACE_SCOPE("waitNativeFenceInAcquire");
        // Implicit synchronization
        mSyncHelper->wait(fd, 3000);
        // From libvulkan's swapchain.cpp:
        // """
        // NOTE: we're relying on AcquireImageANDROID to close fence_clone,
        // even if the call fails. We could close it ourselves on failure, but
        // that would create a race condition if the driver closes it on a
        // failure path: some other thread might create an fd with the same
        // number between the time the driver closes it and the time we close
        // it. We must assume one of: the driver *always* closes it even on
        // failure, or *never* closes it on failure.
        // """
        // Therefore, we assume the contract where this driver must close fd.

#if GFXSTREAM_SYNC_DEBUG
        mSyncHelper->debugPrint(fd);
#endif
        mSyncHelper->close(fd);
    }
#endif
}

void ResourceTracker::unwrap_VkBindImageMemory2_pBindInfos(
    uint32_t bindInfoCount, const VkBindImageMemoryInfo* inputBindInfos,
    VkBindImageMemoryInfo* outputBindInfos) {
#ifdef VK_USE_PLATFORM_ANDROID_KHR
    for (uint32_t i = 0; i < bindInfoCount; ++i) {
        const VkBindImageMemoryInfo* inputBindInfo = &inputBindInfos[i];
        VkBindImageMemoryInfo* outputBindInfo = &outputBindInfos[i];

        const VkNativeBufferANDROID* inputNativeInfo =
            vk_find_struct_const(inputBindInfo, NATIVE_BUFFER_ANDROID);

        VkNativeBufferANDROID* outputNativeInfo =
            vk_find_struct(outputBindInfo, NATIVE_BUFFER_ANDROID);

        unwrap_VkNativeBufferANDROID(inputNativeInfo, outputNativeInfo);

        const VkBindImageMemorySwapchainInfoKHR* inputBimsi =
            vk_find_struct_const(inputBindInfo, BIND_IMAGE_MEMORY_SWAPCHAIN_INFO_KHR);

        VkBindImageMemorySwapchainInfoKHR* outputBimsi =
            vk_find_struct(outputBindInfo, BIND_IMAGE_MEMORY_SWAPCHAIN_INFO_KHR);

        unwrap_VkBindImageMemorySwapchainInfoKHR(inputBimsi, outputBimsi);
    }
#endif
}

// Action of vkMapMemoryIntoAddressSpaceGOOGLE:
// 1. preprocess (on_vkMapMemoryIntoAddressSpaceGOOGLE_pre):
//    uses the address space device to reserve the right size of
//    memory.
// 2. the reservation results in a physical address. the physical
//    address is set as |*pAddress|.
// 3. after pre, the API call is encoded to the host, where the
//    value of pAddress is also sent (the physical address).
// 4. the host will obtain the actual gpu pointer and send it
//    back out in |*pAddress|.
// 5. postprocess (on_vkMapMemoryIntoAddressSpaceGOOGLE) will run,
//    using the mmap() method of GoldfishAddressSpaceBlock to obtain
//    a pointer in guest userspace corresponding to the host pointer.
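//
// A rough sketch of that sequence from the caller's side (names as used in this
// file; the encoded host call is paraphrased):
//
//     uint64_t addr = 0;
//     on_vkMapMemoryIntoAddressSpaceGOOGLE_pre(...)  // addr <- reserved guest physical address
//     <encode vkMapMemoryIntoAddressSpaceGOOGLE>     // host maps, addr <- host gpu pointer
//     on_vkMapMemoryIntoAddressSpaceGOOGLE(...)      // guest userspace pointer via mmap()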
VkResult ResourceTracker::on_vkMapMemoryIntoAddressSpaceGOOGLE_pre(void*, VkResult, VkDevice,
                                                                   VkDeviceMemory memory,
                                                                   uint64_t* pAddress) {
    std::lock_guard<std::recursive_mutex> lock(mLock);

    auto it = info_VkDeviceMemory.find(memory);
    if (it == info_VkDeviceMemory.end()) {
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }

#if DETECT_OS_ANDROID
    auto& memInfo = it->second;

    GoldfishAddressSpaceBlockPtr block = std::make_shared<GoldfishAddressSpaceBlock>();
    block->allocate(mGoldfishAddressSpaceBlockProvider.get(), memInfo.coherentMemorySize);

    memInfo.goldfishBlock = block;
    *pAddress = block->physAddr();

    return VK_SUCCESS;
#else
    (void)pAddress;
    return VK_ERROR_MEMORY_MAP_FAILED;
#endif
}

VkResult ResourceTracker::on_vkMapMemoryIntoAddressSpaceGOOGLE(void*, VkResult input_result,
                                                               VkDevice, VkDeviceMemory memory,
                                                               uint64_t* pAddress) {
    (void)memory;
    (void)pAddress;

    if (input_result != VK_SUCCESS) {
        return input_result;
    }

    return input_result;
}

VkResult ResourceTracker::initDescriptorUpdateTemplateBuffers(
    const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo,
    VkDescriptorUpdateTemplate descriptorUpdateTemplate) {
    std::lock_guard<std::recursive_mutex> lock(mLock);

    auto it = info_VkDescriptorUpdateTemplate.find(descriptorUpdateTemplate);
    if (it == info_VkDescriptorUpdateTemplate.end()) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    auto& info = it->second;
    uint32_t inlineUniformBlockBufferSize = 0;

    // First pass: count entries of each descriptor class so the index and
    // payload arrays below can be allocated exactly.
    for (uint32_t i = 0; i < pCreateInfo->descriptorUpdateEntryCount; ++i) {
        const auto& entry = pCreateInfo->pDescriptorUpdateEntries[i];
        uint32_t descCount = entry.descriptorCount;
        VkDescriptorType descType = entry.descriptorType;
        ++info.templateEntryCount;
        if (isDescriptorTypeInlineUniformBlock(descType)) {
            inlineUniformBlockBufferSize += descCount;
            ++info.inlineUniformBlockCount;
        } else {
            for (uint32_t j = 0; j < descCount; ++j) {
                if (isDescriptorTypeImageInfo(descType)) {
                    ++info.imageInfoCount;
                } else if (isDescriptorTypeBufferInfo(descType)) {
                    ++info.bufferInfoCount;
                } else if (isDescriptorTypeBufferView(descType)) {
                    ++info.bufferViewCount;
                } else {
                    mesa_loge("%s: FATAL: Unknown descriptor type %d\n", __func__, descType);
                    // abort();
                }
            }
        }
    }

    if (info.templateEntryCount)
        info.templateEntries = new VkDescriptorUpdateTemplateEntry[info.templateEntryCount];

    if (info.imageInfoCount) {
        info.imageInfoIndices = new uint32_t[info.imageInfoCount];
        info.imageInfos = new VkDescriptorImageInfo[info.imageInfoCount];
    }

    if (info.bufferInfoCount) {
        info.bufferInfoIndices = new uint32_t[info.bufferInfoCount];
        info.bufferInfos = new VkDescriptorBufferInfo[info.bufferInfoCount];
    }

    if (info.bufferViewCount) {
        info.bufferViewIndices = new uint32_t[info.bufferViewCount];
        info.bufferViews = new VkBufferView[info.bufferViewCount];
    }

    if (info.inlineUniformBlockCount) {
        info.inlineUniformBlockBuffer.resize(inlineUniformBlockBufferSize);
        info.inlineUniformBlockBytesPerBlocks.resize(info.inlineUniformBlockCount);
    }

    uint32_t imageInfoIndex = 0;
    uint32_t bufferInfoIndex = 0;
    uint32_t bufferViewIndex = 0;
    uint32_t inlineUniformBlockIndex = 0;

    // Second pass: record, for each descriptor element, which template entry
    // it belongs to.
    for (uint32_t i = 0; i < pCreateInfo->descriptorUpdateEntryCount; ++i) {
        const auto& entry = pCreateInfo->pDescriptorUpdateEntries[i];
        uint32_t descCount = entry.descriptorCount;
        VkDescriptorType descType = entry.descriptorType;

        info.templateEntries[i] = entry;

        if (isDescriptorTypeInlineUniformBlock(descType)) {
            info.inlineUniformBlockBytesPerBlocks[inlineUniformBlockIndex] = descCount;
            ++inlineUniformBlockIndex;
        } else {
            for (uint32_t j = 0; j < descCount; ++j) {
                if (isDescriptorTypeImageInfo(descType)) {
                    info.imageInfoIndices[imageInfoIndex] = i;
                    ++imageInfoIndex;
                } else if (isDescriptorTypeBufferInfo(descType)) {
                    info.bufferInfoIndices[bufferInfoIndex] = i;
                    ++bufferInfoIndex;
                } else if (isDescriptorTypeBufferView(descType)) {
                    info.bufferViewIndices[bufferViewIndex] = i;
                    ++bufferViewIndex;
                } else {
                    mesa_loge("%s: FATAL: Unknown descriptor type %d\n", __func__, descType);
                    // abort();
                }
            }
        }
    }

    return VK_SUCCESS;
}
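
// Layout sketch for the arrays built above, for a hypothetical template with
// entries [E0: 2x COMBINED_IMAGE_SAMPLER, E1: 1x UNIFORM_BUFFER]:
//
//     templateEntries   = [E0, E1]
//     imageInfoIndices  = [0, 0]  // both image infos originate from entry 0
//     bufferInfoIndices = [1]     // the buffer info originates from entry 1
//
// which lets on_vkUpdateDescriptorSetWithTemplate below scatter user data
// without re-deriving descriptor types per element.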

VkResult ResourceTracker::on_vkCreateDescriptorUpdateTemplate(
    void* context, VkResult input_result, VkDevice device,
    const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo,
    const VkAllocationCallbacks* pAllocator,
    VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate) {
    (void)context;
    (void)device;
    (void)pAllocator;

    if (input_result != VK_SUCCESS) return input_result;

    return initDescriptorUpdateTemplateBuffers(pCreateInfo, *pDescriptorUpdateTemplate);
}

VkResult ResourceTracker::on_vkCreateDescriptorUpdateTemplateKHR(
    void* context, VkResult input_result, VkDevice device,
    const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo,
    const VkAllocationCallbacks* pAllocator,
    VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate) {
    (void)context;
    (void)device;
    (void)pAllocator;

    if (input_result != VK_SUCCESS) return input_result;

    return initDescriptorUpdateTemplateBuffers(pCreateInfo, *pDescriptorUpdateTemplate);
}

void ResourceTracker::on_vkUpdateDescriptorSetWithTemplate(
    void* context, VkDevice device, VkDescriptorSet descriptorSet,
    VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData) {
    VkEncoder* enc = (VkEncoder*)context;

    uint8_t* userBuffer = (uint8_t*)pData;
    if (!userBuffer) return;

    // TODO: Make this thread safe
    std::unique_lock<std::recursive_mutex> lock(mLock);

    auto it = info_VkDescriptorUpdateTemplate.find(descriptorUpdateTemplate);
    if (it == info_VkDescriptorUpdateTemplate.end()) {
        return;
    }

    auto& info = it->second;

    uint32_t templateEntryCount = info.templateEntryCount;
    VkDescriptorUpdateTemplateEntry* templateEntries = info.templateEntries;

    uint32_t imageInfoCount = info.imageInfoCount;
    uint32_t bufferInfoCount = info.bufferInfoCount;
    uint32_t bufferViewCount = info.bufferViewCount;
    uint32_t* imageInfoIndices = info.imageInfoIndices;
    uint32_t* bufferInfoIndices = info.bufferInfoIndices;
    uint32_t* bufferViewIndices = info.bufferViewIndices;
    VkDescriptorImageInfo* imageInfos = info.imageInfos;
    VkDescriptorBufferInfo* bufferInfos = info.bufferInfos;
    VkBufferView* bufferViews = info.bufferViews;
    uint8_t* inlineUniformBlockBuffer = info.inlineUniformBlockBuffer.data();
    uint32_t* inlineUniformBlockBytesPerBlocks = info.inlineUniformBlockBytesPerBlocks.data();

    lock.unlock();

    size_t currImageInfoOffset = 0;
    size_t currBufferInfoOffset = 0;
    size_t currBufferViewOffset = 0;
    size_t inlineUniformBlockOffset = 0;
    size_t inlineUniformBlockIdx = 0;

    struct goldfish_VkDescriptorSet* ds = as_goldfish_VkDescriptorSet(descriptorSet);
    ReifiedDescriptorSet* reified = ds->reified;

    bool batched = mFeatureInfo.hasVulkanBatchedDescriptorSetUpdate;

    for (uint32_t i = 0; i < templateEntryCount; ++i) {
        const auto& entry = templateEntries[i];
        VkDescriptorType descType = entry.descriptorType;
        uint32_t dstBinding = entry.dstBinding;

        auto offset = entry.offset;
        auto stride = entry.stride;
        auto dstArrayElement = entry.dstArrayElement;

        uint32_t descCount = entry.descriptorCount;

        if (isDescriptorTypeImageInfo(descType)) {
            if (!stride) stride = sizeof(VkDescriptorImageInfo);

            const VkDescriptorImageInfo* currImageInfoBegin =
                (const VkDescriptorImageInfo*)((uint8_t*)imageInfos + currImageInfoOffset);

            for (uint32_t j = 0; j < descCount; ++j) {
                const VkDescriptorImageInfo* user =
                    (const VkDescriptorImageInfo*)(userBuffer + offset + j * stride);

                memcpy(((uint8_t*)imageInfos) + currImageInfoOffset, user,
                       sizeof(VkDescriptorImageInfo));
                currImageInfoOffset += sizeof(VkDescriptorImageInfo);
            }

            if (batched) {
                doEmulatedDescriptorImageInfoWriteFromTemplate(
                    descType, dstBinding, dstArrayElement, descCount, currImageInfoBegin, reified);
            }
        } else if (isDescriptorTypeBufferInfo(descType)) {
            if (!stride) stride = sizeof(VkDescriptorBufferInfo);

            const VkDescriptorBufferInfo* currBufferInfoBegin =
                (const VkDescriptorBufferInfo*)((uint8_t*)bufferInfos + currBufferInfoOffset);

            for (uint32_t j = 0; j < descCount; ++j) {
                const VkDescriptorBufferInfo* user =
                    (const VkDescriptorBufferInfo*)(userBuffer + offset + j * stride);

                memcpy(((uint8_t*)bufferInfos) + currBufferInfoOffset, user,
                       sizeof(VkDescriptorBufferInfo));

                // TODO(b/355497683): move this into gfxstream_vk_UpdateDescriptorSetWithTemplate().
#if DETECT_OS_LINUX || defined(VK_USE_PLATFORM_ANDROID_KHR)
                // Convert mesa to internal for objects in the user buffer
                VkDescriptorBufferInfo* internalBufferInfo =
                    (VkDescriptorBufferInfo*)(((uint8_t*)bufferInfos) + currBufferInfoOffset);
                VK_FROM_HANDLE(gfxstream_vk_buffer, gfxstream_buffer, internalBufferInfo->buffer);
                internalBufferInfo->buffer = gfxstream_buffer->internal_object;
#endif
                currBufferInfoOffset += sizeof(VkDescriptorBufferInfo);
            }

            if (batched) {
                doEmulatedDescriptorBufferInfoWriteFromTemplate(
                    descType, dstBinding, dstArrayElement, descCount, currBufferInfoBegin, reified);
            }

        } else if (isDescriptorTypeBufferView(descType)) {
            if (!stride) stride = sizeof(VkBufferView);

            const VkBufferView* currBufferViewBegin =
                (const VkBufferView*)((uint8_t*)bufferViews + currBufferViewOffset);

            for (uint32_t j = 0; j < descCount; ++j) {
                const VkBufferView* user = (const VkBufferView*)(userBuffer + offset + j * stride);

                memcpy(((uint8_t*)bufferViews) + currBufferViewOffset, user, sizeof(VkBufferView));
                currBufferViewOffset += sizeof(VkBufferView);
            }

            if (batched) {
                doEmulatedDescriptorBufferViewWriteFromTemplate(
                    descType, dstBinding, dstArrayElement, descCount, currBufferViewBegin, reified);
            }
        } else if (isDescriptorTypeInlineUniformBlock(descType)) {
            uint32_t inlineUniformBlockBytesPerBlock =
                inlineUniformBlockBytesPerBlocks[inlineUniformBlockIdx];
            uint8_t* currInlineUniformBlockBufferBegin =
                inlineUniformBlockBuffer + inlineUniformBlockOffset;
            memcpy(currInlineUniformBlockBufferBegin, userBuffer + offset,
                   inlineUniformBlockBytesPerBlock);
            inlineUniformBlockIdx++;
            inlineUniformBlockOffset += inlineUniformBlockBytesPerBlock;

            if (batched) {
                doEmulatedDescriptorInlineUniformBlockFromTemplate(
                    descType, dstBinding, dstArrayElement, descCount,
                    currInlineUniformBlockBufferBegin, reified);
            }
        } else {
            mesa_loge("%s: FATAL: Unknown descriptor type %d\n", __func__, descType);
            abort();
        }
    }

    if (batched) return;

    enc->vkUpdateDescriptorSetWithTemplateSized2GOOGLE(
        device, descriptorSet, descriptorUpdateTemplate, imageInfoCount, bufferInfoCount,
        bufferViewCount, static_cast<uint32_t>(info.inlineUniformBlockBuffer.size()),
        imageInfoIndices, bufferInfoIndices, bufferViewIndices, imageInfos, bufferInfos,
        bufferViews, inlineUniformBlockBuffer, true /* do lock */);
}

void ResourceTracker::on_vkUpdateDescriptorSetWithTemplateKHR(
    void* context, VkDevice device, VkDescriptorSet descriptorSet,
    VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData) {
    on_vkUpdateDescriptorSetWithTemplate(context, device, descriptorSet, descriptorUpdateTemplate,
                                         pData);
}

VkResult ResourceTracker::on_vkGetPhysicalDeviceImageFormatProperties2_common(
    bool isKhr, void* context, VkResult input_result, VkPhysicalDevice physicalDevice,
    const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo,
    VkImageFormatProperties2* pImageFormatProperties) {
    VkEncoder* enc = (VkEncoder*)context;
    (void)input_result;

    VkPhysicalDeviceImageFormatInfo2 localImageFormatInfo = *pImageFormatInfo;

    uint32_t supportedHandleType = 0;
    VkExternalImageFormatProperties* ext_img_properties =
        vk_find_struct(pImageFormatProperties, EXTERNAL_IMAGE_FORMAT_PROPERTIES);

#ifdef VK_USE_PLATFORM_FUCHSIA

    constexpr VkFormat kExternalImageSupportedFormats[] = {
        VK_FORMAT_B8G8R8A8_SINT,  VK_FORMAT_B8G8R8A8_UNORM,   VK_FORMAT_B8G8R8A8_SRGB,
        VK_FORMAT_B8G8R8A8_SNORM, VK_FORMAT_B8G8R8A8_SSCALED, VK_FORMAT_B8G8R8A8_USCALED,
        VK_FORMAT_R8G8B8A8_SINT,  VK_FORMAT_R8G8B8A8_UNORM,   VK_FORMAT_R8G8B8A8_SRGB,
        VK_FORMAT_R8G8B8A8_SNORM, VK_FORMAT_R8G8B8A8_SSCALED, VK_FORMAT_R8G8B8A8_USCALED,
        VK_FORMAT_R8_UNORM,       VK_FORMAT_R8_UINT,          VK_FORMAT_R8_USCALED,
        VK_FORMAT_R8_SNORM,       VK_FORMAT_R8_SINT,          VK_FORMAT_R8_SSCALED,
        VK_FORMAT_R8_SRGB,        VK_FORMAT_R8G8_UNORM,       VK_FORMAT_R8G8_UINT,
        VK_FORMAT_R8G8_USCALED,   VK_FORMAT_R8G8_SNORM,       VK_FORMAT_R8G8_SINT,
        VK_FORMAT_R8G8_SSCALED,   VK_FORMAT_R8G8_SRGB,
    };

    if (ext_img_properties) {
        if (std::find(std::begin(kExternalImageSupportedFormats),
                      std::end(kExternalImageSupportedFormats),
                      pImageFormatInfo->format) == std::end(kExternalImageSupportedFormats)) {
            return VK_ERROR_FORMAT_NOT_SUPPORTED;
        }
    }
    supportedHandleType |= VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA;
#endif

#ifdef VK_USE_PLATFORM_ANDROID_KHR
    VkAndroidHardwareBufferUsageANDROID* output_ahw_usage =
        vk_find_struct(pImageFormatProperties, ANDROID_HARDWARE_BUFFER_USAGE_ANDROID);
    supportedHandleType |= VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
                           VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
#endif
    const VkPhysicalDeviceExternalImageFormatInfo* ext_img_info =
        vk_find_struct_const(pImageFormatInfo, PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO);
    if (supportedHandleType && ext_img_info) {
        // 0 is a valid handleType so we don't check against 0
        if (ext_img_info->handleType != (ext_img_info->handleType & supportedHandleType)) {
            return VK_ERROR_FORMAT_NOT_SUPPORTED;
        }
    }

#ifdef LINUX_GUEST_BUILD
    const VkPhysicalDeviceImageDrmFormatModifierInfoEXT* drmFmtMod =
        vk_find_struct_const(pImageFormatInfo, PHYSICAL_DEVICE_IMAGE_DRM_FORMAT_MODIFIER_INFO_EXT);
    VkDrmFormatModifierPropertiesListEXT* emulatedDrmFmtModPropsList = nullptr;
    if (drmFmtMod &&
        getHostDeviceExtensionIndex(VK_EXT_IMAGE_DRM_FORMAT_MODIFIER_EXTENSION_NAME) == -1) {
        if (drmFmtMod->drmFormatModifier != DRM_FORMAT_MOD_LINEAR) {
            return VK_ERROR_FORMAT_NOT_SUPPORTED;
        }
        mesa_logd("emulating DRM_FORMAT_MOD_LINEAR with VK_IMAGE_TILING_OPTIMAL");
        emulatedDrmFmtModPropsList =
            vk_find_struct(pImageFormatProperties, DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT);
        localImageFormatInfo.tiling = VK_IMAGE_TILING_LINEAR;
        localImageFormatInfo.usage &=
            ~(VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT);
        pImageFormatInfo = &localImageFormatInfo;
        // Leave drmFormatMod in the input; it should be ignored when
        // tiling is not VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT
    }
#endif  // LINUX_GUEST_BUILD

    VkResult hostRes;

    if (isKhr) {
        hostRes = enc->vkGetPhysicalDeviceImageFormatProperties2KHR(
            physicalDevice, &localImageFormatInfo, pImageFormatProperties, true /* do lock */);
    } else {
        hostRes = enc->vkGetPhysicalDeviceImageFormatProperties2(
            physicalDevice, &localImageFormatInfo, pImageFormatProperties, true /* do lock */);
    }

    if (hostRes != VK_SUCCESS) return hostRes;

#ifdef LINUX_GUEST_BUILD
    if (emulatedDrmFmtModPropsList) {
        VkFormatProperties formatProperties;
        enc->vkGetPhysicalDeviceFormatProperties(physicalDevice, localImageFormatInfo.format,
                                                 &formatProperties, true /* do lock */);

        emulatedDrmFmtModPropsList->drmFormatModifierCount = 1;
        if (emulatedDrmFmtModPropsList->pDrmFormatModifierProperties) {
            emulatedDrmFmtModPropsList->pDrmFormatModifierProperties[0] = {
                .drmFormatModifier = DRM_FORMAT_MOD_LINEAR,
                .drmFormatModifierPlaneCount = 1,
                .drmFormatModifierTilingFeatures = formatProperties.linearTilingFeatures,
            };
        }
    }
    if (ext_img_properties && ext_img_info &&
        ext_img_info->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT) {
        ext_img_properties->externalMemoryProperties.externalMemoryFeatures |=
            VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT | VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT;
        ext_img_properties->externalMemoryProperties.exportFromImportedHandleTypes =
            ext_img_properties->externalMemoryProperties.compatibleHandleTypes =
                VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT;
    }
#endif  // LINUX_GUEST_BUILD

#ifdef VK_USE_PLATFORM_FUCHSIA
    if (ext_img_properties) {
        if (ext_img_info) {
            if (static_cast<uint32_t>(ext_img_info->handleType) ==
                VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA) {
                ext_img_properties->externalMemoryProperties = {
                    .externalMemoryFeatures = VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT |
                                              VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT,
                    .exportFromImportedHandleTypes =
                        VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA,
                    .compatibleHandleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA,
                };
            }
        }
    }
#endif

#ifdef VK_USE_PLATFORM_ANDROID_KHR
    if (output_ahw_usage) {
        output_ahw_usage->androidHardwareBufferUsage = getAndroidHardwareBufferUsageFromVkUsage(
            pImageFormatInfo->flags, pImageFormatInfo->usage);
    }
#endif
    if (ext_img_properties) {
        transformImpl_VkExternalMemoryProperties_fromhost(
            &ext_img_properties->externalMemoryProperties, 0);
    }
    return hostRes;
}

VkResult ResourceTracker::on_vkGetPhysicalDeviceImageFormatProperties2(
    void* context, VkResult input_result, VkPhysicalDevice physicalDevice,
    const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo,
    VkImageFormatProperties2* pImageFormatProperties) {
    return on_vkGetPhysicalDeviceImageFormatProperties2_common(
        false /* not KHR */, context, input_result, physicalDevice, pImageFormatInfo,
        pImageFormatProperties);
}

VkResult ResourceTracker::on_vkGetPhysicalDeviceImageFormatProperties2KHR(
    void* context, VkResult input_result, VkPhysicalDevice physicalDevice,
    const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo,
    VkImageFormatProperties2* pImageFormatProperties) {
    return on_vkGetPhysicalDeviceImageFormatProperties2_common(
        true /* is KHR */, context, input_result, physicalDevice, pImageFormatInfo,
        pImageFormatProperties);
}

on_vkGetPhysicalDeviceExternalBufferProperties_common(bool isKhr,void * context,VkPhysicalDevice physicalDevice,const VkPhysicalDeviceExternalBufferInfo * pExternalBufferInfo,VkExternalBufferProperties * pExternalBufferProperties)6842 void ResourceTracker::on_vkGetPhysicalDeviceExternalBufferProperties_common(
6843     bool isKhr, void* context, VkPhysicalDevice physicalDevice,
6844     const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo,
6845     VkExternalBufferProperties* pExternalBufferProperties) {
6846     VkEncoder* enc = (VkEncoder*)context;
6847 
6848 #if defined(ANDROID)
6849     // Older versions of Goldfish's Gralloc did not support allocating AHARDWAREBUFFER_FORMAT_BLOB
6850     // with GPU usage (b/299520213).
6851     if (mGralloc->treatBlobAsImage() &&
6852         pExternalBufferInfo->handleType ==
6853             VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID) {
6854         pExternalBufferProperties->externalMemoryProperties.externalMemoryFeatures = 0;
6855         pExternalBufferProperties->externalMemoryProperties.exportFromImportedHandleTypes = 0;
6856         pExternalBufferProperties->externalMemoryProperties.compatibleHandleTypes = 0;
6857         return;
6858     }
6859 #endif
6860 
6861     uint32_t supportedHandleType = 0;
6862 #ifdef VK_USE_PLATFORM_FUCHSIA
6863     supportedHandleType |= VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA;
6864 #endif
6865 #ifdef VK_USE_PLATFORM_ANDROID_KHR
6866     supportedHandleType |= VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
6867                            VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
6868 #endif
6869     if (supportedHandleType) {
6870         // 0 is a valid handleType so we can't check against 0
6871         if (pExternalBufferInfo->handleType !=
6872             (pExternalBufferInfo->handleType & supportedHandleType)) {
6873             return;
6874         }
6875     }
6876 
6877     if (isKhr) {
6878         enc->vkGetPhysicalDeviceExternalBufferPropertiesKHR(
6879             physicalDevice, pExternalBufferInfo, pExternalBufferProperties, true /* do lock */);
6880     } else {
6881         enc->vkGetPhysicalDeviceExternalBufferProperties(
6882             physicalDevice, pExternalBufferInfo, pExternalBufferProperties, true /* do lock */);
6883     }
6884     transformImpl_VkExternalMemoryProperties_fromhost(
6885         &pExternalBufferProperties->externalMemoryProperties, 0);
6886 }
6887 
on_vkGetPhysicalDeviceExternalBufferProperties(void * context,VkPhysicalDevice physicalDevice,const VkPhysicalDeviceExternalBufferInfo * pExternalBufferInfo,VkExternalBufferProperties * pExternalBufferProperties)6888 void ResourceTracker::on_vkGetPhysicalDeviceExternalBufferProperties(
6889     void* context, VkPhysicalDevice physicalDevice,
6890     const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo,
6891     VkExternalBufferProperties* pExternalBufferProperties) {
6892     return on_vkGetPhysicalDeviceExternalBufferProperties_common(
6893         false /* not KHR */, context, physicalDevice, pExternalBufferInfo,
6894         pExternalBufferProperties);
6895 }
6896 
6897 void ResourceTracker::on_vkGetPhysicalDeviceExternalBufferPropertiesKHR(
6898     void* context, VkPhysicalDevice physicalDevice,
6899     const VkPhysicalDeviceExternalBufferInfoKHR* pExternalBufferInfo,
6900     VkExternalBufferPropertiesKHR* pExternalBufferProperties) {
6901     return on_vkGetPhysicalDeviceExternalBufferProperties_common(
6902         true /* is KHR */, context, physicalDevice, pExternalBufferInfo, pExternalBufferProperties);
6903 }
6904 
6905 void ResourceTracker::on_vkGetPhysicalDeviceExternalSemaphoreProperties(
6906     void*, VkPhysicalDevice, const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo,
6907     VkExternalSemaphoreProperties* pExternalSemaphoreProperties) {
6908     (void)pExternalSemaphoreInfo;
6909     (void)pExternalSemaphoreProperties;
6910 #ifdef VK_USE_PLATFORM_FUCHSIA
6911     if (pExternalSemaphoreInfo->handleType ==
6912         static_cast<uint32_t>(VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA)) {
6913         pExternalSemaphoreProperties->compatibleHandleTypes |=
6914             VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA;
6915         pExternalSemaphoreProperties->exportFromImportedHandleTypes |=
6916             VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA;
6917         pExternalSemaphoreProperties->externalSemaphoreFeatures |=
6918             VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
6919             VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
6920     }
6921 #else
6922     const VkSemaphoreTypeCreateInfo* semaphoreTypeCi =
6923         vk_find_struct_const(pExternalSemaphoreInfo, SEMAPHORE_TYPE_CREATE_INFO);
6924     bool isSemaphoreTimeline =
6925         semaphoreTypeCi != nullptr && semaphoreTypeCi->semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE;
6926     if (isSemaphoreTimeline) {
6927         // b/304373623
6928         // dEQP-VK.api.external.semaphore.sync_fd#info_timeline
6929         pExternalSemaphoreProperties->compatibleHandleTypes = 0;
6930         pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
6931         pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
6932     } else if (pExternalSemaphoreInfo->handleType ==
6933                VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT) {
6934         pExternalSemaphoreProperties->compatibleHandleTypes |=
6935             VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
6936         pExternalSemaphoreProperties->exportFromImportedHandleTypes |=
6937             VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
6938         pExternalSemaphoreProperties->externalSemaphoreFeatures |=
6939             VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
6940             VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
6941     }
6942 #endif  // VK_USE_PLATFORM_FUCHSIA
6943 }
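
// Illustrative only (not part of this file): through the path above, a guest
// app probing external semaphore support sees full support for binary sync_fd
// semaphores and zeroed-out support for timeline semaphores:
//
//     VkPhysicalDeviceExternalSemaphoreInfo info = {
//         VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO, nullptr,
//         VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT};
//     VkExternalSemaphoreProperties props = {
//         VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES};
//     vkGetPhysicalDeviceExternalSemaphoreProperties(physicalDevice, &info, &props);
//     // props.externalSemaphoreFeatures has EXPORTABLE | IMPORTABLE set;
//     // chaining a VkSemaphoreTypeCreateInfo with VK_SEMAPHORE_TYPE_TIMELINE
//     // onto info.pNext instead yields all-zero properties (b/304373623).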
6944 
6945 void ResourceTracker::on_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR(
6946     void* context, VkPhysicalDevice physicalDevice,
6947     const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo,
6948     VkExternalSemaphoreProperties* pExternalSemaphoreProperties) {
6949     on_vkGetPhysicalDeviceExternalSemaphoreProperties(
6950         context, physicalDevice, pExternalSemaphoreInfo, pExternalSemaphoreProperties);
6951 }
6952 
6953 void ResourceTracker::registerEncoderCleanupCallback(const VkEncoder* encoder, void* object,
6954                                                      CleanupCallback callback) {
6955     std::lock_guard<std::recursive_mutex> lock(mLock);
6956     auto& callbacks = mEncoderCleanupCallbacks[encoder];
6957     callbacks[object] = callback;
6958 }
6959 
6960 void ResourceTracker::unregisterEncoderCleanupCallback(const VkEncoder* encoder, void* object) {
6961     std::lock_guard<std::recursive_mutex> lock(mLock);
6962     mEncoderCleanupCallbacks[encoder].erase(object);
6963 }
6964 
6965 void ResourceTracker::onEncoderDeleted(const VkEncoder* encoder) {
6966     std::unique_lock<std::recursive_mutex> lock(mLock);
6967     if (mEncoderCleanupCallbacks.find(encoder) == mEncoderCleanupCallbacks.end()) return;
6968 
6969     std::unordered_map<void*, CleanupCallback> callbackCopies = mEncoderCleanupCallbacks[encoder];
6970 
6971     mEncoderCleanupCallbacks.erase(encoder);
6972     lock.unlock();
6973 
6974     for (auto it : callbackCopies) {
6975         it.second();
6976     }
6977 }
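
// Hypothetical usage sketch (the names in the lambda are illustrative): code
// that caches per-encoder state registers a cleanup callback so the cache is
// dropped when that encoder goes away; onEncoderDeleted() copies the callback
// map and runs the callbacks outside mLock to avoid lock-order issues:
//
//     tracker->registerEncoderCleanupCallback(enc, cb, [cb] {
//         /* drop any state cached for cb against enc */
//     });
//     ...
//     tracker->onEncoderDeleted(enc);  // invokes and discards cb's callback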
6978 
6979 CommandBufferStagingStream::Alloc ResourceTracker::getAlloc() {
6980     if (mFeatureInfo.hasVulkanAuxCommandMemory) {
6981         return [this](size_t size) -> CommandBufferStagingStream::Memory {
6982             VkMemoryAllocateInfo info{
6983                 .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
6984                 .pNext = nullptr,
6985                 .allocationSize = size,
6986                 .memoryTypeIndex = VK_MAX_MEMORY_TYPES  // indicates auxiliary memory
6987             };
6988 
6989             auto enc = ResourceTracker::getThreadLocalEncoder();
6990             VkDevice device = VK_NULL_HANDLE;
6991             VkDeviceMemory vkDeviceMem = VK_NULL_HANDLE;
6992             VkResult result = getCoherentMemory(&info, enc, device, &vkDeviceMem);
6993             if (result != VK_SUCCESS) {
6994                 mesa_loge("Failed to get coherent memory %d", result);
6995                 return {.deviceMemory = VK_NULL_HANDLE, .ptr = nullptr};
6996             }
6997 
6998             // getCoherentMemory() uses suballocations. To retrieve the
6999             // suballocated memory address, look up the VkDeviceMemory
7000             // filled in by getCoherentMemory().
7001             // scope of mLock
7002             {
7003                 std::lock_guard<std::recursive_mutex> lock(mLock);
7004                 const auto it = info_VkDeviceMemory.find(vkDeviceMem);
7005                 if (it == info_VkDeviceMemory.end()) {
7006                     mesa_loge("Allocated coherent memory not found");
7007                     return {.deviceMemory = VK_NULL_HANDLE, .ptr = nullptr};
7008                 }
7009 
7010                 const auto& memInfo = it->second;
7011                 return {.deviceMemory = vkDeviceMem, .ptr = memInfo.ptr};
7012             }
7013         };
7014     }
7015     return nullptr;
7016 }
7017 
7018 CommandBufferStagingStream::Free ResourceTracker::getFree() {
7019     if (mFeatureInfo.hasVulkanAuxCommandMemory) {
7020         return [this](const CommandBufferStagingStream::Memory& memory) {
7021             // deviceMemory may not be the actual backing auxiliary VkDeviceMemory:
7022             // for suballocations, deviceMemory is an alias VkDeviceMemory handle;
7023             // freeCoherentMemoryLocked maps the alias to the backing VkDeviceMemory.
7024             VkDeviceMemory deviceMemory = memory.deviceMemory;
7025             std::unique_lock<std::recursive_mutex> lock(mLock);
7026             auto it = info_VkDeviceMemory.find(deviceMemory);
7027             if (it == info_VkDeviceMemory.end()) {
7028                 mesa_loge("Device memory to free not found");
7029                 return;
7030             }
7031             auto coherentMemory = freeCoherentMemoryLocked(deviceMemory, it->second);
7032             // Release the lock before potentially freeing a CoherentMemory,
7033             // because freeing it calls into VkEncoder, which must not be
7034             // called while the lock is held.
7035             lock.unlock();
7036             coherentMemory = nullptr;
7037         };
7038     }
7039     return nullptr;
7040 }
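
// Sketch of how the Alloc/Free callbacks above pair up, assuming the caller
// is a CommandBufferStagingStream (illustrative, not a definitive contract):
//
//     CommandBufferStagingStream::Alloc allocFn = tracker->getAlloc();
//     CommandBufferStagingStream::Free freeFn = tracker->getFree();
//     if (allocFn && freeFn) {
//         CommandBufferStagingStream::Memory mem = allocFn(4096);
//         if (mem.ptr) {
//             // ... encode command stream bytes into mem.ptr ...
//             freeFn(mem);  // released via freeCoherentMemoryLocked()
//         }
//     }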
7041 
7042 VkResult ResourceTracker::on_vkBeginCommandBuffer(void* context, VkResult input_result,
7043                                                   VkCommandBuffer commandBuffer,
7044                                                   const VkCommandBufferBeginInfo* pBeginInfo) {
7045     (void)context;
7046 
7047     resetCommandBufferStagingInfo(commandBuffer, true /* also reset primaries */,
7048                                   true /* also clear pending descriptor sets */);
7049 
7050     VkEncoder* enc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
7051     (void)input_result;
7052 
7053     struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
7054     cb->flags = pBeginInfo->flags;
7055 
7056     VkCommandBufferBeginInfo modifiedBeginInfo;
7057 
7058     if (pBeginInfo->pInheritanceInfo && !cb->isSecondary) {
7059         modifiedBeginInfo = *pBeginInfo;
7060         modifiedBeginInfo.pInheritanceInfo = nullptr;
7061         pBeginInfo = &modifiedBeginInfo;
7062     }
7063 
7064     if (!supportsDeferredCommands()) {
7065         return enc->vkBeginCommandBuffer(commandBuffer, pBeginInfo, true /* do lock */);
7066     }
7067 
7068     enc->vkBeginCommandBufferAsyncGOOGLE(commandBuffer, pBeginInfo, true /* do lock */);
7069 
7070     return VK_SUCCESS;
7071 }
7072 
7073 VkResult ResourceTracker::on_vkEndCommandBuffer(void* context, VkResult input_result,
7074                                                 VkCommandBuffer commandBuffer) {
7075     VkEncoder* enc = (VkEncoder*)context;
7076     (void)input_result;
7077 
7078     if (!supportsDeferredCommands()) {
7079         return enc->vkEndCommandBuffer(commandBuffer, true /* do lock */);
7080     }
7081 
7082     enc->vkEndCommandBufferAsyncGOOGLE(commandBuffer, true /* do lock */);
7083 
7084     return VK_SUCCESS;
7085 }
7086 
7087 VkResult ResourceTracker::on_vkResetCommandBuffer(void* context, VkResult input_result,
7088                                                   VkCommandBuffer commandBuffer,
7089                                                   VkCommandBufferResetFlags flags) {
7090     VkEncoder* enc = (VkEncoder*)context;
7091     (void)input_result;
7092 
7093     if (!supportsDeferredCommands()) {
7094         VkResult res = enc->vkResetCommandBuffer(commandBuffer, flags, true /* do lock */);
7095         resetCommandBufferStagingInfo(commandBuffer, true /* also reset primaries */,
7096                                       true /* also clear pending descriptor sets */);
7097         return res;
7098     }
7099 
7100     enc->vkResetCommandBufferAsyncGOOGLE(commandBuffer, flags, true /* do lock */);
7101     resetCommandBufferStagingInfo(commandBuffer, true /* also reset primaries */,
7102                                   true /* also clear pending descriptor sets */);
7103     return VK_SUCCESS;
7104 }
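
// Note on the three entry points above: when supportsDeferredCommands() is
// true, Begin/End/Reset are encoded as one-way *AsyncGOOGLE commands and
// VK_SUCCESS is returned optimistically without a host round trip; any
// host-side failure presumably surfaces later (e.g. at queue submit time).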
7105 
7106 VkResult ResourceTracker::on_vkCreateImageView(void* context, VkResult input_result,
7107                                                VkDevice device,
7108                                                const VkImageViewCreateInfo* pCreateInfo,
7109                                                const VkAllocationCallbacks* pAllocator,
7110                                                VkImageView* pView) {
7111     VkEncoder* enc = (VkEncoder*)context;
7112     (void)input_result;
7113 
7114     VkImageViewCreateInfo localCreateInfo = vk_make_orphan_copy(*pCreateInfo);
7115 
7116 #if defined(VK_USE_PLATFORM_ANDROID_KHR)
7117     vk_struct_chain_iterator structChainIter = vk_make_chain_iterator(&localCreateInfo);
7118     if (pCreateInfo->format == VK_FORMAT_UNDEFINED) {
7119         std::lock_guard<std::recursive_mutex> lock(mLock);
7120 
7121         auto it = info_VkImage.find(pCreateInfo->image);
7122         if (it != info_VkImage.end() && it->second.hasExternalFormat) {
7123             localCreateInfo.format = vk_format_from_fourcc(it->second.externalFourccFormat);
7124         }
7125     }
7126     VkSamplerYcbcrConversionInfo localVkSamplerYcbcrConversionInfo;
7127     const VkSamplerYcbcrConversionInfo* samplerYcbcrConversionInfo =
        vk_find_struct_const(pCreateInfo, SAMPLER_YCBCR_CONVERSION_INFO);
7128     if (samplerYcbcrConversionInfo) {
7129         if (samplerYcbcrConversionInfo->conversion != VK_YCBCR_CONVERSION_DO_NOTHING) {
7130             localVkSamplerYcbcrConversionInfo = vk_make_orphan_copy(*samplerYcbcrConversionInfo);
7131             vk_append_struct(&structChainIter, &localVkSamplerYcbcrConversionInfo);
7132         }
7133     }
7134 #endif
7135 
7136     return enc->vkCreateImageView(device, &localCreateInfo, pAllocator, pView, true /* do lock */);
7137 }
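
// Illustrative only: a view of an AHardwareBuffer-backed image with an
// external format is typically created with VK_FORMAT_UNDEFINED plus a
// VkSamplerYcbcrConversionInfo chained on pNext; the code above then
// substitutes the fourcc-derived format recorded at image creation time:
//
//     VkImageViewCreateInfo viewCi = {/* ... */};  // format == VK_FORMAT_UNDEFINED
//     viewCi.pNext = &ycbcrInfo;  // from vkCreateSamplerYcbcrConversion
//     vkCreateImageView(device, &viewCi, nullptr, &view);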
7138 
7139 void ResourceTracker::on_vkCmdExecuteCommands(void* context, VkCommandBuffer commandBuffer,
7140                                               uint32_t commandBufferCount,
7141                                               const VkCommandBuffer* pCommandBuffers) {
7142     VkEncoder* enc = (VkEncoder*)context;
7143 
7144     if (!mFeatureInfo.hasVulkanQueueSubmitWithCommands) {
7145         enc->vkCmdExecuteCommands(commandBuffer, commandBufferCount, pCommandBuffers,
7146                                   true /* do lock */);
7147         return;
7148     }
7149 
7150     struct goldfish_VkCommandBuffer* primary = as_goldfish_VkCommandBuffer(commandBuffer);
7151     for (uint32_t i = 0; i < commandBufferCount; ++i) {
7152         struct goldfish_VkCommandBuffer* secondary =
7153             as_goldfish_VkCommandBuffer(pCommandBuffers[i]);
7154         appendObject(&secondary->superObjects, primary);
7155         appendObject(&primary->subObjects, secondary);
7156     }
7157 
7158     enc->vkCmdExecuteCommands(commandBuffer, commandBufferCount, pCommandBuffers,
7159                               true /* do lock */);
7160 }
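
// The superObjects/subObjects links recorded above let
// resetCommandBufferStagingInfo() find every primary that referenced a
// secondary (and vice versa) so their staging streams are reset together.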
7161 
7162 void ResourceTracker::on_vkCmdBindDescriptorSets(void* context, VkCommandBuffer commandBuffer,
7163                                                  VkPipelineBindPoint pipelineBindPoint,
7164                                                  VkPipelineLayout layout, uint32_t firstSet,
7165                                                  uint32_t descriptorSetCount,
7166                                                  const VkDescriptorSet* pDescriptorSets,
7167                                                  uint32_t dynamicOffsetCount,
7168                                                  const uint32_t* pDynamicOffsets) {
7169     VkEncoder* enc = (VkEncoder*)context;
7170 
7171     if (mFeatureInfo.hasVulkanBatchedDescriptorSetUpdate)
7172         addPendingDescriptorSets(commandBuffer, descriptorSetCount, pDescriptorSets);
7173 
7174     enc->vkCmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet,
7175                                  descriptorSetCount, pDescriptorSets, dynamicOffsetCount,
7176                                  pDynamicOffsets, true /* do lock */);
7177 }
7178 
7179 void ResourceTracker::on_vkCmdPipelineBarrier(
7180     void* context, VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
7181     VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
7182     uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers,
7183     uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers,
7184     uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers) {
7185     VkEncoder* enc = (VkEncoder*)context;
7186 
7187     std::vector<VkImageMemoryBarrier> updatedImageMemoryBarriers;
7188     updatedImageMemoryBarriers.reserve(imageMemoryBarrierCount);
7189     for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
7190         VkImageMemoryBarrier barrier = pImageMemoryBarriers[i];
7191 
7192 #ifdef VK_USE_PLATFORM_ANDROID_KHR
7193         // Unfortunately, Android does not yet have a mechanism for sharing the expected
7194         // VkImageLayout when passing around AHardwareBuffer-s, so many existing users
7195         // that import AHardwareBuffer-s into VkImage-s/VkDeviceMemory-s simply use
7196         // VK_IMAGE_LAYOUT_UNDEFINED. However, the Vulkan spec's image layout transition
7197         // section says "If the old layout is VK_IMAGE_LAYOUT_UNDEFINED, the contents of
7198         // that range may be discarded." Some Vulkan drivers have been observed to actually
7199         // perform the discard, which leads to AHardwareBuffer-s being unintentionally
7200         // cleared. See go/ahb-vkimagelayout for more information.
7201         if (barrier.srcQueueFamilyIndex != barrier.dstQueueFamilyIndex &&
7202             (barrier.srcQueueFamilyIndex == VK_QUEUE_FAMILY_EXTERNAL ||
7203              barrier.srcQueueFamilyIndex == VK_QUEUE_FAMILY_FOREIGN_EXT) &&
7204             barrier.oldLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
7205             // This is not a complete solution, as the Vulkan spec does not require
7206             // drivers to perform a no-op when oldLayout equals newLayout, but in
7207             // practice this has been enough to avoid unintentionally clearing out
7208             // images.
7209             // TODO(b/236179843): figure out a long term solution.
7210             barrier.oldLayout = barrier.newLayout;
7211         }
7212 #endif
7213 
7214         updatedImageMemoryBarriers.push_back(barrier);
7215     }
7216 
7217     enc->vkCmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags,
7218                               memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
7219                               pBufferMemoryBarriers, updatedImageMemoryBarriers.size(),
7220                               updatedImageMemoryBarriers.data(), true /* do lock */);
7221 }
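
// Illustrative effect of the workaround above: an acquire barrier written as
//
//     barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_FOREIGN_EXT;
//     barrier.dstQueueFamilyIndex = 0;
//     barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
//     barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
//
// is encoded with oldLayout == newLayout == SHADER_READ_ONLY_OPTIMAL, so the
// host driver may no longer discard the AHardwareBuffer's contents.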
7222 
7223 void ResourceTracker::on_vkDestroyDescriptorSetLayout(void* context, VkDevice device,
7224                                                       VkDescriptorSetLayout descriptorSetLayout,
7225                                                       const VkAllocationCallbacks* pAllocator) {
7226     decDescriptorSetLayoutRef(context, device, descriptorSetLayout, pAllocator);
7227 }
7228 
7229 VkResult ResourceTracker::on_vkAllocateCommandBuffers(
7230     void* context, VkResult input_result, VkDevice device,
7231     const VkCommandBufferAllocateInfo* pAllocateInfo, VkCommandBuffer* pCommandBuffers) {
7232     (void)input_result;
7233 
7234     VkEncoder* enc = (VkEncoder*)context;
7235     VkResult res =
7236         enc->vkAllocateCommandBuffers(device, pAllocateInfo, pCommandBuffers, true /* do lock */);
7237     if (VK_SUCCESS != res) return res;
7238 
7239     for (uint32_t i = 0; i < pAllocateInfo->commandBufferCount; ++i) {
7240         struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(pCommandBuffers[i]);
7241         cb->isSecondary = pAllocateInfo->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY;
7242         cb->device = device;
7243     }
7244 
7245     return res;
7246 }
7247 
7248 #if defined(VK_USE_PLATFORM_ANDROID_KHR)
7249 VkResult ResourceTracker::exportSyncFdForQSRILocked(VkImage image, int* fd) {
7250     mesa_logd("%s: call for image %p host image handle 0x%llx\n", __func__, (void*)image,
7251               (unsigned long long)get_host_u64_VkImage(image));
7252 
7253     if (mFeatureInfo.hasVirtioGpuNativeSync) {
7254         struct VirtGpuExecBuffer exec = {};
7255         struct gfxstreamCreateQSRIExportVK exportQSRI = {};
7256         VirtGpuDevice* instance = VirtGpuDevice::getInstance();
7257 
7258         uint64_t hostImageHandle = get_host_u64_VkImage(image);
7259 
7260         exportQSRI.hdr.opCode = GFXSTREAM_CREATE_QSRI_EXPORT_VK;
7261         exportQSRI.imageHandleLo = (uint32_t)hostImageHandle;
7262         exportQSRI.imageHandleHi = (uint32_t)(hostImageHandle >> 32);
7263 
7264         exec.command = static_cast<void*>(&exportQSRI);
7265         exec.command_size = sizeof(exportQSRI);
7266         exec.flags = kFenceOut | kRingIdx;
7267         if (instance->execBuffer(exec, nullptr)) return VK_ERROR_OUT_OF_HOST_MEMORY;
7268 
7269         *fd = exec.handle.osHandle;
7270     } else {
7271 #if GFXSTREAM_ENABLE_GUEST_GOLDFISH
7272         ensureSyncDeviceFd();
7273         goldfish_sync_queue_work(
7274             mSyncDeviceFd, get_host_u64_VkImage(image) /* the handle */,
7275             GOLDFISH_SYNC_VULKAN_QSRI /* thread handle (doubling as type field) */, fd);
7276 #endif
7277     }
7278 
7279     mesa_logd("%s: got fd: %d\n", __func__, *fd);
7280     auto imageInfoIt = info_VkImage.find(image);
7281     if (imageInfoIt != info_VkImage.end()) {
7282         auto& imageInfo = imageInfoIt->second;
7283 
7284         // Remove any pending QSRI sync fds that are already signaled.
7285         auto syncFdIt = imageInfo.pendingQsriSyncFds.begin();
7286         while (syncFdIt != imageInfo.pendingQsriSyncFds.end()) {
7287             int syncFd = *syncFdIt;
7288             int syncWaitRet = mSyncHelper->wait(syncFd, /*timeout msecs*/ 0);
7289             if (syncWaitRet == 0) {
7290                 // Sync fd is signaled.
7291                 syncFdIt = imageInfo.pendingQsriSyncFds.erase(syncFdIt);
7292                 mSyncHelper->close(syncFd);
7293             } else {
7294                 if (errno != ETIME) {
7295                     mesa_loge("%s: Failed to wait for pending QSRI sync: strerror: %s errno: %d",
7296                               __func__, strerror(errno), errno);
7297                 }
7298                 break;
7299             }
7300         }
7301 
7302         int syncFdDup = mSyncHelper->dup(*fd);
7303         if (syncFdDup < 0) {
7304             mesa_loge("%s: Failed to dup() QSRI sync fd: strerror: %s errno: %d", __func__,
7305                       strerror(errno), errno);
7306         } else {
7307             imageInfo.pendingQsriSyncFds.push_back(syncFdDup);
7308         }
7309     }
7310 
7311     return VK_SUCCESS;
7312 }
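
// The dup()'d fds accumulated in pendingQsriSyncFds above are drained
// opportunistically on the next export (zero-timeout wait) and, presumably,
// at image teardown, so each image tracks only its still-pending QSRI fences.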
7313 
7314 VkResult ResourceTracker::on_vkQueueSignalReleaseImageANDROID(void* context, VkResult input_result,
7315                                                               VkQueue queue,
7316                                                               uint32_t waitSemaphoreCount,
7317                                                               const VkSemaphore* pWaitSemaphores,
7318                                                               VkImage image, int* pNativeFenceFd) {
7319     (void)input_result;
7320 
7321     VkEncoder* enc = (VkEncoder*)context;
7322 
7323     if (!mFeatureInfo.hasVulkanAsyncQsri) {
7324         return enc->vkQueueSignalReleaseImageANDROID(queue, waitSemaphoreCount, pWaitSemaphores,
7325                                                      image, pNativeFenceFd, true /* lock */);
7326     }
7327 
7328     {
7329         std::lock_guard<std::recursive_mutex> lock(mLock);
7330         auto it = info_VkImage.find(image);
7331         if (it == info_VkImage.end()) {
7332             if (pNativeFenceFd) *pNativeFenceFd = -1;
7333             return VK_ERROR_INITIALIZATION_FAILED;
7334         }
7335     }
7336 
7337     enc->vkQueueSignalReleaseImageANDROIDAsyncGOOGLE(queue, waitSemaphoreCount, pWaitSemaphores,
7338                                                      image, true /* lock */);
7339 
7340     std::lock_guard<std::recursive_mutex> lock(mLock);
7341     VkResult result;
7342     if (pNativeFenceFd) {
7343         result = exportSyncFdForQSRILocked(image, pNativeFenceFd);
7344     } else {
7345         int syncFd = -1;
7346         result = exportSyncFdForQSRILocked(image, &syncFd);
7347 
7348         if (syncFd >= 0) {
7349             mSyncHelper->close(syncFd);
7350         }
7351     }
7352 
7353     return result;
7354 }
7355 #endif
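
// Illustrative QSRI flow on Android (caller-side sketch, not part of this
// file): the platform swapchain calls this entry point around present time
// and hands the returned native fence fd to the compositor:
//
//     int releaseFd = -1;
//     vkQueueSignalReleaseImageANDROID(queue, waitCount, waitSems, image,
//                                      &releaseFd);
//     // releaseFd signals once the host has finished reading the image.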
7356 
7357 VkResult ResourceTracker::on_vkCreateGraphicsPipelines(
7358     void* context, VkResult input_result, VkDevice device, VkPipelineCache pipelineCache,
7359     uint32_t createInfoCount, const VkGraphicsPipelineCreateInfo* pCreateInfos,
7360     const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines) {
7361     (void)input_result;
7362     VkEncoder* enc = (VkEncoder*)context;
7363     std::vector<VkGraphicsPipelineCreateInfo> localCreateInfos(pCreateInfos,
7364                                                                pCreateInfos + createInfoCount);
7365     for (VkGraphicsPipelineCreateInfo& graphicsPipelineCreateInfo : localCreateInfos) {
7366         // dEQP-VK.api.pipeline.pipeline_invalid_pointers_unused_structs#graphics
7367         bool requireViewportState = false;
7368         // VUID-VkGraphicsPipelineCreateInfo-rasterizerDiscardEnable-00750
7369         requireViewportState |=
7370             graphicsPipelineCreateInfo.pRasterizationState != nullptr &&
7371             graphicsPipelineCreateInfo.pRasterizationState->rasterizerDiscardEnable == VK_FALSE;
7372         // VUID-VkGraphicsPipelineCreateInfo-pViewportState-04892
7373 #ifdef VK_EXT_extended_dynamic_state2
7374         if (!requireViewportState && graphicsPipelineCreateInfo.pDynamicState) {
7375             for (uint32_t i = 0; i < graphicsPipelineCreateInfo.pDynamicState->dynamicStateCount;
7376                  i++) {
7377                 if (VK_DYNAMIC_STATE_RASTERIZER_DISCARD_ENABLE_EXT ==
7378                     graphicsPipelineCreateInfo.pDynamicState->pDynamicStates[i]) {
7379                     requireViewportState = true;
7380                     break;
7381                 }
7382             }
7383         }
7384 #endif  // VK_EXT_extended_dynamic_state2
7385         if (!requireViewportState) {
7386             graphicsPipelineCreateInfo.pViewportState = nullptr;
7387         }
7388 
7389         // Fragment-shader state has the same requirement as pViewportState.
7390         bool shouldIncludeFragmentShaderState = requireViewportState;
7391 
7392         // VUID-VkGraphicsPipelineCreateInfo-rasterizerDiscardEnable-00751
7393         if (!shouldIncludeFragmentShaderState) {
7394             graphicsPipelineCreateInfo.pMultisampleState = nullptr;
7395         }
7396 
7397         bool forceDepthStencilState = false;
7398         bool forceColorBlendState = false;
7399 
7400         const VkPipelineRenderingCreateInfo* pipelineRenderingInfo =
7401             vk_find_struct_const(&graphicsPipelineCreateInfo, PIPELINE_RENDERING_CREATE_INFO);
7402 
7403         if (pipelineRenderingInfo) {
7404             forceDepthStencilState |=
7405                 pipelineRenderingInfo->depthAttachmentFormat != VK_FORMAT_UNDEFINED;
7406             forceDepthStencilState |=
7407                 pipelineRenderingInfo->stencilAttachmentFormat != VK_FORMAT_UNDEFINED;
7408             forceColorBlendState |= pipelineRenderingInfo->colorAttachmentCount != 0;
7409         }
7410 
7411         // VUID-VkGraphicsPipelineCreateInfo-renderPass-06043
7412         // VUID-VkGraphicsPipelineCreateInfo-renderPass-06044
7413         if (graphicsPipelineCreateInfo.renderPass == VK_NULL_HANDLE ||
7414             !shouldIncludeFragmentShaderState) {
7415             // VUID-VkGraphicsPipelineCreateInfo-renderPass-06053
7416             if (!forceDepthStencilState) {
7417                 graphicsPipelineCreateInfo.pDepthStencilState = nullptr;
7418             }
7419             if (!forceColorBlendState) {
7420                 graphicsPipelineCreateInfo.pColorBlendState = nullptr;
7421             }
7422         }
7423     }
7424     return enc->vkCreateGraphicsPipelines(device, pipelineCache, localCreateInfos.size(),
7425                                           localCreateInfos.data(), pAllocator, pPipelines,
7426                                           true /* do lock */);
7427 }
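
// Example of what the sanitization above tolerates (illustrative; see
// dEQP-VK.api.pipeline.pipeline_invalid_pointers_unused_structs#graphics):
// with rasterizer discard enabled and no discard-enable dynamic state, an
// app may legally pass garbage in the unused state pointers:
//
//     rasterizationState.rasterizerDiscardEnable = VK_TRUE;
//     pipelineCi.pViewportState =
//         (const VkPipelineViewportStateCreateInfo*)0xdeadbeef;  // unused
//     vkCreateGraphicsPipelines(device, cache, 1, &pipelineCi, nullptr, &pipeline);
//
// The local copies made here null out such pointers before encoding.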
7428 
7429 uint32_t ResourceTracker::getApiVersionFromInstance(VkInstance instance) {
7430     std::lock_guard<std::recursive_mutex> lock(mLock);
7431     uint32_t api = kDefaultApiVersion;
7432 
7433     auto it = info_VkInstance.find(instance);
7434     if (it == info_VkInstance.end()) return api;
7435 
7436     api = it->second.highestApiVersion;
7437 
7438     return api;
7439 }
7440 
7441 uint32_t ResourceTracker::getApiVersionFromDevice(VkDevice device) {
7442     std::lock_guard<std::recursive_mutex> lock(mLock);
7443 
7444     uint32_t api = kDefaultApiVersion;
7445 
7446     auto it = info_VkDevice.find(device);
7447     if (it == info_VkDevice.end()) return api;
7448 
7449     api = it->second.apiVersion;
7450 
7451     return api;
7452 }
7453 
7454 bool ResourceTracker::hasInstanceExtension(VkInstance instance, const std::string& name) {
7455     std::lock_guard<std::recursive_mutex> lock(mLock);
7456 
7457     auto it = info_VkInstance.find(instance);
7458     if (it == info_VkInstance.end()) return false;
7459 
7460     return it->second.enabledExtensions.find(name) != it->second.enabledExtensions.end();
7461 }
7462 
7463 bool ResourceTracker::hasDeviceExtension(VkDevice device, const std::string& name) {
7464     std::lock_guard<std::recursive_mutex> lock(mLock);
7465 
7466     auto it = info_VkDevice.find(device);
7467     if (it == info_VkDevice.end()) return false;
7468 
7469     return it->second.enabledExtensions.find(name) != it->second.enabledExtensions.end();
7470 }
7471 
7472 VkDevice ResourceTracker::getDevice(VkCommandBuffer commandBuffer) const {
7473     struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
7474     if (!cb) {
7475         return nullptr;
7476     }
7477     return cb->device;
7478 }
7479 
7480 // Resets the staging stream for this command buffer and for any primary command
7481 // buffers into which it has been recorded. If requested, also clears the pending
7482 // descriptor sets.
7483 void ResourceTracker::resetCommandBufferStagingInfo(VkCommandBuffer commandBuffer,
7484                                                     bool alsoResetPrimaries,
7485                                                     bool alsoClearPendingDescriptorSets) {
7486     struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
7487     if (!cb) {
7488         return;
7489     }
7490     if (cb->privateEncoder) {
7491         sStaging.pushStaging((CommandBufferStagingStream*)cb->privateStream, cb->privateEncoder);
7492         cb->privateEncoder = nullptr;
7493         cb->privateStream = nullptr;
7494     }
7495 
7496     if (alsoClearPendingDescriptorSets && cb->userPtr) {
7497         CommandBufferPendingDescriptorSets* pendingSets =
7498             (CommandBufferPendingDescriptorSets*)cb->userPtr;
7499         pendingSets->sets.clear();
7500     }
7501 
7502     if (alsoResetPrimaries) {
7503         forAllObjects(cb->superObjects, [this, alsoResetPrimaries,
7504                                          alsoClearPendingDescriptorSets](void* obj) {
7505             VkCommandBuffer superCommandBuffer = (VkCommandBuffer)obj;
7506             this->resetCommandBufferStagingInfo(superCommandBuffer, alsoResetPrimaries,
7507                                                 alsoClearPendingDescriptorSets);
7508         });
7509         eraseObjects(&cb->superObjects);
7510     }
7511 
7512     forAllObjects(cb->subObjects, [cb](void* obj) {
7513         VkCommandBuffer subCommandBuffer = (VkCommandBuffer)obj;
7514         struct goldfish_VkCommandBuffer* subCb = as_goldfish_VkCommandBuffer(subCommandBuffer);
7515         // We don't do resetCommandBufferStagingInfo(subCommandBuffer)
7516         // since the user still might have submittable stuff pending there.
7517         eraseObject(&subCb->superObjects, (void*)cb);
7518     });
7519 
7520     eraseObjects(&cb->subObjects);
7521 }
7522 
7523 // Unlike resetCommandBufferStagingInfo, this does not always erase its
7524 // superObjects pointers because the command buffer has merely been
7525 // submitted, not reset.  However, if the command buffer was recorded with
7526 // ONE_TIME_SUBMIT_BIT, then it will also reset its primaries.
7527 //
7528 // Also, we keep the set of descriptor sets referenced by this command
7529 // buffer, because the command buffer was only submitted and it's possible to
7530 // update the descriptor sets again and re-submit the same command buffer
7531 // without re-recording it (update-after-bind descriptor sets).
7532 void ResourceTracker::resetCommandBufferPendingTopology(VkCommandBuffer commandBuffer) {
7533     struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
7534     if (cb->flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) {
7535         resetCommandBufferStagingInfo(commandBuffer, true /* reset primaries */,
7536                                       true /* clear pending descriptor sets */);
7537     } else {
7538         resetCommandBufferStagingInfo(commandBuffer, false /* Don't reset primaries */,
7539                                       false /* Don't clear pending descriptor sets */);
7540     }
7541 }
7542 
7543 void ResourceTracker::resetCommandPoolStagingInfo(VkCommandPool commandPool) {
7544     struct goldfish_VkCommandPool* p = as_goldfish_VkCommandPool(commandPool);
7545 
7546     if (!p) return;
7547 
7548     forAllObjects(p->subObjects, [this](void* commandBuffer) {
7549         this->resetCommandBufferStagingInfo((VkCommandBuffer)commandBuffer,
7550                                             true /* also reset primaries */,
7551                                             true /* also clear pending descriptor sets */);
7552     });
7553 }
7554 
7555 void ResourceTracker::addToCommandPool(VkCommandPool commandPool, uint32_t commandBufferCount,
7556                                        VkCommandBuffer* pCommandBuffers) {
7557     for (uint32_t i = 0; i < commandBufferCount; ++i) {
7558         struct goldfish_VkCommandPool* p = as_goldfish_VkCommandPool(commandPool);
7559         struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(pCommandBuffers[i]);
7560         appendObject(&p->subObjects, (void*)(pCommandBuffers[i]));
7561         appendObject(&cb->poolObjects, (void*)commandPool);
7562     }
7563 }
7564 
7565 void ResourceTracker::clearCommandPool(VkCommandPool commandPool) {
7566     resetCommandPoolStagingInfo(commandPool);
7567     struct goldfish_VkCommandPool* p = as_goldfish_VkCommandPool(commandPool);
7568     forAllObjects(p->subObjects, [this](void* commandBuffer) {
7569         this->unregister_VkCommandBuffer((VkCommandBuffer)commandBuffer);
7570     });
7571     eraseObjects(&p->subObjects);
7572 }
7573 
7574 const VkPhysicalDeviceMemoryProperties& ResourceTracker::getPhysicalDeviceMemoryProperties(
7575     void* context, VkDevice device, VkPhysicalDevice physicalDevice) {
7576     if (!mCachedPhysicalDeviceMemoryProps) {
7577         if (physicalDevice == VK_NULL_HANDLE) {
7578             std::lock_guard<std::recursive_mutex> lock(mLock);
7579 
7580             auto deviceInfoIt = info_VkDevice.find(device);
7581             if (deviceInfoIt == info_VkDevice.end()) {
7582                 mesa_loge("No valid device or physical device provided.");
7583                 abort();
7584             }
7585             const auto& deviceInfo = deviceInfoIt->second;
7586             physicalDevice = deviceInfo.physdev;
7587         }
7588 
7589         VkEncoder* enc = (VkEncoder*)context;
7590 
7591         VkPhysicalDeviceMemoryProperties properties;
7592         enc->vkGetPhysicalDeviceMemoryProperties(physicalDevice, &properties, true /* do lock */);
7593 
7594         mCachedPhysicalDeviceMemoryProps.emplace(std::move(properties));
7595     }
7596     return *mCachedPhysicalDeviceMemoryProps;
7597 }
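
// Note: the memory properties are cached once per ResourceTracker and the
// physicalDevice argument is ignored on later calls, apparently assuming the
// guest exposes a single physical device.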
7598 
7599 static ResourceTracker* sTracker = nullptr;
7600 
7601 ResourceTracker::ResourceTracker() {
7602     mCreateMapping = new CreateMapping();
7603     mDestroyMapping = new DestroyMapping();
7604     // nothing to do
7605 }
7606 
7607 ResourceTracker::~ResourceTracker() {
7608     delete mCreateMapping;
7609     delete mDestroyMapping;
7610 }
7611 
7612 VulkanHandleMapping* ResourceTracker::createMapping() { return mCreateMapping; }
7613 
7614 VulkanHandleMapping* ResourceTracker::destroyMapping() { return mDestroyMapping; }
7615 
7616 // static
7617 ResourceTracker* ResourceTracker::get() {
7618     if (!sTracker) {
7619         // To be initialized once on vulkan device open.
7620         sTracker = new ResourceTracker;
7621     }
7622     return sTracker;
7623 }
7624 
7625 // static
7626 ALWAYS_INLINE_GFXSTREAM VkEncoder* ResourceTracker::getCommandBufferEncoder(
7627     VkCommandBuffer commandBuffer) {
7628     if (!(ResourceTracker::streamFeatureBits &
7629           VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT)) {
7630         auto enc = ResourceTracker::getThreadLocalEncoder();
7631         ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, enc);
7632         return enc;
7633     }
7634 
7635     struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
7636     if (!cb->privateEncoder) {
7637         sStaging.setAllocFree(ResourceTracker::get()->getAlloc(),
7638                               ResourceTracker::get()->getFree());
7639         sStaging.popStaging((CommandBufferStagingStream**)&cb->privateStream, &cb->privateEncoder);
7640     }
7641     uint8_t* writtenPtr;
7642     size_t written;
7643     ((CommandBufferStagingStream*)cb->privateStream)->getWritten(&writtenPtr, &written);
7644     return cb->privateEncoder;
7645 }
7646 
7647 // static
7648 ALWAYS_INLINE_GFXSTREAM VkEncoder* ResourceTracker::getQueueEncoder(VkQueue queue) {
7649     auto enc = ResourceTracker::getThreadLocalEncoder();
7650     if (!(ResourceTracker::streamFeatureBits &
7651           VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT)) {
7652         ResourceTracker::get()->syncEncodersForQueue(queue, enc);
7653     }
7654     return enc;
7655 }
7656 
7657 // static
7658 ALWAYS_INLINE_GFXSTREAM VkEncoder* ResourceTracker::getThreadLocalEncoder() {
7659     auto hostConn = ResourceTracker::threadingCallbacks.hostConnectionGetFunc();
7660     auto vkEncoder = ResourceTracker::threadingCallbacks.vkEncoderGetFunc(hostConn);
7661     return vkEncoder;
7662 }
7663 
7664 // static
7665 void ResourceTracker::setSeqnoPtr(uint32_t* seqnoptr) { sSeqnoPtr = seqnoptr; }
7666 
7667 // static
7668 ALWAYS_INLINE_GFXSTREAM uint32_t ResourceTracker::nextSeqno() {
7669     uint32_t res = __atomic_add_fetch(sSeqnoPtr, 1, __ATOMIC_SEQ_CST);
7670     return res;
7671 }
7672 
7673 // static
7674 ALWAYS_INLINE_GFXSTREAM uint32_t ResourceTracker::getSeqno() {
7675     uint32_t res = __atomic_load_n(sSeqnoPtr, __ATOMIC_SEQ_CST);
7676     return res;
7677 }
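
// Both helpers above use seq_cst atomics on the shared sSeqnoPtr counter, so
// sequence numbers are process-wide and strictly increasing. Illustrative:
//
//     uint32_t current = ResourceTracker::getSeqno();
//     uint32_t mine = ResourceTracker::nextSeqno();  // current + 1 absent races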
7678 
7679 void ResourceTracker::transformImpl_VkExternalMemoryProperties_tohost(VkExternalMemoryProperties*,
7680                                                                       uint32_t) {}
7681 
7682 void ResourceTracker::transformImpl_VkImageCreateInfo_fromhost(const VkImageCreateInfo*, uint32_t) {
7683 }
7684 void ResourceTracker::transformImpl_VkImageCreateInfo_tohost(const VkImageCreateInfo*, uint32_t) {}
7685 
7686 #define DEFINE_TRANSFORMED_TYPE_IMPL(type)                                  \
7687     void ResourceTracker::transformImpl_##type##_tohost(type*, uint32_t) {} \
7688     void ResourceTracker::transformImpl_##type##_fromhost(type*, uint32_t) {}
7689 
7690 LIST_TRIVIAL_TRANSFORMED_TYPES(DEFINE_TRANSFORMED_TYPE_IMPL)
7691 
7692 }  // namespace vk
7693 }  // namespace gfxstream
7694