/*
 * Copyright 2018 Google
 * SPDX-License-Identifier: MIT
 */

#include "ResourceTracker.h"

#include "CommandBufferStagingStream.h"
#include "DescriptorSetVirtualization.h"
#include "HostVisibleMemoryVirtualization.h"
#include "Resources.h"
#include "VkEncoder.h"
#include "gfxstream_vk_private.h"
#include "goldfish_address_space.h"
#include "goldfish_vk_private_defs.h"
#include "util/anon_file.h"
#include "util/macros.h"
#include "virtgpu_gfxstream_protocol.h"
#include "vulkan/vulkan_core.h"
#include "util/detect_os.h"

#ifdef VK_USE_PLATFORM_ANDROID_KHR
#include "vk_format_info.h"
#include <vndk/hardware_buffer.h>
#endif
#include <stdlib.h>

#include <algorithm>
#include <chrono>
#include <set>
#include <string>
#include <unordered_map>
#include <unordered_set>

#include "vk_util.h"

#if DETECT_OS_LINUX
#include <drm_fourcc.h>
#endif

#ifndef VK_USE_PLATFORM_FUCHSIA
void zx_handle_close(zx_handle_t) {}
void zx_event_create(int, zx_handle_t*) {}
#endif

static constexpr uint32_t kDefaultApiVersion = VK_MAKE_VERSION(1, 1, 0);

struct vk_struct_chain_iterator {
    VkBaseOutStructure* value;
};

template <class T>
static vk_struct_chain_iterator vk_make_chain_iterator(T* vk_struct) {
    vk_struct_chain_iterator result = {reinterpret_cast<VkBaseOutStructure*>(vk_struct)};
    return result;
}

template <class T>
static void vk_append_struct(vk_struct_chain_iterator* i, T* vk_struct) {
    VkBaseOutStructure* p = i->value;
    if (p->pNext) {
        ::abort();
    }

    p->pNext = reinterpret_cast<VkBaseOutStructure*>(vk_struct);
    vk_struct->pNext = NULL;

    *i = vk_make_chain_iterator(vk_struct);
}

template <class T>
static T vk_make_orphan_copy(const T& vk_struct) {
    T copy = vk_struct;
    copy.pNext = NULL;
    return copy;
}
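
// Illustrative sketch (not part of the driver): how the helpers above compose
// to build a pNext chain. Struct names are standard Vulkan; the values are
// made up for the example.
//
//   VkMemoryAllocateInfo localInfo = vk_make_orphan_copy(*pAllocateInfo);
//   vk_struct_chain_iterator it = vk_make_chain_iterator(&localInfo);
//
//   VkMemoryDedicatedAllocateInfo dedicated = {
//       VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO, nullptr, image, buffer};
//   vk_append_struct(&it, &dedicated);  // localInfo.pNext now points at `dedicated`
//
// Each append advances the iterator, so further vk_append_struct() calls keep
// extending the tail of the chain; appending onto a struct whose pNext is
// already set aborts, which catches accidental chain clobbering.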

namespace gfxstream {
namespace vk {

#define MAKE_HANDLE_MAPPING_FOREACH(type_name, map_impl, map_to_u64_impl, map_from_u64_impl) \
    void mapHandles_##type_name(type_name* handles, size_t count) override {                 \
        for (size_t i = 0; i < count; ++i) {                                                 \
            map_impl;                                                                        \
        }                                                                                    \
    }                                                                                        \
    void mapHandles_##type_name##_u64(const type_name* handles, uint64_t* handle_u64s,       \
                                      size_t count) override {                               \
        for (size_t i = 0; i < count; ++i) {                                                 \
            map_to_u64_impl;                                                                 \
        }                                                                                    \
    }                                                                                        \
    void mapHandles_u64_##type_name(const uint64_t* handle_u64s, type_name* handles,         \
                                    size_t count) override {                                 \
        for (size_t i = 0; i < count; ++i) {                                                 \
            map_from_u64_impl;                                                               \
        }                                                                                    \
    }

#define DEFINE_RESOURCE_TRACKING_CLASS(class_name, impl) \
    class class_name : public VulkanHandleMapping {      \
       public:                                           \
        virtual ~class_name() {}                         \
        GOLDFISH_VK_LIST_HANDLE_TYPES(impl)              \
    };

#define CREATE_MAPPING_IMPL_FOR_TYPE(type_name)                                \
    MAKE_HANDLE_MAPPING_FOREACH(                                               \
        type_name, handles[i] = new_from_host_##type_name(handles[i]);         \
        ResourceTracker::get()->register_##type_name(handles[i]);              \
        , handle_u64s[i] = (uint64_t)new_from_host_##type_name(handles[i]),    \
        handles[i] = (type_name)new_from_host_u64_##type_name(handle_u64s[i]); \
        ResourceTracker::get()->register_##type_name(handles[i]);)

#define UNWRAP_MAPPING_IMPL_FOR_TYPE(type_name)                          \
    MAKE_HANDLE_MAPPING_FOREACH(                                         \
        type_name, handles[i] = get_host_##type_name(handles[i]),        \
        handle_u64s[i] = (uint64_t)get_host_u64_##type_name(handles[i]), \
        handles[i] = (type_name)get_host_##type_name((type_name)handle_u64s[i]))

#define DESTROY_MAPPING_IMPL_FOR_TYPE(type_name)                                               \
    MAKE_HANDLE_MAPPING_FOREACH(type_name,                                                     \
                                ResourceTracker::get()->unregister_##type_name(handles[i]);    \
                                delete_goldfish_##type_name(handles[i]), (void)handle_u64s[i]; \
                                delete_goldfish_##type_name(handles[i]), (void)handles[i];     \
                                delete_goldfish_##type_name((type_name)handle_u64s[i]))

DEFINE_RESOURCE_TRACKING_CLASS(CreateMapping, CREATE_MAPPING_IMPL_FOR_TYPE)
DEFINE_RESOURCE_TRACKING_CLASS(DestroyMapping, DESTROY_MAPPING_IMPL_FOR_TYPE)
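
// Illustrative sketch of what the macros above generate for one handle type
// (VkImage chosen arbitrarily; the real list comes from
// GOLDFISH_VK_LIST_HANDLE_TYPES):
//
//   void mapHandles_VkImage(VkImage* handles, size_t count) override {
//       for (size_t i = 0; i < count; ++i) {
//           handles[i] = new_from_host_VkImage(handles[i]);
//           ResourceTracker::get()->register_VkImage(handles[i]);
//       }
//   }
//
// CreateMapping wraps host handles in guest-side "goldfish" objects and
// registers them with the tracker; DestroyMapping unregisters and deletes the
// guest-side wrappers.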

static uint32_t* sSeqnoPtr = nullptr;

// static
uint32_t ResourceTracker::streamFeatureBits = 0;
ResourceTracker::ThreadingCallbacks ResourceTracker::threadingCallbacks;

struct StagingInfo {
    std::mutex mLock;
    std::vector<CommandBufferStagingStream*> streams;
    std::vector<VkEncoder*> encoders;
    /// \brief sets alloc and free callbacks for memory allocation for CommandBufferStagingStream(s)
    /// \param allocFn is the callback to allocate memory
    /// \param freeFn is the callback to free memory
    void setAllocFree(CommandBufferStagingStream::Alloc&& allocFn,
                      CommandBufferStagingStream::Free&& freeFn) {
        mAlloc = allocFn;
        mFree = freeFn;
    }

    ~StagingInfo() {
        for (auto stream : streams) {
            delete stream;
        }

        for (auto encoder : encoders) {
            delete encoder;
        }
    }

    void pushStaging(CommandBufferStagingStream* stream, VkEncoder* encoder) {
        std::lock_guard<std::mutex> lock(mLock);
        stream->reset();
        streams.push_back(stream);
        encoders.push_back(encoder);
    }

    void popStaging(CommandBufferStagingStream** streamOut, VkEncoder** encoderOut) {
        std::lock_guard<std::mutex> lock(mLock);
        CommandBufferStagingStream* stream;
        VkEncoder* encoder;
        if (streams.empty()) {
            if (mAlloc && mFree) {
                // if custom allocators are provided, forward them to CommandBufferStagingStream
                stream = new CommandBufferStagingStream(mAlloc, mFree);
            } else {
                stream = new CommandBufferStagingStream;
            }
            encoder = new VkEncoder(stream);
        } else {
            stream = streams.back();
            encoder = encoders.back();
            streams.pop_back();
            encoders.pop_back();
        }
        *streamOut = stream;
        *encoderOut = encoder;
    }

   private:
    CommandBufferStagingStream::Alloc mAlloc = nullptr;
    CommandBufferStagingStream::Free mFree = nullptr;
};

static StagingInfo sStaging;
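
// Illustrative usage sketch (not part of the driver): StagingInfo is a simple
// free list. popStaging() hands out a (stream, encoder) pair, creating one if
// the pool is empty; pushStaging() resets the stream and returns the pair for
// reuse.
//
//   CommandBufferStagingStream* stream = nullptr;
//   VkEncoder* encoder = nullptr;
//   sStaging.popStaging(&stream, &encoder);
//   // ... encode commands through `encoder` into `stream` ...
//   sStaging.pushStaging(stream, encoder);  // recycled, not freed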

struct CommandBufferPendingDescriptorSets {
    std::unordered_set<VkDescriptorSet> sets;
};

#define HANDLE_REGISTER_IMPL_IMPL(type)                    \
    void ResourceTracker::register_##type(type obj) {      \
        std::lock_guard<std::recursive_mutex> lock(mLock); \
        info_##type[obj] = type##_Info();                  \
    }

#define HANDLE_UNREGISTER_IMPL_IMPL(type)                  \
    void ResourceTracker::unregister_##type(type obj) {    \
        std::lock_guard<std::recursive_mutex> lock(mLock); \
        info_##type.erase(obj);                            \
    }

GOLDFISH_VK_LIST_HANDLE_TYPES(HANDLE_REGISTER_IMPL_IMPL)
GOLDFISH_VK_LIST_TRIVIAL_HANDLE_TYPES(HANDLE_UNREGISTER_IMPL_IMPL)

uint32_t getWaitSemaphoreCount(const VkSubmitInfo& pSubmit) { return pSubmit.waitSemaphoreCount; }

uint32_t getWaitSemaphoreCount(const VkSubmitInfo2& pSubmit) {
    return pSubmit.waitSemaphoreInfoCount;
}

uint32_t getCommandBufferCount(const VkSubmitInfo& pSubmit) { return pSubmit.commandBufferCount; }

uint32_t getCommandBufferCount(const VkSubmitInfo2& pSubmit) {
    return pSubmit.commandBufferInfoCount;
}

uint32_t getSignalSemaphoreCount(const VkSubmitInfo& pSubmit) {
    return pSubmit.signalSemaphoreCount;
}

uint32_t getSignalSemaphoreCount(const VkSubmitInfo2& pSubmit) {
    return pSubmit.signalSemaphoreInfoCount;
}

VkSemaphore getWaitSemaphore(const VkSubmitInfo& pSubmit, int i) {
    return pSubmit.pWaitSemaphores[i];
}

VkSemaphore getWaitSemaphore(const VkSubmitInfo2& pSubmit, int i) {
    return pSubmit.pWaitSemaphoreInfos[i].semaphore;
}

VkSemaphore getSignalSemaphore(const VkSubmitInfo& pSubmit, int i) {
    return pSubmit.pSignalSemaphores[i];
}

VkSemaphore getSignalSemaphore(const VkSubmitInfo2& pSubmit, int i) {
    return pSubmit.pSignalSemaphoreInfos[i].semaphore;
}

VkCommandBuffer getCommandBuffer(const VkSubmitInfo& pSubmit, int i) {
    return pSubmit.pCommandBuffers[i];
}

VkCommandBuffer getCommandBuffer(const VkSubmitInfo2& pSubmit, int i) {
    return pSubmit.pCommandBufferInfos[i].commandBuffer;
}
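
// Illustrative sketch: these overloads let templated submit-processing code
// treat VkSubmitInfo and VkSubmitInfo2 uniformly. A hypothetical caller
// (forEachSubmittedCommandBuffer is not a real helper in this file):
//
//   template <class SubmitInfoT>
//   void forEachSubmittedCommandBuffer(const SubmitInfoT& submit,
//                                      const std::function<void(VkCommandBuffer)>& f) {
//       for (uint32_t i = 0; i < getCommandBufferCount(submit); ++i) {
//           f(getCommandBuffer(submit, i));
//       }
//   }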

bool descriptorPoolSupportsIndividualFreeLocked(VkDescriptorPool pool) {
    return as_goldfish_VkDescriptorPool(pool)->allocInfo->createFlags &
           VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
}

VkDescriptorImageInfo createImmutableSamplersFilteredImageInfo(
    VkDescriptorType descType, VkDescriptorSet descSet, uint32_t binding,
    const VkDescriptorImageInfo* pImageInfo) {
    VkDescriptorImageInfo res = *pImageInfo;

    if (descType != VK_DESCRIPTOR_TYPE_SAMPLER &&
        descType != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
        return res;

    bool immutableSampler =
        as_goldfish_VkDescriptorSet(descSet)->reified->bindingIsImmutableSampler[binding];

    if (!immutableSampler) return res;

    res.sampler = 0;

    return res;
}

bool descriptorBindingIsImmutableSampler(VkDescriptorSet dstSet, uint32_t dstBinding) {
    return as_goldfish_VkDescriptorSet(dstSet)->reified->bindingIsImmutableSampler[dstBinding];
}

static bool isHostVisible(const VkPhysicalDeviceMemoryProperties* memoryProps, uint32_t index) {
    return memoryProps->memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
}

VkDescriptorImageInfo ResourceTracker::filterNonexistentSampler(
    const VkDescriptorImageInfo& inputInfo) {
    VkSampler sampler = inputInfo.sampler;

    VkDescriptorImageInfo res = inputInfo;

    if (sampler) {
        auto it = info_VkSampler.find(sampler);
        bool samplerExists = it != info_VkSampler.end();
        if (!samplerExists) res.sampler = 0;
    }

    return res;
}

void ResourceTracker::emitDeviceMemoryReport(VkDevice_Info info,
                                             VkDeviceMemoryReportEventTypeEXT type,
                                             uint64_t memoryObjectId, VkDeviceSize size,
                                             VkObjectType objectType, uint64_t objectHandle,
                                             uint32_t heapIndex) {
    if (info.deviceMemoryReportCallbacks.empty()) return;

    const VkDeviceMemoryReportCallbackDataEXT callbackData = {
        VK_STRUCTURE_TYPE_DEVICE_MEMORY_REPORT_CALLBACK_DATA_EXT,  // sType
        nullptr,                                                   // pNext
        0,                                                         // flags
        type,                                                      // type
        memoryObjectId,                                            // memoryObjectId
        size,                                                      // size
        objectType,                                                // objectType
        objectHandle,                                              // objectHandle
        heapIndex,                                                 // heapIndex
    };
    for (const auto& callback : info.deviceMemoryReportCallbacks) {
        callback.first(&callbackData, callback.second);
    }
}
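
// Illustrative sketch: the callbacks invoked above are the
// PFN_vkDeviceMemoryReportCallbackEXT / user-data pairs an application
// registered at device creation via VkDeviceDeviceMemoryReportCreateInfoEXT
// (harvested from the pNext chain in setDeviceInfo() below). A hypothetical
// app-side callback:
//
//   static void VKAPI_PTR onMemoryEvent(
//       const VkDeviceMemoryReportCallbackDataEXT* pData, void* pUserData) {
//       // e.g. log pData->type, pData->size, pData->memoryObjectId
//   }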

#ifdef VK_USE_PLATFORM_FUCHSIA
inline fuchsia_sysmem::wire::BufferCollectionConstraints defaultBufferCollectionConstraints(
    size_t minSizeBytes, size_t minBufferCount, size_t maxBufferCount = 0u,
    size_t minBufferCountForCamping = 0u, size_t minBufferCountForDedicatedSlack = 0u,
    size_t minBufferCountForSharedSlack = 0u) {
    fuchsia_sysmem::wire::BufferCollectionConstraints constraints = {};
    constraints.min_buffer_count = minBufferCount;
    if (maxBufferCount > 0) {
        constraints.max_buffer_count = maxBufferCount;
    }
    if (minBufferCountForCamping) {
        constraints.min_buffer_count_for_camping = minBufferCountForCamping;
    }
    if (minBufferCountForSharedSlack) {
        constraints.min_buffer_count_for_shared_slack = minBufferCountForSharedSlack;
    }
    constraints.has_buffer_memory_constraints = true;
    fuchsia_sysmem::wire::BufferMemoryConstraints& buffer_constraints =
        constraints.buffer_memory_constraints;

    buffer_constraints.min_size_bytes = minSizeBytes;
    buffer_constraints.max_size_bytes = 0xffffffff;
    buffer_constraints.physically_contiguous_required = false;
    buffer_constraints.secure_required = false;

    // No restrictions on coherency domain or Heaps.
    buffer_constraints.ram_domain_supported = true;
    buffer_constraints.cpu_domain_supported = true;
    buffer_constraints.inaccessible_domain_supported = true;
    buffer_constraints.heap_permitted_count = 2;
    buffer_constraints.heap_permitted[0] = fuchsia_sysmem::wire::HeapType::kGoldfishDeviceLocal;
    buffer_constraints.heap_permitted[1] = fuchsia_sysmem::wire::HeapType::kGoldfishHostVisible;

    return constraints;
}

uint32_t getBufferCollectionConstraintsVulkanImageUsage(const VkImageCreateInfo* pImageInfo) {
    uint32_t usage = 0u;
    VkImageUsageFlags imageUsage = pImageInfo->usage;

#define SetUsageBit(BIT, VALUE)                                  \
    if (imageUsage & VK_IMAGE_USAGE_##BIT##_BIT) {               \
        usage |= fuchsia_sysmem::wire::kVulkanImageUsage##VALUE; \
    }

    SetUsageBit(COLOR_ATTACHMENT, ColorAttachment);
    SetUsageBit(TRANSFER_SRC, TransferSrc);
    SetUsageBit(TRANSFER_DST, TransferDst);
    SetUsageBit(SAMPLED, Sampled);

#undef SetUsageBit
    return usage;
}

uint32_t getBufferCollectionConstraintsVulkanBufferUsage(VkBufferUsageFlags bufferUsage) {
    uint32_t usage = 0u;

#define SetUsageBit(BIT, VALUE)                                   \
    if (bufferUsage & VK_BUFFER_USAGE_##BIT##_BIT) {              \
        usage |= fuchsia_sysmem::wire::kVulkanBufferUsage##VALUE; \
    }

    SetUsageBit(TRANSFER_SRC, TransferSrc);
    SetUsageBit(TRANSFER_DST, TransferDst);
    SetUsageBit(UNIFORM_TEXEL_BUFFER, UniformTexelBuffer);
    SetUsageBit(STORAGE_TEXEL_BUFFER, StorageTexelBuffer);
    SetUsageBit(UNIFORM_BUFFER, UniformBuffer);
    SetUsageBit(STORAGE_BUFFER, StorageBuffer);
    SetUsageBit(INDEX_BUFFER, IndexBuffer);
    SetUsageBit(VERTEX_BUFFER, VertexBuffer);
    SetUsageBit(INDIRECT_BUFFER, IndirectBuffer);

#undef SetUsageBit
    return usage;
}

uint32_t getBufferCollectionConstraintsVulkanBufferUsage(
    const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo) {
    VkBufferUsageFlags bufferUsage = pBufferConstraintsInfo->createInfo.usage;
    return getBufferCollectionConstraintsVulkanBufferUsage(bufferUsage);
}

static fuchsia_sysmem::wire::PixelFormatType vkFormatTypeToSysmem(VkFormat format) {
    switch (format) {
        case VK_FORMAT_B8G8R8A8_SINT:
        case VK_FORMAT_B8G8R8A8_UNORM:
        case VK_FORMAT_B8G8R8A8_SRGB:
        case VK_FORMAT_B8G8R8A8_SNORM:
        case VK_FORMAT_B8G8R8A8_SSCALED:
        case VK_FORMAT_B8G8R8A8_USCALED:
            return fuchsia_sysmem::wire::PixelFormatType::kBgra32;
        case VK_FORMAT_R8G8B8A8_SINT:
        case VK_FORMAT_R8G8B8A8_UNORM:
        case VK_FORMAT_R8G8B8A8_SRGB:
        case VK_FORMAT_R8G8B8A8_SNORM:
        case VK_FORMAT_R8G8B8A8_SSCALED:
        case VK_FORMAT_R8G8B8A8_USCALED:
            return fuchsia_sysmem::wire::PixelFormatType::kR8G8B8A8;
        case VK_FORMAT_R8_UNORM:
        case VK_FORMAT_R8_UINT:
        case VK_FORMAT_R8_USCALED:
        case VK_FORMAT_R8_SNORM:
        case VK_FORMAT_R8_SINT:
        case VK_FORMAT_R8_SSCALED:
        case VK_FORMAT_R8_SRGB:
            return fuchsia_sysmem::wire::PixelFormatType::kR8;
        case VK_FORMAT_R8G8_UNORM:
        case VK_FORMAT_R8G8_UINT:
        case VK_FORMAT_R8G8_USCALED:
        case VK_FORMAT_R8G8_SNORM:
        case VK_FORMAT_R8G8_SINT:
        case VK_FORMAT_R8G8_SSCALED:
        case VK_FORMAT_R8G8_SRGB:
            return fuchsia_sysmem::wire::PixelFormatType::kR8G8;
        default:
            return fuchsia_sysmem::wire::PixelFormatType::kInvalid;
    }
}

static bool vkFormatMatchesSysmemFormat(VkFormat vkFormat,
                                        fuchsia_sysmem::wire::PixelFormatType sysmemFormat) {
    switch (vkFormat) {
        case VK_FORMAT_B8G8R8A8_SINT:
        case VK_FORMAT_B8G8R8A8_UNORM:
        case VK_FORMAT_B8G8R8A8_SRGB:
        case VK_FORMAT_B8G8R8A8_SNORM:
        case VK_FORMAT_B8G8R8A8_SSCALED:
        case VK_FORMAT_B8G8R8A8_USCALED:
            return sysmemFormat == fuchsia_sysmem::wire::PixelFormatType::kBgra32;
        case VK_FORMAT_R8G8B8A8_SINT:
        case VK_FORMAT_R8G8B8A8_UNORM:
        case VK_FORMAT_R8G8B8A8_SRGB:
        case VK_FORMAT_R8G8B8A8_SNORM:
        case VK_FORMAT_R8G8B8A8_SSCALED:
        case VK_FORMAT_R8G8B8A8_USCALED:
            return sysmemFormat == fuchsia_sysmem::wire::PixelFormatType::kR8G8B8A8;
        case VK_FORMAT_R8_UNORM:
        case VK_FORMAT_R8_UINT:
        case VK_FORMAT_R8_USCALED:
        case VK_FORMAT_R8_SNORM:
        case VK_FORMAT_R8_SINT:
        case VK_FORMAT_R8_SSCALED:
        case VK_FORMAT_R8_SRGB:
            return sysmemFormat == fuchsia_sysmem::wire::PixelFormatType::kR8 ||
                   sysmemFormat == fuchsia_sysmem::wire::PixelFormatType::kL8;
        case VK_FORMAT_R8G8_UNORM:
        case VK_FORMAT_R8G8_UINT:
        case VK_FORMAT_R8G8_USCALED:
        case VK_FORMAT_R8G8_SNORM:
        case VK_FORMAT_R8G8_SINT:
        case VK_FORMAT_R8G8_SSCALED:
        case VK_FORMAT_R8G8_SRGB:
            return sysmemFormat == fuchsia_sysmem::wire::PixelFormatType::kR8G8;
        default:
            return false;
    }
}

static VkFormat sysmemPixelFormatTypeToVk(fuchsia_sysmem::wire::PixelFormatType format) {
    switch (format) {
        case fuchsia_sysmem::wire::PixelFormatType::kBgra32:
            return VK_FORMAT_B8G8R8A8_SRGB;
        case fuchsia_sysmem::wire::PixelFormatType::kR8G8B8A8:
            return VK_FORMAT_R8G8B8A8_SRGB;
        case fuchsia_sysmem::wire::PixelFormatType::kL8:
        case fuchsia_sysmem::wire::PixelFormatType::kR8:
            return VK_FORMAT_R8_UNORM;
        case fuchsia_sysmem::wire::PixelFormatType::kR8G8:
            return VK_FORMAT_R8G8_UNORM;
        default:
            return VK_FORMAT_UNDEFINED;
    }
}

// TODO(fxbug.dev/42172354): This is currently only used for allocating
// memory for dedicated external images. It should be migrated to use
// SetBufferCollectionImageConstraintsFUCHSIA.
VkResult ResourceTracker::setBufferCollectionConstraintsFUCHSIA(
    VkEncoder* enc, VkDevice device,
    fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>* collection,
    const VkImageCreateInfo* pImageInfo) {
    if (pImageInfo == nullptr) {
        mesa_loge("setBufferCollectionConstraints: pImageInfo cannot be null.");
        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    }

    const VkSysmemColorSpaceFUCHSIA kDefaultColorSpace = {
        .sType = VK_STRUCTURE_TYPE_SYSMEM_COLOR_SPACE_FUCHSIA,
        .pNext = nullptr,
        .colorSpace = static_cast<uint32_t>(fuchsia_sysmem::wire::ColorSpaceType::kSrgb),
    };

    std::vector<VkImageFormatConstraintsInfoFUCHSIA> formatInfos;
    if (pImageInfo->format == VK_FORMAT_UNDEFINED) {
        const auto kFormats = {
            VK_FORMAT_B8G8R8A8_SRGB,
            VK_FORMAT_R8G8B8A8_SRGB,
        };
        for (auto format : kFormats) {
            // shallow copy, using pNext from pImageInfo directly.
            auto createInfo = *pImageInfo;
            createInfo.format = format;
            formatInfos.push_back(VkImageFormatConstraintsInfoFUCHSIA{
                .sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_CONSTRAINTS_INFO_FUCHSIA,
                .pNext = nullptr,
                .imageCreateInfo = createInfo,
                .colorSpaceCount = 1,
                .pColorSpaces = &kDefaultColorSpace,
            });
        }
    } else {
        formatInfos.push_back(VkImageFormatConstraintsInfoFUCHSIA{
            .sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_CONSTRAINTS_INFO_FUCHSIA,
            .pNext = nullptr,
            .imageCreateInfo = *pImageInfo,
            .colorSpaceCount = 1,
            .pColorSpaces = &kDefaultColorSpace,
        });
    }

    VkImageConstraintsInfoFUCHSIA imageConstraints = {
        .sType = VK_STRUCTURE_TYPE_IMAGE_CONSTRAINTS_INFO_FUCHSIA,
        .pNext = nullptr,
        .formatConstraintsCount = static_cast<uint32_t>(formatInfos.size()),
        .pFormatConstraints = formatInfos.data(),
        .bufferCollectionConstraints =
            VkBufferCollectionConstraintsInfoFUCHSIA{
                .sType = VK_STRUCTURE_TYPE_BUFFER_COLLECTION_CONSTRAINTS_INFO_FUCHSIA,
                .pNext = nullptr,
                .minBufferCount = 1,
                .maxBufferCount = 0,
                .minBufferCountForCamping = 0,
                .minBufferCountForDedicatedSlack = 0,
                .minBufferCountForSharedSlack = 0,
            },
        .flags = 0u,
    };

    return setBufferCollectionImageConstraintsFUCHSIA(enc, device, collection, &imageConstraints);
}

VkResult addImageBufferCollectionConstraintsFUCHSIA(
    VkEncoder* enc, VkDevice device, VkPhysicalDevice physicalDevice,
    const VkImageFormatConstraintsInfoFUCHSIA* formatConstraints,  // always non-zero
    VkImageTiling tiling, fuchsia_sysmem::wire::BufferCollectionConstraints* constraints) {
    // First check if the format, tiling and usage is supported on host.
    VkImageFormatProperties imageFormatProperties;
    auto createInfo = &formatConstraints->imageCreateInfo;
    auto result = enc->vkGetPhysicalDeviceImageFormatProperties(
        physicalDevice, createInfo->format, createInfo->imageType, tiling, createInfo->usage,
        createInfo->flags, &imageFormatProperties, true /* do lock */);
    if (result != VK_SUCCESS) {
        mesa_logd(
            "%s: Image format (%u) type (%u) tiling (%u) "
            "usage (%u) flags (%u) not supported by physical "
            "device",
            __func__, static_cast<uint32_t>(createInfo->format),
            static_cast<uint32_t>(createInfo->imageType), static_cast<uint32_t>(tiling),
            static_cast<uint32_t>(createInfo->usage), static_cast<uint32_t>(createInfo->flags));
        return VK_ERROR_FORMAT_NOT_SUPPORTED;
    }

    // Check if the format constraints contain unsupported format features.
    {
        VkFormatProperties formatProperties;
        enc->vkGetPhysicalDeviceFormatProperties(physicalDevice, createInfo->format,
                                                 &formatProperties, true /* do lock */);

        auto supportedFeatures = (tiling == VK_IMAGE_TILING_LINEAR)
                                     ? formatProperties.linearTilingFeatures
                                     : formatProperties.optimalTilingFeatures;
        auto requiredFeatures = formatConstraints->requiredFormatFeatures;
        if ((~supportedFeatures) & requiredFeatures) {
            // Pass supported features first and required features second, matching
            // the labels in the format string.
            mesa_logd(
                "%s: Host device support features for %s tiling: %08x, "
                "required features: %08x, feature bits %08x missing",
                __func__, tiling == VK_IMAGE_TILING_LINEAR ? "LINEAR" : "OPTIMAL",
                static_cast<uint32_t>(supportedFeatures), static_cast<uint32_t>(requiredFeatures),
                static_cast<uint32_t>((~supportedFeatures) & requiredFeatures));
            return VK_ERROR_FORMAT_NOT_SUPPORTED;
        }
    }

    fuchsia_sysmem::wire::ImageFormatConstraints imageConstraints;
    if (formatConstraints->sysmemPixelFormat != 0) {
        auto pixelFormat = static_cast<fuchsia_sysmem::wire::PixelFormatType>(
            formatConstraints->sysmemPixelFormat);
        if (createInfo->format != VK_FORMAT_UNDEFINED &&
            !vkFormatMatchesSysmemFormat(createInfo->format, pixelFormat)) {
            mesa_logd("%s: VkFormat %u doesn't match sysmem pixelFormat %lu", __func__,
                      static_cast<uint32_t>(createInfo->format),
                      formatConstraints->sysmemPixelFormat);
            return VK_ERROR_FORMAT_NOT_SUPPORTED;
        }
        imageConstraints.pixel_format.type = pixelFormat;
    } else {
        auto pixel_format = vkFormatTypeToSysmem(createInfo->format);
        if (pixel_format == fuchsia_sysmem::wire::PixelFormatType::kInvalid) {
            mesa_logd("%s: Unsupported VkFormat %u", __func__,
                      static_cast<uint32_t>(createInfo->format));
            return VK_ERROR_FORMAT_NOT_SUPPORTED;
        }
        imageConstraints.pixel_format.type = pixel_format;
    }

    imageConstraints.color_spaces_count = formatConstraints->colorSpaceCount;
    for (size_t i = 0; i < formatConstraints->colorSpaceCount; i++) {
        // Index with |i| so every requested color space is recorded, not just the first.
        imageConstraints.color_space[i].type = static_cast<fuchsia_sysmem::wire::ColorSpaceType>(
            formatConstraints->pColorSpaces[i].colorSpace);
    }

    // Get row alignment from host GPU.
    VkDeviceSize offset = 0;
    VkDeviceSize rowPitchAlignment = 1u;

    if (tiling == VK_IMAGE_TILING_LINEAR) {
        VkImageCreateInfo createInfoDup = *createInfo;
        createInfoDup.pNext = nullptr;
        enc->vkGetLinearImageLayout2GOOGLE(device, &createInfoDup, &offset, &rowPitchAlignment,
                                           true /* do lock */);
        mesa_logd(
            "vkGetLinearImageLayout2GOOGLE: format %d offset %lu "
            "rowPitchAlignment = %lu",
            (int)createInfo->format, offset, rowPitchAlignment);
    }

    imageConstraints.min_coded_width = createInfo->extent.width;
    imageConstraints.max_coded_width = 0xfffffff;
    imageConstraints.min_coded_height = createInfo->extent.height;
    imageConstraints.max_coded_height = 0xffffffff;
    // The min_bytes_per_row can be calculated by sysmem using
    // |min_coded_width|, |bytes_per_row_divisor| and color format.
    imageConstraints.min_bytes_per_row = 0;
    imageConstraints.max_bytes_per_row = 0xffffffff;
    imageConstraints.max_coded_width_times_coded_height = 0xffffffff;

    imageConstraints.layers = 1;
    imageConstraints.coded_width_divisor = 1;
    imageConstraints.coded_height_divisor = 1;
    imageConstraints.bytes_per_row_divisor = rowPitchAlignment;
    imageConstraints.start_offset_divisor = 1;
    imageConstraints.display_width_divisor = 1;
    imageConstraints.display_height_divisor = 1;
    imageConstraints.pixel_format.has_format_modifier = true;
    imageConstraints.pixel_format.format_modifier.value =
        (tiling == VK_IMAGE_TILING_LINEAR)
            ? fuchsia_sysmem::wire::kFormatModifierLinear
            : fuchsia_sysmem::wire::kFormatModifierGoogleGoldfishOptimal;

    constraints->image_format_constraints[constraints->image_format_constraints_count++] =
        imageConstraints;
    return VK_SUCCESS;
}

SetBufferCollectionBufferConstraintsResult setBufferCollectionBufferConstraintsImpl(
    fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>* pCollection,
    const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo) {
    const auto& collection = *pCollection;
    if (pBufferConstraintsInfo == nullptr) {
        mesa_loge(
            "setBufferCollectionBufferConstraints: "
            "pBufferConstraintsInfo cannot be null.");
        return {VK_ERROR_OUT_OF_DEVICE_MEMORY};
    }

    fuchsia_sysmem::wire::BufferCollectionConstraints constraints =
        defaultBufferCollectionConstraints(
            /* min_size_bytes */ pBufferConstraintsInfo->createInfo.size,
            /* buffer_count */ pBufferConstraintsInfo->bufferCollectionConstraints.minBufferCount);
    constraints.usage.vulkan =
        getBufferCollectionConstraintsVulkanBufferUsage(pBufferConstraintsInfo);

    constexpr uint32_t kVulkanPriority = 5;
    const char kName[] = "GoldfishBufferSysmemShared";
    collection->SetName(kVulkanPriority, fidl::StringView(kName));

    auto result = collection->SetConstraints(true, constraints);
    if (!result.ok()) {
        mesa_loge("setBufferCollectionConstraints: SetConstraints failed: %d", result.status());
        return {VK_ERROR_OUT_OF_DEVICE_MEMORY};
    }

    return {VK_SUCCESS, constraints};
}
#endif

#ifdef VK_USE_PLATFORM_ANDROID_KHR
uint64_t ResourceTracker::getAHardwareBufferId(AHardwareBuffer* ahw) {
    uint64_t id = 0;
    mGralloc->getId(ahw, &id);
    return id;
}
#endif

void transformExternalResourceMemoryDedicatedRequirementsForGuest(
    VkMemoryDedicatedRequirements* dedicatedReqs) {
    dedicatedReqs->prefersDedicatedAllocation = VK_TRUE;
    dedicatedReqs->requiresDedicatedAllocation = VK_TRUE;
}

void ResourceTracker::transformImageMemoryRequirementsForGuestLocked(VkImage image,
                                                                     VkMemoryRequirements* reqs) {
#ifdef VK_USE_PLATFORM_FUCHSIA
    auto it = info_VkImage.find(image);
    if (it == info_VkImage.end()) return;
    auto& info = it->second;
    if (info.isSysmemBackedMemory) {
        auto width = info.createInfo.extent.width;
        auto height = info.createInfo.extent.height;
        reqs->size = width * height * 4;
    }
#else
    // Bypass "unused parameter" checks.
    (void)image;
    (void)reqs;
#endif
}

CoherentMemoryPtr ResourceTracker::freeCoherentMemoryLocked(VkDeviceMemory memory,
                                                            VkDeviceMemory_Info& info) {
    if (info.coherentMemory && info.ptr) {
        if (info.coherentMemory->getDeviceMemory() != memory) {
            delete_goldfish_VkDeviceMemory(memory);
        }

        if (info.ptr) {
            info.coherentMemory->release(info.ptr);
            info.ptr = nullptr;
        }

        return std::move(info.coherentMemory);
    }

    return nullptr;
}

VkResult acquireSync(uint64_t syncId, int64_t& osHandle) {
    struct VirtGpuExecBuffer exec = {};
    struct gfxstreamAcquireSync acquireSync = {};
    VirtGpuDevice* instance = VirtGpuDevice::getInstance();

    acquireSync.hdr.opCode = GFXSTREAM_ACQUIRE_SYNC;
    acquireSync.syncId = syncId;

    exec.command = static_cast<void*>(&acquireSync);
    exec.command_size = sizeof(acquireSync);
    exec.flags = kFenceOut | kRingIdx | kShareableOut;

    if (instance->execBuffer(exec, nullptr)) return VK_ERROR_OUT_OF_HOST_MEMORY;

    osHandle = exec.handle.osHandle;
    return VK_SUCCESS;
}

VkResult createFence(VkDevice device, uint64_t hostFenceHandle, int64_t& osHandle) {
    struct VirtGpuExecBuffer exec = {};
    struct gfxstreamCreateExportSyncVK exportSync = {};
    VirtGpuDevice* instance = VirtGpuDevice::getInstance();

    uint64_t hostDeviceHandle = get_host_u64_VkDevice(device);

    exportSync.hdr.opCode = GFXSTREAM_CREATE_EXPORT_SYNC_VK;
    exportSync.deviceHandleLo = (uint32_t)hostDeviceHandle;
    exportSync.deviceHandleHi = (uint32_t)(hostDeviceHandle >> 32);
    exportSync.fenceHandleLo = (uint32_t)hostFenceHandle;
    exportSync.fenceHandleHi = (uint32_t)(hostFenceHandle >> 32);

    exec.command = static_cast<void*>(&exportSync);
    exec.command_size = sizeof(exportSync);
    exec.flags = kFenceOut | kRingIdx;
    if (instance->execBuffer(exec, nullptr)) return VK_ERROR_OUT_OF_HOST_MEMORY;

    osHandle = exec.handle.osHandle;
    return VK_SUCCESS;
}
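
// Note on the Lo/Hi fields above: the execbuffer protocol carries 64-bit host
// handles as two 32-bit words, lo = (uint32_t)h and hi = (uint32_t)(h >> 32),
// so the receiving side can reassemble h as ((uint64_t)hi << 32) | lo. (The
// reassembly detail is an inference from the split here; the host side is not
// shown in this file.)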

void collectAllPendingDescriptorSetsBottomUp(const std::vector<VkCommandBuffer>& workingSet,
                                             std::unordered_set<VkDescriptorSet>& allDs) {
    if (workingSet.empty()) return;

    std::vector<VkCommandBuffer> nextLevel;
    for (auto commandBuffer : workingSet) {
        struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
        forAllObjects(cb->subObjects, [&nextLevel](void* secondary) {
            nextLevel.push_back((VkCommandBuffer)secondary);
        });
    }

    collectAllPendingDescriptorSetsBottomUp(nextLevel, allDs);

    for (auto cmdbuf : workingSet) {
        struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(cmdbuf);

        if (!cb->userPtr) {
            continue;  // No descriptors to update.
        }

        CommandBufferPendingDescriptorSets* pendingDescriptorSets =
            (CommandBufferPendingDescriptorSets*)(cb->userPtr);

        if (pendingDescriptorSets->sets.empty()) {
            continue;  // No descriptors to update.
        }

        allDs.insert(pendingDescriptorSets->sets.begin(), pendingDescriptorSets->sets.end());
    }
}
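
// Illustrative walk-through of the recursion above: secondaries are visited
// before the command buffers that execute them. With a primary P that
// executes a secondary S, the working set {P} expands to {S} via
// cb->subObjects; the recursive call gathers S's pending descriptor sets
// first, and only then are P's own pending sets added to allDs.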

void commitDescriptorSetUpdates(void* context, VkQueue queue,
                                const std::unordered_set<VkDescriptorSet>& sets) {
    VkEncoder* enc = (VkEncoder*)context;

    std::unordered_map<VkDescriptorPool, uint32_t> poolSet;
    std::vector<VkDescriptorPool> pools;
    std::vector<VkDescriptorSetLayout> setLayouts;
    std::vector<uint64_t> poolIds;
    std::vector<uint32_t> descriptorSetWhichPool;
    std::vector<uint32_t> pendingAllocations;
    std::vector<uint32_t> writeStartingIndices;
    std::vector<VkWriteDescriptorSet> writesForHost;

    uint32_t poolIndex = 0;
    uint32_t currentWriteIndex = 0;
    for (auto set : sets) {
        ReifiedDescriptorSet* reified = as_goldfish_VkDescriptorSet(set)->reified;
        VkDescriptorPool pool = reified->pool;
        VkDescriptorSetLayout setLayout = reified->setLayout;

        auto it = poolSet.find(pool);
        if (it == poolSet.end()) {
            poolSet[pool] = poolIndex;
            descriptorSetWhichPool.push_back(poolIndex);
            pools.push_back(pool);
            ++poolIndex;
        } else {
            uint32_t savedPoolIndex = it->second;
            descriptorSetWhichPool.push_back(savedPoolIndex);
        }

        poolIds.push_back(reified->poolId);
        setLayouts.push_back(setLayout);
        pendingAllocations.push_back(reified->allocationPending ? 1 : 0);
        writeStartingIndices.push_back(currentWriteIndex);

        auto& writes = reified->allWrites;

        for (size_t i = 0; i < writes.size(); ++i) {
            uint32_t binding = i;

            for (size_t j = 0; j < writes[i].size(); ++j) {
                auto& write = writes[i][j];

                if (write.type == DescriptorWriteType::Empty) continue;

                uint32_t dstArrayElement = 0;

                VkDescriptorImageInfo* imageInfo = nullptr;
                VkDescriptorBufferInfo* bufferInfo = nullptr;
                VkBufferView* bufferView = nullptr;

                switch (write.type) {
                    case DescriptorWriteType::Empty:
                        break;
                    case DescriptorWriteType::ImageInfo:
                        dstArrayElement = j;
                        imageInfo = &write.imageInfo;
                        break;
                    case DescriptorWriteType::BufferInfo:
                        dstArrayElement = j;
                        bufferInfo = &write.bufferInfo;
                        break;
                    case DescriptorWriteType::BufferView:
                        dstArrayElement = j;
                        bufferView = &write.bufferView;
                        break;
                    case DescriptorWriteType::InlineUniformBlock:
                    case DescriptorWriteType::AccelerationStructure:
                        // TODO
                        mesa_loge(
                            "Encountered pending inline uniform block or acceleration structure "
                            "desc write, abort (NYI)\n");
                        abort();
                    default:
                        break;
                }

                // TODO: Combine multiple writes into one VkWriteDescriptorSet.
                VkWriteDescriptorSet forHost = {
                    VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
                    0 /* TODO: inline uniform block */,
                    set,
                    binding,
                    dstArrayElement,
                    1,
                    write.descriptorType,
                    imageInfo,
                    bufferInfo,
                    bufferView,
                };

                writesForHost.push_back(forHost);
                ++currentWriteIndex;

                // Set it back to empty.
                write.type = DescriptorWriteType::Empty;
            }
        }
    }

    // Skip out if there's nothing to VkWriteDescriptorSet home about.
    if (writesForHost.empty()) {
        return;
    }

    enc->vkQueueCommitDescriptorSetUpdatesGOOGLE(
        queue, (uint32_t)pools.size(), pools.data(), (uint32_t)sets.size(), setLayouts.data(),
        poolIds.data(), descriptorSetWhichPool.data(), pendingAllocations.data(),
        writeStartingIndices.data(), (uint32_t)writesForHost.size(), writesForHost.data(),
        false /* no lock */);

    // If we got here, then we definitely serviced the allocations.
    for (auto set : sets) {
        ReifiedDescriptorSet* reified = as_goldfish_VkDescriptorSet(set)->reified;
        reified->allocationPending = false;
    }
}

uint32_t ResourceTracker::syncEncodersForCommandBuffer(VkCommandBuffer commandBuffer,
                                                       VkEncoder* currentEncoder) {
    struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
    if (!cb) return 0;

    auto lastEncoder = cb->lastUsedEncoder;

    if (lastEncoder == currentEncoder) return 0;

    currentEncoder->incRef();

    cb->lastUsedEncoder = currentEncoder;

    if (!lastEncoder) return 0;

    auto oldSeq = cb->sequenceNumber;
    cb->sequenceNumber += 2;
    lastEncoder->vkCommandBufferHostSyncGOOGLE(commandBuffer, false, oldSeq + 1,
                                               true /* do lock */);
    lastEncoder->flush();
    currentEncoder->vkCommandBufferHostSyncGOOGLE(commandBuffer, true, oldSeq + 2,
                                                  true /* do lock */);

    if (lastEncoder->decRef()) {
        cb->lastUsedEncoder = nullptr;
    }
    return 0;
}
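
// Illustrative timeline of the handshake above: if a command buffer was last
// encoded on encoder A and is now used from encoder B, with sequence number
// oldSeq before the switch, then A emits a host-sync message tagged
// oldSeq + 1 and is flushed, after which B emits one tagged oldSeq + 2. The
// monotonically increasing tags let the host keep this command buffer's work
// ordered across the encoder switch.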

void addPendingDescriptorSets(VkCommandBuffer commandBuffer, uint32_t descriptorSetCount,
                              const VkDescriptorSet* pDescriptorSets) {
    struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);

    if (!cb->userPtr) {
        CommandBufferPendingDescriptorSets* newPendingSets = new CommandBufferPendingDescriptorSets;
        cb->userPtr = newPendingSets;
    }

    CommandBufferPendingDescriptorSets* pendingSets =
        (CommandBufferPendingDescriptorSets*)cb->userPtr;

    for (uint32_t i = 0; i < descriptorSetCount; ++i) {
        pendingSets->sets.insert(pDescriptorSets[i]);
    }
}

void decDescriptorSetLayoutRef(void* context, VkDevice device,
                               VkDescriptorSetLayout descriptorSetLayout,
                               const VkAllocationCallbacks* pAllocator) {
    if (!descriptorSetLayout) return;

    struct goldfish_VkDescriptorSetLayout* setLayout =
        as_goldfish_VkDescriptorSetLayout(descriptorSetLayout);

    if (0 == --setLayout->layoutInfo->refcount) {
        VkEncoder* enc = (VkEncoder*)context;
        enc->vkDestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator,
                                          true /* do lock */);
    }
}

void ResourceTracker::ensureSyncDeviceFd() {
#if GFXSTREAM_ENABLE_GUEST_GOLDFISH
    if (mSyncDeviceFd >= 0) return;
    mSyncDeviceFd = goldfish_sync_open();
    if (mSyncDeviceFd >= 0) {
        mesa_logd("%s: created sync device for current Vulkan process: %d\n", __func__,
                  mSyncDeviceFd);
    } else {
        mesa_logd("%s: failed to create sync device for current Vulkan process\n", __func__);
    }
#endif
}

void ResourceTracker::unregister_VkInstance(VkInstance instance) {
    std::lock_guard<std::recursive_mutex> lock(mLock);

    auto it = info_VkInstance.find(instance);
    if (it == info_VkInstance.end()) return;
    auto info = it->second;
    info_VkInstance.erase(instance);
}

void ResourceTracker::unregister_VkDevice(VkDevice device) {
    std::lock_guard<std::recursive_mutex> lock(mLock);

    auto it = info_VkDevice.find(device);
    if (it == info_VkDevice.end()) return;
    auto info = it->second;
    info_VkDevice.erase(device);
}

void ResourceTracker::unregister_VkCommandPool(VkCommandPool pool) {
    if (!pool) return;

    clearCommandPool(pool);

    std::lock_guard<std::recursive_mutex> lock(mLock);
    info_VkCommandPool.erase(pool);
}

void ResourceTracker::unregister_VkSampler(VkSampler sampler) {
    if (!sampler) return;

    std::lock_guard<std::recursive_mutex> lock(mLock);
    info_VkSampler.erase(sampler);
}

void ResourceTracker::unregister_VkCommandBuffer(VkCommandBuffer commandBuffer) {
    resetCommandBufferStagingInfo(commandBuffer, true /* also reset primaries */,
                                  true /* also clear pending descriptor sets */);

    struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
    if (!cb) return;
    if (cb->lastUsedEncoder) {
        cb->lastUsedEncoder->decRef();
    }
    eraseObjects(&cb->subObjects);
    forAllObjects(cb->poolObjects, [cb](void* commandPool) {
        struct goldfish_VkCommandPool* p = as_goldfish_VkCommandPool((VkCommandPool)commandPool);
        eraseObject(&p->subObjects, (void*)cb);
    });
    eraseObjects(&cb->poolObjects);

    if (cb->userPtr) {
        CommandBufferPendingDescriptorSets* pendingSets =
            (CommandBufferPendingDescriptorSets*)cb->userPtr;
        delete pendingSets;
    }

    std::lock_guard<std::recursive_mutex> lock(mLock);
    info_VkCommandBuffer.erase(commandBuffer);
}

void ResourceTracker::unregister_VkQueue(VkQueue queue) {
    struct goldfish_VkQueue* q = as_goldfish_VkQueue(queue);
    if (!q) return;
    if (q->lastUsedEncoder) {
        q->lastUsedEncoder->decRef();
    }

    std::lock_guard<std::recursive_mutex> lock(mLock);
    info_VkQueue.erase(queue);
}

void ResourceTracker::unregister_VkDeviceMemory(VkDeviceMemory mem) {
    std::lock_guard<std::recursive_mutex> lock(mLock);

    auto it = info_VkDeviceMemory.find(mem);
    if (it == info_VkDeviceMemory.end()) return;

    auto& memInfo = it->second;

#ifdef VK_USE_PLATFORM_ANDROID_KHR
    if (memInfo.ahw) {
        mGralloc->release(memInfo.ahw);
    }
#endif

    if (memInfo.vmoHandle != ZX_HANDLE_INVALID) {
        zx_handle_close(memInfo.vmoHandle);
    }

    info_VkDeviceMemory.erase(mem);
}

void ResourceTracker::unregister_VkImage(VkImage img) {
    std::lock_guard<std::recursive_mutex> lock(mLock);

    auto it = info_VkImage.find(img);
    if (it == info_VkImage.end()) return;

    info_VkImage.erase(img);
}

void ResourceTracker::unregister_VkBuffer(VkBuffer buf) {
    std::lock_guard<std::recursive_mutex> lock(mLock);

    auto it = info_VkBuffer.find(buf);
    if (it == info_VkBuffer.end()) return;

    info_VkBuffer.erase(buf);
}

void ResourceTracker::unregister_VkSemaphore(VkSemaphore sem) {
    std::lock_guard<std::recursive_mutex> lock(mLock);

    auto it = info_VkSemaphore.find(sem);
    if (it == info_VkSemaphore.end()) return;

    auto& semInfo = it->second;

    if (semInfo.eventHandle != ZX_HANDLE_INVALID) {
        zx_handle_close(semInfo.eventHandle);
    }

#if defined(VK_USE_PLATFORM_ANDROID_KHR) || DETECT_OS_LINUX
    if (semInfo.syncFd.value_or(-1) >= 0) {
        mSyncHelper->close(semInfo.syncFd.value());
    }
#endif

    info_VkSemaphore.erase(sem);
}

void ResourceTracker::unregister_VkDescriptorUpdateTemplate(VkDescriptorUpdateTemplate templ) {
    std::lock_guard<std::recursive_mutex> lock(mLock);
    auto it = info_VkDescriptorUpdateTemplate.find(templ);
    if (it == info_VkDescriptorUpdateTemplate.end()) return;

    auto& info = it->second;
    if (info.templateEntryCount) delete[] info.templateEntries;
    if (info.imageInfoCount) {
        delete[] info.imageInfoIndices;
        delete[] info.imageInfos;
    }
    if (info.bufferInfoCount) {
        delete[] info.bufferInfoIndices;
        delete[] info.bufferInfos;
    }
    if (info.bufferViewCount) {
        delete[] info.bufferViewIndices;
        delete[] info.bufferViews;
    }
    info_VkDescriptorUpdateTemplate.erase(it);
}

void ResourceTracker::unregister_VkFence(VkFence fence) {
    std::lock_guard<std::recursive_mutex> lock(mLock);
    auto it = info_VkFence.find(fence);
    if (it == info_VkFence.end()) return;

    auto& fenceInfo = it->second;
    (void)fenceInfo;

#if defined(VK_USE_PLATFORM_ANDROID_KHR) || DETECT_OS_LINUX
    if (fenceInfo.syncFd && *fenceInfo.syncFd >= 0) {
        mSyncHelper->close(*fenceInfo.syncFd);
    }
#endif

    info_VkFence.erase(fence);
}

#ifdef VK_USE_PLATFORM_FUCHSIA
void ResourceTracker::unregister_VkBufferCollectionFUCHSIA(VkBufferCollectionFUCHSIA collection) {
    std::lock_guard<std::recursive_mutex> lock(mLock);
    info_VkBufferCollectionFUCHSIA.erase(collection);
}
#endif

void ResourceTracker::unregister_VkDescriptorSet_locked(VkDescriptorSet set) {
    struct goldfish_VkDescriptorSet* ds = as_goldfish_VkDescriptorSet(set);
    delete ds->reified;
    info_VkDescriptorSet.erase(set);
}

void ResourceTracker::unregister_VkDescriptorSet(VkDescriptorSet set) {
    if (!set) return;

    std::lock_guard<std::recursive_mutex> lock(mLock);
    unregister_VkDescriptorSet_locked(set);
}

void ResourceTracker::unregister_VkDescriptorSetLayout(VkDescriptorSetLayout setLayout) {
    if (!setLayout) return;

    std::lock_guard<std::recursive_mutex> lock(mLock);
    delete as_goldfish_VkDescriptorSetLayout(setLayout)->layoutInfo;
    info_VkDescriptorSetLayout.erase(setLayout);
}

void ResourceTracker::freeDescriptorSetsIfHostAllocated(VkEncoder* enc, VkDevice device,
                                                        uint32_t descriptorSetCount,
                                                        const VkDescriptorSet* sets) {
    for (uint32_t i = 0; i < descriptorSetCount; ++i) {
        struct goldfish_VkDescriptorSet* ds = as_goldfish_VkDescriptorSet(sets[i]);
        if (ds->reified->allocationPending) {
            unregister_VkDescriptorSet(sets[i]);
            delete_goldfish_VkDescriptorSet(sets[i]);
        } else {
            enc->vkFreeDescriptorSets(device, ds->reified->pool, 1, &sets[i], false /* no lock */);
        }
    }
}

void ResourceTracker::clearDescriptorPoolAndUnregisterDescriptorSets(void* context, VkDevice device,
                                                                     VkDescriptorPool pool) {
    std::vector<VkDescriptorSet> toClear =
        clearDescriptorPool(pool, mFeatureInfo.hasVulkanBatchedDescriptorSetUpdate);

    for (auto set : toClear) {
        if (mFeatureInfo.hasVulkanBatchedDescriptorSetUpdate) {
            VkDescriptorSetLayout setLayout = as_goldfish_VkDescriptorSet(set)->reified->setLayout;
            decDescriptorSetLayoutRef(context, device, setLayout, nullptr);
        }
        unregister_VkDescriptorSet(set);
        delete_goldfish_VkDescriptorSet(set);
    }
}

void ResourceTracker::unregister_VkDescriptorPool(VkDescriptorPool pool) {
    if (!pool) return;

    std::lock_guard<std::recursive_mutex> lock(mLock);

    struct goldfish_VkDescriptorPool* dp = as_goldfish_VkDescriptorPool(pool);
    delete dp->allocInfo;

    info_VkDescriptorPool.erase(pool);
}

void ResourceTracker::deviceMemoryTransform_fromhost(VkDeviceMemory* memory, uint32_t memoryCount,
                                                     VkDeviceSize* offset, uint32_t offsetCount,
                                                     VkDeviceSize* size, uint32_t sizeCount,
                                                     uint32_t* typeIndex, uint32_t typeIndexCount,
                                                     uint32_t* typeBits, uint32_t typeBitsCount) {
    (void)memory;
    (void)memoryCount;
    (void)offset;
    (void)offsetCount;
    (void)size;
    (void)sizeCount;
    (void)typeIndex;
    (void)typeIndexCount;
    (void)typeBits;
    (void)typeBitsCount;
}

void ResourceTracker::transformImpl_VkExternalMemoryProperties_fromhost(
    VkExternalMemoryProperties* pProperties, uint32_t) {
    VkExternalMemoryHandleTypeFlags supportedHandleType = 0u;
#ifdef VK_USE_PLATFORM_FUCHSIA
    supportedHandleType |= VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA;
#endif  // VK_USE_PLATFORM_FUCHSIA
#ifdef VK_USE_PLATFORM_ANDROID_KHR
    supportedHandleType |= VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
                           VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
#endif  // VK_USE_PLATFORM_ANDROID_KHR
    if (supportedHandleType) {
        pProperties->compatibleHandleTypes &= supportedHandleType;
        pProperties->exportFromImportedHandleTypes &= supportedHandleType;
    }
}

void ResourceTracker::setInstanceInfo(VkInstance instance, uint32_t enabledExtensionCount,
                                      const char* const* ppEnabledExtensionNames,
                                      uint32_t apiVersion) {
    std::lock_guard<std::recursive_mutex> lock(mLock);
    auto& info = info_VkInstance[instance];
    info.highestApiVersion = apiVersion;

    if (!ppEnabledExtensionNames) return;

    for (uint32_t i = 0; i < enabledExtensionCount; ++i) {
        info.enabledExtensions.insert(ppEnabledExtensionNames[i]);
    }
}

void ResourceTracker::setDeviceInfo(VkDevice device, VkPhysicalDevice physdev,
                                    VkPhysicalDeviceProperties props,
                                    VkPhysicalDeviceMemoryProperties memProps,
                                    uint32_t enabledExtensionCount,
                                    const char* const* ppEnabledExtensionNames, const void* pNext) {
    std::lock_guard<std::recursive_mutex> lock(mLock);
    auto& info = info_VkDevice[device];
    info.physdev = physdev;
    info.props = props;
    info.memProps = memProps;
    info.apiVersion = props.apiVersion;

    const VkBaseInStructure* extensionCreateInfo =
        reinterpret_cast<const VkBaseInStructure*>(pNext);
    while (extensionCreateInfo) {
        if (extensionCreateInfo->sType ==
            VK_STRUCTURE_TYPE_DEVICE_DEVICE_MEMORY_REPORT_CREATE_INFO_EXT) {
            auto deviceMemoryReportCreateInfo =
                reinterpret_cast<const VkDeviceDeviceMemoryReportCreateInfoEXT*>(
                    extensionCreateInfo);
            if (deviceMemoryReportCreateInfo->pfnUserCallback != nullptr) {
                info.deviceMemoryReportCallbacks.emplace_back(
                    deviceMemoryReportCreateInfo->pfnUserCallback,
                    deviceMemoryReportCreateInfo->pUserData);
            }
        }
        extensionCreateInfo = extensionCreateInfo->pNext;
    }

    if (!ppEnabledExtensionNames) return;

    for (uint32_t i = 0; i < enabledExtensionCount; ++i) {
        info.enabledExtensions.insert(ppEnabledExtensionNames[i]);
    }
}

void ResourceTracker::setDeviceMemoryInfo(VkDevice device, VkDeviceMemory memory,
                                          VkDeviceSize allocationSize, uint8_t* ptr,
                                          uint32_t memoryTypeIndex, void* ahw, bool imported,
                                          zx_handle_t vmoHandle, VirtGpuResourcePtr blobPtr) {
    std::lock_guard<std::recursive_mutex> lock(mLock);
    auto& info = info_VkDeviceMemory[memory];

    info.device = device;
    info.allocationSize = allocationSize;
    info.ptr = ptr;
    info.memoryTypeIndex = memoryTypeIndex;
#ifdef VK_USE_PLATFORM_ANDROID_KHR
    info.ahw = (AHardwareBuffer*)ahw;
#endif
    info.imported = imported;
    info.vmoHandle = vmoHandle;
    info.blobPtr = blobPtr;
}

void ResourceTracker::setImageInfo(VkImage image, VkDevice device,
                                   const VkImageCreateInfo* pCreateInfo) {
    std::lock_guard<std::recursive_mutex> lock(mLock);
    auto& info = info_VkImage[image];

    info.device = device;
    info.createInfo = *pCreateInfo;
}

uint8_t* ResourceTracker::getMappedPointer(VkDeviceMemory memory) {
    std::lock_guard<std::recursive_mutex> lock(mLock);
    const auto it = info_VkDeviceMemory.find(memory);
    if (it == info_VkDeviceMemory.end()) return nullptr;

    const auto& info = it->second;
    return info.ptr;
}

VkDeviceSize ResourceTracker::getMappedSize(VkDeviceMemory memory) {
    std::lock_guard<std::recursive_mutex> lock(mLock);
    const auto it = info_VkDeviceMemory.find(memory);
    if (it == info_VkDeviceMemory.end()) return 0;

    const auto& info = it->second;
    return info.allocationSize;
}

bool ResourceTracker::isValidMemoryRange(const VkMappedMemoryRange& range) {
    std::lock_guard<std::recursive_mutex> lock(mLock);
    const auto it = info_VkDeviceMemory.find(range.memory);
    if (it == info_VkDeviceMemory.end()) return false;
    const auto& info = it->second;

    if (!info.ptr) return false;

    VkDeviceSize offset = range.offset;
    VkDeviceSize size = range.size;

    if (size == VK_WHOLE_SIZE) {
        return offset <= info.allocationSize;
    }

    return offset + size <= info.allocationSize;
}
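
// Illustrative sketch: with a mapped allocation of size 256,
//   { .memory = m, .offset = 0,   .size = VK_WHOLE_SIZE } -> valid
//   { .memory = m, .offset = 128, .size = 128 }           -> valid (128 + 128 == 256)
//   { .memory = m, .offset = 128, .size = 256 }           -> rejected (runs past the end)
// and any range against unmapped or unknown memory is rejected outright.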

void ResourceTracker::setupCaps(uint32_t& noRenderControlEnc) {
    VirtGpuDevice* instance = VirtGpuDevice::getInstance(kCapsetGfxStreamVulkan);
    mCaps = instance->getCaps();

    // Delete once goldfish Linux drivers are gone
    if (mCaps.vulkanCapset.protocolVersion == 0) {
        mCaps.vulkanCapset.colorBufferMemoryIndex = 0xFFFFFFFF;
    } else {
        // Don't query the render control encoder for features, since for virtio-gpu the
        // capabilities provide versioning. Set features to be unconditionally true, since
        // using virtio-gpu encompasses all prior goldfish features. mFeatureInfo should be
        // deprecated in favor of caps.
        mFeatureInfo.hasVulkanNullOptionalStrings = true;
        mFeatureInfo.hasVulkanIgnoredHandles = true;
        mFeatureInfo.hasVulkanShaderFloat16Int8 = true;
        mFeatureInfo.hasVulkanQueueSubmitWithCommands = true;
        mFeatureInfo.hasDeferredVulkanCommands = true;
        mFeatureInfo.hasVulkanAsyncQueueSubmit = true;
        mFeatureInfo.hasVulkanCreateResourcesWithRequirements = true;
        mFeatureInfo.hasVirtioGpuNext = true;
        mFeatureInfo.hasVirtioGpuNativeSync = true;
        mFeatureInfo.hasVulkanBatchedDescriptorSetUpdate =
            mCaps.vulkanCapset.vulkanBatchedDescriptorSetUpdate;
        mFeatureInfo.hasVulkanAsyncQsri = true;

        ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_NULL_OPTIONAL_STRINGS_BIT;
        ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT;
        ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_SHADER_FLOAT16_INT8_BIT;
        ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
    }

    noRenderControlEnc = mCaps.vulkanCapset.noRenderControlEnc;
}

void ResourceTracker::setupFeatures(const struct GfxStreamVkFeatureInfo* features) {
    if (mFeatureInfo.setupComplete) {
        return;
    }

    mFeatureInfo = *features;
#if DETECT_OS_ANDROID
    if (mFeatureInfo.hasDirectMem) {
        mGoldfishAddressSpaceBlockProvider.reset(
            new GoldfishAddressSpaceBlockProvider(GoldfishAddressSpaceSubdeviceType::NoSubdevice));
    }
#endif  // DETECT_OS_ANDROID

#ifdef VK_USE_PLATFORM_FUCHSIA
    if (mFeatureInfo.hasVulkan) {
        fidl::ClientEnd<fuchsia_hardware_goldfish::ControlDevice> channel{zx::channel(
            GetConnectToServiceFunction()("/loader-gpu-devices/class/goldfish-control/000"))};
        if (!channel) {
            mesa_loge("failed to open control device");
            abort();
        }
        mControlDevice =
            fidl::WireSyncClient<fuchsia_hardware_goldfish::ControlDevice>(std::move(channel));

        fidl::ClientEnd<fuchsia_sysmem::Allocator> sysmem_channel{
            zx::channel(GetConnectToServiceFunction()("/svc/fuchsia.sysmem.Allocator"))};
        if (!sysmem_channel) {
            mesa_loge("failed to open sysmem connection");
        }
        mSysmemAllocator =
            fidl::WireSyncClient<fuchsia_sysmem::Allocator>(std::move(sysmem_channel));
        char name[ZX_MAX_NAME_LEN] = {};
        zx_object_get_property(zx_process_self(), ZX_PROP_NAME, name, sizeof(name));
        std::string client_name(name);
        client_name += "-goldfish";
        zx_info_handle_basic_t info;
        zx_object_get_info(zx_process_self(), ZX_INFO_HANDLE_BASIC, &info, sizeof(info), nullptr,
                           nullptr);
        mSysmemAllocator->SetDebugClientInfo(fidl::StringView::FromExternal(client_name),
                                             info.koid);
    }
#endif

    if (mFeatureInfo.hasVulkanNullOptionalStrings) {
        ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_NULL_OPTIONAL_STRINGS_BIT;
    }
    if (mFeatureInfo.hasVulkanIgnoredHandles) {
        ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT;
    }
    if (mFeatureInfo.hasVulkanShaderFloat16Int8) {
        ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_SHADER_FLOAT16_INT8_BIT;
    }
    if (mFeatureInfo.hasVulkanQueueSubmitWithCommands) {
        ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
    }

    mFeatureInfo.setupComplete = true;
}

void ResourceTracker::setupPlatformHelpers() {
#if defined(VK_USE_PLATFORM_ANDROID_KHR)
    VirtGpuDevice* instance = VirtGpuDevice::getInstance(kCapsetGfxStreamVulkan);
    auto deviceHandle = instance->getDeviceHandle();
    if (mGralloc == nullptr) {
        mGralloc.reset(gfxstream::createPlatformGralloc(deviceHandle));
    }
#endif

    if (mSyncHelper == nullptr) {
        mSyncHelper.reset(gfxstream::createPlatformSyncHelper());
    }
}

void ResourceTracker::setThreadingCallbacks(const ResourceTracker::ThreadingCallbacks& callbacks) {
    ResourceTracker::threadingCallbacks = callbacks;
}

bool ResourceTracker::usingDirectMapping() const { return true; }

uint32_t ResourceTracker::getStreamFeatures() const { return ResourceTracker::streamFeatureBits; }

bool ResourceTracker::supportsDeferredCommands() const {
    return mFeatureInfo.hasDeferredVulkanCommands;
}

bool ResourceTracker::supportsAsyncQueueSubmit() const {
    return mFeatureInfo.hasVulkanAsyncQueueSubmit;
}

bool ResourceTracker::supportsCreateResourcesWithRequirements() const {
    return mFeatureInfo.hasVulkanCreateResourcesWithRequirements;
}

int ResourceTracker::getHostInstanceExtensionIndex(const std::string& extName) const {
    int i = 0;
    for (const auto& prop : mHostInstanceExtensions) {
        if (extName == std::string(prop.extensionName)) {
            return i;
        }
        ++i;
    }
    return -1;
}

int ResourceTracker::getHostDeviceExtensionIndex(const std::string& extName) const {
    int i = 0;
    for (const auto& prop : mHostDeviceExtensions) {
        if (extName == std::string(prop.extensionName)) {
            return i;
        }
        ++i;
    }
    return -1;
}

void ResourceTracker::deviceMemoryTransform_tohost(VkDeviceMemory* memory, uint32_t memoryCount,
                                                   VkDeviceSize* offset, uint32_t offsetCount,
                                                   VkDeviceSize* size, uint32_t sizeCount,
                                                   uint32_t* typeIndex, uint32_t typeIndexCount,
                                                   uint32_t* typeBits, uint32_t typeBitsCount) {
    (void)memoryCount;
    (void)offsetCount;
    (void)sizeCount;
    (void)typeIndex;
    (void)typeIndexCount;
    (void)typeBits;
    (void)typeBitsCount;

    if (memory) {
        std::lock_guard<std::recursive_mutex> lock(mLock);

        for (uint32_t i = 0; i < memoryCount; ++i) {
            VkDeviceMemory mem = memory[i];

            auto it = info_VkDeviceMemory.find(mem);
            if (it == info_VkDeviceMemory.end()) return;

            const auto& info = it->second;

            if (!info.coherentMemory) continue;

            memory[i] = info.coherentMemory->getDeviceMemory();

            if (offset) {
                offset[i] = info.coherentMemoryOffset + offset[i];
            }

            if (size && size[i] == VK_WHOLE_SIZE) {
                size[i] = info.allocationSize;
            }

            // TODO
            (void)memory;
            (void)offset;
            (void)size;
        }
    }
}
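
// Worked example (hypothetical values): if the guest VkDeviceMemory is a
// suballocation at coherentMemoryOffset == 0x1000 inside a host allocation,
// then a guest-side (memory, offset == 0x40) pair is rewritten to
// (hostMemory, offset == 0x1040) before encoding, and a VK_WHOLE_SIZE size is
// replaced by the guest allocation's own size, so the host never sees a range
// extending past the suballocation.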

uint32_t ResourceTracker::getColorBufferMemoryIndex(void* context, VkDevice device) {
    // Create a test image to get the memory requirements.
    VkEncoder* enc = (VkEncoder*)context;
    VkImageCreateInfo createInfo = {
        .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
        .imageType = VK_IMAGE_TYPE_2D,
        .format = VK_FORMAT_R8G8B8A8_UNORM,
        .extent = {64, 64, 1},
        .mipLevels = 1,
        .arrayLayers = 1,
        .samples = VK_SAMPLE_COUNT_1_BIT,
        .tiling = VK_IMAGE_TILING_OPTIMAL,
        .usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT |
                 VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
                 VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT,
        .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
    };
    VkImage image = VK_NULL_HANDLE;
    VkResult res = enc->vkCreateImage(device, &createInfo, nullptr, &image, true /* do lock */);

    if (res != VK_SUCCESS) {
        return 0;
    }

    VkMemoryRequirements memReqs;
    enc->vkGetImageMemoryRequirements(device, image, &memReqs, true /* do lock */);
    enc->vkDestroyImage(device, image, nullptr, true /* do lock */);

    const VkPhysicalDeviceMemoryProperties& memProps =
        getPhysicalDeviceMemoryProperties(context, device, VK_NULL_HANDLE);

    // Currently, the host looks for the last index whose memory property
    // flags include VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT.
    VkMemoryPropertyFlags memoryProperty = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    for (int i = VK_MAX_MEMORY_TYPES - 1; i >= 0; --i) {
        if ((memReqs.memoryTypeBits & (1u << i)) &&
            (memProps.memoryTypes[i].propertyFlags & memoryProperty)) {
            return i;
        }
    }

    return 0;
}
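
// Worked example (hypothetical values): with memReqs.memoryTypeBits == 0b0110
// and VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT set on types 1 and 2, the backwards
// scan above returns 2, mirroring the host's preference for the *last*
// device-local type:
//
//   // i = 31 .. 0; first hit walking downward is i == 2
//   (0b0110 & (1u << 2)) != 0 && isDeviceLocal(2)  // -> return 2
//
// (isDeviceLocal() is shorthand for the propertyFlags test above, not a real
// helper in this file.)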

VkResult ResourceTracker::on_vkEnumerateInstanceExtensionProperties(
    void* context, VkResult, const char*, uint32_t* pPropertyCount,
    VkExtensionProperties* pProperties) {
    std::vector<const char*> allowedExtensionNames = {
        "VK_KHR_get_physical_device_properties2",
        "VK_KHR_sampler_ycbcr_conversion",
#if defined(VK_USE_PLATFORM_ANDROID_KHR) || DETECT_OS_LINUX
        "VK_KHR_external_semaphore_capabilities",
        "VK_KHR_external_memory_capabilities",
        "VK_KHR_external_fence_capabilities",
        "VK_EXT_debug_utils",
#endif
    };

    VkEncoder* enc = (VkEncoder*)context;

    // Only advertise a select set of extensions.
    if (mHostInstanceExtensions.empty()) {
        uint32_t hostPropCount = 0;
        enc->vkEnumerateInstanceExtensionProperties(nullptr, &hostPropCount, nullptr,
                                                    true /* do lock */);
        mHostInstanceExtensions.resize(hostPropCount);

        VkResult hostRes = enc->vkEnumerateInstanceExtensionProperties(
            nullptr, &hostPropCount, mHostInstanceExtensions.data(), true /* do lock */);

        if (hostRes != VK_SUCCESS) {
            return hostRes;
        }
    }

    std::vector<VkExtensionProperties> filteredExts;

    for (size_t i = 0; i < allowedExtensionNames.size(); ++i) {
        auto extIndex = getHostInstanceExtensionIndex(allowedExtensionNames[i]);
        if (extIndex != -1) {
            filteredExts.push_back(mHostInstanceExtensions[extIndex]);
        }
    }

    VkExtensionProperties anbExtProps[] = {
#ifdef VK_USE_PLATFORM_FUCHSIA
        {"VK_KHR_external_memory_capabilities", 1},
        {"VK_KHR_external_semaphore_capabilities", 1},
#endif
    };

    for (auto& anbExtProp : anbExtProps) {
        filteredExts.push_back(anbExtProp);
    }

    // Spec:
    //
    // https://registry.khronos.org/vulkan/specs/latest/man/html/vkEnumerateInstanceExtensionProperties.html
    //
    // If pProperties is NULL, then the number of extension properties
    // available is returned in pPropertyCount. Otherwise, pPropertyCount
    // must point to a variable set by the user to the number of elements
    // in the pProperties array, and on return the variable is overwritten
    // with the number of structures actually written to pProperties. If
    // pPropertyCount is less than the number of extension properties
    // available, at most pPropertyCount structures will be written. If
    // pPropertyCount is smaller than the number of extensions available,
    // VK_INCOMPLETE will be returned instead of VK_SUCCESS, to indicate
    // that not all the available properties were returned.
    //
    // pPropertyCount must be a valid pointer to a uint32_t value
    if (!pPropertyCount) return VK_ERROR_INITIALIZATION_FAILED;

    if (!pProperties) {
        *pPropertyCount = (uint32_t)filteredExts.size();
        return VK_SUCCESS;
    } else {
        auto actualExtensionCount = (uint32_t)filteredExts.size();
        if (*pPropertyCount > actualExtensionCount) {
            *pPropertyCount = actualExtensionCount;
        }

        for (uint32_t i = 0; i < *pPropertyCount; ++i) {
            pProperties[i] = filteredExts[i];
        }

        if (actualExtensionCount > *pPropertyCount) {
            return VK_INCOMPLETE;
        }

        return VK_SUCCESS;
    }
}
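
// Application-side sketch of the two-call idiom this implements; this is
// standard Vulkan usage, not gfxstream-specific:
//
//   uint32_t count = 0;
//   vkEnumerateInstanceExtensionProperties(nullptr, &count, nullptr);
//   std::vector<VkExtensionProperties> props(count);
//   VkResult res =
//       vkEnumerateInstanceExtensionProperties(nullptr, &count, props.data());
//   // res is VK_INCOMPLETE only if fewer slots were supplied than there are
//   // extensions available; here count came straight from the first call.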

VkResult ResourceTracker::on_vkEnumerateDeviceExtensionProperties(
    void* context, VkResult, VkPhysicalDevice physdev, const char*, uint32_t* pPropertyCount,
    VkExtensionProperties* pProperties) {
    std::vector<const char*> allowedExtensionNames = {
        "VK_KHR_vulkan_memory_model",
        "VK_KHR_buffer_device_address",
        "VK_KHR_maintenance1",
        "VK_KHR_maintenance2",
        "VK_KHR_maintenance3",
        "VK_KHR_bind_memory2",
        "VK_KHR_dedicated_allocation",
        "VK_KHR_get_memory_requirements2",
        "VK_KHR_sampler_ycbcr_conversion",
        "VK_KHR_shader_float16_int8",
        // Timeline semaphores are buggy in newer NVIDIA drivers
        // (vkWaitSemaphoresKHR causes further vkCommandBuffer dispatches to deadlock)
#ifndef VK_USE_PLATFORM_ANDROID_KHR
        "VK_KHR_timeline_semaphore",
#endif
        "VK_AMD_gpu_shader_half_float",
        "VK_NV_shader_subgroup_partitioned",
        "VK_KHR_shader_subgroup_extended_types",
        "VK_EXT_subgroup_size_control",
        "VK_EXT_provoking_vertex",
        "VK_KHR_line_rasterization",
        "VK_EXT_line_rasterization",
        "VK_KHR_shader_terminate_invocation",
        "VK_EXT_transform_feedback",
        "VK_EXT_primitive_topology_list_restart",
        "VK_EXT_index_type_uint8",
        "VK_EXT_load_store_op_none",
        "VK_EXT_swapchain_colorspace",
        "VK_EXT_image_robustness",
        "VK_EXT_custom_border_color",
        "VK_EXT_shader_stencil_export",
        "VK_KHR_image_format_list",
        "VK_KHR_incremental_present",
        "VK_KHR_pipeline_executable_properties",
        "VK_EXT_queue_family_foreign",
        "VK_EXT_scalar_block_layout",
        "VK_KHR_descriptor_update_template",
        "VK_KHR_storage_buffer_storage_class",
        "VK_EXT_depth_clip_enable",
        "VK_KHR_create_renderpass2",
        "VK_EXT_vertex_attribute_divisor",
        "VK_EXT_host_query_reset",
#if defined(VK_USE_PLATFORM_ANDROID_KHR) || DETECT_OS_LINUX
        "VK_KHR_external_semaphore",
        "VK_KHR_external_semaphore_fd",
        // "VK_KHR_external_semaphore_win32", not exposed because it's translated to fd
        "VK_KHR_external_memory",
        "VK_KHR_external_fence",
        "VK_KHR_external_fence_fd",
        "VK_EXT_device_memory_report",
#endif
#if DETECT_OS_LINUX && !defined(VK_USE_PLATFORM_ANDROID_KHR)
        "VK_KHR_imageless_framebuffer",
#endif
        // Vulkan 1.3
        "VK_KHR_synchronization2",
        "VK_EXT_private_data",
        "VK_EXT_color_write_enable",
    };

    VkEncoder* enc = (VkEncoder*)context;

    if (mHostDeviceExtensions.empty()) {
        uint32_t hostPropCount = 0;
        enc->vkEnumerateDeviceExtensionProperties(physdev, nullptr, &hostPropCount, nullptr,
                                                  true /* do lock */);
        mHostDeviceExtensions.resize(hostPropCount);

        VkResult hostRes = enc->vkEnumerateDeviceExtensionProperties(
            physdev, nullptr, &hostPropCount, mHostDeviceExtensions.data(), true /* do lock */);

        if (hostRes != VK_SUCCESS) {
            return hostRes;
        }
    }

    std::vector<VkExtensionProperties> filteredExts;

    for (size_t i = 0; i < allowedExtensionNames.size(); ++i) {
        auto extIndex = getHostDeviceExtensionIndex(allowedExtensionNames[i]);
        if (extIndex != -1) {
            filteredExts.push_back(mHostDeviceExtensions[extIndex]);
        }
    }

    VkExtensionProperties anbExtProps[] = {
#ifdef VK_USE_PLATFORM_ANDROID_KHR
        {"VK_ANDROID_native_buffer", 7},
#endif
#ifdef VK_USE_PLATFORM_FUCHSIA
        {"VK_KHR_external_memory", 1},
        {"VK_KHR_external_semaphore", 1},
        {"VK_FUCHSIA_external_semaphore", 1},
#endif
    };

    for (auto& anbExtProp : anbExtProps) {
        filteredExts.push_back(anbExtProp);
    }

    /*
     * GfxstreamEnd2EndVkTest::DeviceMemoryReport always assumes the memory report
     * extension is present. It is filtered out when sent host-side, since for a
     * virtual GPU it is quite difficult to implement.
     *
     * The Mesa runtime checks physical device features. So if the test tries to
     * enable a device-level extension that does not definitely exist, the test
     * will fail.
     *
     * The test could also be modified to check VkPhysicalDeviceDeviceMemoryReportFeaturesEXT,
     * but that's more involved. Work around this by always advertising the extension.
     * Tracking bug: b/338270042
     */
    filteredExts.push_back(VkExtensionProperties{"VK_EXT_device_memory_report", 1});

#if defined(VK_USE_PLATFORM_ANDROID_KHR) || DETECT_OS_LINUX
    bool hostSupportsExternalFenceFd =
        getHostDeviceExtensionIndex("VK_KHR_external_fence_fd") != -1;
    if (!hostSupportsExternalFenceFd) {
        filteredExts.push_back(VkExtensionProperties{"VK_KHR_external_fence_fd", 1});
    }
#endif

#if defined(VK_USE_PLATFORM_ANDROID_KHR) || DETECT_OS_LINUX
    bool hostHasPosixExternalSemaphore =
        getHostDeviceExtensionIndex("VK_KHR_external_semaphore_fd") != -1;
    if (!hostHasPosixExternalSemaphore) {
        // Always advertise POSIX external semaphore capabilities on Android/Linux.
        // SYNC_FD handles will always work, regardless of host support. Support
        // for non-sync, opaque FDs depends on host driver support, and is handled
        // accordingly by the host.
        filteredExts.push_back(VkExtensionProperties{"VK_KHR_external_semaphore_fd", 1});
    }
#endif

    bool win32ExtMemAvailable = getHostDeviceExtensionIndex("VK_KHR_external_memory_win32") != -1;
    bool posixExtMemAvailable = getHostDeviceExtensionIndex("VK_KHR_external_memory_fd") != -1;
    bool metalExtMemAvailable = getHostDeviceExtensionIndex("VK_EXT_external_memory_metal") != -1 ||
                                getHostDeviceExtensionIndex("VK_MVK_moltenvk") != -1;
    bool qnxExtMemAvailable =
        getHostDeviceExtensionIndex("VK_QNX_external_memory_screen_buffer") != -1;

    bool hostHasExternalMemorySupport =
        win32ExtMemAvailable || posixExtMemAvailable || metalExtMemAvailable || qnxExtMemAvailable;

    if (hostHasExternalMemorySupport) {
#ifdef VK_USE_PLATFORM_ANDROID_KHR
        filteredExts.push_back(
            VkExtensionProperties{"VK_ANDROID_external_memory_android_hardware_buffer", 7});
        filteredExts.push_back(VkExtensionProperties{"VK_EXT_queue_family_foreign", 1});
#endif
#ifdef VK_USE_PLATFORM_FUCHSIA
        filteredExts.push_back(VkExtensionProperties{"VK_FUCHSIA_external_memory", 1});
        filteredExts.push_back(VkExtensionProperties{"VK_FUCHSIA_buffer_collection", 1});
#endif
#if !defined(VK_USE_PLATFORM_ANDROID_KHR) && DETECT_OS_LINUX
        filteredExts.push_back(VkExtensionProperties{"VK_KHR_external_memory_fd", 1});
        filteredExts.push_back(VkExtensionProperties{"VK_EXT_external_memory_dma_buf", 1});
        // In case the host doesn't support format modifiers, they are emulated
        // on the guest side.
        filteredExts.push_back(VkExtensionProperties{"VK_EXT_image_drm_format_modifier", 1});
#endif
    }

    // NOTE: the Vulkan Loader's trampoline functions will remove duplicates. This
    // can lead to errors if this function returns VK_SUCCESS with N elements
    // (including a duplicate) but the Vulkan Loader's trampoline function returns
    // VK_INCOMPLETE with N-1 elements (without the duplicate).
    std::sort(filteredExts.begin(),
              filteredExts.end(),
              [](const VkExtensionProperties& a,
                 const VkExtensionProperties& b) {
                  return strcmp(a.extensionName, b.extensionName) < 0;
              });
    filteredExts.erase(std::unique(filteredExts.begin(),
                                   filteredExts.end(),
                                   [](const VkExtensionProperties& a,
                                      const VkExtensionProperties& b) {
                                       return strcmp(a.extensionName, b.extensionName) == 0;
                                   }),
                       filteredExts.end());
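
    // Illustration with hypothetical counts: if we reported 5 extensions,
    // one of them a duplicate, an app could size its array from our count of
    // 5, receive VK_SUCCESS from us, and yet see the loader's trampoline hand
    // back VK_INCOMPLETE with only 4 deduplicated entries. Sorting and
    // uniquing first keeps the two counts equal.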

    // Spec:
    //
    // https://registry.khronos.org/vulkan/specs/latest/man/html/vkEnumerateDeviceExtensionProperties.html
    //
    // pPropertyCount is a pointer to an integer related to the number of
    // extension properties available or queried, and is treated in the
    // same fashion as the
    // vkEnumerateInstanceExtensionProperties::pPropertyCount parameter.
    //
    // https://registry.khronos.org/vulkan/specs/latest/man/html/vkEnumerateInstanceExtensionProperties.html
    //
    // If pProperties is NULL, then the number of extension properties
    // available is returned in pPropertyCount. Otherwise, pPropertyCount
    // must point to a variable set by the user to the number of elements
    // in the pProperties array, and on return the variable is overwritten
    // with the number of structures actually written to pProperties. If
    // pPropertyCount is less than the number of extension properties
    // available, at most pPropertyCount structures will be written. If
    // pPropertyCount is smaller than the number of extensions available,
    // VK_INCOMPLETE will be returned instead of VK_SUCCESS, to indicate
    // that not all the available properties were returned.
    //
    // pPropertyCount must be a valid pointer to a uint32_t value

    if (!pPropertyCount) return VK_ERROR_INITIALIZATION_FAILED;

    if (!pProperties) {
        *pPropertyCount = (uint32_t)filteredExts.size();
        return VK_SUCCESS;
    } else {
        auto actualExtensionCount = (uint32_t)filteredExts.size();
        if (*pPropertyCount > actualExtensionCount) {
            *pPropertyCount = actualExtensionCount;
        }

        for (uint32_t i = 0; i < *pPropertyCount; ++i) {
            pProperties[i] = filteredExts[i];
        }

        if (actualExtensionCount > *pPropertyCount) {
            return VK_INCOMPLETE;
        }

        return VK_SUCCESS;
    }
}

VkResult ResourceTracker::on_vkEnumeratePhysicalDevices(void* context, VkResult,
                                                        VkInstance instance,
                                                        uint32_t* pPhysicalDeviceCount,
                                                        VkPhysicalDevice* pPhysicalDevices) {
    VkEncoder* enc = (VkEncoder*)context;

    if (!instance) return VK_ERROR_INITIALIZATION_FAILED;

    if (!pPhysicalDeviceCount) return VK_ERROR_INITIALIZATION_FAILED;

    std::unique_lock<std::recursive_mutex> lock(mLock);

    // When this function is called, we actually need to do two things:
    // - Get full information about physical devices from the host,
    //   even if the guest did not ask for it
    // - Serve the guest query according to the spec:
    //
    // https://registry.khronos.org/vulkan/specs/latest/man/html/vkEnumeratePhysicalDevices.html

    auto it = info_VkInstance.find(instance);

    if (it == info_VkInstance.end()) return VK_ERROR_INITIALIZATION_FAILED;

    auto& info = it->second;

    // Get the full host information here if it doesn't exist already.
    if (info.physicalDevices.empty()) {
        uint32_t hostPhysicalDeviceCount = 0;

        lock.unlock();
        VkResult countRes = enc->vkEnumeratePhysicalDevices(instance, &hostPhysicalDeviceCount,
                                                            nullptr, false /* no lock */);
        lock.lock();

        if (countRes != VK_SUCCESS) {
            mesa_loge(
                "%s: failed: could not count host physical devices. "
                "Error %d\n",
                __func__, countRes);
            return countRes;
        }

        info.physicalDevices.resize(hostPhysicalDeviceCount);

        lock.unlock();
        VkResult enumRes = enc->vkEnumeratePhysicalDevices(
            instance, &hostPhysicalDeviceCount, info.physicalDevices.data(), false /* no lock */);
        lock.lock();

        if (enumRes != VK_SUCCESS) {
            mesa_loge(
                "%s: failed: could not retrieve host physical devices. "
                "Error %d\n",
                __func__, enumRes);
            return enumRes;
        }
    }

    // Serve the guest query according to the spec.
    //
    // https://registry.khronos.org/vulkan/specs/latest/man/html/vkEnumeratePhysicalDevices.html
    //
    // If pPhysicalDevices is NULL, then the number of physical devices
    // available is returned in pPhysicalDeviceCount. Otherwise,
    // pPhysicalDeviceCount must point to a variable set by the user to the
    // number of elements in the pPhysicalDevices array, and on return the
    // variable is overwritten with the number of handles actually written
    // to pPhysicalDevices. If pPhysicalDeviceCount is less than the number
    // of physical devices available, at most pPhysicalDeviceCount
    // structures will be written. If pPhysicalDeviceCount is smaller than
    // the number of physical devices available, VK_INCOMPLETE will be
    // returned instead of VK_SUCCESS, to indicate that not all the
    // available physical devices were returned.

    if (!pPhysicalDevices) {
        *pPhysicalDeviceCount = (uint32_t)info.physicalDevices.size();
        return VK_SUCCESS;
    } else {
        uint32_t actualDeviceCount = (uint32_t)info.physicalDevices.size();
        uint32_t toWrite =
            actualDeviceCount < *pPhysicalDeviceCount ? actualDeviceCount : *pPhysicalDeviceCount;

        for (uint32_t i = 0; i < toWrite; ++i) {
            pPhysicalDevices[i] = info.physicalDevices[i];
        }

        *pPhysicalDeviceCount = toWrite;

        if (actualDeviceCount > *pPhysicalDeviceCount) {
            return VK_INCOMPLETE;
        }

        return VK_SUCCESS;
    }
}
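
// Caller-visible behavior sketch (hypothetical counts): with two physical
// devices cached in info.physicalDevices, a caller passing
// *pPhysicalDeviceCount == 1 gets exactly one handle written back, the count
// rewritten to 1, and VK_INCOMPLETE; a caller passing
// pPhysicalDevices == nullptr just gets the count (2) and VK_SUCCESS.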

void ResourceTracker::on_vkGetPhysicalDeviceProperties(void*, VkPhysicalDevice,
                                                       VkPhysicalDeviceProperties* pProperties) {
#if DETECT_OS_LINUX && !defined(VK_USE_PLATFORM_ANDROID_KHR)
    if (pProperties) {
        if (VK_PHYSICAL_DEVICE_TYPE_CPU == pProperties->deviceType) {
            /* For a Linux guest: even if the host driver reports DEVICE_TYPE_CPU,
             * override it to VIRTUAL_GPU, since otherwise Linux DRM interfaces
             * will take unexpected code paths to deal with a "software" driver.
             */
            pProperties->deviceType = VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU;
        }
    }
#endif
}

void ResourceTracker::on_vkGetPhysicalDeviceFeatures2(void*, VkPhysicalDevice,
                                                      VkPhysicalDeviceFeatures2* pFeatures) {
    if (pFeatures) {
        VkPhysicalDeviceDeviceMemoryReportFeaturesEXT* memoryReportFeaturesEXT =
            vk_find_struct(pFeatures, PHYSICAL_DEVICE_DEVICE_MEMORY_REPORT_FEATURES_EXT);
        if (memoryReportFeaturesEXT) {
            memoryReportFeaturesEXT->deviceMemoryReport = VK_TRUE;
        }
    }
}

void ResourceTracker::on_vkGetPhysicalDeviceFeatures2KHR(void* context,
                                                         VkPhysicalDevice physicalDevice,
                                                         VkPhysicalDeviceFeatures2* pFeatures) {
    on_vkGetPhysicalDeviceFeatures2(context, physicalDevice, pFeatures);
}

void ResourceTracker::on_vkGetPhysicalDeviceProperties2(void* context,
                                                        VkPhysicalDevice physicalDevice,
                                                        VkPhysicalDeviceProperties2* pProperties) {
    if (pProperties) {
        on_vkGetPhysicalDeviceProperties(context, physicalDevice, &pProperties->properties);
    }
}

void ResourceTracker::on_vkGetPhysicalDeviceProperties2KHR(
    void* context, VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties2* pProperties) {
    on_vkGetPhysicalDeviceProperties2(context, physicalDevice, pProperties);
}

void ResourceTracker::on_vkGetPhysicalDeviceMemoryProperties(
    void* context, VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties* out) {
    // gfxstream decides which physical device to expose to the guest on startup.
    // Otherwise, we would need a physical-device-to-properties mapping.
    *out = getPhysicalDeviceMemoryProperties(context, VK_NULL_HANDLE, physicalDevice);
}

void ResourceTracker::on_vkGetPhysicalDeviceMemoryProperties2(
    void*, VkPhysicalDevice physdev, VkPhysicalDeviceMemoryProperties2* out) {
    on_vkGetPhysicalDeviceMemoryProperties(nullptr, physdev, &out->memoryProperties);
}

void ResourceTracker::on_vkGetDeviceQueue(void*, VkDevice device, uint32_t, uint32_t,
                                          VkQueue* pQueue) {
    std::lock_guard<std::recursive_mutex> lock(mLock);
    info_VkQueue[*pQueue].device = device;
}

void ResourceTracker::on_vkGetDeviceQueue2(void*, VkDevice device, const VkDeviceQueueInfo2*,
                                           VkQueue* pQueue) {
    std::lock_guard<std::recursive_mutex> lock(mLock);
    info_VkQueue[*pQueue].device = device;
}

VkResult ResourceTracker::on_vkCreateInstance(void* context, VkResult input_result,
                                              const VkInstanceCreateInfo* createInfo,
                                              const VkAllocationCallbacks*, VkInstance* pInstance) {
    if (input_result != VK_SUCCESS) return input_result;

    VkEncoder* enc = (VkEncoder*)context;

    uint32_t apiVersion;
    input_result = enc->vkEnumerateInstanceVersion(&apiVersion, false /* no lock */);

    setInstanceInfo(*pInstance, createInfo->enabledExtensionCount,
                    createInfo->ppEnabledExtensionNames, apiVersion);

    return input_result;
}

VkResult ResourceTracker::on_vkCreateDevice(void* context, VkResult input_result,
                                            VkPhysicalDevice physicalDevice,
                                            const VkDeviceCreateInfo* pCreateInfo,
                                            const VkAllocationCallbacks*, VkDevice* pDevice) {
    if (input_result != VK_SUCCESS) return input_result;

    VkEncoder* enc = (VkEncoder*)context;

    VkPhysicalDeviceProperties props;
    VkPhysicalDeviceMemoryProperties memProps;
    enc->vkGetPhysicalDeviceProperties(physicalDevice, &props, false /* no lock */);
    enc->vkGetPhysicalDeviceMemoryProperties(physicalDevice, &memProps, false /* no lock */);

    setDeviceInfo(*pDevice, physicalDevice, props, memProps, pCreateInfo->enabledExtensionCount,
                  pCreateInfo->ppEnabledExtensionNames, pCreateInfo->pNext);

    return input_result;
}

void ResourceTracker::on_vkDestroyDevice_pre(void* context, VkDevice device,
                                             const VkAllocationCallbacks*) {
    (void)context;
    std::lock_guard<std::recursive_mutex> lock(mLock);

    auto it = info_VkDevice.find(device);
    if (it == info_VkDevice.end()) return;

    for (auto itr = info_VkDeviceMemory.cbegin(); itr != info_VkDeviceMemory.cend();) {
        auto& memInfo = itr->second;
        if (memInfo.device == device) {
            itr = info_VkDeviceMemory.erase(itr);
        } else {
            itr++;
        }
    }
}

#if defined(VK_USE_PLATFORM_ANDROID_KHR) || DETECT_OS_LINUX
void updateMemoryTypeBits(uint32_t* memoryTypeBits, uint32_t memoryIndex) {
    *memoryTypeBits = 1u << memoryIndex;
}
#endif
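
// e.g. memoryIndex == 3 yields *memoryTypeBits == 0b1000 (only bit 3 set):
// external-memory imports advertise exactly one usable memory type rather
// than a mask of candidates.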

#ifdef VK_USE_PLATFORM_ANDROID_KHR

VkResult ResourceTracker::on_vkGetAndroidHardwareBufferPropertiesANDROID(
    void* context, VkResult, VkDevice device, const AHardwareBuffer* buffer,
    VkAndroidHardwareBufferPropertiesANDROID* pProperties) {
    // Delete once goldfish Linux drivers are gone
    if (mCaps.vulkanCapset.colorBufferMemoryIndex == 0xFFFFFFFF) {
        mCaps.vulkanCapset.colorBufferMemoryIndex = getColorBufferMemoryIndex(context, device);
    }

    updateMemoryTypeBits(&pProperties->memoryTypeBits, mCaps.vulkanCapset.colorBufferMemoryIndex);

    return getAndroidHardwareBufferPropertiesANDROID(mGralloc.get(), buffer, pProperties);
}
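
// App-side flow this call serves (sketch; the AHardwareBuffer_Desc fields are
// illustrative, not prescriptive):
//
//   AHardwareBuffer_Desc desc = {
//       .width = 64, .height = 64, .layers = 1,
//       .format = AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM,
//       .usage = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE,
//   };
//   AHardwareBuffer* ahb = nullptr;
//   AHardwareBuffer_allocate(&desc, &ahb);
//   VkAndroidHardwareBufferPropertiesANDROID props = {
//       .sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID};
//   vkGetAndroidHardwareBufferPropertiesANDROID(device, ahb, &props);
//   // props.memoryTypeBits now has exactly the color-buffer memory index set.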

VkResult ResourceTracker::on_vkGetMemoryAndroidHardwareBufferANDROID(
    void*, VkResult, VkDevice device, const VkMemoryGetAndroidHardwareBufferInfoANDROID* pInfo,
    struct AHardwareBuffer** pBuffer) {
    if (!pInfo) return VK_ERROR_INITIALIZATION_FAILED;
    if (!pInfo->memory) return VK_ERROR_INITIALIZATION_FAILED;

    std::lock_guard<std::recursive_mutex> lock(mLock);

    auto deviceIt = info_VkDevice.find(device);

    if (deviceIt == info_VkDevice.end()) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    auto memoryIt = info_VkDeviceMemory.find(pInfo->memory);

    if (memoryIt == info_VkDeviceMemory.end()) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    auto& info = memoryIt->second;
    VkResult queryRes = getMemoryAndroidHardwareBufferANDROID(mGralloc.get(), &info.ahw);

    if (queryRes != VK_SUCCESS) return queryRes;

    *pBuffer = info.ahw;

    return queryRes;
}
#endif

#ifdef VK_USE_PLATFORM_FUCHSIA
VkResult ResourceTracker::on_vkGetMemoryZirconHandleFUCHSIA(
    void*, VkResult, VkDevice device, const VkMemoryGetZirconHandleInfoFUCHSIA* pInfo,
    uint32_t* pHandle) {
    if (!pInfo) return VK_ERROR_INITIALIZATION_FAILED;
    if (!pInfo->memory) return VK_ERROR_INITIALIZATION_FAILED;

    std::lock_guard<std::recursive_mutex> lock(mLock);

    auto deviceIt = info_VkDevice.find(device);

    if (deviceIt == info_VkDevice.end()) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    auto memoryIt = info_VkDeviceMemory.find(pInfo->memory);

    if (memoryIt == info_VkDeviceMemory.end()) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    auto& info = memoryIt->second;

    if (info.vmoHandle == ZX_HANDLE_INVALID) {
        mesa_loge("%s: memory cannot be exported", __func__);
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    *pHandle = ZX_HANDLE_INVALID;
    zx_handle_duplicate(info.vmoHandle, ZX_RIGHT_SAME_RIGHTS, pHandle);
    return VK_SUCCESS;
}

VkResult ResourceTracker::on_vkGetMemoryZirconHandlePropertiesFUCHSIA(
    void*, VkResult, VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType,
    uint32_t handle, VkMemoryZirconHandlePropertiesFUCHSIA* pProperties) {
    using fuchsia_hardware_goldfish::wire::kMemoryPropertyDeviceLocal;
    using fuchsia_hardware_goldfish::wire::kMemoryPropertyHostVisible;

    if (handleType != VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    zx_info_handle_basic_t handleInfo;
    zx_status_t status = zx::unowned_vmo(handle)->get_info(ZX_INFO_HANDLE_BASIC, &handleInfo,
                                                           sizeof(handleInfo), nullptr, nullptr);
    if (status != ZX_OK || handleInfo.type != ZX_OBJ_TYPE_VMO) {
        return VK_ERROR_INVALID_EXTERNAL_HANDLE;
    }

    std::lock_guard<std::recursive_mutex> lock(mLock);

    auto deviceIt = info_VkDevice.find(device);

    if (deviceIt == info_VkDevice.end()) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    auto& info = deviceIt->second;

    zx::vmo vmo_dup;
    status = zx::unowned_vmo(handle)->duplicate(ZX_RIGHT_SAME_RIGHTS, &vmo_dup);
    if (status != ZX_OK) {
        mesa_loge("zx_handle_duplicate() error: %d", status);
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    uint32_t memoryProperty = 0u;

    auto result = mControlDevice->GetBufferHandleInfo(std::move(vmo_dup));
    if (!result.ok()) {
        mesa_loge("mControlDevice->GetBufferHandleInfo fatal error: epitaph: %d", result.status());
        return VK_ERROR_INITIALIZATION_FAILED;
    }
    if (result.value().is_ok()) {
        memoryProperty = result.value().value()->info.memory_property();
    } else if (result.value().error_value() == ZX_ERR_NOT_FOUND) {
        // If a VMO is allocated while ColorBuffer/Buffer is not created,
        // it must be a device-local buffer, since for host-visible buffers,
        // ColorBuffer/Buffer is created at sysmem allocation time.
        memoryProperty = kMemoryPropertyDeviceLocal;
    } else {
        // Importing read-only host memory into the Vulkan driver should not
        // work, but it is not an error to try to do so. Returning a
        // VkMemoryZirconHandlePropertiesFUCHSIA with no available
        // memoryType bits should be enough for clients. See fxbug.dev/42098398
        // for other issues with this flow.
        mesa_logw("GetBufferHandleInfo failed: %d", result.value().error_value());
        pProperties->memoryTypeBits = 0;
        return VK_SUCCESS;
    }

    pProperties->memoryTypeBits = 0;
    for (uint32_t i = 0; i < info.memProps.memoryTypeCount; ++i) {
        if (((memoryProperty & kMemoryPropertyDeviceLocal) &&
             (info.memProps.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)) ||
            ((memoryProperty & kMemoryPropertyHostVisible) &&
             (info.memProps.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT))) {
            pProperties->memoryTypeBits |= 1ull << i;
        }
    }
    return VK_SUCCESS;
}

zx_koid_t getEventKoid(zx_handle_t eventHandle) {
    if (eventHandle == ZX_HANDLE_INVALID) {
        return ZX_KOID_INVALID;
    }

    zx_info_handle_basic_t info;
    zx_status_t status = zx_object_get_info(eventHandle, ZX_INFO_HANDLE_BASIC, &info, sizeof(info),
                                            nullptr, nullptr);
    if (status != ZX_OK) {
        mesa_loge("Cannot get object info of handle %u: %d", eventHandle, status);
        return ZX_KOID_INVALID;
    }
    return info.koid;
}

VkResult ResourceTracker::on_vkImportSemaphoreZirconHandleFUCHSIA(
    void*, VkResult, VkDevice device, const VkImportSemaphoreZirconHandleInfoFUCHSIA* pInfo) {
    if (!pInfo) return VK_ERROR_INITIALIZATION_FAILED;
    if (!pInfo->semaphore) return VK_ERROR_INITIALIZATION_FAILED;

    std::lock_guard<std::recursive_mutex> lock(mLock);

    auto deviceIt = info_VkDevice.find(device);

    if (deviceIt == info_VkDevice.end()) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    auto semaphoreIt = info_VkSemaphore.find(pInfo->semaphore);

    if (semaphoreIt == info_VkSemaphore.end()) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    auto& info = semaphoreIt->second;

    if (info.eventHandle != ZX_HANDLE_INVALID) {
        zx_handle_close(info.eventHandle);
    }
#if VK_HEADER_VERSION < 174
    info.eventHandle = pInfo->handle;
#else   // VK_HEADER_VERSION >= 174
    info.eventHandle = pInfo->zirconHandle;
#endif  // VK_HEADER_VERSION < 174
    if (info.eventHandle != ZX_HANDLE_INVALID) {
        info.eventKoid = getEventKoid(info.eventHandle);
    }

    return VK_SUCCESS;
}

VkResult ResourceTracker::on_vkGetSemaphoreZirconHandleFUCHSIA(
    void*, VkResult, VkDevice device, const VkSemaphoreGetZirconHandleInfoFUCHSIA* pInfo,
    uint32_t* pHandle) {
    if (!pInfo) return VK_ERROR_INITIALIZATION_FAILED;
    if (!pInfo->semaphore) return VK_ERROR_INITIALIZATION_FAILED;

    std::lock_guard<std::recursive_mutex> lock(mLock);

    auto deviceIt = info_VkDevice.find(device);

    if (deviceIt == info_VkDevice.end()) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    auto semaphoreIt = info_VkSemaphore.find(pInfo->semaphore);

    if (semaphoreIt == info_VkSemaphore.end()) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    auto& info = semaphoreIt->second;

    if (info.eventHandle == ZX_HANDLE_INVALID) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    *pHandle = ZX_HANDLE_INVALID;
    zx_handle_duplicate(info.eventHandle, ZX_RIGHT_SAME_RIGHTS, pHandle);
    return VK_SUCCESS;
}

VkResult ResourceTracker::on_vkCreateBufferCollectionFUCHSIA(
    void*, VkResult, VkDevice, const VkBufferCollectionCreateInfoFUCHSIA* pInfo,
    const VkAllocationCallbacks*, VkBufferCollectionFUCHSIA* pCollection) {
    fidl::ClientEnd<::fuchsia_sysmem::BufferCollectionToken> token_client;

    if (pInfo->collectionToken) {
        token_client = fidl::ClientEnd<::fuchsia_sysmem::BufferCollectionToken>(
            zx::channel(pInfo->collectionToken));
    } else {
        auto endpoints = fidl::CreateEndpoints<::fuchsia_sysmem::BufferCollectionToken>();
        if (!endpoints.is_ok()) {
            mesa_loge("zx_channel_create failed: %d", endpoints.status_value());
            return VK_ERROR_INITIALIZATION_FAILED;
        }

        auto result = mSysmemAllocator->AllocateSharedCollection(std::move(endpoints->server));
        if (!result.ok()) {
            mesa_loge("AllocateSharedCollection failed: %d", result.status());
            return VK_ERROR_INITIALIZATION_FAILED;
        }
        token_client = std::move(endpoints->client);
    }

    auto endpoints = fidl::CreateEndpoints<::fuchsia_sysmem::BufferCollection>();
    if (!endpoints.is_ok()) {
        mesa_loge("zx_channel_create failed: %d", endpoints.status_value());
        return VK_ERROR_INITIALIZATION_FAILED;
    }
    auto [collection_client, collection_server] = std::move(endpoints.value());

    auto result = mSysmemAllocator->BindSharedCollection(std::move(token_client),
                                                         std::move(collection_server));
    if (!result.ok()) {
        mesa_loge("BindSharedCollection failed: %d", result.status());
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    auto* sysmem_collection =
        new fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>(std::move(collection_client));
    *pCollection = reinterpret_cast<VkBufferCollectionFUCHSIA>(sysmem_collection);

    register_VkBufferCollectionFUCHSIA(*pCollection);
    return VK_SUCCESS;
}

void ResourceTracker::on_vkDestroyBufferCollectionFUCHSIA(void*, VkResult, VkDevice,
                                                          VkBufferCollectionFUCHSIA collection,
                                                          const VkAllocationCallbacks*) {
    auto sysmem_collection =
        reinterpret_cast<fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(collection);
    if (sysmem_collection) {
        (*sysmem_collection)->Close();
    }
    delete sysmem_collection;

    unregister_VkBufferCollectionFUCHSIA(collection);
}

SetBufferCollectionImageConstraintsResult ResourceTracker::setBufferCollectionImageConstraintsImpl(
    VkEncoder* enc, VkDevice device,
    fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>* pCollection,
    const VkImageConstraintsInfoFUCHSIA* pImageConstraintsInfo) {
    const auto& collection = *pCollection;
    if (!pImageConstraintsInfo ||
        pImageConstraintsInfo->sType != VK_STRUCTURE_TYPE_IMAGE_CONSTRAINTS_INFO_FUCHSIA) {
        mesa_loge("%s: invalid pImageConstraintsInfo", __func__);
        return {VK_ERROR_INITIALIZATION_FAILED};
    }

    if (pImageConstraintsInfo->formatConstraintsCount == 0) {
        mesa_loge("%s: formatConstraintsCount must be greater than 0", __func__);
        abort();
    }

    fuchsia_sysmem::wire::BufferCollectionConstraints constraints =
        defaultBufferCollectionConstraints(
            /* min_size_bytes */ 0,
            pImageConstraintsInfo->bufferCollectionConstraints.minBufferCount,
            pImageConstraintsInfo->bufferCollectionConstraints.maxBufferCount,
            pImageConstraintsInfo->bufferCollectionConstraints.minBufferCountForCamping,
            pImageConstraintsInfo->bufferCollectionConstraints.minBufferCountForDedicatedSlack,
            pImageConstraintsInfo->bufferCollectionConstraints.minBufferCountForSharedSlack);

    std::vector<fuchsia_sysmem::wire::ImageFormatConstraints> format_constraints;

    VkPhysicalDevice physicalDevice;
    {
        std::lock_guard<std::recursive_mutex> lock(mLock);
        auto deviceIt = info_VkDevice.find(device);
        if (deviceIt == info_VkDevice.end()) {
            return {VK_ERROR_INITIALIZATION_FAILED};
        }
        physicalDevice = deviceIt->second.physdev;
    }

    std::vector<uint32_t> createInfoIndex;

    bool hasOptimalTiling = false;
    for (uint32_t i = 0; i < pImageConstraintsInfo->formatConstraintsCount; i++) {
        const VkImageCreateInfo* createInfo =
            &pImageConstraintsInfo->pFormatConstraints[i].imageCreateInfo;
        const VkImageFormatConstraintsInfoFUCHSIA* formatConstraints =
            &pImageConstraintsInfo->pFormatConstraints[i];

        // Add ImageFormatConstraints for *optimal* tiling
        VkResult optimalResult = VK_ERROR_FORMAT_NOT_SUPPORTED;
        if (createInfo->tiling == VK_IMAGE_TILING_OPTIMAL) {
            optimalResult = addImageBufferCollectionConstraintsFUCHSIA(
                enc, device, physicalDevice, formatConstraints, VK_IMAGE_TILING_OPTIMAL,
                &constraints);
            if (optimalResult == VK_SUCCESS) {
                createInfoIndex.push_back(i);
                hasOptimalTiling = true;
            }
        }

        // Add ImageFormatConstraints for *linear* tiling
        VkResult linearResult = addImageBufferCollectionConstraintsFUCHSIA(
            enc, device, physicalDevice, formatConstraints, VK_IMAGE_TILING_LINEAR, &constraints);
        if (linearResult == VK_SUCCESS) {
            createInfoIndex.push_back(i);
        }

        // Update usage and BufferMemoryConstraints
        if (linearResult == VK_SUCCESS || optimalResult == VK_SUCCESS) {
            constraints.usage.vulkan |= getBufferCollectionConstraintsVulkanImageUsage(createInfo);

            if (formatConstraints && formatConstraints->flags) {
                mesa_logw(
                    "%s: Non-zero flags (%08x) in image format "
                    "constraints; this is currently not supported, see "
                    "fxbug.dev/42147900.",
                    __func__, formatConstraints->flags);
            }
        }
    }

    // Set buffer memory constraints based on optimal/linear tiling support
    // and flags.
    VkImageConstraintsInfoFlagsFUCHSIA flags = pImageConstraintsInfo->flags;
    if (flags & VK_IMAGE_CONSTRAINTS_INFO_CPU_READ_RARELY_FUCHSIA)
        constraints.usage.cpu |= fuchsia_sysmem::wire::kCpuUsageRead;
    if (flags & VK_IMAGE_CONSTRAINTS_INFO_CPU_READ_OFTEN_FUCHSIA)
        constraints.usage.cpu |= fuchsia_sysmem::wire::kCpuUsageReadOften;
    if (flags & VK_IMAGE_CONSTRAINTS_INFO_CPU_WRITE_RARELY_FUCHSIA)
        constraints.usage.cpu |= fuchsia_sysmem::wire::kCpuUsageWrite;
    if (flags & VK_IMAGE_CONSTRAINTS_INFO_CPU_WRITE_OFTEN_FUCHSIA)
        constraints.usage.cpu |= fuchsia_sysmem::wire::kCpuUsageWriteOften;

    constraints.has_buffer_memory_constraints = true;
    auto& memory_constraints = constraints.buffer_memory_constraints;
    memory_constraints.cpu_domain_supported = true;
    memory_constraints.ram_domain_supported = true;
    memory_constraints.inaccessible_domain_supported =
        hasOptimalTiling && !(flags & (VK_IMAGE_CONSTRAINTS_INFO_CPU_READ_RARELY_FUCHSIA |
                                       VK_IMAGE_CONSTRAINTS_INFO_CPU_READ_OFTEN_FUCHSIA |
                                       VK_IMAGE_CONSTRAINTS_INFO_CPU_WRITE_RARELY_FUCHSIA |
                                       VK_IMAGE_CONSTRAINTS_INFO_CPU_WRITE_OFTEN_FUCHSIA));

    if (memory_constraints.inaccessible_domain_supported) {
        memory_constraints.heap_permitted_count = 2;
        memory_constraints.heap_permitted[0] = fuchsia_sysmem::wire::HeapType::kGoldfishDeviceLocal;
        memory_constraints.heap_permitted[1] = fuchsia_sysmem::wire::HeapType::kGoldfishHostVisible;
    } else {
        memory_constraints.heap_permitted_count = 1;
        memory_constraints.heap_permitted[0] = fuchsia_sysmem::wire::HeapType::kGoldfishHostVisible;
    }

    if (constraints.image_format_constraints_count == 0) {
        mesa_loge("%s: none of the specified formats is supported by the device", __func__);
        return {VK_ERROR_FORMAT_NOT_SUPPORTED};
    }

    constexpr uint32_t kVulkanPriority = 5;
    const char kName[] = "GoldfishSysmemShared";
    collection->SetName(kVulkanPriority, fidl::StringView(kName));

    auto result = collection->SetConstraints(true, constraints);
    if (!result.ok()) {
        mesa_loge("setBufferCollectionConstraints: SetConstraints failed: %d", result.status());
        return {VK_ERROR_INITIALIZATION_FAILED};
    }

    return {VK_SUCCESS, constraints, std::move(createInfoIndex)};
}

VkResult ResourceTracker::setBufferCollectionImageConstraintsFUCHSIA(
    VkEncoder* enc, VkDevice device,
    fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>* pCollection,
    const VkImageConstraintsInfoFUCHSIA* pImageConstraintsInfo) {
    const auto& collection = *pCollection;

    auto setConstraintsResult =
        setBufferCollectionImageConstraintsImpl(enc, device, pCollection, pImageConstraintsInfo);
    if (setConstraintsResult.result != VK_SUCCESS) {
        return setConstraintsResult.result;
    }

    // Copy constraints to info_VkBufferCollectionFUCHSIA if
    // |collection| is a valid VkBufferCollectionFUCHSIA handle.
    std::lock_guard<std::recursive_mutex> lock(mLock);
    VkBufferCollectionFUCHSIA buffer_collection =
        reinterpret_cast<VkBufferCollectionFUCHSIA>(pCollection);
    if (info_VkBufferCollectionFUCHSIA.find(buffer_collection) !=
        info_VkBufferCollectionFUCHSIA.end()) {
        info_VkBufferCollectionFUCHSIA[buffer_collection].constraints =
            std::make_optional<fuchsia_sysmem::wire::BufferCollectionConstraints>(
                std::move(setConstraintsResult.constraints));
        info_VkBufferCollectionFUCHSIA[buffer_collection].createInfoIndex =
            std::move(setConstraintsResult.createInfoIndex);
    }

    return VK_SUCCESS;
}

VkResult ResourceTracker::setBufferCollectionBufferConstraintsFUCHSIA(
    fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>* pCollection,
    const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo) {
    auto setConstraintsResult =
        setBufferCollectionBufferConstraintsImpl(pCollection, pBufferConstraintsInfo);
    if (setConstraintsResult.result != VK_SUCCESS) {
        return setConstraintsResult.result;
    }

    // Copy constraints to info_VkBufferCollectionFUCHSIA if
    // |collection| is a valid VkBufferCollectionFUCHSIA handle.
    std::lock_guard<std::recursive_mutex> lock(mLock);
    VkBufferCollectionFUCHSIA buffer_collection =
        reinterpret_cast<VkBufferCollectionFUCHSIA>(pCollection);
    if (info_VkBufferCollectionFUCHSIA.find(buffer_collection) !=
        info_VkBufferCollectionFUCHSIA.end()) {
        info_VkBufferCollectionFUCHSIA[buffer_collection].constraints =
            std::make_optional<fuchsia_sysmem::wire::BufferCollectionConstraints>(
                setConstraintsResult.constraints);
    }

    return VK_SUCCESS;
}

VkResult ResourceTracker::on_vkSetBufferCollectionImageConstraintsFUCHSIA(
    void* context, VkResult, VkDevice device, VkBufferCollectionFUCHSIA collection,
    const VkImageConstraintsInfoFUCHSIA* pImageConstraintsInfo) {
    VkEncoder* enc = (VkEncoder*)context;
    auto sysmem_collection =
        reinterpret_cast<fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(collection);
    return setBufferCollectionImageConstraintsFUCHSIA(enc, device, sysmem_collection,
                                                      pImageConstraintsInfo);
}

VkResult ResourceTracker::on_vkSetBufferCollectionBufferConstraintsFUCHSIA(
    void*, VkResult, VkDevice, VkBufferCollectionFUCHSIA collection,
    const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo) {
    auto sysmem_collection =
        reinterpret_cast<fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(collection);
    return setBufferCollectionBufferConstraintsFUCHSIA(sysmem_collection, pBufferConstraintsInfo);
}

VkResult ResourceTracker::getBufferCollectionImageCreateInfoIndexLocked(
    VkBufferCollectionFUCHSIA collection, fuchsia_sysmem::wire::BufferCollectionInfo2& info,
    uint32_t* outCreateInfoIndex) {
    if (!info_VkBufferCollectionFUCHSIA[collection].constraints.has_value()) {
        mesa_loge("%s: constraints not set", __func__);
        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    }

    if (!info.settings.has_image_format_constraints) {
        // No image format constraints; skip getting createInfoIndex.
        return VK_SUCCESS;
    }

    const auto& constraints = *info_VkBufferCollectionFUCHSIA[collection].constraints;
    const auto& createInfoIndices = info_VkBufferCollectionFUCHSIA[collection].createInfoIndex;
    const auto& out = info.settings.image_format_constraints;
    bool foundCreateInfo = false;

    for (size_t imageFormatIndex = 0;
         imageFormatIndex < constraints.image_format_constraints_count; imageFormatIndex++) {
        const auto& in = constraints.image_format_constraints[imageFormatIndex];
        // These checks are sorted in order of how often they're expected to
        // mismatch, from most likely to least likely. They aren't always
        // equality comparisons, since sysmem may change some values in
        // compatible ways on behalf of the other participants.
        if ((out.pixel_format.type != in.pixel_format.type) ||
            (out.pixel_format.has_format_modifier != in.pixel_format.has_format_modifier) ||
            (out.pixel_format.format_modifier.value != in.pixel_format.format_modifier.value) ||
            (out.min_bytes_per_row < in.min_bytes_per_row) ||
            (out.required_max_coded_width < in.required_max_coded_width) ||
            (out.required_max_coded_height < in.required_max_coded_height) ||
            (in.bytes_per_row_divisor != 0 &&
             out.bytes_per_row_divisor % in.bytes_per_row_divisor != 0)) {
            continue;
        }
        // Check if the out colorspaces are a subset of the in color spaces.
        bool all_color_spaces_found = true;
        for (uint32_t j = 0; j < out.color_spaces_count; j++) {
            bool found_matching_color_space = false;
            for (uint32_t k = 0; k < in.color_spaces_count; k++) {
                if (out.color_space[j].type == in.color_space[k].type) {
                    found_matching_color_space = true;
                    break;
                }
            }
            if (!found_matching_color_space) {
                all_color_spaces_found = false;
                break;
            }
        }
        if (!all_color_spaces_found) {
            continue;
        }

        // Choose the first valid format for now.
        *outCreateInfoIndex = createInfoIndices[imageFormatIndex];
        return VK_SUCCESS;
    }

    mesa_loge("%s: cannot find a valid image format in constraints", __func__);
    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
2746
2747 VkResult ResourceTracker::on_vkGetBufferCollectionPropertiesFUCHSIA(
2748 void* context, VkResult, VkDevice device, VkBufferCollectionFUCHSIA collection,
2749 VkBufferCollectionPropertiesFUCHSIA* pProperties) {
2750 VkEncoder* enc = (VkEncoder*)context;
2751 const auto& sysmem_collection =
2752 *reinterpret_cast<fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(collection);
2753
2754 auto result = sysmem_collection->WaitForBuffersAllocated();
2755 if (!result.ok() || result->status != ZX_OK) {
2756 mesa_loge("Failed wait for allocation: %d %d", result.status(),
2757 GET_STATUS_SAFE(result, status));
2758 return VK_ERROR_INITIALIZATION_FAILED;
2759 }
2760 fuchsia_sysmem::wire::BufferCollectionInfo2 info = std::move(result->buffer_collection_info);
2761
2762 bool is_host_visible =
2763 info.settings.buffer_settings.heap == fuchsia_sysmem::wire::HeapType::kGoldfishHostVisible;
2764 bool is_device_local =
2765 info.settings.buffer_settings.heap == fuchsia_sysmem::wire::HeapType::kGoldfishDeviceLocal;
2766 if (!is_host_visible && !is_device_local) {
2767         mesa_loge("buffer collection uses a non-goldfish heap (type 0x%llx)",
2768                   static_cast<unsigned long long>(info.settings.buffer_settings.heap));
2769 return VK_ERROR_INITIALIZATION_FAILED;
2770 }
2771
2772 // memoryTypeBits
2773 // ====================================================================
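    // Worked example: if the collection is device-local and the device exposes
    // memory types {0: DEVICE_LOCAL, 1: HOST_VISIBLE, 2: DEVICE_LOCAL}, the loop
    // below produces memoryTypeBits = 0b101 (types 0 and 2).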
2774 {
2775 std::lock_guard<std::recursive_mutex> lock(mLock);
2776 auto deviceIt = info_VkDevice.find(device);
2777 if (deviceIt == info_VkDevice.end()) {
2778 return VK_ERROR_INITIALIZATION_FAILED;
2779 }
2780 auto& deviceInfo = deviceIt->second;
2781
2782         // Pick every memory type whose locality matches the collection's heap.
2783 pProperties->memoryTypeBits = 0;
2784 for (uint32_t i = 0; i < deviceInfo.memProps.memoryTypeCount; ++i) {
2785 if ((is_device_local && (deviceInfo.memProps.memoryTypes[i].propertyFlags &
2786 VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)) ||
2787 (is_host_visible && (deviceInfo.memProps.memoryTypes[i].propertyFlags &
2788 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT))) {
2789 pProperties->memoryTypeBits |= 1ull << i;
2790 }
2791 }
2792 }
2793
2794 // bufferCount
2795 // ====================================================================
2796 pProperties->bufferCount = info.buffer_count;
2797
2798 auto storeProperties = [this, collection, pProperties]() -> VkResult {
2799 // store properties to storage
2800 std::lock_guard<std::recursive_mutex> lock(mLock);
2801 if (info_VkBufferCollectionFUCHSIA.find(collection) ==
2802 info_VkBufferCollectionFUCHSIA.end()) {
2803 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
2804 }
2805
2806 info_VkBufferCollectionFUCHSIA[collection].properties =
2807 std::make_optional<VkBufferCollectionPropertiesFUCHSIA>(*pProperties);
2808
2809 // We only do a shallow copy so we should remove all pNext pointers.
2810 info_VkBufferCollectionFUCHSIA[collection].properties->pNext = nullptr;
2811 info_VkBufferCollectionFUCHSIA[collection].properties->sysmemColorSpaceIndex.pNext =
2812 nullptr;
2813 return VK_SUCCESS;
2814 };
2815
2816 // The fields below only apply to buffer collections with image formats.
2817 if (!info.settings.has_image_format_constraints) {
2818 mesa_logd("%s: buffer collection doesn't have image format constraints", __func__);
2819 return storeProperties();
2820 }
2821
2822     // sysmemPixelFormat
2823 // ====================================================================
2824
2825 pProperties->sysmemPixelFormat =
2826 static_cast<uint64_t>(info.settings.image_format_constraints.pixel_format.type);
2827
2828 // colorSpace
2829 // ====================================================================
2830 if (info.settings.image_format_constraints.color_spaces_count == 0) {
2831 mesa_loge(
2832 "%s: color space missing from allocated buffer collection "
2833 "constraints",
2834 __func__);
2835 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
2836 }
2837 // Only report first colorspace for now.
2838 pProperties->sysmemColorSpaceIndex.colorSpace =
2839 static_cast<uint32_t>(info.settings.image_format_constraints.color_space[0].type);
2840
2841 // createInfoIndex
2842 // ====================================================================
2843 {
2844 std::lock_guard<std::recursive_mutex> lock(mLock);
2845 auto getIndexResult = getBufferCollectionImageCreateInfoIndexLocked(
2846 collection, info, &pProperties->createInfoIndex);
2847 if (getIndexResult != VK_SUCCESS) {
2848 return getIndexResult;
2849 }
2850 }
2851
2852 // formatFeatures
2853 // ====================================================================
2854 VkPhysicalDevice physicalDevice;
2855 {
2856 std::lock_guard<std::recursive_mutex> lock(mLock);
2857 auto deviceIt = info_VkDevice.find(device);
2858 if (deviceIt == info_VkDevice.end()) {
2859 return VK_ERROR_INITIALIZATION_FAILED;
2860 }
2861 physicalDevice = deviceIt->second.physdev;
2862 }
2863
2864 VkFormat vkFormat =
2865 sysmemPixelFormatTypeToVk(info.settings.image_format_constraints.pixel_format.type);
2866 VkFormatProperties formatProperties;
2867 enc->vkGetPhysicalDeviceFormatProperties(physicalDevice, vkFormat, &formatProperties,
2868 true /* do lock */);
2869 if (is_device_local) {
2870 pProperties->formatFeatures = formatProperties.optimalTilingFeatures;
2871 }
2872 if (is_host_visible) {
2873 pProperties->formatFeatures = formatProperties.linearTilingFeatures;
2874 }
2875
2876 // YCbCr properties
2877 // ====================================================================
2878 // TODO(59804): Implement this correctly when we support YUV pixel
2879 // formats in goldfish ICD.
2880 pProperties->samplerYcbcrConversionComponents.r = VK_COMPONENT_SWIZZLE_IDENTITY;
2881 pProperties->samplerYcbcrConversionComponents.g = VK_COMPONENT_SWIZZLE_IDENTITY;
2882 pProperties->samplerYcbcrConversionComponents.b = VK_COMPONENT_SWIZZLE_IDENTITY;
2883 pProperties->samplerYcbcrConversionComponents.a = VK_COMPONENT_SWIZZLE_IDENTITY;
2884 pProperties->suggestedYcbcrModel = VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY;
2885 pProperties->suggestedYcbcrRange = VK_SAMPLER_YCBCR_RANGE_ITU_FULL;
2886 pProperties->suggestedXChromaOffset = VK_CHROMA_LOCATION_MIDPOINT;
2887 pProperties->suggestedYChromaOffset = VK_CHROMA_LOCATION_MIDPOINT;
2888
2889 return storeProperties();
2890 }
2891 #endif
2892
2893 static uint32_t getVirglFormat(VkFormat vkFormat) {
2894 uint32_t virglFormat = 0;
2895
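    // All component interpretations sharing a bit layout (UNORM/SNORM/SRGB/...)
    // collapse to one virgl format, since only the memory layout matters for the
    // backing host resource; unhandled formats fall through and return 0.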
2896 switch (vkFormat) {
2897 case VK_FORMAT_R8G8B8A8_SINT:
2898 case VK_FORMAT_R8G8B8A8_UNORM:
2899 case VK_FORMAT_R8G8B8A8_SRGB:
2900 case VK_FORMAT_R8G8B8A8_SNORM:
2901 case VK_FORMAT_R8G8B8A8_SSCALED:
2902 case VK_FORMAT_R8G8B8A8_USCALED:
2903 virglFormat = VIRGL_FORMAT_R8G8B8A8_UNORM;
2904 break;
2905 case VK_FORMAT_B8G8R8A8_SINT:
2906 case VK_FORMAT_B8G8R8A8_UNORM:
2907 case VK_FORMAT_B8G8R8A8_SRGB:
2908 case VK_FORMAT_B8G8R8A8_SNORM:
2909 case VK_FORMAT_B8G8R8A8_SSCALED:
2910 case VK_FORMAT_B8G8R8A8_USCALED:
2911 virglFormat = VIRGL_FORMAT_B8G8R8A8_UNORM;
2912 break;
2913 case VK_FORMAT_A2R10G10B10_UNORM_PACK32:
2914 virglFormat = VIRGL_FORMAT_R10G10B10A2_UNORM;
2915 break;
2916 default:
2917 break;
2918 }
2919
2920 return virglFormat;
2921 }
2922
2923 CoherentMemoryPtr ResourceTracker::createCoherentMemory(
2924 VkDevice device, VkDeviceMemory mem, const VkMemoryAllocateInfo& hostAllocationInfo,
2925 VkEncoder* enc, VkResult& res) {
2926 CoherentMemoryPtr coherentMemory = nullptr;
2927
2928 #if DETECT_OS_ANDROID
2929 if (mFeatureInfo.hasDirectMem) {
2930 uint64_t gpuAddr = 0;
2931 GoldfishAddressSpaceBlockPtr block = nullptr;
2932 res = enc->vkMapMemoryIntoAddressSpaceGOOGLE(device, mem, &gpuAddr, true);
2933 if (res != VK_SUCCESS) {
2934 mesa_loge(
2935 "Failed to create coherent memory: vkMapMemoryIntoAddressSpaceGOOGLE "
2936 "returned:%d.",
2937 res);
2938 return coherentMemory;
2939 }
2940 {
2941 std::lock_guard<std::recursive_mutex> lock(mLock);
2942 auto it = info_VkDeviceMemory.find(mem);
2943 if (it == info_VkDeviceMemory.end()) {
2944 mesa_loge("Failed to create coherent memory: failed to find device memory.");
2945 res = VK_ERROR_OUT_OF_HOST_MEMORY;
2946 return coherentMemory;
2947 }
2948 auto& info = it->second;
2949 block = info.goldfishBlock;
2950 info.goldfishBlock = nullptr;
2951
2952 coherentMemory = std::make_shared<CoherentMemory>(
2953 block, gpuAddr, hostAllocationInfo.allocationSize, device, mem);
2954 }
2955 } else
2956 #endif // DETECT_OS_ANDROID
2957 if (mFeatureInfo.hasVirtioGpuNext) {
2958 struct VirtGpuCreateBlob createBlob = {0};
2959 uint64_t hvaSizeId[3];
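        // hvaSizeId receives the host virtual address, the host allocation size,
        // and the blob id; only the blob id (hvaSizeId[2]) is consumed below when
        // creating the mappable host3d blob.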
2960 res = enc->vkGetMemoryHostAddressInfoGOOGLE(device, mem, &hvaSizeId[0], &hvaSizeId[1],
2961 &hvaSizeId[2], true /* do lock */);
2962 if (res != VK_SUCCESS) {
2963             mesa_loge(
2964                 "Failed to create coherent memory: vkGetMemoryHostAddressInfoGOOGLE "
2965                 "returned:%d.",
2966                 res);
2967 return coherentMemory;
2968 }
2969 {
2970 std::lock_guard<std::recursive_mutex> lock(mLock);
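            // Note: the magic number 3 here is assumed to be the gfxstream Vulkan
            // capset (kCapsetGfxStreamVulkan in the virtgpu gfxstream protocol).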
2971 VirtGpuDevice* instance = VirtGpuDevice::getInstance((enum VirtGpuCapset)3);
2972 createBlob.blobMem = kBlobMemHost3d;
2973 createBlob.flags = kBlobFlagMappable;
2974 createBlob.blobId = hvaSizeId[2];
2975 createBlob.size = hostAllocationInfo.allocationSize;
2976
2977 auto blob = instance->createBlob(createBlob);
2978 if (!blob) {
2979 mesa_loge("Failed to create coherent memory: failed to create blob.");
2980 res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
2981 return coherentMemory;
2982 }
2983
2984 VirtGpuResourceMappingPtr mapping = blob->createMapping();
2985 if (!mapping) {
2986 mesa_loge("Failed to create coherent memory: failed to create blob mapping.");
2987 res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
2988 return coherentMemory;
2989 }
2990
2991 coherentMemory =
2992 std::make_shared<CoherentMemory>(mapping, createBlob.size, device, mem);
2993 }
2994 } else {
2995 mesa_loge("FATAL: Unsupported virtual memory feature");
2996 abort();
2997 }
2998 return coherentMemory;
2999 }
3000
3001 VkResult ResourceTracker::allocateCoherentMemory(VkDevice device,
3002 const VkMemoryAllocateInfo* pAllocateInfo,
3003 VkEncoder* enc, VkDeviceMemory* pMemory) {
3004 uint64_t offset = 0;
3005 uint8_t* ptr = nullptr;
3006 VkMemoryAllocateFlagsInfo allocFlagsInfo;
3007 VkMemoryOpaqueCaptureAddressAllocateInfo opaqueCaptureAddressAllocInfo;
3008 VkCreateBlobGOOGLE createBlobInfo;
3009 VirtGpuResourcePtr guestBlob = nullptr;
3010
3011 memset(&createBlobInfo, 0, sizeof(struct VkCreateBlobGOOGLE));
3012 createBlobInfo.sType = VK_STRUCTURE_TYPE_CREATE_BLOB_GOOGLE;
3013
3014 const VkMemoryAllocateFlagsInfo* allocFlagsInfoPtr =
3015 vk_find_struct_const(pAllocateInfo, MEMORY_ALLOCATE_FLAGS_INFO);
3016 const VkMemoryOpaqueCaptureAddressAllocateInfo* opaqueCaptureAddressAllocInfoPtr =
3017 vk_find_struct_const(pAllocateInfo, MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO);
3018
3019 bool deviceAddressMemoryAllocation =
3020 allocFlagsInfoPtr &&
3021 ((allocFlagsInfoPtr->flags & VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT) ||
3022 (allocFlagsInfoPtr->flags & VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT));
3023
3024 bool dedicated = deviceAddressMemoryAllocation;
3025
3026 if (mCaps.vulkanCapset.deferredMapping || mCaps.params[kParamCreateGuestHandle])
3027 dedicated = true;
3028
3029 VkMemoryAllocateInfo hostAllocationInfo = vk_make_orphan_copy(*pAllocateInfo);
3030 vk_struct_chain_iterator structChainIter = vk_make_chain_iterator(&hostAllocationInfo);
3031
3032 if (mCaps.vulkanCapset.deferredMapping || mCaps.params[kParamCreateGuestHandle]) {
3033 hostAllocationInfo.allocationSize =
3034 ALIGN_POT(pAllocateInfo->allocationSize, mCaps.vulkanCapset.blobAlignment);
3035 } else if (dedicated) {
3036         // Over-align to kLargestPageSize to work around a bug in some Windows
3037         // drivers (b:152769369). Can likely have the host report the desired alignment.
3038 hostAllocationInfo.allocationSize =
3039 ALIGN_POT(pAllocateInfo->allocationSize, kLargestPageSize);
3040 } else {
3041 VkDeviceSize roundedUpAllocSize = ALIGN_POT(pAllocateInfo->allocationSize, kMegaByte);
3042 hostAllocationInfo.allocationSize = std::max(roundedUpAllocSize, kDefaultHostMemBlockSize);
3043 }
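    // Sizing policy, by example: with deferred mapping, a 5000-byte request is
    // rounded up to the capset's blobAlignment; a dedicated allocation is rounded
    // up to kLargestPageSize; anything else is rounded up to a whole megabyte and
    // clamped to at least kDefaultHostMemBlockSize, so several small allocations
    // can be suballocated out of one host block.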
3044
3045 // Support device address capture/replay allocations
3046 if (deviceAddressMemoryAllocation) {
3047 if (allocFlagsInfoPtr) {
3048 mesa_logd("%s: has alloc flags\n", __func__);
3049 allocFlagsInfo = *allocFlagsInfoPtr;
3050 vk_append_struct(&structChainIter, &allocFlagsInfo);
3051 }
3052
3053 if (opaqueCaptureAddressAllocInfoPtr) {
3054 mesa_logd("%s: has opaque capture address\n", __func__);
3055 opaqueCaptureAddressAllocInfo = *opaqueCaptureAddressAllocInfoPtr;
3056 vk_append_struct(&structChainIter, &opaqueCaptureAddressAllocInfo);
3057 }
3058 }
3059
3060 if (mCaps.params[kParamCreateGuestHandle]) {
3061 struct VirtGpuCreateBlob createBlob = {0};
3062 struct VirtGpuExecBuffer exec = {};
3063 VirtGpuDevice* instance = VirtGpuDevice::getInstance();
3064 struct gfxstreamPlaceholderCommandVk placeholderCmd = {};
3065
3066 createBlobInfo.blobId = ++mAtomicId;
3067 createBlobInfo.blobMem = kBlobMemGuest;
3068 createBlobInfo.blobFlags = kBlobFlagCreateGuestHandle;
3069 vk_append_struct(&structChainIter, &createBlobInfo);
3070
3071 createBlob.blobMem = kBlobMemGuest;
3072 createBlob.flags = kBlobFlagCreateGuestHandle;
3073 createBlob.blobId = createBlobInfo.blobId;
3074 createBlob.size = hostAllocationInfo.allocationSize;
3075
3076 guestBlob = instance->createBlob(createBlob);
3077 if (!guestBlob) {
3078 mesa_loge("Failed to allocate coherent memory: failed to create blob.");
3079 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3080 }
3081
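        // Submit a placeholder command on ring 1 and then wait on the blob: this
        // acts purely as a fence so the host has processed the guest-handle
        // creation before the memory is used; it carries no real workload.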
3082 placeholderCmd.hdr.opCode = GFXSTREAM_PLACEHOLDER_COMMAND_VK;
3083 exec.command = static_cast<void*>(&placeholderCmd);
3084 exec.command_size = sizeof(placeholderCmd);
3085 exec.flags = kRingIdx;
3086 exec.ring_idx = 1;
3087 if (instance->execBuffer(exec, guestBlob.get())) {
3088 mesa_loge("Failed to allocate coherent memory: failed to execbuffer for wait.");
3089 return VK_ERROR_OUT_OF_HOST_MEMORY;
3090 }
3091
3092 guestBlob->wait();
3093 } else if (mCaps.vulkanCapset.deferredMapping) {
3094 createBlobInfo.blobId = ++mAtomicId;
3095 createBlobInfo.blobMem = kBlobMemHost3d;
3096 vk_append_struct(&structChainIter, &createBlobInfo);
3097 }
3098
3099 VkDeviceMemory mem = VK_NULL_HANDLE;
3100 VkResult host_res =
3101 enc->vkAllocateMemory(device, &hostAllocationInfo, nullptr, &mem, true /* do lock */);
3102 if (host_res != VK_SUCCESS) {
3103 mesa_loge("Failed to allocate coherent memory: failed to allocate on the host: %d.",
3104 host_res);
3105 return host_res;
3106 }
3107
3108 struct VkDeviceMemory_Info info;
3109 if (mCaps.vulkanCapset.deferredMapping || mCaps.params[kParamCreateGuestHandle]) {
3110 info.allocationSize = pAllocateInfo->allocationSize;
3111 info.blobId = createBlobInfo.blobId;
3112 }
3113
3114 if (guestBlob) {
3115 auto mapping = guestBlob->createMapping();
3116 if (!mapping) {
3117 mesa_loge("Failed to allocate coherent memory: failed to create blob mapping.");
3118 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3119 }
3120
3121 auto coherentMemory = std::make_shared<CoherentMemory>(
3122 mapping, hostAllocationInfo.allocationSize, device, mem);
3123
3124 coherentMemory->subAllocate(pAllocateInfo->allocationSize, &ptr, offset);
3125 info.coherentMemoryOffset = offset;
3126 info.coherentMemory = coherentMemory;
3127 info.ptr = ptr;
3128 }
3129
3130 info.coherentMemorySize = hostAllocationInfo.allocationSize;
3131 info.memoryTypeIndex = hostAllocationInfo.memoryTypeIndex;
3132 info.device = device;
3133 info.dedicated = dedicated;
3134 {
3135         // createCoherentMemory (called below) needs to look up this entry in
3136         // info_VkDeviceMemory, so store it before use.
3137 std::lock_guard<std::recursive_mutex> lock(mLock);
3138 info_VkDeviceMemory[mem] = info;
3139 }
3140
3141 if (mCaps.vulkanCapset.deferredMapping || mCaps.params[kParamCreateGuestHandle]) {
3142 *pMemory = mem;
3143 return host_res;
3144 }
3145
3146 auto coherentMemory = createCoherentMemory(device, mem, hostAllocationInfo, enc, host_res);
3147 if (coherentMemory) {
3148 std::lock_guard<std::recursive_mutex> lock(mLock);
3149 coherentMemory->subAllocate(pAllocateInfo->allocationSize, &ptr, offset);
3150 info.allocationSize = pAllocateInfo->allocationSize;
3151 info.coherentMemoryOffset = offset;
3152 info.coherentMemory = coherentMemory;
3153 info.ptr = ptr;
3154 info_VkDeviceMemory[mem] = info;
3155 *pMemory = mem;
3156 } else {
3157 enc->vkFreeMemory(device, mem, nullptr, true);
3158 std::lock_guard<std::recursive_mutex> lock(mLock);
3159 info_VkDeviceMemory.erase(mem);
3160 }
3161 return host_res;
3162 }
3163
3164 VkResult ResourceTracker::getCoherentMemory(const VkMemoryAllocateInfo* pAllocateInfo,
3165 VkEncoder* enc, VkDevice device,
3166 VkDeviceMemory* pMemory) {
3167 // Add buffer device address capture structs
3168 const VkMemoryAllocateFlagsInfo* allocFlagsInfoPtr =
3169 vk_find_struct_const(pAllocateInfo, MEMORY_ALLOCATE_FLAGS_INFO);
3170
3171 bool dedicated =
3172 allocFlagsInfoPtr &&
3173 ((allocFlagsInfoPtr->flags & VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT) ||
3174 (allocFlagsInfoPtr->flags & VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT));
3175
3176 if (mCaps.vulkanCapset.deferredMapping || mCaps.params[kParamCreateGuestHandle])
3177 dedicated = true;
3178
3179 CoherentMemoryPtr coherentMemory = nullptr;
3180 uint8_t* ptr = nullptr;
3181 uint64_t offset = 0;
3182 {
3183 std::lock_guard<std::recursive_mutex> lock(mLock);
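        // First, try to suballocate out of an existing CoherentMemory block.
        // Reuse requires the same device and memory type index, that neither the
        // block nor the request is dedicated, and enough free space in the block.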
3184 for (const auto& [memory, info] : info_VkDeviceMemory) {
3185 if (info.device != device) continue;
3186
3187 if (info.memoryTypeIndex != pAllocateInfo->memoryTypeIndex) continue;
3188
3189 if (info.dedicated || dedicated) continue;
3190
3191 if (!info.coherentMemory) continue;
3192
3193 if (!info.coherentMemory->subAllocate(pAllocateInfo->allocationSize, &ptr, offset))
3194 continue;
3195
3196 coherentMemory = info.coherentMemory;
3197 break;
3198 }
3199 if (coherentMemory) {
3200 struct VkDeviceMemory_Info info;
3201 info.coherentMemoryOffset = offset;
3202 info.ptr = ptr;
3203 info.memoryTypeIndex = pAllocateInfo->memoryTypeIndex;
3204 info.allocationSize = pAllocateInfo->allocationSize;
3205 info.coherentMemory = coherentMemory;
3206 info.device = device;
3207
3208             // For suballocated memory, create an alias VkDeviceMemory handle for the
3209             // application; the actual backing memory remains the VkDeviceMemory
3210             // associated with the CoherentMemory block.
3211 auto mem = new_from_host_VkDeviceMemory(VK_NULL_HANDLE);
3212 info_VkDeviceMemory[mem] = info;
3213 *pMemory = mem;
3214 return VK_SUCCESS;
3215 }
3216 }
3217 return allocateCoherentMemory(device, pAllocateInfo, enc, pMemory);
3218 }
3219
3220 VkResult ResourceTracker::on_vkAllocateMemory(void* context, VkResult input_result, VkDevice device,
3221 const VkMemoryAllocateInfo* pAllocateInfo,
3222 const VkAllocationCallbacks* pAllocator,
3223 VkDeviceMemory* pMemory) {
3224 #define _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(result) \
3225 { \
3226 auto it = info_VkDevice.find(device); \
3227 if (it == info_VkDevice.end()) return result; \
3228 emitDeviceMemoryReport(it->second, \
3229 VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATION_FAILED_EXT, 0, \
3230 pAllocateInfo->allocationSize, VK_OBJECT_TYPE_DEVICE_MEMORY, 0, \
3231 pAllocateInfo->memoryTypeIndex); \
3232 return result; \
3233 }
3234
3235 if (input_result != VK_SUCCESS) _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(input_result);
3236
3237 VkEncoder* enc = (VkEncoder*)context;
3238
3239 bool hasDedicatedImage = false;
3240 bool hasDedicatedBuffer = false;
3241
3242 VkMemoryAllocateInfo finalAllocInfo = vk_make_orphan_copy(*pAllocateInfo);
3243 vk_struct_chain_iterator structChainIter = vk_make_chain_iterator(&finalAllocInfo);
3244
3245 VkMemoryAllocateFlagsInfo allocFlagsInfo;
3246 VkMemoryOpaqueCaptureAddressAllocateInfo opaqueCaptureAddressAllocInfo;
3247
3248 // Add buffer device address capture structs
3249 const VkMemoryAllocateFlagsInfo* allocFlagsInfoPtr =
3250 vk_find_struct_const(pAllocateInfo, MEMORY_ALLOCATE_FLAGS_INFO);
3251 const VkMemoryOpaqueCaptureAddressAllocateInfo* opaqueCaptureAddressAllocInfoPtr =
3252 vk_find_struct_const(pAllocateInfo, MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO);
3253
3254 if (allocFlagsInfoPtr) {
3255 mesa_logd("%s: has alloc flags\n", __func__);
3256 allocFlagsInfo = *allocFlagsInfoPtr;
3257 vk_append_struct(&structChainIter, &allocFlagsInfo);
3258 }
3259
3260 if (opaqueCaptureAddressAllocInfoPtr) {
3261 mesa_logd("%s: has opaque capture address\n", __func__);
3262 opaqueCaptureAddressAllocInfo = *opaqueCaptureAddressAllocInfoPtr;
3263 vk_append_struct(&structChainIter, &opaqueCaptureAddressAllocInfo);
3264 }
3265
3266 VkMemoryDedicatedAllocateInfo dedicatedAllocInfo;
3267 VkImportColorBufferGOOGLE importCbInfo = {
3268 VK_STRUCTURE_TYPE_IMPORT_COLOR_BUFFER_GOOGLE,
3269 0,
3270 };
3271 VkImportBufferGOOGLE importBufferInfo = {
3272 VK_STRUCTURE_TYPE_IMPORT_BUFFER_GOOGLE,
3273 0,
3274 };
3275 // VkImportPhysicalAddressGOOGLE importPhysAddrInfo = {
3276 // VK_STRUCTURE_TYPE_IMPORT_PHYSICAL_ADDRESS_GOOGLE, 0,
3277 // };
3278
3279 const VkExportMemoryAllocateInfo* exportAllocateInfoPtr =
3280 vk_find_struct_const(pAllocateInfo, EXPORT_MEMORY_ALLOCATE_INFO);
3281
3282 #ifdef VK_USE_PLATFORM_ANDROID_KHR
3283 const VkImportAndroidHardwareBufferInfoANDROID* importAhbInfoPtr =
3284 vk_find_struct_const(pAllocateInfo, IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID);
3285     // Even if we export allocate, the underlying operation for the host is
3286     // always going to be an import operation: even in an export allocation, we
3287     // perform the AHardwareBuffer allocation on the guest side, at this layer,
3288     // and then attach a new VkDeviceMemory to that AHardwareBuffer on the host
3289     // via an "import" operation. This is also how Intel's implementation works,
3290     // and is generally simpler.
3294 AHardwareBuffer* ahw = nullptr;
3295 #else
3296 const void* importAhbInfoPtr = nullptr;
3297 void* ahw = nullptr;
3298 #endif
3299
3300 #if DETECT_OS_LINUX && !defined(VK_USE_PLATFORM_ANDROID_KHR)
3301 const VkImportMemoryFdInfoKHR* importFdInfoPtr =
3302 vk_find_struct_const(pAllocateInfo, IMPORT_MEMORY_FD_INFO_KHR);
3303 #else
3304 const VkImportMemoryFdInfoKHR* importFdInfoPtr = nullptr;
3305 #endif
3306
3307 #ifdef VK_USE_PLATFORM_FUCHSIA
3308 const VkImportMemoryBufferCollectionFUCHSIA* importBufferCollectionInfoPtr =
3309 vk_find_struct_const(pAllocateInfo, IMPORT_MEMORY_BUFFER_COLLECTION_FUCHSIA);
3310
3311 const VkImportMemoryZirconHandleInfoFUCHSIA* importVmoInfoPtr =
3312 vk_find_struct_const(pAllocateInfo, IMPORT_MEMORY_ZIRCON_HANDLE_INFO_FUCHSIA);
3313 #else
3314 const void* importBufferCollectionInfoPtr = nullptr;
3315 const void* importVmoInfoPtr = nullptr;
3316 #endif // VK_USE_PLATFORM_FUCHSIA
3317
3318 const VkMemoryDedicatedAllocateInfo* dedicatedAllocInfoPtr =
3319 vk_find_struct_const(pAllocateInfo, MEMORY_DEDICATED_ALLOCATE_INFO);
3320
3321 // Note for AHardwareBuffers, the Vulkan spec states:
3322 //
3323 // Android hardware buffers have intrinsic width, height, format, and usage
3324 // properties, so Vulkan images bound to memory imported from an Android
3325 // hardware buffer must use dedicated allocations
3326 //
3327 // so any allocation requests with a VkImportAndroidHardwareBufferInfoANDROID
3328 // will necessarily have a VkMemoryDedicatedAllocateInfo. However, the host
3329 // may or may not actually use a dedicated allocation to emulate
3330 // AHardwareBuffers. As such, the VkMemoryDedicatedAllocateInfo is passed to the
3331 // host and the host will decide whether or not to use it.
3332
3333 bool shouldPassThroughDedicatedAllocInfo =
3334 !exportAllocateInfoPtr && !importBufferCollectionInfoPtr && !importVmoInfoPtr;
3335
3336 const VkPhysicalDeviceMemoryProperties& physicalDeviceMemoryProps =
3337 getPhysicalDeviceMemoryProperties(context, device, VK_NULL_HANDLE);
3338
3339 const bool requestedMemoryIsHostVisible =
3340 isHostVisible(&physicalDeviceMemoryProps, pAllocateInfo->memoryTypeIndex);
3341
3342 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || DETECT_OS_LINUX
3343 shouldPassThroughDedicatedAllocInfo &= !requestedMemoryIsHostVisible;
3344 #endif  // defined(VK_USE_PLATFORM_ANDROID_KHR) || DETECT_OS_LINUX
3345
3346 if (shouldPassThroughDedicatedAllocInfo && dedicatedAllocInfoPtr) {
3347 dedicatedAllocInfo = vk_make_orphan_copy(*dedicatedAllocInfoPtr);
3348 vk_append_struct(&structChainIter, &dedicatedAllocInfo);
3349 }
3350
3351 // State needed for import/export.
3352 bool exportAhb = false;
3353 bool exportVmo = false;
3354 bool exportDmabuf = false;
3355 bool importAhb = false;
3356 bool importBufferCollection = false;
3357 bool importVmo = false;
3358 bool importDmabuf = false;
3359 (void)exportVmo;
3360 (void)exportAhb;
3361
3362 if (exportAllocateInfoPtr) {
3363 exportAhb = exportAllocateInfoPtr->handleTypes &
3364 VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
3365 #ifdef VK_USE_PLATFORM_FUCHSIA
3366 exportVmo = exportAllocateInfoPtr->handleTypes &
3367 VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA;
3368 #endif // VK_USE_PLATFORM_FUCHSIA
3369 exportDmabuf =
3370 exportAllocateInfoPtr->handleTypes & (VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
3371 VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
3372 } else if (importAhbInfoPtr) {
3373 importAhb = true;
3374 } else if (importBufferCollectionInfoPtr) {
3375 importBufferCollection = true;
3376 } else if (importVmoInfoPtr) {
3377 importVmo = true;
3378 }
3379
3380 if (importFdInfoPtr) {
3381 importDmabuf =
3382 (importFdInfoPtr->handleType & (VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
3383 VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT));
3384 }
3385 bool isImport = importAhb || importBufferCollection || importVmo || importDmabuf;
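    // From here there are three ways the allocation can complete:
    //   1. memory that is not directly mappable in the guest, or that is backed
    //      by an AHardwareBuffer / external blob: a plain host vkAllocateMemory;
    //   2. Fuchsia VMO-backed memory: host allocation, then mapping the VMO;
    //   3. host-visible memory with direct mapping, via getCoherentMemory().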
3386
3387 #if defined(VK_USE_PLATFORM_ANDROID_KHR)
3388 if (exportAhb) {
3389 hasDedicatedImage =
3390 dedicatedAllocInfoPtr && (dedicatedAllocInfoPtr->image != VK_NULL_HANDLE);
3391 hasDedicatedBuffer =
3392 dedicatedAllocInfoPtr && (dedicatedAllocInfoPtr->buffer != VK_NULL_HANDLE);
3393 VkExtent3D imageExtent = {0, 0, 0};
3394 uint32_t imageLayers = 0;
3395 VkFormat imageFormat = VK_FORMAT_UNDEFINED;
3396 VkImageUsageFlags imageUsage = 0;
3397 VkImageCreateFlags imageCreateFlags = 0;
3398 VkDeviceSize bufferSize = 0;
3399 VkDeviceSize allocationInfoAllocSize = finalAllocInfo.allocationSize;
3400
3401 if (hasDedicatedImage) {
3402 std::lock_guard<std::recursive_mutex> lock(mLock);
3403
3404 auto it = info_VkImage.find(dedicatedAllocInfoPtr->image);
3405 if (it == info_VkImage.end())
3406 _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(VK_ERROR_INITIALIZATION_FAILED);
3407 const auto& info = it->second;
3408 const auto& imgCi = info.createInfo;
3409
3410 imageExtent = imgCi.extent;
3411 imageLayers = imgCi.arrayLayers;
3412 imageFormat = imgCi.format;
3413 imageUsage = imgCi.usage;
3414 imageCreateFlags = imgCi.flags;
3415 }
3416
3417 if (hasDedicatedBuffer) {
3418 std::lock_guard<std::recursive_mutex> lock(mLock);
3419
3420 auto it = info_VkBuffer.find(dedicatedAllocInfoPtr->buffer);
3421 if (it == info_VkBuffer.end())
3422 _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(VK_ERROR_INITIALIZATION_FAILED);
3423 const auto& info = it->second;
3424 const auto& bufCi = info.createInfo;
3425
3426 bufferSize = bufCi.size;
3427 }
3428
3429 VkResult ahbCreateRes = createAndroidHardwareBuffer(
3430 mGralloc.get(), hasDedicatedImage, hasDedicatedBuffer, imageExtent, imageLayers,
3431 imageFormat, imageUsage, imageCreateFlags, bufferSize, allocationInfoAllocSize, &ahw);
3432
3433 if (ahbCreateRes != VK_SUCCESS) {
3434 _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(ahbCreateRes);
3435 }
3436 }
3437
3438 if (importAhb) {
3439 ahw = importAhbInfoPtr->buffer;
3440 // We still need to acquire the AHardwareBuffer.
3441 importAndroidHardwareBuffer(mGralloc.get(), importAhbInfoPtr, nullptr);
3442 }
3443
3444 if (ahw) {
3445 const uint32_t hostHandle = mGralloc->getHostHandle(ahw);
3446 if (mGralloc->getFormat(ahw) == AHARDWAREBUFFER_FORMAT_BLOB &&
3447 !mGralloc->treatBlobAsImage()) {
3448 importBufferInfo.buffer = hostHandle;
3449 vk_append_struct(&structChainIter, &importBufferInfo);
3450 } else {
3451 importCbInfo.colorBuffer = hostHandle;
3452 vk_append_struct(&structChainIter, &importCbInfo);
3453 }
3454 }
3455 #endif
3456 zx_handle_t vmo_handle = ZX_HANDLE_INVALID;
3457
3458 #ifdef VK_USE_PLATFORM_FUCHSIA
3459 if (importBufferCollection) {
3460 const auto& collection =
3461 *reinterpret_cast<fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(
3462 importBufferCollectionInfoPtr->collection);
3463 auto result = collection->WaitForBuffersAllocated();
3464 if (!result.ok() || result->status != ZX_OK) {
3465 mesa_loge("WaitForBuffersAllocated failed: %d %d", result.status(),
3466 GET_STATUS_SAFE(result, status));
3467 _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(VK_ERROR_INITIALIZATION_FAILED);
3468 }
3469 fuchsia_sysmem::wire::BufferCollectionInfo2& info = result->buffer_collection_info;
3470 uint32_t index = importBufferCollectionInfoPtr->index;
3471         if (index >= info.buffer_count) {
3472 mesa_loge("Invalid buffer index: %d", index);
3473 _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(VK_ERROR_INITIALIZATION_FAILED);
3474 }
3475 vmo_handle = info.buffers[index].vmo.release();
3476 }
3477
3478 if (importVmo) {
3479 vmo_handle = importVmoInfoPtr->handle;
3480 }
3481
3482 if (exportVmo) {
3483 hasDedicatedImage =
3484 dedicatedAllocInfoPtr && (dedicatedAllocInfoPtr->image != VK_NULL_HANDLE);
3485 hasDedicatedBuffer =
3486 dedicatedAllocInfoPtr && (dedicatedAllocInfoPtr->buffer != VK_NULL_HANDLE);
3487
3488 if (hasDedicatedImage && hasDedicatedBuffer) {
3489             mesa_loge(
3490                 "Invalid VkMemoryDedicatedAllocateInfo: At least one "
3491                 "of image and buffer must be VK_NULL_HANDLE.");
3492 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3493 }
3494
3495 const VkImageCreateInfo* pImageCreateInfo = nullptr;
3496
3497 VkBufferConstraintsInfoFUCHSIA bufferConstraintsInfo = {
3498             .sType = VK_STRUCTURE_TYPE_BUFFER_CONSTRAINTS_INFO_FUCHSIA,
3499 .pNext = nullptr,
3500 .createInfo = {},
3501 .requiredFormatFeatures = 0,
3502 .bufferCollectionConstraints =
3503 VkBufferCollectionConstraintsInfoFUCHSIA{
3504 .sType = VK_STRUCTURE_TYPE_BUFFER_COLLECTION_CONSTRAINTS_INFO_FUCHSIA,
3505 .pNext = nullptr,
3506 .minBufferCount = 1,
3507 .maxBufferCount = 0,
3508 .minBufferCountForCamping = 0,
3509 .minBufferCountForDedicatedSlack = 0,
3510 .minBufferCountForSharedSlack = 0,
3511 },
3512 };
3513 const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo = nullptr;
3514
3515 if (hasDedicatedImage) {
3516 std::lock_guard<std::recursive_mutex> lock(mLock);
3517
3518 auto it = info_VkImage.find(dedicatedAllocInfoPtr->image);
3519 if (it == info_VkImage.end()) return VK_ERROR_INITIALIZATION_FAILED;
3520 const auto& imageInfo = it->second;
3521
3522 pImageCreateInfo = &imageInfo.createInfo;
3523 }
3524
3525 if (hasDedicatedBuffer) {
3526 std::lock_guard<std::recursive_mutex> lock(mLock);
3527
3528 auto it = info_VkBuffer.find(dedicatedAllocInfoPtr->buffer);
3529 if (it == info_VkBuffer.end()) return VK_ERROR_INITIALIZATION_FAILED;
3530 const auto& bufferInfo = it->second;
3531
3532 bufferConstraintsInfo.createInfo = bufferInfo.createInfo;
3533 pBufferConstraintsInfo = &bufferConstraintsInfo;
3534 }
3535
3536 hasDedicatedImage =
3537 hasDedicatedImage && getBufferCollectionConstraintsVulkanImageUsage(pImageCreateInfo);
3538 hasDedicatedBuffer = hasDedicatedBuffer && getBufferCollectionConstraintsVulkanBufferUsage(
3539 pBufferConstraintsInfo);
3540
3541 if (hasDedicatedImage || hasDedicatedBuffer) {
3542 auto token_ends = fidl::CreateEndpoints<::fuchsia_sysmem::BufferCollectionToken>();
3543 if (!token_ends.is_ok()) {
3544                 mesa_loge("CreateEndpoints failed: %d", token_ends.status_value());
3545 abort();
3546 }
3547
3548 {
3549 auto result =
3550 mSysmemAllocator->AllocateSharedCollection(std::move(token_ends->server));
3551 if (!result.ok()) {
3552 mesa_loge("AllocateSharedCollection failed: %d", result.status());
3553 abort();
3554 }
3555 }
3556
3557 auto collection_ends = fidl::CreateEndpoints<::fuchsia_sysmem::BufferCollection>();
3558 if (!collection_ends.is_ok()) {
3559                 mesa_loge("CreateEndpoints failed: %d", collection_ends.status_value());
3560 abort();
3561 }
3562
3563 {
3564 auto result = mSysmemAllocator->BindSharedCollection(
3565 std::move(token_ends->client), std::move(collection_ends->server));
3566 if (!result.ok()) {
3567 mesa_loge("BindSharedCollection failed: %d", result.status());
3568 abort();
3569 }
3570 }
3571
3572 fidl::WireSyncClient<fuchsia_sysmem::BufferCollection> collection(
3573 std::move(collection_ends->client));
3574 if (hasDedicatedImage) {
3575 // TODO(fxbug.dev/42172354): Use setBufferCollectionImageConstraintsFUCHSIA.
3576 VkResult res = setBufferCollectionConstraintsFUCHSIA(enc, device, &collection,
3577 pImageCreateInfo);
3578 if (res == VK_ERROR_FORMAT_NOT_SUPPORTED) {
3579 mesa_loge("setBufferCollectionConstraints failed: format %u is not supported",
3580 pImageCreateInfo->format);
3581 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3582 }
3583 if (res != VK_SUCCESS) {
3584 mesa_loge("setBufferCollectionConstraints failed: %d", res);
3585 abort();
3586 }
3587 }
3588
3589 if (hasDedicatedBuffer) {
3590 VkResult res = setBufferCollectionBufferConstraintsFUCHSIA(&collection,
3591 pBufferConstraintsInfo);
3592 if (res != VK_SUCCESS) {
3593 mesa_loge("setBufferCollectionBufferConstraints failed: %d", res);
3594 abort();
3595 }
3596 }
3597
3598 {
3599 auto result = collection->WaitForBuffersAllocated();
3600 if (result.ok() && result->status == ZX_OK) {
3601 fuchsia_sysmem::wire::BufferCollectionInfo2& info =
3602 result->buffer_collection_info;
3603 if (!info.buffer_count) {
3604 mesa_loge(
3605 "WaitForBuffersAllocated returned "
3606 "invalid count: %d",
3607 info.buffer_count);
3608 abort();
3609 }
3610 vmo_handle = info.buffers[0].vmo.release();
3611 } else {
3612 mesa_loge("WaitForBuffersAllocated failed: %d %d", result.status(),
3613 GET_STATUS_SAFE(result, status));
3614 abort();
3615 }
3616 }
3617
3618 collection->Close();
3619
3620 zx::vmo vmo_copy;
3621 zx_status_t status = zx_handle_duplicate(vmo_handle, ZX_RIGHT_SAME_RIGHTS,
3622 vmo_copy.reset_and_get_address());
3623 if (status != ZX_OK) {
3624 mesa_loge("Failed to duplicate VMO: %d", status);
3625 abort();
3626 }
3627
3628 if (pImageCreateInfo) {
3629                 // Only device-local images need a color buffer created here; for
3630                 // host-visible images, the color buffer is already created when
3631                 // sysmem allocates the memory. We use the |tiling| field of the
3632                 // image create info to determine whether the image uses
3633                 // host-visible memory.
3634 bool isLinear = pImageCreateInfo->tiling == VK_IMAGE_TILING_LINEAR;
3635 if (!isLinear) {
3636 fuchsia_hardware_goldfish::wire::ColorBufferFormatType format;
3637 switch (pImageCreateInfo->format) {
3638 case VK_FORMAT_B8G8R8A8_SINT:
3639 case VK_FORMAT_B8G8R8A8_UNORM:
3640 case VK_FORMAT_B8G8R8A8_SRGB:
3641 case VK_FORMAT_B8G8R8A8_SNORM:
3642 case VK_FORMAT_B8G8R8A8_SSCALED:
3643 case VK_FORMAT_B8G8R8A8_USCALED:
3644 format = fuchsia_hardware_goldfish::wire::ColorBufferFormatType::kBgra;
3645 break;
3646 case VK_FORMAT_R8G8B8A8_SINT:
3647 case VK_FORMAT_R8G8B8A8_UNORM:
3648 case VK_FORMAT_R8G8B8A8_SRGB:
3649 case VK_FORMAT_R8G8B8A8_SNORM:
3650 case VK_FORMAT_R8G8B8A8_SSCALED:
3651 case VK_FORMAT_R8G8B8A8_USCALED:
3652 format = fuchsia_hardware_goldfish::wire::ColorBufferFormatType::kRgba;
3653 break;
3654 case VK_FORMAT_R8_UNORM:
3655 case VK_FORMAT_R8_UINT:
3656 case VK_FORMAT_R8_USCALED:
3657 case VK_FORMAT_R8_SNORM:
3658 case VK_FORMAT_R8_SINT:
3659 case VK_FORMAT_R8_SSCALED:
3660 case VK_FORMAT_R8_SRGB:
3661 format =
3662 fuchsia_hardware_goldfish::wire::ColorBufferFormatType::kLuminance;
3663 break;
3664 case VK_FORMAT_R8G8_UNORM:
3665 case VK_FORMAT_R8G8_UINT:
3666 case VK_FORMAT_R8G8_USCALED:
3667 case VK_FORMAT_R8G8_SNORM:
3668 case VK_FORMAT_R8G8_SINT:
3669 case VK_FORMAT_R8G8_SSCALED:
3670 case VK_FORMAT_R8G8_SRGB:
3671 format = fuchsia_hardware_goldfish::wire::ColorBufferFormatType::kRg;
3672 break;
3673 default:
3674 mesa_loge("Unsupported format: %d", pImageCreateInfo->format);
3675 abort();
3676 }
3677
3678 fidl::Arena arena;
3679 fuchsia_hardware_goldfish::wire::CreateColorBuffer2Params createParams(arena);
3680 createParams.set_width(pImageCreateInfo->extent.width)
3681 .set_height(pImageCreateInfo->extent.height)
3682 .set_format(format)
3683 .set_memory_property(
3684 fuchsia_hardware_goldfish::wire::kMemoryPropertyDeviceLocal);
3685
3686 auto result = mControlDevice->CreateColorBuffer2(std::move(vmo_copy),
3687 std::move(createParams));
3688 if (!result.ok() || result->res != ZX_OK) {
3689 if (result.ok() && result->res == ZX_ERR_ALREADY_EXISTS) {
3690 mesa_logd(
3691 "CreateColorBuffer: color buffer already "
3692 "exists\n");
3693 } else {
3694 mesa_loge("CreateColorBuffer failed: %d:%d", result.status(),
3695 GET_STATUS_SAFE(result, res));
3696 abort();
3697 }
3698 }
3699 }
3700 }
3701
3702 if (pBufferConstraintsInfo) {
3703 fidl::Arena arena;
3704 fuchsia_hardware_goldfish::wire::CreateBuffer2Params createParams(arena);
3705 createParams.set_size(arena, pBufferConstraintsInfo->createInfo.size)
3706 .set_memory_property(
3707 fuchsia_hardware_goldfish::wire::kMemoryPropertyDeviceLocal);
3708
3709 auto result =
3710 mControlDevice->CreateBuffer2(std::move(vmo_copy), std::move(createParams));
3711 if (!result.ok() || result->is_error()) {
3712 mesa_loge("CreateBuffer2 failed: %d:%d", result.status(),
3713 GET_STATUS_SAFE(result, error_value()));
3714 abort();
3715 }
3716 }
3717 } else {
3718 mesa_logw(
3719 "Dedicated image / buffer not available. Cannot create "
3720 "BufferCollection to export VMOs.");
3721 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3722 }
3723 }
3724
3725 if (vmo_handle != ZX_HANDLE_INVALID) {
3726 zx::vmo vmo_copy;
3727 zx_status_t status =
3728 zx_handle_duplicate(vmo_handle, ZX_RIGHT_SAME_RIGHTS, vmo_copy.reset_and_get_address());
3729 if (status != ZX_OK) {
3730 mesa_loge("Failed to duplicate VMO: %d", status);
3731 abort();
3732 }
3734
3735 auto result = mControlDevice->GetBufferHandle(std::move(vmo_copy));
3736 if (!result.ok() || result->res != ZX_OK) {
3737 mesa_loge("GetBufferHandle failed: %d:%d", result.status(),
3738 GET_STATUS_SAFE(result, res));
3739 } else {
3740 fuchsia_hardware_goldfish::wire::BufferHandleType handle_type = result->type;
3741 uint32_t buffer_handle = result->id;
3742
3743 if (handle_type == fuchsia_hardware_goldfish::wire::BufferHandleType::kBuffer) {
3744 importBufferInfo.buffer = buffer_handle;
3745 vk_append_struct(&structChainIter, &importBufferInfo);
3746 } else {
3747 importCbInfo.colorBuffer = buffer_handle;
3748 vk_append_struct(&structChainIter, &importCbInfo);
3749 }
3750 }
3751 }
3752 #endif
3753
3754 VirtGpuResourcePtr bufferBlob = nullptr;
3755 #if defined(LINUX_GUEST_BUILD)
3756 if (exportDmabuf) {
3757 VirtGpuDevice* instance = VirtGpuDevice::getInstance();
3758 hasDedicatedImage =
3759 dedicatedAllocInfoPtr && (dedicatedAllocInfoPtr->image != VK_NULL_HANDLE);
3760 hasDedicatedBuffer =
3761 dedicatedAllocInfoPtr && (dedicatedAllocInfoPtr->buffer != VK_NULL_HANDLE);
3762
3763 if (hasDedicatedImage) {
3764 VkImageCreateInfo imageCreateInfo;
3765 bool isDmaBufImage = false;
3766 {
3767 std::lock_guard<std::recursive_mutex> lock(mLock);
3768
3769 auto it = info_VkImage.find(dedicatedAllocInfoPtr->image);
3770 if (it == info_VkImage.end()) return VK_ERROR_INITIALIZATION_FAILED;
3771 const auto& imageInfo = it->second;
3772
3773 imageCreateInfo = imageInfo.createInfo;
3774 isDmaBufImage = imageInfo.isDmaBufImage;
3775 }
3776
3777 if (isDmaBufImage) {
3778 const VkImageSubresource imageSubresource = {
3779 .aspectMask = exportAllocateInfoPtr->handleTypes &
3780 VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT
3781 ? VK_IMAGE_ASPECT_MEMORY_PLANE_0_BIT_EXT
3782 : VK_IMAGE_ASPECT_COLOR_BIT,
3783 .mipLevel = 0,
3784 .arrayLayer = 0,
3785 };
3786 VkSubresourceLayout subResourceLayout;
3787 on_vkGetImageSubresourceLayout(context, device, dedicatedAllocInfoPtr->image,
3788 &imageSubresource, &subResourceLayout);
3789 if (!subResourceLayout.rowPitch) {
3790 mesa_loge("Failed to query stride for VirtGpu resource creation.");
3791 return VK_ERROR_INITIALIZATION_FAILED;
3792 }
3793
3794 uint32_t virglFormat = gfxstream::vk::getVirglFormat(imageCreateInfo.format);
3795 if (!virglFormat) {
3796 mesa_loge("Unsupported VK format for VirtGpu resource, vkFormat: 0x%x",
3797 imageCreateInfo.format);
3798 return VK_ERROR_FORMAT_NOT_SUPPORTED;
3799 }
3800 const uint32_t target = PIPE_TEXTURE_2D;
3801 uint32_t bind = VIRGL_BIND_RENDER_TARGET;
3802 if (VK_IMAGE_TILING_LINEAR == imageCreateInfo.tiling) {
3803 bind |= VIRGL_BIND_LINEAR;
3804 }
3805
3806 if (mCaps.vulkanCapset.alwaysBlob) {
3807 struct gfxstreamResourceCreate3d create3d = {};
3808 struct VirtGpuExecBuffer exec = {};
3809 struct gfxstreamPlaceholderCommandVk placeholderCmd = {};
3810 struct VirtGpuCreateBlob createBlob = {};
3811
3812 create3d.hdr.opCode = GFXSTREAM_RESOURCE_CREATE_3D;
3813 create3d.bind = bind;
3814 create3d.target = target;
3815 create3d.format = virglFormat;
3816 create3d.width = imageCreateInfo.extent.width;
3817 create3d.height = imageCreateInfo.extent.height;
3818 create3d.blobId = ++mAtomicId;
3819
3820 createBlob.blobCmd = reinterpret_cast<uint8_t*>(&create3d);
3821 createBlob.blobCmdSize = sizeof(create3d);
3822 createBlob.blobMem = kBlobMemHost3d;
3823 createBlob.flags = kBlobFlagShareable | kBlobFlagCrossDevice;
3824 createBlob.blobId = create3d.blobId;
3825 createBlob.size = finalAllocInfo.allocationSize;
3826
3827 bufferBlob = instance->createBlob(createBlob);
3828 if (!bufferBlob) return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3829
3830 placeholderCmd.hdr.opCode = GFXSTREAM_PLACEHOLDER_COMMAND_VK;
3831 exec.command = static_cast<void*>(&placeholderCmd);
3832 exec.command_size = sizeof(placeholderCmd);
3833 exec.flags = kRingIdx;
3834 exec.ring_idx = 1;
3835 if (instance->execBuffer(exec, bufferBlob.get())) {
3836 mesa_loge("Failed to execbuffer placeholder command.");
3837 return VK_ERROR_OUT_OF_HOST_MEMORY;
3838 }
3839
3840 if (bufferBlob->wait()) {
3841 mesa_loge("Failed to wait for blob.");
3842 return VK_ERROR_OUT_OF_HOST_MEMORY;
3843 }
3844 } else {
3845 bufferBlob = instance->createResource(
3846 imageCreateInfo.extent.width, imageCreateInfo.extent.height,
3847 subResourceLayout.rowPitch,
3848 subResourceLayout.rowPitch * imageCreateInfo.extent.height, virglFormat,
3849 target, bind);
3850 if (!bufferBlob) {
3851 mesa_loge("Failed to create colorBuffer resource for Image memory");
3852 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3853 }
3854 if (bufferBlob->wait()) {
3855 mesa_loge("Failed to wait for colorBuffer resource for Image memory");
3856 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3857 }
3858 }
3859 } else {
3860             mesa_logw(
3861                 "The VkMemoryDedicatedAllocateInfo::image associated with this "
3862                 "VkDeviceMemory allocation cannot be used to create an exportable "
3863                 "resource (VkExportMemoryAllocateInfo).\n");
3864 }
3865 } else if (hasDedicatedBuffer) {
3866 uint32_t virglFormat = VIRGL_FORMAT_R8_UNORM;
3867 const uint32_t target = PIPE_BUFFER;
3868 uint32_t bind = VIRGL_BIND_LINEAR;
3869 uint32_t width = finalAllocInfo.allocationSize;
3870 uint32_t height = 1;
3871
3872 if (mCaps.vulkanCapset.alwaysBlob) {
3873 struct gfxstreamResourceCreate3d create3d = {};
3874 struct VirtGpuExecBuffer exec = {};
3875 struct gfxstreamPlaceholderCommandVk placeholderCmd = {};
3876 struct VirtGpuCreateBlob createBlob = {};
3877
3878 create3d.hdr.opCode = GFXSTREAM_RESOURCE_CREATE_3D;
3879 create3d.bind = bind;
3880 create3d.target = target;
3881 create3d.format = virglFormat;
3882 create3d.width = width;
3883 create3d.height = height;
3884 create3d.blobId = ++mAtomicId;
3885
3886 createBlob.blobCmd = reinterpret_cast<uint8_t*>(&create3d);
3887 createBlob.blobCmdSize = sizeof(create3d);
3888 createBlob.blobMem = kBlobMemHost3d;
3889 createBlob.flags = kBlobFlagShareable | kBlobFlagCrossDevice;
3890 createBlob.blobId = create3d.blobId;
3891 createBlob.size = width;
3892
3893 bufferBlob = instance->createBlob(createBlob);
3894 if (!bufferBlob) return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3895
3896 placeholderCmd.hdr.opCode = GFXSTREAM_PLACEHOLDER_COMMAND_VK;
3897 exec.command = static_cast<void*>(&placeholderCmd);
3898 exec.command_size = sizeof(placeholderCmd);
3899 exec.flags = kRingIdx;
3900 exec.ring_idx = 1;
3901 if (instance->execBuffer(exec, bufferBlob.get())) {
3902 mesa_loge("Failed to allocate coherent memory: failed to execbuffer for wait.");
3903 return VK_ERROR_OUT_OF_HOST_MEMORY;
3904 }
3905
3906 bufferBlob->wait();
3907 } else {
3908 bufferBlob = instance->createResource(width, height, width, width * height,
3909 virglFormat, target, bind);
3910 if (!bufferBlob) {
3911 mesa_loge("Failed to create colorBuffer resource for Image memory");
3912 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3913 }
3914 if (bufferBlob->wait()) {
3915 mesa_loge("Failed to wait for colorBuffer resource for Image memory");
3916 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3917 }
3918 }
3919 } else {
3920 mesa_logw(
3921                 "VkDeviceMemory is not exportable (VkExportMemoryAllocateInfo). Requires "
3922                 "VkMemoryDedicatedAllocateInfo::image or ::buffer to create an external resource.");
3923 }
3924 }
3925
3926 if (importDmabuf) {
3927 VirtGpuExternalHandle importHandle = {};
3928 importHandle.osHandle = importFdInfoPtr->fd;
3929 importHandle.type = kMemHandleDmabuf;
3930
3931 auto instance = VirtGpuDevice::getInstance();
3932 bufferBlob = instance->importBlob(importHandle);
3933 if (!bufferBlob) {
3934 mesa_loge("%s: Failed to import colorBuffer resource\n", __func__);
3935 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3936 }
3937 }
3938
3939 if (bufferBlob) {
3940 if (hasDedicatedBuffer) {
3941 importBufferInfo.buffer = bufferBlob->getResourceHandle();
3942 vk_append_struct(&structChainIter, &importBufferInfo);
3943 } else {
3944 importCbInfo.colorBuffer = bufferBlob->getResourceHandle();
3945 vk_append_struct(&structChainIter, &importCbInfo);
3946 }
3947 }
3948 #endif
3949
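    // Path 1: no guest-side direct mapping is needed (or the memory is backed by
    // an external resource), so a plain host allocation suffices.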
3950 if (ahw || bufferBlob || !requestedMemoryIsHostVisible) {
3951 input_result =
3952 enc->vkAllocateMemory(device, &finalAllocInfo, pAllocator, pMemory, true /* do lock */);
3953
3954 if (input_result != VK_SUCCESS) _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(input_result);
3955
3956 setDeviceMemoryInfo(device, *pMemory, 0, nullptr, finalAllocInfo.memoryTypeIndex, ahw,
3957 isImport, vmo_handle, bufferBlob);
3958
3959 uint64_t memoryObjectId = (uint64_t)(void*)*pMemory;
3960 #ifdef VK_USE_PLATFORM_ANDROID_KHR
3961 if (ahw) {
3962 memoryObjectId = getAHardwareBufferId(ahw);
3963 }
3964 #endif
3965 emitDeviceMemoryReport(info_VkDevice[device],
3966 isImport ? VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_IMPORT_EXT
3967 : VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATE_EXT,
3968 memoryObjectId, pAllocateInfo->allocationSize,
3969 VK_OBJECT_TYPE_DEVICE_MEMORY, (uint64_t)(void*)*pMemory,
3970 pAllocateInfo->memoryTypeIndex);
3971 return VK_SUCCESS;
3972 }
3973
3974 #ifdef VK_USE_PLATFORM_FUCHSIA
3975 if (vmo_handle != ZX_HANDLE_INVALID) {
3976 input_result =
3977 enc->vkAllocateMemory(device, &finalAllocInfo, pAllocator, pMemory, true /* do lock */);
3978
3979 // Get VMO handle rights, and only use allowed rights to map the
3980 // host memory.
3981 zx_info_handle_basic handle_info;
3982 zx_status_t status = zx_object_get_info(vmo_handle, ZX_INFO_HANDLE_BASIC, &handle_info,
3983 sizeof(handle_info), nullptr, nullptr);
3984 if (status != ZX_OK) {
3985 mesa_loge("%s: cannot get vmo object info: vmo = %u status: %d.", __func__, vmo_handle,
3986 status);
3987 return VK_ERROR_OUT_OF_HOST_MEMORY;
3988 }
3989
3990 zx_vm_option_t vm_permission = 0u;
3991 vm_permission |= (handle_info.rights & ZX_RIGHT_READ) ? ZX_VM_PERM_READ : 0;
3992 vm_permission |= (handle_info.rights & ZX_RIGHT_WRITE) ? ZX_VM_PERM_WRITE : 0;
3993
3994         zx_vaddr_t addr;
3995 status = zx_vmar_map(zx_vmar_root_self(), vm_permission, 0, vmo_handle, 0,
3996 finalAllocInfo.allocationSize, &addr);
3997 if (status != ZX_OK) {
3998 mesa_loge("%s: cannot map vmar: status %d.", __func__, status);
3999 return VK_ERROR_OUT_OF_HOST_MEMORY;
4000 }
4001
4002 setDeviceMemoryInfo(device, *pMemory, finalAllocInfo.allocationSize,
4003 reinterpret_cast<uint8_t*>(addr), finalAllocInfo.memoryTypeIndex,
4004 /*ahw=*/nullptr, isImport, vmo_handle, /*blobPtr=*/nullptr);
4005 return VK_SUCCESS;
4006 }
4007 #endif
4008
4009 // Host visible memory with direct mapping
4010 VkResult result = getCoherentMemory(&finalAllocInfo, enc, device, pMemory);
4011 if (result != VK_SUCCESS) return result;
4012
4013 uint64_t memoryObjectId = (uint64_t)(void*)*pMemory;
4014
4015 #ifdef VK_USE_PLATFORM_ANDROID_KHR
4016 if (ahw) {
4017 memoryObjectId = getAHardwareBufferId(ahw);
4018 }
4019 #endif
4020
4021 emitDeviceMemoryReport(info_VkDevice[device],
4022 isImport ? VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_IMPORT_EXT
4023 : VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATE_EXT,
4024 memoryObjectId, pAllocateInfo->allocationSize,
4025 VK_OBJECT_TYPE_DEVICE_MEMORY, (uint64_t)(void*)*pMemory,
4026 pAllocateInfo->memoryTypeIndex);
4027 return VK_SUCCESS;
4028 }
4029
4030 void ResourceTracker::on_vkFreeMemory(void* context, VkDevice device, VkDeviceMemory memory,
4031                                       const VkAllocationCallbacks* pAllocator) {
4032 std::unique_lock<std::recursive_mutex> lock(mLock);
4033
4034 auto it = info_VkDeviceMemory.find(memory);
4035 if (it == info_VkDeviceMemory.end()) return;
4036 auto& info = it->second;
4037 uint64_t memoryObjectId = (uint64_t)(void*)memory;
4038 #ifdef VK_USE_PLATFORM_ANDROID_KHR
4039 if (info.ahw) {
4040 memoryObjectId = getAHardwareBufferId(info.ahw);
4041 }
4042 #endif
4043
4044 emitDeviceMemoryReport(info_VkDevice[device],
4045 info.imported ? VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_UNIMPORT_EXT
4046 : VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_FREE_EXT,
4047 memoryObjectId, 0 /* size */, VK_OBJECT_TYPE_DEVICE_MEMORY,
4048 (uint64_t)(void*)memory);
4049
4050 #ifdef VK_USE_PLATFORM_FUCHSIA
4051 if (info.vmoHandle && info.ptr) {
4052         zx_status_t status = zx_vmar_unmap(
4053             zx_vmar_root_self(), reinterpret_cast<zx_vaddr_t>(info.ptr), info.allocationSize);
4054 if (status != ZX_OK) {
4055 mesa_loge("%s: Cannot unmap ptr: status %d", __func__, status);
4056 }
4057 info.ptr = nullptr;
4058 }
4059 #endif
4060
4061 if (!info.coherentMemory) {
4062 lock.unlock();
4063 VkEncoder* enc = (VkEncoder*)context;
4064         enc->vkFreeMemory(device, memory, pAllocator, true /* do lock */);
4065 return;
4066 }
4067
4068 auto coherentMemory = freeCoherentMemoryLocked(memory, info);
4069
4070 // We have to release the lock before we could possibly free a
4071 // CoherentMemory, because that will call into VkEncoder, which
4072 // shouldn't be called when the lock is held.
4073 lock.unlock();
4074 coherentMemory = nullptr;
4075 }
4076
4077 VkResult ResourceTracker::on_vkMapMemory(void* context, VkResult host_result, VkDevice device,
4078 VkDeviceMemory memory, VkDeviceSize offset,
4079 VkDeviceSize size, VkMemoryMapFlags, void** ppData) {
4080 if (host_result != VK_SUCCESS) {
4081 mesa_loge("%s: Host failed to map", __func__);
4082 return host_result;
4083 }
4084
4085 std::unique_lock<std::recursive_mutex> lock(mLock);
4086
4087 auto deviceMemoryInfoIt = info_VkDeviceMemory.find(memory);
4088 if (deviceMemoryInfoIt == info_VkDeviceMemory.end()) {
4089 mesa_loge("%s: Failed to find VkDeviceMemory.", __func__);
4090 return VK_ERROR_MEMORY_MAP_FAILED;
4091 }
4092 auto& deviceMemoryInfo = deviceMemoryInfoIt->second;
4093
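    // Deferred-mapping case: a blob id was reserved at allocation time but no
    // guest mapping exists yet. Create the host3d blob now, map it, and
    // suballocate the application's range out of it.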
4094 if (deviceMemoryInfo.blobId && !deviceMemoryInfo.coherentMemory &&
4095 !mCaps.params[kParamCreateGuestHandle]) {
4096 // NOTE: must not hold lock while calling into the encoder.
4097 lock.unlock();
4098 VkEncoder* enc = (VkEncoder*)context;
4099 VkResult vkResult = enc->vkGetBlobGOOGLE(device, memory, /*doLock*/ false);
4100 if (vkResult != VK_SUCCESS) {
4101 mesa_loge("%s: Failed to vkGetBlobGOOGLE().", __func__);
4102 return vkResult;
4103 }
4104 lock.lock();
4105
4106 // NOTE: deviceMemoryInfoIt potentially invalidated but deviceMemoryInfo still okay.
4107
4108 struct VirtGpuCreateBlob createBlob = {};
4109 createBlob.blobMem = kBlobMemHost3d;
4110 createBlob.flags = kBlobFlagMappable;
4111 createBlob.blobId = deviceMemoryInfo.blobId;
4112 createBlob.size = deviceMemoryInfo.coherentMemorySize;
4113
4114 auto blob = VirtGpuDevice::getInstance()->createBlob(createBlob);
4115 if (!blob) return VK_ERROR_OUT_OF_DEVICE_MEMORY;
4116
4117 VirtGpuResourceMappingPtr mapping = blob->createMapping();
4118 if (!mapping) return VK_ERROR_OUT_OF_DEVICE_MEMORY;
4119
4120 auto coherentMemory =
4121 std::make_shared<CoherentMemory>(mapping, createBlob.size, device, memory);
4122
4123 uint8_t* ptr;
4124 uint64_t offset;
4125 coherentMemory->subAllocate(deviceMemoryInfo.allocationSize, &ptr, offset);
4126
4127 deviceMemoryInfo.coherentMemoryOffset = offset;
4128 deviceMemoryInfo.coherentMemory = coherentMemory;
4129 deviceMemoryInfo.ptr = ptr;
4130 }
4131
4132 if (!deviceMemoryInfo.ptr) {
4133         mesa_loge("%s: no guest-side mapping for VkDeviceMemory.", __func__);
4134 return VK_ERROR_MEMORY_MAP_FAILED;
4135 }
4136
4137     if (size != VK_WHOLE_SIZE && (offset + size > deviceMemoryInfo.allocationSize)) {
4138         mesa_loge(
4139             "%s: size is too big. alloc size 0x%llx while we wanted offset 0x%llx size 0x%llx "
4140             "total 0x%llx",
4141             __func__, (unsigned long long)deviceMemoryInfo.allocationSize,
4142             (unsigned long long)offset, (unsigned long long)size,
4143             (unsigned long long)(offset + size));
4144         return VK_ERROR_MEMORY_MAP_FAILED;
4145     }
4146
4147 *ppData = deviceMemoryInfo.ptr + offset;
4148
4149 return host_result;
4150 }
4151
4152 void ResourceTracker::on_vkUnmapMemory(void*, VkDevice, VkDeviceMemory) {
4153     // No-op: guest mappings are persistent for the lifetime of the allocation.
4154 }
4155
4156 void ResourceTracker::transformImageMemoryRequirements2ForGuest(VkImage image,
4157 VkMemoryRequirements2* reqs2) {
4158 std::lock_guard<std::recursive_mutex> lock(mLock);
4159
4160 auto it = info_VkImage.find(image);
4161 if (it == info_VkImage.end()) return;
4162
4163 auto& info = it->second;
4164
4165     transformImageMemoryRequirementsForGuestLocked(image, &reqs2->memoryRequirements);
4166
4167     if (!info.external || !info.externalCreateInfo.handleTypes) {
4168         return;
4169     }
4171
4172 VkMemoryDedicatedRequirements* dedicatedReqs =
4173 vk_find_struct(reqs2, MEMORY_DEDICATED_REQUIREMENTS);
4174
4175 if (!dedicatedReqs) return;
4176
4177 transformExternalResourceMemoryDedicatedRequirementsForGuest(dedicatedReqs);
4178 }
4179
4180 void ResourceTracker::transformBufferMemoryRequirements2ForGuest(VkBuffer buffer,
4181 VkMemoryRequirements2* reqs2) {
4182 std::lock_guard<std::recursive_mutex> lock(mLock);
4183
4184 auto it = info_VkBuffer.find(buffer);
4185 if (it == info_VkBuffer.end()) return;
4186
4187 auto& info = it->second;
4188
4189 if (!info.external || !info.externalCreateInfo.handleTypes) {
4190 return;
4191 }
4192
4193 VkMemoryDedicatedRequirements* dedicatedReqs =
4194 vk_find_struct(reqs2, MEMORY_DEDICATED_REQUIREMENTS);
4195
4196 if (!dedicatedReqs) return;
4197
4198 transformExternalResourceMemoryDedicatedRequirementsForGuest(dedicatedReqs);
4199 }
4200
4201 VkResult ResourceTracker::on_vkCreateImage(void* context, VkResult, VkDevice device,
4202 const VkImageCreateInfo* pCreateInfo,
4203 const VkAllocationCallbacks* pAllocator,
4204 VkImage* pImage) {
4205 VkEncoder* enc = (VkEncoder*)context;
4206
4207 VkImageCreateInfo localCreateInfo = vk_make_orphan_copy(*pCreateInfo);
4208 if (localCreateInfo.sharingMode != VK_SHARING_MODE_CONCURRENT) {
4209 localCreateInfo.queueFamilyIndexCount = 0;
4210 localCreateInfo.pQueueFamilyIndices = nullptr;
4211 }
4212
4213 vk_struct_chain_iterator structChainIter = vk_make_chain_iterator(&localCreateInfo);
4214 VkExternalMemoryImageCreateInfo localExtImgCi;
4215
4216 const VkExternalMemoryImageCreateInfo* extImgCiPtr =
4217 vk_find_struct_const(pCreateInfo, EXTERNAL_MEMORY_IMAGE_CREATE_INFO);
4218
4219 if (extImgCiPtr) {
4220 localExtImgCi = vk_make_orphan_copy(*extImgCiPtr);
4221 vk_append_struct(&structChainIter, &localExtImgCi);
4222 }
4223
4224 #if defined(LINUX_GUEST_BUILD)
4225 bool isDmaBufImage = false;
4226 VkImageDrmFormatModifierExplicitCreateInfoEXT localDrmFormatModifierInfo;
4227 VkImageDrmFormatModifierListCreateInfoEXT localDrmFormatModifierList;
4228
4229 if (extImgCiPtr &&
4230 (extImgCiPtr->handleTypes & VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT)) {
4231 const wsi_image_create_info* wsiImageCi =
4232 vk_find_struct_const(pCreateInfo, WSI_IMAGE_CREATE_INFO_MESA);
4233 if (wsiImageCi && wsiImageCi->scanout) {
4234 // Linux WSI creates swapchain images with VK_IMAGE_CREATE_ALIAS_BIT. Vulkan spec
4235 // states: "If the pNext chain includes a VkExternalMemoryImageCreateInfo or
4236 // VkExternalMemoryImageCreateInfoNV structure whose handleTypes member is not 0, it is
4237 // as if VK_IMAGE_CREATE_ALIAS_BIT is set." To avoid flag mismatches on host driver,
4238 // remove the VK_IMAGE_CREATE_ALIAS_BIT here.
4239 localCreateInfo.flags &= ~VK_IMAGE_CREATE_ALIAS_BIT;
4240 }
4241
4242 const VkImageDrmFormatModifierExplicitCreateInfoEXT* drmFmtMod =
4243 vk_find_struct_const(pCreateInfo, IMAGE_DRM_FORMAT_MODIFIER_EXPLICIT_CREATE_INFO_EXT);
4244 const VkImageDrmFormatModifierListCreateInfoEXT* drmFmtModList =
4245 vk_find_struct_const(pCreateInfo, IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT);
4246 if (drmFmtMod || drmFmtModList) {
4247 if (getHostDeviceExtensionIndex(VK_EXT_IMAGE_DRM_FORMAT_MODIFIER_EXTENSION_NAME) !=
4248 -1) {
4249 // host supports DRM format modifiers => forward the struct
4250 if (drmFmtMod) {
4251 localDrmFormatModifierInfo = vk_make_orphan_copy(*drmFmtMod);
4252 vk_append_struct(&structChainIter, &localDrmFormatModifierInfo);
4253 }
4254 if (drmFmtModList) {
4255 localDrmFormatModifierList = vk_make_orphan_copy(*drmFmtModList);
4256 vk_append_struct(&structChainIter, &localDrmFormatModifierList);
4257 }
4258 } else {
4259 bool canUseLinearModifier =
4260 (drmFmtMod && drmFmtMod->drmFormatModifier == DRM_FORMAT_MOD_LINEAR) ||
4261 std::any_of(
4262 drmFmtModList->pDrmFormatModifiers,
4263 drmFmtModList->pDrmFormatModifiers + drmFmtModList->drmFormatModifierCount,
4264 [](const uint64_t mod) { return mod == DRM_FORMAT_MOD_LINEAR; });
4265 // host doesn't support DRM format modifiers, try emulating
4266 if (canUseLinearModifier) {
4267 mesa_logd("emulating DRM_FORMAT_MOD_LINEAR with VK_IMAGE_TILING_LINEAR");
4268 localCreateInfo.tiling = VK_IMAGE_TILING_LINEAR;
4269 } else {
4270 return VK_ERROR_VALIDATION_FAILED_EXT;
4271 }
4273 }
4274 }
4275
4276 isDmaBufImage = true;
4277 }
4278 #endif
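
    // Illustrative sketch (not part of the driver): an application that relies on
    // the linear-modifier fallback above would chain something like the following
    // (hypothetical local names):
    //
    //     const uint64_t mods[] = {DRM_FORMAT_MOD_LINEAR};
    //     VkImageDrmFormatModifierListCreateInfoEXT modList = {
    //         .sType = VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT,
    //         .drmFormatModifierCount = 1,
    //         .pDrmFormatModifiers = mods,
    //     };
    //     imageCreateInfo.tiling = VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT;
    //     imageCreateInfo.pNext = &modList;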
4279
4280 #ifdef VK_USE_PLATFORM_ANDROID_KHR
4281 VkNativeBufferANDROID localAnb;
4282 const VkNativeBufferANDROID* anbInfoPtr = vk_find_struct_const(pCreateInfo, NATIVE_BUFFER_ANDROID);
4283 if (anbInfoPtr) {
4284 localAnb = vk_make_orphan_copy(*anbInfoPtr);
4285 vk_append_struct(&structChainIter, &localAnb);
4286 }
4287
4288 VkExternalFormatANDROID localExtFormatAndroid;
4289 const VkExternalFormatANDROID* extFormatAndroidPtr =
4290 vk_find_struct_const(pCreateInfo, EXTERNAL_FORMAT_ANDROID);
4291 if (extFormatAndroidPtr) {
4292 localExtFormatAndroid = vk_make_orphan_copy(*extFormatAndroidPtr);
4293
4294         // Do not append VkExternalFormatANDROID to the chain;
4295         // instead, replace the format in the local create info
4296         // with the corresponding Vulkan format.
4297 if (extFormatAndroidPtr->externalFormat) {
4298 localCreateInfo.format = vk_format_from_fourcc(extFormatAndroidPtr->externalFormat);
4299 if (localCreateInfo.format == VK_FORMAT_UNDEFINED)
4300 return VK_ERROR_VALIDATION_FAILED_EXT;
4301 }
4302 }
4303 #endif
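
    // Illustrative note (not part of the driver): vk_format_from_fourcc() maps the
    // Android external format (a fourcc code) to a concrete VkFormat, e.g. an NV12
    // fourcc would typically resolve to VK_FORMAT_G8_B8R8_2PLANE_420_UNORM;
    // VK_FORMAT_UNDEFINED signals an unsupported fourcc and is rejected above.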
4304
4305 #ifdef VK_USE_PLATFORM_FUCHSIA
4306 const VkBufferCollectionImageCreateInfoFUCHSIA* extBufferCollectionPtr =
4307 vk_find_struct_const(pCreateInfo, BUFFER_COLLECTION_IMAGE_CREATE_INFO_FUCHSIA);
4308
4309 bool isSysmemBackedMemory = false;
4310
4311 if (extImgCiPtr &&
4312 (extImgCiPtr->handleTypes & VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA)) {
4313 isSysmemBackedMemory = true;
4314 }
4315
4316 if (extBufferCollectionPtr) {
4317 const auto& collection =
4318 *reinterpret_cast<fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(
4319 extBufferCollectionPtr->collection);
4320 uint32_t index = extBufferCollectionPtr->index;
4321 zx::vmo vmo;
4322
4323 fuchsia_sysmem::wire::BufferCollectionInfo2 info;
4324
4325 auto result = collection->WaitForBuffersAllocated();
4326 if (result.ok() && result->status == ZX_OK) {
4327 info = std::move(result->buffer_collection_info);
4328 if (index < info.buffer_count && info.settings.has_image_format_constraints) {
4329 vmo = std::move(info.buffers[index].vmo);
4330 }
4331 } else {
4332 mesa_loge("WaitForBuffersAllocated failed: %d %d", result.status(),
4333 GET_STATUS_SAFE(result, status));
4334 }
4335
4336 if (vmo.is_valid()) {
4337 zx::vmo vmo_dup;
4338 if (zx_status_t status = vmo.duplicate(ZX_RIGHT_SAME_RIGHTS, &vmo_dup);
4339 status != ZX_OK) {
4340 mesa_loge("%s: zx_vmo_duplicate failed: %d", __func__, status);
4341 abort();
4342 }
4343
4344 auto buffer_handle_result = mControlDevice->GetBufferHandle(std::move(vmo_dup));
4345 if (!buffer_handle_result.ok()) {
4346 mesa_loge("%s: GetBufferHandle FIDL error: %d", __func__,
4347 buffer_handle_result.status());
4348 abort();
4349 }
4350 if (buffer_handle_result.value().res == ZX_OK) {
4351 // Buffer handle already exists.
4352                     // If it is a ColorBuffer, this is a no-op; otherwise return an error.
4353 if (buffer_handle_result.value().type !=
4354 fuchsia_hardware_goldfish::wire::BufferHandleType::kColorBuffer) {
4355 mesa_loge("%s: BufferHandle %u is not a ColorBuffer", __func__,
4356 buffer_handle_result.value().id);
4357 return VK_ERROR_OUT_OF_HOST_MEMORY;
4358 }
4359 } else if (buffer_handle_result.value().res == ZX_ERR_NOT_FOUND) {
4360 // Buffer handle not found. Create ColorBuffer based on buffer settings.
4361 auto format = info.settings.image_format_constraints.pixel_format.type ==
4362 fuchsia_sysmem::wire::PixelFormatType::kR8G8B8A8
4363 ? fuchsia_hardware_goldfish::wire::ColorBufferFormatType::kRgba
4364 : fuchsia_hardware_goldfish::wire::ColorBufferFormatType::kBgra;
4365
4366 uint32_t memory_property =
4367 info.settings.buffer_settings.heap ==
4368 fuchsia_sysmem::wire::HeapType::kGoldfishDeviceLocal
4369 ? fuchsia_hardware_goldfish::wire::kMemoryPropertyDeviceLocal
4370 : fuchsia_hardware_goldfish::wire::kMemoryPropertyHostVisible;
4371
4372 fidl::Arena arena;
4373 fuchsia_hardware_goldfish::wire::CreateColorBuffer2Params createParams(arena);
4374 createParams.set_width(info.settings.image_format_constraints.min_coded_width)
4375 .set_height(info.settings.image_format_constraints.min_coded_height)
4376 .set_format(format)
4377 .set_memory_property(memory_property);
4378
4379 auto result =
4380 mControlDevice->CreateColorBuffer2(std::move(vmo), std::move(createParams));
4381 if (result.ok() && result->res == ZX_ERR_ALREADY_EXISTS) {
4382 mesa_logd("CreateColorBuffer: color buffer already exists\n");
4383 } else if (!result.ok() || result->res != ZX_OK) {
4384 mesa_loge("CreateColorBuffer failed: %d:%d", result.status(),
4385 GET_STATUS_SAFE(result, res));
4386 }
4387 }
4388
4389 if (info.settings.buffer_settings.heap ==
4390 fuchsia_sysmem::wire::HeapType::kGoldfishHostVisible) {
4391 mesa_logd(
4392 "%s: Image uses host visible memory heap; set tiling "
4393 "to linear to match host ImageCreateInfo",
4394 __func__);
4395 localCreateInfo.tiling = VK_IMAGE_TILING_LINEAR;
4396 }
4397 }
4398 isSysmemBackedMemory = true;
4399 }
4400
4401 if (isSysmemBackedMemory) {
4402 localCreateInfo.flags |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
4403 }
4404 #endif
4405
4406 VkResult res;
4407 VkMemoryRequirements memReqs;
4408
4409 if (supportsCreateResourcesWithRequirements()) {
4410 res = enc->vkCreateImageWithRequirementsGOOGLE(device, &localCreateInfo, pAllocator, pImage,
4411 &memReqs, true /* do lock */);
4412 } else {
4413 res = enc->vkCreateImage(device, &localCreateInfo, pAllocator, pImage, true /* do lock */);
4414 }
4415
4416 if (res != VK_SUCCESS) return res;
4417
4418 std::lock_guard<std::recursive_mutex> lock(mLock);
4419
4420 auto it = info_VkImage.find(*pImage);
4421 if (it == info_VkImage.end()) return VK_ERROR_INITIALIZATION_FAILED;
4422
4423 auto& info = it->second;
4424
4425 info.device = device;
4426 info.createInfo = *pCreateInfo;
4427 info.createInfo.pNext = nullptr;
4428
4429 #ifdef VK_USE_PLATFORM_ANDROID_KHR
4430 if (extFormatAndroidPtr && extFormatAndroidPtr->externalFormat) {
4431 info.hasExternalFormat = true;
4432 info.externalFourccFormat = extFormatAndroidPtr->externalFormat;
4433 }
4434 #endif // VK_USE_PLATFORM_ANDROID_KHR
4435
4436 if (supportsCreateResourcesWithRequirements()) {
4437 info.baseRequirementsKnown = true;
4438 }
4439
4440 if (extImgCiPtr) {
4441 info.external = true;
4442 info.externalCreateInfo = *extImgCiPtr;
4443 }
4444
4445 #ifdef VK_USE_PLATFORM_FUCHSIA
4446 if (isSysmemBackedMemory) {
4447 info.isSysmemBackedMemory = true;
4448 }
4449 #endif
4450
4451     // Delete the `protocolVersion` check once goldfish drivers are gone.
4452 #if defined(VK_USE_PLATFORM_ANDROID_KHR)
4453 if (mCaps.vulkanCapset.colorBufferMemoryIndex == 0xFFFFFFFF) {
4454 mCaps.vulkanCapset.colorBufferMemoryIndex = getColorBufferMemoryIndex(context, device);
4455 }
4456 if ((extImgCiPtr && (extImgCiPtr->handleTypes &
4457 VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID))) {
4458 updateMemoryTypeBits(&memReqs.memoryTypeBits, mCaps.vulkanCapset.colorBufferMemoryIndex);
4459 }
4460 #endif
4461 #if defined(LINUX_GUEST_BUILD)
4462 if (mCaps.vulkanCapset.colorBufferMemoryIndex == 0xFFFFFFFF) {
4463 mCaps.vulkanCapset.colorBufferMemoryIndex = getColorBufferMemoryIndex(context, device);
4464 }
4465 info.isDmaBufImage = isDmaBufImage;
4466 if (info.isDmaBufImage) {
4467 updateMemoryTypeBits(&memReqs.memoryTypeBits, mCaps.vulkanCapset.colorBufferMemoryIndex);
4468 if (localCreateInfo.tiling == VK_IMAGE_TILING_OPTIMAL) {
4469 // Linux WSI calls vkGetImageSubresourceLayout() to query the stride for swapchain
4470 // support. Similarly, stride is also queried from vkGetImageSubresourceLayout() to
4471 // determine the stride for colorBuffer resource creation (guest-side dmabuf resource).
4472 // To satisfy valid usage of this API, must call on the linearPeerImage for the VkImage
4473 // in question. As long as these two use cases match, the rowPitch won't actually be
4474 // used by WSI.
4475 VkImageCreateInfo linearPeerImageCreateInfo = {
4476 .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
4477 .pNext = nullptr,
4478 .flags = {},
4479 .imageType = VK_IMAGE_TYPE_2D,
4480 .format = localCreateInfo.format,
4481 .extent = localCreateInfo.extent,
4482 .mipLevels = 1,
4483 .arrayLayers = 1,
4484 .samples = VK_SAMPLE_COUNT_1_BIT,
4485 .tiling = VK_IMAGE_TILING_LINEAR,
4486 .usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT,
4487 .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
4488 .queueFamilyIndexCount = 0,
4489 .pQueueFamilyIndices = nullptr,
4490 .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
4491 };
4492 res = enc->vkCreateImage(device, &linearPeerImageCreateInfo, pAllocator,
4493 &info.linearPeerImage, true /* do lock */);
4494 if (res != VK_SUCCESS) return res;
4495 }
4496 }
4497 #endif
4498
4499 if (info.baseRequirementsKnown) {
4500 transformImageMemoryRequirementsForGuestLocked(*pImage, &memReqs);
4501 info.baseRequirements = memReqs;
4502 }
4503 return res;
4504 }
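
// Illustrative sketch (not part of this translation unit): creating an image
// whose memory will be exported, which takes the `info.external` path recorded
// above. All names are hypothetical; the handle type is platform-specific.
#if 0
static VkResult exampleCreateExternalImage(VkDevice device, VkImageCreateInfo imageCi,
                                           VkImage* pImage) {
    VkExternalMemoryImageCreateInfo extCi = {
        .sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO,
        .pNext = nullptr,
        .handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
    };
    imageCi.pNext = &extCi;  // non-zero handleTypes implies VK_IMAGE_CREATE_ALIAS_BIT
    return vkCreateImage(device, &imageCi, nullptr, pImage);
}
#endif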
4505
4506 VkResult ResourceTracker::on_vkCreateSamplerYcbcrConversion(
4507 void* context, VkResult, VkDevice device, const VkSamplerYcbcrConversionCreateInfo* pCreateInfo,
4508 const VkAllocationCallbacks* pAllocator, VkSamplerYcbcrConversion* pYcbcrConversion) {
4509 VkSamplerYcbcrConversionCreateInfo localCreateInfo = vk_make_orphan_copy(*pCreateInfo);
4510
4511 #ifdef VK_USE_PLATFORM_ANDROID_KHR
4512 const VkExternalFormatANDROID* extFormatAndroidPtr =
4513 vk_find_struct_const(pCreateInfo, EXTERNAL_FORMAT_ANDROID);
4514 if (extFormatAndroidPtr) {
4515 if (extFormatAndroidPtr->externalFormat == DRM_FORMAT_RGB565) {
4516 // We don't support external formats on host and it causes RGB565
4517 // to fail in CtsGraphicsTestCases android.graphics.cts.BasicVulkanGpuTest
4518 // when passed as an external format.
4519 // We may consider doing this for all external formats.
4520 // See b/134771579.
4521 *pYcbcrConversion = VK_YCBCR_CONVERSION_DO_NOTHING;
4522 return VK_SUCCESS;
4523 } else if (extFormatAndroidPtr->externalFormat) {
4524 localCreateInfo.format = vk_format_from_fourcc(extFormatAndroidPtr->externalFormat);
4525 }
4526 }
4527 #endif
4528
4529 VkEncoder* enc = (VkEncoder*)context;
4530 VkResult res = enc->vkCreateSamplerYcbcrConversion(device, &localCreateInfo, pAllocator,
4531 pYcbcrConversion, true /* do lock */);
4532
4533 if (*pYcbcrConversion == VK_YCBCR_CONVERSION_DO_NOTHING) {
4534 mesa_loge(
4535 "FATAL: vkCreateSamplerYcbcrConversion returned a reserved value "
4536 "(VK_YCBCR_CONVERSION_DO_NOTHING)");
4537 abort();
4538 }
4539 return res;
4540 }
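
// Illustrative sketch (not part of this translation unit): a conversion created
// from an Android external format, which takes the format-substitution path
// above. Hypothetical names; the identity-free 601/narrow settings are shown
// only for brevity.
#if 0
static VkResult exampleCreateYcbcrConversion(VkDevice device, uint64_t androidExternalFormat,
                                             VkSamplerYcbcrConversion* pConversion) {
    VkExternalFormatANDROID extFormat = {
        .sType = VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID,
        .pNext = nullptr,
        .externalFormat = androidExternalFormat,
    };
    VkSamplerYcbcrConversionCreateInfo ci = {
        .sType = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO,
        .pNext = &extFormat,
        .format = VK_FORMAT_UNDEFINED,  // required when an external format is chained
        .ycbcrModel = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601,
        .ycbcrRange = VK_SAMPLER_YCBCR_RANGE_ITU_NARROW,
        .components = {VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY,
                       VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY},
        .xChromaOffset = VK_CHROMA_LOCATION_MIDPOINT,
        .yChromaOffset = VK_CHROMA_LOCATION_MIDPOINT,
        .chromaFilter = VK_FILTER_NEAREST,
        .forceExplicitReconstruction = VK_FALSE,
    };
    return vkCreateSamplerYcbcrConversion(device, &ci, nullptr, pConversion);
}
#endif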
4541
4542 void ResourceTracker::on_vkDestroySamplerYcbcrConversion(void* context, VkDevice device,
4543 VkSamplerYcbcrConversion ycbcrConversion,
4544 const VkAllocationCallbacks* pAllocator) {
4545 VkEncoder* enc = (VkEncoder*)context;
4546 if (ycbcrConversion != VK_YCBCR_CONVERSION_DO_NOTHING) {
4547 enc->vkDestroySamplerYcbcrConversion(device, ycbcrConversion, pAllocator,
4548 true /* do lock */);
4549 }
4550 }
4551
4552 VkResult ResourceTracker::on_vkCreateSamplerYcbcrConversionKHR(
4553 void* context, VkResult, VkDevice device, const VkSamplerYcbcrConversionCreateInfo* pCreateInfo,
4554 const VkAllocationCallbacks* pAllocator, VkSamplerYcbcrConversion* pYcbcrConversion) {
4555 VkSamplerYcbcrConversionCreateInfo localCreateInfo = vk_make_orphan_copy(*pCreateInfo);
4556
4557 #if defined(VK_USE_PLATFORM_ANDROID_KHR)
4558 const VkExternalFormatANDROID* extFormatAndroidPtr =
4559 vk_find_struct_const(pCreateInfo, EXTERNAL_FORMAT_ANDROID);
4560 if (extFormatAndroidPtr) {
4561 if (extFormatAndroidPtr->externalFormat == DRM_FORMAT_RGB565) {
4562 // We don't support external formats on host and it causes RGB565
4563 // to fail in CtsGraphicsTestCases android.graphics.cts.BasicVulkanGpuTest
4564 // when passed as an external format.
4565 // We may consider doing this for all external formats.
4566 // See b/134771579.
4567 *pYcbcrConversion = VK_YCBCR_CONVERSION_DO_NOTHING;
4568 return VK_SUCCESS;
4569 } else if (extFormatAndroidPtr->externalFormat) {
4570 localCreateInfo.format = vk_format_from_fourcc(extFormatAndroidPtr->externalFormat);
4571 }
4572 }
4573 #endif
4574
4575 VkEncoder* enc = (VkEncoder*)context;
4576 VkResult res = enc->vkCreateSamplerYcbcrConversionKHR(device, &localCreateInfo, pAllocator,
4577 pYcbcrConversion, true /* do lock */);
4578
4579 if (*pYcbcrConversion == VK_YCBCR_CONVERSION_DO_NOTHING) {
4580 mesa_loge(
4581 "FATAL: vkCreateSamplerYcbcrConversionKHR returned a reserved value "
4582 "(VK_YCBCR_CONVERSION_DO_NOTHING)");
4583 abort();
4584 }
4585 return res;
4586 }
4587
4588 void ResourceTracker::on_vkDestroySamplerYcbcrConversionKHR(
4589 void* context, VkDevice device, VkSamplerYcbcrConversion ycbcrConversion,
4590 const VkAllocationCallbacks* pAllocator) {
4591 VkEncoder* enc = (VkEncoder*)context;
4592 if (ycbcrConversion != VK_YCBCR_CONVERSION_DO_NOTHING) {
4593 enc->vkDestroySamplerYcbcrConversionKHR(device, ycbcrConversion, pAllocator,
4594 true /* do lock */);
4595 }
4596 }
4597
4598 VkResult ResourceTracker::on_vkCreateSampler(void* context, VkResult, VkDevice device,
4599 const VkSamplerCreateInfo* pCreateInfo,
4600 const VkAllocationCallbacks* pAllocator,
4601 VkSampler* pSampler) {
4602 VkSamplerCreateInfo localCreateInfo = vk_make_orphan_copy(*pCreateInfo);
4603
4604 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(VK_USE_PLATFORM_FUCHSIA)
4605 vk_struct_chain_iterator structChainIter = vk_make_chain_iterator(&localCreateInfo);
4606 VkSamplerYcbcrConversionInfo localVkSamplerYcbcrConversionInfo;
4607 const VkSamplerYcbcrConversionInfo* samplerYcbcrConversionInfo =
4608 vk_find_struct_const(pCreateInfo, SAMPLER_YCBCR_CONVERSION_INFO);
4609 if (samplerYcbcrConversionInfo) {
4610 if (samplerYcbcrConversionInfo->conversion != VK_YCBCR_CONVERSION_DO_NOTHING) {
4611 localVkSamplerYcbcrConversionInfo = vk_make_orphan_copy(*samplerYcbcrConversionInfo);
4612 vk_append_struct(&structChainIter, &localVkSamplerYcbcrConversionInfo);
4613 }
4614 }
4615
4616 VkSamplerCustomBorderColorCreateInfoEXT localVkSamplerCustomBorderColorCreateInfo;
4617 const VkSamplerCustomBorderColorCreateInfoEXT* samplerCustomBorderColorCreateInfo =
4618 vk_find_struct_const(pCreateInfo, SAMPLER_CUSTOM_BORDER_COLOR_CREATE_INFO_EXT);
4619 if (samplerCustomBorderColorCreateInfo) {
4620 localVkSamplerCustomBorderColorCreateInfo =
4621 vk_make_orphan_copy(*samplerCustomBorderColorCreateInfo);
4622 vk_append_struct(&structChainIter, &localVkSamplerCustomBorderColorCreateInfo);
4623 }
4624 #endif
4625
4626 VkEncoder* enc = (VkEncoder*)context;
4627 return enc->vkCreateSampler(device, &localCreateInfo, pAllocator, pSampler, true /* do lock */);
4628 }
4629
4630 void ResourceTracker::on_vkGetPhysicalDeviceExternalFenceProperties(
4631 void* context, VkPhysicalDevice physicalDevice,
4632 const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo,
4633 VkExternalFenceProperties* pExternalFenceProperties) {
4634 (void)context;
4635 (void)physicalDevice;
4636
4637 pExternalFenceProperties->exportFromImportedHandleTypes = 0;
4638 pExternalFenceProperties->compatibleHandleTypes = 0;
4639 pExternalFenceProperties->externalFenceFeatures = 0;
4640
4641 bool syncFd = pExternalFenceInfo->handleType & VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
4642
4643 if (!syncFd) {
4644 return;
4645 }
4646
4647 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || DETECT_OS_LINUX
4648 pExternalFenceProperties->exportFromImportedHandleTypes =
4649 VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
4650 pExternalFenceProperties->compatibleHandleTypes = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
4651 pExternalFenceProperties->externalFenceFeatures =
4652 VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT | VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT;
4653 #endif
4654 }
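
// Illustrative sketch (not part of this translation unit): querying whether
// sync fd fences are usable, which is exactly what the function above answers
// for the guest.
#if 0
static bool exampleSyncFdFencesSupported(VkPhysicalDevice physicalDevice) {
    VkPhysicalDeviceExternalFenceInfo fenceInfo = {
        .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO,
        .pNext = nullptr,
        .handleType = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT,
    };
    VkExternalFenceProperties props = {
        .sType = VK_STRUCTURE_TYPE_EXTERNAL_FENCE_PROPERTIES,
    };
    vkGetPhysicalDeviceExternalFenceProperties(physicalDevice, &fenceInfo, &props);
    return (props.externalFenceFeatures & VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT) != 0;
}
#endif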
4655
4656 void ResourceTracker::on_vkGetPhysicalDeviceExternalFencePropertiesKHR(
4657 void* context, VkPhysicalDevice physicalDevice,
4658 const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo,
4659 VkExternalFenceProperties* pExternalFenceProperties) {
4660 on_vkGetPhysicalDeviceExternalFenceProperties(context, physicalDevice, pExternalFenceInfo,
4661 pExternalFenceProperties);
4662 }
4663
4664 VkResult ResourceTracker::on_vkCreateFence(void* context, VkResult input_result, VkDevice device,
4665 const VkFenceCreateInfo* pCreateInfo,
4666 const VkAllocationCallbacks* pAllocator,
4667 VkFence* pFence) {
4668 VkEncoder* enc = (VkEncoder*)context;
4669 VkFenceCreateInfo finalCreateInfo = *pCreateInfo;
4670
4671 const VkExportFenceCreateInfo* exportFenceInfoPtr =
4672 vk_find_struct_const(pCreateInfo, EXPORT_FENCE_CREATE_INFO);
4673
4674 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || DETECT_OS_LINUX
4675 bool exportSyncFd = exportFenceInfoPtr && (exportFenceInfoPtr->handleTypes &
4676 VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT);
4677 #endif
4678
4679 input_result =
4680 enc->vkCreateFence(device, &finalCreateInfo, pAllocator, pFence, true /* do lock */);
4681
4682 if (input_result != VK_SUCCESS) return input_result;
4683
4684 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || DETECT_OS_LINUX
4685 if (exportSyncFd) {
4686 if (!mFeatureInfo.hasVirtioGpuNativeSync) {
4687 mesa_logd("%s: ensure sync device\n", __func__);
4688 ensureSyncDeviceFd();
4689 }
4690
4691 mesa_logd("%s: getting fence info\n", __func__);
4692 std::lock_guard<std::recursive_mutex> lock(mLock);
4693 auto it = info_VkFence.find(*pFence);
4694
4695 if (it == info_VkFence.end()) return VK_ERROR_INITIALIZATION_FAILED;
4696
4697 auto& info = it->second;
4698
4699 info.external = true;
4700 info.exportFenceCreateInfo = *exportFenceInfoPtr;
4701 mesa_logd("%s: info set (fence still -1). fence: %p\n", __func__, (void*)(*pFence));
4702 // syncFd is still -1 because we expect user to explicitly
4703 // export it via vkGetFenceFdKHR
4704 }
4705 #endif
4706
4707 return input_result;
4708 }
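
// Illustrative sketch (not part of this translation unit): creating an
// exportable fence and then pulling out a sync fd, matching the comment above
// that the fd stays -1 until vkGetFenceFdKHR is called. Hypothetical names.
#if 0
static VkResult exampleCreateAndExportFence(VkDevice device, VkFence* pFence, int* pSyncFd) {
    VkExportFenceCreateInfo exportCi = {
        .sType = VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO,
        .pNext = nullptr,
        .handleTypes = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT,
    };
    VkFenceCreateInfo fenceCi = {
        .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
        .pNext = &exportCi,
        .flags = 0,
    };
    VkResult res = vkCreateFence(device, &fenceCi, nullptr, pFence);
    if (res != VK_SUCCESS) return res;

    VkFenceGetFdInfoKHR getFdInfo = {
        .sType = VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR,
        .pNext = nullptr,
        .fence = *pFence,
        .handleType = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT,
    };
    return vkGetFenceFdKHR(device, &getFdInfo, pSyncFd);
}
#endif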
4709
4710 void ResourceTracker::on_vkDestroyFence(void* context, VkDevice device, VkFence fence,
4711 const VkAllocationCallbacks* pAllocator) {
4712 VkEncoder* enc = (VkEncoder*)context;
4713 enc->vkDestroyFence(device, fence, pAllocator, true /* do lock */);
4714 }
4715
4716 VkResult ResourceTracker::on_vkResetFences(void* context, VkResult, VkDevice device,
4717 uint32_t fenceCount, const VkFence* pFences) {
4718 VkEncoder* enc = (VkEncoder*)context;
4719 VkResult res = enc->vkResetFences(device, fenceCount, pFences, true /* do lock */);
4720
4721 if (res != VK_SUCCESS) return res;
4722
4723 if (!fenceCount) return res;
4724
4725 // Permanence: temporary
4726 // on fence reset, close the fence fd
4727 // and act like we need to GetFenceFdKHR/ImportFenceFdKHR again
4728 std::lock_guard<std::recursive_mutex> lock(mLock);
4729 for (uint32_t i = 0; i < fenceCount; ++i) {
4730 VkFence fence = pFences[i];
4731 auto it = info_VkFence.find(fence);
4732 auto& info = it->second;
4733 if (!info.external) continue;
4734
4735 #if GFXSTREAM_ENABLE_GUEST_GOLDFISH
4736 if (info.syncFd && *info.syncFd >= 0) {
4737 mesa_logd("%s: resetting fence. make fd -1\n", __func__);
4738 goldfish_sync_signal(*info.syncFd);
4739 mSyncHelper->close(*info.syncFd);
4740 }
4741 info.syncFd.reset();
4742 #endif
4743 }
4744
4745 return res;
4746 }
4747
4748 VkResult ResourceTracker::on_vkImportFenceFdKHR(void* context, VkResult, VkDevice device,
4749 const VkImportFenceFdInfoKHR* pImportFenceFdInfo) {
4750 (void)context;
4751 (void)device;
4752 (void)pImportFenceFdInfo;
4753
4754 // Transference: copy
4755 // meaning dup() the incoming fd
4756
4757 bool hasFence = pImportFenceFdInfo->fence != VK_NULL_HANDLE;
4758
4759 if (!hasFence) return VK_ERROR_OUT_OF_HOST_MEMORY;
4760
4761 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || DETECT_OS_LINUX
4762
4763 bool syncFdImport = pImportFenceFdInfo->handleType & VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
4764
4765 if (!syncFdImport) {
4766 mesa_loge("%s: VK_ERROR_OUT_OF_HOST_MEMORY: no sync fd import\n", __func__);
4767 return VK_ERROR_OUT_OF_HOST_MEMORY;
4768 }
4769
4770 std::lock_guard<std::recursive_mutex> lock(mLock);
4771 auto it = info_VkFence.find(pImportFenceFdInfo->fence);
4772 if (it == info_VkFence.end()) {
4773 mesa_loge("%s: VK_ERROR_OUT_OF_HOST_MEMORY: no fence info\n", __func__);
4774 return VK_ERROR_OUT_OF_HOST_MEMORY;
4775 }
4776
4777 auto& info = it->second;
4778
4779 #if GFXSTREAM_ENABLE_GUEST_GOLDFISH
4780 if (info.syncFd && *info.syncFd >= 0) {
4781 mesa_logd("%s: previous sync fd exists, close it\n", __func__);
4782 goldfish_sync_signal(*info.syncFd);
4783 mSyncHelper->close(*info.syncFd);
4784 }
4785 #endif
4786
4787 if (pImportFenceFdInfo->fd < 0) {
4788 mesa_logd("%s: import -1, set to -1 and exit\n", __func__);
4789 info.syncFd = -1;
4790 } else {
4791 mesa_logd("%s: import actual fd, dup and close()\n", __func__);
4792
4793 int fenceCopy = mSyncHelper->dup(pImportFenceFdInfo->fd);
4794 if (fenceCopy < 0) {
4795 mesa_loge("Failed to dup() import sync fd.");
4796 return VK_ERROR_OUT_OF_HOST_MEMORY;
4797 }
4798
4799 info.syncFd = fenceCopy;
4800
4801 mSyncHelper->close(pImportFenceFdInfo->fd);
4802 }
4803 return VK_SUCCESS;
4804 #else
4805 return VK_ERROR_OUT_OF_HOST_MEMORY;
4806 #endif
4807 }
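
// Illustrative sketch (not part of this translation unit): since the import
// above has copy transference (the implementation dup()s the incoming fd and
// closes the original), a caller that wants to keep using its sync fd should
// hand over a duplicate. Hypothetical names; dup() comes from <unistd.h>.
#if 0
static VkResult exampleImportKeepingFd(VkDevice device, VkFence fence, int syncFd) {
    VkImportFenceFdInfoKHR importInfo = {
        .sType = VK_STRUCTURE_TYPE_IMPORT_FENCE_FD_INFO_KHR,
        .pNext = nullptr,
        .fence = fence,
        .flags = VK_FENCE_IMPORT_TEMPORARY_BIT,
        .handleType = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT,
        .fd = dup(syncFd),  // ownership of the dup passes to the driver
    };
    return vkImportFenceFdKHR(device, &importInfo);
}
#endif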
4808
4809 VkResult ResourceTracker::on_vkGetFenceFdKHR(void* context, VkResult, VkDevice device,
4810 const VkFenceGetFdInfoKHR* pGetFdInfo, int* pFd) {
4811 // export operation.
4812 // first check if fence is signaled
4813 // then if so, return -1
4814 // else, queue work
4815
4816 VkEncoder* enc = (VkEncoder*)context;
4817
4818 bool hasFence = pGetFdInfo->fence != VK_NULL_HANDLE;
4819
4820 if (!hasFence) {
4821 mesa_loge("%s: VK_ERROR_OUT_OF_HOST_MEMORY: no fence\n", __func__);
4822 return VK_ERROR_OUT_OF_HOST_MEMORY;
4823 }
4824
4825 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || DETECT_OS_LINUX
4826 bool syncFdExport = pGetFdInfo->handleType & VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
4827
4828 if (!syncFdExport) {
4829 mesa_loge("%s: VK_ERROR_OUT_OF_HOST_MEMORY: no sync fd fence\n", __func__);
4830 return VK_ERROR_OUT_OF_HOST_MEMORY;
4831 }
4832
4833 VkResult currentFenceStatus =
4834 enc->vkGetFenceStatus(device, pGetFdInfo->fence, true /* do lock */);
4835
4836 if (VK_ERROR_DEVICE_LOST == currentFenceStatus) { // Other error
4837 mesa_loge("%s: VK_ERROR_DEVICE_LOST: Other error\n", __func__);
4838 *pFd = -1;
4839 return VK_ERROR_DEVICE_LOST;
4840 }
4841
4842 if (VK_NOT_READY == currentFenceStatus || VK_SUCCESS == currentFenceStatus) {
4843 // Fence is valid. We also create a new sync fd for a signaled
4844 // fence, because ANGLE will use the returned fd directly to
4845 // implement eglDupNativeFenceFDANDROID, where -1 is only returned
4846 // when error occurs.
4847 std::lock_guard<std::recursive_mutex> lock(mLock);
4848
4849 auto it = info_VkFence.find(pGetFdInfo->fence);
4850 if (it == info_VkFence.end()) {
4851 mesa_loge("%s: VK_ERROR_OUT_OF_HOST_MEMORY: no fence info\n", __func__);
4852 return VK_ERROR_OUT_OF_HOST_MEMORY;
4853 }
4854
4855 auto& info = it->second;
4856
4857 bool syncFdCreated = info.external && (info.exportFenceCreateInfo.handleTypes &
4858 VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT);
4859
4860 if (!syncFdCreated) {
4861 mesa_loge("%s: VK_ERROR_OUT_OF_HOST_MEMORY: no sync fd created\n", __func__);
4862 return VK_ERROR_OUT_OF_HOST_MEMORY;
4863 }
4864
4865 if (mFeatureInfo.hasVirtioGpuNativeSync) {
4866 VkResult result;
4867 int64_t osHandle;
4868 uint64_t hostFenceHandle = get_host_u64_VkFence(pGetFdInfo->fence);
4869
4870 result = createFence(device, hostFenceHandle, osHandle);
4871 if (result != VK_SUCCESS) return result;
4872
4873 *pFd = osHandle;
4874 } else {
4875 #if GFXSTREAM_ENABLE_GUEST_GOLDFISH
4876 goldfish_sync_queue_work(
4877 mSyncDeviceFd, get_host_u64_VkFence(pGetFdInfo->fence) /* the handle */,
4878 GOLDFISH_SYNC_VULKAN_SEMAPHORE_SYNC /* thread handle (doubling as type field) */,
4879 pFd);
4880 #endif
4881 }
4882
4883 // relinquish ownership
4884 info.syncFd.reset();
4885
4886 mesa_logd("%s: got fd: %d\n", __func__, *pFd);
4887 return VK_SUCCESS;
4888 }
4889 return VK_ERROR_DEVICE_LOST;
4890 #else
4891 return VK_ERROR_OUT_OF_HOST_MEMORY;
4892 #endif
4893 }
4894
4895 VkResult ResourceTracker::on_vkGetFenceStatus(void* context, VkResult input_result, VkDevice device,
4896 VkFence fence) {
4897 VkEncoder* enc = (VkEncoder*)context;
4898
4899 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || DETECT_OS_LINUX
4900 {
4901 std::unique_lock<std::recursive_mutex> lock(mLock);
4902
4903 auto fenceInfoIt = info_VkFence.find(fence);
4904 if (fenceInfoIt == info_VkFence.end()) {
4905             mesa_loge("Failed to find VkFence:%p", (void*)fence);
4906 return VK_NOT_READY;
4907 }
4908 auto& fenceInfo = fenceInfoIt->second;
4909
4910 if (fenceInfo.syncFd) {
4911 if (*fenceInfo.syncFd == -1) {
4912 return VK_SUCCESS;
4913 }
4914
4915             bool syncFdSignaled = mSyncHelper->wait(*fenceInfo.syncFd, /*timeout=*/0) == 0;
4916 return syncFdSignaled ? VK_SUCCESS : VK_NOT_READY;
4917 }
4918 }
4919 #endif
4920
4921 return enc->vkGetFenceStatus(device, fence, /*doLock=*/true);
4922 }
4923
4924 VkResult ResourceTracker::on_vkWaitForFences(void* context, VkResult, VkDevice device,
4925 uint32_t fenceCount, const VkFence* pFences,
4926 VkBool32 waitAll, uint64_t timeout) {
4927 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || DETECT_OS_LINUX
4928 (void)context;
4929 std::vector<int> fencesExternalSyncFds;
4930 std::vector<VkFence> fencesNonExternal;
4931
4932 std::unique_lock<std::recursive_mutex> lock(mLock);
4933
4934 for (uint32_t i = 0; i < fenceCount; ++i) {
4935 auto it = info_VkFence.find(pFences[i]);
4936 if (it == info_VkFence.end()) continue;
4937 const auto& info = it->second;
4938 if (info.syncFd) {
4939 if (*info.syncFd >= 0) {
4940 fencesExternalSyncFds.push_back(*info.syncFd);
4941 }
4942 } else {
4943 fencesNonExternal.push_back(pFences[i]);
4944 }
4945 }
4946
4947 lock.unlock();
4948
4949 for (auto fd : fencesExternalSyncFds) {
4950 mesa_logd("Waiting on sync fd: %d", fd);
4951
4952 std::chrono::steady_clock::time_point begin = std::chrono::steady_clock::now();
4953         // syncHelper works in milliseconds; the Vulkan timeout is in nanoseconds
4954         mSyncHelper->wait(fd, DIV_ROUND_UP(timeout, 1000 * 1000));
4955 std::chrono::steady_clock::time_point end = std::chrono::steady_clock::now();
4956
4957 uint64_t timeTaken =
4958 std::chrono::duration_cast<std::chrono::nanoseconds>(end - begin).count();
4959 if (timeTaken >= timeout) {
4960 return VK_TIMEOUT;
4961 }
4962
4963 timeout -= timeTaken;
4964 mesa_logd("Done waiting on sync fd: %d", fd);
4965
4966 #if GFXSTREAM_SYNC_DEBUG
4967 mSyncHelper->debugPrint(fd);
4968 #endif
4969 }
4970
4971 if (!fencesNonExternal.empty()) {
4972 auto hostConn = ResourceTracker::threadingCallbacks.hostConnectionGetFunc();
4973 auto vkEncoder = ResourceTracker::threadingCallbacks.vkEncoderGetFunc(hostConn);
4974 mesa_logd("vkWaitForFences to host");
4975 return vkEncoder->vkWaitForFences(device, fencesNonExternal.size(),
4976 fencesNonExternal.data(), waitAll, timeout,
4977 true /* do lock */);
4978 }
4979
4980 return VK_SUCCESS;
4981
4982 #else
4983 VkEncoder* enc = (VkEncoder*)context;
4984 return enc->vkWaitForFences(device, fenceCount, pFences, waitAll, timeout, true /* do lock */);
4985 #endif
4986 }
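
// Illustrative sketch (not part of this translation unit): the unit handling
// used in the wait loop above. Vulkan timeouts are in nanoseconds while the
// sync helper takes milliseconds, so the conversion rounds up to avoid
// returning before the deadline.
#if 0
static int exampleTimeoutNsToMs(uint64_t timeoutNs) {
    // e.g. 1'500'000 ns -> 2 ms; 1 ns -> 1 ms; 0 ns -> 0 ms.
    return (int)DIV_ROUND_UP(timeoutNs, 1000 * 1000);
}
#endif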
4987
4988 VkResult ResourceTracker::on_vkCreateDescriptorPool(void* context, VkResult, VkDevice device,
4989 const VkDescriptorPoolCreateInfo* pCreateInfo,
4990 const VkAllocationCallbacks* pAllocator,
4991 VkDescriptorPool* pDescriptorPool) {
4992 VkEncoder* enc = (VkEncoder*)context;
4993
4994 VkResult res = enc->vkCreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool,
4995 true /* do lock */);
4996
4997 if (res != VK_SUCCESS) return res;
4998
4999 VkDescriptorPool pool = *pDescriptorPool;
5000
5001 struct goldfish_VkDescriptorPool* dp = as_goldfish_VkDescriptorPool(pool);
5002 dp->allocInfo = new DescriptorPoolAllocationInfo;
5003 dp->allocInfo->device = device;
5004 dp->allocInfo->createFlags = pCreateInfo->flags;
5005 dp->allocInfo->maxSets = pCreateInfo->maxSets;
5006 dp->allocInfo->usedSets = 0;
5007
5008 for (uint32_t i = 0; i < pCreateInfo->poolSizeCount; ++i) {
5009 dp->allocInfo->descriptorCountInfo.push_back({
5010 pCreateInfo->pPoolSizes[i].type, pCreateInfo->pPoolSizes[i].descriptorCount,
5011 0, /* used */
5012 });
5013 }
5014
5015 if (mFeatureInfo.hasVulkanBatchedDescriptorSetUpdate) {
5016 std::vector<uint64_t> poolIds(pCreateInfo->maxSets);
5017
5018 uint32_t count = pCreateInfo->maxSets;
5019 enc->vkCollectDescriptorPoolIdsGOOGLE(device, pool, &count, poolIds.data(),
5020 true /* do lock */);
5021
5022 dp->allocInfo->freePoolIds = poolIds;
5023 }
5024
5025 return res;
5026 }
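
// Illustrative sketch (not part of this translation unit): a pool whose
// per-type sizes feed the descriptorCountInfo accounting above. Hypothetical
// counts.
#if 0
static VkResult exampleCreatePool(VkDevice device, VkDescriptorPool* pPool) {
    const VkDescriptorPoolSize poolSizes[] = {
        {VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 16},
        {VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 16},
    };
    VkDescriptorPoolCreateInfo ci = {
        .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
        .pNext = nullptr,
        // Allows vkFreeDescriptorSets() on individual sets; see
        // descriptorPoolSupportsIndividualFreeLocked() below.
        .flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
        .maxSets = 8,
        .poolSizeCount = 2,
        .pPoolSizes = poolSizes,
    };
    return vkCreateDescriptorPool(device, &ci, nullptr, pPool);
}
#endif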
5027
5028 void ResourceTracker::on_vkDestroyDescriptorPool(void* context, VkDevice device,
5029 VkDescriptorPool descriptorPool,
5030 const VkAllocationCallbacks* pAllocator) {
5031 if (!descriptorPool) return;
5032
5033 VkEncoder* enc = (VkEncoder*)context;
5034
5035 clearDescriptorPoolAndUnregisterDescriptorSets(context, device, descriptorPool);
5036
5037 enc->vkDestroyDescriptorPool(device, descriptorPool, pAllocator, true /* do lock */);
5038 }
5039
5040 VkResult ResourceTracker::on_vkResetDescriptorPool(void* context, VkResult, VkDevice device,
5041 VkDescriptorPool descriptorPool,
5042 VkDescriptorPoolResetFlags flags) {
5043 if (!descriptorPool) return VK_ERROR_INITIALIZATION_FAILED;
5044
5045 VkEncoder* enc = (VkEncoder*)context;
5046
5047 VkResult res = enc->vkResetDescriptorPool(device, descriptorPool, flags, true /* do lock */);
5048
5049 if (res != VK_SUCCESS) return res;
5050
5051 clearDescriptorPoolAndUnregisterDescriptorSets(context, device, descriptorPool);
5052 return res;
5053 }
5054
5055 VkResult ResourceTracker::on_vkAllocateDescriptorSets(
5056 void* context, VkResult, VkDevice device, const VkDescriptorSetAllocateInfo* pAllocateInfo,
5057 VkDescriptorSet* pDescriptorSets) {
5058 VkEncoder* enc = (VkEncoder*)context;
5059 auto ci = pAllocateInfo;
5060 auto sets = pDescriptorSets;
5061 if (mFeatureInfo.hasVulkanBatchedDescriptorSetUpdate) {
5062 // Using the pool ID's we collected earlier from the host
5063 VkResult poolAllocResult = validateAndApplyVirtualDescriptorSetAllocation(ci, sets);
5064
5065 if (poolAllocResult != VK_SUCCESS) return poolAllocResult;
5066
5067 for (uint32_t i = 0; i < ci->descriptorSetCount; ++i) {
5068 register_VkDescriptorSet(sets[i]);
5069 VkDescriptorSetLayout setLayout =
5070 as_goldfish_VkDescriptorSet(sets[i])->reified->setLayout;
5071
5072 // Need to add ref to the set layout in the virtual case
5073 // because the set itself might not be realized on host at the
5074 // same time
5075 struct goldfish_VkDescriptorSetLayout* dsl =
5076 as_goldfish_VkDescriptorSetLayout(setLayout);
5077 ++dsl->layoutInfo->refcount;
5078 }
5079 } else {
5080 VkResult allocRes = enc->vkAllocateDescriptorSets(device, ci, sets, true /* do lock */);
5081
5082 if (allocRes != VK_SUCCESS) return allocRes;
5083
5084 for (uint32_t i = 0; i < ci->descriptorSetCount; ++i) {
5085 applyDescriptorSetAllocation(ci->descriptorPool, ci->pSetLayouts[i]);
5086 fillDescriptorSetInfoForPool(ci->descriptorPool, ci->pSetLayouts[i], sets[i]);
5087 }
5088 }
5089
5090 return VK_SUCCESS;
5091 }
5092
5093 VkResult ResourceTracker::on_vkFreeDescriptorSets(void* context, VkResult, VkDevice device,
5094 VkDescriptorPool descriptorPool,
5095 uint32_t descriptorSetCount,
5096 const VkDescriptorSet* pDescriptorSets) {
5097 VkEncoder* enc = (VkEncoder*)context;
5098
5099 // Bit of robustness so that we can double free descriptor sets
5100 // and do other invalid usages
5101 // https://github.com/KhronosGroup/Vulkan-Docs/issues/1070
5102 // (people expect VK_SUCCESS to always be returned by vkFreeDescriptorSets)
5103 std::vector<VkDescriptorSet> toActuallyFree;
5104 {
5105 std::lock_guard<std::recursive_mutex> lock(mLock);
5106
5107 // Pool was destroyed
5108 if (info_VkDescriptorPool.find(descriptorPool) == info_VkDescriptorPool.end()) {
5109 return VK_SUCCESS;
5110 }
5111
5112 if (!descriptorPoolSupportsIndividualFreeLocked(descriptorPool)) return VK_SUCCESS;
5113
5114         std::vector<VkDescriptorSet> existingDescriptorSets;
5116
5117 // Check if this descriptor set was in the pool's set of allocated descriptor sets,
5118 // to guard against double free (Double free is allowed by the client)
5119 {
5120 auto allocedSets = as_goldfish_VkDescriptorPool(descriptorPool)->allocInfo->allocedSets;
5121
5122 for (uint32_t i = 0; i < descriptorSetCount; ++i) {
5123 if (allocedSets.end() == allocedSets.find(pDescriptorSets[i])) {
5124 mesa_loge(
5125 "%s: Warning: descriptor set %p not found in pool. Was this "
5126 "double-freed?\n",
5127 __func__, (void*)pDescriptorSets[i]);
5128 continue;
5129 }
5130
5131 auto it = info_VkDescriptorSet.find(pDescriptorSets[i]);
5132 if (it == info_VkDescriptorSet.end()) continue;
5133
5134 existingDescriptorSets.push_back(pDescriptorSets[i]);
5135 }
5136 }
5137
5138 for (auto set : existingDescriptorSets) {
5139 if (removeDescriptorSetFromPool(set,
5140 mFeatureInfo.hasVulkanBatchedDescriptorSetUpdate)) {
5141 toActuallyFree.push_back(set);
5142 }
5143 }
5144
5145 if (toActuallyFree.empty()) return VK_SUCCESS;
5146 }
5147
5148 if (mFeatureInfo.hasVulkanBatchedDescriptorSetUpdate) {
5149 // In the batched set update case, decrement refcount on the set layout
5150 // and only free on host if we satisfied a pending allocation on the
5151 // host.
5152 for (uint32_t i = 0; i < toActuallyFree.size(); ++i) {
5153 VkDescriptorSetLayout setLayout =
5154 as_goldfish_VkDescriptorSet(toActuallyFree[i])->reified->setLayout;
5155 decDescriptorSetLayoutRef(context, device, setLayout, nullptr);
5156 }
5157 freeDescriptorSetsIfHostAllocated(enc, device, (uint32_t)toActuallyFree.size(),
5158 toActuallyFree.data());
5159 } else {
5160 // In the non-batched set update case, just free them directly.
5161 enc->vkFreeDescriptorSets(device, descriptorPool, (uint32_t)toActuallyFree.size(),
5162 toActuallyFree.data(), true /* do lock */);
5163 }
5164 return VK_SUCCESS;
5165 }
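
// Illustrative sketch (not part of this translation unit): the double-free
// tolerance described above means freeing the same set twice stays
// well-defined from the app's point of view; both calls return VK_SUCCESS and
// the second is logged and dropped after the pool lookup.
#if 0
static void exampleDoubleFree(VkDevice device, VkDescriptorPool pool, VkDescriptorSet set) {
    vkFreeDescriptorSets(device, pool, 1, &set);  // actually frees
    vkFreeDescriptorSets(device, pool, 1, &set);  // warned about and ignored
}
#endif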
5166
5167 VkResult ResourceTracker::on_vkCreateDescriptorSetLayout(
5168 void* context, VkResult, VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
5169 const VkAllocationCallbacks* pAllocator, VkDescriptorSetLayout* pSetLayout) {
5170 VkEncoder* enc = (VkEncoder*)context;
5171
5172 VkResult res = enc->vkCreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout,
5173 true /* do lock */);
5174
5175 if (res != VK_SUCCESS) return res;
5176
5177 struct goldfish_VkDescriptorSetLayout* dsl = as_goldfish_VkDescriptorSetLayout(*pSetLayout);
5178 dsl->layoutInfo = new DescriptorSetLayoutInfo;
5179 for (uint32_t i = 0; i < pCreateInfo->bindingCount; ++i) {
5180 dsl->layoutInfo->bindings.push_back(pCreateInfo->pBindings[i]);
5181 }
5182 dsl->layoutInfo->refcount = 1;
5183
5184 return res;
5185 }
5186
5187 void ResourceTracker::on_vkUpdateDescriptorSets(void* context, VkDevice device,
5188 uint32_t descriptorWriteCount,
5189 const VkWriteDescriptorSet* pDescriptorWrites,
5190 uint32_t descriptorCopyCount,
5191 const VkCopyDescriptorSet* pDescriptorCopies) {
5192 VkEncoder* enc = (VkEncoder*)context;
5193
5194 std::vector<VkDescriptorImageInfo> transformedImageInfos;
5195 std::vector<VkWriteDescriptorSet> transformedWrites(descriptorWriteCount);
5196
5197 memcpy(transformedWrites.data(), pDescriptorWrites,
5198 sizeof(VkWriteDescriptorSet) * descriptorWriteCount);
5199
5200 size_t imageInfosNeeded = 0;
5201 for (uint32_t i = 0; i < descriptorWriteCount; ++i) {
5202 if (!isDescriptorTypeImageInfo(transformedWrites[i].descriptorType)) continue;
5203 if (!transformedWrites[i].pImageInfo) continue;
5204
5205 imageInfosNeeded += transformedWrites[i].descriptorCount;
5206 }
5207
5208 transformedImageInfos.resize(imageInfosNeeded);
5209
5210 size_t imageInfoIndex = 0;
5211 for (uint32_t i = 0; i < descriptorWriteCount; ++i) {
5212 if (!isDescriptorTypeImageInfo(transformedWrites[i].descriptorType)) continue;
5213 if (!transformedWrites[i].pImageInfo) continue;
5214
5215 for (uint32_t j = 0; j < transformedWrites[i].descriptorCount; ++j) {
5216 transformedImageInfos[imageInfoIndex] = transformedWrites[i].pImageInfo[j];
5217 ++imageInfoIndex;
5218 }
5219 transformedWrites[i].pImageInfo =
5220 &transformedImageInfos[imageInfoIndex - transformedWrites[i].descriptorCount];
5221 }
5222
5223 {
5224 // Validate and filter samplers
5225 std::lock_guard<std::recursive_mutex> lock(mLock);
5226 size_t imageInfoIndex = 0;
5227 for (uint32_t i = 0; i < descriptorWriteCount; ++i) {
5228 if (!isDescriptorTypeImageInfo(transformedWrites[i].descriptorType)) continue;
5229 if (!transformedWrites[i].pImageInfo) continue;
5230
5231 bool isImmutableSampler = descriptorBindingIsImmutableSampler(
5232 transformedWrites[i].dstSet, transformedWrites[i].dstBinding);
5233
5234 for (uint32_t j = 0; j < transformedWrites[i].descriptorCount; ++j) {
5235 if (isImmutableSampler) {
5236 transformedImageInfos[imageInfoIndex].sampler = 0;
5237 }
5238 transformedImageInfos[imageInfoIndex] =
5239 filterNonexistentSampler(transformedImageInfos[imageInfoIndex]);
5240 ++imageInfoIndex;
5241 }
5242 }
5243 }
5244
5245 if (mFeatureInfo.hasVulkanBatchedDescriptorSetUpdate) {
5246 for (uint32_t i = 0; i < descriptorWriteCount; ++i) {
5247 VkDescriptorSet set = transformedWrites[i].dstSet;
5248 doEmulatedDescriptorWrite(&transformedWrites[i],
5249 as_goldfish_VkDescriptorSet(set)->reified);
5250 }
5251
5252 for (uint32_t i = 0; i < descriptorCopyCount; ++i) {
5253 doEmulatedDescriptorCopy(
5254 &pDescriptorCopies[i],
5255 as_goldfish_VkDescriptorSet(pDescriptorCopies[i].srcSet)->reified,
5256 as_goldfish_VkDescriptorSet(pDescriptorCopies[i].dstSet)->reified);
5257 }
5258 } else {
5259 enc->vkUpdateDescriptorSets(device, descriptorWriteCount, transformedWrites.data(),
5260 descriptorCopyCount, pDescriptorCopies, true /* do lock */);
5261 }
5262 }
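
// Illustrative sketch (not part of this translation unit): a write that takes
// the image-info transformation path above; the tracker copies each
// VkDescriptorImageInfo so it can clear immutable samplers and filter stale
// ones before forwarding. Hypothetical handles.
#if 0
static void exampleWriteSampledImage(VkDevice device, VkDescriptorSet set, VkImageView view,
                                     VkSampler sampler) {
    VkDescriptorImageInfo imageInfo = {
        .sampler = sampler,
        .imageView = view,
        .imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
    };
    VkWriteDescriptorSet write = {
        .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
        .pNext = nullptr,
        .dstSet = set,
        .dstBinding = 0,
        .dstArrayElement = 0,
        .descriptorCount = 1,
        .descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
        .pImageInfo = &imageInfo,
    };
    vkUpdateDescriptorSets(device, 1, &write, 0, nullptr);
}
#endif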
5263
5264 void ResourceTracker::on_vkDestroyImage(void* context, VkDevice device, VkImage image,
5265 const VkAllocationCallbacks* pAllocator) {
5266 #ifdef VK_USE_PLATFORM_ANDROID_KHR
5267 {
5268         std::lock_guard<std::recursive_mutex> lock(mLock);  // do not guard the encoder call;
5269                                                              // it may cause deadlock, b/243339973
5270
5271 // Wait for any pending QSRIs to prevent a race between the Gfxstream host
5272 // potentially processing the below `vkDestroyImage()` from the VK encoder
5273 // command stream before processing a previously submitted
5274 // `VIRTIO_GPU_NATIVE_SYNC_VULKAN_QSRI_EXPORT` from the virtio-gpu command
5275 // stream which relies on the image existing.
5276 auto imageInfoIt = info_VkImage.find(image);
5277 if (imageInfoIt != info_VkImage.end()) {
5278 auto& imageInfo = imageInfoIt->second;
5279 for (int syncFd : imageInfo.pendingQsriSyncFds) {
5280 int syncWaitRet = mSyncHelper->wait(syncFd, 3000);
5281 if (syncWaitRet < 0) {
5282 mesa_loge("%s: Failed to wait for pending QSRI sync: sterror: %s errno: %d",
5283 __func__, strerror(errno), errno);
5284 }
5285
5286 #if GFXSTREAM_SYNC_DEBUG
5287 mSyncHelper->debugPrint(syncFd);
5288 #endif
5289 mSyncHelper->close(syncFd);
5290 }
5291 imageInfo.pendingQsriSyncFds.clear();
5292 }
5293 }
5294 #endif
5295 VkEncoder* enc = (VkEncoder*)context;
5296 #if defined(LINUX_GUEST_BUILD)
5297 auto imageInfoIt = info_VkImage.find(image);
5298 if (imageInfoIt != info_VkImage.end()) {
5299 auto& imageInfo = imageInfoIt->second;
5300 if (imageInfo.linearPeerImage) {
5301 enc->vkDestroyImage(device, imageInfo.linearPeerImage, pAllocator, true /* do lock */);
5302 }
5303 }
5304 #endif
5305 enc->vkDestroyImage(device, image, pAllocator, true /* do lock */);
5306 }
5307
5308 void ResourceTracker::on_vkGetImageMemoryRequirements(void* context, VkDevice device, VkImage image,
5309 VkMemoryRequirements* pMemoryRequirements) {
5310 std::unique_lock<std::recursive_mutex> lock(mLock);
5311
5312 auto it = info_VkImage.find(image);
5313 if (it == info_VkImage.end()) return;
5314
5315 auto& info = it->second;
5316
5317 if (info.baseRequirementsKnown) {
5318 *pMemoryRequirements = info.baseRequirements;
5319 return;
5320 }
5321
5322 lock.unlock();
5323
5324 VkEncoder* enc = (VkEncoder*)context;
5325
5326 enc->vkGetImageMemoryRequirements(device, image, pMemoryRequirements, true /* do lock */);
5327
5328 lock.lock();
5329
5330 transformImageMemoryRequirementsForGuestLocked(image, pMemoryRequirements);
5331
5332 info.baseRequirementsKnown = true;
5333 info.baseRequirements = *pMemoryRequirements;
5334 }
5335
5336 void ResourceTracker::on_vkGetImageMemoryRequirements2(void* context, VkDevice device,
5337 const VkImageMemoryRequirementsInfo2* pInfo,
5338 VkMemoryRequirements2* pMemoryRequirements) {
5339 VkEncoder* enc = (VkEncoder*)context;
5340 enc->vkGetImageMemoryRequirements2(device, pInfo, pMemoryRequirements, true /* do lock */);
5341 transformImageMemoryRequirements2ForGuest(pInfo->image, pMemoryRequirements);
5342 }
5343
5344 void ResourceTracker::on_vkGetImageMemoryRequirements2KHR(
5345 void* context, VkDevice device, const VkImageMemoryRequirementsInfo2* pInfo,
5346 VkMemoryRequirements2* pMemoryRequirements) {
5347 VkEncoder* enc = (VkEncoder*)context;
5348 enc->vkGetImageMemoryRequirements2KHR(device, pInfo, pMemoryRequirements, true /* do lock */);
5349 transformImageMemoryRequirements2ForGuest(pInfo->image, pMemoryRequirements);
5350 }
5351
5352 void ResourceTracker::on_vkGetImageSubresourceLayout(void* context, VkDevice device, VkImage image,
5353 const VkImageSubresource* pSubresource,
5354 VkSubresourceLayout* pLayout) {
5355 VkEncoder* enc = (VkEncoder*)context;
5356 VkImage targetImage = image;
5357 #if defined(LINUX_GUEST_BUILD)
5358 auto it = info_VkImage.find(image);
5359 if (it == info_VkImage.end()) return;
5360 const auto& info = it->second;
5361 if (info.linearPeerImage) {
5362 targetImage = info.linearPeerImage;
5363 }
5364 #endif
5365 enc->vkGetImageSubresourceLayout(device, targetImage, pSubresource, pLayout,
5366 true /* do lock */);
5367 }
5368
5369 VkResult ResourceTracker::on_vkBindImageMemory(void* context, VkResult, VkDevice device,
5370 VkImage image, VkDeviceMemory memory,
5371 VkDeviceSize memoryOffset) {
5372 VkEncoder* enc = (VkEncoder*)context;
5373 // Do not forward calls with invalid handles to host.
5374 if (info_VkDeviceMemory.find(memory) == info_VkDeviceMemory.end() ||
5375 info_VkImage.find(image) == info_VkImage.end()) {
5376 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
5377 }
5378 return enc->vkBindImageMemory(device, image, memory, memoryOffset, true /* do lock */);
5379 }
5380
5381 VkResult ResourceTracker::on_vkBindImageMemory2(void* context, VkResult, VkDevice device,
5382 uint32_t bindingCount,
5383 const VkBindImageMemoryInfo* pBindInfos) {
5384 VkEncoder* enc = (VkEncoder*)context;
5385
5386 if (bindingCount < 1 || !pBindInfos) {
5387 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
5388 }
5389
5390 for (uint32_t i = 0; i < bindingCount; i++) {
5391 const VkBindImageMemoryInfo& bimi = pBindInfos[i];
5392
5393 auto imageIt = info_VkImage.find(bimi.image);
5394 if (imageIt == info_VkImage.end()) {
5395 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
5396 }
5397
5398 if (bimi.memory != VK_NULL_HANDLE) {
5399 auto memoryIt = info_VkDeviceMemory.find(bimi.memory);
5400 if (memoryIt == info_VkDeviceMemory.end()) {
5401 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
5402 }
5403 }
5404 }
5405
5406 return enc->vkBindImageMemory2(device, bindingCount, pBindInfos, true /* do lock */);
5407 }
5408
5409 VkResult ResourceTracker::on_vkBindImageMemory2KHR(void* context, VkResult result, VkDevice device,
5410 uint32_t bindingCount,
5411 const VkBindImageMemoryInfo* pBindInfos) {
5412 return on_vkBindImageMemory2(context, result, device, bindingCount, pBindInfos);
5413 }
5414
5415 VkResult ResourceTracker::on_vkCreateBuffer(void* context, VkResult, VkDevice device,
5416 const VkBufferCreateInfo* pCreateInfo,
5417 const VkAllocationCallbacks* pAllocator,
5418 VkBuffer* pBuffer) {
5419 VkEncoder* enc = (VkEncoder*)context;
5420
5421 VkBufferCreateInfo localCreateInfo = vk_make_orphan_copy(*pCreateInfo);
5422 vk_struct_chain_iterator structChainIter = vk_make_chain_iterator(&localCreateInfo);
5423 VkExternalMemoryBufferCreateInfo localExtBufCi;
5424
5425 const VkExternalMemoryBufferCreateInfo* extBufCiPtr =
5426 vk_find_struct_const(pCreateInfo, EXTERNAL_MEMORY_BUFFER_CREATE_INFO);
5427 if (extBufCiPtr) {
5428 localExtBufCi = vk_make_orphan_copy(*extBufCiPtr);
5429 vk_append_struct(&structChainIter, &localExtBufCi);
5430 }
5431
5432 VkBufferOpaqueCaptureAddressCreateInfo localCapAddrCi;
5433 const VkBufferOpaqueCaptureAddressCreateInfo* pCapAddrCi =
5434 vk_find_struct_const(pCreateInfo, BUFFER_OPAQUE_CAPTURE_ADDRESS_CREATE_INFO);
5435 if (pCapAddrCi) {
5436 localCapAddrCi = vk_make_orphan_copy(*pCapAddrCi);
5437 vk_append_struct(&structChainIter, &localCapAddrCi);
5438 }
5439
5440 VkBufferDeviceAddressCreateInfoEXT localDevAddrCi;
5441 const VkBufferDeviceAddressCreateInfoEXT* pDevAddrCi =
5442 vk_find_struct_const(pCreateInfo, BUFFER_DEVICE_ADDRESS_CREATE_INFO_EXT);
5443 if (pDevAddrCi) {
5444 localDevAddrCi = vk_make_orphan_copy(*pDevAddrCi);
5445 vk_append_struct(&structChainIter, &localDevAddrCi);
5446 }
5447
5448 #ifdef VK_USE_PLATFORM_FUCHSIA
5449 std::optional<zx::vmo> vmo;
5450 bool isSysmemBackedMemory = false;
5451
5452 if (extBufCiPtr &&
5453 (extBufCiPtr->handleTypes & VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA)) {
5454 isSysmemBackedMemory = true;
5455 }
5456
5457 const VkBufferCollectionBufferCreateInfoFUCHSIA* extBufferCollectionPtr =
5458 vk_find_struct_const(pCreateInfo, BUFFER_COLLECTION_BUFFER_CREATE_INFO_FUCHSIA);
5459
5460 if (extBufferCollectionPtr) {
5461 const auto& collection =
5462 *reinterpret_cast<fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(
5463 extBufferCollectionPtr->collection);
5464 uint32_t index = extBufferCollectionPtr->index;
5465
5466 auto result = collection->WaitForBuffersAllocated();
5467 if (result.ok() && result->status == ZX_OK) {
5468 auto& info = result->buffer_collection_info;
5469 if (index < info.buffer_count) {
5470 vmo = std::make_optional<zx::vmo>(std::move(info.buffers[index].vmo));
5471 }
5472 } else {
5473 mesa_loge("WaitForBuffersAllocated failed: %d %d", result.status(),
5474 GET_STATUS_SAFE(result, status));
5475 }
5476
5477 if (vmo && vmo->is_valid()) {
5478 fidl::Arena arena;
5479 fuchsia_hardware_goldfish::wire::CreateBuffer2Params createParams(arena);
5480 createParams.set_size(arena, pCreateInfo->size)
5481 .set_memory_property(fuchsia_hardware_goldfish::wire::kMemoryPropertyDeviceLocal);
5482
5483 auto result = mControlDevice->CreateBuffer2(std::move(*vmo), createParams);
5484 if (!result.ok() ||
5485             (result->is_error() && result->error_value() != ZX_ERR_ALREADY_EXISTS)) {
5486 mesa_loge("CreateBuffer2 failed: %d:%d", result.status(),
5487 GET_STATUS_SAFE(result, error_value()));
5488 }
5489 isSysmemBackedMemory = true;
5490 }
5491 }
5492 #endif // VK_USE_PLATFORM_FUCHSIA
5493
5494 VkResult res;
5495 VkMemoryRequirements memReqs;
5496
5497 if (supportsCreateResourcesWithRequirements()) {
5498 res = enc->vkCreateBufferWithRequirementsGOOGLE(device, &localCreateInfo, pAllocator,
5499 pBuffer, &memReqs, true /* do lock */);
5500 } else {
5501 res =
5502 enc->vkCreateBuffer(device, &localCreateInfo, pAllocator, pBuffer, true /* do lock */);
5503 }
5504
5505 if (res != VK_SUCCESS) return res;
5506
5507 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || DETECT_OS_LINUX
5508 if (mCaps.vulkanCapset.colorBufferMemoryIndex == 0xFFFFFFFF) {
5509 mCaps.vulkanCapset.colorBufferMemoryIndex = getColorBufferMemoryIndex(context, device);
5510 }
5511 if (extBufCiPtr &&
5512 ((extBufCiPtr->handleTypes &
5513 VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID) ||
5514 (extBufCiPtr->handleTypes & VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT))) {
5515 updateMemoryTypeBits(&memReqs.memoryTypeBits, mCaps.vulkanCapset.colorBufferMemoryIndex);
5516 }
5517 #endif
5518
5519 std::lock_guard<std::recursive_mutex> lock(mLock);
5520
5521 auto it = info_VkBuffer.find(*pBuffer);
5522 if (it == info_VkBuffer.end()) return VK_ERROR_INITIALIZATION_FAILED;
5523
5524 auto& info = it->second;
5525
5526 info.createInfo = localCreateInfo;
5527 info.createInfo.pNext = nullptr;
5528
5529 if (supportsCreateResourcesWithRequirements()) {
5530 info.baseRequirementsKnown = true;
5531 info.baseRequirements = memReqs;
5532 }
5533
5534 if (extBufCiPtr) {
5535 info.external = true;
5536 info.externalCreateInfo = *extBufCiPtr;
5537 }
5538
5539 #ifdef VK_USE_PLATFORM_FUCHSIA
5540 if (isSysmemBackedMemory) {
5541 info.isSysmemBackedMemory = true;
5542 }
5543 #endif
5544
5545 return res;
5546 }
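
// Illustrative sketch (not part of this translation unit): an exportable
// buffer create, which sets `info.external` above and, on Android/Linux,
// widens the reported memoryTypeBits with the color buffer memory index.
// Hypothetical names.
#if 0
static VkResult exampleCreateExternalBuffer(VkDevice device, VkDeviceSize size,
                                            VkBuffer* pBuffer) {
    VkExternalMemoryBufferCreateInfo extCi = {
        .sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO,
        .pNext = nullptr,
        .handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
    };
    VkBufferCreateInfo bufferCi = {
        .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
        .pNext = &extCi,
        .flags = 0,
        .size = size,
        .usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT,
        .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
    };
    return vkCreateBuffer(device, &bufferCi, nullptr, pBuffer);
}
#endif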
5547
5548 void ResourceTracker::on_vkDestroyBuffer(void* context, VkDevice device, VkBuffer buffer,
5549 const VkAllocationCallbacks* pAllocator) {
5550 VkEncoder* enc = (VkEncoder*)context;
5551 enc->vkDestroyBuffer(device, buffer, pAllocator, true /* do lock */);
5552 }
5553
5554 void ResourceTracker::on_vkGetBufferMemoryRequirements(void* context, VkDevice device,
5555 VkBuffer buffer,
5556 VkMemoryRequirements* pMemoryRequirements) {
5557 std::unique_lock<std::recursive_mutex> lock(mLock);
5558
5559 auto it = info_VkBuffer.find(buffer);
5560 if (it == info_VkBuffer.end()) return;
5561
5562 auto& info = it->second;
5563
5564 if (info.baseRequirementsKnown) {
5565 *pMemoryRequirements = info.baseRequirements;
5566 return;
5567 }
5568
5569 lock.unlock();
5570
5571 VkEncoder* enc = (VkEncoder*)context;
5572 enc->vkGetBufferMemoryRequirements(device, buffer, pMemoryRequirements, true /* do lock */);
5573
5574 lock.lock();
5575
5576 info.baseRequirementsKnown = true;
5577 info.baseRequirements = *pMemoryRequirements;
5578 }
5579
5580 void ResourceTracker::on_vkGetBufferMemoryRequirements2(
5581 void* context, VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo,
5582 VkMemoryRequirements2* pMemoryRequirements) {
5583 VkEncoder* enc = (VkEncoder*)context;
5584 enc->vkGetBufferMemoryRequirements2(device, pInfo, pMemoryRequirements, true /* do lock */);
5585 transformBufferMemoryRequirements2ForGuest(pInfo->buffer, pMemoryRequirements);
5586 }
5587
5588 void ResourceTracker::on_vkGetBufferMemoryRequirements2KHR(
5589 void* context, VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo,
5590 VkMemoryRequirements2* pMemoryRequirements) {
5591 VkEncoder* enc = (VkEncoder*)context;
5592 enc->vkGetBufferMemoryRequirements2KHR(device, pInfo, pMemoryRequirements, true /* do lock */);
5593 transformBufferMemoryRequirements2ForGuest(pInfo->buffer, pMemoryRequirements);
5594 }
5595
5596 VkResult ResourceTracker::on_vkBindBufferMemory(void* context, VkResult, VkDevice device,
5597 VkBuffer buffer, VkDeviceMemory memory,
5598 VkDeviceSize memoryOffset) {
5599 VkEncoder* enc = (VkEncoder*)context;
5600 return enc->vkBindBufferMemory(device, buffer, memory, memoryOffset, true /* do lock */);
5601 }
5602
5603 VkResult ResourceTracker::on_vkBindBufferMemory2(void* context, VkResult, VkDevice device,
5604 uint32_t bindInfoCount,
5605 const VkBindBufferMemoryInfo* pBindInfos) {
5606 VkEncoder* enc = (VkEncoder*)context;
5607 return enc->vkBindBufferMemory2(device, bindInfoCount, pBindInfos, true /* do lock */);
5608 }
5609
5610 VkResult ResourceTracker::on_vkBindBufferMemory2KHR(void* context, VkResult, VkDevice device,
5611 uint32_t bindInfoCount,
5612 const VkBindBufferMemoryInfo* pBindInfos) {
5613 VkEncoder* enc = (VkEncoder*)context;
5614 return enc->vkBindBufferMemory2KHR(device, bindInfoCount, pBindInfos, true /* do lock */);
5615 }
5616
5617 VkResult ResourceTracker::on_vkCreateSemaphore(void* context, VkResult input_result,
5618 VkDevice device,
5619 const VkSemaphoreCreateInfo* pCreateInfo,
5620 const VkAllocationCallbacks* pAllocator,
5621 VkSemaphore* pSemaphore) {
5622 (void)input_result;
5623 VkEncoder* enc = (VkEncoder*)context;
5624
5625 VkSemaphoreCreateInfo finalCreateInfo = *pCreateInfo;
5626
5627 const VkExportSemaphoreCreateInfoKHR* exportSemaphoreInfoPtr =
5628 vk_find_struct_const(pCreateInfo, EXPORT_SEMAPHORE_CREATE_INFO);
5629
5630 #ifdef VK_USE_PLATFORM_FUCHSIA
5631 bool exportEvent =
5632 exportSemaphoreInfoPtr && (exportSemaphoreInfoPtr->handleTypes &
5633 VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA);
5634
5635 if (exportEvent) {
5636 finalCreateInfo.pNext = nullptr;
5637         // If a timeline semaphore type was also requested, keep that struct chained.
5638 const VkSemaphoreTypeCreateInfo* typeCi =
5639 vk_find_struct_const(pCreateInfo, SEMAPHORE_TYPE_CREATE_INFO);
5640 if (typeCi) finalCreateInfo.pNext = typeCi;
5641 }
5642 #endif
5643
5644 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || DETECT_OS_LINUX
5645 bool exportSyncFd = exportSemaphoreInfoPtr && (exportSemaphoreInfoPtr->handleTypes &
5646 VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT);
5647
5648 if (exportSyncFd) {
5649 finalCreateInfo.pNext = nullptr;
5650         // If a timeline semaphore type was also requested, keep that struct chained.
5651 const VkSemaphoreTypeCreateInfo* typeCi =
5652 vk_find_struct_const(pCreateInfo, SEMAPHORE_TYPE_CREATE_INFO);
5653 if (typeCi) finalCreateInfo.pNext = typeCi;
5654 }
5655 #endif
5656 input_result = enc->vkCreateSemaphore(device, &finalCreateInfo, pAllocator, pSemaphore,
5657 true /* do lock */);
5658
5659 zx_handle_t event_handle = ZX_HANDLE_INVALID;
5660
5661 #ifdef VK_USE_PLATFORM_FUCHSIA
5662 if (exportEvent) {
5663 zx_event_create(0, &event_handle);
5664 }
5665 #endif
5666
5667 std::lock_guard<std::recursive_mutex> lock(mLock);
5668
5669 auto it = info_VkSemaphore.find(*pSemaphore);
5670 if (it == info_VkSemaphore.end()) return VK_ERROR_INITIALIZATION_FAILED;
5671
5672 auto& info = it->second;
5673
5674 info.device = device;
5675 info.eventHandle = event_handle;
5676 #ifdef VK_USE_PLATFORM_FUCHSIA
5677 info.eventKoid = getEventKoid(info.eventHandle);
5678 #endif
5679
5680 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || DETECT_OS_LINUX
5681 if (exportSyncFd) {
5682 if (mFeatureInfo.hasVirtioGpuNativeSync &&
5683 !(mCaps.params[kParamFencePassing] && mCaps.vulkanCapset.externalSync)) {
5684 VkResult result;
5685 int64_t osHandle;
5686 uint64_t hostFenceHandle = get_host_u64_VkSemaphore(*pSemaphore);
5687
5688 result = createFence(device, hostFenceHandle, osHandle);
5689 if (result != VK_SUCCESS) return result;
5690
5691 info.syncFd.emplace(osHandle);
5692 } else {
5693 #if GFXSTREAM_ENABLE_GUEST_GOLDFISH
5694 ensureSyncDeviceFd();
5695
5696             // exportSyncFd is already known to be true on this path.
5697             int syncFd = -1;
5698             goldfish_sync_queue_work(
5699                 mSyncDeviceFd, get_host_u64_VkSemaphore(*pSemaphore) /* the handle */,
5700                 GOLDFISH_SYNC_VULKAN_SEMAPHORE_SYNC /* thread handle (doubling as type field) */,
5701                 &syncFd);
5702             info.syncFd.emplace(syncFd);
5705 #endif
5706 }
5707 }
5708 #endif
5709
5710 return VK_SUCCESS;
5711 }
5712
5713 void ResourceTracker::on_vkDestroySemaphore(void* context, VkDevice device, VkSemaphore semaphore,
5714 const VkAllocationCallbacks* pAllocator) {
5715 VkEncoder* enc = (VkEncoder*)context;
5716 enc->vkDestroySemaphore(device, semaphore, pAllocator, true /* do lock */);
5717 }
5718
5719 // https://registry.khronos.org/vulkan/specs/latest/html/vkspec.html#vkGetSemaphoreFdKHR
5720 // Each call to vkGetSemaphoreFdKHR must create a new file descriptor and transfer ownership
5721 // of it to the application. To avoid leaking resources, the application must release ownership
5722 // of the file descriptor when it is no longer needed.
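// Illustrative application-side usage (hypothetical sketch, not part of this driver):
// the caller owns the returned fd and must close it when done.
//
//   VkSemaphoreGetFdInfoKHR getFdInfo = {
//       .sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR,
//       .semaphore = semaphore,
//       .handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT,
//   };
//   int fd = -1;
//   if (vkGetSemaphoreFdKHR(device, &getFdInfo, &fd) == VK_SUCCESS) {
//       /* ... pass fd along or wait on it ... */
//       close(fd);  // ownership was transferred to the application
//   }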
5723 VkResult ResourceTracker::on_vkGetSemaphoreFdKHR(void* context, VkResult, VkDevice device,
5724 const VkSemaphoreGetFdInfoKHR* pGetFdInfo,
5725 int* pFd) {
5726 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || DETECT_OS_LINUX
5727 VkEncoder* enc = (VkEncoder*)context;
5728 bool getSyncFd = pGetFdInfo->handleType & VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
5729
5730 if (getSyncFd) {
5731 if (mCaps.params[kParamFencePassing] && mCaps.vulkanCapset.externalSync) {
5732 uint64_t syncId = ++mAtomicId;
5733 int64_t osHandle = -1;
5734
5735 VkResult result = enc->vkGetSemaphoreGOOGLE(device, pGetFdInfo->semaphore, syncId,
5736 true /* do lock */);
5737 if (result != VK_SUCCESS) {
5738 mesa_loge("unable to get the semaphore");
5739 return result;
5740 }
5741
5742 result = acquireSync(syncId, osHandle);
5743 if (result != VK_SUCCESS) {
5744 mesa_loge("unable to create host sync object");
5745 return result;
5746 }
5747
5748 *pFd = (int)osHandle;
5749 return VK_SUCCESS;
5750 } else {
5751             // Doesn't this assume that the sync file descriptor generated via the
5752             // non-fence-passing path during "on_vkCreateSemaphore" is the same one
5753             // that would be generated via the guest's "vkGetSemaphoreFdKHR" call?
5754 std::lock_guard<std::recursive_mutex> lock(mLock);
5755 auto it = info_VkSemaphore.find(pGetFdInfo->semaphore);
5756 if (it == info_VkSemaphore.end()) return VK_ERROR_OUT_OF_HOST_MEMORY;
5757 auto& semInfo = it->second;
5758             // syncFd is expected to have a value here.
5759 *pFd = mSyncHelper->dup(semInfo.syncFd.value_or(-1));
5760 return VK_SUCCESS;
5761 }
5762 } else {
5763 // opaque fd
5764 int hostFd = 0;
5765 int32_t size = 0;
5766 VkResult result = enc->vkGetSemaphoreFdKHR(device, pGetFdInfo, &hostFd, true /* do lock */);
5767 if (result != VK_SUCCESS) {
5768 return result;
5769 }
5770 *pFd = os_create_anonymous_file(size, "vk_opaque_fd");
5771 int write_result = write(*pFd, &hostFd, sizeof(hostFd));
5772 (void)write_result;
5773 return VK_SUCCESS;
5774 }
5775 #else
5776 (void)context;
5777 (void)device;
5778 (void)pGetFdInfo;
5779 (void)pFd;
5780 return VK_ERROR_INCOMPATIBLE_DRIVER;
5781 #endif
5782 }
5783
5784 VkResult ResourceTracker::on_vkImportSemaphoreFdKHR(
5785 void* context, VkResult input_result, VkDevice device,
5786 const VkImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo) {
5787 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || DETECT_OS_LINUX
5788 VkEncoder* enc = (VkEncoder*)context;
5789 if (input_result != VK_SUCCESS) {
5790 return input_result;
5791 }
5792
5793 if (pImportSemaphoreFdInfo->handleType & VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT) {
5794 std::lock_guard<std::recursive_mutex> lock(mLock);
5795
5796 auto semaphoreIt = info_VkSemaphore.find(pImportSemaphoreFdInfo->semaphore);
5797 auto& info = semaphoreIt->second;
5798
5799 if (info.syncFd.value_or(-1) >= 0) {
5800 mSyncHelper->close(info.syncFd.value());
5801 }
5802
5803 info.syncFd.emplace(pImportSemaphoreFdInfo->fd);
5804
5805 return VK_SUCCESS;
5806 } else {
5807 int fd = pImportSemaphoreFdInfo->fd;
5808 int err = lseek(fd, 0, SEEK_SET);
5809 if (err == -1) {
5810 mesa_loge("lseek fail on import semaphore");
5811 }
5812 int hostFd = 0;
5813 int read_result = read(fd, &hostFd, sizeof(hostFd));
5814 (void)read_result;
5815 VkImportSemaphoreFdInfoKHR tmpInfo = *pImportSemaphoreFdInfo;
5816 tmpInfo.fd = hostFd;
5817 VkResult result = enc->vkImportSemaphoreFdKHR(device, &tmpInfo, true /* do lock */);
5818 mSyncHelper->close(fd);
5819 return result;
5820 }
5821 #else
5822 (void)context;
5823 (void)input_result;
5824 (void)device;
5825 (void)pImportSemaphoreFdInfo;
5826 return VK_ERROR_INCOMPATIBLE_DRIVER;
5827 #endif
5828 }
5829
5830 VkResult ResourceTracker::on_vkGetMemoryFdPropertiesKHR(
5831 void* context, VkResult, VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType, int fd,
5832 VkMemoryFdPropertiesKHR* pMemoryFdProperties) {
5833 #if DETECT_OS_LINUX && !defined(VK_USE_PLATFORM_ANDROID_KHR)
5834 if (!(handleType & VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT)) {
5835 mesa_loge("%s: VK_KHR_external_memory_fd behavior not defined for handleType: 0x%x\n",
5836 __func__, handleType);
5837 return VK_ERROR_INVALID_EXTERNAL_HANDLE;
5838 }
5839 // Sanity-check device
5840 std::lock_guard<std::recursive_mutex> lock(mLock);
5841 auto deviceIt = info_VkDevice.find(device);
5842 if (deviceIt == info_VkDevice.end()) {
5843 return VK_ERROR_OUT_OF_HOST_MEMORY;
5844 }
5845 // TODO: Verify FD valid ?
5846 (void)fd;
5847
5848 if (mCaps.vulkanCapset.colorBufferMemoryIndex == 0xFFFFFFFF) {
5849 mCaps.vulkanCapset.colorBufferMemoryIndex = getColorBufferMemoryIndex(context, device);
5850 }
5851
5852 updateMemoryTypeBits(&pMemoryFdProperties->memoryTypeBits,
5853 mCaps.vulkanCapset.colorBufferMemoryIndex);
5854
5855 return VK_SUCCESS;
5856 #else
5857 (void)context;
5858 (void)device;
5859 (void)handleType;
5860 (void)fd;
5861 (void)pMemoryFdProperties;
5862 return VK_ERROR_INCOMPATIBLE_DRIVER;
5863 #endif
5864 }
5865
5866 VkResult ResourceTracker::on_vkGetMemoryFdKHR(void* context, VkResult, VkDevice device,
5867 const VkMemoryGetFdInfoKHR* pGetFdInfo, int* pFd) {
5868 #if DETECT_OS_LINUX && !defined(VK_USE_PLATFORM_ANDROID_KHR)
5869 if (!pGetFdInfo) return VK_ERROR_OUT_OF_HOST_MEMORY;
5870 if (!pGetFdInfo->memory) return VK_ERROR_OUT_OF_HOST_MEMORY;
5871
5872 if (!(pGetFdInfo->handleType & (VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
5873 VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT))) {
5874 mesa_loge("%s: Export operation not defined for handleType: 0x%x\n", __func__,
5875 pGetFdInfo->handleType);
5876 return VK_ERROR_OUT_OF_HOST_MEMORY;
5877 }
5878 // Sanity-check device
5879 std::lock_guard<std::recursive_mutex> lock(mLock);
5880 auto deviceIt = info_VkDevice.find(device);
5881 if (deviceIt == info_VkDevice.end()) {
5882 return VK_ERROR_OUT_OF_HOST_MEMORY;
5883 }
5884
5885 auto deviceMemIt = info_VkDeviceMemory.find(pGetFdInfo->memory);
5886 if (deviceMemIt == info_VkDeviceMemory.end()) {
5887 return VK_ERROR_OUT_OF_HOST_MEMORY;
5888 }
5889 auto& info = deviceMemIt->second;
5890
5891 if (!info.blobPtr) {
5892 mesa_loge("%s: VkDeviceMemory does not have a resource available for export.\n", __func__);
5893 return VK_ERROR_OUT_OF_HOST_MEMORY;
5894 }
5895
5896 VirtGpuExternalHandle handle{};
5897 int ret = info.blobPtr->exportBlob(handle);
5898 if (ret != 0 || handle.osHandle < 0) {
5899 mesa_loge("%s: Failed to export host resource to FD.\n", __func__);
5900 return VK_ERROR_OUT_OF_HOST_MEMORY;
5901 }
5902 *pFd = handle.osHandle;
5903 return VK_SUCCESS;
5904 #else
5905 (void)context;
5906 (void)device;
5907 (void)pGetFdInfo;
5908 (void)pFd;
5909 return VK_ERROR_INCOMPATIBLE_DRIVER;
5910 #endif
5911 }
5912
5913 void ResourceTracker::flushCommandBufferPendingCommandsBottomUp(
5914 void* context, VkQueue queue, const std::vector<VkCommandBuffer>& workingSet) {
5915 if (workingSet.empty()) return;
5916
5917 std::vector<VkCommandBuffer> nextLevel;
5918 for (auto commandBuffer : workingSet) {
5919 struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
5920 forAllObjects(cb->subObjects, [&nextLevel](void* secondary) {
5921 nextLevel.push_back((VkCommandBuffer)secondary);
5922 });
5923 }
5924
5925 flushCommandBufferPendingCommandsBottomUp(context, queue, nextLevel);
5926
5927 // After this point, everyone at the previous level has been flushed
5928 for (auto cmdbuf : workingSet) {
5929 struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(cmdbuf);
5930
5931 // There's no pending commands here, skip. (case 1)
5932 if (!cb->privateStream) continue;
5933
5934         unsigned char* writtenPtr = nullptr;
5935 size_t written = 0;
5936 CommandBufferStagingStream* cmdBufStream =
5937 static_cast<CommandBufferStagingStream*>(cb->privateStream);
5938 cmdBufStream->getWritten(&writtenPtr, &written);
5939
5940 // There's no pending commands here, skip. (case 2, stream created but no new recordings)
5941 if (!written) continue;
5942
5943 // There are pending commands to flush.
5944 VkEncoder* enc = (VkEncoder*)context;
5945 VkDeviceMemory deviceMemory = cmdBufStream->getDeviceMemory();
5946 VkDeviceSize dataOffset = 0;
5947 if (mFeatureInfo.hasVulkanAuxCommandMemory) {
5948             // For suballocations, deviceMemory is an alias VkDeviceMemory;
5949             // get the underlying VkDeviceMemory for the given alias.
5950 deviceMemoryTransform_tohost(&deviceMemory, 1 /*memoryCount*/, &dataOffset,
5951 1 /*offsetCount*/, nullptr /*size*/, 0 /*sizeCount*/,
5952 nullptr /*typeIndex*/, 0 /*typeIndexCount*/,
5953 nullptr /*typeBits*/, 0 /*typeBitCounts*/);
5954
5955 // mark stream as flushing before flushing commands
5956 cmdBufStream->markFlushing();
5957 enc->vkQueueFlushCommandsFromAuxMemoryGOOGLE(queue, cmdbuf, deviceMemory, dataOffset,
5958 written, true /*do lock*/);
5959 } else {
5960 enc->vkQueueFlushCommandsGOOGLE(queue, cmdbuf, written, (const void*)writtenPtr,
5961 true /* do lock */);
5962 }
5963         // Reset this stream.
5964         // Flushing happens on vkQueueSubmit, and the Vulkan spec states that on
5965         // queue submit applications MUST NOT attempt to modify the command buffer
5966         // in any way, as the device may be processing the commands recorded to it.
5967         // It is therefore safe to call reset() here. The command buffer associated
5968         // with this stream will only leave the pending state after queue submit is
5969         // complete and the host has read the data.
5971 cmdBufStream->reset();
5972 }
5973 }
5974
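// Worked example (hypothetical): if primary command buffer P recorded secondaries
// S1 and S2 via vkCmdExecuteCommands, then S1 and S2 appear in P's subObjects.
// The recursion above flushes {S1, S2} first and P last, so the host receives the
// secondaries' pending commands before it replays the primary that executes them.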
5975 uint32_t ResourceTracker::syncEncodersForQueue(VkQueue queue, VkEncoder* currentEncoder) {
5976 if (!supportsAsyncQueueSubmit()) {
5977 return 0;
5978 }
5979
5980 struct goldfish_VkQueue* q = as_goldfish_VkQueue(queue);
5981 if (!q) return 0;
5982
5983 auto lastEncoder = q->lastUsedEncoder;
5984
5985 if (lastEncoder == currentEncoder) return 0;
5986
5987 currentEncoder->incRef();
5988
5989 q->lastUsedEncoder = currentEncoder;
5990
5991 if (!lastEncoder) return 0;
5992
5993 auto oldSeq = q->sequenceNumber;
5994 q->sequenceNumber += 2;
5995 lastEncoder->vkQueueHostSyncGOOGLE(queue, false, oldSeq + 1, true /* do lock */);
5996 lastEncoder->flush();
5997 currentEncoder->vkQueueHostSyncGOOGLE(queue, true, oldSeq + 2, true /* do lock */);
5998
5999 if (lastEncoder->decRef()) {
6000 q->lastUsedEncoder = nullptr;
6001 }
6002
6003 return 0;
6004 }
6005
6006 template <class VkSubmitInfoType>
6007 void ResourceTracker::flushStagingStreams(void* context, VkQueue queue, uint32_t submitCount,
6008 const VkSubmitInfoType* pSubmits) {
6009 std::vector<VkCommandBuffer> toFlush;
6010 for (uint32_t i = 0; i < submitCount; ++i) {
6011 for (uint32_t j = 0; j < getCommandBufferCount(pSubmits[i]); ++j) {
6012 toFlush.push_back(getCommandBuffer(pSubmits[i], j));
6013 }
6014 }
6015
6016 std::unordered_set<VkDescriptorSet> pendingSets;
6017 collectAllPendingDescriptorSetsBottomUp(toFlush, pendingSets);
6018 commitDescriptorSetUpdates(context, queue, pendingSets);
6019
6020 flushCommandBufferPendingCommandsBottomUp(context, queue, toFlush);
6021
6022 for (auto cb : toFlush) {
6023 resetCommandBufferPendingTopology(cb);
6024 }
6025 }
6026
6027 VkResult ResourceTracker::on_vkQueueSubmit(void* context, VkResult input_result, VkQueue queue,
6028 uint32_t submitCount, const VkSubmitInfo* pSubmits,
6029 VkFence fence) {
6030 MESA_TRACE_SCOPE("on_vkQueueSubmit");
6031
6032 /* From the Vulkan 1.3.204 spec:
6033 *
6034 * VUID-VkSubmitInfo-pNext-03240
6035 *
6036 * "If the pNext chain of this structure includes a VkTimelineSemaphoreSubmitInfo structure
6037 * and any element of pSignalSemaphores was created with a VkSemaphoreType of
6038 * VK_SEMAPHORE_TYPE_TIMELINE, then its signalSemaphoreValueCount member must equal
6039 * signalSemaphoreCount"
6040 *
6041      * Internally, Mesa WSI creates placeholder semaphores/fences (see the transformVkSemaphore
6042      * functions in gfxstream_vk_private.cpp). We don't want to forward those to the host, since
6043      * there is no host-side Vulkan object associated with the placeholder sync objects.
6044      *
6045      * This behavior can be tested with Zink + glxgears on Linux hosts; it fails without
6046      * this check.
6047 */
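    // Hypothetical illustration of the case handled below: an app chains a
    // VkTimelineSemaphoreSubmitInfo with signalSemaphoreValueCount == 1, then Mesa
    // WSI appends a placeholder binary semaphore, making signalSemaphoreCount == 2.
    // The loop below patches signalSemaphoreValueCount to the actual signal count
    // so the encoded submit satisfies VUID-VkSubmitInfo-pNext-03240.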
6048 for (uint32_t i = 0; i < submitCount; i++) {
6049 VkTimelineSemaphoreSubmitInfo* tssi =
6050 vk_find_struct(const_cast<VkSubmitInfo*>(&pSubmits[i]), TIMELINE_SEMAPHORE_SUBMIT_INFO);
6051
6052 if (tssi) {
6053 uint32_t count = getSignalSemaphoreCount(pSubmits[i]);
6054 if (count != tssi->signalSemaphoreValueCount) {
6055 tssi->signalSemaphoreValueCount = count;
6056 }
6057 }
6058 }
6059
6060 return on_vkQueueSubmitTemplate<VkSubmitInfo>(context, input_result, queue, submitCount,
6061 pSubmits, fence);
6062 }
6063
6064 VkResult ResourceTracker::on_vkQueueSubmit2(void* context, VkResult input_result, VkQueue queue,
6065 uint32_t submitCount, const VkSubmitInfo2* pSubmits,
6066 VkFence fence) {
6067 MESA_TRACE_SCOPE("on_vkQueueSubmit2");
6068 return on_vkQueueSubmitTemplate<VkSubmitInfo2>(context, input_result, queue, submitCount,
6069 pSubmits, fence);
6070 }
6071
6072 VkResult ResourceTracker::vkQueueSubmitEnc(VkEncoder* enc, VkQueue queue, uint32_t submitCount,
6073 const VkSubmitInfo* pSubmits, VkFence fence) {
6074 if (supportsAsyncQueueSubmit()) {
6075 enc->vkQueueSubmitAsyncGOOGLE(queue, submitCount, pSubmits, fence, true /* do lock */);
6076 return VK_SUCCESS;
6077 } else {
6078 return enc->vkQueueSubmit(queue, submitCount, pSubmits, fence, true /* do lock */);
6079 }
6080 }
6081
6082 VkResult ResourceTracker::vkQueueSubmitEnc(VkEncoder* enc, VkQueue queue, uint32_t submitCount,
6083 const VkSubmitInfo2* pSubmits, VkFence fence) {
6084 if (supportsAsyncQueueSubmit()) {
6085 enc->vkQueueSubmitAsync2GOOGLE(queue, submitCount, pSubmits, fence, true /* do lock */);
6086 return VK_SUCCESS;
6087 } else {
6088 return enc->vkQueueSubmit2(queue, submitCount, pSubmits, fence, true /* do lock */);
6089 }
6090 }
6091
6092 template <typename VkSubmitInfoType>
6093 VkResult ResourceTracker::on_vkQueueSubmitTemplate(void* context, VkResult input_result,
6094 VkQueue queue, uint32_t submitCount,
6095 const VkSubmitInfoType* pSubmits,
6096 VkFence fence) {
6097 flushStagingStreams(context, queue, submitCount, pSubmits);
6098
6099 std::vector<VkSemaphore> pre_signal_semaphores;
6100 std::vector<zx_handle_t> pre_signal_events;
6101 std::vector<int> pre_signal_sync_fds;
6102 std::vector<std::pair<zx_handle_t, zx_koid_t>> post_wait_events;
6103 std::vector<int> post_wait_sync_fds;
6104
6105 VkEncoder* enc = (VkEncoder*)context;
6106
6107 std::unique_lock<std::recursive_mutex> lock(mLock);
6108
6109 for (uint32_t i = 0; i < submitCount; ++i) {
6110 for (uint32_t j = 0; j < getWaitSemaphoreCount(pSubmits[i]); ++j) {
6111 VkSemaphore semaphore = getWaitSemaphore(pSubmits[i], j);
6112 auto it = info_VkSemaphore.find(semaphore);
6113 if (it != info_VkSemaphore.end()) {
6114 auto& semInfo = it->second;
6115 #ifdef VK_USE_PLATFORM_FUCHSIA
6116 if (semInfo.eventHandle) {
6117 pre_signal_events.push_back(semInfo.eventHandle);
6118 pre_signal_semaphores.push_back(semaphore);
6119 }
6120 #endif
6121 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || DETECT_OS_LINUX
6122 if (semInfo.syncFd.has_value()) {
6123 pre_signal_sync_fds.push_back(semInfo.syncFd.value());
6124 pre_signal_semaphores.push_back(semaphore);
6125 }
6126 #endif
6127 }
6128 }
6129 for (uint32_t j = 0; j < getSignalSemaphoreCount(pSubmits[i]); ++j) {
6130 auto it = info_VkSemaphore.find(getSignalSemaphore(pSubmits[i], j));
6131 if (it != info_VkSemaphore.end()) {
6132 auto& semInfo = it->second;
6133 #ifdef VK_USE_PLATFORM_FUCHSIA
6134 if (semInfo.eventHandle) {
6135 post_wait_events.push_back({semInfo.eventHandle, semInfo.eventKoid});
6136 #ifndef FUCHSIA_NO_TRACE
6137 if (semInfo.eventKoid != ZX_KOID_INVALID) {
6138 // TODO(fxbug.dev/42144867): Remove the "semaphore"
6139 // FLOW_END events once it is removed from clients
6140 // (for example, gfx Engine).
6141 TRACE_FLOW_END("gfx", "semaphore", semInfo.eventKoid);
6142 TRACE_FLOW_BEGIN("gfx", "goldfish_post_wait_event", semInfo.eventKoid);
6143 }
6144 #endif
6145 }
6146 #endif
6147 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || DETECT_OS_LINUX
6148 if (semInfo.syncFd.value_or(-1) >= 0) {
6149 post_wait_sync_fds.push_back(semInfo.syncFd.value());
6150 }
6151 #endif
6152 }
6153 }
6154 }
6155 lock.unlock();
6156
6157 if (pre_signal_semaphores.empty()) {
6158 input_result = vkQueueSubmitEnc(enc, queue, submitCount, pSubmits, fence);
6159 if (input_result != VK_SUCCESS) return input_result;
6160 } else {
6161         // Schedule waits on the OS external objects and signal
6162         // the wait semaphores in a separate thread.
6164 #ifdef VK_USE_PLATFORM_FUCHSIA
6165 for (auto event : pre_signal_events) {
6166 preSignalTasks.push_back([event] {
6167 zx_object_wait_one(event, ZX_EVENT_SIGNALED, ZX_TIME_INFINITE, nullptr);
6168 });
6169 }
6170 #endif
6171 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || DETECT_OS_LINUX
6172 for (auto fd : pre_signal_sync_fds) {
6173 // https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/VkImportSemaphoreFdInfoKHR.html
6174 // fd == -1 is treated as already signaled
6175 if (fd != -1) {
6176 mSyncHelper->wait(fd, 3000);
6177 #if GFXSTREAM_SYNC_DEBUG
6178 mSyncHelper->debugPrint(fd);
6179 #endif
6180 }
6181 }
6182 #endif
6183         // Use the original VkSubmitInfo (not VkSubmitInfo2) for the pre-signal submit.
6184 VkSubmitInfo submit_info = {
6185 .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
6186 .waitSemaphoreCount = 0,
6187 .pWaitSemaphores = nullptr,
6188 .pWaitDstStageMask = nullptr,
6189 .signalSemaphoreCount = static_cast<uint32_t>(pre_signal_semaphores.size()),
6190 .pSignalSemaphores = pre_signal_semaphores.data()};
6191 vkQueueSubmitEnc(enc, queue, 1, &submit_info, VK_NULL_HANDLE);
6192 input_result = vkQueueSubmitEnc(enc, queue, submitCount, pSubmits, fence);
6193 if (input_result != VK_SUCCESS) return input_result;
6194 }
6195 lock.lock();
6196 int externalFenceFdToSignal = -1;
6197
6198 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || DETECT_OS_LINUX
6199 if (fence != VK_NULL_HANDLE) {
6200 auto it = info_VkFence.find(fence);
6201 if (it != info_VkFence.end()) {
6202 const auto& info = it->second;
6203 if (info.syncFd && *info.syncFd >= 0) {
6204 externalFenceFdToSignal = *info.syncFd;
6205 }
6206 }
6207 }
6208 #endif
6209 VkResult waitIdleRes = VK_SUCCESS;
6210 if (externalFenceFdToSignal >= 0 || !post_wait_events.empty() || !post_wait_sync_fds.empty()) {
6211 auto hostConn = ResourceTracker::threadingCallbacks.hostConnectionGetFunc();
6212 auto vkEncoder = ResourceTracker::threadingCallbacks.vkEncoderGetFunc(hostConn);
6213 waitIdleRes = vkEncoder->vkQueueWaitIdle(queue, true /* do lock */);
6214 if (VK_SUCCESS == waitIdleRes) {
6215 #ifdef VK_USE_PLATFORM_FUCHSIA
6216 MESA_TRACE_SCOPE("on_vkQueueSubmit::SignalSemaphores");
6217 (void)externalFenceFdToSignal;
6218 for (auto& [event, koid] : post_wait_events) {
6219 #ifndef FUCHSIA_NO_TRACE
6220 if (koid != ZX_KOID_INVALID) {
6221 TRACE_FLOW_END("gfx", "goldfish_post_wait_event", koid);
6222 TRACE_FLOW_BEGIN("gfx", "event_signal", koid);
6223 }
6224 #endif
6225 zx_object_signal(event, 0, ZX_EVENT_SIGNALED);
6226 }
6227 #endif
6228 #if GFXSTREAM_ENABLE_GUEST_GOLDFISH
6229 for (auto& fd : post_wait_sync_fds) {
6230 goldfish_sync_signal(fd);
6231 }
6232
6233 if (externalFenceFdToSignal >= 0) {
6234 mesa_logd("%s: external fence real signal: %d\n", __func__,
6235 externalFenceFdToSignal);
6236 goldfish_sync_signal(externalFenceFdToSignal);
6237 }
6238 #endif
6239 }
6240 }
6241 return waitIdleRes;
6242 }
6243
6244 VkResult ResourceTracker::on_vkQueueWaitIdle(void* context, VkResult, VkQueue queue) {
6245 VkEncoder* enc = (VkEncoder*)context;
6246
6247 // now done waiting, get the host's opinion
6248 return enc->vkQueueWaitIdle(queue, true /* do lock */);
6249 }
6250
6251 #ifdef VK_USE_PLATFORM_ANDROID_KHR
6252 void ResourceTracker::unwrap_VkNativeBufferANDROID(const VkNativeBufferANDROID* inputNativeInfo,
6253 VkNativeBufferANDROID* outputNativeInfo) {
6254 if (!inputNativeInfo || !inputNativeInfo->handle) {
6255 return;
6256 }
6257
6258     if (!outputNativeInfo || !outputNativeInfo->handle) {
6259 mesa_loge("FATAL: Local native buffer info not properly allocated!");
6260 abort();
6261 }
6262
6263 const native_handle_t* nativeHandle = (const native_handle_t*)inputNativeInfo->handle;
6264 *(uint32_t*)(outputNativeInfo->handle) = mGralloc->getHostHandle(nativeHandle);
6265 }
6266
6267 void ResourceTracker::unwrap_VkBindImageMemorySwapchainInfoKHR(
6268 const VkBindImageMemorySwapchainInfoKHR* inputBimsi,
6269 VkBindImageMemorySwapchainInfoKHR* outputBimsi) {
6270 if (!inputBimsi || !inputBimsi->swapchain) {
6271 return;
6272 }
6273
6274 if (!outputBimsi || !outputBimsi->swapchain) {
6275 return;
6276 }
6277
6278     // Android-based swapchains are implemented by the Android framework's
6279     // libvulkan. They only exist within the guest and should not be sent to
6280     // the host.
6281 outputBimsi->swapchain = VK_NULL_HANDLE;
6282 }
6283 #endif
6284
6285 void ResourceTracker::unwrap_vkCreateImage_pCreateInfo(const VkImageCreateInfo* pCreateInfo,
6286 VkImageCreateInfo* local_pCreateInfo) {
6287 #ifdef VK_USE_PLATFORM_ANDROID_KHR
6288 const VkNativeBufferANDROID* inputNativeInfo =
6289 vk_find_struct_const(pCreateInfo, NATIVE_BUFFER_ANDROID);
6290
6291 VkNativeBufferANDROID* outputNativeInfo = vk_find_struct(local_pCreateInfo, NATIVE_BUFFER_ANDROID);
6292
6293 unwrap_VkNativeBufferANDROID(inputNativeInfo, outputNativeInfo);
6294 #endif
6295 }
6296
6297 void ResourceTracker::unwrap_vkAcquireImageANDROID_nativeFenceFd(int fd, int* fd_out) {
6298 #ifdef VK_USE_PLATFORM_ANDROID_KHR
6299 (void)fd_out;
6300 if (fd != -1) {
6301 MESA_TRACE_SCOPE("waitNativeFenceInAcquire");
6302 // Implicit Synchronization
6303 mSyncHelper->wait(fd, 3000);
6304 // From libvulkan's swapchain.cpp:
6305 // """
6306 // NOTE: we're relying on AcquireImageANDROID to close fence_clone,
6307 // even if the call fails. We could close it ourselves on failure, but
6308 // that would create a race condition if the driver closes it on a
6309 // failure path: some other thread might create an fd with the same
6310 // number between the time the driver closes it and the time we close
6311 // it. We must assume one of: the driver *always* closes it even on
6312 // failure, or *never* closes it on failure.
6313 // """
6314         // Therefore, assume the contract where we need to close the fd in this driver.
6315
6316 #if GFXSTREAM_SYNC_DEBUG
6317 mSyncHelper->debugPrint(fd);
6318 #endif
6319 mSyncHelper->close(fd);
6320 }
6321 #endif
6322 }
6323
6324 void ResourceTracker::unwrap_VkBindImageMemory2_pBindInfos(
6325 uint32_t bindInfoCount, const VkBindImageMemoryInfo* inputBindInfos,
6326 VkBindImageMemoryInfo* outputBindInfos) {
6327 #ifdef VK_USE_PLATFORM_ANDROID_KHR
6328 for (uint32_t i = 0; i < bindInfoCount; ++i) {
6329 const VkBindImageMemoryInfo* inputBindInfo = &inputBindInfos[i];
6330 VkBindImageMemoryInfo* outputBindInfo = &outputBindInfos[i];
6331
6332 const VkNativeBufferANDROID* inputNativeInfo =
6333 vk_find_struct_const(inputBindInfo, NATIVE_BUFFER_ANDROID);
6334
6335 VkNativeBufferANDROID* outputNativeInfo = vk_find_struct(outputBindInfo, NATIVE_BUFFER_ANDROID);
6336
6337 unwrap_VkNativeBufferANDROID(inputNativeInfo, outputNativeInfo);
6338
6339 const VkBindImageMemorySwapchainInfoKHR* inputBimsi =
6340 vk_find_struct_const(inputBindInfo, BIND_IMAGE_MEMORY_SWAPCHAIN_INFO_KHR);
6341
6342 VkBindImageMemorySwapchainInfoKHR* outputBimsi = vk_find_struct(outputBindInfo, BIND_IMAGE_MEMORY_SWAPCHAIN_INFO_KHR);
6343
6344 unwrap_VkBindImageMemorySwapchainInfoKHR(inputBimsi, outputBimsi);
6345 }
6346 #endif
6347 }
6348
6349 // Action of vkMapMemoryIntoAddressSpaceGOOGLE:
6350 // 1. preprocess (on_vkMapMemoryIntoAddressSpaceGOOGLE_pre):
6351 // uses address space device to reserve the right size of
6352 // memory.
6353 // 2. the reservation results in a physical address. the physical
6354 // address is set as |*pAddress|.
6355 // 3. after pre, the API call is encoded to the host, where the
6356 // value of pAddress is also sent (the physical address).
6357 // 4. the host will obtain the actual gpu pointer and send it
6358 // back out in |*pAddress|.
6359 // 5. postprocess (on_vkMapMemoryIntoAddressSpaceGOOGLE) will run,
6360 // using the mmap() method of GoldfishAddressSpaceBlock to obtain
6361 // a pointer in guest userspace corresponding to the host pointer.
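// Sketch of the round trip described above (call shapes abbreviated; values
// hypothetical):
//
//   uint64_t addr = 0;
//   on_vkMapMemoryIntoAddressSpaceGOOGLE_pre(...)  // reserves a block; addr now
//                                                  // holds a guest-physical address
//   /* the encoder sends vkMapMemoryIntoAddressSpaceGOOGLE with that address to
//      the host, which overwrites addr with the actual GPU pointer */
//   on_vkMapMemoryIntoAddressSpaceGOOGLE(...)      // GoldfishAddressSpaceBlock's
//                                                  // mmap() then yields the guest
//                                                  // userspace pointer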
6362 VkResult ResourceTracker::on_vkMapMemoryIntoAddressSpaceGOOGLE_pre(void*, VkResult, VkDevice,
6363 VkDeviceMemory memory,
6364 uint64_t* pAddress) {
6365 std::lock_guard<std::recursive_mutex> lock(mLock);
6366
6367 auto it = info_VkDeviceMemory.find(memory);
6368 if (it == info_VkDeviceMemory.end()) {
6369 return VK_ERROR_OUT_OF_HOST_MEMORY;
6370 }
6371
6372 #if DETECT_OS_ANDROID
6373 auto& memInfo = it->second;
6374
6375 GoldfishAddressSpaceBlockPtr block = std::make_shared<GoldfishAddressSpaceBlock>();
6376 block->allocate(mGoldfishAddressSpaceBlockProvider.get(), memInfo.coherentMemorySize);
6377
6378 memInfo.goldfishBlock = block;
6379 *pAddress = block->physAddr();
6380
6381 return VK_SUCCESS;
6382 #else
6383 (void)pAddress;
6384 return VK_ERROR_MEMORY_MAP_FAILED;
6385 #endif
6386 }
6387
6388 VkResult ResourceTracker::on_vkMapMemoryIntoAddressSpaceGOOGLE(void*, VkResult input_result,
6389 VkDevice, VkDeviceMemory memory,
6390 uint64_t* pAddress) {
6391 (void)memory;
6392 (void)pAddress;
6393
6394     return input_result;
6399 }
6400
6401 VkResult ResourceTracker::initDescriptorUpdateTemplateBuffers(
6402 const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo,
6403 VkDescriptorUpdateTemplate descriptorUpdateTemplate) {
6404 std::lock_guard<std::recursive_mutex> lock(mLock);
6405
6406 auto it = info_VkDescriptorUpdateTemplate.find(descriptorUpdateTemplate);
6407 if (it == info_VkDescriptorUpdateTemplate.end()) {
6408 return VK_ERROR_INITIALIZATION_FAILED;
6409 }
6410
6411 auto& info = it->second;
6412 uint32_t inlineUniformBlockBufferSize = 0;
6413
6414 for (uint32_t i = 0; i < pCreateInfo->descriptorUpdateEntryCount; ++i) {
6415 const auto& entry = pCreateInfo->pDescriptorUpdateEntries[i];
6416 uint32_t descCount = entry.descriptorCount;
6417 VkDescriptorType descType = entry.descriptorType;
6418 ++info.templateEntryCount;
6419 if (isDescriptorTypeInlineUniformBlock(descType)) {
6420 inlineUniformBlockBufferSize += descCount;
6421 ++info.inlineUniformBlockCount;
6422 } else {
6423 for (uint32_t j = 0; j < descCount; ++j) {
6424 if (isDescriptorTypeImageInfo(descType)) {
6425 ++info.imageInfoCount;
6426 } else if (isDescriptorTypeBufferInfo(descType)) {
6427 ++info.bufferInfoCount;
6428 } else if (isDescriptorTypeBufferView(descType)) {
6429 ++info.bufferViewCount;
6430 } else {
6431 mesa_loge("%s: FATAL: Unknown descriptor type %d\n", __func__, descType);
6432 // abort();
6433 }
6434 }
6435 }
6436 }
6437
6438 if (info.templateEntryCount)
6439 info.templateEntries = new VkDescriptorUpdateTemplateEntry[info.templateEntryCount];
6440
6441 if (info.imageInfoCount) {
6442 info.imageInfoIndices = new uint32_t[info.imageInfoCount];
6443 info.imageInfos = new VkDescriptorImageInfo[info.imageInfoCount];
6444 }
6445
6446 if (info.bufferInfoCount) {
6447 info.bufferInfoIndices = new uint32_t[info.bufferInfoCount];
6448 info.bufferInfos = new VkDescriptorBufferInfo[info.bufferInfoCount];
6449 }
6450
6451 if (info.bufferViewCount) {
6452 info.bufferViewIndices = new uint32_t[info.bufferViewCount];
6453 info.bufferViews = new VkBufferView[info.bufferViewCount];
6454 }
6455
6456 if (info.inlineUniformBlockCount) {
6457 info.inlineUniformBlockBuffer.resize(inlineUniformBlockBufferSize);
6458 info.inlineUniformBlockBytesPerBlocks.resize(info.inlineUniformBlockCount);
6459 }
6460
6461 uint32_t imageInfoIndex = 0;
6462 uint32_t bufferInfoIndex = 0;
6463 uint32_t bufferViewIndex = 0;
6464 uint32_t inlineUniformBlockIndex = 0;
6465
6466 for (uint32_t i = 0; i < pCreateInfo->descriptorUpdateEntryCount; ++i) {
6467 const auto& entry = pCreateInfo->pDescriptorUpdateEntries[i];
6468 uint32_t descCount = entry.descriptorCount;
6469 VkDescriptorType descType = entry.descriptorType;
6470
6471 info.templateEntries[i] = entry;
6472
6473 if (isDescriptorTypeInlineUniformBlock(descType)) {
6474 info.inlineUniformBlockBytesPerBlocks[inlineUniformBlockIndex] = descCount;
6475 ++inlineUniformBlockIndex;
6476 } else {
6477 for (uint32_t j = 0; j < descCount; ++j) {
6478 if (isDescriptorTypeImageInfo(descType)) {
6479 info.imageInfoIndices[imageInfoIndex] = i;
6480 ++imageInfoIndex;
6481 } else if (isDescriptorTypeBufferInfo(descType)) {
6482 info.bufferInfoIndices[bufferInfoIndex] = i;
6483 ++bufferInfoIndex;
6484 } else if (isDescriptorTypeBufferView(descType)) {
6485 info.bufferViewIndices[bufferViewIndex] = i;
6486 ++bufferViewIndex;
6487 } else {
6488 mesa_loge("%s: FATAL: Unknown descriptor type %d\n", __func__, descType);
6489 // abort();
6490 }
6491 }
6492 }
6493 }
6494
6495 return VK_SUCCESS;
6496 }
6497
6498 VkResult ResourceTracker::on_vkCreateDescriptorUpdateTemplate(
6499 void* context, VkResult input_result, VkDevice device,
6500 const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo,
6501 const VkAllocationCallbacks* pAllocator,
6502 VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate) {
6503 (void)context;
6504 (void)device;
6505 (void)pAllocator;
6506
6507 if (input_result != VK_SUCCESS) return input_result;
6508
6509 return initDescriptorUpdateTemplateBuffers(pCreateInfo, *pDescriptorUpdateTemplate);
6510 }
6511
6512 VkResult ResourceTracker::on_vkCreateDescriptorUpdateTemplateKHR(
6513 void* context, VkResult input_result, VkDevice device,
6514 const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo,
6515 const VkAllocationCallbacks* pAllocator,
6516 VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate) {
6517 (void)context;
6518 (void)device;
6519 (void)pAllocator;
6520
6521 if (input_result != VK_SUCCESS) return input_result;
6522
6523 return initDescriptorUpdateTemplateBuffers(pCreateInfo, *pDescriptorUpdateTemplate);
6524 }
6525
6526 void ResourceTracker::on_vkUpdateDescriptorSetWithTemplate(
6527 void* context, VkDevice device, VkDescriptorSet descriptorSet,
6528 VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData) {
6529 VkEncoder* enc = (VkEncoder*)context;
6530
6531 uint8_t* userBuffer = (uint8_t*)pData;
6532 if (!userBuffer) return;
6533
6534 // TODO: Make this thread safe
6535 std::unique_lock<std::recursive_mutex> lock(mLock);
6536
6537 auto it = info_VkDescriptorUpdateTemplate.find(descriptorUpdateTemplate);
6538 if (it == info_VkDescriptorUpdateTemplate.end()) {
6539 return;
6540 }
6541
6542 auto& info = it->second;
6543
6544 uint32_t templateEntryCount = info.templateEntryCount;
6545 VkDescriptorUpdateTemplateEntry* templateEntries = info.templateEntries;
6546
6547 uint32_t imageInfoCount = info.imageInfoCount;
6548 uint32_t bufferInfoCount = info.bufferInfoCount;
6549 uint32_t bufferViewCount = info.bufferViewCount;
6550 uint32_t* imageInfoIndices = info.imageInfoIndices;
6551 uint32_t* bufferInfoIndices = info.bufferInfoIndices;
6552 uint32_t* bufferViewIndices = info.bufferViewIndices;
6553 VkDescriptorImageInfo* imageInfos = info.imageInfos;
6554 VkDescriptorBufferInfo* bufferInfos = info.bufferInfos;
6555 VkBufferView* bufferViews = info.bufferViews;
6556 uint8_t* inlineUniformBlockBuffer = info.inlineUniformBlockBuffer.data();
6557 uint32_t* inlineUniformBlockBytesPerBlocks = info.inlineUniformBlockBytesPerBlocks.data();
6558
6559 lock.unlock();
6560
6561 size_t currImageInfoOffset = 0;
6562 size_t currBufferInfoOffset = 0;
6563 size_t currBufferViewOffset = 0;
6564 size_t inlineUniformBlockOffset = 0;
6565 size_t inlineUniformBlockIdx = 0;
6566
6567 struct goldfish_VkDescriptorSet* ds = as_goldfish_VkDescriptorSet(descriptorSet);
6568 ReifiedDescriptorSet* reified = ds->reified;
6569
6570 bool batched = mFeatureInfo.hasVulkanBatchedDescriptorSetUpdate;
6571
6572 for (uint32_t i = 0; i < templateEntryCount; ++i) {
6573 const auto& entry = templateEntries[i];
6574 VkDescriptorType descType = entry.descriptorType;
6575 uint32_t dstBinding = entry.dstBinding;
6576
6577 auto offset = entry.offset;
6578 auto stride = entry.stride;
6579 auto dstArrayElement = entry.dstArrayElement;
6580
6581 uint32_t descCount = entry.descriptorCount;
6582
6583 if (isDescriptorTypeImageInfo(descType)) {
6584 if (!stride) stride = sizeof(VkDescriptorImageInfo);
6585
6586 const VkDescriptorImageInfo* currImageInfoBegin =
6587 (const VkDescriptorImageInfo*)((uint8_t*)imageInfos + currImageInfoOffset);
6588
6589 for (uint32_t j = 0; j < descCount; ++j) {
6590 const VkDescriptorImageInfo* user =
6591 (const VkDescriptorImageInfo*)(userBuffer + offset + j * stride);
6592
6593 memcpy(((uint8_t*)imageInfos) + currImageInfoOffset, user,
6594 sizeof(VkDescriptorImageInfo));
6595 currImageInfoOffset += sizeof(VkDescriptorImageInfo);
6596 }
6597
6598 if (batched) {
6599 doEmulatedDescriptorImageInfoWriteFromTemplate(
6600 descType, dstBinding, dstArrayElement, descCount, currImageInfoBegin, reified);
6601 }
6602 } else if (isDescriptorTypeBufferInfo(descType)) {
6603 if (!stride) stride = sizeof(VkDescriptorBufferInfo);
6604
6605 const VkDescriptorBufferInfo* currBufferInfoBegin =
6606 (const VkDescriptorBufferInfo*)((uint8_t*)bufferInfos + currBufferInfoOffset);
6607
6608 for (uint32_t j = 0; j < descCount; ++j) {
6609 const VkDescriptorBufferInfo* user =
6610 (const VkDescriptorBufferInfo*)(userBuffer + offset + j * stride);
6611
6612 memcpy(((uint8_t*)bufferInfos) + currBufferInfoOffset, user,
6613 sizeof(VkDescriptorBufferInfo));
6614
6615 // TODO(b/355497683): move this into gfxstream_vk_UpdateDescriptorSetWithTemplate().
6616 #if DETECT_OS_LINUX || defined(VK_USE_PLATFORM_ANDROID_KHR)
6617 // Convert mesa to internal for objects in the user buffer
6618 VkDescriptorBufferInfo* internalBufferInfo =
6619 (VkDescriptorBufferInfo*)(((uint8_t*)bufferInfos) + currBufferInfoOffset);
6620 VK_FROM_HANDLE(gfxstream_vk_buffer, gfxstream_buffer, internalBufferInfo->buffer);
6621 internalBufferInfo->buffer = gfxstream_buffer->internal_object;
6622 #endif
6623 currBufferInfoOffset += sizeof(VkDescriptorBufferInfo);
6624 }
6625
6626 if (batched) {
6627 doEmulatedDescriptorBufferInfoWriteFromTemplate(
6628 descType, dstBinding, dstArrayElement, descCount, currBufferInfoBegin, reified);
6629 }
6630
6631 } else if (isDescriptorTypeBufferView(descType)) {
6632 if (!stride) stride = sizeof(VkBufferView);
6633
6634 const VkBufferView* currBufferViewBegin =
6635 (const VkBufferView*)((uint8_t*)bufferViews + currBufferViewOffset);
6636
6637 for (uint32_t j = 0; j < descCount; ++j) {
6638 const VkBufferView* user = (const VkBufferView*)(userBuffer + offset + j * stride);
6639
6640 memcpy(((uint8_t*)bufferViews) + currBufferViewOffset, user, sizeof(VkBufferView));
6641 currBufferViewOffset += sizeof(VkBufferView);
6642 }
6643
6644 if (batched) {
6645 doEmulatedDescriptorBufferViewWriteFromTemplate(
6646 descType, dstBinding, dstArrayElement, descCount, currBufferViewBegin, reified);
6647 }
6648 } else if (isDescriptorTypeInlineUniformBlock(descType)) {
6649 uint32_t inlineUniformBlockBytesPerBlock =
6650 inlineUniformBlockBytesPerBlocks[inlineUniformBlockIdx];
6651 uint8_t* currInlineUniformBlockBufferBegin =
6652 inlineUniformBlockBuffer + inlineUniformBlockOffset;
6653 memcpy(currInlineUniformBlockBufferBegin, userBuffer + offset,
6654 inlineUniformBlockBytesPerBlock);
6655 inlineUniformBlockIdx++;
6656 inlineUniformBlockOffset += inlineUniformBlockBytesPerBlock;
6657
6658 if (batched) {
6659 doEmulatedDescriptorInlineUniformBlockFromTemplate(
6660 descType, dstBinding, dstArrayElement, descCount,
6661 currInlineUniformBlockBufferBegin, reified);
6662 }
6663 } else {
6664 mesa_loge("%s: FATAL: Unknown descriptor type %d\n", __func__, descType);
6665 abort();
6666 }
6667 }
6668
6669 if (batched) return;
6670
6671 enc->vkUpdateDescriptorSetWithTemplateSized2GOOGLE(
6672 device, descriptorSet, descriptorUpdateTemplate, imageInfoCount, bufferInfoCount,
6673 bufferViewCount, static_cast<uint32_t>(info.inlineUniformBlockBuffer.size()),
6674 imageInfoIndices, bufferInfoIndices, bufferViewIndices, imageInfos, bufferInfos,
6675 bufferViews, inlineUniformBlockBuffer, true /* do lock */);
6676 }
6677
6678 void ResourceTracker::on_vkUpdateDescriptorSetWithTemplateKHR(
6679 void* context, VkDevice device, VkDescriptorSet descriptorSet,
6680 VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData) {
6681 on_vkUpdateDescriptorSetWithTemplate(context, device, descriptorSet, descriptorUpdateTemplate,
6682 pData);
6683 }
6684
6685 VkResult ResourceTracker::on_vkGetPhysicalDeviceImageFormatProperties2_common(
6686 bool isKhr, void* context, VkResult input_result, VkPhysicalDevice physicalDevice,
6687 const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo,
6688 VkImageFormatProperties2* pImageFormatProperties) {
6689 VkEncoder* enc = (VkEncoder*)context;
6690 (void)input_result;
6691
6692 VkPhysicalDeviceImageFormatInfo2 localImageFormatInfo = *pImageFormatInfo;
6693
6694 uint32_t supportedHandleType = 0;
6695 VkExternalImageFormatProperties* ext_img_properties =
6696 vk_find_struct(pImageFormatProperties, EXTERNAL_IMAGE_FORMAT_PROPERTIES);
6697
6698 #ifdef VK_USE_PLATFORM_FUCHSIA
6699
6700 constexpr VkFormat kExternalImageSupportedFormats[] = {
6701 VK_FORMAT_B8G8R8A8_SINT, VK_FORMAT_B8G8R8A8_UNORM, VK_FORMAT_B8G8R8A8_SRGB,
6702 VK_FORMAT_B8G8R8A8_SNORM, VK_FORMAT_B8G8R8A8_SSCALED, VK_FORMAT_B8G8R8A8_USCALED,
6703 VK_FORMAT_R8G8B8A8_SINT, VK_FORMAT_R8G8B8A8_UNORM, VK_FORMAT_R8G8B8A8_SRGB,
6704 VK_FORMAT_R8G8B8A8_SNORM, VK_FORMAT_R8G8B8A8_SSCALED, VK_FORMAT_R8G8B8A8_USCALED,
6705 VK_FORMAT_R8_UNORM, VK_FORMAT_R8_UINT, VK_FORMAT_R8_USCALED,
6706 VK_FORMAT_R8_SNORM, VK_FORMAT_R8_SINT, VK_FORMAT_R8_SSCALED,
6707 VK_FORMAT_R8_SRGB, VK_FORMAT_R8G8_UNORM, VK_FORMAT_R8G8_UINT,
6708 VK_FORMAT_R8G8_USCALED, VK_FORMAT_R8G8_SNORM, VK_FORMAT_R8G8_SINT,
6709 VK_FORMAT_R8G8_SSCALED, VK_FORMAT_R8G8_SRGB,
6710 };
6711
6712 if (ext_img_properties) {
6713 if (std::find(std::begin(kExternalImageSupportedFormats),
6714 std::end(kExternalImageSupportedFormats),
6715 pImageFormatInfo->format) == std::end(kExternalImageSupportedFormats)) {
6716 return VK_ERROR_FORMAT_NOT_SUPPORTED;
6717 }
6718 }
6719 supportedHandleType |= VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA;
6720 #endif
6721
6722 #ifdef VK_USE_PLATFORM_ANDROID_KHR
6723 VkAndroidHardwareBufferUsageANDROID* output_ahw_usage = vk_find_struct(pImageFormatProperties, ANDROID_HARDWARE_BUFFER_USAGE_ANDROID);
6724 supportedHandleType |= VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
6725 VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
6726 #endif
6727 const VkPhysicalDeviceExternalImageFormatInfo* ext_img_info =
6728 vk_find_struct_const(pImageFormatInfo, PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO);
6729 if (supportedHandleType && ext_img_info) {
6730 // 0 is a valid handleType so we don't check against 0
6731 if (ext_img_info->handleType != (ext_img_info->handleType & supportedHandleType)) {
6732 return VK_ERROR_FORMAT_NOT_SUPPORTED;
6733 }
6734 }
6735
6736 #ifdef LINUX_GUEST_BUILD
6737 const VkPhysicalDeviceImageDrmFormatModifierInfoEXT* drmFmtMod =
6738 vk_find_struct_const(pImageFormatInfo, PHYSICAL_DEVICE_IMAGE_DRM_FORMAT_MODIFIER_INFO_EXT);
6739 VkDrmFormatModifierPropertiesListEXT* emulatedDrmFmtModPropsList = nullptr;
6740 if (drmFmtMod) {
6741 if (getHostDeviceExtensionIndex(VK_EXT_IMAGE_DRM_FORMAT_MODIFIER_EXTENSION_NAME) != -1) {
6742 // Host supports DRM format modifiers => leave the input unchanged.
6743 } else {
6744 mesa_logd("emulating DRM_FORMAT_MOD_LINEAR with VK_IMAGE_TILING_LINEAR");
6745 emulatedDrmFmtModPropsList =
6746 vk_find_struct(pImageFormatProperties, DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT);
6747
6748             // Host doesn't support DRM format modifiers, try emulating.
6749             // (drmFmtMod is non-null on this path.)
6750             if (drmFmtMod->drmFormatModifier == DRM_FORMAT_MOD_LINEAR) {
6751                 localImageFormatInfo.tiling = VK_IMAGE_TILING_LINEAR;
6752                 pImageFormatInfo = &localImageFormatInfo;
6753                 // Leave drmFormatMod in the input; it should be ignored when
6754                 // tiling is not VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT.
6755             } else {
6756                 return VK_ERROR_FORMAT_NOT_SUPPORTED;
6757             }
6760 }
6761 }
6762 #endif // LINUX_GUEST_BUILD
6763
6764 VkResult hostRes;
6765
6766 if (isKhr) {
6767 hostRes = enc->vkGetPhysicalDeviceImageFormatProperties2KHR(
6768 physicalDevice, &localImageFormatInfo, pImageFormatProperties, true /* do lock */);
6769 } else {
6770 hostRes = enc->vkGetPhysicalDeviceImageFormatProperties2(
6771 physicalDevice, &localImageFormatInfo, pImageFormatProperties, true /* do lock */);
6772 }
6773
6774 if (hostRes != VK_SUCCESS) return hostRes;
6775
6776 #ifdef LINUX_GUEST_BUILD
6777 if (emulatedDrmFmtModPropsList) {
6778 VkFormatProperties formatProperties;
6779 enc->vkGetPhysicalDeviceFormatProperties(physicalDevice, localImageFormatInfo.format,
6780 &formatProperties, true /* do lock */);
6781
6782 emulatedDrmFmtModPropsList->drmFormatModifierCount = 1;
6783 if (emulatedDrmFmtModPropsList->pDrmFormatModifierProperties) {
6784 emulatedDrmFmtModPropsList->pDrmFormatModifierProperties[0] = {
6785 .drmFormatModifier = DRM_FORMAT_MOD_LINEAR,
6786 .drmFormatModifierPlaneCount = 1,
6787 .drmFormatModifierTilingFeatures = formatProperties.linearTilingFeatures,
6788 };
6789 }
6790 }
6791 #endif // LINUX_GUEST_BUILD
6792
6793 #ifdef VK_USE_PLATFORM_FUCHSIA
6794 if (ext_img_properties) {
6795 if (ext_img_info) {
6796 if (static_cast<uint32_t>(ext_img_info->handleType) ==
6797 VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA) {
6798 ext_img_properties->externalMemoryProperties = {
6799 .externalMemoryFeatures = VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT |
6800 VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT,
6801 .exportFromImportedHandleTypes =
6802 VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA,
6803 .compatibleHandleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA,
6804 };
6805 }
6806 }
6807 }
6808 #endif
6809
6810 #ifdef VK_USE_PLATFORM_ANDROID_KHR
6811 if (output_ahw_usage) {
6812 output_ahw_usage->androidHardwareBufferUsage = getAndroidHardwareBufferUsageFromVkUsage(
6813 pImageFormatInfo->flags, pImageFormatInfo->usage);
6814 }
6815 #endif
6816 if (ext_img_properties) {
6817 transformImpl_VkExternalMemoryProperties_fromhost(
6818 &ext_img_properties->externalMemoryProperties, 0);
6819 }
6820 return hostRes;
6821 }
6822
6823 VkResult ResourceTracker::on_vkGetPhysicalDeviceImageFormatProperties2(
6824 void* context, VkResult input_result, VkPhysicalDevice physicalDevice,
6825 const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo,
6826 VkImageFormatProperties2* pImageFormatProperties) {
6827 return on_vkGetPhysicalDeviceImageFormatProperties2_common(
6828 false /* not KHR */, context, input_result, physicalDevice, pImageFormatInfo,
6829 pImageFormatProperties);
6830 }
6831
6832 VkResult ResourceTracker::on_vkGetPhysicalDeviceImageFormatProperties2KHR(
6833 void* context, VkResult input_result, VkPhysicalDevice physicalDevice,
6834 const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo,
6835 VkImageFormatProperties2* pImageFormatProperties) {
6836 return on_vkGetPhysicalDeviceImageFormatProperties2_common(
6837 true /* is KHR */, context, input_result, physicalDevice, pImageFormatInfo,
6838 pImageFormatProperties);
6839 }
6840
6841 void ResourceTracker::on_vkGetPhysicalDeviceExternalBufferProperties_common(
6842 bool isKhr, void* context, VkPhysicalDevice physicalDevice,
6843 const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo,
6844 VkExternalBufferProperties* pExternalBufferProperties) {
6845 VkEncoder* enc = (VkEncoder*)context;
6846
6847 #if defined(ANDROID)
6848 // Older versions of Goldfish's Gralloc did not support allocating AHARDWAREBUFFER_FORMAT_BLOB
6849 // with GPU usage (b/299520213).
6850 if (mGralloc->treatBlobAsImage() &&
6851 pExternalBufferInfo->handleType ==
6852 VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID) {
6853 pExternalBufferProperties->externalMemoryProperties.externalMemoryFeatures = 0;
6854 pExternalBufferProperties->externalMemoryProperties.exportFromImportedHandleTypes = 0;
6855 pExternalBufferProperties->externalMemoryProperties.compatibleHandleTypes = 0;
6856 return;
6857 }
6858 #endif
6859
6860 uint32_t supportedHandleType = 0;
6861 #ifdef VK_USE_PLATFORM_FUCHSIA
6862 supportedHandleType |= VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA;
6863 #endif
6864 #ifdef VK_USE_PLATFORM_ANDROID_KHR
6865 supportedHandleType |= VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
6866 VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
6867 #endif
6868 if (supportedHandleType) {
6869 // 0 is a valid handleType so we can't check against 0
6870 if (pExternalBufferInfo->handleType !=
6871 (pExternalBufferInfo->handleType & supportedHandleType)) {
6872 return;
6873 }
6874 }
6875
6876 if (isKhr) {
6877 enc->vkGetPhysicalDeviceExternalBufferPropertiesKHR(
6878 physicalDevice, pExternalBufferInfo, pExternalBufferProperties, true /* do lock */);
6879 } else {
6880 enc->vkGetPhysicalDeviceExternalBufferProperties(
6881 physicalDevice, pExternalBufferInfo, pExternalBufferProperties, true /* do lock */);
6882 }
6883 transformImpl_VkExternalMemoryProperties_fromhost(
6884 &pExternalBufferProperties->externalMemoryProperties, 0);
6885 }
6886
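// The early-out above is easy to misread, so here is a minimal worked sketch
// of the mask check (illustrative only; kept out of the build with #if 0):
#if 0
void handleTypeMaskCheckExample() {
    // Assume only these two handle types are supported on this platform.
    uint32_t supported = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
                         VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;

    // handleType == 0 passes: 0 == (0 & supported), so zero is not rejected,
    // which a naive (handleType & supported) == 0 test would get wrong.
    uint32_t zeroType = 0;
    assert(zeroType == (zeroType & supported));

    // Any unsupported bit causes an early return with zeroed-out properties.
    uint32_t dmaBufType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT;
    assert(dmaBufType != (dmaBufType & supported));
}
#endif
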
void ResourceTracker::on_vkGetPhysicalDeviceExternalBufferProperties(
    void* context, VkPhysicalDevice physicalDevice,
    const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo,
    VkExternalBufferProperties* pExternalBufferProperties) {
    return on_vkGetPhysicalDeviceExternalBufferProperties_common(
        false /* not KHR */, context, physicalDevice, pExternalBufferInfo,
        pExternalBufferProperties);
}

void ResourceTracker::on_vkGetPhysicalDeviceExternalBufferPropertiesKHR(
    void* context, VkPhysicalDevice physicalDevice,
    const VkPhysicalDeviceExternalBufferInfoKHR* pExternalBufferInfo,
    VkExternalBufferPropertiesKHR* pExternalBufferProperties) {
    return on_vkGetPhysicalDeviceExternalBufferProperties_common(
        true /* is KHR */, context, physicalDevice, pExternalBufferInfo,
        pExternalBufferProperties);
}

void ResourceTracker::on_vkGetPhysicalDeviceExternalSemaphoreProperties(
    void*, VkPhysicalDevice, const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo,
    VkExternalSemaphoreProperties* pExternalSemaphoreProperties) {
    (void)pExternalSemaphoreInfo;
    (void)pExternalSemaphoreProperties;
#ifdef VK_USE_PLATFORM_FUCHSIA
    if (pExternalSemaphoreInfo->handleType ==
        static_cast<uint32_t>(VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA)) {
        pExternalSemaphoreProperties->compatibleHandleTypes |=
            VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA;
        pExternalSemaphoreProperties->exportFromImportedHandleTypes |=
            VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA;
        pExternalSemaphoreProperties->externalSemaphoreFeatures |=
            VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
            VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
    }
#else
    const VkSemaphoreTypeCreateInfo* semaphoreTypeCi =
        vk_find_struct_const(pExternalSemaphoreInfo, SEMAPHORE_TYPE_CREATE_INFO);
    bool isSemaphoreTimeline =
        semaphoreTypeCi != nullptr && semaphoreTypeCi->semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE;
    if (isSemaphoreTimeline) {
        // b/304373623
        // dEQP-VK.api.external.semaphore.sync_fd#info_timeline
        pExternalSemaphoreProperties->compatibleHandleTypes = 0;
        pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
        pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
    } else if (pExternalSemaphoreInfo->handleType ==
               VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT) {
        pExternalSemaphoreProperties->compatibleHandleTypes |=
            VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
        pExternalSemaphoreProperties->exportFromImportedHandleTypes |=
            VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
        pExternalSemaphoreProperties->externalSemaphoreFeatures |=
            VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
            VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
    }
#endif  // VK_USE_PLATFORM_FUCHSIA
}

void ResourceTracker::on_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR(
    void* context, VkPhysicalDevice physicalDevice,
    const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo,
    VkExternalSemaphoreProperties* pExternalSemaphoreProperties) {
    on_vkGetPhysicalDeviceExternalSemaphoreProperties(
        context, physicalDevice, pExternalSemaphoreInfo, pExternalSemaphoreProperties);
}

void ResourceTracker::registerEncoderCleanupCallback(const VkEncoder* encoder, void* object,
                                                     CleanupCallback callback) {
    std::lock_guard<std::recursive_mutex> lock(mLock);
    auto& callbacks = mEncoderCleanupCallbacks[encoder];
    callbacks[object] = callback;
}

void ResourceTracker::unregisterEncoderCleanupCallback(const VkEncoder* encoder, void* object) {
    std::lock_guard<std::recursive_mutex> lock(mLock);
    mEncoderCleanupCallbacks[encoder].erase(object);
}

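// A minimal usage sketch for the cleanup-callback registry above
// (illustrative only, kept out of the build with #if 0; `perEncoderState`
// and `releasePerEncoderState` are hypothetical):
#if 0
void cleanupCallbackUsageExample(const VkEncoder* enc, void* perEncoderState) {
    // Register teardown work that must run when this encoder goes away.
    ResourceTracker::get()->registerEncoderCleanupCallback(
        enc, perEncoderState,
        [perEncoderState]() { releasePerEncoderState(perEncoderState); });

    // If the state is destroyed before the encoder, deregister explicitly so
    // onEncoderDeleted() doesn't invoke a dangling callback.
    ResourceTracker::get()->unregisterEncoderCleanupCallback(enc, perEncoderState);
}
#endif
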
void ResourceTracker::onEncoderDeleted(const VkEncoder* encoder) {
    std::unique_lock<std::recursive_mutex> lock(mLock);
    if (mEncoderCleanupCallbacks.find(encoder) == mEncoderCleanupCallbacks.end()) return;

    std::unordered_map<void*, CleanupCallback> callbackCopies = mEncoderCleanupCallbacks[encoder];

    mEncoderCleanupCallbacks.erase(encoder);
    // Copy the callbacks and drop the lock before invoking them; a callback
    // may call back into ResourceTracker and take mLock itself.
    lock.unlock();

    for (auto& it : callbackCopies) {
        it.second();
    }
}

CommandBufferStagingStream::Alloc ResourceTracker::getAlloc() {
    if (mFeatureInfo.hasVulkanAuxCommandMemory) {
        return [this](size_t size) -> CommandBufferStagingStream::Memory {
            VkMemoryAllocateInfo info{
                .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
                .pNext = nullptr,
                .allocationSize = size,
                .memoryTypeIndex = VK_MAX_MEMORY_TYPES  // indicates auxiliary memory
            };

            auto enc = ResourceTracker::getThreadLocalEncoder();
            VkDevice device = VK_NULL_HANDLE;
            VkDeviceMemory vkDeviceMem = VK_NULL_HANDLE;
            VkResult result = getCoherentMemory(&info, enc, device, &vkDeviceMem);
            if (result != VK_SUCCESS) {
                mesa_loge("Failed to get coherent memory %u", result);
                return {.deviceMemory = VK_NULL_HANDLE, .ptr = nullptr};
            }

            // getCoherentMemory() suballocates, so the mapped address is not
            // returned directly; look up the VkDeviceMemory it filled in to
            // recover the suballocation's host pointer.
            {  // scope of mLock
                std::lock_guard<std::recursive_mutex> lock(mLock);
                const auto it = info_VkDeviceMemory.find(vkDeviceMem);
                if (it == info_VkDeviceMemory.end()) {
                    mesa_loge("Newly allocated coherent memory not found");
                    return {.deviceMemory = VK_NULL_HANDLE, .ptr = nullptr};
                }

                const auto& memInfo = it->second;
                return {.deviceMemory = vkDeviceMem, .ptr = memInfo.ptr};
            }
        };
    }
    return nullptr;
}

CommandBufferStagingStream::Free ResourceTracker::getFree() {
    if (mFeatureInfo.hasVulkanAuxCommandMemory) {
        return [this](const CommandBufferStagingStream::Memory& memory) {
            // For suballocations, deviceMemory may not be the actual backing
            // auxiliary VkDeviceMemory; it is an alias VkDeviceMemory handle.
            // freeCoherentMemoryLocked maps the alias to the backing
            // VkDeviceMemory.
            VkDeviceMemory deviceMemory = memory.deviceMemory;
            std::unique_lock<std::recursive_mutex> lock(mLock);
            auto it = info_VkDeviceMemory.find(deviceMemory);
            if (it == info_VkDeviceMemory.end()) {
                mesa_loge("Device memory to free not found");
                return;
            }
            auto coherentMemory = freeCoherentMemoryLocked(deviceMemory, it->second);
            // Release the lock before potentially freeing a CoherentMemory,
            // because that calls into VkEncoder, which must not be invoked
            // while the lock is held.
            lock.unlock();
            coherentMemory = nullptr;
        };
    }
    return nullptr;
}

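// How these hooks are meant to be wired together, as a sketch (illustrative
// only, kept out of the build with #if 0): a CommandBufferStagingStream takes
// the custom Alloc/Free pair so its buffers live in auxiliary coherent
// memory; getCommandBufferEncoder() below does exactly this via sStaging.
// The direct-construction form is an assumption about the stream's API.
#if 0
void stagingAllocFreeWiringExample() {
    auto tracker = ResourceTracker::get();
    CommandBufferStagingStream::Alloc allocFn = tracker->getAlloc();
    CommandBufferStagingStream::Free freeFn = tracker->getFree();
    if (allocFn && freeFn) {
        // Hand both callbacks to the staging pool; streams popped from it
        // will allocate their backing storage through allocFn/freeFn.
        sStaging.setAllocFree(allocFn, freeFn);
    }
    // Both getters return nullptr when hasVulkanAuxCommandMemory is off, in
    // which case the stream falls back to its default allocator.
}
#endif
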
VkResult ResourceTracker::on_vkBeginCommandBuffer(void* context, VkResult input_result,
                                                  VkCommandBuffer commandBuffer,
                                                  const VkCommandBufferBeginInfo* pBeginInfo) {
    (void)context;

    resetCommandBufferStagingInfo(commandBuffer, true /* also reset primaries */,
                                  true /* also clear pending descriptor sets */);

    VkEncoder* enc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
    (void)input_result;

    struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
    cb->flags = pBeginInfo->flags;

    VkCommandBufferBeginInfo modifiedBeginInfo;

    if (pBeginInfo->pInheritanceInfo && !cb->isSecondary) {
        modifiedBeginInfo = *pBeginInfo;
        modifiedBeginInfo.pInheritanceInfo = nullptr;
        pBeginInfo = &modifiedBeginInfo;
    }

    if (!supportsDeferredCommands()) {
        return enc->vkBeginCommandBuffer(commandBuffer, pBeginInfo, true /* do lock */);
    }

    enc->vkBeginCommandBufferAsyncGOOGLE(commandBuffer, pBeginInfo, true /* do lock */);

    return VK_SUCCESS;
}

VkResult ResourceTracker::on_vkEndCommandBuffer(void* context, VkResult input_result,
                                                VkCommandBuffer commandBuffer) {
    VkEncoder* enc = (VkEncoder*)context;
    (void)input_result;

    if (!supportsDeferredCommands()) {
        return enc->vkEndCommandBuffer(commandBuffer, true /* do lock */);
    }

    enc->vkEndCommandBufferAsyncGOOGLE(commandBuffer, true /* do lock */);

    return VK_SUCCESS;
}

VkResult ResourceTracker::on_vkResetCommandBuffer(void* context, VkResult input_result,
                                                  VkCommandBuffer commandBuffer,
                                                  VkCommandBufferResetFlags flags) {
    VkEncoder* enc = (VkEncoder*)context;
    (void)input_result;

    if (!supportsDeferredCommands()) {
        VkResult res = enc->vkResetCommandBuffer(commandBuffer, flags, true /* do lock */);
        resetCommandBufferStagingInfo(commandBuffer, true /* also reset primaries */,
                                      true /* also clear pending descriptor sets */);
        return res;
    }

    enc->vkResetCommandBufferAsyncGOOGLE(commandBuffer, flags, true /* do lock */);
    resetCommandBufferStagingInfo(commandBuffer, true /* also reset primaries */,
                                  true /* also clear pending descriptor sets */);
    return VK_SUCCESS;
}

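// The three entry points above share one pattern: when the host supports
// deferred commands, the call is encoded with its *AsyncGOOGLE variant and
// VK_SUCCESS is returned optimistically instead of waiting for the host.
// A condensed sketch of the pattern (illustrative only, #if 0):
#if 0
VkResult deferredCommandPatternExample(VkEncoder* enc, VkCommandBuffer cmdBuf) {
    if (!ResourceTracker::get()->supportsDeferredCommands()) {
        // Synchronous path: round-trip to the host and return its result.
        return enc->vkEndCommandBuffer(cmdBuf, true /* do lock */);
    }
    // Asynchronous path: fire-and-forget; errors surface later (e.g. at
    // queue submit or fence wait), not from this call.
    enc->vkEndCommandBufferAsyncGOOGLE(cmdBuf, true /* do lock */);
    return VK_SUCCESS;
}
#endif
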
VkResult ResourceTracker::on_vkCreateImageView(void* context, VkResult input_result,
                                               VkDevice device,
                                               const VkImageViewCreateInfo* pCreateInfo,
                                               const VkAllocationCallbacks* pAllocator,
                                               VkImageView* pView) {
    VkEncoder* enc = (VkEncoder*)context;
    (void)input_result;

    VkImageViewCreateInfo localCreateInfo = vk_make_orphan_copy(*pCreateInfo);

#if defined(VK_USE_PLATFORM_ANDROID_KHR)
    vk_struct_chain_iterator structChainIter = vk_make_chain_iterator(&localCreateInfo);
    if (pCreateInfo->format == VK_FORMAT_UNDEFINED) {
        std::lock_guard<std::recursive_mutex> lock(mLock);

        auto it = info_VkImage.find(pCreateInfo->image);
        if (it != info_VkImage.end() && it->second.hasExternalFormat) {
            localCreateInfo.format = vk_format_from_fourcc(it->second.externalFourccFormat);
        }
    }
    VkSamplerYcbcrConversionInfo localVkSamplerYcbcrConversionInfo;
    const VkSamplerYcbcrConversionInfo* samplerYcbcrConversionInfo =
        vk_find_struct_const(pCreateInfo, SAMPLER_YCBCR_CONVERSION_INFO);
    if (samplerYcbcrConversionInfo) {
        if (samplerYcbcrConversionInfo->conversion != VK_YCBCR_CONVERSION_DO_NOTHING) {
            localVkSamplerYcbcrConversionInfo = vk_make_orphan_copy(*samplerYcbcrConversionInfo);
            vk_append_struct(&structChainIter, &localVkSamplerYcbcrConversionInfo);
        }
    }
#endif

    return enc->vkCreateImageView(device, &localCreateInfo, pAllocator, pView, true /* do lock */);
}

void ResourceTracker::on_vkCmdExecuteCommands(void* context, VkCommandBuffer commandBuffer,
                                              uint32_t commandBufferCount,
                                              const VkCommandBuffer* pCommandBuffers) {
    VkEncoder* enc = (VkEncoder*)context;

    if (!mFeatureInfo.hasVulkanQueueSubmitWithCommands) {
        enc->vkCmdExecuteCommands(commandBuffer, commandBufferCount, pCommandBuffers,
                                  true /* do lock */);
        return;
    }

    struct goldfish_VkCommandBuffer* primary = as_goldfish_VkCommandBuffer(commandBuffer);
    for (uint32_t i = 0; i < commandBufferCount; ++i) {
        struct goldfish_VkCommandBuffer* secondary =
            as_goldfish_VkCommandBuffer(pCommandBuffers[i]);
        appendObject(&secondary->superObjects, primary);
        appendObject(&primary->subObjects, secondary);
    }

    enc->vkCmdExecuteCommands(commandBuffer, commandBufferCount, pCommandBuffers,
                              true /* do lock */);
}

void ResourceTracker::on_vkCmdBindDescriptorSets(void* context, VkCommandBuffer commandBuffer,
                                                 VkPipelineBindPoint pipelineBindPoint,
                                                 VkPipelineLayout layout, uint32_t firstSet,
                                                 uint32_t descriptorSetCount,
                                                 const VkDescriptorSet* pDescriptorSets,
                                                 uint32_t dynamicOffsetCount,
                                                 const uint32_t* pDynamicOffsets) {
    VkEncoder* enc = (VkEncoder*)context;

    if (mFeatureInfo.hasVulkanBatchedDescriptorSetUpdate)
        addPendingDescriptorSets(commandBuffer, descriptorSetCount, pDescriptorSets);

    enc->vkCmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet,
                                 descriptorSetCount, pDescriptorSets, dynamicOffsetCount,
                                 pDynamicOffsets, true /* do lock */);
}

void ResourceTracker::on_vkCmdPipelineBarrier(
    void* context, VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
    VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
    uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers,
    uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers,
    uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers) {
    VkEncoder* enc = (VkEncoder*)context;

    std::vector<VkImageMemoryBarrier> updatedImageMemoryBarriers;
    updatedImageMemoryBarriers.reserve(imageMemoryBarrierCount);
    for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
        VkImageMemoryBarrier barrier = pImageMemoryBarriers[i];

#ifdef VK_USE_PLATFORM_ANDROID_KHR
        // Unfortunately, Android does not yet have a mechanism for sharing the expected
        // VkImageLayout when passing around AHardwareBuffer-s, so many existing users
        // that import AHardwareBuffer-s into VkImage-s/VkDeviceMemory-s simply use
        // VK_IMAGE_LAYOUT_UNDEFINED. However, the Vulkan spec's image layout transition
        // section says "If the old layout is VK_IMAGE_LAYOUT_UNDEFINED, the contents of
        // that range may be discarded." Some Vulkan drivers have been observed to actually
        // perform the discard, which leads to AHardwareBuffer-s being unintentionally
        // cleared. See go/ahb-vkimagelayout for more information.
        if (barrier.srcQueueFamilyIndex != barrier.dstQueueFamilyIndex &&
            (barrier.srcQueueFamilyIndex == VK_QUEUE_FAMILY_EXTERNAL ||
             barrier.srcQueueFamilyIndex == VK_QUEUE_FAMILY_FOREIGN_EXT) &&
            barrier.oldLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
            // This is not a complete solution, as the Vulkan spec does not require
            // drivers to perform a no-op when oldLayout equals newLayout, but it has
            // been observed to be enough to avoid clearing out images for now.
            // TODO(b/236179843): figure out a long term solution.
            barrier.oldLayout = barrier.newLayout;
        }
#endif

        updatedImageMemoryBarriers.push_back(barrier);
    }

    enc->vkCmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags,
                              memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                              pBufferMemoryBarriers, updatedImageMemoryBarriers.size(),
                              updatedImageMemoryBarriers.data(), true /* do lock */);
}

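// For reference, this is the shape of barrier that triggers the rewrite
// above: a queue-family acquire from an external/foreign queue with
// oldLayout UNDEFINED. A sketch (illustrative only, #if 0; the destination
// queue family index is hypothetical):
#if 0
VkImageMemoryBarrier ahbAcquireBarrierExample(VkImage image) {
    VkImageMemoryBarrier barrier = {
        .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
        .srcAccessMask = 0,
        .dstAccessMask = VK_ACCESS_SHADER_READ_BIT,
        // UNDEFINED would normally permit a discard; the loop above rewrites
        // it to newLayout so imported AHardwareBuffer contents survive.
        .oldLayout = VK_IMAGE_LAYOUT_UNDEFINED,
        .newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
        .srcQueueFamilyIndex = VK_QUEUE_FAMILY_FOREIGN_EXT,
        .dstQueueFamilyIndex = 0,  // hypothetical graphics queue family
        .image = image,
        .subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1},
    };
    return barrier;
}
#endif
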
void ResourceTracker::on_vkDestroyDescriptorSetLayout(void* context, VkDevice device,
                                                      VkDescriptorSetLayout descriptorSetLayout,
                                                      const VkAllocationCallbacks* pAllocator) {
    decDescriptorSetLayoutRef(context, device, descriptorSetLayout, pAllocator);
}

VkResult ResourceTracker::on_vkAllocateCommandBuffers(
    void* context, VkResult input_result, VkDevice device,
    const VkCommandBufferAllocateInfo* pAllocateInfo, VkCommandBuffer* pCommandBuffers) {
    (void)input_result;

    VkEncoder* enc = (VkEncoder*)context;
    VkResult res =
        enc->vkAllocateCommandBuffers(device, pAllocateInfo, pCommandBuffers, true /* do lock */);
    if (VK_SUCCESS != res) return res;

    for (uint32_t i = 0; i < pAllocateInfo->commandBufferCount; ++i) {
        struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(pCommandBuffers[i]);
        cb->isSecondary = pAllocateInfo->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY;
        cb->device = device;
    }

    return res;
}

#if defined(VK_USE_PLATFORM_ANDROID_KHR)
VkResult ResourceTracker::exportSyncFdForQSRILocked(VkImage image, int* fd) {
    mesa_logd("%s: call for image %p host image handle 0x%llx\n", __func__, (void*)image,
              (unsigned long long)get_host_u64_VkImage(image));

    if (mFeatureInfo.hasVirtioGpuNativeSync) {
        struct VirtGpuExecBuffer exec = {};
        struct gfxstreamCreateQSRIExportVK exportQSRI = {};
        VirtGpuDevice* instance = VirtGpuDevice::getInstance();

        uint64_t hostImageHandle = get_host_u64_VkImage(image);

        exportQSRI.hdr.opCode = GFXSTREAM_CREATE_QSRI_EXPORT_VK;
        exportQSRI.imageHandleLo = (uint32_t)hostImageHandle;
        exportQSRI.imageHandleHi = (uint32_t)(hostImageHandle >> 32);

        exec.command = static_cast<void*>(&exportQSRI);
        exec.command_size = sizeof(exportQSRI);
        exec.flags = kFenceOut | kRingIdx;
        if (instance->execBuffer(exec, nullptr)) return VK_ERROR_OUT_OF_HOST_MEMORY;

        *fd = exec.handle.osHandle;
    } else {
#if GFXSTREAM_ENABLE_GUEST_GOLDFISH
        ensureSyncDeviceFd();
        goldfish_sync_queue_work(
            mSyncDeviceFd, get_host_u64_VkImage(image) /* the handle */,
            GOLDFISH_SYNC_VULKAN_QSRI /* thread handle (doubling as type field) */, fd);
#endif
    }

    mesa_logd("%s: got fd: %d\n", __func__, *fd);
    auto imageInfoIt = info_VkImage.find(image);
    if (imageInfoIt != info_VkImage.end()) {
        auto& imageInfo = imageInfoIt->second;

        // Remove any pending QSRI sync fds that are already signaled.
        auto syncFdIt = imageInfo.pendingQsriSyncFds.begin();
        while (syncFdIt != imageInfo.pendingQsriSyncFds.end()) {
            int syncFd = *syncFdIt;
            int syncWaitRet = mSyncHelper->wait(syncFd, /*timeout msecs*/ 0);
            if (syncWaitRet == 0) {
                // Sync fd is signaled.
                syncFdIt = imageInfo.pendingQsriSyncFds.erase(syncFdIt);
                mSyncHelper->close(syncFd);
            } else {
                if (errno != ETIME) {
                    mesa_loge("%s: Failed to wait for pending QSRI sync: strerror: %s errno: %d",
                              __func__, strerror(errno), errno);
                }
                break;
            }
        }

        int syncFdDup = mSyncHelper->dup(*fd);
        if (syncFdDup < 0) {
            mesa_loge("%s: Failed to dup() QSRI sync fd: strerror: %s errno: %d", __func__,
                      strerror(errno), errno);
        } else {
            imageInfo.pendingQsriSyncFds.push_back(syncFdDup);
        }
    }

    return VK_SUCCESS;
}

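// Lifecycle of the pending QSRI fds managed above, as a sketch (illustrative
// only, #if 0; `SyncHelper` stands in for mSyncHelper's type, and the fd
// container is assumed to be a std::vector<int>): each exported fence fd is
// dup()'d and parked on the image, and a zero-timeout wait() acts as a
// non-blocking "is it signaled yet?" poll so signaled fds are reaped on the
// next export.
#if 0
void qsriPendingFdReapExample(SyncHelper* syncHelper, std::vector<int>& pendingFds) {
    for (auto it = pendingFds.begin(); it != pendingFds.end();) {
        // wait() with timeout 0 returns 0 if the fence already signaled and
        // fails with errno == ETIME while it is still pending.
        if (syncHelper->wait(*it, /*timeout msecs*/ 0) == 0) {
            syncHelper->close(*it);
            it = pendingFds.erase(it);
        } else {
            break;  // oldest fd still pending; stop scanning
        }
    }
}
#endif
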
VkResult ResourceTracker::on_vkQueueSignalReleaseImageANDROID(void* context, VkResult input_result,
                                                              VkQueue queue,
                                                              uint32_t waitSemaphoreCount,
                                                              const VkSemaphore* pWaitSemaphores,
                                                              VkImage image, int* pNativeFenceFd) {
    (void)input_result;

    VkEncoder* enc = (VkEncoder*)context;

    if (!mFeatureInfo.hasVulkanAsyncQsri) {
        return enc->vkQueueSignalReleaseImageANDROID(queue, waitSemaphoreCount, pWaitSemaphores,
                                                     image, pNativeFenceFd, true /* lock */);
    }

    {
        std::lock_guard<std::recursive_mutex> lock(mLock);
        auto it = info_VkImage.find(image);
        if (it == info_VkImage.end()) {
            if (pNativeFenceFd) *pNativeFenceFd = -1;
            return VK_ERROR_INITIALIZATION_FAILED;
        }
    }

    enc->vkQueueSignalReleaseImageANDROIDAsyncGOOGLE(queue, waitSemaphoreCount, pWaitSemaphores,
                                                     image, true /* lock */);

    std::lock_guard<std::recursive_mutex> lock(mLock);
    VkResult result;
    if (pNativeFenceFd) {
        result = exportSyncFdForQSRILocked(image, pNativeFenceFd);
    } else {
        int syncFd = -1;
        result = exportSyncFdForQSRILocked(image, &syncFd);

        if (syncFd >= 0) {
            mSyncHelper->close(syncFd);
        }
    }

    return result;
}
#endif

VkResult ResourceTracker::on_vkCreateGraphicsPipelines(
    void* context, VkResult input_result, VkDevice device, VkPipelineCache pipelineCache,
    uint32_t createInfoCount, const VkGraphicsPipelineCreateInfo* pCreateInfos,
    const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines) {
    (void)input_result;
    VkEncoder* enc = (VkEncoder*)context;
    std::vector<VkGraphicsPipelineCreateInfo> localCreateInfos(pCreateInfos,
                                                               pCreateInfos + createInfoCount);
    for (VkGraphicsPipelineCreateInfo& graphicsPipelineCreateInfo : localCreateInfos) {
        // dEQP-VK.api.pipeline.pipeline_invalid_pointers_unused_structs#graphics
        bool requireViewportState = false;
        // VUID-VkGraphicsPipelineCreateInfo-rasterizerDiscardEnable-00750
        requireViewportState |=
            graphicsPipelineCreateInfo.pRasterizationState != nullptr &&
            graphicsPipelineCreateInfo.pRasterizationState->rasterizerDiscardEnable == VK_FALSE;
        // VUID-VkGraphicsPipelineCreateInfo-pViewportState-04892
#ifdef VK_EXT_extended_dynamic_state2
        if (!requireViewportState && graphicsPipelineCreateInfo.pDynamicState) {
            for (uint32_t i = 0; i < graphicsPipelineCreateInfo.pDynamicState->dynamicStateCount;
                 i++) {
                if (VK_DYNAMIC_STATE_RASTERIZER_DISCARD_ENABLE_EXT ==
                    graphicsPipelineCreateInfo.pDynamicState->pDynamicStates[i]) {
                    requireViewportState = true;
                    break;
                }
            }
        }
#endif  // VK_EXT_extended_dynamic_state2
        if (!requireViewportState) {
            graphicsPipelineCreateInfo.pViewportState = nullptr;
        }

        // Fragment shader state has the same requirement as pViewportState.
        bool shouldIncludeFragmentShaderState = requireViewportState;

        // VUID-VkGraphicsPipelineCreateInfo-rasterizerDiscardEnable-00751
        if (!shouldIncludeFragmentShaderState) {
            graphicsPipelineCreateInfo.pMultisampleState = nullptr;
        }

        bool forceDepthStencilState = false;
        bool forceColorBlendState = false;

        const VkPipelineRenderingCreateInfo* pipelineRenderingInfo =
            vk_find_struct_const(&graphicsPipelineCreateInfo, PIPELINE_RENDERING_CREATE_INFO);

        if (pipelineRenderingInfo) {
            forceDepthStencilState |=
                pipelineRenderingInfo->depthAttachmentFormat != VK_FORMAT_UNDEFINED;
            forceDepthStencilState |=
                pipelineRenderingInfo->stencilAttachmentFormat != VK_FORMAT_UNDEFINED;
            forceColorBlendState |= pipelineRenderingInfo->colorAttachmentCount != 0;
        }

        // VUID-VkGraphicsPipelineCreateInfo-renderPass-06043
        // VUID-VkGraphicsPipelineCreateInfo-renderPass-06044
        if (graphicsPipelineCreateInfo.renderPass == VK_NULL_HANDLE ||
            !shouldIncludeFragmentShaderState) {
            // VUID-VkGraphicsPipelineCreateInfo-renderPass-06053
            if (!forceDepthStencilState) {
                graphicsPipelineCreateInfo.pDepthStencilState = nullptr;
            }
            if (!forceColorBlendState) {
                graphicsPipelineCreateInfo.pColorBlendState = nullptr;
            }
        }
    }
    return enc->vkCreateGraphicsPipelines(device, pipelineCache, localCreateInfos.size(),
                                          localCreateInfos.data(), pAllocator, pPipelines,
                                          true /* do lock */);
}

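// The pruning above exists because applications may leave dangling pointers
// in states the spec says are ignored. A sketch of a create info that
// exercises it (illustrative only, #if 0): with rasterizer discard enabled,
// the viewport pointer may be garbage, and the loop above nulls it out
// before encoding so it is never dereferenced on the host.
#if 0
void pipelinePruningExample(VkGraphicsPipelineCreateInfo* ci,
                            const VkPipelineRasterizationStateCreateInfo* raster) {
    // raster->rasterizerDiscardEnable == VK_TRUE means no fragment processing,
    // so pViewportState is ignored per the spec and may be a stale pointer.
    ci->pRasterizationState = raster;
    ci->pViewportState = (const VkPipelineViewportStateCreateInfo*)0xdeadbeef;
    ci->pMultisampleState = nullptr;
    // After the pruning loop, ci->pViewportState == nullptr.
}
#endif
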
uint32_t ResourceTracker::getApiVersionFromInstance(VkInstance instance) {
    std::lock_guard<std::recursive_mutex> lock(mLock);
    uint32_t api = kDefaultApiVersion;

    auto it = info_VkInstance.find(instance);
    if (it == info_VkInstance.end()) return api;

    api = it->second.highestApiVersion;

    return api;
}

uint32_t ResourceTracker::getApiVersionFromDevice(VkDevice device) {
    std::lock_guard<std::recursive_mutex> lock(mLock);

    uint32_t api = kDefaultApiVersion;

    auto it = info_VkDevice.find(device);
    if (it == info_VkDevice.end()) return api;

    api = it->second.apiVersion;

    return api;
}

bool ResourceTracker::hasInstanceExtension(VkInstance instance, const std::string& name) {
    std::lock_guard<std::recursive_mutex> lock(mLock);

    auto it = info_VkInstance.find(instance);
    if (it == info_VkInstance.end()) return false;

    return it->second.enabledExtensions.find(name) != it->second.enabledExtensions.end();
}

bool ResourceTracker::hasDeviceExtension(VkDevice device, const std::string& name) {
    std::lock_guard<std::recursive_mutex> lock(mLock);

    auto it = info_VkDevice.find(device);
    if (it == info_VkDevice.end()) return false;

    return it->second.enabledExtensions.find(name) != it->second.enabledExtensions.end();
}

VkDevice ResourceTracker::getDevice(VkCommandBuffer commandBuffer) const {
    struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
    if (!cb) {
        return nullptr;
    }
    return cb->device;
}

// Resets the staging stream for this command buffer and for any primary
// command buffers into which this command buffer has been recorded. If
// requested, also clears the pending descriptor sets.
void ResourceTracker::resetCommandBufferStagingInfo(VkCommandBuffer commandBuffer,
                                                    bool alsoResetPrimaries,
                                                    bool alsoClearPendingDescriptorSets) {
    struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
    if (!cb) {
        return;
    }
    if (cb->privateEncoder) {
        sStaging.pushStaging((CommandBufferStagingStream*)cb->privateStream, cb->privateEncoder);
        cb->privateEncoder = nullptr;
        cb->privateStream = nullptr;
    }

    if (alsoClearPendingDescriptorSets && cb->userPtr) {
        CommandBufferPendingDescriptorSets* pendingSets =
            (CommandBufferPendingDescriptorSets*)cb->userPtr;
        pendingSets->sets.clear();
    }

    if (alsoResetPrimaries) {
        forAllObjects(cb->superObjects, [this, alsoResetPrimaries,
                                         alsoClearPendingDescriptorSets](void* obj) {
            VkCommandBuffer superCommandBuffer = (VkCommandBuffer)obj;
            this->resetCommandBufferStagingInfo(superCommandBuffer, alsoResetPrimaries,
                                                alsoClearPendingDescriptorSets);
        });
        eraseObjects(&cb->superObjects);
    }

    forAllObjects(cb->subObjects, [cb](void* obj) {
        VkCommandBuffer subCommandBuffer = (VkCommandBuffer)obj;
        struct goldfish_VkCommandBuffer* subCb = as_goldfish_VkCommandBuffer(subCommandBuffer);
        // We don't do resetCommandBufferStagingInfo(subCommandBuffer)
        // since the user still might have submittable stuff pending there.
        eraseObject(&subCb->superObjects, (void*)cb);
    });

    eraseObjects(&cb->subObjects);
}

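// The super/sub bookkeeping above forms a small graph: vkCmdExecuteCommands
// links a secondary into a primary (see on_vkCmdExecuteCommands), and the
// reset walks it. A sketch of the relationship (illustrative only, #if 0):
#if 0
void commandBufferGraphExample(VkCommandBuffer primary, VkCommandBuffer secondary) {
    struct goldfish_VkCommandBuffer* p = as_goldfish_VkCommandBuffer(primary);
    struct goldfish_VkCommandBuffer* s = as_goldfish_VkCommandBuffer(secondary);

    // After vkCmdExecuteCommands(primary, 1, &secondary):
    //   s->superObjects contains p (primaries this secondary was recorded into)
    //   p->subObjects contains s (secondaries recorded into this primary)
    appendObject(&s->superObjects, p);
    appendObject(&p->subObjects, s);

    // Resetting the secondary with alsoResetPrimaries == true then resets
    // p's staging too, because p's recorded stream embeds s's commands.
}
#endif
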
// Unlike resetCommandBufferStagingInfo, this does not always erase its
// superObjects pointers, because the command buffer has merely been
// submitted, not reset. However, if the command buffer was recorded with
// ONE_TIME_SUBMIT_BIT, then it will also reset its primaries.
//
// We also keep the set of descriptor sets referenced by this command
// buffer, because we only submitted the command buffer, and it is possible
// to update a descriptor set and re-submit the same command buffer without
// re-recording it (update-after-bind descriptor sets).
void ResourceTracker::resetCommandBufferPendingTopology(VkCommandBuffer commandBuffer) {
    struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
    if (cb->flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) {
        resetCommandBufferStagingInfo(commandBuffer, true /* reset primaries */,
                                      true /* clear pending descriptor sets */);
    } else {
        resetCommandBufferStagingInfo(commandBuffer, false /* don't reset primaries */,
                                      false /* don't clear pending descriptor sets */);
    }
}

void ResourceTracker::resetCommandPoolStagingInfo(VkCommandPool commandPool) {
    struct goldfish_VkCommandPool* p = as_goldfish_VkCommandPool(commandPool);

    if (!p) return;

    forAllObjects(p->subObjects, [this](void* commandBuffer) {
        this->resetCommandBufferStagingInfo((VkCommandBuffer)commandBuffer,
                                            true /* also reset primaries */,
                                            true /* also clear pending descriptor sets */);
    });
}

void ResourceTracker::addToCommandPool(VkCommandPool commandPool, uint32_t commandBufferCount,
                                       VkCommandBuffer* pCommandBuffers) {
    struct goldfish_VkCommandPool* p = as_goldfish_VkCommandPool(commandPool);
    for (uint32_t i = 0; i < commandBufferCount; ++i) {
        struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(pCommandBuffers[i]);
        appendObject(&p->subObjects, (void*)(pCommandBuffers[i]));
        appendObject(&cb->poolObjects, (void*)commandPool);
    }
}

void ResourceTracker::clearCommandPool(VkCommandPool commandPool) {
    resetCommandPoolStagingInfo(commandPool);
    struct goldfish_VkCommandPool* p = as_goldfish_VkCommandPool(commandPool);
    forAllObjects(p->subObjects, [this](void* commandBuffer) {
        this->unregister_VkCommandBuffer((VkCommandBuffer)commandBuffer);
    });
    eraseObjects(&p->subObjects);
}

const VkPhysicalDeviceMemoryProperties& ResourceTracker::getPhysicalDeviceMemoryProperties(
    void* context, VkDevice device, VkPhysicalDevice physicalDevice) {
    if (!mCachedPhysicalDeviceMemoryProps) {
        if (physicalDevice == VK_NULL_HANDLE) {
            std::lock_guard<std::recursive_mutex> lock(mLock);

            auto deviceInfoIt = info_VkDevice.find(device);
            if (deviceInfoIt == info_VkDevice.end()) {
                mesa_loge("Failed to pass device or physical device.");
                abort();
            }
            const auto& deviceInfo = deviceInfoIt->second;
            physicalDevice = deviceInfo.physdev;
        }

        VkEncoder* enc = (VkEncoder*)context;

        VkPhysicalDeviceMemoryProperties properties;
        enc->vkGetPhysicalDeviceMemoryProperties(physicalDevice, &properties, true /* do lock */);

        mCachedPhysicalDeviceMemoryProps.emplace(std::move(properties));
    }
    return *mCachedPhysicalDeviceMemoryProps;
}

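// Memory properties are fetched once and memoized; callers may pass either a
// valid physicalDevice, or VK_NULL_HANDLE plus a known device from which the
// physical device is resolved. A usage sketch (illustrative only, #if 0):
#if 0
uint32_t hostVisibleMemoryTypeIndexExample(void* context, VkDevice device) {
    const VkPhysicalDeviceMemoryProperties& props =
        ResourceTracker::get()->getPhysicalDeviceMemoryProperties(context, device,
                                                                  VK_NULL_HANDLE);
    for (uint32_t i = 0; i < props.memoryTypeCount; ++i) {
        if (props.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) {
            return i;  // first host-visible memory type
        }
    }
    return UINT32_MAX;  // not found
}
#endif
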
static ResourceTracker* sTracker = nullptr;

ResourceTracker::ResourceTracker() {
    mCreateMapping = new CreateMapping();
    mDestroyMapping = new DestroyMapping();
}

ResourceTracker::~ResourceTracker() {
    delete mCreateMapping;
    delete mDestroyMapping;
}

VulkanHandleMapping* ResourceTracker::createMapping() { return mCreateMapping; }

VulkanHandleMapping* ResourceTracker::destroyMapping() { return mDestroyMapping; }

// static
ResourceTracker* ResourceTracker::get() {
    if (!sTracker) {
        // To be initialized once on vulkan device open.
        sTracker = new ResourceTracker;
    }
    return sTracker;
}

// static
ALWAYS_INLINE_GFXSTREAM VkEncoder* ResourceTracker::getCommandBufferEncoder(
    VkCommandBuffer commandBuffer) {
    if (!(ResourceTracker::streamFeatureBits &
          VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT)) {
        auto enc = ResourceTracker::getThreadLocalEncoder();
        ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, enc);
        return enc;
    }

    struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
    if (!cb->privateEncoder) {
        sStaging.setAllocFree(ResourceTracker::get()->getAlloc(),
                              ResourceTracker::get()->getFree());
        sStaging.popStaging((CommandBufferStagingStream**)&cb->privateStream, &cb->privateEncoder);
    }
    // Note: the written pointer/size queried here are not used afterwards.
    uint8_t* writtenPtr;
    size_t written;
    ((CommandBufferStagingStream*)cb->privateStream)->getWritten(&writtenPtr, &written);
    return cb->privateEncoder;
}

// static
ALWAYS_INLINE_GFXSTREAM VkEncoder* ResourceTracker::getQueueEncoder(VkQueue queue) {
    auto enc = ResourceTracker::getThreadLocalEncoder();
    if (!(ResourceTracker::streamFeatureBits &
          VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT)) {
        ResourceTracker::get()->syncEncodersForQueue(queue, enc);
    }
    return enc;
}

// static
ALWAYS_INLINE_GFXSTREAM VkEncoder* ResourceTracker::getThreadLocalEncoder() {
    auto hostConn = ResourceTracker::threadingCallbacks.hostConnectionGetFunc();
    auto vkEncoder = ResourceTracker::threadingCallbacks.vkEncoderGetFunc(hostConn);
    return vkEncoder;
}

// static
void ResourceTracker::setSeqnoPtr(uint32_t* seqnoptr) { sSeqnoPtr = seqnoptr; }

// static
ALWAYS_INLINE_GFXSTREAM uint32_t ResourceTracker::nextSeqno() {
    uint32_t res = __atomic_add_fetch(sSeqnoPtr, 1, __ATOMIC_SEQ_CST);
    return res;
}

// static
ALWAYS_INLINE_GFXSTREAM uint32_t ResourceTracker::getSeqno() {
    uint32_t res = __atomic_load_n(sSeqnoPtr, __ATOMIC_SEQ_CST);
    return res;
}

void ResourceTracker::transformImpl_VkExternalMemoryProperties_tohost(VkExternalMemoryProperties*,
                                                                      uint32_t) {}

void ResourceTracker::transformImpl_VkImageCreateInfo_fromhost(const VkImageCreateInfo*,
                                                               uint32_t) {}
void ResourceTracker::transformImpl_VkImageCreateInfo_tohost(const VkImageCreateInfo*, uint32_t) {}

#define DEFINE_TRANSFORMED_TYPE_IMPL(type)                                    \
    void ResourceTracker::transformImpl_##type##_tohost(type*, uint32_t) {}   \
    void ResourceTracker::transformImpl_##type##_fromhost(type*, uint32_t) {}

LIST_TRIVIAL_TRANSFORMED_TYPES(DEFINE_TRANSFORMED_TYPE_IMPL)

} // namespace vk
} // namespace gfxstream