// Copyright (C) 2018 The Android Open Source Project
// Copyright (C) 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "ResourceTracker.h"

#include "../OpenglSystemCommon/EmulatorFeatureInfo.h"
#include "../OpenglSystemCommon/HostConnection.h"
#include "CommandBufferStagingStream.h"
#include "DescriptorSetVirtualization.h"
#include "HostVisibleMemoryVirtualization.h"
#include "Resources.h"
#include "VkEncoder.h"
#include "aemu/base/AlignedBuf.h"
#include "gfxstream_vk_private.h"
#include "goldfish_address_space.h"
#include "goldfish_vk_private_defs.h"
#include "util.h"
#include "virtgpu_gfxstream_protocol.h"
#include "vulkan/vk_enum_string_helper.h"
#include "vulkan/vulkan_core.h"
#ifdef VK_USE_PLATFORM_ANDROID_KHR
#include "vk_format_info.h"
#endif
#include <stdlib.h>
#include <vndk/hardware_buffer.h>

#include <algorithm>
#include <set>
#include <string>
#include <unordered_map>
#include <unordered_set>

#include "vk_struct_id.h"
#include "vk_util.h"

#if defined(__ANDROID__) || defined(__linux__) || defined(__APPLE__)

#include <sys/mman.h>
#include <sys/syscall.h>

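// Bionic and older glibc do not always expose a memfd_create(2) wrapper, so on
// Android we invoke the syscall directly; on platforms without it, this stub
// simply reports failure by returning -1.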
static inline int inline_memfd_create(const char* name, unsigned int flags) {
#if defined(__ANDROID__)
    return syscall(SYS_memfd_create, name, flags);
#else
    return -1;
#endif
}

#define memfd_create inline_memfd_create
#endif

#ifndef VK_USE_PLATFORM_FUCHSIA
void zx_handle_close(zx_handle_t) {}
void zx_event_create(int, zx_handle_t*) {}
#endif

static constexpr uint32_t kDefaultApiVersion = VK_MAKE_VERSION(1, 1, 0);

namespace gfxstream {
namespace vk {

#define MAKE_HANDLE_MAPPING_FOREACH(type_name, map_impl, map_to_u64_impl, map_from_u64_impl) \
    void mapHandles_##type_name(type_name* handles, size_t count) override {                 \
        for (size_t i = 0; i < count; ++i) {                                                 \
            map_impl;                                                                        \
        }                                                                                    \
    }                                                                                        \
    void mapHandles_##type_name##_u64(const type_name* handles, uint64_t* handle_u64s,      \
                                      size_t count) override {                               \
        for (size_t i = 0; i < count; ++i) {                                                 \
            map_to_u64_impl;                                                                 \
        }                                                                                    \
    }                                                                                        \
    void mapHandles_u64_##type_name(const uint64_t* handle_u64s, type_name* handles,        \
                                    size_t count) override {                                 \
        for (size_t i = 0; i < count; ++i) {                                                 \
            map_from_u64_impl;                                                               \
        }                                                                                    \
    }

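// MAKE_HANDLE_MAPPING_FOREACH stamps out the three mapHandles_* overrides that
// VulkanHandleMapping declares for a given handle type: in-place handle
// mapping, handle -> uint64_t, and uint64_t -> handle. For illustration, the
// in-place overload for VkDevice expands to roughly:
//
//   void mapHandles_VkDevice(VkDevice* handles, size_t count) override {
//       for (size_t i = 0; i < count; ++i) { /* map_impl per handle */ }
//   }
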
#define DEFINE_RESOURCE_TRACKING_CLASS(class_name, impl) \
    class class_name : public VulkanHandleMapping {      \
       public:                                           \
        virtual ~class_name() {}                         \
        GOLDFISH_VK_LIST_HANDLE_TYPES(impl)              \
    };

#define CREATE_MAPPING_IMPL_FOR_TYPE(type_name)                                \
    MAKE_HANDLE_MAPPING_FOREACH(                                               \
        type_name, handles[i] = new_from_host_##type_name(handles[i]);         \
        ResourceTracker::get()->register_##type_name(handles[i]);              \
        , handle_u64s[i] = (uint64_t)new_from_host_##type_name(handles[i]),    \
        handles[i] = (type_name)new_from_host_u64_##type_name(handle_u64s[i]); \
        ResourceTracker::get()->register_##type_name(handles[i]);)

#define UNWRAP_MAPPING_IMPL_FOR_TYPE(type_name)                          \
    MAKE_HANDLE_MAPPING_FOREACH(                                         \
        type_name, handles[i] = get_host_##type_name(handles[i]),        \
        handle_u64s[i] = (uint64_t)get_host_u64_##type_name(handles[i]), \
        handles[i] = (type_name)get_host_##type_name((type_name)handle_u64s[i]))

#define DESTROY_MAPPING_IMPL_FOR_TYPE(type_name)                                                \
    MAKE_HANDLE_MAPPING_FOREACH(type_name,                                                      \
                                ResourceTracker::get()->unregister_##type_name(handles[i]);     \
                                delete_goldfish_##type_name(handles[i]), (void)handle_u64s[i];  \
                                delete_goldfish_##type_name(handles[i]), (void)handles[i];      \
                                delete_goldfish_##type_name((type_name)handle_u64s[i]))

DEFINE_RESOURCE_TRACKING_CLASS(CreateMapping, CREATE_MAPPING_IMPL_FOR_TYPE)
DEFINE_RESOURCE_TRACKING_CLASS(DestroyMapping, DESTROY_MAPPING_IMPL_FOR_TYPE)
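
// CreateMapping wraps host-returned handles in goldfish wrapper objects and
// registers them with the ResourceTracker; DestroyMapping is its inverse,
// unregistering each handle and deleting the wrapper. Each class receives one
// mapHandles_* trio per handle type via GOLDFISH_VK_LIST_HANDLE_TYPES.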

static uint32_t* sSeqnoPtr = nullptr;

// static
uint32_t ResourceTracker::streamFeatureBits = 0;
ResourceTracker::ThreadingCallbacks ResourceTracker::threadingCallbacks;

struct StagingInfo {
    Lock mLock;
    std::vector<CommandBufferStagingStream*> streams;
    std::vector<VkEncoder*> encoders;
    /// \brief sets alloc and free callbacks for memory allocation for CommandBufferStagingStream(s)
    /// \param allocFn is the callback to allocate memory
    /// \param freeFn is the callback to free memory
    void setAllocFree(CommandBufferStagingStream::Alloc&& allocFn,
                      CommandBufferStagingStream::Free&& freeFn) {
        mAlloc = allocFn;
        mFree = freeFn;
    }

    ~StagingInfo() {
        for (auto stream : streams) {
            delete stream;
        }

        for (auto encoder : encoders) {
            delete encoder;
        }
    }

    void pushStaging(CommandBufferStagingStream* stream, VkEncoder* encoder) {
        AutoLock<Lock> lock(mLock);
        stream->reset();
        streams.push_back(stream);
        encoders.push_back(encoder);
    }

    void popStaging(CommandBufferStagingStream** streamOut, VkEncoder** encoderOut) {
        AutoLock<Lock> lock(mLock);
        CommandBufferStagingStream* stream;
        VkEncoder* encoder;
        if (streams.empty()) {
            if (mAlloc && mFree) {
                // if custom allocators are provided, forward them to CommandBufferStagingStream
                stream = new CommandBufferStagingStream(mAlloc, mFree);
            } else {
                stream = new CommandBufferStagingStream;
            }
            encoder = new VkEncoder(stream);
        } else {
            stream = streams.back();
            encoder = encoders.back();
            streams.pop_back();
            encoders.pop_back();
        }
        *streamOut = stream;
        *encoderOut = encoder;
    }

   private:
    CommandBufferStagingStream::Alloc mAlloc = nullptr;
    CommandBufferStagingStream::Free mFree = nullptr;
};

static StagingInfo sStaging;
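
// sStaging is a process-wide recycling pool for command buffer staging streams
// and their encoders: popStaging() hands out a (stream, encoder) pair, creating
// one on demand, and pushStaging() resets the stream and shelves the pair for
// reuse. A sketch of the intended usage (the real call sites live in the
// command buffer encoding paths):
//
//   CommandBufferStagingStream* stream = nullptr;
//   VkEncoder* encoder = nullptr;
//   sStaging.popStaging(&stream, &encoder);
//   // ... encode commands into the staging stream via |encoder| ...
//   sStaging.pushStaging(stream, encoder);  // recycle for the next user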

struct CommandBufferPendingDescriptorSets {
    std::unordered_set<VkDescriptorSet> sets;
};

#define HANDLE_REGISTER_IMPL_IMPL(type)               \
    void ResourceTracker::register_##type(type obj) { \
        AutoLock<RecursiveLock> lock(mLock);          \
        info_##type[obj] = type##_Info();             \
    }

#define HANDLE_UNREGISTER_IMPL_IMPL(type)               \
    void ResourceTracker::unregister_##type(type obj) { \
        AutoLock<RecursiveLock> lock(mLock);            \
        info_##type.erase(obj);                         \
    }

GOLDFISH_VK_LIST_HANDLE_TYPES(HANDLE_REGISTER_IMPL_IMPL)
GOLDFISH_VK_LIST_TRIVIAL_HANDLE_TYPES(HANDLE_UNREGISTER_IMPL_IMPL)

uint32_t getWaitSemaphoreCount(const VkSubmitInfo& pSubmit) { return pSubmit.waitSemaphoreCount; }

uint32_t getWaitSemaphoreCount(const VkSubmitInfo2& pSubmit) {
    return pSubmit.waitSemaphoreInfoCount;
}

uint32_t getCommandBufferCount(const VkSubmitInfo& pSubmit) { return pSubmit.commandBufferCount; }

uint32_t getCommandBufferCount(const VkSubmitInfo2& pSubmit) {
    return pSubmit.commandBufferInfoCount;
}

uint32_t getSignalSemaphoreCount(const VkSubmitInfo& pSubmit) {
    return pSubmit.signalSemaphoreCount;
}

uint32_t getSignalSemaphoreCount(const VkSubmitInfo2& pSubmit) {
    return pSubmit.signalSemaphoreInfoCount;
}

VkSemaphore getWaitSemaphore(const VkSubmitInfo& pSubmit, int i) {
    return pSubmit.pWaitSemaphores[i];
}

VkSemaphore getWaitSemaphore(const VkSubmitInfo2& pSubmit, int i) {
    return pSubmit.pWaitSemaphoreInfos[i].semaphore;
}

VkSemaphore getSignalSemaphore(const VkSubmitInfo& pSubmit, int i) {
    return pSubmit.pSignalSemaphores[i];
}

VkSemaphore getSignalSemaphore(const VkSubmitInfo2& pSubmit, int i) {
    return pSubmit.pSignalSemaphoreInfos[i].semaphore;
}

VkCommandBuffer getCommandBuffer(const VkSubmitInfo& pSubmit, int i) {
    return pSubmit.pCommandBuffers[i];
}

VkCommandBuffer getCommandBuffer(const VkSubmitInfo2& pSubmit, int i) {
    return pSubmit.pCommandBufferInfos[i].commandBuffer;
}
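
// These overloads give VkSubmitInfo and VkSubmitInfo2 a common accessor
// surface, so code that walks submissions can be written once as a template
// over the submit-info type instead of being duplicated per structure.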

bool descriptorPoolSupportsIndividualFreeLocked(VkDescriptorPool pool) {
    return as_goldfish_VkDescriptorPool(pool)->allocInfo->createFlags &
           VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
}

VkDescriptorImageInfo createImmutableSamplersFilteredImageInfo(
    VkDescriptorType descType, VkDescriptorSet descSet, uint32_t binding,
    const VkDescriptorImageInfo* pImageInfo) {
    VkDescriptorImageInfo res = *pImageInfo;

    if (descType != VK_DESCRIPTOR_TYPE_SAMPLER &&
        descType != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
        return res;

    bool immutableSampler =
        as_goldfish_VkDescriptorSet(descSet)->reified->bindingIsImmutableSampler[binding];

    if (!immutableSampler) return res;

    res.sampler = 0;

    return res;
}
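
// The filtering above follows the Vulkan spec: for SAMPLER and
// COMBINED_IMAGE_SAMPLER bindings created with immutable samplers, the sampler
// member of a descriptor write is ignored, so it is zeroed here rather than
// forwarding a possibly stale guest-side sampler handle to the host.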

bool descriptorBindingIsImmutableSampler(VkDescriptorSet dstSet, uint32_t dstBinding) {
    return as_goldfish_VkDescriptorSet(dstSet)->reified->bindingIsImmutableSampler[dstBinding];
}

VkDescriptorImageInfo ResourceTracker::filterNonexistentSampler(
    const VkDescriptorImageInfo& inputInfo) {
    VkSampler sampler = inputInfo.sampler;

    VkDescriptorImageInfo res = inputInfo;

    if (sampler) {
        auto it = info_VkSampler.find(sampler);
        bool samplerExists = it != info_VkSampler.end();
        if (!samplerExists) res.sampler = 0;
    }

    return res;
}

void ResourceTracker::emitDeviceMemoryReport(VkDevice_Info info,
                                             VkDeviceMemoryReportEventTypeEXT type,
                                             uint64_t memoryObjectId, VkDeviceSize size,
                                             VkObjectType objectType, uint64_t objectHandle,
                                             uint32_t heapIndex) {
    if (info.deviceMemoryReportCallbacks.empty()) return;

    const VkDeviceMemoryReportCallbackDataEXT callbackData = {
        VK_STRUCTURE_TYPE_DEVICE_MEMORY_REPORT_CALLBACK_DATA_EXT,  // sType
        nullptr,                                                   // pNext
        0,                                                         // flags
        type,                                                      // type
        memoryObjectId,                                            // memoryObjectId
        size,                                                      // size
        objectType,                                                // objectType
        objectHandle,                                              // objectHandle
        heapIndex,                                                 // heapIndex
    };
    for (const auto& callback : info.deviceMemoryReportCallbacks) {
        callback.first(&callbackData, callback.second);
    }
}

#ifdef VK_USE_PLATFORM_FUCHSIA
inline fuchsia_sysmem::wire::BufferCollectionConstraints defaultBufferCollectionConstraints(
    size_t minSizeBytes, size_t minBufferCount, size_t maxBufferCount = 0u,
    size_t minBufferCountForCamping = 0u, size_t minBufferCountForDedicatedSlack = 0u,
    size_t minBufferCountForSharedSlack = 0u) {
    fuchsia_sysmem::wire::BufferCollectionConstraints constraints = {};
    constraints.min_buffer_count = minBufferCount;
    if (maxBufferCount > 0) {
        constraints.max_buffer_count = maxBufferCount;
    }
    if (minBufferCountForCamping) {
        constraints.min_buffer_count_for_camping = minBufferCountForCamping;
    }
    if (minBufferCountForSharedSlack) {
        constraints.min_buffer_count_for_shared_slack = minBufferCountForSharedSlack;
    }
    constraints.has_buffer_memory_constraints = true;
    fuchsia_sysmem::wire::BufferMemoryConstraints& buffer_constraints =
        constraints.buffer_memory_constraints;

    buffer_constraints.min_size_bytes = minSizeBytes;
    buffer_constraints.max_size_bytes = 0xffffffff;
    buffer_constraints.physically_contiguous_required = false;
    buffer_constraints.secure_required = false;

    // No restrictions on coherency domain; only the two goldfish heaps are permitted.
    buffer_constraints.ram_domain_supported = true;
    buffer_constraints.cpu_domain_supported = true;
    buffer_constraints.inaccessible_domain_supported = true;
    buffer_constraints.heap_permitted_count = 2;
    buffer_constraints.heap_permitted[0] = fuchsia_sysmem::wire::HeapType::kGoldfishDeviceLocal;
    buffer_constraints.heap_permitted[1] = fuchsia_sysmem::wire::HeapType::kGoldfishHostVisible;

    return constraints;
}

uint32_t getBufferCollectionConstraintsVulkanImageUsage(const VkImageCreateInfo* pImageInfo) {
    uint32_t usage = 0u;
    VkImageUsageFlags imageUsage = pImageInfo->usage;

#define SetUsageBit(BIT, VALUE)                                  \
    if (imageUsage & VK_IMAGE_USAGE_##BIT##_BIT) {               \
        usage |= fuchsia_sysmem::wire::kVulkanImageUsage##VALUE; \
    }

    SetUsageBit(COLOR_ATTACHMENT, ColorAttachment);
    SetUsageBit(TRANSFER_SRC, TransferSrc);
    SetUsageBit(TRANSFER_DST, TransferDst);
    SetUsageBit(SAMPLED, Sampled);

#undef SetUsageBit
    return usage;
}

uint32_t getBufferCollectionConstraintsVulkanBufferUsage(VkBufferUsageFlags bufferUsage) {
    uint32_t usage = 0u;

#define SetUsageBit(BIT, VALUE)                                   \
    if (bufferUsage & VK_BUFFER_USAGE_##BIT##_BIT) {              \
        usage |= fuchsia_sysmem::wire::kVulkanBufferUsage##VALUE; \
    }

    SetUsageBit(TRANSFER_SRC, TransferSrc);
    SetUsageBit(TRANSFER_DST, TransferDst);
    SetUsageBit(UNIFORM_TEXEL_BUFFER, UniformTexelBuffer);
    SetUsageBit(STORAGE_TEXEL_BUFFER, StorageTexelBuffer);
    SetUsageBit(UNIFORM_BUFFER, UniformBuffer);
    SetUsageBit(STORAGE_BUFFER, StorageBuffer);
    SetUsageBit(INDEX_BUFFER, IndexBuffer);
    SetUsageBit(VERTEX_BUFFER, VertexBuffer);
    SetUsageBit(INDIRECT_BUFFER, IndirectBuffer);

#undef SetUsageBit
    return usage;
}

uint32_t getBufferCollectionConstraintsVulkanBufferUsage(
    const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo) {
    VkBufferUsageFlags bufferUsage = pBufferConstraintsInfo->createInfo.usage;
    return getBufferCollectionConstraintsVulkanBufferUsage(bufferUsage);
}

static fuchsia_sysmem::wire::PixelFormatType vkFormatTypeToSysmem(VkFormat format) {
    switch (format) {
        case VK_FORMAT_B8G8R8A8_SINT:
        case VK_FORMAT_B8G8R8A8_UNORM:
        case VK_FORMAT_B8G8R8A8_SRGB:
        case VK_FORMAT_B8G8R8A8_SNORM:
        case VK_FORMAT_B8G8R8A8_SSCALED:
        case VK_FORMAT_B8G8R8A8_USCALED:
            return fuchsia_sysmem::wire::PixelFormatType::kBgra32;
        case VK_FORMAT_R8G8B8A8_SINT:
        case VK_FORMAT_R8G8B8A8_UNORM:
        case VK_FORMAT_R8G8B8A8_SRGB:
        case VK_FORMAT_R8G8B8A8_SNORM:
        case VK_FORMAT_R8G8B8A8_SSCALED:
        case VK_FORMAT_R8G8B8A8_USCALED:
            return fuchsia_sysmem::wire::PixelFormatType::kR8G8B8A8;
        case VK_FORMAT_R8_UNORM:
        case VK_FORMAT_R8_UINT:
        case VK_FORMAT_R8_USCALED:
        case VK_FORMAT_R8_SNORM:
        case VK_FORMAT_R8_SINT:
        case VK_FORMAT_R8_SSCALED:
        case VK_FORMAT_R8_SRGB:
            return fuchsia_sysmem::wire::PixelFormatType::kR8;
        case VK_FORMAT_R8G8_UNORM:
        case VK_FORMAT_R8G8_UINT:
        case VK_FORMAT_R8G8_USCALED:
        case VK_FORMAT_R8G8_SNORM:
        case VK_FORMAT_R8G8_SINT:
        case VK_FORMAT_R8G8_SSCALED:
        case VK_FORMAT_R8G8_SRGB:
            return fuchsia_sysmem::wire::PixelFormatType::kR8G8;
        default:
            return fuchsia_sysmem::wire::PixelFormatType::kInvalid;
    }
}

static bool vkFormatMatchesSysmemFormat(VkFormat vkFormat,
                                        fuchsia_sysmem::wire::PixelFormatType sysmemFormat) {
    switch (vkFormat) {
        case VK_FORMAT_B8G8R8A8_SINT:
        case VK_FORMAT_B8G8R8A8_UNORM:
        case VK_FORMAT_B8G8R8A8_SRGB:
        case VK_FORMAT_B8G8R8A8_SNORM:
        case VK_FORMAT_B8G8R8A8_SSCALED:
        case VK_FORMAT_B8G8R8A8_USCALED:
            return sysmemFormat == fuchsia_sysmem::wire::PixelFormatType::kBgra32;
        case VK_FORMAT_R8G8B8A8_SINT:
        case VK_FORMAT_R8G8B8A8_UNORM:
        case VK_FORMAT_R8G8B8A8_SRGB:
        case VK_FORMAT_R8G8B8A8_SNORM:
        case VK_FORMAT_R8G8B8A8_SSCALED:
        case VK_FORMAT_R8G8B8A8_USCALED:
            return sysmemFormat == fuchsia_sysmem::wire::PixelFormatType::kR8G8B8A8;
        case VK_FORMAT_R8_UNORM:
        case VK_FORMAT_R8_UINT:
        case VK_FORMAT_R8_USCALED:
        case VK_FORMAT_R8_SNORM:
        case VK_FORMAT_R8_SINT:
        case VK_FORMAT_R8_SSCALED:
        case VK_FORMAT_R8_SRGB:
            return sysmemFormat == fuchsia_sysmem::wire::PixelFormatType::kR8 ||
                   sysmemFormat == fuchsia_sysmem::wire::PixelFormatType::kL8;
        case VK_FORMAT_R8G8_UNORM:
        case VK_FORMAT_R8G8_UINT:
        case VK_FORMAT_R8G8_USCALED:
        case VK_FORMAT_R8G8_SNORM:
        case VK_FORMAT_R8G8_SINT:
        case VK_FORMAT_R8G8_SSCALED:
        case VK_FORMAT_R8G8_SRGB:
            return sysmemFormat == fuchsia_sysmem::wire::PixelFormatType::kR8G8;
        default:
            return false;
    }
}

static VkFormat sysmemPixelFormatTypeToVk(fuchsia_sysmem::wire::PixelFormatType format) {
    switch (format) {
        case fuchsia_sysmem::wire::PixelFormatType::kBgra32:
            return VK_FORMAT_B8G8R8A8_SRGB;
        case fuchsia_sysmem::wire::PixelFormatType::kR8G8B8A8:
            return VK_FORMAT_R8G8B8A8_SRGB;
        case fuchsia_sysmem::wire::PixelFormatType::kL8:
        case fuchsia_sysmem::wire::PixelFormatType::kR8:
            return VK_FORMAT_R8_UNORM;
        case fuchsia_sysmem::wire::PixelFormatType::kR8G8:
            return VK_FORMAT_R8G8_UNORM;
        default:
            return VK_FORMAT_UNDEFINED;
    }
}
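
// Note that the reverse mapping above is necessarily lossy: a sysmem pixel
// format does not encode Vulkan's numeric-format suffix, so kBgra32 and
// kR8G8B8A8 come back as the *_SRGB variants while kL8/kR8/kR8G8 map to
// *_UNORM defaults.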

// TODO(fxbug.dev/42172354): This is currently only used for allocating
// memory for dedicated external images. It should be migrated to use
// SetBufferCollectionImageConstraintsFUCHSIA.
VkResult ResourceTracker::setBufferCollectionConstraintsFUCHSIA(
    VkEncoder* enc, VkDevice device,
    fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>* collection,
    const VkImageCreateInfo* pImageInfo) {
    if (pImageInfo == nullptr) {
        mesa_loge("setBufferCollectionConstraints: pImageInfo cannot be null.");
        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    }

    const VkSysmemColorSpaceFUCHSIA kDefaultColorSpace = {
        .sType = VK_STRUCTURE_TYPE_SYSMEM_COLOR_SPACE_FUCHSIA,
        .pNext = nullptr,
        .colorSpace = static_cast<uint32_t>(fuchsia_sysmem::wire::ColorSpaceType::kSrgb),
    };

    std::vector<VkImageFormatConstraintsInfoFUCHSIA> formatInfos;
    if (pImageInfo->format == VK_FORMAT_UNDEFINED) {
        const auto kFormats = {
            VK_FORMAT_B8G8R8A8_SRGB,
            VK_FORMAT_R8G8B8A8_SRGB,
        };
        for (auto format : kFormats) {
            // shallow copy, using pNext from pImageInfo directly.
            auto createInfo = *pImageInfo;
            createInfo.format = format;
            formatInfos.push_back(VkImageFormatConstraintsInfoFUCHSIA{
                .sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_CONSTRAINTS_INFO_FUCHSIA,
                .pNext = nullptr,
                .imageCreateInfo = createInfo,
                .colorSpaceCount = 1,
                .pColorSpaces = &kDefaultColorSpace,
            });
        }
    } else {
        formatInfos.push_back(VkImageFormatConstraintsInfoFUCHSIA{
            .sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_CONSTRAINTS_INFO_FUCHSIA,
            .pNext = nullptr,
            .imageCreateInfo = *pImageInfo,
            .colorSpaceCount = 1,
            .pColorSpaces = &kDefaultColorSpace,
        });
    }

    VkImageConstraintsInfoFUCHSIA imageConstraints = {
        .sType = VK_STRUCTURE_TYPE_IMAGE_CONSTRAINTS_INFO_FUCHSIA,
        .pNext = nullptr,
        .formatConstraintsCount = static_cast<uint32_t>(formatInfos.size()),
        .pFormatConstraints = formatInfos.data(),
        .bufferCollectionConstraints =
            VkBufferCollectionConstraintsInfoFUCHSIA{
                .sType = VK_STRUCTURE_TYPE_BUFFER_COLLECTION_CONSTRAINTS_INFO_FUCHSIA,
                .pNext = nullptr,
                .minBufferCount = 1,
                .maxBufferCount = 0,
                .minBufferCountForCamping = 0,
                .minBufferCountForDedicatedSlack = 0,
                .minBufferCountForSharedSlack = 0,
            },
        .flags = 0u,
    };

    return setBufferCollectionImageConstraintsFUCHSIA(enc, device, collection, &imageConstraints);
}

VkResult addImageBufferCollectionConstraintsFUCHSIA(
    VkEncoder* enc, VkDevice device, VkPhysicalDevice physicalDevice,
    const VkImageFormatConstraintsInfoFUCHSIA* formatConstraints,  // always non-zero
    VkImageTiling tiling, fuchsia_sysmem::wire::BufferCollectionConstraints* constraints) {
    // First check if the format, tiling and usage are supported on the host.
    VkImageFormatProperties imageFormatProperties;
    auto createInfo = &formatConstraints->imageCreateInfo;
    auto result = enc->vkGetPhysicalDeviceImageFormatProperties(
        physicalDevice, createInfo->format, createInfo->imageType, tiling, createInfo->usage,
        createInfo->flags, &imageFormatProperties, true /* do lock */);
    if (result != VK_SUCCESS) {
        mesa_logd(
            "%s: Image format (%u) type (%u) tiling (%u) "
            "usage (%u) flags (%u) not supported by physical "
            "device",
            __func__, static_cast<uint32_t>(createInfo->format),
            static_cast<uint32_t>(createInfo->imageType), static_cast<uint32_t>(tiling),
            static_cast<uint32_t>(createInfo->usage), static_cast<uint32_t>(createInfo->flags));
        return VK_ERROR_FORMAT_NOT_SUPPORTED;
    }

    // Check if the format constraints contain unsupported format features.
    {
        VkFormatProperties formatProperties;
        enc->vkGetPhysicalDeviceFormatProperties(physicalDevice, createInfo->format,
                                                 &formatProperties, true /* do lock */);

        auto supportedFeatures = (tiling == VK_IMAGE_TILING_LINEAR)
                                     ? formatProperties.linearTilingFeatures
                                     : formatProperties.optimalTilingFeatures;
        auto requiredFeatures = formatConstraints->requiredFormatFeatures;
        if ((~supportedFeatures) & requiredFeatures) {
            mesa_logd(
                "%s: Host device supported features for %s tiling: %08x, "
                "required features: %08x, feature bits %08x missing",
                __func__, tiling == VK_IMAGE_TILING_LINEAR ? "LINEAR" : "OPTIMAL",
                static_cast<uint32_t>(supportedFeatures), static_cast<uint32_t>(requiredFeatures),
                static_cast<uint32_t>((~supportedFeatures) & requiredFeatures));
            return VK_ERROR_FORMAT_NOT_SUPPORTED;
        }
    }

    fuchsia_sysmem::wire::ImageFormatConstraints imageConstraints;
    if (formatConstraints->sysmemPixelFormat != 0) {
        auto pixelFormat = static_cast<fuchsia_sysmem::wire::PixelFormatType>(
            formatConstraints->sysmemPixelFormat);
        if (createInfo->format != VK_FORMAT_UNDEFINED &&
            !vkFormatMatchesSysmemFormat(createInfo->format, pixelFormat)) {
            mesa_logd("%s: VkFormat %u doesn't match sysmem pixelFormat %lu", __func__,
                      static_cast<uint32_t>(createInfo->format),
                      formatConstraints->sysmemPixelFormat);
            return VK_ERROR_FORMAT_NOT_SUPPORTED;
        }
        imageConstraints.pixel_format.type = pixelFormat;
    } else {
        auto pixel_format = vkFormatTypeToSysmem(createInfo->format);
        if (pixel_format == fuchsia_sysmem::wire::PixelFormatType::kInvalid) {
            mesa_logd("%s: Unsupported VkFormat %u", __func__,
                      static_cast<uint32_t>(createInfo->format));
            return VK_ERROR_FORMAT_NOT_SUPPORTED;
        }
        imageConstraints.pixel_format.type = pixel_format;
    }

    imageConstraints.color_spaces_count = formatConstraints->colorSpaceCount;
    for (size_t i = 0; i < formatConstraints->colorSpaceCount; i++) {
        imageConstraints.color_space[i].type = static_cast<fuchsia_sysmem::wire::ColorSpaceType>(
            formatConstraints->pColorSpaces[i].colorSpace);
    }

    // Get row alignment from host GPU.
    VkDeviceSize offset = 0;
    VkDeviceSize rowPitchAlignment = 1u;

    if (tiling == VK_IMAGE_TILING_LINEAR) {
        VkImageCreateInfo createInfoDup = *createInfo;
        createInfoDup.pNext = nullptr;
        enc->vkGetLinearImageLayout2GOOGLE(device, &createInfoDup, &offset, &rowPitchAlignment,
                                           true /* do lock */);
        mesa_logd(
            "vkGetLinearImageLayout2GOOGLE: format %d offset %lu "
            "rowPitchAlignment = %lu",
            (int)createInfo->format, offset, rowPitchAlignment);
    }

    imageConstraints.min_coded_width = createInfo->extent.width;
    imageConstraints.max_coded_width = 0xfffffff;
    imageConstraints.min_coded_height = createInfo->extent.height;
    imageConstraints.max_coded_height = 0xffffffff;
    // The min_bytes_per_row can be calculated by sysmem using
    // |min_coded_width|, |bytes_per_row_divisor| and color format.
    imageConstraints.min_bytes_per_row = 0;
    imageConstraints.max_bytes_per_row = 0xffffffff;
    imageConstraints.max_coded_width_times_coded_height = 0xffffffff;

    imageConstraints.layers = 1;
    imageConstraints.coded_width_divisor = 1;
    imageConstraints.coded_height_divisor = 1;
    imageConstraints.bytes_per_row_divisor = rowPitchAlignment;
    imageConstraints.start_offset_divisor = 1;
    imageConstraints.display_width_divisor = 1;
    imageConstraints.display_height_divisor = 1;
    imageConstraints.pixel_format.has_format_modifier = true;
    imageConstraints.pixel_format.format_modifier.value =
        (tiling == VK_IMAGE_TILING_LINEAR)
            ? fuchsia_sysmem::wire::kFormatModifierLinear
            : fuchsia_sysmem::wire::kFormatModifierGoogleGoldfishOptimal;

    constraints->image_format_constraints[constraints->image_format_constraints_count++] =
        imageConstraints;
    return VK_SUCCESS;
}

SetBufferCollectionBufferConstraintsResult setBufferCollectionBufferConstraintsImpl(
    fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>* pCollection,
    const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo) {
    const auto& collection = *pCollection;
    if (pBufferConstraintsInfo == nullptr) {
        mesa_loge(
            "setBufferCollectionBufferConstraints: "
            "pBufferConstraintsInfo cannot be null.");
        return {VK_ERROR_OUT_OF_DEVICE_MEMORY};
    }

    fuchsia_sysmem::wire::BufferCollectionConstraints constraints =
        defaultBufferCollectionConstraints(
            /* min_size_bytes */ pBufferConstraintsInfo->createInfo.size,
            /* buffer_count */ pBufferConstraintsInfo->bufferCollectionConstraints.minBufferCount);
    constraints.usage.vulkan =
        getBufferCollectionConstraintsVulkanBufferUsage(pBufferConstraintsInfo);

    constexpr uint32_t kVulkanPriority = 5;
    const char kName[] = "GoldfishBufferSysmemShared";
    collection->SetName(kVulkanPriority, fidl::StringView(kName));

    auto result = collection->SetConstraints(true, constraints);
    if (!result.ok()) {
        mesa_loge("setBufferCollectionConstraints: SetConstraints failed: %d", result.status());
        return {VK_ERROR_OUT_OF_DEVICE_MEMORY};
    }

    return {VK_SUCCESS, constraints};
}
#endif

uint64_t getAHardwareBufferId(AHardwareBuffer* ahw) {
    uint64_t id = 0;
#if defined(ANDROID)
    auto* gralloc = ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->grallocHelper();
    gralloc->getId(ahw, &id);
#else
    (void)ahw;
#endif
    return id;
}

void transformExternalResourceMemoryDedicatedRequirementsForGuest(
    VkMemoryDedicatedRequirements* dedicatedReqs) {
    dedicatedReqs->prefersDedicatedAllocation = VK_TRUE;
    dedicatedReqs->requiresDedicatedAllocation = VK_TRUE;
}

void ResourceTracker::transformImageMemoryRequirementsForGuestLocked(VkImage image,
                                                                     VkMemoryRequirements* reqs) {
#ifdef VK_USE_PLATFORM_FUCHSIA
    auto it = info_VkImage.find(image);
    if (it == info_VkImage.end()) return;
    auto& info = it->second;
    if (info.isSysmemBackedMemory) {
        auto width = info.createInfo.extent.width;
        auto height = info.createInfo.extent.height;
        reqs->size = width * height * 4;
    }
#else
    // Bypass "unused parameter" checks.
    (void)image;
    (void)reqs;
#endif
}

CoherentMemoryPtr ResourceTracker::freeCoherentMemoryLocked(VkDeviceMemory memory,
                                                            VkDeviceMemory_Info& info) {
    if (info.coherentMemory && info.ptr) {
        if (info.coherentMemory->getDeviceMemory() != memory) {
            delete_goldfish_VkDeviceMemory(memory);
        }

        if (info.ptr) {
            info.coherentMemory->release(info.ptr);
            info.ptr = nullptr;
        }

        return std::move(info.coherentMemory);
    }

    return nullptr;
}

VkResult createFence(VkDevice device, uint64_t hostFenceHandle, int64_t& osHandle) {
    struct VirtGpuExecBuffer exec = {};
    struct gfxstreamCreateExportSyncVK exportSync = {};
    VirtGpuDevice* instance = VirtGpuDevice::getInstance();

    uint64_t hostDeviceHandle = get_host_u64_VkDevice(device);

    exportSync.hdr.opCode = GFXSTREAM_CREATE_EXPORT_SYNC_VK;
    exportSync.deviceHandleLo = (uint32_t)hostDeviceHandle;
    exportSync.deviceHandleHi = (uint32_t)(hostDeviceHandle >> 32);
    exportSync.fenceHandleLo = (uint32_t)hostFenceHandle;
    exportSync.fenceHandleHi = (uint32_t)(hostFenceHandle >> 32);

    exec.command = static_cast<void*>(&exportSync);
    exec.command_size = sizeof(exportSync);
    exec.flags = kFenceOut | kRingIdx;
    if (instance->execBuffer(exec, nullptr)) return VK_ERROR_OUT_OF_HOST_MEMORY;

    osHandle = exec.handle.osHandle;
    return VK_SUCCESS;
}
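
// createFence() asks the host to export a Vulkan fence as a virtio-gpu sync
// object: GFXSTREAM_CREATE_EXPORT_SYNC_VK carries the host device and fence
// handles split into lo/hi halves, and kFenceOut makes execBuffer() hand back
// a waitable OS handle (a sync file descriptor on Linux/Android) in
// exec.handle.osHandle.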

void collectAllPendingDescriptorSetsBottomUp(const std::vector<VkCommandBuffer>& workingSet,
                                             std::unordered_set<VkDescriptorSet>& allDs) {
    if (workingSet.empty()) return;

    std::vector<VkCommandBuffer> nextLevel;
    for (auto commandBuffer : workingSet) {
        struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
        forAllObjects(cb->subObjects, [&nextLevel](void* secondary) {
            nextLevel.push_back((VkCommandBuffer)secondary);
        });
    }

    collectAllPendingDescriptorSetsBottomUp(nextLevel, allDs);

    for (auto cmdbuf : workingSet) {
        struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(cmdbuf);

        if (!cb->userPtr) {
            continue;  // No descriptors to update.
        }

        CommandBufferPendingDescriptorSets* pendingDescriptorSets =
            (CommandBufferPendingDescriptorSets*)(cb->userPtr);

        if (pendingDescriptorSets->sets.empty()) {
            continue;  // No descriptors to update.
        }

        allDs.insert(pendingDescriptorSets->sets.begin(), pendingDescriptorSets->sets.end());
    }
}
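
// The recursion above first descends from the working set into its secondary
// command buffers (cb->subObjects), then unions each level's pending
// descriptor sets into |allDs|, so a queue submission sees one flat set
// covering every command buffer it may execute.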

void commitDescriptorSetUpdates(void* context, VkQueue queue,
                                const std::unordered_set<VkDescriptorSet>& sets) {
    VkEncoder* enc = (VkEncoder*)context;

    std::unordered_map<VkDescriptorPool, uint32_t> poolSet;
    std::vector<VkDescriptorPool> pools;
    std::vector<VkDescriptorSetLayout> setLayouts;
    std::vector<uint64_t> poolIds;
    std::vector<uint32_t> descriptorSetWhichPool;
    std::vector<uint32_t> pendingAllocations;
    std::vector<uint32_t> writeStartingIndices;
    std::vector<VkWriteDescriptorSet> writesForHost;

    uint32_t poolIndex = 0;
    uint32_t currentWriteIndex = 0;
    for (auto set : sets) {
        ReifiedDescriptorSet* reified = as_goldfish_VkDescriptorSet(set)->reified;
        VkDescriptorPool pool = reified->pool;
        VkDescriptorSetLayout setLayout = reified->setLayout;

        auto it = poolSet.find(pool);
        if (it == poolSet.end()) {
            poolSet[pool] = poolIndex;
            descriptorSetWhichPool.push_back(poolIndex);
            pools.push_back(pool);
            ++poolIndex;
        } else {
            uint32_t savedPoolIndex = it->second;
            descriptorSetWhichPool.push_back(savedPoolIndex);
        }

        poolIds.push_back(reified->poolId);
        setLayouts.push_back(setLayout);
        pendingAllocations.push_back(reified->allocationPending ? 1 : 0);
        writeStartingIndices.push_back(currentWriteIndex);

        auto& writes = reified->allWrites;

        for (size_t i = 0; i < writes.size(); ++i) {
            uint32_t binding = i;

            for (size_t j = 0; j < writes[i].size(); ++j) {
                auto& write = writes[i][j];

                if (write.type == DescriptorWriteType::Empty) continue;

                uint32_t dstArrayElement = 0;

                VkDescriptorImageInfo* imageInfo = nullptr;
                VkDescriptorBufferInfo* bufferInfo = nullptr;
                VkBufferView* bufferView = nullptr;

                switch (write.type) {
                    case DescriptorWriteType::Empty:
                        break;
                    case DescriptorWriteType::ImageInfo:
                        dstArrayElement = j;
                        imageInfo = &write.imageInfo;
                        break;
                    case DescriptorWriteType::BufferInfo:
                        dstArrayElement = j;
                        bufferInfo = &write.bufferInfo;
                        break;
                    case DescriptorWriteType::BufferView:
                        dstArrayElement = j;
                        bufferView = &write.bufferView;
                        break;
                    case DescriptorWriteType::InlineUniformBlock:
                    case DescriptorWriteType::AccelerationStructure:
                        // TODO
                        mesa_loge(
                            "Encountered pending inline uniform block or acceleration structure "
                            "desc write, abort (NYI)\n");
                        abort();
                    default:
                        break;
                }

                // TODO: Combine multiple writes into one VkWriteDescriptorSet.
                VkWriteDescriptorSet forHost = {
                    VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
                    0 /* TODO: inline uniform block */,
                    set,
                    binding,
                    dstArrayElement,
                    1,
                    write.descriptorType,
                    imageInfo,
                    bufferInfo,
                    bufferView,
                };

                writesForHost.push_back(forHost);
                ++currentWriteIndex;

                // Set it back to empty.
                write.type = DescriptorWriteType::Empty;
            }
        }
    }

    // Skip out if there's nothing to VkWriteDescriptorSet home about.
    if (writesForHost.empty()) {
        return;
    }

    enc->vkQueueCommitDescriptorSetUpdatesGOOGLE(
        queue, (uint32_t)pools.size(), pools.data(), (uint32_t)sets.size(), setLayouts.data(),
        poolIds.data(), descriptorSetWhichPool.data(), pendingAllocations.data(),
        writeStartingIndices.data(), (uint32_t)writesForHost.size(), writesForHost.data(),
        false /* no lock */);

    // If we got here, then we definitely serviced the allocations.
    for (auto set : sets) {
        ReifiedDescriptorSet* reified = as_goldfish_VkDescriptorSet(set)->reified;
        reified->allocationPending = false;
    }
}

uint32_t ResourceTracker::syncEncodersForCommandBuffer(VkCommandBuffer commandBuffer,
                                                       VkEncoder* currentEncoder) {
    struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
    if (!cb) return 0;

    auto lastEncoder = cb->lastUsedEncoder;

    if (lastEncoder == currentEncoder) return 0;

    currentEncoder->incRef();

    cb->lastUsedEncoder = currentEncoder;

    if (!lastEncoder) return 0;

    auto oldSeq = cb->sequenceNumber;
    cb->sequenceNumber += 2;
    lastEncoder->vkCommandBufferHostSyncGOOGLE(commandBuffer, false, oldSeq + 1,
                                               true /* do lock */);
    lastEncoder->flush();
    currentEncoder->vkCommandBufferHostSyncGOOGLE(commandBuffer, true, oldSeq + 2,
                                                  true /* do lock */);

    if (lastEncoder->decRef()) {
        cb->lastUsedEncoder = nullptr;
    }
    return 0;
}
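
// The sequence number bump above implements an ordered handoff between
// encoders for one command buffer: the previous encoder signals the host at
// oldSeq + 1 once its stream is flushed, and the new encoder resumes at
// oldSeq + 2, so the host observes the two streams in the order the guest
// used them.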

void addPendingDescriptorSets(VkCommandBuffer commandBuffer, uint32_t descriptorSetCount,
                              const VkDescriptorSet* pDescriptorSets) {
    struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);

    if (!cb->userPtr) {
        CommandBufferPendingDescriptorSets* newPendingSets = new CommandBufferPendingDescriptorSets;
        cb->userPtr = newPendingSets;
    }

    CommandBufferPendingDescriptorSets* pendingSets =
        (CommandBufferPendingDescriptorSets*)cb->userPtr;

    for (uint32_t i = 0; i < descriptorSetCount; ++i) {
        pendingSets->sets.insert(pDescriptorSets[i]);
    }
}

void decDescriptorSetLayoutRef(void* context, VkDevice device,
                               VkDescriptorSetLayout descriptorSetLayout,
                               const VkAllocationCallbacks* pAllocator) {
    if (!descriptorSetLayout) return;

    struct goldfish_VkDescriptorSetLayout* setLayout =
        as_goldfish_VkDescriptorSetLayout(descriptorSetLayout);

    if (0 == --setLayout->layoutInfo->refcount) {
        VkEncoder* enc = (VkEncoder*)context;
        enc->vkDestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator,
                                          true /* do lock */);
    }
}
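
// Descriptor set layout handles are reference counted on the guest side; paths
// that free sets in bulk (e.g. clearDescriptorPoolAndUnregisterDescriptorSets
// below) drop one reference per set, and the host vkDestroyDescriptorSetLayout
// is only issued once the count reaches zero.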

void ResourceTracker::ensureSyncDeviceFd() {
#if GFXSTREAM_ENABLE_GUEST_GOLDFISH
    if (mSyncDeviceFd >= 0) return;
    mSyncDeviceFd = goldfish_sync_open();
    if (mSyncDeviceFd >= 0) {
        mesa_logd("%s: created sync device for current Vulkan process: %d\n", __func__,
                  mSyncDeviceFd);
    } else {
        mesa_logd("%s: failed to create sync device for current Vulkan process\n", __func__);
    }
#endif
}

void ResourceTracker::unregister_VkInstance(VkInstance instance) {
    AutoLock<RecursiveLock> lock(mLock);

    auto it = info_VkInstance.find(instance);
    if (it == info_VkInstance.end()) return;
    auto info = it->second;
    info_VkInstance.erase(instance);
    lock.unlock();
}

void ResourceTracker::unregister_VkDevice(VkDevice device) {
    AutoLock<RecursiveLock> lock(mLock);

    auto it = info_VkDevice.find(device);
    if (it == info_VkDevice.end()) return;
    auto info = it->second;
    info_VkDevice.erase(device);
    lock.unlock();
}

void ResourceTracker::unregister_VkCommandPool(VkCommandPool pool) {
    if (!pool) return;

    clearCommandPool(pool);

    AutoLock<RecursiveLock> lock(mLock);
    info_VkCommandPool.erase(pool);
}

void ResourceTracker::unregister_VkSampler(VkSampler sampler) {
    if (!sampler) return;

    AutoLock<RecursiveLock> lock(mLock);
    info_VkSampler.erase(sampler);
}

void ResourceTracker::unregister_VkCommandBuffer(VkCommandBuffer commandBuffer) {
    resetCommandBufferStagingInfo(commandBuffer, true /* also reset primaries */,
                                  true /* also clear pending descriptor sets */);

    struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
    if (!cb) return;
    if (cb->lastUsedEncoder) {
        cb->lastUsedEncoder->decRef();
    }
    eraseObjects(&cb->subObjects);
    forAllObjects(cb->poolObjects, [cb](void* commandPool) {
        struct goldfish_VkCommandPool* p = as_goldfish_VkCommandPool((VkCommandPool)commandPool);
        eraseObject(&p->subObjects, (void*)cb);
    });
    eraseObjects(&cb->poolObjects);

    if (cb->userPtr) {
        CommandBufferPendingDescriptorSets* pendingSets =
            (CommandBufferPendingDescriptorSets*)cb->userPtr;
        delete pendingSets;
    }

    AutoLock<RecursiveLock> lock(mLock);
    info_VkCommandBuffer.erase(commandBuffer);
}

void ResourceTracker::unregister_VkQueue(VkQueue queue) {
    struct goldfish_VkQueue* q = as_goldfish_VkQueue(queue);
    if (!q) return;
    if (q->lastUsedEncoder) {
        q->lastUsedEncoder->decRef();
    }

    AutoLock<RecursiveLock> lock(mLock);
    info_VkQueue.erase(queue);
}

void ResourceTracker::unregister_VkDeviceMemory(VkDeviceMemory mem) {
    AutoLock<RecursiveLock> lock(mLock);

    auto it = info_VkDeviceMemory.find(mem);
    if (it == info_VkDeviceMemory.end()) return;

    auto& memInfo = it->second;

#ifdef VK_USE_PLATFORM_ANDROID_KHR
    if (memInfo.ahw) {
        auto* gralloc =
            ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->grallocHelper();
        gralloc->release(memInfo.ahw);
    }
#endif

    if (memInfo.vmoHandle != ZX_HANDLE_INVALID) {
        zx_handle_close(memInfo.vmoHandle);
    }

    info_VkDeviceMemory.erase(mem);
}

void ResourceTracker::unregister_VkImage(VkImage img) {
    AutoLock<RecursiveLock> lock(mLock);

    auto it = info_VkImage.find(img);
    if (it == info_VkImage.end()) return;

    info_VkImage.erase(img);
}

void ResourceTracker::unregister_VkBuffer(VkBuffer buf) {
    AutoLock<RecursiveLock> lock(mLock);

    auto it = info_VkBuffer.find(buf);
    if (it == info_VkBuffer.end()) return;

    info_VkBuffer.erase(buf);
}

void ResourceTracker::unregister_VkSemaphore(VkSemaphore sem) {
    AutoLock<RecursiveLock> lock(mLock);

    auto it = info_VkSemaphore.find(sem);
    if (it == info_VkSemaphore.end()) return;

    auto& semInfo = it->second;

    if (semInfo.eventHandle != ZX_HANDLE_INVALID) {
        zx_handle_close(semInfo.eventHandle);
    }

#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
    if (semInfo.syncFd.value_or(-1) >= 0) {
        auto* syncHelper =
            ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->syncHelper();
        syncHelper->close(semInfo.syncFd.value());
    }
#endif

    info_VkSemaphore.erase(sem);
}

void ResourceTracker::unregister_VkDescriptorUpdateTemplate(VkDescriptorUpdateTemplate templ) {
    AutoLock<RecursiveLock> lock(mLock);
    auto it = info_VkDescriptorUpdateTemplate.find(templ);
    if (it == info_VkDescriptorUpdateTemplate.end()) return;

    auto& info = it->second;
    if (info.templateEntryCount) delete[] info.templateEntries;
    if (info.imageInfoCount) {
        delete[] info.imageInfoIndices;
        delete[] info.imageInfos;
    }
    if (info.bufferInfoCount) {
        delete[] info.bufferInfoIndices;
        delete[] info.bufferInfos;
    }
    if (info.bufferViewCount) {
        delete[] info.bufferViewIndices;
        delete[] info.bufferViews;
    }
    info_VkDescriptorUpdateTemplate.erase(it);
}

void ResourceTracker::unregister_VkFence(VkFence fence) {
    AutoLock<RecursiveLock> lock(mLock);
    auto it = info_VkFence.find(fence);
    if (it == info_VkFence.end()) return;

    auto& fenceInfo = it->second;
    (void)fenceInfo;

#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
    if (fenceInfo.syncFd >= 0) {
        auto* syncHelper =
            ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->syncHelper();
        syncHelper->close(fenceInfo.syncFd);
    }
#endif

    info_VkFence.erase(fence);
}

#ifdef VK_USE_PLATFORM_FUCHSIA
void ResourceTracker::unregister_VkBufferCollectionFUCHSIA(VkBufferCollectionFUCHSIA collection) {
    AutoLock<RecursiveLock> lock(mLock);
    info_VkBufferCollectionFUCHSIA.erase(collection);
}
#endif

void ResourceTracker::unregister_VkDescriptorSet_locked(VkDescriptorSet set) {
    struct goldfish_VkDescriptorSet* ds = as_goldfish_VkDescriptorSet(set);
    delete ds->reified;
    info_VkDescriptorSet.erase(set);
}

void ResourceTracker::unregister_VkDescriptorSet(VkDescriptorSet set) {
    if (!set) return;

    AutoLock<RecursiveLock> lock(mLock);
    unregister_VkDescriptorSet_locked(set);
}

void ResourceTracker::unregister_VkDescriptorSetLayout(VkDescriptorSetLayout setLayout) {
    if (!setLayout) return;

    AutoLock<RecursiveLock> lock(mLock);
    delete as_goldfish_VkDescriptorSetLayout(setLayout)->layoutInfo;
    info_VkDescriptorSetLayout.erase(setLayout);
}

void ResourceTracker::freeDescriptorSetsIfHostAllocated(VkEncoder* enc, VkDevice device,
                                                        uint32_t descriptorSetCount,
                                                        const VkDescriptorSet* sets) {
    for (uint32_t i = 0; i < descriptorSetCount; ++i) {
        struct goldfish_VkDescriptorSet* ds = as_goldfish_VkDescriptorSet(sets[i]);
        if (ds->reified->allocationPending) {
            unregister_VkDescriptorSet(sets[i]);
            delete_goldfish_VkDescriptorSet(sets[i]);
        } else {
            enc->vkFreeDescriptorSets(device, ds->reified->pool, 1, &sets[i], false /* no lock */);
        }
    }
}

void ResourceTracker::clearDescriptorPoolAndUnregisterDescriptorSets(void* context, VkDevice device,
                                                                     VkDescriptorPool pool) {
    std::vector<VkDescriptorSet> toClear =
        clearDescriptorPool(pool, mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate);

    for (auto set : toClear) {
        if (mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate) {
            VkDescriptorSetLayout setLayout = as_goldfish_VkDescriptorSet(set)->reified->setLayout;
            decDescriptorSetLayoutRef(context, device, setLayout, nullptr);
        }
        unregister_VkDescriptorSet(set);
        delete_goldfish_VkDescriptorSet(set);
    }
}

void ResourceTracker::unregister_VkDescriptorPool(VkDescriptorPool pool) {
    if (!pool) return;

    AutoLock<RecursiveLock> lock(mLock);

    struct goldfish_VkDescriptorPool* dp = as_goldfish_VkDescriptorPool(pool);
    delete dp->allocInfo;

    info_VkDescriptorPool.erase(pool);
}

void ResourceTracker::deviceMemoryTransform_fromhost(VkDeviceMemory* memory, uint32_t memoryCount,
                                                     VkDeviceSize* offset, uint32_t offsetCount,
                                                     VkDeviceSize* size, uint32_t sizeCount,
                                                     uint32_t* typeIndex, uint32_t typeIndexCount,
                                                     uint32_t* typeBits, uint32_t typeBitsCount) {
    (void)memory;
    (void)memoryCount;
    (void)offset;
    (void)offsetCount;
    (void)size;
    (void)sizeCount;
    (void)typeIndex;
    (void)typeIndexCount;
    (void)typeBits;
    (void)typeBitsCount;
}

void ResourceTracker::transformImpl_VkExternalMemoryProperties_fromhost(
    VkExternalMemoryProperties* pProperties, uint32_t) {
    VkExternalMemoryHandleTypeFlags supportedHandleType = 0u;
#ifdef VK_USE_PLATFORM_FUCHSIA
    supportedHandleType |= VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA;
#endif  // VK_USE_PLATFORM_FUCHSIA
#ifdef VK_USE_PLATFORM_ANDROID_KHR
    supportedHandleType |= VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
                           VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
#endif  // VK_USE_PLATFORM_ANDROID_KHR
    if (supportedHandleType) {
        pProperties->compatibleHandleTypes &= supportedHandleType;
        pProperties->exportFromImportedHandleTypes &= supportedHandleType;
    }
}

void ResourceTracker::setInstanceInfo(VkInstance instance, uint32_t enabledExtensionCount,
                                      const char* const* ppEnabledExtensionNames,
                                      uint32_t apiVersion) {
    AutoLock<RecursiveLock> lock(mLock);
    auto& info = info_VkInstance[instance];
    info.highestApiVersion = apiVersion;

    if (!ppEnabledExtensionNames) return;

    for (uint32_t i = 0; i < enabledExtensionCount; ++i) {
        info.enabledExtensions.insert(ppEnabledExtensionNames[i]);
    }
}

void ResourceTracker::setDeviceInfo(VkDevice device, VkPhysicalDevice physdev,
                                    VkPhysicalDeviceProperties props,
                                    VkPhysicalDeviceMemoryProperties memProps,
                                    uint32_t enabledExtensionCount,
                                    const char* const* ppEnabledExtensionNames, const void* pNext) {
    AutoLock<RecursiveLock> lock(mLock);
    auto& info = info_VkDevice[device];
    info.physdev = physdev;
    info.props = props;
    info.memProps = memProps;
    info.apiVersion = props.apiVersion;

    const VkBaseInStructure* extensionCreateInfo =
        reinterpret_cast<const VkBaseInStructure*>(pNext);
    while (extensionCreateInfo) {
        if (extensionCreateInfo->sType ==
            VK_STRUCTURE_TYPE_DEVICE_DEVICE_MEMORY_REPORT_CREATE_INFO_EXT) {
            auto deviceMemoryReportCreateInfo =
                reinterpret_cast<const VkDeviceDeviceMemoryReportCreateInfoEXT*>(
                    extensionCreateInfo);
            if (deviceMemoryReportCreateInfo->pfnUserCallback != nullptr) {
                info.deviceMemoryReportCallbacks.emplace_back(
                    deviceMemoryReportCreateInfo->pfnUserCallback,
                    deviceMemoryReportCreateInfo->pUserData);
            }
        }
        extensionCreateInfo = extensionCreateInfo->pNext;
    }

    if (!ppEnabledExtensionNames) return;

    for (uint32_t i = 0; i < enabledExtensionCount; ++i) {
        info.enabledExtensions.insert(ppEnabledExtensionNames[i]);
    }
}

void ResourceTracker::setDeviceMemoryInfo(VkDevice device, VkDeviceMemory memory,
                                          VkDeviceSize allocationSize, uint8_t* ptr,
                                          uint32_t memoryTypeIndex, AHardwareBuffer* ahw,
                                          bool imported, zx_handle_t vmoHandle,
                                          VirtGpuResourcePtr blobPtr) {
    AutoLock<RecursiveLock> lock(mLock);
    auto& info = info_VkDeviceMemory[memory];

    info.device = device;
    info.allocationSize = allocationSize;
    info.ptr = ptr;
    info.memoryTypeIndex = memoryTypeIndex;
#ifdef VK_USE_PLATFORM_ANDROID_KHR
    info.ahw = ahw;
#endif
    info.imported = imported;
    info.vmoHandle = vmoHandle;
    info.blobPtr = blobPtr;
}

void ResourceTracker::setImageInfo(VkImage image, VkDevice device,
                                   const VkImageCreateInfo* pCreateInfo) {
    AutoLock<RecursiveLock> lock(mLock);
    auto& info = info_VkImage[image];

    info.device = device;
    info.createInfo = *pCreateInfo;
}

uint8_t* ResourceTracker::getMappedPointer(VkDeviceMemory memory) {
    AutoLock<RecursiveLock> lock(mLock);
    const auto it = info_VkDeviceMemory.find(memory);
    if (it == info_VkDeviceMemory.end()) return nullptr;

    const auto& info = it->second;
    return info.ptr;
}

VkDeviceSize ResourceTracker::getMappedSize(VkDeviceMemory memory) {
    AutoLock<RecursiveLock> lock(mLock);
    const auto it = info_VkDeviceMemory.find(memory);
    if (it == info_VkDeviceMemory.end()) return 0;

    const auto& info = it->second;
    return info.allocationSize;
}

bool ResourceTracker::isValidMemoryRange(const VkMappedMemoryRange& range) const {
    AutoLock<RecursiveLock> lock(mLock);
    const auto it = info_VkDeviceMemory.find(range.memory);
    if (it == info_VkDeviceMemory.end()) return false;
    const auto& info = it->second;

    if (!info.ptr) return false;

    VkDeviceSize offset = range.offset;
    VkDeviceSize size = range.size;

    if (size == VK_WHOLE_SIZE) {
        return offset <= info.allocationSize;
    }

    return offset + size <= info.allocationSize;
}
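
// Example: against a 64-byte allocation, {offset = 16, size = VK_WHOLE_SIZE}
// is valid, while {offset = 16, size = 64} is rejected since 16 + 64 > 64.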
1397
setupCaps(uint32_t & noRenderControlEnc)1398 void ResourceTracker::setupCaps(uint32_t& noRenderControlEnc) {
1399 VirtGpuDevice* instance = VirtGpuDevice::getInstance(kCapsetGfxStreamVulkan);
1400 mCaps = instance->getCaps();
1401
1402 // Delete once goldfish Linux drivers are gone
1403 if (mCaps.vulkanCapset.protocolVersion == 0) {
1404 mCaps.vulkanCapset.colorBufferMemoryIndex = 0xFFFFFFFF;
1405 } else {
1406 // Don't query the render control encoder for features, since for virtio-gpu the
1407 // capabilities provide versioning. Set features to be unconditionally true, since
1408 // using virtio-gpu encompasses all prior goldfish features. mFeatureInfo should be
1409 // deprecated in favor of caps.
1410
1411 mFeatureInfo.reset(new EmulatorFeatureInfo);
1412
1413 mFeatureInfo->hasVulkanNullOptionalStrings = true;
1414 mFeatureInfo->hasVulkanIgnoredHandles = true;
1415 mFeatureInfo->hasVulkanShaderFloat16Int8 = true;
1416 mFeatureInfo->hasVulkanQueueSubmitWithCommands = true;
1417 mFeatureInfo->hasDeferredVulkanCommands = true;
1418 mFeatureInfo->hasVulkanAsyncQueueSubmit = true;
1419 mFeatureInfo->hasVulkanCreateResourcesWithRequirements = true;
1420 mFeatureInfo->hasVirtioGpuNext = true;
1421 mFeatureInfo->hasVirtioGpuNativeSync = true;
1422 mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate = true;
1423 mFeatureInfo->hasVulkanAsyncQsri = true;
1424
1425 ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_NULL_OPTIONAL_STRINGS_BIT;
1426 ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT;
1427 ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_SHADER_FLOAT16_INT8_BIT;
1428 ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
1429 }
1430
1431 noRenderControlEnc = mCaps.vulkanCapset.noRenderControlEnc;
1432 }
1433
1434 void ResourceTracker::setupFeatures(const EmulatorFeatureInfo* features) {
1435 if (!features || mFeatureInfo) return;
1436 mFeatureInfo.reset(new EmulatorFeatureInfo);
1437 *mFeatureInfo = *features;
1438
1439 #if defined(__ANDROID__)
1440 if (mFeatureInfo->hasDirectMem) {
1441 mGoldfishAddressSpaceBlockProvider.reset(
1442 new GoldfishAddressSpaceBlockProvider(GoldfishAddressSpaceSubdeviceType::NoSubdevice));
1443 }
1444 #endif // defined(__ANDROID__)
1445
1446 #ifdef VK_USE_PLATFORM_FUCHSIA
1447 if (mFeatureInfo->hasVulkan) {
1448 fidl::ClientEnd<fuchsia_hardware_goldfish::ControlDevice> channel{zx::channel(
1449 GetConnectToServiceFunction()("/loader-gpu-devices/class/goldfish-control/000"))};
1450 if (!channel) {
1451 mesa_loge("failed to open control device");
1452 abort();
1453 }
1454 mControlDevice =
1455 fidl::WireSyncClient<fuchsia_hardware_goldfish::ControlDevice>(std::move(channel));
1456
1457 fidl::ClientEnd<fuchsia_sysmem::Allocator> sysmem_channel{
1458 zx::channel(GetConnectToServiceFunction()("/svc/fuchsia.sysmem.Allocator"))};
1459 if (!sysmem_channel) {
1460 mesa_loge("failed to open sysmem connection");
1461 }
1462 mSysmemAllocator =
1463 fidl::WireSyncClient<fuchsia_sysmem::Allocator>(std::move(sysmem_channel));
1464 char name[ZX_MAX_NAME_LEN] = {};
1465 zx_object_get_property(zx_process_self(), ZX_PROP_NAME, name, sizeof(name));
1466 std::string client_name(name);
1467 client_name += "-goldfish";
1468 zx_info_handle_basic_t info;
1469 zx_object_get_info(zx_process_self(), ZX_INFO_HANDLE_BASIC, &info, sizeof(info), nullptr,
1470 nullptr);
1471 mSysmemAllocator->SetDebugClientInfo(fidl::StringView::FromExternal(client_name),
1472 info.koid);
1473 }
1474 #endif
1475
1476 if (mFeatureInfo->hasVulkanNullOptionalStrings) {
1477 ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_NULL_OPTIONAL_STRINGS_BIT;
1478 }
1479 if (mFeatureInfo->hasVulkanIgnoredHandles) {
1480 ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT;
1481 }
1482 if (mFeatureInfo->hasVulkanShaderFloat16Int8) {
1483 ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_SHADER_FLOAT16_INT8_BIT;
1484 }
1485 if (mFeatureInfo->hasVulkanQueueSubmitWithCommands) {
1486 ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
1487 }
1488 }
1489
1490 void ResourceTracker::setThreadingCallbacks(const ResourceTracker::ThreadingCallbacks& callbacks) {
1491 ResourceTracker::threadingCallbacks = callbacks;
1492 }
1493
1494 bool ResourceTracker::hostSupportsVulkan() const {
1495 if (!mFeatureInfo) return false;
1496
1497 return mFeatureInfo->hasVulkan;
1498 }
1499
1500 bool ResourceTracker::usingDirectMapping() const { return true; }
1501
1502 uint32_t ResourceTracker::getStreamFeatures() const { return ResourceTracker::streamFeatureBits; }
1503
1504 bool ResourceTracker::supportsDeferredCommands() const {
1505 if (!mFeatureInfo) return false;
1506 return mFeatureInfo->hasDeferredVulkanCommands;
1507 }
1508
1509 bool ResourceTracker::supportsAsyncQueueSubmit() const {
1510 if (!mFeatureInfo) return false;
1511 return mFeatureInfo->hasVulkanAsyncQueueSubmit;
1512 }
1513
1514 bool ResourceTracker::supportsCreateResourcesWithRequirements() const {
1515 if (!mFeatureInfo) return false;
1516 return mFeatureInfo->hasVulkanCreateResourcesWithRequirements;
1517 }
1518
1519 int ResourceTracker::getHostInstanceExtensionIndex(const std::string& extName) const {
1520 int i = 0;
1521 for (const auto& prop : mHostInstanceExtensions) {
1522 if (extName == std::string(prop.extensionName)) {
1523 return i;
1524 }
1525 ++i;
1526 }
1527 return -1;
1528 }
1529
1530 int ResourceTracker::getHostDeviceExtensionIndex(const std::string& extName) const {
1531 int i = 0;
1532 for (const auto& prop : mHostDeviceExtensions) {
1533 if (extName == std::string(prop.extensionName)) {
1534 return i;
1535 }
1536 ++i;
1537 }
1538 return -1;
1539 }
1540
1541 void ResourceTracker::deviceMemoryTransform_tohost(VkDeviceMemory* memory, uint32_t memoryCount,
1542 VkDeviceSize* offset, uint32_t offsetCount,
1543 VkDeviceSize* size, uint32_t sizeCount,
1544 uint32_t* typeIndex, uint32_t typeIndexCount,
1545 uint32_t* typeBits, uint32_t typeBitsCount) {
1546 (void)memoryCount;
1547 (void)offsetCount;
1548 (void)sizeCount;
1549 (void)typeIndex;
1550 (void)typeIndexCount;
1551 (void)typeBits;
1552 (void)typeBitsCount;
1553
1554 if (memory) {
1555 AutoLock<RecursiveLock> lock(mLock);
1556
1557 for (uint32_t i = 0; i < memoryCount; ++i) {
1558 VkDeviceMemory mem = memory[i];
1559
1560 auto it = info_VkDeviceMemory.find(mem);
1561 if (it == info_VkDeviceMemory.end()) return;
1562
1563 const auto& info = it->second;
1564
1565 if (!info.coherentMemory) continue;
1566
1567 memory[i] = info.coherentMemory->getDeviceMemory();
1568
1569 if (offset) {
1570 offset[i] = info.coherentMemoryOffset + offset[i];
1571 }
1572
1573 if (size && size[i] == VK_WHOLE_SIZE) {
1574 size[i] = info.allocationSize;
1575 }
1576
1577 // TODO
1578 (void)memory;
1579 (void)offset;
1580 (void)size;
1581 }
1582 }
1583 }
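
// Worked example (hypothetical values): for a guest allocation sub-allocated
// from a host coherent block with coherentMemoryOffset = 0x1000 and
// allocationSize = 0x2000, the transform rewrites the triple like so:
//
//     VkDeviceMemory mem = guestMemory;  // guest-visible handle
//     VkDeviceSize offset = 0x100;
//     VkDeviceSize size = VK_WHOLE_SIZE;
//     tracker->deviceMemoryTransform_tohost(&mem, 1, &offset, 1, &size, 1,
//                                           nullptr, 0, nullptr, 0);
//     // mem    -> host VkDeviceMemory backing the coherent block
//     // offset -> 0x1000 + 0x100 = 0x1100
//     // size   -> 0x2000 (VK_WHOLE_SIZE resolved to allocationSize)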
1584
1585 uint32_t ResourceTracker::getColorBufferMemoryIndex(void* context, VkDevice device) {
1586 // Create test image to get the memory requirements
1587 VkEncoder* enc = (VkEncoder*)context;
1588 VkImageCreateInfo createInfo = {
1589 .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
1590 .imageType = VK_IMAGE_TYPE_2D,
1591 .format = VK_FORMAT_R8G8B8A8_UNORM,
1592 .extent = {64, 64, 1},
1593 .mipLevels = 1,
1594 .arrayLayers = 1,
1595 .samples = VK_SAMPLE_COUNT_1_BIT,
1596 .tiling = VK_IMAGE_TILING_OPTIMAL,
1597 .usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT |
1598 VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
1599 VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT,
1600 .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
1601 };
1602 VkImage image = VK_NULL_HANDLE;
1603 VkResult res = enc->vkCreateImage(device, &createInfo, nullptr, &image, true /* do lock */);
1604
1605 if (res != VK_SUCCESS) {
1606 return 0;
1607 }
1608
1609 VkMemoryRequirements memReqs;
1610 enc->vkGetImageMemoryRequirements(device, image, &memReqs, true /* do lock */);
1611 enc->vkDestroyImage(device, image, nullptr, true /* do lock */);
1612
1613 const VkPhysicalDeviceMemoryProperties& memProps =
1614 getPhysicalDeviceMemoryProperties(context, device, VK_NULL_HANDLE);
1615
1616     // Currently, the host looks for the last memory type index whose
1617     // property flags include VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT.
1618 VkMemoryPropertyFlags memoryProperty = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
1619 for (int i = VK_MAX_MEMORY_TYPES - 1; i >= 0; --i) {
1620 if ((memReqs.memoryTypeBits & (1u << i)) &&
1621 (memProps.memoryTypes[i].propertyFlags & memoryProperty)) {
1622 return i;
1623 }
1624 }
1625
1626 return 0;
1627 }
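
// Example (illustrative sketch): how the returned index is typically consumed
// when allocating color buffer memory; `size` and the surrounding code are
// assumptions, not values produced by this file.
//
//     uint32_t memoryIndex = getColorBufferMemoryIndex(context, device);
//     VkMemoryAllocateInfo allocInfo = {
//         .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
//         .pNext = nullptr,
//         .allocationSize = size,
//         .memoryTypeIndex = memoryIndex,
//     };
//     VkDeviceMemory memory = VK_NULL_HANDLE;
//     VkResult res = vkAllocateMemory(device, &allocInfo, nullptr, &memory);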
1628
1629 VkResult ResourceTracker::on_vkEnumerateInstanceExtensionProperties(
1630 void* context, VkResult, const char*, uint32_t* pPropertyCount,
1631 VkExtensionProperties* pProperties) {
1632 std::vector<const char*> allowedExtensionNames = {
1633 "VK_KHR_get_physical_device_properties2",
1634 "VK_KHR_sampler_ycbcr_conversion",
1635 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
1636 "VK_KHR_external_semaphore_capabilities",
1637 "VK_KHR_external_memory_capabilities",
1638 "VK_KHR_external_fence_capabilities",
1639 "VK_EXT_debug_utils",
1640 #endif
1641 };
1642
1643 VkEncoder* enc = (VkEncoder*)context;
1644
1645 // Only advertise a select set of extensions.
1646 if (mHostInstanceExtensions.empty()) {
1647 uint32_t hostPropCount = 0;
1648 enc->vkEnumerateInstanceExtensionProperties(nullptr, &hostPropCount, nullptr,
1649 true /* do lock */);
1650 mHostInstanceExtensions.resize(hostPropCount);
1651
1652 VkResult hostRes = enc->vkEnumerateInstanceExtensionProperties(
1653 nullptr, &hostPropCount, mHostInstanceExtensions.data(), true /* do lock */);
1654
1655 if (hostRes != VK_SUCCESS) {
1656 return hostRes;
1657 }
1658 }
1659
1660 std::vector<VkExtensionProperties> filteredExts;
1661
1662 for (size_t i = 0; i < allowedExtensionNames.size(); ++i) {
1663 auto extIndex = getHostInstanceExtensionIndex(allowedExtensionNames[i]);
1664 if (extIndex != -1) {
1665 filteredExts.push_back(mHostInstanceExtensions[extIndex]);
1666 }
1667 }
1668
1669 VkExtensionProperties anbExtProps[] = {
1670 #ifdef VK_USE_PLATFORM_FUCHSIA
1671 {"VK_KHR_external_memory_capabilities", 1},
1672 {"VK_KHR_external_semaphore_capabilities", 1},
1673 #endif
1674 };
1675
1676 for (auto& anbExtProp : anbExtProps) {
1677 filteredExts.push_back(anbExtProp);
1678 }
1679
1680 // Spec:
1681 //
1682 // https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/vkEnumerateInstanceExtensionProperties.html
1683 //
1684 // If pProperties is NULL, then the number of extensions properties
1685 // available is returned in pPropertyCount. Otherwise, pPropertyCount
1686 // must point to a variable set by the user to the number of elements
1687 // in the pProperties array, and on return the variable is overwritten
1688 // with the number of structures actually written to pProperties. If
1689 // pPropertyCount is less than the number of extension properties
1690 // available, at most pPropertyCount structures will be written. If
1691 // pPropertyCount is smaller than the number of extensions available,
1692 // VK_INCOMPLETE will be returned instead of VK_SUCCESS, to indicate
1693 // that not all the available properties were returned.
1694 //
1695 // pPropertyCount must be a valid pointer to a uint32_t value
1696 if (!pPropertyCount) return VK_ERROR_INITIALIZATION_FAILED;
1697
1698 if (!pProperties) {
1699 *pPropertyCount = (uint32_t)filteredExts.size();
1700 return VK_SUCCESS;
1701 } else {
1702 auto actualExtensionCount = (uint32_t)filteredExts.size();
1703 if (*pPropertyCount > actualExtensionCount) {
1704 *pPropertyCount = actualExtensionCount;
1705 }
1706
1707 for (uint32_t i = 0; i < *pPropertyCount; ++i) {
1708 pProperties[i] = filteredExts[i];
1709 }
1710
1711 if (actualExtensionCount > *pPropertyCount) {
1712 return VK_INCOMPLETE;
1713 }
1714
1715 return VK_SUCCESS;
1716 }
1717 }
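
// Example (app-side sketch of the two-call pattern this entry point serves;
// standard Vulkan usage, not code from this file):
//
//     uint32_t count = 0;
//     vkEnumerateInstanceExtensionProperties(nullptr, &count, nullptr);
//     std::vector<VkExtensionProperties> exts(count);
//     VkResult res =
//         vkEnumerateInstanceExtensionProperties(nullptr, &count, exts.data());
//     // res == VK_INCOMPLETE when count was smaller than the number of
//     // available extensions; on success, count holds the number written.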
1718
1719 VkResult ResourceTracker::on_vkEnumerateDeviceExtensionProperties(
1720 void* context, VkResult, VkPhysicalDevice physdev, const char*, uint32_t* pPropertyCount,
1721 VkExtensionProperties* pProperties) {
1722 std::vector<const char*> allowedExtensionNames = {
1723 "VK_KHR_vulkan_memory_model",
1724 "VK_KHR_buffer_device_address",
1725 "VK_KHR_maintenance1",
1726 "VK_KHR_maintenance2",
1727 "VK_KHR_maintenance3",
1728 "VK_KHR_bind_memory2",
1729 "VK_KHR_dedicated_allocation",
1730 "VK_KHR_get_memory_requirements2",
1731 "VK_KHR_sampler_ycbcr_conversion",
1732 "VK_KHR_shader_float16_int8",
1733         // Timeline semaphores are buggy in newer NVIDIA drivers
1734 // (vkWaitSemaphoresKHR causes further vkCommandBuffer dispatches to deadlock)
1735 #ifndef VK_USE_PLATFORM_ANDROID_KHR
1736 "VK_KHR_timeline_semaphore",
1737 #endif
1738 "VK_AMD_gpu_shader_half_float",
1739 "VK_NV_shader_subgroup_partitioned",
1740 "VK_KHR_shader_subgroup_extended_types",
1741 "VK_EXT_subgroup_size_control",
1742 "VK_EXT_provoking_vertex",
1743 "VK_EXT_line_rasterization",
1744 "VK_KHR_shader_terminate_invocation",
1745 "VK_EXT_transform_feedback",
1746 "VK_EXT_primitive_topology_list_restart",
1747 "VK_EXT_index_type_uint8",
1748 "VK_EXT_load_store_op_none",
1749 "VK_EXT_swapchain_colorspace",
1750 "VK_EXT_image_robustness",
1751 "VK_EXT_custom_border_color",
1752 "VK_EXT_shader_stencil_export",
1753 "VK_KHR_image_format_list",
1754 "VK_KHR_incremental_present",
1755 "VK_KHR_pipeline_executable_properties",
1756 "VK_EXT_queue_family_foreign",
1757 "VK_EXT_scalar_block_layout",
1758 "VK_KHR_descriptor_update_template",
1759 "VK_KHR_storage_buffer_storage_class",
1760 "VK_EXT_depth_clip_enable",
1761 "VK_KHR_create_renderpass2",
1762 "VK_EXT_vertex_attribute_divisor",
1763 "VK_EXT_host_query_reset",
1764 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
1765 "VK_KHR_external_semaphore",
1766 "VK_KHR_external_semaphore_fd",
1767 // "VK_KHR_external_semaphore_win32", not exposed because it's translated to fd
1768 "VK_KHR_external_memory",
1769 "VK_KHR_external_fence",
1770 "VK_KHR_external_fence_fd",
1771 "VK_EXT_device_memory_report",
1772 #endif
1773 #if defined(__linux__) && !defined(VK_USE_PLATFORM_ANDROID_KHR)
1774 "VK_KHR_imageless_framebuffer",
1775 #endif
1776 // Vulkan 1.3
1777 "VK_KHR_synchronization2",
1778 "VK_EXT_private_data",
1779 };
1780
1781 VkEncoder* enc = (VkEncoder*)context;
1782
1783 if (mHostDeviceExtensions.empty()) {
1784 uint32_t hostPropCount = 0;
1785 enc->vkEnumerateDeviceExtensionProperties(physdev, nullptr, &hostPropCount, nullptr,
1786 true /* do lock */);
1787 mHostDeviceExtensions.resize(hostPropCount);
1788
1789 VkResult hostRes = enc->vkEnumerateDeviceExtensionProperties(
1790 physdev, nullptr, &hostPropCount, mHostDeviceExtensions.data(), true /* do lock */);
1791
1792 if (hostRes != VK_SUCCESS) {
1793 return hostRes;
1794 }
1795 }
1796
1797 std::vector<VkExtensionProperties> filteredExts;
1798
1799 for (size_t i = 0; i < allowedExtensionNames.size(); ++i) {
1800 auto extIndex = getHostDeviceExtensionIndex(allowedExtensionNames[i]);
1801 if (extIndex != -1) {
1802 filteredExts.push_back(mHostDeviceExtensions[extIndex]);
1803 }
1804 }
1805
1806 VkExtensionProperties anbExtProps[] = {
1807 #ifdef VK_USE_PLATFORM_ANDROID_KHR
1808 {"VK_ANDROID_native_buffer", 7},
1809 #endif
1810 #ifdef VK_USE_PLATFORM_FUCHSIA
1811 {"VK_KHR_external_memory", 1},
1812 {"VK_KHR_external_semaphore", 1},
1813 {"VK_FUCHSIA_external_semaphore", 1},
1814 #endif
1815 };
1816
1817 for (auto& anbExtProp : anbExtProps) {
1818 filteredExts.push_back(anbExtProp);
1819 }
1820
1821 /*
1822      * GfxstreamEnd2EndVkTest::DeviceMemoryReport always assumes the memory report
1823      * extension is present. It is filtered out when sent to the host, since it is
1824      * quite difficult to implement for a virtual GPU.
1825      *
1826      * The Mesa runtime checks physical device features. So if the test tries to
1827      * enable a device-level extension that does not definitely exist, the test will fail.
1828 *
1829 * The test can also be modified to check VkPhysicalDeviceDeviceMemoryReportFeaturesEXT,
1830 * but that's more involved. Work around this by always advertising the extension.
1831 * Tracking bug: b/338270042
1832 */
1833 filteredExts.push_back(VkExtensionProperties{"VK_EXT_device_memory_report", 1});
1834
1835 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
1836 bool hostSupportsExternalFenceFd =
1837 getHostDeviceExtensionIndex("VK_KHR_external_fence_fd") != -1;
1838 if (!hostSupportsExternalFenceFd) {
1839 filteredExts.push_back(VkExtensionProperties{"VK_KHR_external_fence_fd", 1});
1840 }
1841 #endif
1842
1843 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
1844 bool hostHasPosixExternalSemaphore =
1845 getHostDeviceExtensionIndex("VK_KHR_external_semaphore_fd") != -1;
1846 if (!hostHasPosixExternalSemaphore) {
1847         // Always advertise POSIX external semaphore capabilities on Android/Linux.
1848         // SYNC_FD handles will always work, regardless of host support. Support
1849         // for non-sync, opaque FDs depends on host driver support, but this will
1850         // be handled accordingly by the host.
1851 filteredExts.push_back(VkExtensionProperties{"VK_KHR_external_semaphore_fd", 1});
1852 }
1853 #endif
1854
1855 bool win32ExtMemAvailable = getHostDeviceExtensionIndex("VK_KHR_external_memory_win32") != -1;
1856 bool posixExtMemAvailable = getHostDeviceExtensionIndex("VK_KHR_external_memory_fd") != -1;
1857 bool moltenVkExtAvailable = getHostDeviceExtensionIndex("VK_MVK_moltenvk") != -1;
1858 bool qnxExtMemAvailable =
1859 getHostDeviceExtensionIndex("VK_QNX_external_memory_screen_buffer") != -1;
1860
1861 bool hostHasExternalMemorySupport =
1862 win32ExtMemAvailable || posixExtMemAvailable || moltenVkExtAvailable || qnxExtMemAvailable;
1863
1864 if (hostHasExternalMemorySupport) {
1865 #ifdef VK_USE_PLATFORM_ANDROID_KHR
1866 filteredExts.push_back(
1867 VkExtensionProperties{"VK_ANDROID_external_memory_android_hardware_buffer", 7});
1868 filteredExts.push_back(VkExtensionProperties{"VK_EXT_queue_family_foreign", 1});
1869 #endif
1870 #ifdef VK_USE_PLATFORM_FUCHSIA
1871 filteredExts.push_back(VkExtensionProperties{"VK_FUCHSIA_external_memory", 1});
1872 filteredExts.push_back(VkExtensionProperties{"VK_FUCHSIA_buffer_collection", 1});
1873 #endif
1874 #if !defined(VK_USE_PLATFORM_ANDROID_KHR) && defined(__linux__)
1875 filteredExts.push_back(VkExtensionProperties{"VK_KHR_external_memory_fd", 1});
1876 filteredExts.push_back(VkExtensionProperties{"VK_EXT_external_memory_dma_buf", 1});
1877 #endif
1878 }
1879
1880 // NOTE: the Vulkan Loader's trampoline functions will remove duplicates. This can lead
1881     // to errors if this function returns VK_SUCCESS with N elements (including a duplicate)
1882 // but the Vulkan Loader's trampoline function returns VK_INCOMPLETE with N-1 elements
1883 // (without the duplicate).
1884 std::sort(filteredExts.begin(),
1885 filteredExts.end(),
1886 [](const VkExtensionProperties& a,
1887 const VkExtensionProperties& b) {
1888 return strcmp(a.extensionName, b.extensionName) < 0;
1889 });
1890 filteredExts.erase(std::unique(filteredExts.begin(),
1891 filteredExts.end(),
1892 [](const VkExtensionProperties& a,
1893 const VkExtensionProperties& b) {
1894 return strcmp(a.extensionName, b.extensionName) == 0;
1895 }),
1896 filteredExts.end());
1897
1898 // Spec:
1899 //
1900 // https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/vkEnumerateDeviceExtensionProperties.html
1901 //
1902 // pPropertyCount is a pointer to an integer related to the number of
1903 // extension properties available or queried, and is treated in the
1904 // same fashion as the
1905 // vkEnumerateInstanceExtensionProperties::pPropertyCount parameter.
1906 //
1907 // https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/vkEnumerateInstanceExtensionProperties.html
1908 //
1909 // If pProperties is NULL, then the number of extensions properties
1910 // available is returned in pPropertyCount. Otherwise, pPropertyCount
1911 // must point to a variable set by the user to the number of elements
1912 // in the pProperties array, and on return the variable is overwritten
1913 // with the number of structures actually written to pProperties. If
1914 // pPropertyCount is less than the number of extension properties
1915 // available, at most pPropertyCount structures will be written. If
1916 // pPropertyCount is smaller than the number of extensions available,
1917 // VK_INCOMPLETE will be returned instead of VK_SUCCESS, to indicate
1918 // that not all the available properties were returned.
1919 //
1920 // pPropertyCount must be a valid pointer to a uint32_t value
1921
1922 if (!pPropertyCount) return VK_ERROR_INITIALIZATION_FAILED;
1923
1924 if (!pProperties) {
1925 *pPropertyCount = (uint32_t)filteredExts.size();
1926 return VK_SUCCESS;
1927 } else {
1928 auto actualExtensionCount = (uint32_t)filteredExts.size();
1929 if (*pPropertyCount > actualExtensionCount) {
1930 *pPropertyCount = actualExtensionCount;
1931 }
1932
1933 for (uint32_t i = 0; i < *pPropertyCount; ++i) {
1934 pProperties[i] = filteredExts[i];
1935 }
1936
1937 if (actualExtensionCount > *pPropertyCount) {
1938 return VK_INCOMPLETE;
1939 }
1940
1941 return VK_SUCCESS;
1942 }
1943 }
1944
1945 VkResult ResourceTracker::on_vkEnumeratePhysicalDevices(void* context, VkResult,
1946 VkInstance instance,
1947 uint32_t* pPhysicalDeviceCount,
1948 VkPhysicalDevice* pPhysicalDevices) {
1949 VkEncoder* enc = (VkEncoder*)context;
1950
1951 if (!instance) return VK_ERROR_INITIALIZATION_FAILED;
1952
1953 if (!pPhysicalDeviceCount) return VK_ERROR_INITIALIZATION_FAILED;
1954
1955 AutoLock<RecursiveLock> lock(mLock);
1956
1957 // When this function is called, we actually need to do two things:
1958 // - Get full information about physical devices from the host,
1959 // even if the guest did not ask for it
1960 // - Serve the guest query according to the spec:
1961 //
1962 // https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/vkEnumeratePhysicalDevices.html
1963
1964 auto it = info_VkInstance.find(instance);
1965
1966 if (it == info_VkInstance.end()) return VK_ERROR_INITIALIZATION_FAILED;
1967
1968 auto& info = it->second;
1969
1970 // Get the full host information here if it doesn't exist already.
1971 if (info.physicalDevices.empty()) {
1972 uint32_t hostPhysicalDeviceCount = 0;
1973
1974 lock.unlock();
1975 VkResult countRes = enc->vkEnumeratePhysicalDevices(instance, &hostPhysicalDeviceCount,
1976 nullptr, false /* no lock */);
1977 lock.lock();
1978
1979 if (countRes != VK_SUCCESS) {
1980 mesa_loge(
1981 "%s: failed: could not count host physical devices. "
1982 "Error %d\n",
1983 __func__, countRes);
1984 return countRes;
1985 }
1986
1987 info.physicalDevices.resize(hostPhysicalDeviceCount);
1988
1989 lock.unlock();
1990 VkResult enumRes = enc->vkEnumeratePhysicalDevices(
1991 instance, &hostPhysicalDeviceCount, info.physicalDevices.data(), false /* no lock */);
1992 lock.lock();
1993
1994 if (enumRes != VK_SUCCESS) {
1995 mesa_loge(
1996 "%s: failed: could not retrieve host physical devices. "
1997 "Error %d\n",
1998 __func__, enumRes);
1999 return enumRes;
2000 }
2001 }
2002
2003 // Serve the guest query according to the spec.
2004 //
2005 // https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/vkEnumeratePhysicalDevices.html
2006 //
2007 // If pPhysicalDevices is NULL, then the number of physical devices
2008 // available is returned in pPhysicalDeviceCount. Otherwise,
2009 // pPhysicalDeviceCount must point to a variable set by the user to the
2010 // number of elements in the pPhysicalDevices array, and on return the
2011 // variable is overwritten with the number of handles actually written
2012 // to pPhysicalDevices. If pPhysicalDeviceCount is less than the number
2013 // of physical devices available, at most pPhysicalDeviceCount
2014 // structures will be written. If pPhysicalDeviceCount is smaller than
2015 // the number of physical devices available, VK_INCOMPLETE will be
2016 // returned instead of VK_SUCCESS, to indicate that not all the
2017 // available physical devices were returned.
2018
2019 if (!pPhysicalDevices) {
2020 *pPhysicalDeviceCount = (uint32_t)info.physicalDevices.size();
2021 return VK_SUCCESS;
2022 } else {
2023 uint32_t actualDeviceCount = (uint32_t)info.physicalDevices.size();
2024 uint32_t toWrite =
2025 actualDeviceCount < *pPhysicalDeviceCount ? actualDeviceCount : *pPhysicalDeviceCount;
2026
2027 for (uint32_t i = 0; i < toWrite; ++i) {
2028 pPhysicalDevices[i] = info.physicalDevices[i];
2029 }
2030
2031 *pPhysicalDeviceCount = toWrite;
2032
2033 if (actualDeviceCount > *pPhysicalDeviceCount) {
2034 return VK_INCOMPLETE;
2035 }
2036
2037 return VK_SUCCESS;
2038 }
2039 }
2040
2041 void ResourceTracker::on_vkGetPhysicalDeviceProperties(void*, VkPhysicalDevice,
2042 VkPhysicalDeviceProperties* pProperties) {
2043 #if defined(__linux__) && !defined(VK_USE_PLATFORM_ANDROID_KHR)
2044 if (pProperties) {
2045 if (VK_PHYSICAL_DEVICE_TYPE_CPU == pProperties->deviceType) {
2046             /* For a Linux guest: even if the host driver reports DEVICE_TYPE_CPU,
2047              * override this to VIRTUAL_GPU, otherwise Linux DRM interfaces
2048              * will take unexpected code paths to deal with a "software" driver.
2049              */
2050 pProperties->deviceType = VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU;
2051 }
2052 }
2053 #endif
2054 }
2055
2056 void ResourceTracker::on_vkGetPhysicalDeviceFeatures2(void*, VkPhysicalDevice,
2057 VkPhysicalDeviceFeatures2* pFeatures) {
2058 if (pFeatures) {
2059 VkPhysicalDeviceDeviceMemoryReportFeaturesEXT* memoryReportFeaturesEXT =
2060 vk_find_struct<VkPhysicalDeviceDeviceMemoryReportFeaturesEXT>(pFeatures);
2061 if (memoryReportFeaturesEXT) {
2062 memoryReportFeaturesEXT->deviceMemoryReport = VK_TRUE;
2063 }
2064 }
2065 }
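
// Example (app-side sketch): the deviceMemoryReport bit set above is only
// visible when the caller chains VkPhysicalDeviceDeviceMemoryReportFeaturesEXT
// into the query; standard Vulkan usage, not code from this file.
//
//     VkPhysicalDeviceDeviceMemoryReportFeaturesEXT memReport = {
//         .sType =
//             VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_MEMORY_REPORT_FEATURES_EXT,
//         .pNext = nullptr,
//     };
//     VkPhysicalDeviceFeatures2 features2 = {
//         .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2,
//         .pNext = &memReport,
//     };
//     vkGetPhysicalDeviceFeatures2(physicalDevice, &features2);
//     // memReport.deviceMemoryReport == VK_TRUE after the override above.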
2066
2067 void ResourceTracker::on_vkGetPhysicalDeviceFeatures2KHR(void* context,
2068 VkPhysicalDevice physicalDevice,
2069 VkPhysicalDeviceFeatures2* pFeatures) {
2070 on_vkGetPhysicalDeviceFeatures2(context, physicalDevice, pFeatures);
2071 }
2072
2073 void ResourceTracker::on_vkGetPhysicalDeviceProperties2(void* context,
2074 VkPhysicalDevice physicalDevice,
2075 VkPhysicalDeviceProperties2* pProperties) {
2076 if (pProperties) {
2077 VkPhysicalDeviceDeviceMemoryReportFeaturesEXT* memoryReportFeaturesEXT =
2078 vk_find_struct<VkPhysicalDeviceDeviceMemoryReportFeaturesEXT>(pProperties);
2079 if (memoryReportFeaturesEXT) {
2080 memoryReportFeaturesEXT->deviceMemoryReport = VK_TRUE;
2081 }
2082 on_vkGetPhysicalDeviceProperties(context, physicalDevice, &pProperties->properties);
2083 }
2084 }
2085
2086 void ResourceTracker::on_vkGetPhysicalDeviceProperties2KHR(
2087 void* context, VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties2* pProperties) {
2088 on_vkGetPhysicalDeviceProperties2(context, physicalDevice, pProperties);
2089 }
2090
2091 void ResourceTracker::on_vkGetPhysicalDeviceMemoryProperties(
2092 void* context, VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties* out) {
2093 // gfxstream decides which physical device to expose to the guest on startup.
2094     // Otherwise, we would need a physical-device-to-properties mapping.
2095 *out = getPhysicalDeviceMemoryProperties(context, VK_NULL_HANDLE, physicalDevice);
2096 }
2097
2098 void ResourceTracker::on_vkGetPhysicalDeviceMemoryProperties2(
2099 void*, VkPhysicalDevice physdev, VkPhysicalDeviceMemoryProperties2* out) {
2100 on_vkGetPhysicalDeviceMemoryProperties(nullptr, physdev, &out->memoryProperties);
2101 }
2102
2103 void ResourceTracker::on_vkGetDeviceQueue(void*, VkDevice device, uint32_t, uint32_t,
2104 VkQueue* pQueue) {
2105 AutoLock<RecursiveLock> lock(mLock);
2106 info_VkQueue[*pQueue].device = device;
2107 }
2108
2109 void ResourceTracker::on_vkGetDeviceQueue2(void*, VkDevice device, const VkDeviceQueueInfo2*,
2110 VkQueue* pQueue) {
2111 AutoLock<RecursiveLock> lock(mLock);
2112 info_VkQueue[*pQueue].device = device;
2113 }
2114
2115 VkResult ResourceTracker::on_vkCreateInstance(void* context, VkResult input_result,
2116 const VkInstanceCreateInfo* createInfo,
2117 const VkAllocationCallbacks*, VkInstance* pInstance) {
2118 if (input_result != VK_SUCCESS) return input_result;
2119
2120 VkEncoder* enc = (VkEncoder*)context;
2121
2122 uint32_t apiVersion;
2123 VkResult enumInstanceVersionRes =
2124 enc->vkEnumerateInstanceVersion(&apiVersion, false /* no lock */);
2125
2126 setInstanceInfo(*pInstance, createInfo->enabledExtensionCount,
2127 createInfo->ppEnabledExtensionNames, apiVersion);
2128
2129 return input_result;
2130 }
2131
2132 VkResult ResourceTracker::on_vkCreateDevice(void* context, VkResult input_result,
2133 VkPhysicalDevice physicalDevice,
2134 const VkDeviceCreateInfo* pCreateInfo,
2135 const VkAllocationCallbacks*, VkDevice* pDevice) {
2136 if (input_result != VK_SUCCESS) return input_result;
2137
2138 VkEncoder* enc = (VkEncoder*)context;
2139
2140 VkPhysicalDeviceProperties props;
2141 VkPhysicalDeviceMemoryProperties memProps;
2142 enc->vkGetPhysicalDeviceProperties(physicalDevice, &props, false /* no lock */);
2143 enc->vkGetPhysicalDeviceMemoryProperties(physicalDevice, &memProps, false /* no lock */);
2144
2145 setDeviceInfo(*pDevice, physicalDevice, props, memProps, pCreateInfo->enabledExtensionCount,
2146 pCreateInfo->ppEnabledExtensionNames, pCreateInfo->pNext);
2147
2148 return input_result;
2149 }
2150
2151 void ResourceTracker::on_vkDestroyDevice_pre(void* context, VkDevice device,
2152 const VkAllocationCallbacks*) {
2153 (void)context;
2154 AutoLock<RecursiveLock> lock(mLock);
2155
2156 auto it = info_VkDevice.find(device);
2157 if (it == info_VkDevice.end()) return;
2158
2159 for (auto itr = info_VkDeviceMemory.cbegin(); itr != info_VkDeviceMemory.cend();) {
2160 auto& memInfo = itr->second;
2161 if (memInfo.device == device) {
2162 itr = info_VkDeviceMemory.erase(itr);
2163 } else {
2164 itr++;
2165 }
2166 }
2167 }
2168
2169 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
2170 void updateMemoryTypeBits(uint32_t* memoryTypeBits, uint32_t memoryIndex) {
2171 *memoryTypeBits = 1u << memoryIndex;
2172 }
2173 #endif
2174
2175 #ifdef VK_USE_PLATFORM_ANDROID_KHR
2176
2177 VkResult ResourceTracker::on_vkGetAndroidHardwareBufferPropertiesANDROID(
2178 void* context, VkResult, VkDevice device, const AHardwareBuffer* buffer,
2179 VkAndroidHardwareBufferPropertiesANDROID* pProperties) {
2180 auto grallocHelper =
2181 ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->grallocHelper();
2182
2183 // Delete once goldfish Linux drivers are gone
2184 if (mCaps.vulkanCapset.colorBufferMemoryIndex == 0xFFFFFFFF) {
2185 mCaps.vulkanCapset.colorBufferMemoryIndex = getColorBufferMemoryIndex(context, device);
2186 }
2187
2188 updateMemoryTypeBits(&pProperties->memoryTypeBits, mCaps.vulkanCapset.colorBufferMemoryIndex);
2189
2190 return getAndroidHardwareBufferPropertiesANDROID(grallocHelper, buffer, pProperties);
2191 }
2192
2193 VkResult ResourceTracker::on_vkGetMemoryAndroidHardwareBufferANDROID(
2194 void*, VkResult, VkDevice device, const VkMemoryGetAndroidHardwareBufferInfoANDROID* pInfo,
2195 struct AHardwareBuffer** pBuffer) {
2196 if (!pInfo) return VK_ERROR_INITIALIZATION_FAILED;
2197 if (!pInfo->memory) return VK_ERROR_INITIALIZATION_FAILED;
2198
2199 AutoLock<RecursiveLock> lock(mLock);
2200
2201 auto deviceIt = info_VkDevice.find(device);
2202
2203 if (deviceIt == info_VkDevice.end()) {
2204 return VK_ERROR_INITIALIZATION_FAILED;
2205 }
2206
2207 auto memoryIt = info_VkDeviceMemory.find(pInfo->memory);
2208
2209 if (memoryIt == info_VkDeviceMemory.end()) {
2210 return VK_ERROR_INITIALIZATION_FAILED;
2211 }
2212
2213 auto& info = memoryIt->second;
2214
2215 auto* gralloc = ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->grallocHelper();
2216 VkResult queryRes = getMemoryAndroidHardwareBufferANDROID(gralloc, &info.ahw);
2217
2218 if (queryRes != VK_SUCCESS) return queryRes;
2219
2220 *pBuffer = info.ahw;
2221
2222 return queryRes;
2223 }
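
// Example (app-side sketch of exporting an AHardwareBuffer from device
// memory; standard VK_ANDROID_external_memory_android_hardware_buffer usage,
// not code from this file):
//
//     VkMemoryGetAndroidHardwareBufferInfoANDROID getInfo = {
//         .sType =
//             VK_STRUCTURE_TYPE_MEMORY_GET_ANDROID_HARDWARE_BUFFER_INFO_ANDROID,
//         .pNext = nullptr,
//         .memory = memory,
//     };
//     AHardwareBuffer* ahb = nullptr;
//     VkResult res =
//         vkGetMemoryAndroidHardwareBufferANDROID(device, &getInfo, &ahb);
//     // On success the caller owns a reference on `ahb` and must release it
//     // with AHardwareBuffer_release().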
2224 #endif
2225
2226 #ifdef VK_USE_PLATFORM_FUCHSIA
2227 VkResult ResourceTracker::on_vkGetMemoryZirconHandleFUCHSIA(
2228 void*, VkResult, VkDevice device, const VkMemoryGetZirconHandleInfoFUCHSIA* pInfo,
2229 uint32_t* pHandle) {
2230 if (!pInfo) return VK_ERROR_INITIALIZATION_FAILED;
2231 if (!pInfo->memory) return VK_ERROR_INITIALIZATION_FAILED;
2232
2233 AutoLock<RecursiveLock> lock(mLock);
2234
2235 auto deviceIt = info_VkDevice.find(device);
2236
2237 if (deviceIt == info_VkDevice.end()) {
2238 return VK_ERROR_INITIALIZATION_FAILED;
2239 }
2240
2241 auto memoryIt = info_VkDeviceMemory.find(pInfo->memory);
2242
2243 if (memoryIt == info_VkDeviceMemory.end()) {
2244 return VK_ERROR_INITIALIZATION_FAILED;
2245 }
2246
2247 auto& info = memoryIt->second;
2248
2249 if (info.vmoHandle == ZX_HANDLE_INVALID) {
2250 mesa_loge("%s: memory cannot be exported", __func__);
2251 return VK_ERROR_INITIALIZATION_FAILED;
2252 }
2253
2254 *pHandle = ZX_HANDLE_INVALID;
2255 zx_handle_duplicate(info.vmoHandle, ZX_RIGHT_SAME_RIGHTS, pHandle);
2256 return VK_SUCCESS;
2257 }
2258
2259 VkResult ResourceTracker::on_vkGetMemoryZirconHandlePropertiesFUCHSIA(
2260 void*, VkResult, VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType,
2261 uint32_t handle, VkMemoryZirconHandlePropertiesFUCHSIA* pProperties) {
2262 using fuchsia_hardware_goldfish::wire::kMemoryPropertyDeviceLocal;
2263 using fuchsia_hardware_goldfish::wire::kMemoryPropertyHostVisible;
2264
2265 if (handleType != VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA) {
2266 return VK_ERROR_INITIALIZATION_FAILED;
2267 }
2268
2269 zx_info_handle_basic_t handleInfo;
2270 zx_status_t status = zx::unowned_vmo(handle)->get_info(ZX_INFO_HANDLE_BASIC, &handleInfo,
2271 sizeof(handleInfo), nullptr, nullptr);
2272 if (status != ZX_OK || handleInfo.type != ZX_OBJ_TYPE_VMO) {
2273 return VK_ERROR_INVALID_EXTERNAL_HANDLE;
2274 }
2275
2276 AutoLock<RecursiveLock> lock(mLock);
2277
2278 auto deviceIt = info_VkDevice.find(device);
2279
2280 if (deviceIt == info_VkDevice.end()) {
2281 return VK_ERROR_INITIALIZATION_FAILED;
2282 }
2283
2284 auto& info = deviceIt->second;
2285
2286 zx::vmo vmo_dup;
2287 status = zx::unowned_vmo(handle)->duplicate(ZX_RIGHT_SAME_RIGHTS, &vmo_dup);
2288 if (status != ZX_OK) {
2289 mesa_loge("zx_handle_duplicate() error: %d", status);
2290 return VK_ERROR_INITIALIZATION_FAILED;
2291 }
2292
2293 uint32_t memoryProperty = 0u;
2294
2295 auto result = mControlDevice->GetBufferHandleInfo(std::move(vmo_dup));
2296 if (!result.ok()) {
2297 mesa_loge("mControlDevice->GetBufferHandleInfo fatal error: epitaph: %d", result.status());
2298 return VK_ERROR_INITIALIZATION_FAILED;
2299 }
2300 if (result.value().is_ok()) {
2301 memoryProperty = result.value().value()->info.memory_property();
2302 } else if (result.value().error_value() == ZX_ERR_NOT_FOUND) {
2303 // If a VMO is allocated while ColorBuffer/Buffer is not created,
2304 // it must be a device-local buffer, since for host-visible buffers,
2305 // ColorBuffer/Buffer is created at sysmem allocation time.
2306 memoryProperty = kMemoryPropertyDeviceLocal;
2307 } else {
2308 // Importing read-only host memory into the Vulkan driver should not
2309 // work, but it is not an error to try to do so. Returning a
2310 // VkMemoryZirconHandlePropertiesFUCHSIA with no available
2311 // memoryType bits should be enough for clients. See fxbug.dev/42098398
2312     // for other issues with this flow.
2313 mesa_logw("GetBufferHandleInfo failed: %d", result.value().error_value());
2314 pProperties->memoryTypeBits = 0;
2315 return VK_SUCCESS;
2316 }
2317
2318 pProperties->memoryTypeBits = 0;
2319 for (uint32_t i = 0; i < info.memProps.memoryTypeCount; ++i) {
2320 if (((memoryProperty & kMemoryPropertyDeviceLocal) &&
2321 (info.memProps.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)) ||
2322 ((memoryProperty & kMemoryPropertyHostVisible) &&
2323 (info.memProps.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT))) {
2324 pProperties->memoryTypeBits |= 1ull << i;
2325 }
2326 }
2327 return VK_SUCCESS;
2328 }
2329
2330 zx_koid_t getEventKoid(zx_handle_t eventHandle) {
2331 if (eventHandle == ZX_HANDLE_INVALID) {
2332 return ZX_KOID_INVALID;
2333 }
2334
2335 zx_info_handle_basic_t info;
2336 zx_status_t status = zx_object_get_info(eventHandle, ZX_INFO_HANDLE_BASIC, &info, sizeof(info),
2337 nullptr, nullptr);
2338 if (status != ZX_OK) {
2339 mesa_loge("Cannot get object info of handle %u: %d", eventHandle, status);
2340 return ZX_KOID_INVALID;
2341 }
2342 return info.koid;
2343 }
2344
2345 VkResult ResourceTracker::on_vkImportSemaphoreZirconHandleFUCHSIA(
2346 void*, VkResult, VkDevice device, const VkImportSemaphoreZirconHandleInfoFUCHSIA* pInfo) {
2347 if (!pInfo) return VK_ERROR_INITIALIZATION_FAILED;
2348 if (!pInfo->semaphore) return VK_ERROR_INITIALIZATION_FAILED;
2349
2350 AutoLock<RecursiveLock> lock(mLock);
2351
2352 auto deviceIt = info_VkDevice.find(device);
2353
2354 if (deviceIt == info_VkDevice.end()) {
2355 return VK_ERROR_INITIALIZATION_FAILED;
2356 }
2357
2358 auto semaphoreIt = info_VkSemaphore.find(pInfo->semaphore);
2359
2360 if (semaphoreIt == info_VkSemaphore.end()) {
2361 return VK_ERROR_INITIALIZATION_FAILED;
2362 }
2363
2364 auto& info = semaphoreIt->second;
2365
2366 if (info.eventHandle != ZX_HANDLE_INVALID) {
2367 zx_handle_close(info.eventHandle);
2368 }
2369 #if VK_HEADER_VERSION < 174
2370 info.eventHandle = pInfo->handle;
2371 #else // VK_HEADER_VERSION >= 174
2372 info.eventHandle = pInfo->zirconHandle;
2373 #endif // VK_HEADER_VERSION < 174
2374 if (info.eventHandle != ZX_HANDLE_INVALID) {
2375 info.eventKoid = getEventKoid(info.eventHandle);
2376 }
2377
2378 return VK_SUCCESS;
2379 }
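
// Example (app-side sketch, assuming VK_HEADER_VERSION >= 174 so the handle
// field is named zirconHandle; standard VK_FUCHSIA_external_semaphore usage,
// not code from this file):
//
//     VkImportSemaphoreZirconHandleInfoFUCHSIA importInfo = {
//         .sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_ZIRCON_HANDLE_INFO_FUCHSIA,
//         .pNext = nullptr,
//         .semaphore = semaphore,
//         .flags = 0,
//         .handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA,
//         .zirconHandle = event,  // ownership transfers on successful import
//     };
//     VkResult res = vkImportSemaphoreZirconHandleFUCHSIA(device, &importInfo);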
2380
2381 VkResult ResourceTracker::on_vkGetSemaphoreZirconHandleFUCHSIA(
2382 void*, VkResult, VkDevice device, const VkSemaphoreGetZirconHandleInfoFUCHSIA* pInfo,
2383 uint32_t* pHandle) {
2384 if (!pInfo) return VK_ERROR_INITIALIZATION_FAILED;
2385 if (!pInfo->semaphore) return VK_ERROR_INITIALIZATION_FAILED;
2386
2387 AutoLock<RecursiveLock> lock(mLock);
2388
2389 auto deviceIt = info_VkDevice.find(device);
2390
2391 if (deviceIt == info_VkDevice.end()) {
2392 return VK_ERROR_INITIALIZATION_FAILED;
2393 }
2394
2395 auto semaphoreIt = info_VkSemaphore.find(pInfo->semaphore);
2396
2397 if (semaphoreIt == info_VkSemaphore.end()) {
2398 return VK_ERROR_INITIALIZATION_FAILED;
2399 }
2400
2401 auto& info = semaphoreIt->second;
2402
2403 if (info.eventHandle == ZX_HANDLE_INVALID) {
2404 return VK_ERROR_INITIALIZATION_FAILED;
2405 }
2406
2407 *pHandle = ZX_HANDLE_INVALID;
2408 zx_handle_duplicate(info.eventHandle, ZX_RIGHT_SAME_RIGHTS, pHandle);
2409 return VK_SUCCESS;
2410 }
2411
2412 VkResult ResourceTracker::on_vkCreateBufferCollectionFUCHSIA(
2413 void*, VkResult, VkDevice, const VkBufferCollectionCreateInfoFUCHSIA* pInfo,
2414 const VkAllocationCallbacks*, VkBufferCollectionFUCHSIA* pCollection) {
2415 fidl::ClientEnd<::fuchsia_sysmem::BufferCollectionToken> token_client;
2416
2417 if (pInfo->collectionToken) {
2418 token_client = fidl::ClientEnd<::fuchsia_sysmem::BufferCollectionToken>(
2419 zx::channel(pInfo->collectionToken));
2420 } else {
2421 auto endpoints = fidl::CreateEndpoints<::fuchsia_sysmem::BufferCollectionToken>();
2422 if (!endpoints.is_ok()) {
2423 mesa_loge("zx_channel_create failed: %d", endpoints.status_value());
2424 return VK_ERROR_INITIALIZATION_FAILED;
2425 }
2426
2427 auto result = mSysmemAllocator->AllocateSharedCollection(std::move(endpoints->server));
2428 if (!result.ok()) {
2429 mesa_loge("AllocateSharedCollection failed: %d", result.status());
2430 return VK_ERROR_INITIALIZATION_FAILED;
2431 }
2432 token_client = std::move(endpoints->client);
2433 }
2434
2435 auto endpoints = fidl::CreateEndpoints<::fuchsia_sysmem::BufferCollection>();
2436 if (!endpoints.is_ok()) {
2437 mesa_loge("zx_channel_create failed: %d", endpoints.status_value());
2438 return VK_ERROR_INITIALIZATION_FAILED;
2439 }
2440 auto [collection_client, collection_server] = std::move(endpoints.value());
2441
2442 auto result = mSysmemAllocator->BindSharedCollection(std::move(token_client),
2443 std::move(collection_server));
2444 if (!result.ok()) {
2445 mesa_loge("BindSharedCollection failed: %d", result.status());
2446 return VK_ERROR_INITIALIZATION_FAILED;
2447 }
2448
2449 auto* sysmem_collection =
2450 new fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>(std::move(collection_client));
2451 *pCollection = reinterpret_cast<VkBufferCollectionFUCHSIA>(sysmem_collection);
2452
2453 register_VkBufferCollectionFUCHSIA(*pCollection);
2454 return VK_SUCCESS;
2455 }
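
// Example (app-side sketch; standard VK_FUCHSIA_buffer_collection usage, not
// code from this file). Passing ZX_HANDLE_INVALID as the token takes the
// else-branch above, which allocates a fresh shared collection from sysmem.
//
//     VkBufferCollectionCreateInfoFUCHSIA createInfo = {
//         .sType = VK_STRUCTURE_TYPE_BUFFER_COLLECTION_CREATE_INFO_FUCHSIA,
//         .pNext = nullptr,
//         .collectionToken = tokenHandle,  // or ZX_HANDLE_INVALID
//     };
//     VkBufferCollectionFUCHSIA collection;
//     VkResult res = vkCreateBufferCollectionFUCHSIA(device, &createInfo,
//                                                    nullptr, &collection);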
2456
2457 void ResourceTracker::on_vkDestroyBufferCollectionFUCHSIA(void*, VkResult, VkDevice,
2458 VkBufferCollectionFUCHSIA collection,
2459 const VkAllocationCallbacks*) {
2460 auto sysmem_collection =
2461 reinterpret_cast<fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(collection);
2462 if (sysmem_collection) {
2463 (*sysmem_collection)->Close();
2464 }
2465 delete sysmem_collection;
2466
2467 unregister_VkBufferCollectionFUCHSIA(collection);
2468 }
2469
2470 SetBufferCollectionImageConstraintsResult ResourceTracker::setBufferCollectionImageConstraintsImpl(
2471 VkEncoder* enc, VkDevice device,
2472 fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>* pCollection,
2473 const VkImageConstraintsInfoFUCHSIA* pImageConstraintsInfo) {
2474 const auto& collection = *pCollection;
2475 if (!pImageConstraintsInfo ||
2476 pImageConstraintsInfo->sType != VK_STRUCTURE_TYPE_IMAGE_CONSTRAINTS_INFO_FUCHSIA) {
2477 mesa_loge("%s: invalid pImageConstraintsInfo", __func__);
2478 return {VK_ERROR_INITIALIZATION_FAILED};
2479 }
2480
2481 if (pImageConstraintsInfo->formatConstraintsCount == 0) {
2482 mesa_loge("%s: formatConstraintsCount must be greater than 0", __func__);
2483 abort();
2484 }
2485
2486 fuchsia_sysmem::wire::BufferCollectionConstraints constraints =
2487 defaultBufferCollectionConstraints(
2488 /* min_size_bytes */ 0,
2489 pImageConstraintsInfo->bufferCollectionConstraints.minBufferCount,
2490 pImageConstraintsInfo->bufferCollectionConstraints.maxBufferCount,
2491 pImageConstraintsInfo->bufferCollectionConstraints.minBufferCountForCamping,
2492 pImageConstraintsInfo->bufferCollectionConstraints.minBufferCountForDedicatedSlack,
2493 pImageConstraintsInfo->bufferCollectionConstraints.minBufferCountForSharedSlack);
2494
2495 std::vector<fuchsia_sysmem::wire::ImageFormatConstraints> format_constraints;
2496
2497 VkPhysicalDevice physicalDevice;
2498 {
2499 AutoLock<RecursiveLock> lock(mLock);
2500 auto deviceIt = info_VkDevice.find(device);
2501 if (deviceIt == info_VkDevice.end()) {
2502 return {VK_ERROR_INITIALIZATION_FAILED};
2503 }
2504 physicalDevice = deviceIt->second.physdev;
2505 }
2506
2507 std::vector<uint32_t> createInfoIndex;
2508
2509 bool hasOptimalTiling = false;
2510 for (uint32_t i = 0; i < pImageConstraintsInfo->formatConstraintsCount; i++) {
2511 const VkImageCreateInfo* createInfo =
2512 &pImageConstraintsInfo->pFormatConstraints[i].imageCreateInfo;
2513 const VkImageFormatConstraintsInfoFUCHSIA* formatConstraints =
2514 &pImageConstraintsInfo->pFormatConstraints[i];
2515
2516         // Add ImageFormatConstraints for *optimal* tiling
2517 VkResult optimalResult = VK_ERROR_FORMAT_NOT_SUPPORTED;
2518 if (createInfo->tiling == VK_IMAGE_TILING_OPTIMAL) {
2519 optimalResult = addImageBufferCollectionConstraintsFUCHSIA(
2520 enc, device, physicalDevice, formatConstraints, VK_IMAGE_TILING_OPTIMAL,
2521 &constraints);
2522 if (optimalResult == VK_SUCCESS) {
2523 createInfoIndex.push_back(i);
2524 hasOptimalTiling = true;
2525 }
2526 }
2527
2528 // Add ImageFormatConstraints for *linear* tiling
2529 VkResult linearResult = addImageBufferCollectionConstraintsFUCHSIA(
2530 enc, device, physicalDevice, formatConstraints, VK_IMAGE_TILING_LINEAR, &constraints);
2531 if (linearResult == VK_SUCCESS) {
2532 createInfoIndex.push_back(i);
2533 }
2534
2535 // Update usage and BufferMemoryConstraints
2536 if (linearResult == VK_SUCCESS || optimalResult == VK_SUCCESS) {
2537 constraints.usage.vulkan |= getBufferCollectionConstraintsVulkanImageUsage(createInfo);
2538
2539 if (formatConstraints && formatConstraints->flags) {
2540 mesa_logw(
2541 "%s: Non-zero flags (%08x) in image format "
2542 "constraints; this is currently not supported, see "
2543 "fxbug.dev/42147900.",
2544 __func__, formatConstraints->flags);
2545 }
2546 }
2547 }
2548
2549 // Set buffer memory constraints based on optimal/linear tiling support
2550 // and flags.
2551 VkImageConstraintsInfoFlagsFUCHSIA flags = pImageConstraintsInfo->flags;
2552 if (flags & VK_IMAGE_CONSTRAINTS_INFO_CPU_READ_RARELY_FUCHSIA)
2553 constraints.usage.cpu |= fuchsia_sysmem::wire::kCpuUsageRead;
2554 if (flags & VK_IMAGE_CONSTRAINTS_INFO_CPU_READ_OFTEN_FUCHSIA)
2555 constraints.usage.cpu |= fuchsia_sysmem::wire::kCpuUsageReadOften;
2556 if (flags & VK_IMAGE_CONSTRAINTS_INFO_CPU_WRITE_RARELY_FUCHSIA)
2557 constraints.usage.cpu |= fuchsia_sysmem::wire::kCpuUsageWrite;
2558 if (flags & VK_IMAGE_CONSTRAINTS_INFO_CPU_WRITE_OFTEN_FUCHSIA)
2559 constraints.usage.cpu |= fuchsia_sysmem::wire::kCpuUsageWriteOften;
2560
2561 constraints.has_buffer_memory_constraints = true;
2562 auto& memory_constraints = constraints.buffer_memory_constraints;
2563 memory_constraints.cpu_domain_supported = true;
2564 memory_constraints.ram_domain_supported = true;
2565 memory_constraints.inaccessible_domain_supported =
2566 hasOptimalTiling && !(flags & (VK_IMAGE_CONSTRAINTS_INFO_CPU_READ_RARELY_FUCHSIA |
2567 VK_IMAGE_CONSTRAINTS_INFO_CPU_READ_OFTEN_FUCHSIA |
2568 VK_IMAGE_CONSTRAINTS_INFO_CPU_WRITE_RARELY_FUCHSIA |
2569 VK_IMAGE_CONSTRAINTS_INFO_CPU_WRITE_OFTEN_FUCHSIA));
2570
2571 if (memory_constraints.inaccessible_domain_supported) {
2572 memory_constraints.heap_permitted_count = 2;
2573 memory_constraints.heap_permitted[0] = fuchsia_sysmem::wire::HeapType::kGoldfishDeviceLocal;
2574 memory_constraints.heap_permitted[1] = fuchsia_sysmem::wire::HeapType::kGoldfishHostVisible;
2575 } else {
2576 memory_constraints.heap_permitted_count = 1;
2577 memory_constraints.heap_permitted[0] = fuchsia_sysmem::wire::HeapType::kGoldfishHostVisible;
2578 }
2579
2580 if (constraints.image_format_constraints_count == 0) {
2581         mesa_loge("%s: none of the specified formats is supported by the device", __func__);
2582 return {VK_ERROR_FORMAT_NOT_SUPPORTED};
2583 }
2584
2585 constexpr uint32_t kVulkanPriority = 5;
2586 const char kName[] = "GoldfishSysmemShared";
2587 collection->SetName(kVulkanPriority, fidl::StringView(kName));
2588
2589 auto result = collection->SetConstraints(true, constraints);
2590 if (!result.ok()) {
2591 mesa_loge("setBufferCollectionConstraints: SetConstraints failed: %d", result.status());
2592 return {VK_ERROR_INITIALIZATION_FAILED};
2593 }
2594
2595 return {VK_SUCCESS, constraints, std::move(createInfoIndex)};
2596 }
2597
2598 VkResult ResourceTracker::setBufferCollectionImageConstraintsFUCHSIA(
2599 VkEncoder* enc, VkDevice device,
2600 fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>* pCollection,
2601 const VkImageConstraintsInfoFUCHSIA* pImageConstraintsInfo) {
2602 const auto& collection = *pCollection;
2603
2604 auto setConstraintsResult =
2605 setBufferCollectionImageConstraintsImpl(enc, device, pCollection, pImageConstraintsInfo);
2606 if (setConstraintsResult.result != VK_SUCCESS) {
2607 return setConstraintsResult.result;
2608 }
2609
2610 // copy constraints to info_VkBufferCollectionFUCHSIA if
2611 // |collection| is a valid VkBufferCollectionFUCHSIA handle.
2612 AutoLock<RecursiveLock> lock(mLock);
2613 VkBufferCollectionFUCHSIA buffer_collection =
2614 reinterpret_cast<VkBufferCollectionFUCHSIA>(pCollection);
2615 if (info_VkBufferCollectionFUCHSIA.find(buffer_collection) !=
2616 info_VkBufferCollectionFUCHSIA.end()) {
2617 info_VkBufferCollectionFUCHSIA[buffer_collection].constraints =
2618 gfxstream::guest::makeOptional(std::move(setConstraintsResult.constraints));
2619 info_VkBufferCollectionFUCHSIA[buffer_collection].createInfoIndex =
2620 std::move(setConstraintsResult.createInfoIndex);
2621 }
2622
2623 return VK_SUCCESS;
2624 }
2625
2626 VkResult ResourceTracker::setBufferCollectionBufferConstraintsFUCHSIA(
2627 fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>* pCollection,
2628 const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo) {
2629 auto setConstraintsResult =
2630 setBufferCollectionBufferConstraintsImpl(pCollection, pBufferConstraintsInfo);
2631 if (setConstraintsResult.result != VK_SUCCESS) {
2632 return setConstraintsResult.result;
2633 }
2634
2635 // copy constraints to info_VkBufferCollectionFUCHSIA if
2636 // |collection| is a valid VkBufferCollectionFUCHSIA handle.
2637 AutoLock<RecursiveLock> lock(mLock);
2638 VkBufferCollectionFUCHSIA buffer_collection =
2639 reinterpret_cast<VkBufferCollectionFUCHSIA>(pCollection);
2640 if (info_VkBufferCollectionFUCHSIA.find(buffer_collection) !=
2641 info_VkBufferCollectionFUCHSIA.end()) {
2642 info_VkBufferCollectionFUCHSIA[buffer_collection].constraints =
2643 gfxstream::guest::makeOptional(setConstraintsResult.constraints);
2644 }
2645
2646 return VK_SUCCESS;
2647 }
2648
2649 VkResult ResourceTracker::on_vkSetBufferCollectionImageConstraintsFUCHSIA(
2650 void* context, VkResult, VkDevice device, VkBufferCollectionFUCHSIA collection,
2651 const VkImageConstraintsInfoFUCHSIA* pImageConstraintsInfo) {
2652 VkEncoder* enc = (VkEncoder*)context;
2653 auto sysmem_collection =
2654 reinterpret_cast<fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(collection);
2655 return setBufferCollectionImageConstraintsFUCHSIA(enc, device, sysmem_collection,
2656 pImageConstraintsInfo);
2657 }
2658
2659 VkResult ResourceTracker::on_vkSetBufferCollectionBufferConstraintsFUCHSIA(
2660 void*, VkResult, VkDevice, VkBufferCollectionFUCHSIA collection,
2661 const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo) {
2662 auto sysmem_collection =
2663 reinterpret_cast<fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(collection);
2664 return setBufferCollectionBufferConstraintsFUCHSIA(sysmem_collection, pBufferConstraintsInfo);
2665 }
2666
2667 VkResult ResourceTracker::getBufferCollectionImageCreateInfoIndexLocked(
2668 VkBufferCollectionFUCHSIA collection, fuchsia_sysmem::wire::BufferCollectionInfo2& info,
2669 uint32_t* outCreateInfoIndex) {
2670 if (!info_VkBufferCollectionFUCHSIA[collection].constraints.hasValue()) {
2671 mesa_loge("%s: constraints not set", __func__);
2672 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
2673 }
2674
2675 if (!info.settings.has_image_format_constraints) {
2676 // no image format constraints, skip getting createInfoIndex.
2677 return VK_SUCCESS;
2678 }
2679
2680 const auto& constraints = *info_VkBufferCollectionFUCHSIA[collection].constraints;
2681 const auto& createInfoIndices = info_VkBufferCollectionFUCHSIA[collection].createInfoIndex;
2682 const auto& out = info.settings.image_format_constraints;
2683 bool foundCreateInfo = false;
2684
2685 for (size_t imageFormatIndex = 0; imageFormatIndex < constraints.image_format_constraints_count;
2686 imageFormatIndex++) {
2687 const auto& in = constraints.image_format_constraints[imageFormatIndex];
2688 // These checks are sorted in order of how often they're expected to
2689 // mismatch, from most likely to least likely. They aren't always
2690 // equality comparisons, since sysmem may change some values in
2691 // compatible ways on behalf of the other participants.
2692 if ((out.pixel_format.type != in.pixel_format.type) ||
2693 (out.pixel_format.has_format_modifier != in.pixel_format.has_format_modifier) ||
2694 (out.pixel_format.format_modifier.value != in.pixel_format.format_modifier.value) ||
2695 (out.min_bytes_per_row < in.min_bytes_per_row) ||
2696 (out.required_max_coded_width < in.required_max_coded_width) ||
2697 (out.required_max_coded_height < in.required_max_coded_height) ||
2698 (in.bytes_per_row_divisor != 0 &&
2699 out.bytes_per_row_divisor % in.bytes_per_row_divisor != 0)) {
2700 continue;
2701 }
2702 // Check if the out colorspaces are a subset of the in color spaces.
2703 bool all_color_spaces_found = true;
2704 for (uint32_t j = 0; j < out.color_spaces_count; j++) {
2705 bool found_matching_color_space = false;
2706 for (uint32_t k = 0; k < in.color_spaces_count; k++) {
2707 if (out.color_space[j].type == in.color_space[k].type) {
2708 found_matching_color_space = true;
2709 break;
2710 }
2711 }
2712 if (!found_matching_color_space) {
2713 all_color_spaces_found = false;
2714 break;
2715 }
2716 }
2717 if (!all_color_spaces_found) {
2718 continue;
2719 }
2720
2721 // Choose the first valid format for now.
2722 *outCreateInfoIndex = createInfoIndices[imageFormatIndex];
2723 return VK_SUCCESS;
2724 }
2725
2726 mesa_loge("%s: cannot find a valid image format in constraints", __func__);
2727 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
2728 }
2729
2730 VkResult ResourceTracker::on_vkGetBufferCollectionPropertiesFUCHSIA(
2731 void* context, VkResult, VkDevice device, VkBufferCollectionFUCHSIA collection,
2732 VkBufferCollectionPropertiesFUCHSIA* pProperties) {
2733 VkEncoder* enc = (VkEncoder*)context;
2734 const auto& sysmem_collection =
2735 *reinterpret_cast<fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(collection);
2736
2737 auto result = sysmem_collection->WaitForBuffersAllocated();
2738 if (!result.ok() || result->status != ZX_OK) {
2739 mesa_loge("Failed wait for allocation: %d %d", result.status(),
2740 GET_STATUS_SAFE(result, status));
2741 return VK_ERROR_INITIALIZATION_FAILED;
2742 }
2743 fuchsia_sysmem::wire::BufferCollectionInfo2 info = std::move(result->buffer_collection_info);
2744
2745 bool is_host_visible =
2746 info.settings.buffer_settings.heap == fuchsia_sysmem::wire::HeapType::kGoldfishHostVisible;
2747 bool is_device_local =
2748 info.settings.buffer_settings.heap == fuchsia_sysmem::wire::HeapType::kGoldfishDeviceLocal;
2749 if (!is_host_visible && !is_device_local) {
2750 mesa_loge("buffer collection uses a non-goldfish heap (type 0x%lu)",
2751 static_cast<uint64_t>(info.settings.buffer_settings.heap));
2752 return VK_ERROR_INITIALIZATION_FAILED;
2753 }
2754
2755 // memoryTypeBits
2756 // ====================================================================
2757 {
2758 AutoLock<RecursiveLock> lock(mLock);
2759 auto deviceIt = info_VkDevice.find(device);
2760 if (deviceIt == info_VkDevice.end()) {
2761 return VK_ERROR_INITIALIZATION_FAILED;
2762 }
2763 auto& deviceInfo = deviceIt->second;
2764
2765 // Device local memory type supported.
2766 pProperties->memoryTypeBits = 0;
2767 for (uint32_t i = 0; i < deviceInfo.memProps.memoryTypeCount; ++i) {
2768 if ((is_device_local && (deviceInfo.memProps.memoryTypes[i].propertyFlags &
2769 VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)) ||
2770 (is_host_visible && (deviceInfo.memProps.memoryTypes[i].propertyFlags &
2771 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT))) {
2772 pProperties->memoryTypeBits |= 1ull << i;
2773 }
2774 }
2775 }
2776
2777 // bufferCount
2778 // ====================================================================
2779 pProperties->bufferCount = info.buffer_count;
2780
2781 auto storeProperties = [this, collection, pProperties]() -> VkResult {
2782 // store properties to storage
2783 AutoLock<RecursiveLock> lock(mLock);
2784 if (info_VkBufferCollectionFUCHSIA.find(collection) ==
2785 info_VkBufferCollectionFUCHSIA.end()) {
2786 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
2787 }
2788
2789 info_VkBufferCollectionFUCHSIA[collection].properties =
2790 gfxstream::guest::makeOptional(*pProperties);
2791
2792 // We only do a shallow copy so we should remove all pNext pointers.
2793 info_VkBufferCollectionFUCHSIA[collection].properties->pNext = nullptr;
2794 info_VkBufferCollectionFUCHSIA[collection].properties->sysmemColorSpaceIndex.pNext =
2795 nullptr;
2796 return VK_SUCCESS;
2797 };
2798
2799 // The fields below only apply to buffer collections with image formats.
2800 if (!info.settings.has_image_format_constraints) {
2801 mesa_logd("%s: buffer collection doesn't have image format constraints", __func__);
2802 return storeProperties();
2803 }
2804
2805 // sysmemFormat
2806 // ====================================================================
2807
2808 pProperties->sysmemPixelFormat =
2809 static_cast<uint64_t>(info.settings.image_format_constraints.pixel_format.type);
2810
2811 // colorSpace
2812 // ====================================================================
2813 if (info.settings.image_format_constraints.color_spaces_count == 0) {
2814 mesa_loge(
2815 "%s: color space missing from allocated buffer collection "
2816 "constraints",
2817 __func__);
2818 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
2819 }
2820 // Only report first colorspace for now.
2821 pProperties->sysmemColorSpaceIndex.colorSpace =
2822 static_cast<uint32_t>(info.settings.image_format_constraints.color_space[0].type);
2823
2824 // createInfoIndex
2825 // ====================================================================
2826 {
2827 AutoLock<RecursiveLock> lock(mLock);
2828 auto getIndexResult = getBufferCollectionImageCreateInfoIndexLocked(
2829 collection, info, &pProperties->createInfoIndex);
2830 if (getIndexResult != VK_SUCCESS) {
2831 return getIndexResult;
2832 }
2833 }
2834
2835 // formatFeatures
2836 // ====================================================================
2837 VkPhysicalDevice physicalDevice;
2838 {
2839 AutoLock<RecursiveLock> lock(mLock);
2840 auto deviceIt = info_VkDevice.find(device);
2841 if (deviceIt == info_VkDevice.end()) {
2842 return VK_ERROR_INITIALIZATION_FAILED;
2843 }
2844 physicalDevice = deviceIt->second.physdev;
2845 }
2846
2847 VkFormat vkFormat =
2848 sysmemPixelFormatTypeToVk(info.settings.image_format_constraints.pixel_format.type);
2849 VkFormatProperties formatProperties;
2850 enc->vkGetPhysicalDeviceFormatProperties(physicalDevice, vkFormat, &formatProperties,
2851 true /* do lock */);
2852 if (is_device_local) {
2853 pProperties->formatFeatures = formatProperties.optimalTilingFeatures;
2854 }
2855 if (is_host_visible) {
2856 pProperties->formatFeatures = formatProperties.linearTilingFeatures;
2857 }
2858
2859 // YCbCr properties
2860 // ====================================================================
2861 // TODO(59804): Implement this correctly when we support YUV pixel
2862 // formats in goldfish ICD.
2863 pProperties->samplerYcbcrConversionComponents.r = VK_COMPONENT_SWIZZLE_IDENTITY;
2864 pProperties->samplerYcbcrConversionComponents.g = VK_COMPONENT_SWIZZLE_IDENTITY;
2865 pProperties->samplerYcbcrConversionComponents.b = VK_COMPONENT_SWIZZLE_IDENTITY;
2866 pProperties->samplerYcbcrConversionComponents.a = VK_COMPONENT_SWIZZLE_IDENTITY;
2867 pProperties->suggestedYcbcrModel = VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY;
2868 pProperties->suggestedYcbcrRange = VK_SAMPLER_YCBCR_RANGE_ITU_FULL;
2869 pProperties->suggestedXChromaOffset = VK_CHROMA_LOCATION_MIDPOINT;
2870 pProperties->suggestedYChromaOffset = VK_CHROMA_LOCATION_MIDPOINT;
2871
2872 return storeProperties();
2873 }
2874 #endif
2875
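// Maps a VkFormat onto the virgl format used for VirtGpu resource creation.
// All 8-bit four-channel variants collapse onto the corresponding UNORM
// layout, since the host resource only needs the memory layout, not the
// sampling semantics. Illustrative examples (following the switch below):
//   getVirglFormat(VK_FORMAT_R8G8B8A8_SRGB)       -> VIRGL_FORMAT_R8G8B8A8_UNORM
//   getVirglFormat(VK_FORMAT_R16G16B16A16_SFLOAT) -> 0 (unsupported; callers
//                                                      treat 0 as an error)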
2876 static uint32_t getVirglFormat(VkFormat vkFormat) {
2877 uint32_t virglFormat = 0;
2878
2879 switch (vkFormat) {
2880 case VK_FORMAT_R8G8B8A8_SINT:
2881 case VK_FORMAT_R8G8B8A8_UNORM:
2882 case VK_FORMAT_R8G8B8A8_SRGB:
2883 case VK_FORMAT_R8G8B8A8_SNORM:
2884 case VK_FORMAT_R8G8B8A8_SSCALED:
2885 case VK_FORMAT_R8G8B8A8_USCALED:
2886 virglFormat = VIRGL_FORMAT_R8G8B8A8_UNORM;
2887 break;
2888 case VK_FORMAT_B8G8R8A8_SINT:
2889 case VK_FORMAT_B8G8R8A8_UNORM:
2890 case VK_FORMAT_B8G8R8A8_SRGB:
2891 case VK_FORMAT_B8G8R8A8_SNORM:
2892 case VK_FORMAT_B8G8R8A8_SSCALED:
2893 case VK_FORMAT_B8G8R8A8_USCALED:
2894 virglFormat = VIRGL_FORMAT_B8G8R8A8_UNORM;
2895 break;
2896 default:
2897 break;
2898 }
2899
2900 return virglFormat;
2901 }
2902
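// Creates the coherent backing for an already-allocated VkDeviceMemory using
// whichever transport is available: with direct memory (Android), the
// goldfish address space block is mapped into the guest via
// vkMapMemoryIntoAddressSpaceGOOGLE; with virtio-gpu-next, a mappable host3d
// blob is created from the (hva, size, blobId) triple returned by
// vkGetMemoryHostAddressInfoGOOGLE. On failure, |res| is set accordingly and
// nullptr is returned.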
2903 CoherentMemoryPtr ResourceTracker::createCoherentMemory(
2904 VkDevice device, VkDeviceMemory mem, const VkMemoryAllocateInfo& hostAllocationInfo,
2905 VkEncoder* enc, VkResult& res) {
2906 CoherentMemoryPtr coherentMemory = nullptr;
2907
2908 #if defined(__ANDROID__)
2909 if (mFeatureInfo->hasDirectMem) {
2910 uint64_t gpuAddr = 0;
2911 GoldfishAddressSpaceBlockPtr block = nullptr;
2912 res = enc->vkMapMemoryIntoAddressSpaceGOOGLE(device, mem, &gpuAddr, true);
2913 if (res != VK_SUCCESS) {
2914 mesa_loge(
2915 "Failed to create coherent memory: vkMapMemoryIntoAddressSpaceGOOGLE "
2916 "returned:%d.",
2917 res);
2918 return coherentMemory;
2919 }
2920 {
2921 AutoLock<RecursiveLock> lock(mLock);
2922 auto it = info_VkDeviceMemory.find(mem);
2923 if (it == info_VkDeviceMemory.end()) {
2924 mesa_loge("Failed to create coherent memory: failed to find device memory.");
2925 res = VK_ERROR_OUT_OF_HOST_MEMORY;
2926 return coherentMemory;
2927 }
2928 auto& info = it->second;
2929 block = info.goldfishBlock;
2930 info.goldfishBlock = nullptr;
2931
2932 coherentMemory = std::make_shared<CoherentMemory>(
2933 block, gpuAddr, hostAllocationInfo.allocationSize, device, mem);
2934 }
2935 } else
2936 #endif // defined(__ANDROID__)
2937 if (mFeatureInfo->hasVirtioGpuNext) {
2938 struct VirtGpuCreateBlob createBlob = {0};
2939 uint64_t hvaSizeId[3];
2940 res = enc->vkGetMemoryHostAddressInfoGOOGLE(device, mem, &hvaSizeId[0], &hvaSizeId[1],
2941 &hvaSizeId[2], true /* do lock */);
2942 if (res != VK_SUCCESS) {
2943 mesa_loge(
2944 "Failed to create coherent memory: vkMapMemoryIntoAddressSpaceGOOGLE "
2945 "returned:%d.",
2946 res);
2947 return coherentMemory;
2948 }
2949 {
2950 AutoLock<RecursiveLock> lock(mLock);
2951 VirtGpuDevice* instance = VirtGpuDevice::getInstance((enum VirtGpuCapset)3);
2952 createBlob.blobMem = kBlobMemHost3d;
2953 createBlob.flags = kBlobFlagMappable;
2954 createBlob.blobId = hvaSizeId[2];
2955 createBlob.size = hostAllocationInfo.allocationSize;
2956
2957 auto blob = instance->createBlob(createBlob);
2958 if (!blob) {
2959 mesa_loge("Failed to create coherent memory: failed to create blob.");
2960 res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
2961 return coherentMemory;
2962 }
2963
2964 VirtGpuResourceMappingPtr mapping = blob->createMapping();
2965 if (!mapping) {
2966 mesa_loge("Failed to create coherent memory: failed to create blob mapping.");
2967 res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
2968 return coherentMemory;
2969 }
2970
2971 coherentMemory =
2972 std::make_shared<CoherentMemory>(mapping, createBlob.size, device, mem);
2973 }
2974 } else {
2975 mesa_loge("FATAL: Unsupported virtual memory feature");
2976 abort();
2977 }
2978 return coherentMemory;
2979 }
2980
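// Allocates host memory suitable for coherent mapping. The requested size is
// padded before the host call; with illustrative numbers (the constants are
// defined elsewhere in this file):
//   deferred mapping / guest handle: ALIGN(size, blobAlignment)
//   dedicated:                       ALIGN(size, kLargestPageSize)
//   shared path:                     max(ALIGN(size, kMegaByte),
//                                        kDefaultHostMemBlockSize)
// The shared path over-allocates so the block can serve later suballocations
// from getCoherentMemory().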
2981 VkResult ResourceTracker::allocateCoherentMemory(VkDevice device,
2982 const VkMemoryAllocateInfo* pAllocateInfo,
2983 VkEncoder* enc, VkDeviceMemory* pMemory) {
2984 uint64_t blobId = 0;
2985 uint64_t offset = 0;
2986 uint8_t* ptr = nullptr;
2987 VkMemoryAllocateFlagsInfo allocFlagsInfo;
2988 VkMemoryOpaqueCaptureAddressAllocateInfo opaqueCaptureAddressAllocInfo;
2989 VkCreateBlobGOOGLE createBlobInfo;
2990 VirtGpuResourcePtr guestBlob = nullptr;
2991
2992 memset(&createBlobInfo, 0, sizeof(struct VkCreateBlobGOOGLE));
2993 createBlobInfo.sType = VK_STRUCTURE_TYPE_CREATE_BLOB_GOOGLE;
2994
2995 const VkMemoryAllocateFlagsInfo* allocFlagsInfoPtr =
2996 vk_find_struct<VkMemoryAllocateFlagsInfo>(pAllocateInfo);
2997 const VkMemoryOpaqueCaptureAddressAllocateInfo* opaqueCaptureAddressAllocInfoPtr =
2998 vk_find_struct<VkMemoryOpaqueCaptureAddressAllocateInfo>(pAllocateInfo);
2999
3000 bool deviceAddressMemoryAllocation =
3001 allocFlagsInfoPtr &&
3002 ((allocFlagsInfoPtr->flags & VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT) ||
3003 (allocFlagsInfoPtr->flags & VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT));
3004
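    // Device-address (and capture/replay) allocations are treated as
    // dedicated: carving them out of a shared coherent block could not
    // guarantee a stable, per-VkDeviceMemory opaque capture address.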
3005 bool dedicated = deviceAddressMemoryAllocation;
3006
3007 if (mCaps.vulkanCapset.deferredMapping || mCaps.params[kParamCreateGuestHandle])
3008 dedicated = true;
3009
3010 VkMemoryAllocateInfo hostAllocationInfo = vk_make_orphan_copy(*pAllocateInfo);
3011 vk_struct_chain_iterator structChainIter = vk_make_chain_iterator(&hostAllocationInfo);
3012
3013 if (mCaps.vulkanCapset.deferredMapping || mCaps.params[kParamCreateGuestHandle]) {
3014 hostAllocationInfo.allocationSize =
3015 ALIGN(pAllocateInfo->allocationSize, mCaps.vulkanCapset.blobAlignment);
3016 } else if (dedicated) {
3017 // Over-align to kLargestPageSize to work around some Windows drivers
3018 // (b:152769369). The host could likely report the desired alignment instead.
3019 hostAllocationInfo.allocationSize = ALIGN(pAllocateInfo->allocationSize, kLargestPageSize);
3020 } else {
3021 VkDeviceSize roundedUpAllocSize = ALIGN(pAllocateInfo->allocationSize, kMegaByte);
3022 hostAllocationInfo.allocationSize = std::max(roundedUpAllocSize, kDefaultHostMemBlockSize);
3023 }
3024
3025 // Support device address capture/replay allocations
3026 if (deviceAddressMemoryAllocation) {
3027 if (allocFlagsInfoPtr) {
3028 mesa_logi("%s: has alloc flags\n", __func__);
3029 allocFlagsInfo = *allocFlagsInfoPtr;
3030 vk_append_struct(&structChainIter, &allocFlagsInfo);
3031 }
3032
3033 if (opaqueCaptureAddressAllocInfoPtr) {
3034 mesa_logi("%s: has opaque capture address\n", __func__);
3035 opaqueCaptureAddressAllocInfo = *opaqueCaptureAddressAllocInfoPtr;
3036 vk_append_struct(&structChainIter, &opaqueCaptureAddressAllocInfo);
3037 }
3038 }
3039
3040 if (mCaps.params[kParamCreateGuestHandle]) {
3041 struct VirtGpuCreateBlob createBlob = {0};
3042 struct VirtGpuExecBuffer exec = {};
3043 VirtGpuDevice* instance = VirtGpuDevice::getInstance();
3044 struct gfxstreamPlaceholderCommandVk placeholderCmd = {};
3045
3046 createBlobInfo.blobId = ++mBlobId;
3047 createBlobInfo.blobMem = kBlobMemGuest;
3048 createBlobInfo.blobFlags = kBlobFlagCreateGuestHandle;
3049 vk_append_struct(&structChainIter, &createBlobInfo);
3050
3051 createBlob.blobMem = kBlobMemGuest;
3052 createBlob.flags = kBlobFlagCreateGuestHandle;
3053 createBlob.blobId = createBlobInfo.blobId;
3054 createBlob.size = hostAllocationInfo.allocationSize;
3055
3056 guestBlob = instance->createBlob(createBlob);
3057 if (!guestBlob) {
3058 mesa_loge("Failed to allocate coherent memory: failed to create blob.");
3059 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3060 }
3061
3062 placeholderCmd.hdr.opCode = GFXSTREAM_PLACEHOLDER_COMMAND_VK;
3063 exec.command = static_cast<void*>(&placeholderCmd);
3064 exec.command_size = sizeof(placeholderCmd);
3065 exec.flags = kRingIdx;
3066 exec.ring_idx = 1;
3067 if (instance->execBuffer(exec, guestBlob.get())) {
3068 mesa_loge("Failed to allocate coherent memory: failed to execbuffer for wait.");
3069 return VK_ERROR_OUT_OF_HOST_MEMORY;
3070 }
3071
3072 guestBlob->wait();
3073 } else if (mCaps.vulkanCapset.deferredMapping) {
3074 createBlobInfo.blobId = ++mBlobId;
3075 createBlobInfo.blobMem = kBlobMemHost3d;
3076 vk_append_struct(&structChainIter, &createBlobInfo);
3077 }
3078
3079 VkDeviceMemory mem = VK_NULL_HANDLE;
3080 VkResult host_res =
3081 enc->vkAllocateMemory(device, &hostAllocationInfo, nullptr, &mem, true /* do lock */);
3082 if (host_res != VK_SUCCESS) {
3083 mesa_loge("Failed to allocate coherent memory: failed to allocate on the host: %d.",
3084 host_res);
3085 return host_res;
3086 }
3087
3088 struct VkDeviceMemory_Info info;
3089 if (mCaps.vulkanCapset.deferredMapping || mCaps.params[kParamCreateGuestHandle]) {
3090 info.allocationSize = pAllocateInfo->allocationSize;
3091 info.blobId = createBlobInfo.blobId;
3092 }
3093
3094 if (guestBlob) {
3095 auto mapping = guestBlob->createMapping();
3096 if (!mapping) {
3097 mesa_loge("Failed to allocate coherent memory: failed to create blob mapping.");
3098 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3099 }
3100
3101 auto coherentMemory = std::make_shared<CoherentMemory>(
3102 mapping, hostAllocationInfo.allocationSize, device, mem);
3103
3104 coherentMemory->subAllocate(pAllocateInfo->allocationSize, &ptr, offset);
3105 info.coherentMemoryOffset = offset;
3106 info.coherentMemory = coherentMemory;
3107 info.ptr = ptr;
3108 }
3109
3110 info.coherentMemorySize = hostAllocationInfo.allocationSize;
3111 info.memoryTypeIndex = hostAllocationInfo.memoryTypeIndex;
3112 info.device = device;
3113 info.dedicated = dedicated;
3114 {
3115 // createCoherentMemory() needs to access the info_VkDeviceMemory
3116 // entry, so set it before use.
3117 AutoLock<RecursiveLock> lock(mLock);
3118 info_VkDeviceMemory[mem] = info;
3119 }
3120
3121 if (mCaps.vulkanCapset.deferredMapping || mCaps.params[kParamCreateGuestHandle]) {
3122 *pMemory = mem;
3123 return host_res;
3124 }
3125
3126 auto coherentMemory = createCoherentMemory(device, mem, hostAllocationInfo, enc, host_res);
3127 if (coherentMemory) {
3128 AutoLock<RecursiveLock> lock(mLock);
3129 coherentMemory->subAllocate(pAllocateInfo->allocationSize, &ptr, offset);
3130 info.allocationSize = pAllocateInfo->allocationSize;
3131 info.coherentMemoryOffset = offset;
3132 info.coherentMemory = coherentMemory;
3133 info.ptr = ptr;
3134 info_VkDeviceMemory[mem] = info;
3135 *pMemory = mem;
3136 } else {
3137 enc->vkFreeMemory(device, mem, nullptr, true);
3138 AutoLock<RecursiveLock> lock(mLock);
3139 info_VkDeviceMemory.erase(mem);
3140 }
3141 return host_res;
3142 }
3143
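// First tries to suballocate out of an existing CoherentMemory block that
// matches the device and memory type index (dedicated allocations are never
// shared); only when no existing block has room does it fall back to
// allocateCoherentMemory() to create a new block.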
3144 VkResult ResourceTracker::getCoherentMemory(const VkMemoryAllocateInfo* pAllocateInfo,
3145 VkEncoder* enc, VkDevice device,
3146 VkDeviceMemory* pMemory) {
3147 VkMemoryAllocateFlagsInfo allocFlagsInfo;
3148 VkMemoryOpaqueCaptureAddressAllocateInfo opaqueCaptureAddressAllocInfo;
3149
3150 // Add buffer device address capture structs
3151 const VkMemoryAllocateFlagsInfo* allocFlagsInfoPtr =
3152 vk_find_struct<VkMemoryAllocateFlagsInfo>(pAllocateInfo);
3153
3154 bool dedicated =
3155 allocFlagsInfoPtr &&
3156 ((allocFlagsInfoPtr->flags & VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT) ||
3157 (allocFlagsInfoPtr->flags & VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT));
3158
3159 if (mCaps.vulkanCapset.deferredMapping || mCaps.params[kParamCreateGuestHandle])
3160 dedicated = true;
3161
3162 CoherentMemoryPtr coherentMemory = nullptr;
3163 uint8_t* ptr = nullptr;
3164 uint64_t offset = 0;
3165 {
3166 AutoLock<RecursiveLock> lock(mLock);
3167 for (const auto& [memory, info] : info_VkDeviceMemory) {
3168 if (info.device != device) continue;
3169
3170 if (info.memoryTypeIndex != pAllocateInfo->memoryTypeIndex) continue;
3171
3172 if (info.dedicated || dedicated) continue;
3173
3174 if (!info.coherentMemory) continue;
3175
3176 if (!info.coherentMemory->subAllocate(pAllocateInfo->allocationSize, &ptr, offset))
3177 continue;
3178
3179 coherentMemory = info.coherentMemory;
3180 break;
3181 }
3182 if (coherentMemory) {
3183 struct VkDeviceMemory_Info info;
3184 info.coherentMemoryOffset = offset;
3185 info.ptr = ptr;
3186 info.memoryTypeIndex = pAllocateInfo->memoryTypeIndex;
3187 info.allocationSize = pAllocateInfo->allocationSize;
3188 info.coherentMemory = coherentMemory;
3189 info.device = device;
3190
3191 // For suballocated memory, create an alias VkDeviceMemory handle for the
3192 // application; the memory backing the suballocation remains the VkDeviceMemory
3193 // associated with the CoherentMemory.
3194 auto mem = new_from_host_VkDeviceMemory(VK_NULL_HANDLE);
3195 info_VkDeviceMemory[mem] = info;
3196 *pMemory = mem;
3197 return VK_SUCCESS;
3198 }
3199 }
3200 return allocateCoherentMemory(device, pAllocateInfo, enc, pMemory);
3201 }
3202
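// Top-level vkAllocateMemory hook. Rough dispatch order, as implemented
// below: AHardwareBuffer export/import on Android, sysmem buffer collections
// and VMOs on Fuchsia, dmabuf export/import on Linux guests. Allocations
// backed by a host resource, or of non-host-visible memory, are forwarded to
// the host as-is; host-visible allocations without an external handle take
// the directly-mapped coherent-memory path at the end.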
3203 VkResult ResourceTracker::on_vkAllocateMemory(void* context, VkResult input_result, VkDevice device,
3204 const VkMemoryAllocateInfo* pAllocateInfo,
3205 const VkAllocationCallbacks* pAllocator,
3206 VkDeviceMemory* pMemory) {
3207 #define _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(result) \
3208 { \
3209 auto it = info_VkDevice.find(device); \
3210 if (it == info_VkDevice.end()) return result; \
3211 emitDeviceMemoryReport(it->second, \
3212 VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATION_FAILED_EXT, 0, \
3213 pAllocateInfo->allocationSize, VK_OBJECT_TYPE_DEVICE_MEMORY, 0, \
3214 pAllocateInfo->memoryTypeIndex); \
3215 return result; \
3216 }
3217
3218 #define _RETURN_SUCCESS_WITH_DEVICE_MEMORY_REPORT \
3219 { \
3220 uint64_t memoryObjectId = (uint64_t)(void*)*pMemory; \
3221 if (ahw) { \
3222 memoryObjectId = getAHardwareBufferId(ahw); \
3223 } \
3224 emitDeviceMemoryReport(info_VkDevice[device], \
3225 isImport ? VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_IMPORT_EXT \
3226 : VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATE_EXT, \
3227 memoryObjectId, pAllocateInfo->allocationSize, \
3228 VK_OBJECT_TYPE_DEVICE_MEMORY, (uint64_t)(void*)*pMemory, \
3229 pAllocateInfo->memoryTypeIndex); \
3230 return VK_SUCCESS; \
3231 }
3232
3233 if (input_result != VK_SUCCESS) _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(input_result);
3234
3235 VkEncoder* enc = (VkEncoder*)context;
3236
3237 VkMemoryAllocateInfo finalAllocInfo = vk_make_orphan_copy(*pAllocateInfo);
3238 vk_struct_chain_iterator structChainIter = vk_make_chain_iterator(&finalAllocInfo);
3239
3240 VkMemoryAllocateFlagsInfo allocFlagsInfo;
3241 VkMemoryOpaqueCaptureAddressAllocateInfo opaqueCaptureAddressAllocInfo;
3242
3243 // Add buffer device address capture structs
3244 const VkMemoryAllocateFlagsInfo* allocFlagsInfoPtr =
3245 vk_find_struct<VkMemoryAllocateFlagsInfo>(pAllocateInfo);
3246 const VkMemoryOpaqueCaptureAddressAllocateInfo* opaqueCaptureAddressAllocInfoPtr =
3247 vk_find_struct<VkMemoryOpaqueCaptureAddressAllocateInfo>(pAllocateInfo);
3248
3249 if (allocFlagsInfoPtr) {
3250 mesa_logi("%s: has alloc flags\n", __func__);
3251 allocFlagsInfo = *allocFlagsInfoPtr;
3252 vk_append_struct(&structChainIter, &allocFlagsInfo);
3253 }
3254
3255 if (opaqueCaptureAddressAllocInfoPtr) {
3256 mesa_logi("%s: has opaque capture address\n", __func__);
3257 opaqueCaptureAddressAllocInfo = *opaqueCaptureAddressAllocInfoPtr;
3258 vk_append_struct(&structChainIter, &opaqueCaptureAddressAllocInfo);
3259 }
3260
3261 VkMemoryDedicatedAllocateInfo dedicatedAllocInfo;
3262 VkImportColorBufferGOOGLE importCbInfo = {
3263 VK_STRUCTURE_TYPE_IMPORT_COLOR_BUFFER_GOOGLE,
3264 0,
3265 };
3266 VkImportBufferGOOGLE importBufferInfo = {
3267 VK_STRUCTURE_TYPE_IMPORT_BUFFER_GOOGLE,
3268 0,
3269 };
3270 // VkImportPhysicalAddressGOOGLE importPhysAddrInfo = {
3271 // VK_STRUCTURE_TYPE_IMPORT_PHYSICAL_ADDRESS_GOOGLE, 0,
3272 // };
3273
3274 const VkExportMemoryAllocateInfo* exportAllocateInfoPtr =
3275 vk_find_struct<VkExportMemoryAllocateInfo>(pAllocateInfo);
3276
3277 #ifdef VK_USE_PLATFORM_ANDROID_KHR
3278 const VkImportAndroidHardwareBufferInfoANDROID* importAhbInfoPtr =
3279 vk_find_struct<VkImportAndroidHardwareBufferInfoANDROID>(pAllocateInfo);
3280 #else
3281 const void* importAhbInfoPtr = nullptr;
3282 #endif
3283
3284 #if defined(__linux__) && !defined(VK_USE_PLATFORM_ANDROID_KHR)
3285 const VkImportMemoryFdInfoKHR* importFdInfoPtr =
3286 vk_find_struct<VkImportMemoryFdInfoKHR>(pAllocateInfo);
3287 #else
3288 const VkImportMemoryFdInfoKHR* importFdInfoPtr = nullptr;
3289 #endif
3290
3291 #ifdef VK_USE_PLATFORM_FUCHSIA
3292 const VkImportMemoryBufferCollectionFUCHSIA* importBufferCollectionInfoPtr =
3293 vk_find_struct<VkImportMemoryBufferCollectionFUCHSIA>(pAllocateInfo);
3294
3295 const VkImportMemoryZirconHandleInfoFUCHSIA* importVmoInfoPtr =
3296 vk_find_struct<VkImportMemoryZirconHandleInfoFUCHSIA>(pAllocateInfo);
3297 #else
3298 const void* importBufferCollectionInfoPtr = nullptr;
3299 const void* importVmoInfoPtr = nullptr;
3300 #endif // VK_USE_PLATFORM_FUCHSIA
3301
3302 const VkMemoryDedicatedAllocateInfo* dedicatedAllocInfoPtr =
3303 vk_find_struct<VkMemoryDedicatedAllocateInfo>(pAllocateInfo);
3304
3305 // Note for AHardwareBuffers, the Vulkan spec states:
3306 //
3307 // Android hardware buffers have intrinsic width, height, format, and usage
3308 // properties, so Vulkan images bound to memory imported from an Android
3309 // hardware buffer must use dedicated allocations
3310 //
3311 // so any allocation requests with a VkImportAndroidHardwareBufferInfoANDROID
3312 // will necessarily have a VkMemoryDedicatedAllocateInfo. However, the host
3313 // may or may not actually use a dedicated allocation to emulate
3314 // AHardwareBuffers. As such, the VkMemoryDedicatedAllocateInfo is passed to the
3315 // host and the host will decide whether or not to use it.
3316
3317 bool shouldPassThroughDedicatedAllocInfo =
3318 !exportAllocateInfoPtr && !importBufferCollectionInfoPtr && !importVmoInfoPtr;
3319
3320 const VkPhysicalDeviceMemoryProperties& physicalDeviceMemoryProps =
3321 getPhysicalDeviceMemoryProperties(context, device, VK_NULL_HANDLE);
3322
3323 const bool requestedMemoryIsHostVisible =
3324 isHostVisible(&physicalDeviceMemoryProps, pAllocateInfo->memoryTypeIndex);
3325
3326 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
3327 shouldPassThroughDedicatedAllocInfo &= !requestedMemoryIsHostVisible;
3328 #endif  // VK_USE_PLATFORM_ANDROID_KHR || __linux__
3329
3330 if (shouldPassThroughDedicatedAllocInfo && dedicatedAllocInfoPtr) {
3331 dedicatedAllocInfo = vk_make_orphan_copy(*dedicatedAllocInfoPtr);
3332 vk_append_struct(&structChainIter, &dedicatedAllocInfo);
3333 }
3334
3335 // State needed for import/export.
3336 bool exportAhb = false;
3337 bool exportVmo = false;
3338 bool exportDmabuf = false;
3339 bool importAhb = false;
3340 bool importBufferCollection = false;
3341 bool importVmo = false;
3342 bool importDmabuf = false;
3343 (void)exportVmo;
3344
3345 // Even if we export allocate, the underlying operation
3346 // for the host is always going to be an import operation.
3347 // This is also how Intel's implementation works,
3348 // and is generally simpler;
3349 // even in an export allocation,
3350 // we perform AHardwareBuffer allocation
3351 // on the guest side, at this layer,
3352 // and then we attach a new VkDeviceMemory
3353 // to the AHardwareBuffer on the host via an "import" operation.
3354 AHardwareBuffer* ahw = nullptr;
3355
3356 if (exportAllocateInfoPtr) {
3357 exportAhb = exportAllocateInfoPtr->handleTypes &
3358 VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
3359 #ifdef VK_USE_PLATFORM_FUCHSIA
3360 exportVmo = exportAllocateInfoPtr->handleTypes &
3361 VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA;
3362 #endif // VK_USE_PLATFORM_FUCHSIA
3363 exportDmabuf =
3364 exportAllocateInfoPtr->handleTypes & (VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
3365 VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
3366 } else if (importAhbInfoPtr) {
3367 importAhb = true;
3368 } else if (importBufferCollectionInfoPtr) {
3369 importBufferCollection = true;
3370 } else if (importVmoInfoPtr) {
3371 importVmo = true;
3372 }
3373
3374 if (importFdInfoPtr) {
3375 importDmabuf =
3376 (importFdInfoPtr->handleType & (VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
3377 VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT));
3378 }
3379 bool isImport = importAhb || importBufferCollection || importVmo || importDmabuf;
3380
3381 #if defined(VK_USE_PLATFORM_ANDROID_KHR)
3382 if (exportAhb) {
3383 bool hasDedicatedImage =
3384 dedicatedAllocInfoPtr && (dedicatedAllocInfoPtr->image != VK_NULL_HANDLE);
3385 bool hasDedicatedBuffer =
3386 dedicatedAllocInfoPtr && (dedicatedAllocInfoPtr->buffer != VK_NULL_HANDLE);
3387 VkExtent3D imageExtent = {0, 0, 0};
3388 uint32_t imageLayers = 0;
3389 VkFormat imageFormat = VK_FORMAT_UNDEFINED;
3390 VkImageUsageFlags imageUsage = 0;
3391 VkImageCreateFlags imageCreateFlags = 0;
3392 VkDeviceSize bufferSize = 0;
3393 VkDeviceSize allocationInfoAllocSize = finalAllocInfo.allocationSize;
3394
3395 if (hasDedicatedImage) {
3396 AutoLock<RecursiveLock> lock(mLock);
3397
3398 auto it = info_VkImage.find(dedicatedAllocInfoPtr->image);
3399 if (it == info_VkImage.end())
3400 _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(VK_ERROR_INITIALIZATION_FAILED);
3401 const auto& info = it->second;
3402 const auto& imgCi = info.createInfo;
3403
3404 imageExtent = imgCi.extent;
3405 imageLayers = imgCi.arrayLayers;
3406 imageFormat = imgCi.format;
3407 imageUsage = imgCi.usage;
3408 imageCreateFlags = imgCi.flags;
3409 }
3410
3411 if (hasDedicatedBuffer) {
3412 AutoLock<RecursiveLock> lock(mLock);
3413
3414 auto it = info_VkBuffer.find(dedicatedAllocInfoPtr->buffer);
3415 if (it == info_VkBuffer.end())
3416 _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(VK_ERROR_INITIALIZATION_FAILED);
3417 const auto& info = it->second;
3418 const auto& bufCi = info.createInfo;
3419
3420 bufferSize = bufCi.size;
3421 }
3422
3423 VkResult ahbCreateRes = createAndroidHardwareBuffer(
3424 ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->grallocHelper(),
3425 hasDedicatedImage, hasDedicatedBuffer, imageExtent, imageLayers, imageFormat,
3426 imageUsage, imageCreateFlags, bufferSize, allocationInfoAllocSize, &ahw);
3427
3428 if (ahbCreateRes != VK_SUCCESS) {
3429 _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(ahbCreateRes);
3430 }
3431 }
3432
3433 if (importAhb) {
3434 ahw = importAhbInfoPtr->buffer;
3435 // We still need to acquire the AHardwareBuffer.
3436 importAndroidHardwareBuffer(
3437 ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->grallocHelper(),
3438 importAhbInfoPtr, nullptr);
3439 }
3440
3441 if (ahw) {
3442 auto* gralloc =
3443 ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->grallocHelper();
3444
3445 const uint32_t hostHandle = gralloc->getHostHandle(ahw);
3446 if (gralloc->getFormat(ahw) == AHARDWAREBUFFER_FORMAT_BLOB &&
3447 !gralloc->treatBlobAsImage()) {
3448 importBufferInfo.buffer = hostHandle;
3449 vk_append_struct(&structChainIter, &importBufferInfo);
3450 } else {
3451 importCbInfo.colorBuffer = hostHandle;
3452 vk_append_struct(&structChainIter, &importCbInfo);
3453 }
3454 }
3455 #endif
3456 zx_handle_t vmo_handle = ZX_HANDLE_INVALID;
3457
3458 #ifdef VK_USE_PLATFORM_FUCHSIA
3459 if (importBufferCollection) {
3460 const auto& collection =
3461 *reinterpret_cast<fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(
3462 importBufferCollectionInfoPtr->collection);
3463 auto result = collection->WaitForBuffersAllocated();
3464 if (!result.ok() || result->status != ZX_OK) {
3465 mesa_loge("WaitForBuffersAllocated failed: %d %d", result.status(),
3466 GET_STATUS_SAFE(result, status));
3467 _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(VK_ERROR_INITIALIZATION_FAILED);
3468 }
3469 fuchsia_sysmem::wire::BufferCollectionInfo2& info = result->buffer_collection_info;
3470 uint32_t index = importBufferCollectionInfoPtr->index;
3471 if (index >= info.buffer_count) {
3472 mesa_loge("Invalid buffer index: %d", index);
3473 _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(VK_ERROR_INITIALIZATION_FAILED);
3474 }
3475 vmo_handle = info.buffers[index].vmo.release();
3476 }
3477
3478 if (importVmo) {
3479 vmo_handle = importVmoInfoPtr->handle;
3480 }
3481
3482 if (exportVmo) {
3483 bool hasDedicatedImage =
3484 dedicatedAllocInfoPtr && (dedicatedAllocInfoPtr->image != VK_NULL_HANDLE);
3485 bool hasDedicatedBuffer =
3486 dedicatedAllocInfoPtr && (dedicatedAllocInfoPtr->buffer != VK_NULL_HANDLE);
3487
3488 if (hasDedicatedImage && hasDedicatedBuffer) {
3489 mesa_loge(
3490 "Invalid VkMemoryDedicatedAllocationInfo: At least one "
3491 "of image and buffer must be VK_NULL_HANDLE.");
3492 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3493 }
3494
3495 const VkImageCreateInfo* pImageCreateInfo = nullptr;
3496
3497 VkBufferConstraintsInfoFUCHSIA bufferConstraintsInfo = {
3498 .sType = VK_STRUCTURE_TYPE_BUFFER_CONSTRAINTS_INFO_FUCHSIA,
3499 .pNext = nullptr,
3500 .createInfo = {},
3501 .requiredFormatFeatures = 0,
3502 .bufferCollectionConstraints =
3503 VkBufferCollectionConstraintsInfoFUCHSIA{
3504 .sType = VK_STRUCTURE_TYPE_BUFFER_COLLECTION_CONSTRAINTS_INFO_FUCHSIA,
3505 .pNext = nullptr,
3506 .minBufferCount = 1,
3507 .maxBufferCount = 0,
3508 .minBufferCountForCamping = 0,
3509 .minBufferCountForDedicatedSlack = 0,
3510 .minBufferCountForSharedSlack = 0,
3511 },
3512 };
3513 const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo = nullptr;
3514
3515 if (hasDedicatedImage) {
3516 AutoLock<RecursiveLock> lock(mLock);
3517
3518 auto it = info_VkImage.find(dedicatedAllocInfoPtr->image);
3519 if (it == info_VkImage.end()) return VK_ERROR_INITIALIZATION_FAILED;
3520 const auto& imageInfo = it->second;
3521
3522 pImageCreateInfo = &imageInfo.createInfo;
3523 }
3524
3525 if (hasDedicatedBuffer) {
3526 AutoLock<RecursiveLock> lock(mLock);
3527
3528 auto it = info_VkBuffer.find(dedicatedAllocInfoPtr->buffer);
3529 if (it == info_VkBuffer.end()) return VK_ERROR_INITIALIZATION_FAILED;
3530 const auto& bufferInfo = it->second;
3531
3532 bufferConstraintsInfo.createInfo = bufferInfo.createInfo;
3533 pBufferConstraintsInfo = &bufferConstraintsInfo;
3534 }
3535
3536 hasDedicatedImage =
3537 hasDedicatedImage && getBufferCollectionConstraintsVulkanImageUsage(pImageCreateInfo);
3538 hasDedicatedBuffer = hasDedicatedBuffer && getBufferCollectionConstraintsVulkanBufferUsage(
3539 pBufferConstraintsInfo);
3540
3541 if (hasDedicatedImage || hasDedicatedBuffer) {
3542 auto token_ends = fidl::CreateEndpoints<::fuchsia_sysmem::BufferCollectionToken>();
3543 if (!token_ends.is_ok()) {
3544 mesa_loge("zx_channel_create failed: %d", token_ends.status_value());
3545 abort();
3546 }
3547
3548 {
3549 auto result =
3550 mSysmemAllocator->AllocateSharedCollection(std::move(token_ends->server));
3551 if (!result.ok()) {
3552 mesa_loge("AllocateSharedCollection failed: %d", result.status());
3553 abort();
3554 }
3555 }
3556
3557 auto collection_ends = fidl::CreateEndpoints<::fuchsia_sysmem::BufferCollection>();
3558 if (!collection_ends.is_ok()) {
3559 mesa_loge("zx_channel_create failed: %d", collection_ends.status_value());
3560 abort();
3561 }
3562
3563 {
3564 auto result = mSysmemAllocator->BindSharedCollection(
3565 std::move(token_ends->client), std::move(collection_ends->server));
3566 if (!result.ok()) {
3567 mesa_loge("BindSharedCollection failed: %d", result.status());
3568 abort();
3569 }
3570 }
3571
3572 fidl::WireSyncClient<fuchsia_sysmem::BufferCollection> collection(
3573 std::move(collection_ends->client));
3574 if (hasDedicatedImage) {
3575 // TODO(fxbug.dev/42172354): Use setBufferCollectionImageConstraintsFUCHSIA.
3576 VkResult res = setBufferCollectionConstraintsFUCHSIA(enc, device, &collection,
3577 pImageCreateInfo);
3578 if (res == VK_ERROR_FORMAT_NOT_SUPPORTED) {
3579 mesa_loge("setBufferCollectionConstraints failed: format %u is not supported",
3580 pImageCreateInfo->format);
3581 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3582 }
3583 if (res != VK_SUCCESS) {
3584 mesa_loge("setBufferCollectionConstraints failed: %d", res);
3585 abort();
3586 }
3587 }
3588
3589 if (hasDedicatedBuffer) {
3590 VkResult res = setBufferCollectionBufferConstraintsFUCHSIA(&collection,
3591 pBufferConstraintsInfo);
3592 if (res != VK_SUCCESS) {
3593 mesa_loge("setBufferCollectionBufferConstraints failed: %d", res);
3594 abort();
3595 }
3596 }
3597
3598 {
3599 auto result = collection->WaitForBuffersAllocated();
3600 if (result.ok() && result->status == ZX_OK) {
3601 fuchsia_sysmem::wire::BufferCollectionInfo2& info =
3602 result->buffer_collection_info;
3603 if (!info.buffer_count) {
3604 mesa_loge(
3605 "WaitForBuffersAllocated returned "
3606 "invalid count: %d",
3607 info.buffer_count);
3608 abort();
3609 }
3610 vmo_handle = info.buffers[0].vmo.release();
3611 } else {
3612 mesa_loge("WaitForBuffersAllocated failed: %d %d", result.status(),
3613 GET_STATUS_SAFE(result, status));
3614 abort();
3615 }
3616 }
3617
3618 collection->Close();
3619
3620 zx::vmo vmo_copy;
3621 zx_status_t status = zx_handle_duplicate(vmo_handle, ZX_RIGHT_SAME_RIGHTS,
3622 vmo_copy.reset_and_get_address());
3623 if (status != ZX_OK) {
3624 mesa_loge("Failed to duplicate VMO: %d", status);
3625 abort();
3626 }
3627
3628 if (pImageCreateInfo) {
3629 // Only device-local images need a color buffer created here; for
3630 // host-visible images, the color buffer is already created when
3631 // sysmem allocates the memory. We use the |tiling| field of the
3632 // image create info to determine whether the image uses
3633 // host-visible memory.
3634 bool isLinear = pImageCreateInfo->tiling == VK_IMAGE_TILING_LINEAR;
3635 if (!isLinear) {
3636 fuchsia_hardware_goldfish::wire::ColorBufferFormatType format;
3637 switch (pImageCreateInfo->format) {
3638 case VK_FORMAT_B8G8R8A8_SINT:
3639 case VK_FORMAT_B8G8R8A8_UNORM:
3640 case VK_FORMAT_B8G8R8A8_SRGB:
3641 case VK_FORMAT_B8G8R8A8_SNORM:
3642 case VK_FORMAT_B8G8R8A8_SSCALED:
3643 case VK_FORMAT_B8G8R8A8_USCALED:
3644 format = fuchsia_hardware_goldfish::wire::ColorBufferFormatType::kBgra;
3645 break;
3646 case VK_FORMAT_R8G8B8A8_SINT:
3647 case VK_FORMAT_R8G8B8A8_UNORM:
3648 case VK_FORMAT_R8G8B8A8_SRGB:
3649 case VK_FORMAT_R8G8B8A8_SNORM:
3650 case VK_FORMAT_R8G8B8A8_SSCALED:
3651 case VK_FORMAT_R8G8B8A8_USCALED:
3652 format = fuchsia_hardware_goldfish::wire::ColorBufferFormatType::kRgba;
3653 break;
3654 case VK_FORMAT_R8_UNORM:
3655 case VK_FORMAT_R8_UINT:
3656 case VK_FORMAT_R8_USCALED:
3657 case VK_FORMAT_R8_SNORM:
3658 case VK_FORMAT_R8_SINT:
3659 case VK_FORMAT_R8_SSCALED:
3660 case VK_FORMAT_R8_SRGB:
3661 format =
3662 fuchsia_hardware_goldfish::wire::ColorBufferFormatType::kLuminance;
3663 break;
3664 case VK_FORMAT_R8G8_UNORM:
3665 case VK_FORMAT_R8G8_UINT:
3666 case VK_FORMAT_R8G8_USCALED:
3667 case VK_FORMAT_R8G8_SNORM:
3668 case VK_FORMAT_R8G8_SINT:
3669 case VK_FORMAT_R8G8_SSCALED:
3670 case VK_FORMAT_R8G8_SRGB:
3671 format = fuchsia_hardware_goldfish::wire::ColorBufferFormatType::kRg;
3672 break;
3673 default:
3674 mesa_loge("Unsupported format: %d", pImageCreateInfo->format);
3675 abort();
3676 }
3677
3678 fidl::Arena arena;
3679 fuchsia_hardware_goldfish::wire::CreateColorBuffer2Params createParams(arena);
3680 createParams.set_width(pImageCreateInfo->extent.width)
3681 .set_height(pImageCreateInfo->extent.height)
3682 .set_format(format)
3683 .set_memory_property(
3684 fuchsia_hardware_goldfish::wire::kMemoryPropertyDeviceLocal);
3685
3686 auto result = mControlDevice->CreateColorBuffer2(std::move(vmo_copy),
3687 std::move(createParams));
3688 if (!result.ok() || result->res != ZX_OK) {
3689 if (result.ok() && result->res == ZX_ERR_ALREADY_EXISTS) {
3690 mesa_logd(
3691 "CreateColorBuffer: color buffer already "
3692 "exists\n");
3693 } else {
3694 mesa_loge("CreateColorBuffer failed: %d:%d", result.status(),
3695 GET_STATUS_SAFE(result, res));
3696 abort();
3697 }
3698 }
3699 }
3700 }
3701
3702 if (pBufferConstraintsInfo) {
3703 fidl::Arena arena;
3704 fuchsia_hardware_goldfish::wire::CreateBuffer2Params createParams(arena);
3705 createParams.set_size(arena, pBufferConstraintsInfo->createInfo.size)
3706 .set_memory_property(
3707 fuchsia_hardware_goldfish::wire::kMemoryPropertyDeviceLocal);
3708
3709 auto result =
3710 mControlDevice->CreateBuffer2(std::move(vmo_copy), std::move(createParams));
3711 if (!result.ok() || result->is_error()) {
3712 mesa_loge("CreateBuffer2 failed: %d:%d", result.status(),
3713 GET_STATUS_SAFE(result, error_value()));
3714 abort();
3715 }
3716 }
3717 } else {
3718 mesa_logw(
3719 "Dedicated image / buffer not available. Cannot create "
3720 "BufferCollection to export VMOs.");
3721 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3722 }
3723 }
3724
3725 if (vmo_handle != ZX_HANDLE_INVALID) {
3726 zx::vmo vmo_copy;
3727 zx_status_t status =
3728 zx_handle_duplicate(vmo_handle, ZX_RIGHT_SAME_RIGHTS, vmo_copy.reset_and_get_address());
3729 if (status != ZX_OK) {
3730 mesa_loge("Failed to duplicate VMO: %d", status);
3731 abort();
3732 }
3733 zx_status_t status2 = ZX_OK;
3734
3735 auto result = mControlDevice->GetBufferHandle(std::move(vmo_copy));
3736 if (!result.ok() || result->res != ZX_OK) {
3737 mesa_loge("GetBufferHandle failed: %d:%d", result.status(),
3738 GET_STATUS_SAFE(result, res));
3739 } else {
3740 fuchsia_hardware_goldfish::wire::BufferHandleType handle_type = result->type;
3741 uint32_t buffer_handle = result->id;
3742
3743 if (handle_type == fuchsia_hardware_goldfish::wire::BufferHandleType::kBuffer) {
3744 importBufferInfo.buffer = buffer_handle;
3745 vk_append_struct(&structChainIter, &importBufferInfo);
3746 } else {
3747 importCbInfo.colorBuffer = buffer_handle;
3748 vk_append_struct(&structChainIter, &importCbInfo);
3749 }
3750 }
3751 }
3752 #endif
3753
3754 VirtGpuResourcePtr colorBufferBlob = nullptr;
3755 #if defined(LINUX_GUEST_BUILD)
3756 if (exportDmabuf) {
3757 VirtGpuDevice* instance = VirtGpuDevice::getInstance();
3758 bool hasDedicatedImage =
3759 dedicatedAllocInfoPtr && (dedicatedAllocInfoPtr->image != VK_NULL_HANDLE);
3760 bool hasDedicatedBuffer =
3761 dedicatedAllocInfoPtr && (dedicatedAllocInfoPtr->buffer != VK_NULL_HANDLE);
3762
3763 if (hasDedicatedImage) {
3764 VkImageCreateInfo imageCreateInfo;
3765 bool isDmaBufImage = false;
3766 {
3767 AutoLock<RecursiveLock> lock(mLock);
3768
3769 auto it = info_VkImage.find(dedicatedAllocInfoPtr->image);
3770 if (it == info_VkImage.end()) return VK_ERROR_INITIALIZATION_FAILED;
3771 const auto& imageInfo = it->second;
3772
3773 imageCreateInfo = imageInfo.createInfo;
3774 isDmaBufImage = imageInfo.isDmaBufImage;
3775 }
3776
3777 // TODO (b/326956485): Support DRM format modifiers for dmabuf memory
3778 // For now, can only externalize memory for linear images
3779 if (isDmaBufImage) {
3780 const VkImageSubresource imageSubresource = {
3781 .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
3782 .mipLevel = 0,
3783 .arrayLayer = 0,
3784 };
3785 VkSubresourceLayout subResourceLayout;
3786 on_vkGetImageSubresourceLayout(context, device, dedicatedAllocInfoPtr->image,
3787 &imageSubresource, &subResourceLayout);
3788 if (!subResourceLayout.rowPitch) {
3789 mesa_loge("%s: Failed to query stride for VirtGpu resource creation.");
3790 return VK_ERROR_INITIALIZATION_FAILED;
3791 }
3792
3793 uint32_t virglFormat = gfxstream::vk::getVirglFormat(imageCreateInfo.format);
3794 if (!virglFormat) {
3795 mesa_loge("Unsupported VK format for VirtGpu resource, vkFormat: 0x%x",
3796 imageCreateInfo.format);
3797 return VK_ERROR_FORMAT_NOT_SUPPORTED;
3798 }
3799 const uint32_t target = PIPE_TEXTURE_2D;
3800 uint32_t bind = VIRGL_BIND_RENDER_TARGET;
3801 if (VK_IMAGE_TILING_LINEAR == imageCreateInfo.tiling) {
3802 bind |= VIRGL_BIND_LINEAR;
3803 }
3804 colorBufferBlob = instance->createResource(
3805 imageCreateInfo.extent.width, imageCreateInfo.extent.height,
3806 subResourceLayout.rowPitch, virglFormat, target, bind);
3807 if (!colorBufferBlob) {
3808 mesa_loge("Failed to create colorBuffer resource for Image memory");
3809 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3810 }
3811 if (!colorBufferBlob->wait()) {
3812 mesa_loge("Failed to wait for colorBuffer resource for Image memory");
3813 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3814 }
3815 } else {
3816 mesa_logw(
3817 "The VkMemoryDedicatedAllocateInfo::image associated with VkDeviceMemory "
3818 "allocation cannot be used to create exportable resource "
3819 "(VkExportMemoryAllocateInfo).\n");
3820 }
3821 } else if (hasDedicatedBuffer) {
3822 mesa_logw(
3823 "VkDeviceMemory allocated with VkMemoryDedicatedAllocateInfo::buffer cannot be "
3824 "exported (VkExportMemoryAllocateInfo)");
3825 } else {
3826 mesa_logw(
3827 "VkDeviceMemory is not exportable (VkExportMemoryAllocateInfo). Requires "
3828 "VkMemoryDedicatedAllocateInfo::image to create external resource.");
3829 }
3830 }
3831
3832 if (importDmabuf) {
3833 VirtGpuExternalHandle importHandle = {};
3834 importHandle.osHandle = importFdInfoPtr->fd;
3835 importHandle.type = kMemHandleDmabuf;
3836
3837 auto instance = VirtGpuDevice::getInstance();
3838 colorBufferBlob = instance->importBlob(importHandle);
3839 if (!colorBufferBlob) {
3840 mesa_loge("%s: Failed to import colorBuffer resource\n", __func__);
3841 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3842 }
3843 }
3844
3845 if (colorBufferBlob) {
3846 importCbInfo.colorBuffer = colorBufferBlob->getResourceHandle();
3847 vk_append_struct(&structChainIter, &importCbInfo);
3848 }
3849 #endif
3850
3851 if (ahw || colorBufferBlob || !requestedMemoryIsHostVisible) {
3852 input_result =
3853 enc->vkAllocateMemory(device, &finalAllocInfo, pAllocator, pMemory, true /* do lock */);
3854
3855 if (input_result != VK_SUCCESS) _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(input_result);
3856
3857 VkDeviceSize allocationSize = finalAllocInfo.allocationSize;
3858 setDeviceMemoryInfo(device, *pMemory, 0, nullptr, finalAllocInfo.memoryTypeIndex, ahw,
3859 isImport, vmo_handle, colorBufferBlob);
3860
3861 _RETURN_SUCCESS_WITH_DEVICE_MEMORY_REPORT;
3862 }
3863
3864 #ifdef VK_USE_PLATFORM_FUCHSIA
3865 if (vmo_handle != ZX_HANDLE_INVALID) {
3866 input_result =
3867 enc->vkAllocateMemory(device, &finalAllocInfo, pAllocator, pMemory, true /* do lock */);
3868
3869 // Get VMO handle rights, and only use allowed rights to map the
3870 // host memory.
3871 zx_info_handle_basic handle_info;
3872 zx_status_t status = zx_object_get_info(vmo_handle, ZX_INFO_HANDLE_BASIC, &handle_info,
3873 sizeof(handle_info), nullptr, nullptr);
3874 if (status != ZX_OK) {
3875 mesa_loge("%s: cannot get vmo object info: vmo = %u status: %d.", __func__, vmo_handle,
3876 status);
3877 return VK_ERROR_OUT_OF_HOST_MEMORY;
3878 }
3879
3880 zx_vm_option_t vm_permission = 0u;
3881 vm_permission |= (handle_info.rights & ZX_RIGHT_READ) ? ZX_VM_PERM_READ : 0;
3882 vm_permission |= (handle_info.rights & ZX_RIGHT_WRITE) ? ZX_VM_PERM_WRITE : 0;
3883
3884 zx_vaddr_t addr;
3885 status = zx_vmar_map(zx_vmar_root_self(), vm_permission, 0, vmo_handle, 0,
3886 finalAllocInfo.allocationSize, &addr);
3887 if (status != ZX_OK) {
3888 mesa_loge("%s: cannot map vmar: status %d.", __func__, status);
3889 return VK_ERROR_OUT_OF_HOST_MEMORY;
3890 }
3891
3892 setDeviceMemoryInfo(device, *pMemory, finalAllocInfo.allocationSize,
3893 reinterpret_cast<uint8_t*>(addr), finalAllocInfo.memoryTypeIndex,
3894 /*ahw=*/nullptr, isImport, vmo_handle, /*blobPtr=*/nullptr);
3895 return VK_SUCCESS;
3896 }
3897 #endif
3898
3899 // Host visible memory with direct mapping
3900 VkResult result = getCoherentMemory(&finalAllocInfo, enc, device, pMemory);
3901 if (result != VK_SUCCESS) return result;
3902
3903 _RETURN_SUCCESS_WITH_DEVICE_MEMORY_REPORT;
3904 }
3905
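// Emits the device memory report, tears down any Fuchsia VMO mapping, then
// frees either directly on the host (memory without a coherent block) or via
// freeCoherentMemoryLocked(). The lock must be dropped before the last
// CoherentMemory reference is released, since its destructor calls back into
// the encoder.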
3906 void ResourceTracker::on_vkFreeMemory(void* context, VkDevice device, VkDeviceMemory memory,
3907 const VkAllocationCallbacks* pAllocator) {
3908 AutoLock<RecursiveLock> lock(mLock);
3909
3910 auto it = info_VkDeviceMemory.find(memory);
3911 if (it == info_VkDeviceMemory.end()) return;
3912 auto& info = it->second;
3913 uint64_t memoryObjectId = (uint64_t)(void*)memory;
3914 #ifdef VK_USE_PLATFORM_ANDROID_KHR
3915 if (info.ahw) {
3916 memoryObjectId = getAHardwareBufferId(info.ahw);
3917 }
3918 #endif
3919
3920 emitDeviceMemoryReport(info_VkDevice[device],
3921 info.imported ? VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_UNIMPORT_EXT
3922 : VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_FREE_EXT,
3923 memoryObjectId, 0 /* size */, VK_OBJECT_TYPE_DEVICE_MEMORY,
3924 (uint64_t)(void*)memory);
3925
3926 #ifdef VK_USE_PLATFORM_FUCHSIA
3927 if (info.vmoHandle && info.ptr) {
3928 zx_status_t status = zx_vmar_unmap(
3929 zx_vmar_root_self(), reinterpret_cast<zx_vaddr_t>(info.ptr), info.allocationSize);
3930 if (status != ZX_OK) {
3931 mesa_loge("%s: Cannot unmap ptr: status %d", status);
3932 }
3933 info.ptr = nullptr;
3934 }
3935 #endif
3936
3937 if (!info.coherentMemory) {
3938 lock.unlock();
3939 VkEncoder* enc = (VkEncoder*)context;
3940 enc->vkFreeMemory(device, memory, pAllocator, true /* do lock */);
3941 return;
3942 }
3943
3944 auto coherentMemory = freeCoherentMemoryLocked(memory, info);
3945
3946 // We have to release the lock before we could possibly free a
3947 // CoherentMemory, because that will call into VkEncoder, which
3948 // shouldn't be called when the lock is held.
3949 lock.unlock();
3950 coherentMemory = nullptr;
3951 }
3952
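// With deferred mapping, the host blob for an allocation is created lazily
// here on first map: vkGetBlobGOOGLE publishes the blob id, after which a
// mappable host3d blob is created and suballocated to produce the guest
// pointer handed back in *ppData.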
3953 VkResult ResourceTracker::on_vkMapMemory(void* context, VkResult host_result, VkDevice device,
3954 VkDeviceMemory memory, VkDeviceSize offset,
3955 VkDeviceSize size, VkMemoryMapFlags, void** ppData) {
3956 if (host_result != VK_SUCCESS) {
3957 mesa_loge("%s: Host failed to map", __func__);
3958 return host_result;
3959 }
3960
3961 AutoLock<RecursiveLock> lock(mLock);
3962
3963 auto deviceMemoryInfoIt = info_VkDeviceMemory.find(memory);
3964 if (deviceMemoryInfoIt == info_VkDeviceMemory.end()) {
3965 mesa_loge("%s: Failed to find VkDeviceMemory.", __func__);
3966 return VK_ERROR_MEMORY_MAP_FAILED;
3967 }
3968 auto& deviceMemoryInfo = deviceMemoryInfoIt->second;
3969
3970 if (deviceMemoryInfo.blobId && !deviceMemoryInfo.coherentMemory &&
3971 !mCaps.params[kParamCreateGuestHandle]) {
3972 // NOTE: must not hold lock while calling into the encoder.
3973 lock.unlock();
3974 VkEncoder* enc = (VkEncoder*)context;
3975 VkResult vkResult = enc->vkGetBlobGOOGLE(device, memory, /*doLock*/ false);
3976 if (vkResult != VK_SUCCESS) {
3977 mesa_loge("%s: Failed to vkGetBlobGOOGLE().", __func__);
3978 return vkResult;
3979 }
3980 lock.lock();
3981
3982 // NOTE: deviceMemoryInfoIt potentially invalidated but deviceMemoryInfo still okay.
3983
3984 struct VirtGpuCreateBlob createBlob = {};
3985 createBlob.blobMem = kBlobMemHost3d;
3986 createBlob.flags = kBlobFlagMappable;
3987 createBlob.blobId = deviceMemoryInfo.blobId;
3988 createBlob.size = deviceMemoryInfo.coherentMemorySize;
3989
3990 auto blob = VirtGpuDevice::getInstance()->createBlob(createBlob);
3991 if (!blob) return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3992
3993 VirtGpuResourceMappingPtr mapping = blob->createMapping();
3994 if (!mapping) return VK_ERROR_OUT_OF_DEVICE_MEMORY;
3995
3996 auto coherentMemory =
3997 std::make_shared<CoherentMemory>(mapping, createBlob.size, device, memory);
3998
3999 uint8_t* ptr;
4000 uint64_t offset;
4001 coherentMemory->subAllocate(deviceMemoryInfo.allocationSize, &ptr, offset);
4002
4003 deviceMemoryInfo.coherentMemoryOffset = offset;
4004 deviceMemoryInfo.coherentMemory = coherentMemory;
4005 deviceMemoryInfo.ptr = ptr;
4006 }
4007
4008 if (!deviceMemoryInfo.ptr) {
4009 mesa_loge("%s: VkDeviceMemory has nullptr.", __func__);
4010 return VK_ERROR_MEMORY_MAP_FAILED;
4011 }
4012
4013 if (size != VK_WHOLE_SIZE && (deviceMemoryInfo.ptr + offset + size >
4014 deviceMemoryInfo.ptr + deviceMemoryInfo.allocationSize)) {
4015 mesa_loge(
4016 "%s: size is too big. alloc size 0x%llx while we wanted offset 0x%llx size 0x%llx "
4017 "total 0x%llx",
4018 __func__, (unsigned long long)deviceMemoryInfo.allocationSize,
4019 (unsigned long long)offset, (unsigned long long)size, (unsigned long long)(offset + size));
4020 return VK_ERROR_MEMORY_MAP_FAILED;
4021 }
4022
4023 *ppData = deviceMemoryInfo.ptr + offset;
4024
4025 return host_result;
4026 }
4027
4028 void ResourceTracker::on_vkUnmapMemory(void*, VkDevice, VkDeviceMemory) {
4029 // no-op
4030 }
4031
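// Applies guest-side fixups to vkGetImageMemoryRequirements2 results; for
// external images, the VkMemoryDedicatedRequirements in the pNext chain is
// also rewritten.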
4032 void ResourceTracker::transformImageMemoryRequirements2ForGuest(VkImage image,
4033 VkMemoryRequirements2* reqs2) {
4034 AutoLock<RecursiveLock> lock(mLock);
4035
4036 auto it = info_VkImage.find(image);
4037 if (it == info_VkImage.end()) return;
4038
4039 auto& info = it->second;
4040
4041 if (!info.external || !info.externalCreateInfo.handleTypes) {
4042 transformImageMemoryRequirementsForGuestLocked(image, &reqs2->memoryRequirements);
4043 return;
4044 }
4045
4046 transformImageMemoryRequirementsForGuestLocked(image, &reqs2->memoryRequirements);
4047
4048 VkMemoryDedicatedRequirements* dedicatedReqs =
4049 vk_find_struct<VkMemoryDedicatedRequirements>(reqs2);
4050
4051 if (!dedicatedReqs) return;
4052
4053 transformExternalResourceMemoryDedicatedRequirementsForGuest(dedicatedReqs);
4054 }
4055
4056 void ResourceTracker::transformBufferMemoryRequirements2ForGuest(VkBuffer buffer,
4057 VkMemoryRequirements2* reqs2) {
4058 AutoLock<RecursiveLock> lock(mLock);
4059
4060 auto it = info_VkBuffer.find(buffer);
4061 if (it == info_VkBuffer.end()) return;
4062
4063 auto& info = it->second;
4064
4065 if (!info.external || !info.externalCreateInfo.handleTypes) {
4066 return;
4067 }
4068
4069 VkMemoryDedicatedRequirements* dedicatedReqs =
4070 vk_find_struct<VkMemoryDedicatedRequirements>(reqs2);
4071
4072 if (!dedicatedReqs) return;
4073
4074 transformExternalResourceMemoryDedicatedRequirementsForGuest(dedicatedReqs);
4075 }
4076
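// vkCreateImage hook. Builds a host-safe copy of the create info before
// encoding: queue family indices are dropped for non-concurrent sharing,
// Android external formats are resolved to concrete VkFormats, Fuchsia
// sysmem-backed images are forced mutable and possibly linear, and Linux
// dmabuf-exportable images are flagged for later dedicated-allocation
// handling.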
4077 VkResult ResourceTracker::on_vkCreateImage(void* context, VkResult, VkDevice device,
4078 const VkImageCreateInfo* pCreateInfo,
4079 const VkAllocationCallbacks* pAllocator,
4080 VkImage* pImage) {
4081 VkEncoder* enc = (VkEncoder*)context;
4082
4083 VkImageCreateInfo localCreateInfo = vk_make_orphan_copy(*pCreateInfo);
4084 if (localCreateInfo.sharingMode != VK_SHARING_MODE_CONCURRENT) {
4085 localCreateInfo.queueFamilyIndexCount = 0;
4086 localCreateInfo.pQueueFamilyIndices = nullptr;
4087 }
4088
4089 vk_struct_chain_iterator structChainIter = vk_make_chain_iterator(&localCreateInfo);
4090 VkExternalMemoryImageCreateInfo localExtImgCi;
4091
4092 const VkExternalMemoryImageCreateInfo* extImgCiPtr =
4093 vk_find_struct<VkExternalMemoryImageCreateInfo>(pCreateInfo);
4094
4095 if (extImgCiPtr) {
4096 localExtImgCi = vk_make_orphan_copy(*extImgCiPtr);
4097 vk_append_struct(&structChainIter, &localExtImgCi);
4098 }
4099
4100 #if defined(LINUX_GUEST_BUILD)
4101 bool isDmaBufImage = false;
4102 if (extImgCiPtr &&
4103 (extImgCiPtr->handleTypes & VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT)) {
4104 const wsi_image_create_info* wsiImageCi =
4105 vk_find_struct<wsi_image_create_info>(pCreateInfo);
4106 if (wsiImageCi) {
4107 if (!wsiImageCi->scanout) {
4108 mesa_logd(
4109 "gfxstream only supports native DRM image scanout path for Linux WSI "
4110 "(wsi_image_create_info::scanout)");
4111 return VK_ERROR_INITIALIZATION_FAILED;
4112 }
4113 // Linux WSI creates swapchain images with VK_IMAGE_CREATE_ALIAS_BIT. Vulkan spec
4114 // states: "If the pNext chain includes a VkExternalMemoryImageCreateInfo or
4115 // VkExternalMemoryImageCreateInfoNV structure whose handleTypes member is not 0, it is
4116 // as if VK_IMAGE_CREATE_ALIAS_BIT is set." To avoid flag mismatches on host driver,
4117 // remove the VK_IMAGE_CREATE_ALIAS_BIT here.
4118 localCreateInfo.flags &= ~VK_IMAGE_CREATE_ALIAS_BIT;
4119 // TODO (b/326956485): DRM format modifiers to support client/compositor awareness
4120 // For now, override WSI images to use linear tiling, as compositor will default to
4121 // DRM_FORMAT_MOD_LINEAR.
4122 localCreateInfo.tiling = VK_IMAGE_TILING_LINEAR;
4123 }
4124 isDmaBufImage = true;
4125 }
4126 #endif
4127
4128 #ifdef VK_USE_PLATFORM_ANDROID_KHR
4129 VkNativeBufferANDROID localAnb;
4130 const VkNativeBufferANDROID* anbInfoPtr = vk_find_struct<VkNativeBufferANDROID>(pCreateInfo);
4131 if (anbInfoPtr) {
4132 localAnb = vk_make_orphan_copy(*anbInfoPtr);
4133 vk_append_struct(&structChainIter, &localAnb);
4134 }
4135
4136 VkExternalFormatANDROID localExtFormatAndroid;
4137 const VkExternalFormatANDROID* extFormatAndroidPtr =
4138 vk_find_struct<VkExternalFormatANDROID>(pCreateInfo);
4139 if (extFormatAndroidPtr) {
4140 localExtFormatAndroid = vk_make_orphan_copy(*extFormatAndroidPtr);
4141
4142 // Do not append VkExternalFormatANDROID; instead, replace the
4143 // localCreateInfo format with the Vulkan format corresponding to
4144 // the external format.
4145 if (extFormatAndroidPtr->externalFormat) {
4146 localCreateInfo.format = vk_format_from_fourcc(extFormatAndroidPtr->externalFormat);
4147 if (localCreateInfo.format == VK_FORMAT_UNDEFINED)
4148 return VK_ERROR_VALIDATION_FAILED_EXT;
4149 }
4150 }
4151 #endif
4152
4153 #ifdef VK_USE_PLATFORM_FUCHSIA
4154 const VkBufferCollectionImageCreateInfoFUCHSIA* extBufferCollectionPtr =
4155 vk_find_struct<VkBufferCollectionImageCreateInfoFUCHSIA>(pCreateInfo);
4156
4157 bool isSysmemBackedMemory = false;
4158
4159 if (extImgCiPtr &&
4160 (extImgCiPtr->handleTypes & VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA)) {
4161 isSysmemBackedMemory = true;
4162 }
4163
4164 if (extBufferCollectionPtr) {
4165 const auto& collection =
4166 *reinterpret_cast<fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(
4167 extBufferCollectionPtr->collection);
4168 uint32_t index = extBufferCollectionPtr->index;
4169 zx::vmo vmo;
4170
4171 fuchsia_sysmem::wire::BufferCollectionInfo2 info;
4172
4173 auto result = collection->WaitForBuffersAllocated();
4174 if (result.ok() && result->status == ZX_OK) {
4175 info = std::move(result->buffer_collection_info);
4176 if (index < info.buffer_count && info.settings.has_image_format_constraints) {
4177 vmo = std::move(info.buffers[index].vmo);
4178 }
4179 } else {
4180 mesa_loge("WaitForBuffersAllocated failed: %d %d", result.status(),
4181 GET_STATUS_SAFE(result, status));
4182 }
4183
4184 if (vmo.is_valid()) {
4185 zx::vmo vmo_dup;
4186 if (zx_status_t status = vmo.duplicate(ZX_RIGHT_SAME_RIGHTS, &vmo_dup);
4187 status != ZX_OK) {
4188 mesa_loge("%s: zx_vmo_duplicate failed: %d", __func__, status);
4189 abort();
4190 }
4191
4192 auto buffer_handle_result = mControlDevice->GetBufferHandle(std::move(vmo_dup));
4193 if (!buffer_handle_result.ok()) {
4194 mesa_loge("%s: GetBufferHandle FIDL error: %d", __func__,
4195 buffer_handle_result.status());
4196 abort();
4197 }
4198 if (buffer_handle_result.value().res == ZX_OK) {
4199 // Buffer handle already exists.
4200                 // If it is a ColorBuffer, this is a no-op; otherwise, return an error.
4201 if (buffer_handle_result.value().type !=
4202 fuchsia_hardware_goldfish::wire::BufferHandleType::kColorBuffer) {
4203 mesa_loge("%s: BufferHandle %u is not a ColorBuffer", __func__,
4204 buffer_handle_result.value().id);
4205 return VK_ERROR_OUT_OF_HOST_MEMORY;
4206 }
4207 } else if (buffer_handle_result.value().res == ZX_ERR_NOT_FOUND) {
4208 // Buffer handle not found. Create ColorBuffer based on buffer settings.
4209 auto format = info.settings.image_format_constraints.pixel_format.type ==
4210 fuchsia_sysmem::wire::PixelFormatType::kR8G8B8A8
4211 ? fuchsia_hardware_goldfish::wire::ColorBufferFormatType::kRgba
4212 : fuchsia_hardware_goldfish::wire::ColorBufferFormatType::kBgra;
4213
4214 uint32_t memory_property =
4215 info.settings.buffer_settings.heap ==
4216 fuchsia_sysmem::wire::HeapType::kGoldfishDeviceLocal
4217 ? fuchsia_hardware_goldfish::wire::kMemoryPropertyDeviceLocal
4218 : fuchsia_hardware_goldfish::wire::kMemoryPropertyHostVisible;
4219
4220 fidl::Arena arena;
4221 fuchsia_hardware_goldfish::wire::CreateColorBuffer2Params createParams(arena);
4222 createParams.set_width(info.settings.image_format_constraints.min_coded_width)
4223 .set_height(info.settings.image_format_constraints.min_coded_height)
4224 .set_format(format)
4225 .set_memory_property(memory_property);
4226
4227 auto result =
4228 mControlDevice->CreateColorBuffer2(std::move(vmo), std::move(createParams));
4229 if (result.ok() && result->res == ZX_ERR_ALREADY_EXISTS) {
4230 mesa_logd("CreateColorBuffer: color buffer already exists\n");
4231 } else if (!result.ok() || result->res != ZX_OK) {
4232 mesa_loge("CreateColorBuffer failed: %d:%d", result.status(),
4233 GET_STATUS_SAFE(result, res));
4234 }
4235 }
4236
4237 if (info.settings.buffer_settings.heap ==
4238 fuchsia_sysmem::wire::HeapType::kGoldfishHostVisible) {
4239 mesa_logd(
4240 "%s: Image uses host visible memory heap; set tiling "
4241 "to linear to match host ImageCreateInfo",
4242 __func__);
4243 localCreateInfo.tiling = VK_IMAGE_TILING_LINEAR;
4244 }
4245 }
4246 isSysmemBackedMemory = true;
4247 }
4248
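    // Sysmem-backed images keep a mutable format, presumably because the final
    // pixel format is negotiated out-of-band by sysmem (see the collection
    // handling above).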
4249 if (isSysmemBackedMemory) {
4250 localCreateInfo.flags |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
4251 }
4252 #endif
4253
4254 VkResult res;
4255 VkMemoryRequirements memReqs;
4256
4257 if (supportsCreateResourcesWithRequirements()) {
4258 res = enc->vkCreateImageWithRequirementsGOOGLE(device, &localCreateInfo, pAllocator, pImage,
4259 &memReqs, true /* do lock */);
4260 } else {
4261 res = enc->vkCreateImage(device, &localCreateInfo, pAllocator, pImage, true /* do lock */);
4262 }
4263
4264 if (res != VK_SUCCESS) return res;
4265
4266 AutoLock<RecursiveLock> lock(mLock);
4267
4268 auto it = info_VkImage.find(*pImage);
4269 if (it == info_VkImage.end()) return VK_ERROR_INITIALIZATION_FAILED;
4270
4271 auto& info = it->second;
4272
4273 info.device = device;
4274 info.createInfo = *pCreateInfo;
4275 info.createInfo.pNext = nullptr;
4276
4277 #ifdef VK_USE_PLATFORM_ANDROID_KHR
4278 if (extFormatAndroidPtr && extFormatAndroidPtr->externalFormat) {
4279 info.hasExternalFormat = true;
4280 info.externalFourccFormat = extFormatAndroidPtr->externalFormat;
4281 }
4282 #endif // VK_USE_PLATFORM_ANDROID_KHR
4283
4284 if (supportsCreateResourcesWithRequirements()) {
4285 info.baseRequirementsKnown = true;
4286 }
4287
4288 if (extImgCiPtr) {
4289 info.external = true;
4290 info.externalCreateInfo = *extImgCiPtr;
4291 }
4292
4293 #ifdef VK_USE_PLATFORM_FUCHSIA
4294 if (isSysmemBackedMemory) {
4295 info.isSysmemBackedMemory = true;
4296 }
4297 #endif
4298
4299     // TODO: Delete the `protocolVersion` check once goldfish drivers are gone.
4300 #if defined(VK_USE_PLATFORM_ANDROID_KHR)
4301 if (mCaps.vulkanCapset.colorBufferMemoryIndex == 0xFFFFFFFF) {
4302 mCaps.vulkanCapset.colorBufferMemoryIndex = getColorBufferMemoryIndex(context, device);
4303 }
4304 if ((extImgCiPtr && (extImgCiPtr->handleTypes &
4305 VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID))) {
4306 updateMemoryTypeBits(&memReqs.memoryTypeBits, mCaps.vulkanCapset.colorBufferMemoryIndex);
4307 }
4308 #endif
4309 #if defined(LINUX_GUEST_BUILD)
4310 if (mCaps.vulkanCapset.colorBufferMemoryIndex == 0xFFFFFFFF) {
4311 mCaps.vulkanCapset.colorBufferMemoryIndex = getColorBufferMemoryIndex(context, device);
4312 }
4313 info.isDmaBufImage = isDmaBufImage;
4314 if (info.isDmaBufImage) {
4315 updateMemoryTypeBits(&memReqs.memoryTypeBits, mCaps.vulkanCapset.colorBufferMemoryIndex);
4316 if (localCreateInfo.tiling == VK_IMAGE_TILING_OPTIMAL) {
4317             // Linux WSI calls vkGetImageSubresourceLayout() to query the stride for swapchain
4318             // support. Similarly, the stride is also queried via vkGetImageSubresourceLayout()
4319             // to determine the stride for colorBuffer resource creation (guest-side dmabuf
4320             // resource). To satisfy the valid usage of this API, the call must be made on the
4321             // linearPeerImage for the VkImage in question. As long as these two use cases
4322             // match, the rowPitch won't actually be used by WSI.
4323 VkImageCreateInfo linearPeerImageCreateInfo = {
4324 .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
4325 .pNext = nullptr,
4326 .flags = {},
4327 .imageType = VK_IMAGE_TYPE_2D,
4328 .format = localCreateInfo.format,
4329 .extent = localCreateInfo.extent,
4330 .mipLevels = 1,
4331 .arrayLayers = 1,
4332 .samples = VK_SAMPLE_COUNT_1_BIT,
4333 .tiling = VK_IMAGE_TILING_LINEAR,
4334 .usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT,
4335 .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
4336 .queueFamilyIndexCount = 0,
4337 .pQueueFamilyIndices = nullptr,
4338 .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
4339 };
4340 res = enc->vkCreateImage(device, &linearPeerImageCreateInfo, pAllocator,
4341 &info.linearPeerImage, true /* do lock */);
4342 if (res != VK_SUCCESS) return res;
4343 }
4344 }
4345 #endif
4346
4347 if (info.baseRequirementsKnown) {
4348 transformImageMemoryRequirementsForGuestLocked(*pImage, &memReqs);
4349 info.baseRequirements = memReqs;
4350 }
4351 return res;
4352 }
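// Illustrative sketch (not part of this driver): an Android client reaching the
// VkExternalFormatANDROID path handled in on_vkCreateImage() above. The
// AHardwareBuffer query that produces `externalFormatFromAhb` is elided and the
// name is hypothetical.
//
//     VkExternalFormatANDROID extFormat = {
//         .sType = VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID,
//         .pNext = nullptr,
//         .externalFormat = externalFormatFromAhb,
//     };
//     VkExternalMemoryImageCreateInfo extMemCi = {
//         .sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO,
//         .pNext = &extFormat,
//         .handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID,
//     };
//     VkImageCreateInfo imageCi = {
//         .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
//         .pNext = &extMemCi,
//         .format = VK_FORMAT_UNDEFINED,  // replaced by the fourcc-derived format above
//         /* remaining fields elided */
//     };
//     vkCreateImage(device, &imageCi, nullptr, &image);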
4353
4354 VkResult ResourceTracker::on_vkCreateSamplerYcbcrConversion(
4355 void* context, VkResult, VkDevice device, const VkSamplerYcbcrConversionCreateInfo* pCreateInfo,
4356 const VkAllocationCallbacks* pAllocator, VkSamplerYcbcrConversion* pYcbcrConversion) {
4357 VkSamplerYcbcrConversionCreateInfo localCreateInfo = vk_make_orphan_copy(*pCreateInfo);
4358
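    // For RGB565 external formats, host creation is skipped entirely and the
    // reserved handle VK_YCBCR_CONVERSION_DO_NOTHING is returned; the destroy
    // and sampler-creation paths below filter this sentinel back out.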
4359 #ifdef VK_USE_PLATFORM_ANDROID_KHR
4360 const VkExternalFormatANDROID* extFormatAndroidPtr =
4361 vk_find_struct<VkExternalFormatANDROID>(pCreateInfo);
4362 if (extFormatAndroidPtr) {
4363 if (extFormatAndroidPtr->externalFormat == DRM_FORMAT_RGB565) {
4364             // The host does not support external formats, which causes RGB565
4365             // to fail in the CTS test android.graphics.cts.BasicVulkanGpuTest
4366             // when it is passed as an external format.
4367 // We may consider doing this for all external formats.
4368 // See b/134771579.
4369 *pYcbcrConversion = VK_YCBCR_CONVERSION_DO_NOTHING;
4370 return VK_SUCCESS;
4371 } else if (extFormatAndroidPtr->externalFormat) {
4372 localCreateInfo.format = vk_format_from_fourcc(extFormatAndroidPtr->externalFormat);
4373 }
4374 }
4375 #endif
4376
4377 VkEncoder* enc = (VkEncoder*)context;
4378 VkResult res = enc->vkCreateSamplerYcbcrConversion(device, &localCreateInfo, pAllocator,
4379 pYcbcrConversion, true /* do lock */);
4380
4381 if (*pYcbcrConversion == VK_YCBCR_CONVERSION_DO_NOTHING) {
4382 mesa_loge(
4383 "FATAL: vkCreateSamplerYcbcrConversion returned a reserved value "
4384 "(VK_YCBCR_CONVERSION_DO_NOTHING)");
4385 abort();
4386 }
4387 return res;
4388 }
4389
4390 void ResourceTracker::on_vkDestroySamplerYcbcrConversion(void* context, VkDevice device,
4391 VkSamplerYcbcrConversion ycbcrConversion,
4392 const VkAllocationCallbacks* pAllocator) {
4393 VkEncoder* enc = (VkEncoder*)context;
4394 if (ycbcrConversion != VK_YCBCR_CONVERSION_DO_NOTHING) {
4395 enc->vkDestroySamplerYcbcrConversion(device, ycbcrConversion, pAllocator,
4396 true /* do lock */);
4397 }
4398 }
4399
4400 VkResult ResourceTracker::on_vkCreateSamplerYcbcrConversionKHR(
4401 void* context, VkResult, VkDevice device, const VkSamplerYcbcrConversionCreateInfo* pCreateInfo,
4402 const VkAllocationCallbacks* pAllocator, VkSamplerYcbcrConversion* pYcbcrConversion) {
4403 VkSamplerYcbcrConversionCreateInfo localCreateInfo = vk_make_orphan_copy(*pCreateInfo);
4404
4405 #if defined(VK_USE_PLATFORM_ANDROID_KHR)
4406 const VkExternalFormatANDROID* extFormatAndroidPtr =
4407 vk_find_struct<VkExternalFormatANDROID>(pCreateInfo);
4408 if (extFormatAndroidPtr) {
4409 if (extFormatAndroidPtr->externalFormat == DRM_FORMAT_RGB565) {
4410             // The host does not support external formats, which causes RGB565
4411             // to fail in the CTS test android.graphics.cts.BasicVulkanGpuTest
4412             // when it is passed as an external format.
4413 // We may consider doing this for all external formats.
4414 // See b/134771579.
4415 *pYcbcrConversion = VK_YCBCR_CONVERSION_DO_NOTHING;
4416 return VK_SUCCESS;
4417 } else if (extFormatAndroidPtr->externalFormat) {
4418 localCreateInfo.format = vk_format_from_fourcc(extFormatAndroidPtr->externalFormat);
4419 }
4420 }
4421 #endif
4422
4423 VkEncoder* enc = (VkEncoder*)context;
4424 VkResult res = enc->vkCreateSamplerYcbcrConversionKHR(device, &localCreateInfo, pAllocator,
4425 pYcbcrConversion, true /* do lock */);
4426
4427 if (*pYcbcrConversion == VK_YCBCR_CONVERSION_DO_NOTHING) {
4428 mesa_loge(
4429 "FATAL: vkCreateSamplerYcbcrConversionKHR returned a reserved value "
4430 "(VK_YCBCR_CONVERSION_DO_NOTHING)");
4431 abort();
4432 }
4433 return res;
4434 }
4435
4436 void ResourceTracker::on_vkDestroySamplerYcbcrConversionKHR(
4437 void* context, VkDevice device, VkSamplerYcbcrConversion ycbcrConversion,
4438 const VkAllocationCallbacks* pAllocator) {
4439 VkEncoder* enc = (VkEncoder*)context;
4440 if (ycbcrConversion != VK_YCBCR_CONVERSION_DO_NOTHING) {
4441 enc->vkDestroySamplerYcbcrConversionKHR(device, ycbcrConversion, pAllocator,
4442 true /* do lock */);
4443 }
4444 }
4445
4446 VkResult ResourceTracker::on_vkCreateSampler(void* context, VkResult, VkDevice device,
4447 const VkSamplerCreateInfo* pCreateInfo,
4448 const VkAllocationCallbacks* pAllocator,
4449 VkSampler* pSampler) {
4450 VkSamplerCreateInfo localCreateInfo = vk_make_orphan_copy(*pCreateInfo);
4451 vk_struct_chain_iterator structChainIter = vk_make_chain_iterator(&localCreateInfo);
4452
4453 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(VK_USE_PLATFORM_FUCHSIA)
4454 VkSamplerYcbcrConversionInfo localVkSamplerYcbcrConversionInfo;
4455 const VkSamplerYcbcrConversionInfo* samplerYcbcrConversionInfo =
4456 vk_find_struct<VkSamplerYcbcrConversionInfo>(pCreateInfo);
4457 if (samplerYcbcrConversionInfo) {
4458 if (samplerYcbcrConversionInfo->conversion != VK_YCBCR_CONVERSION_DO_NOTHING) {
4459 localVkSamplerYcbcrConversionInfo = vk_make_orphan_copy(*samplerYcbcrConversionInfo);
4460 vk_append_struct(&structChainIter, &localVkSamplerYcbcrConversionInfo);
4461 }
4462 }
4463
4464 VkSamplerCustomBorderColorCreateInfoEXT localVkSamplerCustomBorderColorCreateInfo;
4465 const VkSamplerCustomBorderColorCreateInfoEXT* samplerCustomBorderColorCreateInfo =
4466 vk_find_struct<VkSamplerCustomBorderColorCreateInfoEXT>(pCreateInfo);
4467 if (samplerCustomBorderColorCreateInfo) {
4468 localVkSamplerCustomBorderColorCreateInfo =
4469 vk_make_orphan_copy(*samplerCustomBorderColorCreateInfo);
4470 vk_append_struct(&structChainIter, &localVkSamplerCustomBorderColorCreateInfo);
4471 }
4472 #endif
4473
4474 VkEncoder* enc = (VkEncoder*)context;
4475 return enc->vkCreateSampler(device, &localCreateInfo, pAllocator, pSampler, true /* do lock */);
4476 }
4477
4478 void ResourceTracker::on_vkGetPhysicalDeviceExternalFenceProperties(
4479 void* context, VkPhysicalDevice physicalDevice,
4480 const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo,
4481 VkExternalFenceProperties* pExternalFenceProperties) {
4482 (void)context;
4483 (void)physicalDevice;
4484
4485 pExternalFenceProperties->exportFromImportedHandleTypes = 0;
4486 pExternalFenceProperties->compatibleHandleTypes = 0;
4487 pExternalFenceProperties->externalFenceFeatures = 0;
4488
4489 bool syncFd = pExternalFenceInfo->handleType & VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
4490
4491 if (!syncFd) {
4492 return;
4493 }
4494
4495 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
4496 pExternalFenceProperties->exportFromImportedHandleTypes =
4497 VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
4498 pExternalFenceProperties->compatibleHandleTypes = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
4499 pExternalFenceProperties->externalFenceFeatures =
4500 VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT | VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT;
4501 #endif
4502 }
4503
4504 void ResourceTracker::on_vkGetPhysicalDeviceExternalFencePropertiesKHR(
4505 void* context, VkPhysicalDevice physicalDevice,
4506 const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo,
4507 VkExternalFenceProperties* pExternalFenceProperties) {
4508 on_vkGetPhysicalDeviceExternalFenceProperties(context, physicalDevice, pExternalFenceInfo,
4509 pExternalFenceProperties);
4510 }
4511
4512 VkResult ResourceTracker::on_vkCreateFence(void* context, VkResult input_result, VkDevice device,
4513 const VkFenceCreateInfo* pCreateInfo,
4514 const VkAllocationCallbacks* pAllocator,
4515 VkFence* pFence) {
4516 VkEncoder* enc = (VkEncoder*)context;
4517 VkFenceCreateInfo finalCreateInfo = *pCreateInfo;
4518
4519 const VkExportFenceCreateInfo* exportFenceInfoPtr =
4520 vk_find_struct<VkExportFenceCreateInfo>(pCreateInfo);
4521
4522 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
4523 bool exportSyncFd = exportFenceInfoPtr && (exportFenceInfoPtr->handleTypes &
4524 VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT);
4525 #endif
4526
4527 input_result =
4528 enc->vkCreateFence(device, &finalCreateInfo, pAllocator, pFence, true /* do lock */);
4529
4530 if (input_result != VK_SUCCESS) return input_result;
4531
4532 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
4533 if (exportSyncFd) {
4534 if (!mFeatureInfo->hasVirtioGpuNativeSync) {
4535 mesa_logi("%s: ensure sync device\n", __func__);
4536 ensureSyncDeviceFd();
4537 }
4538
4539 mesa_logi("%s: getting fence info\n", __func__);
4540 AutoLock<RecursiveLock> lock(mLock);
4541 auto it = info_VkFence.find(*pFence);
4542
4543 if (it == info_VkFence.end()) return VK_ERROR_INITIALIZATION_FAILED;
4544
4545 auto& info = it->second;
4546
4547 info.external = true;
4548 info.exportFenceCreateInfo = *exportFenceInfoPtr;
4549 mesa_logi("%s: info set (fence still -1). fence: %p\n", __func__, (void*)(*pFence));
4550 // syncFd is still -1 because we expect user to explicitly
4551 // export it via vkGetFenceFdKHR
4552 }
4553 #endif
4554
4555 return input_result;
4556 }
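// Illustrative sketch (not part of this driver): creating a fence that takes
// the exportSyncFd path in on_vkCreateFence() above. The sync fd itself is
// produced later, by vkGetFenceFdKHR().
//
//     VkExportFenceCreateInfo exportCi = {
//         .sType = VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO,
//         .pNext = nullptr,
//         .handleTypes = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT,
//     };
//     VkFenceCreateInfo fenceCi = {
//         .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
//         .pNext = &exportCi,
//         .flags = 0,
//     };
//     VkFence fence = VK_NULL_HANDLE;
//     vkCreateFence(device, &fenceCi, nullptr, &fence);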
4557
4558 void ResourceTracker::on_vkDestroyFence(void* context, VkDevice device, VkFence fence,
4559 const VkAllocationCallbacks* pAllocator) {
4560 VkEncoder* enc = (VkEncoder*)context;
4561 enc->vkDestroyFence(device, fence, pAllocator, true /* do lock */);
4562 }
4563
4564 VkResult ResourceTracker::on_vkResetFences(void* context, VkResult, VkDevice device,
4565 uint32_t fenceCount, const VkFence* pFences) {
4566 VkEncoder* enc = (VkEncoder*)context;
4567 VkResult res = enc->vkResetFences(device, fenceCount, pFences, true /* do lock */);
4568
4569 if (res != VK_SUCCESS) return res;
4570
4571 if (!fenceCount) return res;
4572
4573     // Permanence: temporary.
4574     // On fence reset, close the fence fd and act as if we need to call
4575     // vkGetFenceFdKHR/vkImportFenceFdKHR again.
4576 AutoLock<RecursiveLock> lock(mLock);
4577 for (uint32_t i = 0; i < fenceCount; ++i) {
4578 VkFence fence = pFences[i];
4579 auto it = info_VkFence.find(fence);
4580 auto& info = it->second;
4581 if (!info.external) continue;
4582
4583 #if GFXSTREAM_ENABLE_GUEST_GOLDFISH
4584 if (info.syncFd >= 0) {
4585 mesa_logi("%s: resetting fence. make fd -1\n", __func__);
4586 goldfish_sync_signal(info.syncFd);
4587 auto* syncHelper =
4588 ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->syncHelper();
4589 syncHelper->close(info.syncFd);
4590 info.syncFd = -1;
4591 }
4592 #endif
4593 }
4594
4595 return res;
4596 }
4597
4598 VkResult ResourceTracker::on_vkImportFenceFdKHR(void* context, VkResult, VkDevice device,
4599 const VkImportFenceFdInfoKHR* pImportFenceFdInfo) {
4600 (void)context;
4601 (void)device;
4602 (void)pImportFenceFdInfo;
4603
4604 // Transference: copy
4605 // meaning dup() the incoming fd
4606
4607 VkEncoder* enc = (VkEncoder*)context;
4608
4609 bool hasFence = pImportFenceFdInfo->fence != VK_NULL_HANDLE;
4610
4611 if (!hasFence) return VK_ERROR_OUT_OF_HOST_MEMORY;
4612
4613 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
4614
4615 bool syncFdImport = pImportFenceFdInfo->handleType & VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
4616
4617 if (!syncFdImport) {
4618 mesa_logi("%s: VK_ERROR_OUT_OF_HOST_MEMORY: no sync fd import\n", __func__);
4619 return VK_ERROR_OUT_OF_HOST_MEMORY;
4620 }
4621
4622 AutoLock<RecursiveLock> lock(mLock);
4623 auto it = info_VkFence.find(pImportFenceFdInfo->fence);
4624 if (it == info_VkFence.end()) {
4625 mesa_logi("%s: VK_ERROR_OUT_OF_HOST_MEMORY: no fence info\n", __func__);
4626 return VK_ERROR_OUT_OF_HOST_MEMORY;
4627 }
4628
4629 auto& info = it->second;
4630
4631 auto* syncHelper = ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->syncHelper();
4632 #if GFXSTREAM_ENABLE_GUEST_GOLDFISH
4633 if (info.syncFd >= 0) {
4634 mesa_logi("%s: previous sync fd exists, close it\n", __func__);
4635 goldfish_sync_signal(info.syncFd);
4636 syncHelper->close(info.syncFd);
4637 }
4638 #endif
4639
4640 if (pImportFenceFdInfo->fd < 0) {
4641 mesa_logi("%s: import -1, set to -1 and exit\n", __func__);
4642 info.syncFd = -1;
4643 } else {
4644 mesa_logi("%s: import actual fd, dup and close()\n", __func__);
4645 info.syncFd = syncHelper->dup(pImportFenceFdInfo->fd);
4646 syncHelper->close(pImportFenceFdInfo->fd);
4647 }
4648 return VK_SUCCESS;
4649 #else
4650 return VK_ERROR_OUT_OF_HOST_MEMORY;
4651 #endif
4652 }
4653
4654 VkResult ResourceTracker::on_vkGetFenceFdKHR(void* context, VkResult, VkDevice device,
4655 const VkFenceGetFdInfoKHR* pGetFdInfo, int* pFd) {
4656     // Export operation.
4657     // First check the fence status; on VK_ERROR_DEVICE_LOST, set the fd
4658     // to -1 and fail. Otherwise (signaled or not), queue work to produce
4659     // a sync fd.
4660
4661 VkEncoder* enc = (VkEncoder*)context;
4662
4663 bool hasFence = pGetFdInfo->fence != VK_NULL_HANDLE;
4664
4665 if (!hasFence) {
4666 mesa_logi("%s: VK_ERROR_OUT_OF_HOST_MEMORY: no fence\n", __func__);
4667 return VK_ERROR_OUT_OF_HOST_MEMORY;
4668 }
4669
4670 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
4671 bool syncFdExport = pGetFdInfo->handleType & VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
4672
4673 if (!syncFdExport) {
4674 mesa_logi("%s: VK_ERROR_OUT_OF_HOST_MEMORY: no sync fd fence\n", __func__);
4675 return VK_ERROR_OUT_OF_HOST_MEMORY;
4676 }
4677
4678 VkResult currentFenceStatus =
4679 enc->vkGetFenceStatus(device, pGetFdInfo->fence, true /* do lock */);
4680
4681     if (VK_ERROR_DEVICE_LOST == currentFenceStatus) {  // Unrecoverable error
4682 mesa_logi("%s: VK_ERROR_DEVICE_LOST: Other error\n", __func__);
4683 *pFd = -1;
4684 return VK_ERROR_DEVICE_LOST;
4685 }
4686
4687 if (VK_NOT_READY == currentFenceStatus || VK_SUCCESS == currentFenceStatus) {
4688 // Fence is valid. We also create a new sync fd for a signaled
4689 // fence, because ANGLE will use the returned fd directly to
4690 // implement eglDupNativeFenceFDANDROID, where -1 is only returned
4691         // when an error occurs.
4692 AutoLock<RecursiveLock> lock(mLock);
4693
4694 auto it = info_VkFence.find(pGetFdInfo->fence);
4695 if (it == info_VkFence.end()) {
4696 mesa_logi("%s: VK_ERROR_OUT_OF_HOST_MEMORY: no fence info\n", __func__);
4697 return VK_ERROR_OUT_OF_HOST_MEMORY;
4698 }
4699
4700 auto& info = it->second;
4701
4702 bool syncFdCreated = info.external && (info.exportFenceCreateInfo.handleTypes &
4703 VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT);
4704
4705 if (!syncFdCreated) {
4706 mesa_logi("%s: VK_ERROR_OUT_OF_HOST_MEMORY: no sync fd created\n", __func__);
4707 return VK_ERROR_OUT_OF_HOST_MEMORY;
4708 }
4709
4710 if (mFeatureInfo->hasVirtioGpuNativeSync) {
4711 VkResult result;
4712 int64_t osHandle;
4713 uint64_t hostFenceHandle = get_host_u64_VkFence(pGetFdInfo->fence);
4714
4715 result = createFence(device, hostFenceHandle, osHandle);
4716 if (result != VK_SUCCESS) return result;
4717
4718 *pFd = osHandle;
4719 } else {
4720 #if GFXSTREAM_ENABLE_GUEST_GOLDFISH
4721 goldfish_sync_queue_work(
4722 mSyncDeviceFd, get_host_u64_VkFence(pGetFdInfo->fence) /* the handle */,
4723 GOLDFISH_SYNC_VULKAN_SEMAPHORE_SYNC /* thread handle (doubling as type field) */,
4724 pFd);
4725 #endif
4726 }
4727
4728 // relinquish ownership
4729 info.syncFd = -1;
4730 mesa_logi("%s: got fd: %d\n", __func__, *pFd);
4731 return VK_SUCCESS;
4732 }
4733 return VK_ERROR_DEVICE_LOST;
4734 #else
4735 return VK_ERROR_OUT_OF_HOST_MEMORY;
4736 #endif
4737 }
4738
4739 VkResult ResourceTracker::on_vkWaitForFences(void* context, VkResult, VkDevice device,
4740 uint32_t fenceCount, const VkFence* pFences,
4741 VkBool32 waitAll, uint64_t timeout) {
4742 VkEncoder* enc = (VkEncoder*)context;
4743
4744 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
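    // Partition the fences into sync-fd-backed fences (waited on in the guest)
    // and plain host fences (forwarded to the host driver); both groups are
    // then waited on via the work pool.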
4745 std::vector<VkFence> fencesExternal;
4746 std::vector<int> fencesExternalWaitFds;
4747 std::vector<VkFence> fencesNonExternal;
4748
4749 AutoLock<RecursiveLock> lock(mLock);
4750
4751 for (uint32_t i = 0; i < fenceCount; ++i) {
4752 auto it = info_VkFence.find(pFences[i]);
4753 if (it == info_VkFence.end()) continue;
4754 const auto& info = it->second;
4755 if (info.syncFd >= 0) {
4756 fencesExternal.push_back(pFences[i]);
4757 fencesExternalWaitFds.push_back(info.syncFd);
4758 } else {
4759 fencesNonExternal.push_back(pFences[i]);
4760 }
4761 }
4762
4763 lock.unlock();
4764
4765 if (fencesExternal.empty()) {
4766 // No need for work pool, just wait with host driver.
4767 return enc->vkWaitForFences(device, fenceCount, pFences, waitAll, timeout,
4768 true /* do lock */);
4769 } else {
4770 // Depending on wait any or wait all,
4771 // schedule a wait group with waitAny/waitAll
4772 std::vector<WorkPool::Task> tasks;
4773
4774 mesa_logi("%s: scheduling ext waits\n", __func__);
4775
4776 for (auto fd : fencesExternalWaitFds) {
4777 mesa_logi("%s: wait on %d\n", __func__, fd);
4778 tasks.push_back([fd] {
4779 auto* syncHelper =
4780 ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->syncHelper();
4781 syncHelper->wait(fd, 3000);
4782 mesa_logi("done waiting on fd %d\n", fd);
4783 });
4784 }
4785
4786 if (!fencesNonExternal.empty()) {
4787 tasks.push_back(
4788 [this, fencesNonExternal /* copy of vector */, device, waitAll, timeout] {
4789 auto hostConn = ResourceTracker::threadingCallbacks.hostConnectionGetFunc();
4790 auto vkEncoder = ResourceTracker::threadingCallbacks.vkEncoderGetFunc(hostConn);
4791 mesa_logi("%s: vkWaitForFences to host\n", __func__);
4792 vkEncoder->vkWaitForFences(device, fencesNonExternal.size(),
4793 fencesNonExternal.data(), waitAll, timeout,
4794 true /* do lock */);
4795 });
4796 }
4797
4798 auto waitGroupHandle = mWorkPool.schedule(tasks);
4799
4800 // Convert timeout to microseconds from nanoseconds
4801 bool waitRes = false;
4802 if (waitAll) {
4803 waitRes = mWorkPool.waitAll(waitGroupHandle, timeout / 1000);
4804 } else {
4805 waitRes = mWorkPool.waitAny(waitGroupHandle, timeout / 1000);
4806 }
4807
4808 if (waitRes) {
4809 mesa_logi("%s: VK_SUCCESS\n", __func__);
4810 return VK_SUCCESS;
4811 } else {
4812 mesa_logi("%s: VK_TIMEOUT\n", __func__);
4813 return VK_TIMEOUT;
4814 }
4815 }
4816 #else
4817 return enc->vkWaitForFences(device, fenceCount, pFences, waitAll, timeout, true /* do lock */);
4818 #endif
4819 }
4820
4821 VkResult ResourceTracker::on_vkCreateDescriptorPool(void* context, VkResult, VkDevice device,
4822 const VkDescriptorPoolCreateInfo* pCreateInfo,
4823 const VkAllocationCallbacks* pAllocator,
4824 VkDescriptorPool* pDescriptorPool) {
4825 VkEncoder* enc = (VkEncoder*)context;
4826
4827 VkResult res = enc->vkCreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool,
4828 true /* do lock */);
4829
4830 if (res != VK_SUCCESS) return res;
4831
4832 VkDescriptorPool pool = *pDescriptorPool;
4833
4834 struct goldfish_VkDescriptorPool* dp = as_goldfish_VkDescriptorPool(pool);
4835 dp->allocInfo = new DescriptorPoolAllocationInfo;
4836 dp->allocInfo->device = device;
4837 dp->allocInfo->createFlags = pCreateInfo->flags;
4838 dp->allocInfo->maxSets = pCreateInfo->maxSets;
4839 dp->allocInfo->usedSets = 0;
4840
4841 for (uint32_t i = 0; i < pCreateInfo->poolSizeCount; ++i) {
4842 dp->allocInfo->descriptorCountInfo.push_back({
4843 pCreateInfo->pPoolSizes[i].type, pCreateInfo->pPoolSizes[i].descriptorCount,
4844 0, /* used */
4845 });
4846 }
4847
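    // With batched descriptor set updates, set handles are virtualized on the
    // guest side: collect the pool IDs the host has set aside so later
    // allocations can be satisfied without a host round trip.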
4848 if (mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate) {
4849 std::vector<uint64_t> poolIds(pCreateInfo->maxSets);
4850
4851 uint32_t count = pCreateInfo->maxSets;
4852 enc->vkCollectDescriptorPoolIdsGOOGLE(device, pool, &count, poolIds.data(),
4853 true /* do lock */);
4854
4855 dp->allocInfo->freePoolIds = poolIds;
4856 }
4857
4858 return res;
4859 }
4860
4861 void ResourceTracker::on_vkDestroyDescriptorPool(void* context, VkDevice device,
4862 VkDescriptorPool descriptorPool,
4863 const VkAllocationCallbacks* pAllocator) {
4864 if (!descriptorPool) return;
4865
4866 VkEncoder* enc = (VkEncoder*)context;
4867
4868 clearDescriptorPoolAndUnregisterDescriptorSets(context, device, descriptorPool);
4869
4870 enc->vkDestroyDescriptorPool(device, descriptorPool, pAllocator, true /* do lock */);
4871 }
4872
4873 VkResult ResourceTracker::on_vkResetDescriptorPool(void* context, VkResult, VkDevice device,
4874 VkDescriptorPool descriptorPool,
4875 VkDescriptorPoolResetFlags flags) {
4876 if (!descriptorPool) return VK_ERROR_INITIALIZATION_FAILED;
4877
4878 VkEncoder* enc = (VkEncoder*)context;
4879
4880 VkResult res = enc->vkResetDescriptorPool(device, descriptorPool, flags, true /* do lock */);
4881
4882 if (res != VK_SUCCESS) return res;
4883
4884 clearDescriptorPoolAndUnregisterDescriptorSets(context, device, descriptorPool);
4885 return res;
4886 }
4887
4888 VkResult ResourceTracker::on_vkAllocateDescriptorSets(
4889 void* context, VkResult, VkDevice device, const VkDescriptorSetAllocateInfo* pAllocateInfo,
4890 VkDescriptorSet* pDescriptorSets) {
4891 VkEncoder* enc = (VkEncoder*)context;
4892 auto ci = pAllocateInfo;
4893 auto sets = pDescriptorSets;
4894 if (mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate) {
4895         // Using the pool IDs we collected earlier from the host.
4896 VkResult poolAllocResult = validateAndApplyVirtualDescriptorSetAllocation(ci, sets);
4897
4898 if (poolAllocResult != VK_SUCCESS) return poolAllocResult;
4899
4900 for (uint32_t i = 0; i < ci->descriptorSetCount; ++i) {
4901 register_VkDescriptorSet(sets[i]);
4902 VkDescriptorSetLayout setLayout =
4903 as_goldfish_VkDescriptorSet(sets[i])->reified->setLayout;
4904
4905             // Need to add a ref to the set layout in the virtual case
4906             // because the set itself might not be realized on the host at
4907             // the same time.
4908 struct goldfish_VkDescriptorSetLayout* dsl =
4909 as_goldfish_VkDescriptorSetLayout(setLayout);
4910 ++dsl->layoutInfo->refcount;
4911 }
4912 } else {
4913 VkResult allocRes = enc->vkAllocateDescriptorSets(device, ci, sets, true /* do lock */);
4914
4915 if (allocRes != VK_SUCCESS) return allocRes;
4916
4917 for (uint32_t i = 0; i < ci->descriptorSetCount; ++i) {
4918 applyDescriptorSetAllocation(ci->descriptorPool, ci->pSetLayouts[i]);
4919 fillDescriptorSetInfoForPool(ci->descriptorPool, ci->pSetLayouts[i], sets[i]);
4920 }
4921 }
4922
4923 return VK_SUCCESS;
4924 }
4925
4926 VkResult ResourceTracker::on_vkFreeDescriptorSets(void* context, VkResult, VkDevice device,
4927 VkDescriptorPool descriptorPool,
4928 uint32_t descriptorSetCount,
4929 const VkDescriptorSet* pDescriptorSets) {
4930 VkEncoder* enc = (VkEncoder*)context;
4931
4932     // A bit of robustness so that we can tolerate double-freed descriptor
4933     // sets and other invalid usage.
4934 // https://github.com/KhronosGroup/Vulkan-Docs/issues/1070
4935 // (people expect VK_SUCCESS to always be returned by vkFreeDescriptorSets)
4936 std::vector<VkDescriptorSet> toActuallyFree;
4937 {
4938 AutoLock<RecursiveLock> lock(mLock);
4939
4940 // Pool was destroyed
4941 if (info_VkDescriptorPool.find(descriptorPool) == info_VkDescriptorPool.end()) {
4942 return VK_SUCCESS;
4943 }
4944
4945 if (!descriptorPoolSupportsIndividualFreeLocked(descriptorPool)) return VK_SUCCESS;
4946
4947         std::vector<VkDescriptorSet> existingDescriptorSets;
4949
4950 // Check if this descriptor set was in the pool's set of allocated descriptor sets,
4951         // to guard against double free (double free is allowed for the client).
4952 {
4953 auto allocedSets = as_goldfish_VkDescriptorPool(descriptorPool)->allocInfo->allocedSets;
4954
4955 for (uint32_t i = 0; i < descriptorSetCount; ++i) {
4956 if (allocedSets.end() == allocedSets.find(pDescriptorSets[i])) {
4957 mesa_logi(
4958 "%s: Warning: descriptor set %p not found in pool. Was this "
4959 "double-freed?\n",
4960 __func__, (void*)pDescriptorSets[i]);
4961 continue;
4962 }
4963
4964 auto it = info_VkDescriptorSet.find(pDescriptorSets[i]);
4965 if (it == info_VkDescriptorSet.end()) continue;
4966
4967 existingDescriptorSets.push_back(pDescriptorSets[i]);
4968 }
4969 }
4970
4971 for (auto set : existingDescriptorSets) {
4972 if (removeDescriptorSetFromPool(set,
4973 mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate)) {
4974 toActuallyFree.push_back(set);
4975 }
4976 }
4977
4978 if (toActuallyFree.empty()) return VK_SUCCESS;
4979 }
4980
4981 if (mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate) {
4982 // In the batched set update case, decrement refcount on the set layout
4983 // and only free on host if we satisfied a pending allocation on the
4984 // host.
4985 for (uint32_t i = 0; i < toActuallyFree.size(); ++i) {
4986 VkDescriptorSetLayout setLayout =
4987 as_goldfish_VkDescriptorSet(toActuallyFree[i])->reified->setLayout;
4988 decDescriptorSetLayoutRef(context, device, setLayout, nullptr);
4989 }
4990 freeDescriptorSetsIfHostAllocated(enc, device, (uint32_t)toActuallyFree.size(),
4991 toActuallyFree.data());
4992 } else {
4993 // In the non-batched set update case, just free them directly.
4994 enc->vkFreeDescriptorSets(device, descriptorPool, (uint32_t)toActuallyFree.size(),
4995 toActuallyFree.data(), true /* do lock */);
4996 }
4997 return VK_SUCCESS;
4998 }
4999
5000 VkResult ResourceTracker::on_vkCreateDescriptorSetLayout(
5001 void* context, VkResult, VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
5002 const VkAllocationCallbacks* pAllocator, VkDescriptorSetLayout* pSetLayout) {
5003 VkEncoder* enc = (VkEncoder*)context;
5004
5005 VkResult res = enc->vkCreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout,
5006 true /* do lock */);
5007
5008 if (res != VK_SUCCESS) return res;
5009
5010 struct goldfish_VkDescriptorSetLayout* dsl = as_goldfish_VkDescriptorSetLayout(*pSetLayout);
5011 dsl->layoutInfo = new DescriptorSetLayoutInfo;
5012 for (uint32_t i = 0; i < pCreateInfo->bindingCount; ++i) {
5013 dsl->layoutInfo->bindings.push_back(pCreateInfo->pBindings[i]);
5014 }
5015 dsl->layoutInfo->refcount = 1;
5016
5017 return res;
5018 }
5019
5020 void ResourceTracker::on_vkUpdateDescriptorSets(void* context, VkDevice device,
5021 uint32_t descriptorWriteCount,
5022 const VkWriteDescriptorSet* pDescriptorWrites,
5023 uint32_t descriptorCopyCount,
5024 const VkCopyDescriptorSet* pDescriptorCopies) {
5025 VkEncoder* enc = (VkEncoder*)context;
5026
5027 std::vector<VkDescriptorImageInfo> transformedImageInfos;
5028 std::vector<VkWriteDescriptorSet> transformedWrites(descriptorWriteCount);
5029
5030 memcpy(transformedWrites.data(), pDescriptorWrites,
5031 sizeof(VkWriteDescriptorSet) * descriptorWriteCount);
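    // Copy the writes so image infos can be rewritten in place below
    // (immutable-sampler zeroing and nonexistent-sampler filtering) without
    // modifying the caller's arrays.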
5032
5033 size_t imageInfosNeeded = 0;
5034 for (uint32_t i = 0; i < descriptorWriteCount; ++i) {
5035 if (!isDescriptorTypeImageInfo(transformedWrites[i].descriptorType)) continue;
5036 if (!transformedWrites[i].pImageInfo) continue;
5037
5038 imageInfosNeeded += transformedWrites[i].descriptorCount;
5039 }
5040
5041 transformedImageInfos.resize(imageInfosNeeded);
5042
5043 size_t imageInfoIndex = 0;
5044 for (uint32_t i = 0; i < descriptorWriteCount; ++i) {
5045 if (!isDescriptorTypeImageInfo(transformedWrites[i].descriptorType)) continue;
5046 if (!transformedWrites[i].pImageInfo) continue;
5047
5048 for (uint32_t j = 0; j < transformedWrites[i].descriptorCount; ++j) {
5049 transformedImageInfos[imageInfoIndex] = transformedWrites[i].pImageInfo[j];
5050 ++imageInfoIndex;
5051 }
5052 transformedWrites[i].pImageInfo =
5053 &transformedImageInfos[imageInfoIndex - transformedWrites[i].descriptorCount];
5054 }
5055
5056 {
5057 // Validate and filter samplers
5058 AutoLock<RecursiveLock> lock(mLock);
5059 size_t imageInfoIndex = 0;
5060 for (uint32_t i = 0; i < descriptorWriteCount; ++i) {
5061 if (!isDescriptorTypeImageInfo(transformedWrites[i].descriptorType)) continue;
5062 if (!transformedWrites[i].pImageInfo) continue;
5063
5064 bool isImmutableSampler = descriptorBindingIsImmutableSampler(
5065 transformedWrites[i].dstSet, transformedWrites[i].dstBinding);
5066
5067 for (uint32_t j = 0; j < transformedWrites[i].descriptorCount; ++j) {
5068 if (isImmutableSampler) {
5069 transformedImageInfos[imageInfoIndex].sampler = 0;
5070 }
5071 transformedImageInfos[imageInfoIndex] =
5072 filterNonexistentSampler(transformedImageInfos[imageInfoIndex]);
5073 ++imageInfoIndex;
5074 }
5075 }
5076 }
5077
5078 if (mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate) {
5079 for (uint32_t i = 0; i < descriptorWriteCount; ++i) {
5080 VkDescriptorSet set = transformedWrites[i].dstSet;
5081 doEmulatedDescriptorWrite(&transformedWrites[i],
5082 as_goldfish_VkDescriptorSet(set)->reified);
5083 }
5084
5085 for (uint32_t i = 0; i < descriptorCopyCount; ++i) {
5086 doEmulatedDescriptorCopy(
5087 &pDescriptorCopies[i],
5088 as_goldfish_VkDescriptorSet(pDescriptorCopies[i].srcSet)->reified,
5089 as_goldfish_VkDescriptorSet(pDescriptorCopies[i].dstSet)->reified);
5090 }
5091 } else {
5092 enc->vkUpdateDescriptorSets(device, descriptorWriteCount, transformedWrites.data(),
5093 descriptorCopyCount, pDescriptorCopies, true /* do lock */);
5094 }
5095 }
5096
5097 void ResourceTracker::on_vkDestroyImage(void* context, VkDevice device, VkImage image,
5098 const VkAllocationCallbacks* pAllocator) {
5099 #ifdef VK_USE_PLATFORM_ANDROID_KHR
5100 auto* syncHelper = ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->syncHelper();
5101 {
5102         AutoLock<RecursiveLock> lock(mLock);  // Do not guard the encoder; doing
5103                                               // so may cause deadlock (b/243339973).
5104
5105 // Wait for any pending QSRIs to prevent a race between the Gfxstream host
5106 // potentially processing the below `vkDestroyImage()` from the VK encoder
5107 // command stream before processing a previously submitted
5108 // `VIRTIO_GPU_NATIVE_SYNC_VULKAN_QSRI_EXPORT` from the virtio-gpu command
5109 // stream which relies on the image existing.
5110 auto imageInfoIt = info_VkImage.find(image);
5111 if (imageInfoIt != info_VkImage.end()) {
5112 auto& imageInfo = imageInfoIt->second;
5113 for (int syncFd : imageInfo.pendingQsriSyncFds) {
5114 int syncWaitRet = syncHelper->wait(syncFd, 3000);
5115 if (syncWaitRet < 0) {
5116 mesa_loge("%s: Failed to wait for pending QSRI sync: sterror: %s errno: %d",
5117 __func__, strerror(errno), errno);
5118 }
5119 syncHelper->close(syncFd);
5120 }
5121 imageInfo.pendingQsriSyncFds.clear();
5122 }
5123 }
5124 #endif
5125 VkEncoder* enc = (VkEncoder*)context;
5126 #if defined(LINUX_GUEST_BUILD)
5127 auto imageInfoIt = info_VkImage.find(image);
5128 if (imageInfoIt != info_VkImage.end()) {
5129 auto& imageInfo = imageInfoIt->second;
5130 if (imageInfo.linearPeerImage) {
5131 enc->vkDestroyImage(device, imageInfo.linearPeerImage, pAllocator, true /* do lock */);
5132 }
5133 }
5134 #endif
5135 enc->vkDestroyImage(device, image, pAllocator, true /* do lock */);
5136 }
5137
5138 void ResourceTracker::on_vkGetImageMemoryRequirements(void* context, VkDevice device, VkImage image,
5139 VkMemoryRequirements* pMemoryRequirements) {
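    // Requirements are cached guest-side: if they were captured at image
    // creation (vkCreateImageWithRequirementsGOOGLE) or by an earlier query,
    // serve them without a host round trip.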
5140 AutoLock<RecursiveLock> lock(mLock);
5141
5142 auto it = info_VkImage.find(image);
5143 if (it == info_VkImage.end()) return;
5144
5145 auto& info = it->second;
5146
5147 if (info.baseRequirementsKnown) {
5148 *pMemoryRequirements = info.baseRequirements;
5149 return;
5150 }
5151
5152 lock.unlock();
5153
5154 VkEncoder* enc = (VkEncoder*)context;
5155
5156 enc->vkGetImageMemoryRequirements(device, image, pMemoryRequirements, true /* do lock */);
5157
5158 lock.lock();
5159
5160 transformImageMemoryRequirementsForGuestLocked(image, pMemoryRequirements);
5161
5162 info.baseRequirementsKnown = true;
5163 info.baseRequirements = *pMemoryRequirements;
5164 }
5165
5166 void ResourceTracker::on_vkGetImageMemoryRequirements2(void* context, VkDevice device,
5167 const VkImageMemoryRequirementsInfo2* pInfo,
5168 VkMemoryRequirements2* pMemoryRequirements) {
5169 VkEncoder* enc = (VkEncoder*)context;
5170 enc->vkGetImageMemoryRequirements2(device, pInfo, pMemoryRequirements, true /* do lock */);
5171 transformImageMemoryRequirements2ForGuest(pInfo->image, pMemoryRequirements);
5172 }
5173
5174 void ResourceTracker::on_vkGetImageMemoryRequirements2KHR(
5175 void* context, VkDevice device, const VkImageMemoryRequirementsInfo2* pInfo,
5176 VkMemoryRequirements2* pMemoryRequirements) {
5177 VkEncoder* enc = (VkEncoder*)context;
5178 enc->vkGetImageMemoryRequirements2KHR(device, pInfo, pMemoryRequirements, true /* do lock */);
5179 transformImageMemoryRequirements2ForGuest(pInfo->image, pMemoryRequirements);
5180 }
5181
5182 void ResourceTracker::on_vkGetImageSubresourceLayout(void* context, VkDevice device, VkImage image,
5183 const VkImageSubresource* pSubresource,
5184 VkSubresourceLayout* pLayout) {
5185 VkEncoder* enc = (VkEncoder*)context;
5186 VkImage targetImage = image;
5187 #if defined(LINUX_GUEST_BUILD)
5188 auto it = info_VkImage.find(image);
5189 if (it == info_VkImage.end()) return;
5190 const auto& info = it->second;
5191 if (info.linearPeerImage) {
5192 targetImage = info.linearPeerImage;
5193 }
5194 #endif
5195 enc->vkGetImageSubresourceLayout(device, targetImage, pSubresource, pLayout,
5196 true /* do lock */);
5197 }
5198
5199 VkResult ResourceTracker::on_vkBindImageMemory(void* context, VkResult, VkDevice device,
5200 VkImage image, VkDeviceMemory memory,
5201 VkDeviceSize memoryOffset) {
5202 VkEncoder* enc = (VkEncoder*)context;
5203 // Do not forward calls with invalid handles to host.
5204 if (info_VkDeviceMemory.find(memory) == info_VkDeviceMemory.end() ||
5205 info_VkImage.find(image) == info_VkImage.end()) {
5206 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
5207 }
5208 return enc->vkBindImageMemory(device, image, memory, memoryOffset, true /* do lock */);
5209 }
5210
5211 VkResult ResourceTracker::on_vkBindImageMemory2(void* context, VkResult, VkDevice device,
5212 uint32_t bindingCount,
5213 const VkBindImageMemoryInfo* pBindInfos) {
5214 VkEncoder* enc = (VkEncoder*)context;
5215
5216 if (bindingCount < 1 || !pBindInfos) {
5217 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
5218 }
5219
5220 for (uint32_t i = 0; i < bindingCount; i++) {
5221 const VkBindImageMemoryInfo& bimi = pBindInfos[i];
5222
5223 auto imageIt = info_VkImage.find(bimi.image);
5224 if (imageIt == info_VkImage.end()) {
5225 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
5226 }
5227
5228 if (bimi.memory != VK_NULL_HANDLE) {
5229 auto memoryIt = info_VkDeviceMemory.find(bimi.memory);
5230 if (memoryIt == info_VkDeviceMemory.end()) {
5231 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
5232 }
5233 }
5234 }
5235
5236 return enc->vkBindImageMemory2(device, bindingCount, pBindInfos, true /* do lock */);
5237 }
5238
5239 VkResult ResourceTracker::on_vkBindImageMemory2KHR(void* context, VkResult result, VkDevice device,
5240 uint32_t bindingCount,
5241 const VkBindImageMemoryInfo* pBindInfos) {
5242 return on_vkBindImageMemory2(context, result, device, bindingCount, pBindInfos);
5243 }
5244
5245 VkResult ResourceTracker::on_vkCreateBuffer(void* context, VkResult, VkDevice device,
5246 const VkBufferCreateInfo* pCreateInfo,
5247 const VkAllocationCallbacks* pAllocator,
5248 VkBuffer* pBuffer) {
5249 VkEncoder* enc = (VkEncoder*)context;
5250
5251 VkBufferCreateInfo localCreateInfo = vk_make_orphan_copy(*pCreateInfo);
5252 vk_struct_chain_iterator structChainIter = vk_make_chain_iterator(&localCreateInfo);
5253 VkExternalMemoryBufferCreateInfo localExtBufCi;
5254
5255 const VkExternalMemoryBufferCreateInfo* extBufCiPtr =
5256 vk_find_struct<VkExternalMemoryBufferCreateInfo>(pCreateInfo);
5257 if (extBufCiPtr) {
5258 localExtBufCi = vk_make_orphan_copy(*extBufCiPtr);
5259 vk_append_struct(&structChainIter, &localExtBufCi);
5260 }
5261
5262 VkBufferOpaqueCaptureAddressCreateInfo localCapAddrCi;
5263 const VkBufferOpaqueCaptureAddressCreateInfo* pCapAddrCi =
5264 vk_find_struct<VkBufferOpaqueCaptureAddressCreateInfo>(pCreateInfo);
5265 if (pCapAddrCi) {
5266 localCapAddrCi = vk_make_orphan_copy(*pCapAddrCi);
5267 vk_append_struct(&structChainIter, &localCapAddrCi);
5268 }
5269
5270 VkBufferDeviceAddressCreateInfoEXT localDevAddrCi;
5271 const VkBufferDeviceAddressCreateInfoEXT* pDevAddrCi =
5272 vk_find_struct<VkBufferDeviceAddressCreateInfoEXT>(pCreateInfo);
5273 if (pDevAddrCi) {
5274 localDevAddrCi = vk_make_orphan_copy(*pDevAddrCi);
5275 vk_append_struct(&structChainIter, &localDevAddrCi);
5276 }
5277
5278 #ifdef VK_USE_PLATFORM_FUCHSIA
5279 Optional<zx::vmo> vmo;
5280 bool isSysmemBackedMemory = false;
5281
5282 if (extBufCiPtr &&
5283 (extBufCiPtr->handleTypes & VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA)) {
5284 isSysmemBackedMemory = true;
5285 }
5286
5287 const auto* extBufferCollectionPtr =
5288 vk_find_struct<VkBufferCollectionBufferCreateInfoFUCHSIA>(pCreateInfo);
5289
5290 if (extBufferCollectionPtr) {
5291 const auto& collection =
5292 *reinterpret_cast<fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(
5293 extBufferCollectionPtr->collection);
5294 uint32_t index = extBufferCollectionPtr->index;
5295
5296 auto result = collection->WaitForBuffersAllocated();
5297 if (result.ok() && result->status == ZX_OK) {
5298 auto& info = result->buffer_collection_info;
5299 if (index < info.buffer_count) {
5300 vmo = gfxstream::guest::makeOptional(std::move(info.buffers[index].vmo));
5301 }
5302 } else {
5303 mesa_loge("WaitForBuffersAllocated failed: %d %d", result.status(),
5304 GET_STATUS_SAFE(result, status));
5305 }
5306
5307 if (vmo && vmo->is_valid()) {
5308 fidl::Arena arena;
5309 fuchsia_hardware_goldfish::wire::CreateBuffer2Params createParams(arena);
5310 createParams.set_size(arena, pCreateInfo->size)
5311 .set_memory_property(fuchsia_hardware_goldfish::wire::kMemoryPropertyDeviceLocal);
5312
5313 auto result = mControlDevice->CreateBuffer2(std::move(*vmo), createParams);
5314 if (!result.ok() ||
5315                 (result->is_error() && result->error_value() != ZX_ERR_ALREADY_EXISTS)) {
5316 mesa_loge("CreateBuffer2 failed: %d:%d", result.status(),
5317 GET_STATUS_SAFE(result, error_value()));
5318 }
5319 isSysmemBackedMemory = true;
5320 }
5321 }
5322 #endif // VK_USE_PLATFORM_FUCHSIA
5323
5324 VkResult res;
5325 VkMemoryRequirements memReqs;
5326
5327 if (supportsCreateResourcesWithRequirements()) {
5328 res = enc->vkCreateBufferWithRequirementsGOOGLE(device, &localCreateInfo, pAllocator,
5329 pBuffer, &memReqs, true /* do lock */);
5330 } else {
5331 res =
5332 enc->vkCreateBuffer(device, &localCreateInfo, pAllocator, pBuffer, true /* do lock */);
5333 }
5334
5335 if (res != VK_SUCCESS) return res;
5336
5337 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
5338 if (mCaps.vulkanCapset.colorBufferMemoryIndex == 0xFFFFFFFF) {
5339 mCaps.vulkanCapset.colorBufferMemoryIndex = getColorBufferMemoryIndex(context, device);
5340 }
5341 if (extBufCiPtr &&
5342 ((extBufCiPtr->handleTypes &
5343 VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID) ||
5344 (extBufCiPtr->handleTypes & VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT))) {
5345 updateMemoryTypeBits(&memReqs.memoryTypeBits, mCaps.vulkanCapset.colorBufferMemoryIndex);
5346 }
5347 #endif
5348
5349 AutoLock<RecursiveLock> lock(mLock);
5350
5351 auto it = info_VkBuffer.find(*pBuffer);
5352 if (it == info_VkBuffer.end()) return VK_ERROR_INITIALIZATION_FAILED;
5353
5354 auto& info = it->second;
5355
5356 info.createInfo = localCreateInfo;
5357 info.createInfo.pNext = nullptr;
5358
5359 if (supportsCreateResourcesWithRequirements()) {
5360 info.baseRequirementsKnown = true;
5361 info.baseRequirements = memReqs;
5362 }
5363
5364 if (extBufCiPtr) {
5365 info.external = true;
5366 info.externalCreateInfo = *extBufCiPtr;
5367 }
5368
5369 #ifdef VK_USE_PLATFORM_FUCHSIA
5370 if (isSysmemBackedMemory) {
5371 info.isSysmemBackedMemory = true;
5372 }
5373 #endif
5374
5375 return res;
5376 }
5377
5378 void ResourceTracker::on_vkDestroyBuffer(void* context, VkDevice device, VkBuffer buffer,
5379 const VkAllocationCallbacks* pAllocator) {
5380 VkEncoder* enc = (VkEncoder*)context;
5381 enc->vkDestroyBuffer(device, buffer, pAllocator, true /* do lock */);
5382 }
5383
5384 void ResourceTracker::on_vkGetBufferMemoryRequirements(void* context, VkDevice device,
5385 VkBuffer buffer,
5386 VkMemoryRequirements* pMemoryRequirements) {
5387 AutoLock<RecursiveLock> lock(mLock);
5388
5389 auto it = info_VkBuffer.find(buffer);
5390 if (it == info_VkBuffer.end()) return;
5391
5392 auto& info = it->second;
5393
5394 if (info.baseRequirementsKnown) {
5395 *pMemoryRequirements = info.baseRequirements;
5396 return;
5397 }
5398
5399 lock.unlock();
5400
5401 VkEncoder* enc = (VkEncoder*)context;
5402 enc->vkGetBufferMemoryRequirements(device, buffer, pMemoryRequirements, true /* do lock */);
5403
5404 lock.lock();
5405
5406 info.baseRequirementsKnown = true;
5407 info.baseRequirements = *pMemoryRequirements;
5408 }
5409
5410 void ResourceTracker::on_vkGetBufferMemoryRequirements2(
5411 void* context, VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo,
5412 VkMemoryRequirements2* pMemoryRequirements) {
5413 VkEncoder* enc = (VkEncoder*)context;
5414 enc->vkGetBufferMemoryRequirements2(device, pInfo, pMemoryRequirements, true /* do lock */);
5415 transformBufferMemoryRequirements2ForGuest(pInfo->buffer, pMemoryRequirements);
5416 }
5417
5418 void ResourceTracker::on_vkGetBufferMemoryRequirements2KHR(
5419 void* context, VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo,
5420 VkMemoryRequirements2* pMemoryRequirements) {
5421 VkEncoder* enc = (VkEncoder*)context;
5422 enc->vkGetBufferMemoryRequirements2KHR(device, pInfo, pMemoryRequirements, true /* do lock */);
5423 transformBufferMemoryRequirements2ForGuest(pInfo->buffer, pMemoryRequirements);
5424 }
5425
5426 VkResult ResourceTracker::on_vkBindBufferMemory(void* context, VkResult, VkDevice device,
5427 VkBuffer buffer, VkDeviceMemory memory,
5428 VkDeviceSize memoryOffset) {
5429 VkEncoder* enc = (VkEncoder*)context;
5430 return enc->vkBindBufferMemory(device, buffer, memory, memoryOffset, true /* do lock */);
5431 }
5432
5433 VkResult ResourceTracker::on_vkBindBufferMemory2(void* context, VkResult, VkDevice device,
5434 uint32_t bindInfoCount,
5435 const VkBindBufferMemoryInfo* pBindInfos) {
5436 VkEncoder* enc = (VkEncoder*)context;
5437 return enc->vkBindBufferMemory2(device, bindInfoCount, pBindInfos, true /* do lock */);
5438 }
5439
5440 VkResult ResourceTracker::on_vkBindBufferMemory2KHR(void* context, VkResult, VkDevice device,
5441 uint32_t bindInfoCount,
5442 const VkBindBufferMemoryInfo* pBindInfos) {
5443 VkEncoder* enc = (VkEncoder*)context;
5444 return enc->vkBindBufferMemory2KHR(device, bindInfoCount, pBindInfos, true /* do lock */);
5445 }
5446
5447 VkResult ResourceTracker::on_vkCreateSemaphore(void* context, VkResult input_result,
5448 VkDevice device,
5449 const VkSemaphoreCreateInfo* pCreateInfo,
5450 const VkAllocationCallbacks* pAllocator,
5451 VkSemaphore* pSemaphore) {
5452 (void)input_result;
5453 VkEncoder* enc = (VkEncoder*)context;
5454
5455 VkSemaphoreCreateInfo finalCreateInfo = *pCreateInfo;
5456
5457 const VkExportSemaphoreCreateInfoKHR* exportSemaphoreInfoPtr =
5458 vk_find_struct<VkExportSemaphoreCreateInfoKHR>(pCreateInfo);
5459
5460 #ifdef VK_USE_PLATFORM_FUCHSIA
5461 bool exportEvent =
5462 exportSemaphoreInfoPtr && (exportSemaphoreInfoPtr->handleTypes &
5463 VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA);
5464
5465 if (exportEvent) {
5466 finalCreateInfo.pNext = nullptr;
5467         // If a timeline semaphore type was specified, keep it on the chain.
5468 const VkSemaphoreTypeCreateInfo* typeCi =
5469 vk_find_struct<VkSemaphoreTypeCreateInfo>(pCreateInfo);
5470 if (typeCi) finalCreateInfo.pNext = typeCi;
5471 }
5472 #endif
5473
5474 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
5475 bool exportSyncFd = exportSemaphoreInfoPtr && (exportSemaphoreInfoPtr->handleTypes &
5476 VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT);
5477
5478 if (exportSyncFd) {
5479 finalCreateInfo.pNext = nullptr;
5480         // If a timeline semaphore type was specified, keep it on the chain.
5481 const VkSemaphoreTypeCreateInfo* typeCi =
5482 vk_find_struct<VkSemaphoreTypeCreateInfo>(pCreateInfo);
5483 if (typeCi) finalCreateInfo.pNext = typeCi;
5484 }
5485 #endif
5486 input_result = enc->vkCreateSemaphore(device, &finalCreateInfo, pAllocator, pSemaphore,
5487 true /* do lock */);
5488
5489 zx_handle_t event_handle = ZX_HANDLE_INVALID;
5490
5491 #ifdef VK_USE_PLATFORM_FUCHSIA
5492 if (exportEvent) {
5493 zx_event_create(0, &event_handle);
5494 }
5495 #endif
5496
5497 AutoLock<RecursiveLock> lock(mLock);
5498
5499 auto it = info_VkSemaphore.find(*pSemaphore);
5500 if (it == info_VkSemaphore.end()) return VK_ERROR_INITIALIZATION_FAILED;
5501
5502 auto& info = it->second;
5503
5504 info.device = device;
5505 info.eventHandle = event_handle;
5506 #ifdef VK_USE_PLATFORM_FUCHSIA
5507 info.eventKoid = getEventKoid(info.eventHandle);
5508 #endif
5509
5510 #if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
5511 if (exportSyncFd) {
5512 if (mFeatureInfo->hasVirtioGpuNativeSync) {
5513 VkResult result;
5514 int64_t osHandle;
5515 uint64_t hostFenceHandle = get_host_u64_VkSemaphore(*pSemaphore);
5516
5517 result = createFence(device, hostFenceHandle, osHandle);
5518 if (result != VK_SUCCESS) return result;
5519
5520 info.syncFd.emplace(osHandle);
5521 } else {
5522 #if GFXSTREAM_ENABLE_GUEST_GOLDFISH
5523 ensureSyncDeviceFd();
5524
5525 if (exportSyncFd) {
5526 int syncFd = -1;
5527 goldfish_sync_queue_work(
5528 mSyncDeviceFd, get_host_u64_VkSemaphore(*pSemaphore) /* the handle */,
5529 GOLDFISH_SYNC_VULKAN_SEMAPHORE_SYNC /* thread handle (doubling as type field) */
5530 ,
5531 &syncFd);
5532 info.syncFd.emplace(syncFd);
5533 }
#endif
        }
    }
#endif

    return VK_SUCCESS;
}

void ResourceTracker::on_vkDestroySemaphore(void* context, VkDevice device, VkSemaphore semaphore,
                                            const VkAllocationCallbacks* pAllocator) {
    VkEncoder* enc = (VkEncoder*)context;
    enc->vkDestroySemaphore(device, semaphore, pAllocator, true /* do lock */);
}

// https://www.khronos.org/registry/vulkan/specs/1.0-extensions/html/vkspec.html#vkGetSemaphoreFdKHR
// Each call to vkGetSemaphoreFdKHR must create a new file descriptor and transfer ownership
// of it to the application. To avoid leaking resources, the application must release ownership
// of the file descriptor when it is no longer needed.
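//
// Illustrative caller-side flow (a sketch using only standard Vulkan entry
// points; `device` and `semaphore` are assumed to already exist, with the
// semaphore created with sync-fd export enabled):
//
//   VkSemaphoreGetFdInfoKHR getFdInfo = {
//       .sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR,
//       .semaphore = semaphore,
//       .handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT,
//   };
//   int fd = -1;
//   if (vkGetSemaphoreFdKHR(device, &getFdInfo, &fd) == VK_SUCCESS) {
//       // ... hand fd to a waiter ...
//       close(fd);  // the caller owns the fd and must release it
//   }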
VkResult ResourceTracker::on_vkGetSemaphoreFdKHR(void* context, VkResult, VkDevice device,
                                                 const VkSemaphoreGetFdInfoKHR* pGetFdInfo,
                                                 int* pFd) {
#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
    VkEncoder* enc = (VkEncoder*)context;
    bool getSyncFd = pGetFdInfo->handleType & VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;

    if (getSyncFd) {
        AutoLock<RecursiveLock> lock(mLock);
        auto it = info_VkSemaphore.find(pGetFdInfo->semaphore);
        if (it == info_VkSemaphore.end()) return VK_ERROR_OUT_OF_HOST_MEMORY;
        auto& semInfo = it->second;
        // syncFd is expected to hold a value here.
        auto* syncHelper =
            ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->syncHelper();
        *pFd = syncHelper->dup(semInfo.syncFd.value_or(-1));
        return VK_SUCCESS;
    } else {
        // opaque fd
        int hostFd = 0;
        VkResult result = enc->vkGetSemaphoreFdKHR(device, pGetFdInfo, &hostFd, true /* do lock */);
        if (result != VK_SUCCESS) {
            return result;
        }
        *pFd = memfd_create("vk_opaque_fd", 0);
        write(*pFd, &hostFd, sizeof(hostFd));
        return VK_SUCCESS;
    }
#else
    (void)context;
    (void)device;
    (void)pGetFdInfo;
    (void)pFd;
    return VK_ERROR_INCOMPATIBLE_DRIVER;
#endif
}

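// Note on VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT: this driver does
// not pass a real kernel sync object between processes for that handle type.
// Export (above) creates a memfd whose only payload is the host-side handle
// value; import (below) seeks back to the start, reads that value out, and
// forwards it to the host. Sync fds, by contrast, are tracked guest-side in
// info_VkSemaphore, and importing one replaces (and closes) any fd imported
// earlier.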
VkResult ResourceTracker::on_vkImportSemaphoreFdKHR(
    void* context, VkResult input_result, VkDevice device,
    const VkImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo) {
#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
    VkEncoder* enc = (VkEncoder*)context;
    if (input_result != VK_SUCCESS) {
        return input_result;
    }

    auto* syncHelper = ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->syncHelper();

    if (pImportSemaphoreFdInfo->handleType & VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT) {
        VkImportSemaphoreFdInfoKHR tmpInfo = *pImportSemaphoreFdInfo;

        AutoLock<RecursiveLock> lock(mLock);

        auto semaphoreIt = info_VkSemaphore.find(pImportSemaphoreFdInfo->semaphore);
        auto& info = semaphoreIt->second;

        if (info.syncFd.value_or(-1) >= 0) {
            syncHelper->close(info.syncFd.value());
        }

        info.syncFd.emplace(pImportSemaphoreFdInfo->fd);

        return VK_SUCCESS;
    } else {
        int fd = pImportSemaphoreFdInfo->fd;
        int err = lseek(fd, 0, SEEK_SET);
        if (err == -1) {
            mesa_loge("lseek fail on import semaphore");
        }
        int hostFd = 0;
        read(fd, &hostFd, sizeof(hostFd));
        VkImportSemaphoreFdInfoKHR tmpInfo = *pImportSemaphoreFdInfo;
        tmpInfo.fd = hostFd;
        VkResult result = enc->vkImportSemaphoreFdKHR(device, &tmpInfo, true /* do lock */);
        syncHelper->close(fd);
        return result;
    }
#else
    (void)context;
    (void)input_result;
    (void)device;
    (void)pImportSemaphoreFdInfo;
    return VK_ERROR_INCOMPATIBLE_DRIVER;
#endif
}

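// Linux-only (non-Android): reports the memory types usable for an imported
// dma-buf fd. The fd is assumed to reference a host color buffer allocation,
// so the color buffer memory index (queried once from the host and cached in
// mCaps.vulkanCapset) is reported as the supported memory type.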
VkResult ResourceTracker::on_vkGetMemoryFdPropertiesKHR(
    void* context, VkResult, VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType, int fd,
    VkMemoryFdPropertiesKHR* pMemoryFdProperties) {
#if defined(__linux__) && !defined(VK_USE_PLATFORM_ANDROID_KHR)
    if (!(handleType & VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT)) {
        mesa_loge("%s: VK_KHR_external_memory_fd behavior not defined for handleType: 0x%x\n",
                  __func__, handleType);
        return VK_ERROR_INVALID_EXTERNAL_HANDLE;
    }
    // Sanity-check device
    AutoLock<RecursiveLock> lock(mLock);
    auto deviceIt = info_VkDevice.find(device);
    if (deviceIt == info_VkDevice.end()) {
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }
    // TODO: Verify FD valid ?
    (void)fd;

    if (mCaps.vulkanCapset.colorBufferMemoryIndex == 0xFFFFFFFF) {
        mCaps.vulkanCapset.colorBufferMemoryIndex = getColorBufferMemoryIndex(context, device);
    }

    updateMemoryTypeBits(&pMemoryFdProperties->memoryTypeBits,
                         mCaps.vulkanCapset.colorBufferMemoryIndex);

    return VK_SUCCESS;
#else
    (void)context;
    (void)device;
    (void)handleType;
    (void)fd;
    (void)pMemoryFdProperties;
    return VK_ERROR_INCOMPATIBLE_DRIVER;
#endif
}

VkResult ResourceTracker::on_vkGetMemoryFdKHR(void* context, VkResult, VkDevice device,
                                              const VkMemoryGetFdInfoKHR* pGetFdInfo, int* pFd) {
#if defined(__linux__) && !defined(VK_USE_PLATFORM_ANDROID_KHR)
    if (!pGetFdInfo) return VK_ERROR_OUT_OF_HOST_MEMORY;
    if (!pGetFdInfo->memory) return VK_ERROR_OUT_OF_HOST_MEMORY;

    if (!(pGetFdInfo->handleType & (VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
                                    VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT))) {
        mesa_loge("%s: Export operation not defined for handleType: 0x%x\n", __func__,
                  pGetFdInfo->handleType);
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }
    // Sanity-check device
    AutoLock<RecursiveLock> lock(mLock);
    auto deviceIt = info_VkDevice.find(device);
    if (deviceIt == info_VkDevice.end()) {
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }

    auto deviceMemIt = info_VkDeviceMemory.find(pGetFdInfo->memory);
    if (deviceMemIt == info_VkDeviceMemory.end()) {
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }
    auto& info = deviceMemIt->second;

    if (!info.blobPtr) {
        mesa_loge("%s: VkDeviceMemory does not have a resource available for export.\n", __func__);
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }

    VirtGpuExternalHandle handle{};
    int ret = info.blobPtr->exportBlob(handle);
    if (ret != 0 || handle.osHandle < 0) {
        mesa_loge("%s: Failed to export host resource to FD.\n", __func__);
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }
    *pFd = handle.osHandle;
    return VK_SUCCESS;
#else
    (void)context;
    (void)device;
    (void)pGetFdInfo;
    (void)pFd;
    return VK_ERROR_INCOMPATIBLE_DRIVER;
#endif
}

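// Flushes any recorded-but-unsent commands in the staging streams of the
// given command buffers. The recursion descends into secondary command
// buffers (cb->subObjects) before flushing the current level, so everything a
// command buffer executes via vkCmdExecuteCommands reaches the host before
// the command buffer itself; hence "bottom-up".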
void ResourceTracker::flushCommandBufferPendingCommandsBottomUp(
    void* context, VkQueue queue, const std::vector<VkCommandBuffer>& workingSet) {
    if (workingSet.empty()) return;

    std::vector<VkCommandBuffer> nextLevel;
    for (auto commandBuffer : workingSet) {
        struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
        forAllObjects(cb->subObjects, [&nextLevel](void* secondary) {
            nextLevel.push_back((VkCommandBuffer)secondary);
        });
    }

    flushCommandBufferPendingCommandsBottomUp(context, queue, nextLevel);

    // After this point, everyone at the previous level has been flushed
    for (auto cmdbuf : workingSet) {
        struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(cmdbuf);

        // There are no pending commands here; skip. (case 1: no stream was ever created)
        if (!cb->privateStream) continue;

        unsigned char* writtenPtr = 0;
        size_t written = 0;
        CommandBufferStagingStream* cmdBufStream =
            static_cast<CommandBufferStagingStream*>(cb->privateStream);
        cmdBufStream->getWritten(&writtenPtr, &written);

        // There are no pending commands here; skip. (case 2: stream created but no new recordings)
        if (!written) continue;

        // There are pending commands to flush.
        VkEncoder* enc = (VkEncoder*)context;
        VkDeviceMemory deviceMemory = cmdBufStream->getDeviceMemory();
        VkDeviceSize dataOffset = 0;
        if (mFeatureInfo->hasVulkanAuxCommandMemory) {
            // For suballocations, deviceMemory is an alias VkDeviceMemory;
            // get the underlying VkDeviceMemory for the given alias.
            deviceMemoryTransform_tohost(&deviceMemory, 1 /*memoryCount*/, &dataOffset,
                                         1 /*offsetCount*/, nullptr /*size*/, 0 /*sizeCount*/,
                                         nullptr /*typeIndex*/, 0 /*typeIndexCount*/,
                                         nullptr /*typeBits*/, 0 /*typeBitCounts*/);

            // Mark the stream as flushing before flushing commands.
            cmdBufStream->markFlushing();
            enc->vkQueueFlushCommandsFromAuxMemoryGOOGLE(queue, cmdbuf, deviceMemory, dataOffset,
                                                         written, true /*do lock*/);
        } else {
            enc->vkQueueFlushCommandsGOOGLE(queue, cmdbuf, written, (const void*)writtenPtr,
                                            true /* do lock */);
        }
        // Reset this stream. Flushing happens on vkQueueSubmit, and the Vulkan
        // spec states that once a command buffer is submitted, the application
        // must not modify it while the device may be processing the commands
        // recorded to it. So it is safe to call reset() here: the command
        // buffer associated with this stream only leaves the pending state
        // after queue submission completes and the host has read the data.
        cmdBufStream->reset();
    }
}

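// When the same VkQueue is driven from more than one encoder (one per guest
// thread), order the two command streams on the host with a sequence-number
// handshake: the previous encoder emits vkQueueHostSyncGOOGLE with oldSeq + 1
// and flushes, and the new encoder emits oldSeq + 2, so the host processes
// the streams back to back. This is only needed when queue submission is
// asynchronous; synchronous submits are already serialized by the host.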
uint32_t ResourceTracker::syncEncodersForQueue(VkQueue queue, VkEncoder* currentEncoder) {
    if (!supportsAsyncQueueSubmit()) {
        return 0;
    }

    struct goldfish_VkQueue* q = as_goldfish_VkQueue(queue);
    if (!q) return 0;

    auto lastEncoder = q->lastUsedEncoder;

    if (lastEncoder == currentEncoder) return 0;

    currentEncoder->incRef();

    q->lastUsedEncoder = currentEncoder;

    if (!lastEncoder) return 0;

    auto oldSeq = q->sequenceNumber;
    q->sequenceNumber += 2;
    lastEncoder->vkQueueHostSyncGOOGLE(queue, false, oldSeq + 1, true /* do lock */);
    lastEncoder->flush();
    currentEncoder->vkQueueHostSyncGOOGLE(queue, true, oldSeq + 2, true /* do lock */);

    if (lastEncoder->decRef()) {
        q->lastUsedEncoder = nullptr;
    }

    return 0;
}

template <class VkSubmitInfoType>
void ResourceTracker::flushStagingStreams(void* context, VkQueue queue, uint32_t submitCount,
                                          const VkSubmitInfoType* pSubmits) {
    std::vector<VkCommandBuffer> toFlush;
    for (uint32_t i = 0; i < submitCount; ++i) {
        for (uint32_t j = 0; j < getCommandBufferCount(pSubmits[i]); ++j) {
            toFlush.push_back(getCommandBuffer(pSubmits[i], j));
        }
    }

    std::unordered_set<VkDescriptorSet> pendingSets;
    collectAllPendingDescriptorSetsBottomUp(toFlush, pendingSets);
    commitDescriptorSetUpdates(context, queue, pendingSets);

    flushCommandBufferPendingCommandsBottomUp(context, queue, toFlush);

    for (auto cb : toFlush) {
        resetCommandBufferPendingTopology(cb);
    }
}

VkResult ResourceTracker::on_vkQueueSubmit(void* context, VkResult input_result, VkQueue queue,
                                           uint32_t submitCount, const VkSubmitInfo* pSubmits,
                                           VkFence fence) {
    AEMU_SCOPED_TRACE("on_vkQueueSubmit");
    return on_vkQueueSubmitTemplate<VkSubmitInfo>(context, input_result, queue, submitCount,
                                                  pSubmits, fence);
}

VkResult ResourceTracker::on_vkQueueSubmit2(void* context, VkResult input_result, VkQueue queue,
                                            uint32_t submitCount, const VkSubmitInfo2* pSubmits,
                                            VkFence fence) {
    AEMU_SCOPED_TRACE("on_vkQueueSubmit2");
    return on_vkQueueSubmitTemplate<VkSubmitInfo2>(context, input_result, queue, submitCount,
                                                   pSubmits, fence);
}

VkResult ResourceTracker::vkQueueSubmitEnc(VkEncoder* enc, VkQueue queue, uint32_t submitCount,
                                           const VkSubmitInfo* pSubmits, VkFence fence) {
    if (supportsAsyncQueueSubmit()) {
        enc->vkQueueSubmitAsyncGOOGLE(queue, submitCount, pSubmits, fence, true /* do lock */);
        return VK_SUCCESS;
    } else {
        return enc->vkQueueSubmit(queue, submitCount, pSubmits, fence, true /* do lock */);
    }
}

VkResult ResourceTracker::vkQueueSubmitEnc(VkEncoder* enc, VkQueue queue, uint32_t submitCount,
                                           const VkSubmitInfo2* pSubmits, VkFence fence) {
    if (supportsAsyncQueueSubmit()) {
        enc->vkQueueSubmitAsync2GOOGLE(queue, submitCount, pSubmits, fence, true /* do lock */);
        return VK_SUCCESS;
    } else {
        return enc->vkQueueSubmit2(queue, submitCount, pSubmits, fence, true /* do lock */);
    }
}

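// Common submit path for VkSubmitInfo and VkSubmitInfo2. Wait semaphores
// backed by external objects (Zircon events or sync fds) cannot be waited on
// by the host directly, so those waits run on guest worker threads first; the
// real submit is then preceded by a small host-only submit that signals the
// affected wait semaphores. Signal semaphores backed by external objects,
// plus any exported fence fd, are signaled from a worker task after the queue
// goes idle.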
template <typename VkSubmitInfoType>
VkResult ResourceTracker::on_vkQueueSubmitTemplate(void* context, VkResult input_result,
                                                   VkQueue queue, uint32_t submitCount,
                                                   const VkSubmitInfoType* pSubmits,
                                                   VkFence fence) {
    flushStagingStreams(context, queue, submitCount, pSubmits);

    std::vector<VkSemaphore> pre_signal_semaphores;
    std::vector<zx_handle_t> pre_signal_events;
    std::vector<int> pre_signal_sync_fds;
    std::vector<std::pair<zx_handle_t, zx_koid_t>> post_wait_events;
    std::vector<int> post_wait_sync_fds;

    VkEncoder* enc = (VkEncoder*)context;

    AutoLock<RecursiveLock> lock(mLock);

    for (uint32_t i = 0; i < submitCount; ++i) {
        for (uint32_t j = 0; j < getWaitSemaphoreCount(pSubmits[i]); ++j) {
            VkSemaphore semaphore = getWaitSemaphore(pSubmits[i], j);
            auto it = info_VkSemaphore.find(semaphore);
            if (it != info_VkSemaphore.end()) {
                auto& semInfo = it->second;
#ifdef VK_USE_PLATFORM_FUCHSIA
                if (semInfo.eventHandle) {
                    pre_signal_events.push_back(semInfo.eventHandle);
                    pre_signal_semaphores.push_back(semaphore);
                }
#endif
#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
                if (semInfo.syncFd.has_value()) {
                    pre_signal_sync_fds.push_back(semInfo.syncFd.value());
                    pre_signal_semaphores.push_back(semaphore);
                }
#endif
            }
        }
        for (uint32_t j = 0; j < getSignalSemaphoreCount(pSubmits[i]); ++j) {
            auto it = info_VkSemaphore.find(getSignalSemaphore(pSubmits[i], j));
            if (it != info_VkSemaphore.end()) {
                auto& semInfo = it->second;
#ifdef VK_USE_PLATFORM_FUCHSIA
                if (semInfo.eventHandle) {
                    post_wait_events.push_back({semInfo.eventHandle, semInfo.eventKoid});
#ifndef FUCHSIA_NO_TRACE
                    if (semInfo.eventKoid != ZX_KOID_INVALID) {
                        // TODO(fxbug.dev/42144867): Remove the "semaphore"
                        // FLOW_END events once it is removed from clients
                        // (for example, gfx Engine).
                        TRACE_FLOW_END("gfx", "semaphore", semInfo.eventKoid);
                        TRACE_FLOW_BEGIN("gfx", "goldfish_post_wait_event", semInfo.eventKoid);
                    }
#endif
                }
#endif
#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
                if (semInfo.syncFd.value_or(-1) >= 0) {
                    post_wait_sync_fds.push_back(semInfo.syncFd.value());
                }
#endif
            }
        }
    }
    lock.unlock();

    if (pre_signal_semaphores.empty()) {
        input_result = vkQueueSubmitEnc(enc, queue, submitCount, pSubmits, fence);
        if (input_result != VK_SUCCESS) return input_result;
    } else {
        // Schedule waits on the OS external objects and signal the wait
        // semaphores on a separate thread.
        std::vector<WorkPool::Task> preSignalTasks;
        std::vector<WorkPool::Task> preSignalQueueSubmitTasks;
#ifdef VK_USE_PLATFORM_FUCHSIA
        for (auto event : pre_signal_events) {
            preSignalTasks.push_back([event] {
                zx_object_wait_one(event, ZX_EVENT_SIGNALED, ZX_TIME_INFINITE, nullptr);
            });
        }
#endif
#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
        for (auto fd : pre_signal_sync_fds) {
            // https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/VkImportSemaphoreFdInfoKHR.html
            // fd == -1 is treated as already signaled
            if (fd != -1) {
                preSignalTasks.push_back([fd] {
                    auto* syncHelper =
                        ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->syncHelper();
                    syncHelper->wait(fd, 3000);
                });
            }
        }
#endif
        if (!preSignalTasks.empty()) {
            auto waitGroupHandle = mWorkPool.schedule(preSignalTasks);
            mWorkPool.waitAll(waitGroupHandle);
        }

        // Use the old version of VkSubmitInfo
        VkSubmitInfo submit_info = {
            .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
            .waitSemaphoreCount = 0,
            .pWaitSemaphores = nullptr,
            .pWaitDstStageMask = nullptr,
            .signalSemaphoreCount = static_cast<uint32_t>(pre_signal_semaphores.size()),
            .pSignalSemaphores = pre_signal_semaphores.data()};
        vkQueueSubmitEnc(enc, queue, 1, &submit_info, VK_NULL_HANDLE);
        input_result = vkQueueSubmitEnc(enc, queue, submitCount, pSubmits, fence);
        if (input_result != VK_SUCCESS) return input_result;
    }
    lock.lock();
    int externalFenceFdToSignal = -1;

#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(__linux__)
    if (fence != VK_NULL_HANDLE) {
        auto it = info_VkFence.find(fence);
        if (it != info_VkFence.end()) {
            const auto& info = it->second;
            if (info.syncFd >= 0) {
                externalFenceFdToSignal = info.syncFd;
            }
        }
    }
#endif
    if (externalFenceFdToSignal >= 0 || !post_wait_events.empty() || !post_wait_sync_fds.empty()) {
        std::vector<WorkPool::Task> tasks;

        tasks.push_back([queue, externalFenceFdToSignal, post_wait_events /* copy of zx handles */,
                         post_wait_sync_fds /* copy of sync fds */] {
            auto hostConn = ResourceTracker::threadingCallbacks.hostConnectionGetFunc();
            auto vkEncoder = ResourceTracker::threadingCallbacks.vkEncoderGetFunc(hostConn);
            auto waitIdleRes = vkEncoder->vkQueueWaitIdle(queue, true /* do lock */);
#ifdef VK_USE_PLATFORM_FUCHSIA
            AEMU_SCOPED_TRACE("on_vkQueueSubmit::SignalSemaphores");
            (void)externalFenceFdToSignal;
            for (auto& [event, koid] : post_wait_events) {
#ifndef FUCHSIA_NO_TRACE
                if (koid != ZX_KOID_INVALID) {
                    TRACE_FLOW_END("gfx", "goldfish_post_wait_event", koid);
                    TRACE_FLOW_BEGIN("gfx", "event_signal", koid);
                }
#endif
                zx_object_signal(event, 0, ZX_EVENT_SIGNALED);
            }
#endif
#if GFXSTREAM_ENABLE_GUEST_GOLDFISH
            for (auto& fd : post_wait_sync_fds) {
                goldfish_sync_signal(fd);
            }

            if (externalFenceFdToSignal >= 0) {
                mesa_logi("%s: external fence real signal: %d\n", __func__,
                          externalFenceFdToSignal);
                goldfish_sync_signal(externalFenceFdToSignal);
            }
#endif
        });
        auto queueAsyncWaitHandle = mWorkPool.schedule(tasks);
        auto& queueWorkItems = mQueueSensitiveWorkPoolItems[queue];
        queueWorkItems.push_back(queueAsyncWaitHandle);
    }
    return VK_SUCCESS;
}

VkResult ResourceTracker::on_vkQueueWaitIdle(void* context, VkResult, VkQueue queue) {
    VkEncoder* enc = (VkEncoder*)context;

    AutoLock<RecursiveLock> lock(mLock);
    std::vector<WorkPool::WaitGroupHandle> toWait = mQueueSensitiveWorkPoolItems[queue];
    mQueueSensitiveWorkPoolItems[queue].clear();
    lock.unlock();

    if (toWait.empty()) {
        mesa_logi("%s: No queue-specific work pool items\n", __func__);
        return enc->vkQueueWaitIdle(queue, true /* do lock */);
    }

    for (auto handle : toWait) {
        mesa_logi("%s: waiting on work group item: %llu\n", __func__, (unsigned long long)handle);
        mWorkPool.waitAll(handle);
    }

    // now done waiting, get the host's opinion
    return enc->vkQueueWaitIdle(queue, true /* do lock */);
}

#ifdef VK_USE_PLATFORM_ANDROID_KHR
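// VkNativeBufferANDROID carries a gralloc native_handle_t that is only
// meaningful inside the guest. Before encoding a call to the host, the copy
// of the struct that will be sent is rewritten so its handle field holds the
// host-side color buffer handle instead.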
void ResourceTracker::unwrap_VkNativeBufferANDROID(const VkNativeBufferANDROID* inputNativeInfo,
                                                   VkNativeBufferANDROID* outputNativeInfo) {
    if (!inputNativeInfo || !inputNativeInfo->handle) {
        return;
    }

    if (!outputNativeInfo || !outputNativeInfo->handle) {
        mesa_loge("FATAL: Local native buffer info not properly allocated!");
        abort();
    }

    auto* gralloc = ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->grallocHelper();
    const native_handle_t* nativeHandle = (const native_handle_t*)inputNativeInfo->handle;
    *(uint32_t*)(outputNativeInfo->handle) = gralloc->getHostHandle(nativeHandle);
}

void ResourceTracker::unwrap_VkBindImageMemorySwapchainInfoKHR(
    const VkBindImageMemorySwapchainInfoKHR* inputBimsi,
    VkBindImageMemorySwapchainInfoKHR* outputBimsi) {
    if (!inputBimsi || !inputBimsi->swapchain) {
        return;
    }

    if (!outputBimsi || !outputBimsi->swapchain) {
        return;
    }

    // Android-based swapchains are implemented by the Android framework's
    // libvulkan. They exist only within the guest and should not be sent to
    // the host.
    outputBimsi->swapchain = VK_NULL_HANDLE;
}
#endif

void ResourceTracker::unwrap_vkCreateImage_pCreateInfo(const VkImageCreateInfo* pCreateInfo,
                                                       VkImageCreateInfo* local_pCreateInfo) {
#ifdef VK_USE_PLATFORM_ANDROID_KHR
    const VkNativeBufferANDROID* inputNativeInfo =
        vk_find_struct<VkNativeBufferANDROID>(pCreateInfo);

    VkNativeBufferANDROID* outputNativeInfo = const_cast<VkNativeBufferANDROID*>(
        vk_find_struct<VkNativeBufferANDROID>(local_pCreateInfo));

    unwrap_VkNativeBufferANDROID(inputNativeInfo, outputNativeInfo);
#endif
}

void ResourceTracker::unwrap_vkAcquireImageANDROID_nativeFenceFd(int fd, int* fd_out) {
#ifdef VK_USE_PLATFORM_ANDROID_KHR
    (void)fd_out;
    if (fd != -1) {
        AEMU_SCOPED_TRACE("waitNativeFenceInAcquire");
        // Implicit Synchronization
        auto* syncHelper =
            ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->syncHelper();
        syncHelper->wait(fd, 3000);
        // From libvulkan's swapchain.cpp:
        // """
        // NOTE: we're relying on AcquireImageANDROID to close fence_clone,
        // even if the call fails. We could close it ourselves on failure, but
        // that would create a race condition if the driver closes it on a
        // failure path: some other thread might create an fd with the same
        // number between the time the driver closes it and the time we close
        // it. We must assume one of: the driver *always* closes it even on
        // failure, or *never* closes it on failure.
        // """
        // Therefore, assume contract where we need to close fd in this driver
        syncHelper->close(fd);
    }
#endif
}

void ResourceTracker::unwrap_VkBindImageMemory2_pBindInfos(
    uint32_t bindInfoCount, const VkBindImageMemoryInfo* inputBindInfos,
    VkBindImageMemoryInfo* outputBindInfos) {
#ifdef VK_USE_PLATFORM_ANDROID_KHR
    for (uint32_t i = 0; i < bindInfoCount; ++i) {
        const VkBindImageMemoryInfo* inputBindInfo = &inputBindInfos[i];
        VkBindImageMemoryInfo* outputBindInfo = &outputBindInfos[i];

        const VkNativeBufferANDROID* inputNativeInfo =
            vk_find_struct<VkNativeBufferANDROID>(inputBindInfo);

        VkNativeBufferANDROID* outputNativeInfo = const_cast<VkNativeBufferANDROID*>(
            vk_find_struct<VkNativeBufferANDROID>(outputBindInfo));

        unwrap_VkNativeBufferANDROID(inputNativeInfo, outputNativeInfo);

        const VkBindImageMemorySwapchainInfoKHR* inputBimsi =
            vk_find_struct<VkBindImageMemorySwapchainInfoKHR>(inputBindInfo);

        VkBindImageMemorySwapchainInfoKHR* outputBimsi =
            const_cast<VkBindImageMemorySwapchainInfoKHR*>(
                vk_find_struct<VkBindImageMemorySwapchainInfoKHR>(outputBindInfo));

        unwrap_VkBindImageMemorySwapchainInfoKHR(inputBimsi, outputBimsi);
    }
#endif
}

// Action of vkMapMemoryIntoAddressSpaceGOOGLE:
// 1. preprocess (on_vkMapMemoryIntoAddressSpaceGOOGLE_pre):
//    uses the address space device to reserve the right size of memory.
// 2. the reservation results in a physical address. the physical
//    address is set as |*pAddress|.
// 3. after pre, the API call is encoded to the host, where the
//    value of pAddress is also sent (the physical address).
// 4. the host will obtain the actual gpu pointer and send it
//    back out in |*pAddress|.
// 5. postprocess (on_vkMapMemoryIntoAddressSpaceGOOGLE) will run,
//    using the mmap() method of GoldfishAddressSpaceBlock to obtain
//    a pointer in guest userspace corresponding to the host pointer.
VkResult ResourceTracker::on_vkMapMemoryIntoAddressSpaceGOOGLE_pre(void*, VkResult, VkDevice,
                                                                   VkDeviceMemory memory,
                                                                   uint64_t* pAddress) {
    AutoLock<RecursiveLock> lock(mLock);

    auto it = info_VkDeviceMemory.find(memory);
    if (it == info_VkDeviceMemory.end()) {
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }

#if defined(__ANDROID__)
    auto& memInfo = it->second;

    GoldfishAddressSpaceBlockPtr block = std::make_shared<GoldfishAddressSpaceBlock>();
    block->allocate(mGoldfishAddressSpaceBlockProvider.get(), memInfo.coherentMemorySize);

    memInfo.goldfishBlock = block;
    *pAddress = block->physAddr();

    return VK_SUCCESS;
#else
    (void)pAddress;
    return VK_ERROR_MEMORY_MAP_FAILED;
#endif
}

VkResult ResourceTracker::on_vkMapMemoryIntoAddressSpaceGOOGLE(void*, VkResult input_result,
                                                               VkDevice, VkDeviceMemory memory,
                                                               uint64_t* pAddress) {
    (void)memory;
    (void)pAddress;

    if (input_result != VK_SUCCESS) {
        return input_result;
    }

    return input_result;
}

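// Two passes over pDescriptorUpdateEntries: the first pass counts how many
// image infos, buffer infos, buffer views, and inline uniform block bytes the
// template can write, so flat staging arrays can be allocated once; the
// second pass records, per descriptor slot, which template entry it belongs
// to. The arrays are filled in at vkUpdateDescriptorSetWithTemplate time and
// encoded to the host in a single call.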
VkResult ResourceTracker::initDescriptorUpdateTemplateBuffers(
    const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo,
    VkDescriptorUpdateTemplate descriptorUpdateTemplate) {
    AutoLock<RecursiveLock> lock(mLock);

    auto it = info_VkDescriptorUpdateTemplate.find(descriptorUpdateTemplate);
    if (it == info_VkDescriptorUpdateTemplate.end()) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    auto& info = it->second;
    uint32_t inlineUniformBlockBufferSize = 0;

    for (uint32_t i = 0; i < pCreateInfo->descriptorUpdateEntryCount; ++i) {
        const auto& entry = pCreateInfo->pDescriptorUpdateEntries[i];
        uint32_t descCount = entry.descriptorCount;
        VkDescriptorType descType = entry.descriptorType;
        ++info.templateEntryCount;
        if (isDescriptorTypeInlineUniformBlock(descType)) {
            inlineUniformBlockBufferSize += descCount;
            ++info.inlineUniformBlockCount;
        } else {
            for (uint32_t j = 0; j < descCount; ++j) {
                if (isDescriptorTypeImageInfo(descType)) {
                    ++info.imageInfoCount;
                } else if (isDescriptorTypeBufferInfo(descType)) {
                    ++info.bufferInfoCount;
                } else if (isDescriptorTypeBufferView(descType)) {
                    ++info.bufferViewCount;
                } else {
                    mesa_loge("%s: FATAL: Unknown descriptor type %d\n", __func__, descType);
                    // abort();
                }
            }
        }
    }

    if (info.templateEntryCount)
        info.templateEntries = new VkDescriptorUpdateTemplateEntry[info.templateEntryCount];

    if (info.imageInfoCount) {
        info.imageInfoIndices = new uint32_t[info.imageInfoCount];
        info.imageInfos = new VkDescriptorImageInfo[info.imageInfoCount];
    }

    if (info.bufferInfoCount) {
        info.bufferInfoIndices = new uint32_t[info.bufferInfoCount];
        info.bufferInfos = new VkDescriptorBufferInfo[info.bufferInfoCount];
    }

    if (info.bufferViewCount) {
        info.bufferViewIndices = new uint32_t[info.bufferViewCount];
        info.bufferViews = new VkBufferView[info.bufferViewCount];
    }

    if (info.inlineUniformBlockCount) {
        info.inlineUniformBlockBuffer.resize(inlineUniformBlockBufferSize);
        info.inlineUniformBlockBytesPerBlocks.resize(info.inlineUniformBlockCount);
    }

    uint32_t imageInfoIndex = 0;
    uint32_t bufferInfoIndex = 0;
    uint32_t bufferViewIndex = 0;
    uint32_t inlineUniformBlockIndex = 0;

    for (uint32_t i = 0; i < pCreateInfo->descriptorUpdateEntryCount; ++i) {
        const auto& entry = pCreateInfo->pDescriptorUpdateEntries[i];
        uint32_t descCount = entry.descriptorCount;
        VkDescriptorType descType = entry.descriptorType;

        info.templateEntries[i] = entry;

        if (isDescriptorTypeInlineUniformBlock(descType)) {
            info.inlineUniformBlockBytesPerBlocks[inlineUniformBlockIndex] = descCount;
            ++inlineUniformBlockIndex;
        } else {
            for (uint32_t j = 0; j < descCount; ++j) {
                if (isDescriptorTypeImageInfo(descType)) {
                    info.imageInfoIndices[imageInfoIndex] = i;
                    ++imageInfoIndex;
                } else if (isDescriptorTypeBufferInfo(descType)) {
                    info.bufferInfoIndices[bufferInfoIndex] = i;
                    ++bufferInfoIndex;
                } else if (isDescriptorTypeBufferView(descType)) {
                    info.bufferViewIndices[bufferViewIndex] = i;
                    ++bufferViewIndex;
                } else {
                    mesa_loge("%s: FATAL: Unknown descriptor type %d\n", __func__, descType);
                    // abort();
                }
            }
        }
    }

    return VK_SUCCESS;
}

VkResult ResourceTracker::on_vkCreateDescriptorUpdateTemplate(
    void* context, VkResult input_result, VkDevice device,
    const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo,
    const VkAllocationCallbacks* pAllocator,
    VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate) {
    (void)context;
    (void)device;
    (void)pAllocator;

    if (input_result != VK_SUCCESS) return input_result;

    return initDescriptorUpdateTemplateBuffers(pCreateInfo, *pDescriptorUpdateTemplate);
}

VkResult ResourceTracker::on_vkCreateDescriptorUpdateTemplateKHR(
    void* context, VkResult input_result, VkDevice device,
    const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo,
    const VkAllocationCallbacks* pAllocator,
    VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate) {
    (void)context;
    (void)device;
    (void)pAllocator;

    if (input_result != VK_SUCCESS) return input_result;

    return initDescriptorUpdateTemplateBuffers(pCreateInfo, *pDescriptorUpdateTemplate);
}

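// Walks the template entries, copying descriptor payloads out of the
// user-supplied buffer (honoring each entry's offset and stride) into the
// staging arrays prepared by initDescriptorUpdateTemplateBuffers. With
// hasVulkanBatchedDescriptorSetUpdate the writes are applied to the
// guest-side ReifiedDescriptorSet and committed at submit time; otherwise
// they are encoded immediately via
// vkUpdateDescriptorSetWithTemplateSized2GOOGLE.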
void ResourceTracker::on_vkUpdateDescriptorSetWithTemplate(
    void* context, VkDevice device, VkDescriptorSet descriptorSet,
    VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData) {
    VkEncoder* enc = (VkEncoder*)context;

    uint8_t* userBuffer = (uint8_t*)pData;
    if (!userBuffer) return;

    // TODO: Make this thread safe
    AutoLock<RecursiveLock> lock(mLock);

    auto it = info_VkDescriptorUpdateTemplate.find(descriptorUpdateTemplate);
    if (it == info_VkDescriptorUpdateTemplate.end()) {
        return;
    }

    auto& info = it->second;

    uint32_t templateEntryCount = info.templateEntryCount;
    VkDescriptorUpdateTemplateEntry* templateEntries = info.templateEntries;

    uint32_t imageInfoCount = info.imageInfoCount;
    uint32_t bufferInfoCount = info.bufferInfoCount;
    uint32_t bufferViewCount = info.bufferViewCount;
    uint32_t inlineUniformBlockCount = info.inlineUniformBlockCount;
    uint32_t* imageInfoIndices = info.imageInfoIndices;
    uint32_t* bufferInfoIndices = info.bufferInfoIndices;
    uint32_t* bufferViewIndices = info.bufferViewIndices;
    VkDescriptorImageInfo* imageInfos = info.imageInfos;
    VkDescriptorBufferInfo* bufferInfos = info.bufferInfos;
    VkBufferView* bufferViews = info.bufferViews;
    uint8_t* inlineUniformBlockBuffer = info.inlineUniformBlockBuffer.data();
    uint32_t* inlineUniformBlockBytesPerBlocks = info.inlineUniformBlockBytesPerBlocks.data();

    lock.unlock();

    size_t currImageInfoOffset = 0;
    size_t currBufferInfoOffset = 0;
    size_t currBufferViewOffset = 0;
    size_t inlineUniformBlockOffset = 0;
    size_t inlineUniformBlockIdx = 0;

    struct goldfish_VkDescriptorSet* ds = as_goldfish_VkDescriptorSet(descriptorSet);
    ReifiedDescriptorSet* reified = ds->reified;

    bool batched = mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate;

    for (uint32_t i = 0; i < templateEntryCount; ++i) {
        const auto& entry = templateEntries[i];
        VkDescriptorType descType = entry.descriptorType;
        uint32_t dstBinding = entry.dstBinding;

        auto offset = entry.offset;
        auto stride = entry.stride;
        auto dstArrayElement = entry.dstArrayElement;

        uint32_t descCount = entry.descriptorCount;

        if (isDescriptorTypeImageInfo(descType)) {
            if (!stride) stride = sizeof(VkDescriptorImageInfo);

            const VkDescriptorImageInfo* currImageInfoBegin =
                (const VkDescriptorImageInfo*)((uint8_t*)imageInfos + currImageInfoOffset);

            for (uint32_t j = 0; j < descCount; ++j) {
                const VkDescriptorImageInfo* user =
                    (const VkDescriptorImageInfo*)(userBuffer + offset + j * stride);

                memcpy(((uint8_t*)imageInfos) + currImageInfoOffset, user,
                       sizeof(VkDescriptorImageInfo));
                currImageInfoOffset += sizeof(VkDescriptorImageInfo);
            }

            if (batched) {
                doEmulatedDescriptorImageInfoWriteFromTemplate(
                    descType, dstBinding, dstArrayElement, descCount, currImageInfoBegin, reified);
            }
        } else if (isDescriptorTypeBufferInfo(descType)) {
            if (!stride) stride = sizeof(VkDescriptorBufferInfo);

            const VkDescriptorBufferInfo* currBufferInfoBegin =
                (const VkDescriptorBufferInfo*)((uint8_t*)bufferInfos + currBufferInfoOffset);

            for (uint32_t j = 0; j < descCount; ++j) {
                const VkDescriptorBufferInfo* user =
                    (const VkDescriptorBufferInfo*)(userBuffer + offset + j * stride);

                memcpy(((uint8_t*)bufferInfos) + currBufferInfoOffset, user,
                       sizeof(VkDescriptorBufferInfo));
#if defined(__linux__) && !defined(VK_USE_PLATFORM_ANDROID_KHR)
                // Convert mesa to internal for objects in the user buffer
                VkDescriptorBufferInfo* internalBufferInfo =
                    (VkDescriptorBufferInfo*)(((uint8_t*)bufferInfos) + currBufferInfoOffset);
                VK_FROM_HANDLE(gfxstream_vk_buffer, gfxstream_buffer, internalBufferInfo->buffer);
                internalBufferInfo->buffer = gfxstream_buffer->internal_object;
#endif
                currBufferInfoOffset += sizeof(VkDescriptorBufferInfo);
            }

            if (batched) {
                doEmulatedDescriptorBufferInfoWriteFromTemplate(
                    descType, dstBinding, dstArrayElement, descCount, currBufferInfoBegin, reified);
            }

        } else if (isDescriptorTypeBufferView(descType)) {
            if (!stride) stride = sizeof(VkBufferView);

            const VkBufferView* currBufferViewBegin =
                (const VkBufferView*)((uint8_t*)bufferViews + currBufferViewOffset);

            for (uint32_t j = 0; j < descCount; ++j) {
                const VkBufferView* user = (const VkBufferView*)(userBuffer + offset + j * stride);

                memcpy(((uint8_t*)bufferViews) + currBufferViewOffset, user, sizeof(VkBufferView));
                currBufferViewOffset += sizeof(VkBufferView);
            }

            if (batched) {
                doEmulatedDescriptorBufferViewWriteFromTemplate(
                    descType, dstBinding, dstArrayElement, descCount, currBufferViewBegin, reified);
            }
        } else if (isDescriptorTypeInlineUniformBlock(descType)) {
            uint32_t inlineUniformBlockBytesPerBlock =
                inlineUniformBlockBytesPerBlocks[inlineUniformBlockIdx];
            uint8_t* currInlineUniformBlockBufferBegin =
                inlineUniformBlockBuffer + inlineUniformBlockOffset;
            memcpy(currInlineUniformBlockBufferBegin, userBuffer + offset,
                   inlineUniformBlockBytesPerBlock);
            inlineUniformBlockIdx++;
            inlineUniformBlockOffset += inlineUniformBlockBytesPerBlock;

            if (batched) {
                doEmulatedDescriptorInlineUniformBlockFromTemplate(
                    descType, dstBinding, dstArrayElement, descCount,
                    currInlineUniformBlockBufferBegin, reified);
            }
        } else {
            mesa_loge("%s: FATAL: Unknown descriptor type %d\n", __func__, descType);
            abort();
        }
    }

    if (batched) return;

    enc->vkUpdateDescriptorSetWithTemplateSized2GOOGLE(
        device, descriptorSet, descriptorUpdateTemplate, imageInfoCount, bufferInfoCount,
        bufferViewCount, static_cast<uint32_t>(info.inlineUniformBlockBuffer.size()),
        imageInfoIndices, bufferInfoIndices, bufferViewIndices, imageInfos, bufferInfos,
        bufferViews, inlineUniformBlockBuffer, true /* do lock */);
}

void ResourceTracker::on_vkUpdateDescriptorSetWithTemplateKHR(
    void* context, VkDevice device, VkDescriptorSet descriptorSet,
    VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData) {
    on_vkUpdateDescriptorSetWithTemplate(context, device, descriptorSet, descriptorUpdateTemplate,
                                         pData);
}

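// Shared implementation behind the core and KHR entry points. External
// handle types the guest cannot service are rejected up front with
// VK_ERROR_FORMAT_NOT_SUPPORTED, before the query is forwarded to the host.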
VkResult ResourceTracker::on_vkGetPhysicalDeviceImageFormatProperties2_common(
    bool isKhr, void* context, VkResult input_result, VkPhysicalDevice physicalDevice,
    const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo,
    VkImageFormatProperties2* pImageFormatProperties) {
    VkEncoder* enc = (VkEncoder*)context;
    (void)input_result;

    uint32_t supportedHandleType = 0;
    VkExternalImageFormatProperties* ext_img_properties =
        vk_find_struct<VkExternalImageFormatProperties>(pImageFormatProperties);

#ifdef VK_USE_PLATFORM_FUCHSIA

    constexpr VkFormat kExternalImageSupportedFormats[] = {
        VK_FORMAT_B8G8R8A8_SINT,  VK_FORMAT_B8G8R8A8_UNORM,   VK_FORMAT_B8G8R8A8_SRGB,
        VK_FORMAT_B8G8R8A8_SNORM, VK_FORMAT_B8G8R8A8_SSCALED, VK_FORMAT_B8G8R8A8_USCALED,
        VK_FORMAT_R8G8B8A8_SINT,  VK_FORMAT_R8G8B8A8_UNORM,   VK_FORMAT_R8G8B8A8_SRGB,
        VK_FORMAT_R8G8B8A8_SNORM, VK_FORMAT_R8G8B8A8_SSCALED, VK_FORMAT_R8G8B8A8_USCALED,
        VK_FORMAT_R8_UNORM,       VK_FORMAT_R8_UINT,          VK_FORMAT_R8_USCALED,
        VK_FORMAT_R8_SNORM,       VK_FORMAT_R8_SINT,          VK_FORMAT_R8_SSCALED,
        VK_FORMAT_R8_SRGB,        VK_FORMAT_R8G8_UNORM,       VK_FORMAT_R8G8_UINT,
        VK_FORMAT_R8G8_USCALED,   VK_FORMAT_R8G8_SNORM,       VK_FORMAT_R8G8_SINT,
        VK_FORMAT_R8G8_SSCALED,   VK_FORMAT_R8G8_SRGB,
    };

    if (ext_img_properties) {
        if (std::find(std::begin(kExternalImageSupportedFormats),
                      std::end(kExternalImageSupportedFormats),
                      pImageFormatInfo->format) == std::end(kExternalImageSupportedFormats)) {
            return VK_ERROR_FORMAT_NOT_SUPPORTED;
        }
    }
    supportedHandleType |= VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA;
#endif

#ifdef VK_USE_PLATFORM_ANDROID_KHR
    VkAndroidHardwareBufferUsageANDROID* output_ahw_usage =
        vk_find_struct<VkAndroidHardwareBufferUsageANDROID>(pImageFormatProperties);
    supportedHandleType |= VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
                           VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
#endif
    const VkPhysicalDeviceExternalImageFormatInfo* ext_img_info =
        vk_find_struct<VkPhysicalDeviceExternalImageFormatInfo>(pImageFormatInfo);
    if (supportedHandleType && ext_img_info) {
        // 0 is a valid handleType so we don't check against 0
        if (ext_img_info->handleType != (ext_img_info->handleType & supportedHandleType)) {
            return VK_ERROR_FORMAT_NOT_SUPPORTED;
        }
    }

    VkResult hostRes;

    if (isKhr) {
        hostRes = enc->vkGetPhysicalDeviceImageFormatProperties2KHR(
            physicalDevice, pImageFormatInfo, pImageFormatProperties, true /* do lock */);
    } else {
        hostRes = enc->vkGetPhysicalDeviceImageFormatProperties2(
            physicalDevice, pImageFormatInfo, pImageFormatProperties, true /* do lock */);
    }

    if (hostRes != VK_SUCCESS) return hostRes;

#ifdef VK_USE_PLATFORM_FUCHSIA
    if (ext_img_properties) {
        if (ext_img_info) {
            if (static_cast<uint32_t>(ext_img_info->handleType) ==
                VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA) {
                ext_img_properties->externalMemoryProperties = {
                    .externalMemoryFeatures = VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT |
                                              VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT,
                    .exportFromImportedHandleTypes =
                        VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA,
                    .compatibleHandleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA,
                };
            }
        }
    }
#endif

#ifdef VK_USE_PLATFORM_ANDROID_KHR
    if (output_ahw_usage) {
        output_ahw_usage->androidHardwareBufferUsage = getAndroidHardwareBufferUsageFromVkUsage(
            pImageFormatInfo->flags, pImageFormatInfo->usage);
    }
#endif
    if (ext_img_properties) {
        transformImpl_VkExternalMemoryProperties_fromhost(
            &ext_img_properties->externalMemoryProperties, 0);
    }
    return hostRes;
}

VkResult ResourceTracker::on_vkGetPhysicalDeviceImageFormatProperties2(
    void* context, VkResult input_result, VkPhysicalDevice physicalDevice,
    const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo,
    VkImageFormatProperties2* pImageFormatProperties) {
    return on_vkGetPhysicalDeviceImageFormatProperties2_common(
        false /* not KHR */, context, input_result, physicalDevice, pImageFormatInfo,
        pImageFormatProperties);
}

VkResult ResourceTracker::on_vkGetPhysicalDeviceImageFormatProperties2KHR(
    void* context, VkResult input_result, VkPhysicalDevice physicalDevice,
    const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo,
    VkImageFormatProperties2* pImageFormatProperties) {
    return on_vkGetPhysicalDeviceImageFormatProperties2_common(
        true /* is KHR */, context, input_result, physicalDevice, pImageFormatInfo,
        pImageFormatProperties);
}

void ResourceTracker::on_vkGetPhysicalDeviceExternalBufferProperties_common(
    bool isKhr, void* context, VkPhysicalDevice physicalDevice,
    const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo,
    VkExternalBufferProperties* pExternalBufferProperties) {
    VkEncoder* enc = (VkEncoder*)context;

#if defined(ANDROID)
    // Older versions of Goldfish's Gralloc did not support allocating AHARDWAREBUFFER_FORMAT_BLOB
    // with GPU usage (b/299520213).
    if (ResourceTracker::threadingCallbacks.hostConnectionGetFunc()
            ->grallocHelper()
            ->treatBlobAsImage() &&
        pExternalBufferInfo->handleType ==
            VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID) {
        pExternalBufferProperties->externalMemoryProperties.externalMemoryFeatures = 0;
        pExternalBufferProperties->externalMemoryProperties.exportFromImportedHandleTypes = 0;
        pExternalBufferProperties->externalMemoryProperties.compatibleHandleTypes = 0;
        return;
    }
#endif

    uint32_t supportedHandleType = 0;
#ifdef VK_USE_PLATFORM_FUCHSIA
    supportedHandleType |= VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA;
#endif
#ifdef VK_USE_PLATFORM_ANDROID_KHR
    supportedHandleType |= VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
                           VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
#endif
    if (supportedHandleType) {
        // 0 is a valid handleType so we can't check against 0
        if (pExternalBufferInfo->handleType !=
            (pExternalBufferInfo->handleType & supportedHandleType)) {
            return;
        }
    }

    if (isKhr) {
        enc->vkGetPhysicalDeviceExternalBufferPropertiesKHR(
            physicalDevice, pExternalBufferInfo, pExternalBufferProperties, true /* do lock */);
    } else {
        enc->vkGetPhysicalDeviceExternalBufferProperties(
            physicalDevice, pExternalBufferInfo, pExternalBufferProperties, true /* do lock */);
    }
    transformImpl_VkExternalMemoryProperties_fromhost(
        &pExternalBufferProperties->externalMemoryProperties, 0);
}

void ResourceTracker::on_vkGetPhysicalDeviceExternalBufferProperties(
    void* context, VkPhysicalDevice physicalDevice,
    const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo,
    VkExternalBufferProperties* pExternalBufferProperties) {
    return on_vkGetPhysicalDeviceExternalBufferProperties_common(
        false /* not KHR */, context, physicalDevice, pExternalBufferInfo,
        pExternalBufferProperties);
}

void ResourceTracker::on_vkGetPhysicalDeviceExternalBufferPropertiesKHR(
    void* context, VkPhysicalDevice physicalDevice,
    const VkPhysicalDeviceExternalBufferInfoKHR* pExternalBufferInfo,
    VkExternalBufferPropertiesKHR* pExternalBufferProperties) {
    return on_vkGetPhysicalDeviceExternalBufferProperties_common(
        true /* is KHR */, context, physicalDevice, pExternalBufferInfo, pExternalBufferProperties);
}

void ResourceTracker::on_vkGetPhysicalDeviceExternalSemaphoreProperties(
    void*, VkPhysicalDevice, const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo,
    VkExternalSemaphoreProperties* pExternalSemaphoreProperties) {
    (void)pExternalSemaphoreInfo;
    (void)pExternalSemaphoreProperties;
#ifdef VK_USE_PLATFORM_FUCHSIA
    if (pExternalSemaphoreInfo->handleType ==
        static_cast<uint32_t>(VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA)) {
        pExternalSemaphoreProperties->compatibleHandleTypes |=
            VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA;
        pExternalSemaphoreProperties->exportFromImportedHandleTypes |=
            VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA;
        pExternalSemaphoreProperties->externalSemaphoreFeatures |=
            VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
            VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
    }
#else
    const VkSemaphoreTypeCreateInfo* semaphoreTypeCi =
        vk_find_struct<VkSemaphoreTypeCreateInfo>(pExternalSemaphoreInfo);
    bool isSemaphoreTimeline =
        semaphoreTypeCi != nullptr && semaphoreTypeCi->semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE;
    if (isSemaphoreTimeline) {
        // b/304373623
        // dEQP-VK.api.external.semaphore.sync_fd#info_timeline
        pExternalSemaphoreProperties->compatibleHandleTypes = 0;
        pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
        pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
    } else if (pExternalSemaphoreInfo->handleType ==
               VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT) {
        pExternalSemaphoreProperties->compatibleHandleTypes |=
            VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
        pExternalSemaphoreProperties->exportFromImportedHandleTypes |=
            VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
        pExternalSemaphoreProperties->externalSemaphoreFeatures |=
            VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
            VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
    }
#endif  // VK_USE_PLATFORM_FUCHSIA
}

void ResourceTracker::on_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR(
    void* context, VkPhysicalDevice physicalDevice,
    const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo,
    VkExternalSemaphoreProperties* pExternalSemaphoreProperties) {
    on_vkGetPhysicalDeviceExternalSemaphoreProperties(
        context, physicalDevice, pExternalSemaphoreInfo, pExternalSemaphoreProperties);
}

void ResourceTracker::registerEncoderCleanupCallback(const VkEncoder* encoder, void* object,
                                                     CleanupCallback callback) {
    AutoLock<RecursiveLock> lock(mLock);
    auto& callbacks = mEncoderCleanupCallbacks[encoder];
    callbacks[object] = callback;
}

void ResourceTracker::unregisterEncoderCleanupCallback(const VkEncoder* encoder, void* object) {
    AutoLock<RecursiveLock> lock(mLock);
    mEncoderCleanupCallbacks[encoder].erase(object);
}

void ResourceTracker::onEncoderDeleted(const VkEncoder* encoder) {
    AutoLock<RecursiveLock> lock(mLock);
    if (mEncoderCleanupCallbacks.find(encoder) == mEncoderCleanupCallbacks.end()) return;

    std::unordered_map<void*, CleanupCallback> callbackCopies = mEncoderCleanupCallbacks[encoder];

    mEncoderCleanupCallbacks.erase(encoder);
    lock.unlock();

    for (auto it : callbackCopies) {
        it.second();
    }
}

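// Allocator handed to CommandBufferStagingStream when command memory is
// shared with the host (hasVulkanAuxCommandMemory). Requests are served from
// the coherent-memory suballocator; memoryTypeIndex = VK_MAX_MEMORY_TYPES
// marks the allocation as auxiliary command memory rather than an ordinary
// app allocation.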
CommandBufferStagingStream::Alloc ResourceTracker::getAlloc() {
    if (mFeatureInfo->hasVulkanAuxCommandMemory) {
        return [this](size_t size) -> CommandBufferStagingStream::Memory {
            VkMemoryAllocateInfo info{
                .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
                .pNext = nullptr,
                .allocationSize = size,
                .memoryTypeIndex = VK_MAX_MEMORY_TYPES  // indicates auxiliary memory
            };

            auto enc = ResourceTracker::getThreadLocalEncoder();
            VkDevice device = VK_NULL_HANDLE;
            VkDeviceMemory vkDeviceMem = VK_NULL_HANDLE;
            VkResult result = getCoherentMemory(&info, enc, device, &vkDeviceMem);
            if (result != VK_SUCCESS) {
                mesa_loge("Failed to get coherent memory %u", result);
                return {.deviceMemory = VK_NULL_HANDLE, .ptr = nullptr};
            }

            // getCoherentMemory() uses suballocations.
            // To retrieve the suballocated memory address, look up
            // the VkDeviceMemory filled in by getCoherentMemory().
            // scope of mLock
            {
                AutoLock<RecursiveLock> lock(mLock);
                const auto it = info_VkDeviceMemory.find(vkDeviceMem);
                if (it == info_VkDeviceMemory.end()) {
                    mesa_loge("Coherent memory allocated %u not found", result);
                    return {.deviceMemory = VK_NULL_HANDLE, .ptr = nullptr};
                }

                const auto& info = it->second;
                return {.deviceMemory = vkDeviceMem, .ptr = info.ptr};
            }
        };
    }
    return nullptr;
}

CommandBufferStagingStream::Free ResourceTracker::getFree() {
    if (mFeatureInfo->hasVulkanAuxCommandMemory) {
        return [this](const CommandBufferStagingStream::Memory& memory) {
            // deviceMemory may not be the actual backing auxiliary VkDeviceMemory;
            // for suballocations, deviceMemory is an alias VkDeviceMemory handle.
            // freeCoherentMemoryLocked maps the alias to the backing VkDeviceMemory.
            VkDeviceMemory deviceMemory = memory.deviceMemory;
            AutoLock<RecursiveLock> lock(mLock);
            auto it = info_VkDeviceMemory.find(deviceMemory);
            if (it == info_VkDeviceMemory.end()) {
                mesa_loge("Device memory to free not found");
                return;
            }
            auto coherentMemory = freeCoherentMemoryLocked(deviceMemory, it->second);
            // We have to release the lock before we could possibly free a
            // CoherentMemory, because that will call into VkEncoder, which
            // shouldn't be called when the lock is held.
            lock.unlock();
            coherentMemory = nullptr;
        };
    }
    return nullptr;
}

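// With deferred commands (supportsDeferredCommands), begin/end/reset are
// encoded as their *AsyncGOOGLE variants and VK_SUCCESS is returned
// optimistically, without waiting for a host round trip.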
on_vkBeginCommandBuffer(void * context,VkResult input_result,VkCommandBuffer commandBuffer,const VkCommandBufferBeginInfo * pBeginInfo)6806 VkResult ResourceTracker::on_vkBeginCommandBuffer(void* context, VkResult input_result,
6807 VkCommandBuffer commandBuffer,
6808 const VkCommandBufferBeginInfo* pBeginInfo) {
6809 (void)context;
6810
6811 resetCommandBufferStagingInfo(commandBuffer, true /* also reset primaries */,
6812 true /* also clear pending descriptor sets */);
6813
6814 VkEncoder* enc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
6815 (void)input_result;
6816
6817 struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
6818 cb->flags = pBeginInfo->flags;
6819
6820 VkCommandBufferBeginInfo modifiedBeginInfo;
6821
6822 if (pBeginInfo->pInheritanceInfo && !cb->isSecondary) {
6823 modifiedBeginInfo = *pBeginInfo;
6824 modifiedBeginInfo.pInheritanceInfo = nullptr;
6825 pBeginInfo = &modifiedBeginInfo;
6826 }
6827
6828 if (!supportsDeferredCommands()) {
6829 return enc->vkBeginCommandBuffer(commandBuffer, pBeginInfo, true /* do lock */);
6830 }
6831
6832 enc->vkBeginCommandBufferAsyncGOOGLE(commandBuffer, pBeginInfo, true /* do lock */);
6833
6834 return VK_SUCCESS;
6835 }
6836
on_vkEndCommandBuffer(void * context,VkResult input_result,VkCommandBuffer commandBuffer)6837 VkResult ResourceTracker::on_vkEndCommandBuffer(void* context, VkResult input_result,
6838 VkCommandBuffer commandBuffer) {
6839 VkEncoder* enc = (VkEncoder*)context;
6840 (void)input_result;
6841
6842 if (!supportsDeferredCommands()) {
6843 return enc->vkEndCommandBuffer(commandBuffer, true /* do lock */);
6844 }
6845
6846 enc->vkEndCommandBufferAsyncGOOGLE(commandBuffer, true /* do lock */);
6847
6848 return VK_SUCCESS;
6849 }
6850
on_vkResetCommandBuffer(void * context,VkResult input_result,VkCommandBuffer commandBuffer,VkCommandBufferResetFlags flags)6851 VkResult ResourceTracker::on_vkResetCommandBuffer(void* context, VkResult input_result,
6852 VkCommandBuffer commandBuffer,
6853 VkCommandBufferResetFlags flags) {
6854 VkEncoder* enc = (VkEncoder*)context;
6855 (void)input_result;
6856
6857 if (!supportsDeferredCommands()) {
6858 VkResult res = enc->vkResetCommandBuffer(commandBuffer, flags, true /* do lock */);
6859 resetCommandBufferStagingInfo(commandBuffer, true /* also reset primaries */,
6860 true /* also clear pending descriptor sets */);
6861 return res;
6862 }
6863
6864 enc->vkResetCommandBufferAsyncGOOGLE(commandBuffer, flags, true /* do lock */);
6865 resetCommandBufferStagingInfo(commandBuffer, true /* also reset primaries */,
6866 true /* also clear pending descriptor sets */);
6867 return VK_SUCCESS;
6868 }
6869
on_vkCreateImageView(void * context,VkResult input_result,VkDevice device,const VkImageViewCreateInfo * pCreateInfo,const VkAllocationCallbacks * pAllocator,VkImageView * pView)6870 VkResult ResourceTracker::on_vkCreateImageView(void* context, VkResult input_result,
6871 VkDevice device,
6872 const VkImageViewCreateInfo* pCreateInfo,
6873 const VkAllocationCallbacks* pAllocator,
6874 VkImageView* pView) {
6875 VkEncoder* enc = (VkEncoder*)context;
6876 (void)input_result;
6877
6878 VkImageViewCreateInfo localCreateInfo = vk_make_orphan_copy(*pCreateInfo);
6879 vk_struct_chain_iterator structChainIter = vk_make_chain_iterator(&localCreateInfo);
6880
6881 #if defined(VK_USE_PLATFORM_ANDROID_KHR)
6882 if (pCreateInfo->format == VK_FORMAT_UNDEFINED) {
6883 AutoLock<RecursiveLock> lock(mLock);
6884
6885 auto it = info_VkImage.find(pCreateInfo->image);
6886 if (it != info_VkImage.end() && it->second.hasExternalFormat) {
6887 localCreateInfo.format = vk_format_from_fourcc(it->second.externalFourccFormat);
6888 }
6889 }
6890 VkSamplerYcbcrConversionInfo localVkSamplerYcbcrConversionInfo;
6891 const VkSamplerYcbcrConversionInfo* samplerYcbcrConversionInfo =
6892 vk_find_struct<VkSamplerYcbcrConversionInfo>(pCreateInfo);
6893 if (samplerYcbcrConversionInfo) {
6894 if (samplerYcbcrConversionInfo->conversion != VK_YCBCR_CONVERSION_DO_NOTHING) {
6895 localVkSamplerYcbcrConversionInfo = vk_make_orphan_copy(*samplerYcbcrConversionInfo);
6896 vk_append_struct(&structChainIter, &localVkSamplerYcbcrConversionInfo);
6897 }
6898 }
6899 #endif
6900
6901 return enc->vkCreateImageView(device, &localCreateInfo, pAllocator, pView, true /* do lock */);
6902 }
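
// Caller-side sketch (hypothetical handles): views of images imported with an
// Android external format are created with VK_FORMAT_UNDEFINED plus a chained
// VkSamplerYcbcrConversionInfo; the code above swaps in the fourcc-derived
// format before encoding:
//
//     VkSamplerYcbcrConversionInfo conversionInfo = {
//         VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO, nullptr, conversion};
//     VkImageViewCreateInfo viewInfo = {};
//     viewInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
//     viewInfo.pNext = &conversionInfo;
//     viewInfo.image = ahbImage;  // imported from an AHardwareBuffer
//     viewInfo.viewType = VK_IMAGE_VIEW_TYPE_2D;
//     viewInfo.format = VK_FORMAT_UNDEFINED;  // resolved from fourcc above
//     vkCreateImageView(device, &viewInfo, nullptr, &view);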

void ResourceTracker::on_vkCmdExecuteCommands(void* context, VkCommandBuffer commandBuffer,
                                              uint32_t commandBufferCount,
                                              const VkCommandBuffer* pCommandBuffers) {
    VkEncoder* enc = (VkEncoder*)context;

    if (!mFeatureInfo->hasVulkanQueueSubmitWithCommands) {
        enc->vkCmdExecuteCommands(commandBuffer, commandBufferCount, pCommandBuffers,
                                  true /* do lock */);
        return;
    }

    struct goldfish_VkCommandBuffer* primary = as_goldfish_VkCommandBuffer(commandBuffer);
    for (uint32_t i = 0; i < commandBufferCount; ++i) {
        struct goldfish_VkCommandBuffer* secondary =
            as_goldfish_VkCommandBuffer(pCommandBuffers[i]);
        appendObject(&secondary->superObjects, primary);
        appendObject(&primary->subObjects, secondary);
    }

    enc->vkCmdExecuteCommands(commandBuffer, commandBufferCount, pCommandBuffers,
                              true /* do lock */);
}
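
// Bookkeeping sketch: after this call the primary/secondary relationship is
// tracked in both directions,
//
//     primary->subObjects:      [secondary0, secondary1, ...]
//     secondaryN->superObjects: [primary, ...]
//
// which is what lets resetCommandBufferStagingInfo() (further below)
// propagate staging resets from a secondary to every primary that recorded it.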

void ResourceTracker::on_vkCmdBindDescriptorSets(void* context, VkCommandBuffer commandBuffer,
                                                 VkPipelineBindPoint pipelineBindPoint,
                                                 VkPipelineLayout layout, uint32_t firstSet,
                                                 uint32_t descriptorSetCount,
                                                 const VkDescriptorSet* pDescriptorSets,
                                                 uint32_t dynamicOffsetCount,
                                                 const uint32_t* pDynamicOffsets) {
    VkEncoder* enc = (VkEncoder*)context;

    if (mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate)
        addPendingDescriptorSets(commandBuffer, descriptorSetCount, pDescriptorSets);

    enc->vkCmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet,
                                 descriptorSetCount, pDescriptorSets, dynamicOffsetCount,
                                 pDynamicOffsets, true /* do lock */);
}

void ResourceTracker::on_vkCmdPipelineBarrier(
    void* context, VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
    VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
    uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers,
    uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers,
    uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers) {
    VkEncoder* enc = (VkEncoder*)context;

    std::vector<VkImageMemoryBarrier> updatedImageMemoryBarriers;
    updatedImageMemoryBarriers.reserve(imageMemoryBarrierCount);
    for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
        VkImageMemoryBarrier barrier = pImageMemoryBarriers[i];

#ifdef VK_USE_PLATFORM_ANDROID_KHR
        // Unfortunately, Android does not yet have a mechanism for sharing the expected
        // VkImageLayout when passing around AHardwareBuffer-s, so many existing users
        // that import AHardwareBuffer-s into VkImage-s/VkDeviceMemory-s simply use
        // VK_IMAGE_LAYOUT_UNDEFINED. However, the Vulkan spec's image layout transition
        // section says "If the old layout is VK_IMAGE_LAYOUT_UNDEFINED, the contents of
        // that range may be discarded." Some Vulkan drivers have been observed to actually
        // perform the discard, which leads to AHardwareBuffer-s being unintentionally
        // cleared. See go/ahb-vkimagelayout for more information.
        if (barrier.srcQueueFamilyIndex != barrier.dstQueueFamilyIndex &&
            (barrier.srcQueueFamilyIndex == VK_QUEUE_FAMILY_EXTERNAL ||
             barrier.srcQueueFamilyIndex == VK_QUEUE_FAMILY_FOREIGN_EXT) &&
            barrier.oldLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
            // This is not a complete solution, as the Vulkan spec does not require
            // Vulkan drivers to perform a no-op when oldLayout equals newLayout,
            // but it has been observed to be enough to avoid clearing out images
            // for now.
            // TODO(b/236179843): figure out long term solution.
            barrier.oldLayout = barrier.newLayout;
        }
#endif

        updatedImageMemoryBarriers.push_back(barrier);
    }

    enc->vkCmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags,
                              memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                              pBufferMemoryBarriers, updatedImageMemoryBarriers.size(),
                              updatedImageMemoryBarriers.data(), true /* do lock */);
}
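
// Concrete example of the rewrite above (hypothetical barrier): an external
// acquire of an AHardwareBuffer-backed image submitted as
//
//     VkImageMemoryBarrier acquire = {};
//     acquire.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
//     acquire.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
//     acquire.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
//     acquire.srcQueueFamilyIndex = VK_QUEUE_FAMILY_FOREIGN_EXT;
//     acquire.dstQueueFamilyIndex = 0;
//     acquire.image = ahbBackedImage;
//
// is encoded with oldLayout == newLayout == SHADER_READ_ONLY_OPTIMAL, so the
// host driver has no license to discard the buffer's contents.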

void ResourceTracker::on_vkDestroyDescriptorSetLayout(void* context, VkDevice device,
                                                      VkDescriptorSetLayout descriptorSetLayout,
                                                      const VkAllocationCallbacks* pAllocator) {
    decDescriptorSetLayoutRef(context, device, descriptorSetLayout, pAllocator);
}

VkResult ResourceTracker::on_vkAllocateCommandBuffers(
    void* context, VkResult input_result, VkDevice device,
    const VkCommandBufferAllocateInfo* pAllocateInfo, VkCommandBuffer* pCommandBuffers) {
    (void)input_result;

    VkEncoder* enc = (VkEncoder*)context;
    VkResult res =
        enc->vkAllocateCommandBuffers(device, pAllocateInfo, pCommandBuffers, true /* do lock */);
    if (VK_SUCCESS != res) return res;

    for (uint32_t i = 0; i < pAllocateInfo->commandBufferCount; ++i) {
        struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(pCommandBuffers[i]);
        cb->isSecondary = pAllocateInfo->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY;
        cb->device = device;
    }

    return res;
}

#if defined(VK_USE_PLATFORM_ANDROID_KHR)
VkResult ResourceTracker::exportSyncFdForQSRILocked(VkImage image, int* fd) {
    mesa_logi("%s: call for image %p host image handle 0x%llx\n", __func__, (void*)image,
              (unsigned long long)get_host_u64_VkImage(image));

    if (mFeatureInfo->hasVirtioGpuNativeSync) {
        struct VirtGpuExecBuffer exec = {};
        struct gfxstreamCreateQSRIExportVK exportQSRI = {};
        VirtGpuDevice* instance = VirtGpuDevice::getInstance();

        uint64_t hostImageHandle = get_host_u64_VkImage(image);

        exportQSRI.hdr.opCode = GFXSTREAM_CREATE_QSRI_EXPORT_VK;
        exportQSRI.imageHandleLo = (uint32_t)hostImageHandle;
        exportQSRI.imageHandleHi = (uint32_t)(hostImageHandle >> 32);

        exec.command = static_cast<void*>(&exportQSRI);
        exec.command_size = sizeof(exportQSRI);
        exec.flags = kFenceOut | kRingIdx;
        if (instance->execBuffer(exec, nullptr)) return VK_ERROR_OUT_OF_HOST_MEMORY;

        *fd = exec.handle.osHandle;
    } else {
#if GFXSTREAM_ENABLE_GUEST_GOLDFISH
        ensureSyncDeviceFd();
        goldfish_sync_queue_work(
            mSyncDeviceFd, get_host_u64_VkImage(image) /* the handle */,
            GOLDFISH_SYNC_VULKAN_QSRI /* thread handle (doubling as type field) */, fd);
#endif
    }

    mesa_logi("%s: got fd: %d\n", __func__, *fd);
    auto imageInfoIt = info_VkImage.find(image);
    if (imageInfoIt != info_VkImage.end()) {
        auto& imageInfo = imageInfoIt->second;

        auto* syncHelper =
            ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->syncHelper();

        // Remove any pending QSRI sync fds that are already signaled.
        auto syncFdIt = imageInfo.pendingQsriSyncFds.begin();
        while (syncFdIt != imageInfo.pendingQsriSyncFds.end()) {
            int syncFd = *syncFdIt;
            int syncWaitRet = syncHelper->wait(syncFd, /*timeout msecs*/ 0);
            if (syncWaitRet == 0) {
                // Sync fd is signaled.
                syncFdIt = imageInfo.pendingQsriSyncFds.erase(syncFdIt);
                syncHelper->close(syncFd);
            } else {
                if (errno != ETIME) {
                    mesa_loge("%s: Failed to wait for pending QSRI sync: strerror: %s errno: %d",
                              __func__, strerror(errno), errno);
                }
                break;
            }
        }

        int syncFdDup = syncHelper->dup(*fd);
        if (syncFdDup < 0) {
            mesa_loge("%s: Failed to dup() QSRI sync fd: strerror: %s errno: %d", __func__,
                      strerror(errno), errno);
        } else {
            imageInfo.pendingQsriSyncFds.push_back(syncFdDup);
        }
    }

    return VK_SUCCESS;
}
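
// Consumer-side sketch (hypothetical, outside this file): the fd returned via
// *fd behaves like any Android sync fd, so a caller such as the compositor
// can wait on it before reading the image:
//
//     int ret = sync_wait(nativeFenceFd, 3000 /* ms */);  // libsync
//     if (ret < 0) { /* timeout or error; errno describes which */ }
//     close(nativeFenceFd);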

VkResult ResourceTracker::on_vkQueueSignalReleaseImageANDROID(void* context, VkResult input_result,
                                                              VkQueue queue,
                                                              uint32_t waitSemaphoreCount,
                                                              const VkSemaphore* pWaitSemaphores,
                                                              VkImage image, int* pNativeFenceFd) {
    (void)input_result;

    VkEncoder* enc = (VkEncoder*)context;

    if (!mFeatureInfo->hasVulkanAsyncQsri) {
        return enc->vkQueueSignalReleaseImageANDROID(queue, waitSemaphoreCount, pWaitSemaphores,
                                                     image, pNativeFenceFd, true /* lock */);
    }

    {
        AutoLock<RecursiveLock> lock(mLock);
        auto it = info_VkImage.find(image);
        if (it == info_VkImage.end()) {
            if (pNativeFenceFd) *pNativeFenceFd = -1;
            return VK_ERROR_INITIALIZATION_FAILED;
        }
    }

    enc->vkQueueSignalReleaseImageANDROIDAsyncGOOGLE(queue, waitSemaphoreCount, pWaitSemaphores,
                                                     image, true /* lock */);

    AutoLock<RecursiveLock> lock(mLock);
    VkResult result;
    if (pNativeFenceFd) {
        result = exportSyncFdForQSRILocked(image, pNativeFenceFd);
    } else {
        int syncFd;
        result = exportSyncFdForQSRILocked(image, &syncFd);

        if (syncFd >= 0) {
            auto* syncHelper =
                ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->syncHelper();
            syncHelper->close(syncFd);
        }
    }

    return result;
}
#endif

VkResult ResourceTracker::on_vkCreateGraphicsPipelines(
    void* context, VkResult input_result, VkDevice device, VkPipelineCache pipelineCache,
    uint32_t createInfoCount, const VkGraphicsPipelineCreateInfo* pCreateInfos,
    const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines) {
    (void)input_result;
    VkEncoder* enc = (VkEncoder*)context;
    std::vector<VkGraphicsPipelineCreateInfo> localCreateInfos(pCreateInfos,
                                                               pCreateInfos + createInfoCount);
    for (VkGraphicsPipelineCreateInfo& graphicsPipelineCreateInfo : localCreateInfos) {
        // dEQP-VK.api.pipeline.pipeline_invalid_pointers_unused_structs#graphics
        bool requireViewportState = false;
        // VUID-VkGraphicsPipelineCreateInfo-rasterizerDiscardEnable-00750
        requireViewportState |=
            graphicsPipelineCreateInfo.pRasterizationState != nullptr &&
            graphicsPipelineCreateInfo.pRasterizationState->rasterizerDiscardEnable == VK_FALSE;
        // VUID-VkGraphicsPipelineCreateInfo-pViewportState-04892
#ifdef VK_EXT_extended_dynamic_state2
        if (!requireViewportState && graphicsPipelineCreateInfo.pDynamicState) {
            for (uint32_t i = 0; i < graphicsPipelineCreateInfo.pDynamicState->dynamicStateCount;
                 i++) {
                if (VK_DYNAMIC_STATE_RASTERIZER_DISCARD_ENABLE_EXT ==
                    graphicsPipelineCreateInfo.pDynamicState->pDynamicStates[i]) {
                    requireViewportState = true;
                    break;
                }
            }
        }
#endif  // VK_EXT_extended_dynamic_state2
        if (!requireViewportState) {
            graphicsPipelineCreateInfo.pViewportState = nullptr;
        }

        // pMultisampleState has the same requirement as pViewportState.
        bool shouldIncludeFragmentShaderState = requireViewportState;

        // VUID-VkGraphicsPipelineCreateInfo-rasterizerDiscardEnable-00751
        if (!shouldIncludeFragmentShaderState) {
            graphicsPipelineCreateInfo.pMultisampleState = nullptr;
        }

        bool forceDepthStencilState = false;
        bool forceColorBlendState = false;

        const VkPipelineRenderingCreateInfo* pipelineRenderingInfo =
            vk_find_struct<VkPipelineRenderingCreateInfo>(&graphicsPipelineCreateInfo);

        if (pipelineRenderingInfo) {
            forceDepthStencilState |=
                pipelineRenderingInfo->depthAttachmentFormat != VK_FORMAT_UNDEFINED;
            forceDepthStencilState |=
                pipelineRenderingInfo->stencilAttachmentFormat != VK_FORMAT_UNDEFINED;
            forceColorBlendState |= pipelineRenderingInfo->colorAttachmentCount != 0;
        }

        // VUID-VkGraphicsPipelineCreateInfo-renderPass-06043
        // VUID-VkGraphicsPipelineCreateInfo-renderPass-06044
        if (graphicsPipelineCreateInfo.renderPass == VK_NULL_HANDLE ||
            !shouldIncludeFragmentShaderState) {
            // VUID-VkGraphicsPipelineCreateInfo-renderPass-06053
            if (!forceDepthStencilState) {
                graphicsPipelineCreateInfo.pDepthStencilState = nullptr;
            }
            if (!forceColorBlendState) {
                graphicsPipelineCreateInfo.pColorBlendState = nullptr;
            }
        }
    }
    return enc->vkCreateGraphicsPipelines(device, pipelineCache, localCreateInfos.size(),
                                          localCreateInfos.data(), pAllocator, pPipelines,
                                          true /* do lock */);
}
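
// Worked example of the pruning above (hypothetical create info): a pipeline
// with rasterizer discard enabled, e.g.
//
//     VkPipelineRasterizationStateCreateInfo raster = {};
//     raster.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
//     raster.rasterizerDiscardEnable = VK_TRUE;
//
// does not require viewport or fragment-shader state, so pViewportState and
// pMultisampleState are nulled out before encoding, even if the app left
// dangling (legal-to-ignore) pointers in them.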

uint32_t ResourceTracker::getApiVersionFromInstance(VkInstance instance) const {
    AutoLock<RecursiveLock> lock(mLock);
    uint32_t api = kDefaultApiVersion;

    auto it = info_VkInstance.find(instance);
    if (it == info_VkInstance.end()) return api;

    api = it->second.highestApiVersion;

    return api;
}

uint32_t ResourceTracker::getApiVersionFromDevice(VkDevice device) const {
    AutoLock<RecursiveLock> lock(mLock);

    uint32_t api = kDefaultApiVersion;

    auto it = info_VkDevice.find(device);
    if (it == info_VkDevice.end()) return api;

    api = it->second.apiVersion;

    return api;
}

bool ResourceTracker::hasInstanceExtension(VkInstance instance, const std::string& name) const {
    AutoLock<RecursiveLock> lock(mLock);

    auto it = info_VkInstance.find(instance);
    if (it == info_VkInstance.end()) return false;

    return it->second.enabledExtensions.find(name) != it->second.enabledExtensions.end();
}

bool ResourceTracker::hasDeviceExtension(VkDevice device, const std::string& name) const {
    AutoLock<RecursiveLock> lock(mLock);

    auto it = info_VkDevice.find(device);
    if (it == info_VkDevice.end()) return false;

    return it->second.enabledExtensions.find(name) != it->second.enabledExtensions.end();
}

VkDevice ResourceTracker::getDevice(VkCommandBuffer commandBuffer) const {
    struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
    if (!cb) {
        return nullptr;
    }
    return cb->device;
}

// Resets the staging stream for this command buffer and for any primary
// command buffers into which this command buffer has been recorded. If
// requested, also clears the pending descriptor sets.
void ResourceTracker::resetCommandBufferStagingInfo(VkCommandBuffer commandBuffer,
                                                    bool alsoResetPrimaries,
                                                    bool alsoClearPendingDescriptorSets) {
    struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
    if (!cb) {
        return;
    }
    if (cb->privateEncoder) {
        sStaging.pushStaging((CommandBufferStagingStream*)cb->privateStream, cb->privateEncoder);
        cb->privateEncoder = nullptr;
        cb->privateStream = nullptr;
    }

    if (alsoClearPendingDescriptorSets && cb->userPtr) {
        CommandBufferPendingDescriptorSets* pendingSets =
            (CommandBufferPendingDescriptorSets*)cb->userPtr;
        pendingSets->sets.clear();
    }

    if (alsoResetPrimaries) {
        forAllObjects(cb->superObjects, [this, alsoResetPrimaries,
                                         alsoClearPendingDescriptorSets](void* obj) {
            VkCommandBuffer superCommandBuffer = (VkCommandBuffer)obj;
            this->resetCommandBufferStagingInfo(superCommandBuffer, alsoResetPrimaries,
                                                alsoClearPendingDescriptorSets);
        });
        eraseObjects(&cb->superObjects);
    }

    forAllObjects(cb->subObjects, [cb](void* obj) {
        VkCommandBuffer subCommandBuffer = (VkCommandBuffer)obj;
        struct goldfish_VkCommandBuffer* subCb = as_goldfish_VkCommandBuffer(subCommandBuffer);
        // We don't do resetCommandBufferStagingInfo(subCommandBuffer)
        // since the user still might have submittable stuff pending there.
        eraseObject(&subCb->superObjects, (void*)cb);
    });

    eraseObjects(&cb->subObjects);
}

// Unlike resetCommandBufferStagingInfo, this does not always erase the
// command buffer's superObjects pointers, because the command buffer has
// merely been submitted, not reset. However, if the command buffer was
// recorded with ONE_TIME_SUBMIT_BIT, then it will also reset its primaries.
//
// Also, we save the set of descriptor sets referenced by this command
// buffer, because we only submitted the command buffer and it's possible to
// update the descriptor sets again and re-submit the same command buffer
// without re-recording it (update-after-bind descriptor sets).
void ResourceTracker::resetCommandBufferPendingTopology(VkCommandBuffer commandBuffer) {
    struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
    if (cb->flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) {
        resetCommandBufferStagingInfo(commandBuffer, true /* reset primaries */,
                                      true /* clear pending descriptor sets */);
    } else {
        resetCommandBufferStagingInfo(commandBuffer, false /* don't reset primaries */,
                                      false /* don't clear pending descriptor sets */);
    }
}

void ResourceTracker::resetCommandPoolStagingInfo(VkCommandPool commandPool) {
    struct goldfish_VkCommandPool* p = as_goldfish_VkCommandPool(commandPool);

    if (!p) return;

    forAllObjects(p->subObjects, [this](void* commandBuffer) {
        this->resetCommandBufferStagingInfo((VkCommandBuffer)commandBuffer,
                                            true /* also reset primaries */,
                                            true /* also clear pending descriptor sets */);
    });
}

void ResourceTracker::addToCommandPool(VkCommandPool commandPool, uint32_t commandBufferCount,
                                       VkCommandBuffer* pCommandBuffers) {
    for (uint32_t i = 0; i < commandBufferCount; ++i) {
        struct goldfish_VkCommandPool* p = as_goldfish_VkCommandPool(commandPool);
        struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(pCommandBuffers[i]);
        appendObject(&p->subObjects, (void*)(pCommandBuffers[i]));
        appendObject(&cb->poolObjects, (void*)commandPool);
    }
}

void ResourceTracker::clearCommandPool(VkCommandPool commandPool) {
    resetCommandPoolStagingInfo(commandPool);
    struct goldfish_VkCommandPool* p = as_goldfish_VkCommandPool(commandPool);
    forAllObjects(p->subObjects, [this](void* commandBuffer) {
        this->unregister_VkCommandBuffer((VkCommandBuffer)commandBuffer);
    });
    eraseObjects(&p->subObjects);
}

const VkPhysicalDeviceMemoryProperties& ResourceTracker::getPhysicalDeviceMemoryProperties(
    void* context, VkDevice device, VkPhysicalDevice physicalDevice) {
    if (!mCachedPhysicalDeviceMemoryProps) {
        if (physicalDevice == VK_NULL_HANDLE) {
            AutoLock<RecursiveLock> lock(mLock);

            auto deviceInfoIt = info_VkDevice.find(device);
            if (deviceInfoIt == info_VkDevice.end()) {
                mesa_loge("Failed to pass device or physical device.");
                abort();
            }
            const auto& deviceInfo = deviceInfoIt->second;
            physicalDevice = deviceInfo.physdev;
        }

        VkEncoder* enc = (VkEncoder*)context;

        VkPhysicalDeviceMemoryProperties properties;
        enc->vkGetPhysicalDeviceMemoryProperties(physicalDevice, &properties, true /* do lock */);

        mCachedPhysicalDeviceMemoryProps.emplace(std::move(properties));
    }
    return *mCachedPhysicalDeviceMemoryProps;
}
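
// Usage sketch (hypothetical helper, not part of this file): the cached
// properties are what a standard memory-type search would consume, e.g.
//
//     uint32_t findMemoryTypeIndex(const VkPhysicalDeviceMemoryProperties& props,
//                                  uint32_t typeBits, VkMemoryPropertyFlags wanted) {
//         for (uint32_t i = 0; i < props.memoryTypeCount; ++i) {
//             if ((typeBits & (1u << i)) &&
//                 (props.memoryTypes[i].propertyFlags & wanted) == wanted) {
//                 return i;
//             }
//         }
//         return VK_MAX_MEMORY_TYPES;  // not found
//     }
//
// Note the cache assumes the properties never change for the lifetime of the
// process, which appears consistent with this tracker's singleton design.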

static ResourceTracker* sTracker = nullptr;

ResourceTracker::ResourceTracker() {
    mCreateMapping = new CreateMapping();
    mDestroyMapping = new DestroyMapping();
}

ResourceTracker::~ResourceTracker() {
    delete mCreateMapping;
    delete mDestroyMapping;
}

VulkanHandleMapping* ResourceTracker::createMapping() { return mCreateMapping; }

VulkanHandleMapping* ResourceTracker::destroyMapping() { return mDestroyMapping; }

// static
ResourceTracker* ResourceTracker::get() {
    if (!sTracker) {
        // To be initialized once on vulkan device open.
        sTracker = new ResourceTracker;
    }
    return sTracker;
}

// static
ALWAYS_INLINE_GFXSTREAM VkEncoder* ResourceTracker::getCommandBufferEncoder(
    VkCommandBuffer commandBuffer) {
    if (!(ResourceTracker::streamFeatureBits &
          VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT)) {
        auto enc = ResourceTracker::getThreadLocalEncoder();
        ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, enc);
        return enc;
    }

    struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
    if (!cb->privateEncoder) {
        sStaging.setAllocFree(ResourceTracker::get()->getAlloc(),
                              ResourceTracker::get()->getFree());
        sStaging.popStaging((CommandBufferStagingStream**)&cb->privateStream, &cb->privateEncoder);
    }
    uint8_t* writtenPtr;
    size_t written;
    ((CommandBufferStagingStream*)cb->privateStream)->getWritten(&writtenPtr, &written);
    return cb->privateEncoder;
}

// static
ALWAYS_INLINE_GFXSTREAM VkEncoder* ResourceTracker::getQueueEncoder(VkQueue queue) {
    auto enc = ResourceTracker::getThreadLocalEncoder();
    if (!(ResourceTracker::streamFeatureBits &
          VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT)) {
        ResourceTracker::get()->syncEncodersForQueue(queue, enc);
    }
    return enc;
}

// static
ALWAYS_INLINE_GFXSTREAM VkEncoder* ResourceTracker::getThreadLocalEncoder() {
    auto hostConn = ResourceTracker::threadingCallbacks.hostConnectionGetFunc();
    auto vkEncoder = ResourceTracker::threadingCallbacks.vkEncoderGetFunc(hostConn);
    return vkEncoder;
}

// static
void ResourceTracker::setSeqnoPtr(uint32_t* seqnoptr) { sSeqnoPtr = seqnoptr; }

// static
ALWAYS_INLINE_GFXSTREAM uint32_t ResourceTracker::nextSeqno() {
    uint32_t res = __atomic_add_fetch(sSeqnoPtr, 1, __ATOMIC_SEQ_CST);
    return res;
}

// static
ALWAYS_INLINE_GFXSTREAM uint32_t ResourceTracker::getSeqno() {
    uint32_t res = __atomic_load_n(sSeqnoPtr, __ATOMIC_SEQ_CST);
    return res;
}

void ResourceTracker::transformImpl_VkExternalMemoryProperties_tohost(VkExternalMemoryProperties*,
                                                                      uint32_t) {}

void ResourceTracker::transformImpl_VkImageCreateInfo_fromhost(const VkImageCreateInfo*, uint32_t) {
}
void ResourceTracker::transformImpl_VkImageCreateInfo_tohost(const VkImageCreateInfo*, uint32_t) {}

#define DEFINE_TRANSFORMED_TYPE_IMPL(type)                                   \
    void ResourceTracker::transformImpl_##type##_tohost(type*, uint32_t) {}  \
    void ResourceTracker::transformImpl_##type##_fromhost(type*, uint32_t) {}

LIST_TRIVIAL_TRANSFORMED_TYPES(DEFINE_TRANSFORMED_TYPE_IMPL)

}  // namespace vk
}  // namespace gfxstream