/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "VulkanManager.h"

#include <EGL/egl.h>
#include <EGL/eglext.h>
#include <GrBackendSemaphore.h>
#include <GrBackendSurface.h>
#include <GrDirectContext.h>
#include <GrTypes.h>
#include <android/sync.h>
#include <ui/FatVector.h>
#include <vk/GrVkExtensions.h>
#include <vk/GrVkTypes.h>

#include <cstring>

#include <gui/TraceUtils.h>
#include "Properties.h"
#include "RenderThread.h"
#include "pipeline/skia/ShaderCache.h"
#include "renderstate/RenderState.h"

#undef LOG_TAG
#define LOG_TAG "VulkanManager"

namespace android {
namespace uirenderer {
namespace renderthread {

static void free_features_extensions_structs(const VkPhysicalDeviceFeatures2& features) {
    // All Vulkan structs that could be part of the features chain will start with the
    // structure type followed by the pNext pointer. We cast to the CommonVulkanHeader
    // so we can get access to the pNext for the next struct.
    struct CommonVulkanHeader {
        VkStructureType sType;
        void* pNext;
    };

    void* pNext = features.pNext;
    while (pNext) {
        void* current = pNext;
        pNext = static_cast<CommonVulkanHeader*>(current)->pNext;
        free(current);
    }
}

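// Proc resolver handed to Skia. Device-level lookups return HWUI's intercepted
// vkQueueSubmit / vkQueueWaitIdle wrappers (static members of VulkanManager) so
// that Skia's use of the graphics queue can be synchronized with HWUI's own (see
// mGraphicsQueueMutex below); everything else falls through to the loader.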
GrVkGetProc VulkanManager::sSkiaGetProp = [](const char* proc_name, VkInstance instance,
                                             VkDevice device) {
    if (device != VK_NULL_HANDLE) {
        if (strcmp("vkQueueSubmit", proc_name) == 0) {
            return (PFN_vkVoidFunction)VulkanManager::interceptedVkQueueSubmit;
        } else if (strcmp("vkQueueWaitIdle", proc_name) == 0) {
            return (PFN_vkVoidFunction)VulkanManager::interceptedVkQueueWaitIdle;
        }
        return vkGetDeviceProcAddr(device, proc_name);
    }
    return vkGetInstanceProcAddr(instance, proc_name);
};

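// Resolve Vulkan entry points into the corresponding m<Name> member pointers:
// GET_PROC for global (pre-instance) commands, GET_INST_PROC for instance-level
// commands, and GET_DEV_PROC for device-level commands (fetched through
// vkGetDeviceProcAddr to skip loader dispatch).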
#define GET_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(VK_NULL_HANDLE, "vk" #F)
#define GET_INST_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(mInstance, "vk" #F)
#define GET_DEV_PROC(F) m##F = (PFN_vk##F)vkGetDeviceProcAddr(mDevice, "vk" #F)

sp<VulkanManager> VulkanManager::getInstance() {
    // cache a weakptr to the context to enable a second thread to share the same vulkan state
    static wp<VulkanManager> sWeakInstance = nullptr;
    static std::mutex sLock;

    std::lock_guard _lock{sLock};
    sp<VulkanManager> vulkanManager = sWeakInstance.promote();
    if (!vulkanManager.get()) {
        vulkanManager = new VulkanManager();
        sWeakInstance = vulkanManager;
    }

    return vulkanManager;
}

VulkanManager::~VulkanManager() {
    if (mDevice != VK_NULL_HANDLE) {
        mDeviceWaitIdle(mDevice);
        mDestroyDevice(mDevice, nullptr);
    }

    if (mInstance != VK_NULL_HANDLE) {
        mDestroyInstance(mInstance, nullptr);
    }

    mGraphicsQueue = VK_NULL_HANDLE;
    mDevice = VK_NULL_HANDLE;
    mPhysicalDevice = VK_NULL_HANDLE;
    mInstance = VK_NULL_HANDLE;
    mInstanceExtensionsOwner.clear();
    mInstanceExtensions.clear();
    mDeviceExtensionsOwner.clear();
    mDeviceExtensions.clear();
    free_features_extensions_structs(mPhysicalDeviceFeatures2);
    mPhysicalDeviceFeatures2 = {};
}

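// Bring-up sequence: create the VkInstance, pick the (single) physical device and
// its graphics queue family, gather the instance/device extension lists for Skia,
// build the VkPhysicalDeviceFeatures2 pNext chain, then create the VkDevice and
// resolve its entry points.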
void VulkanManager::setupDevice(GrVkExtensions& grExtensions, VkPhysicalDeviceFeatures2& features) {
    VkResult err;

    constexpr VkApplicationInfo app_info = {
            VK_STRUCTURE_TYPE_APPLICATION_INFO,  // sType
            nullptr,                             // pNext
            "android framework",                 // pApplicationName
            0,                                   // applicationVersion
            "android framework",                 // pEngineName
            0,                                   // engineVersion
            mAPIVersion,                         // apiVersion
    };

    {
        GET_PROC(EnumerateInstanceExtensionProperties);

        uint32_t extensionCount = 0;
        err = mEnumerateInstanceExtensionProperties(nullptr, &extensionCount, nullptr);
        LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err);
        mInstanceExtensionsOwner.resize(extensionCount);
        err = mEnumerateInstanceExtensionProperties(nullptr, &extensionCount,
                                                    mInstanceExtensionsOwner.data());
        LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err);
        bool hasKHRSurfaceExtension = false;
        bool hasKHRAndroidSurfaceExtension = false;
        for (const VkExtensionProperties& extension : mInstanceExtensionsOwner) {
            mInstanceExtensions.push_back(extension.extensionName);
            if (!strcmp(extension.extensionName, VK_KHR_SURFACE_EXTENSION_NAME)) {
                hasKHRSurfaceExtension = true;
            }
            if (!strcmp(extension.extensionName, VK_KHR_ANDROID_SURFACE_EXTENSION_NAME)) {
                hasKHRAndroidSurfaceExtension = true;
            }
        }
        LOG_ALWAYS_FATAL_IF(!hasKHRSurfaceExtension || !hasKHRAndroidSurfaceExtension);
    }

    const VkInstanceCreateInfo instance_create = {
            VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,  // sType
            nullptr,                                 // pNext
            0,                                       // flags
            &app_info,                               // pApplicationInfo
            0,                                       // enabledLayerCount
            nullptr,                                 // ppEnabledLayerNames
            (uint32_t)mInstanceExtensions.size(),    // enabledExtensionCount
            mInstanceExtensions.data(),              // ppEnabledExtensionNames
    };

    GET_PROC(CreateInstance);
    err = mCreateInstance(&instance_create, nullptr, &mInstance);
    LOG_ALWAYS_FATAL_IF(err < 0);

    GET_INST_PROC(CreateDevice);
    GET_INST_PROC(DestroyInstance);
    GET_INST_PROC(EnumerateDeviceExtensionProperties);
    GET_INST_PROC(EnumeratePhysicalDevices);
    GET_INST_PROC(GetPhysicalDeviceFeatures2);
    GET_INST_PROC(GetPhysicalDeviceImageFormatProperties2);
    GET_INST_PROC(GetPhysicalDeviceProperties);
    GET_INST_PROC(GetPhysicalDeviceQueueFamilyProperties);

    uint32_t gpuCount;
    LOG_ALWAYS_FATAL_IF(mEnumeratePhysicalDevices(mInstance, &gpuCount, nullptr));
    LOG_ALWAYS_FATAL_IF(!gpuCount);
    // Just return the first physical device instead of getting the whole array, since
    // there should only be one device on Android.
    gpuCount = 1;
    err = mEnumeratePhysicalDevices(mInstance, &gpuCount, &mPhysicalDevice);
    // VK_INCOMPLETE is returned when the count we provide is less than the total device count.
    LOG_ALWAYS_FATAL_IF(err && VK_INCOMPLETE != err);

    VkPhysicalDeviceProperties physDeviceProperties;
    mGetPhysicalDeviceProperties(mPhysicalDevice, &physDeviceProperties);
    LOG_ALWAYS_FATAL_IF(physDeviceProperties.apiVersion < VK_MAKE_VERSION(1, 1, 0));
    mDriverVersion = physDeviceProperties.driverVersion;

    // query to get the initial queue props size
    uint32_t queueCount;
    mGetPhysicalDeviceQueueFamilyProperties(mPhysicalDevice, &queueCount, nullptr);
    LOG_ALWAYS_FATAL_IF(!queueCount);

    // now get the actual queue props
    std::unique_ptr<VkQueueFamilyProperties[]> queueProps(new VkQueueFamilyProperties[queueCount]);
    mGetPhysicalDeviceQueueFamilyProperties(mPhysicalDevice, &queueCount, queueProps.get());

    // iterate to find the graphics queue
    mGraphicsQueueIndex = queueCount;
    for (uint32_t i = 0; i < queueCount; i++) {
        if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
            mGraphicsQueueIndex = i;
            break;
        }
    }
    LOG_ALWAYS_FATAL_IF(mGraphicsQueueIndex == queueCount);

    {
        uint32_t extensionCount = 0;
        err = mEnumerateDeviceExtensionProperties(mPhysicalDevice, nullptr, &extensionCount,
                                                  nullptr);
        LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err);
        mDeviceExtensionsOwner.resize(extensionCount);
        err = mEnumerateDeviceExtensionProperties(mPhysicalDevice, nullptr, &extensionCount,
                                                  mDeviceExtensionsOwner.data());
        LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err);
        bool hasKHRSwapchainExtension = false;
        for (const VkExtensionProperties& extension : mDeviceExtensionsOwner) {
            mDeviceExtensions.push_back(extension.extensionName);
            if (!strcmp(extension.extensionName, VK_KHR_SWAPCHAIN_EXTENSION_NAME)) {
                hasKHRSwapchainExtension = true;
            }
        }
        LOG_ALWAYS_FATAL_IF(!hasKHRSwapchainExtension);
    }

    grExtensions.init(sSkiaGetProp, mInstance, mPhysicalDevice, mInstanceExtensions.size(),
                      mInstanceExtensions.data(), mDeviceExtensions.size(),
                      mDeviceExtensions.data());

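    // HWUI relies on VK_KHR_external_semaphore_fd throughout this file: dequeue
    // fences are imported into semaphores (dequeueNextBuffer, fenceWait) and
    // present/release fences are exported from semaphores (finishFrame/swapBuffers,
    // createReleaseFence), so a driver without it is unusable.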
    LOG_ALWAYS_FATAL_IF(!grExtensions.hasExtension(VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME, 1));

    memset(&features, 0, sizeof(VkPhysicalDeviceFeatures2));
    features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
    features.pNext = nullptr;

    // Setup all extension feature structs we may want to use.
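    // tailPNext always points at the pNext field of the last struct in the chain,
    // so each feature struct is appended in O(1). The structs are malloc'd rather
    // than stack-allocated because the chain outlives this function; the destructor
    // walks and free()s it via free_features_extensions_structs().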
    void** tailPNext = &features.pNext;

    if (grExtensions.hasExtension(VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME, 2)) {
        VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT* blend;
        blend = (VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT*)malloc(
                sizeof(VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT));
        LOG_ALWAYS_FATAL_IF(!blend);
        blend->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT;
        blend->pNext = nullptr;
        *tailPNext = blend;
        tailPNext = &blend->pNext;
    }

    VkPhysicalDeviceSamplerYcbcrConversionFeatures* ycbcrFeature;
    ycbcrFeature = (VkPhysicalDeviceSamplerYcbcrConversionFeatures*)malloc(
            sizeof(VkPhysicalDeviceSamplerYcbcrConversionFeatures));
    LOG_ALWAYS_FATAL_IF(!ycbcrFeature);
    ycbcrFeature->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES;
    ycbcrFeature->pNext = nullptr;
    *tailPNext = ycbcrFeature;
    tailPNext = &ycbcrFeature->pNext;

    // query to get the physical device features
    mGetPhysicalDeviceFeatures2(mPhysicalDevice, &features);
    // Disable robustBufferAccess: it looks like it would slow things down, and we
    // can't depend on it being available on all platforms.
    features.features.robustBufferAccess = VK_FALSE;

    float queuePriorities[1] = {0.0};

    void* queueNextPtr = nullptr;

    VkDeviceQueueGlobalPriorityCreateInfoEXT queuePriorityCreateInfo;

    if (Properties::contextPriority != 0 &&
        grExtensions.hasExtension(VK_EXT_GLOBAL_PRIORITY_EXTENSION_NAME, 2)) {
        memset(&queuePriorityCreateInfo, 0, sizeof(VkDeviceQueueGlobalPriorityCreateInfoEXT));
        queuePriorityCreateInfo.sType =
                VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT;
        queuePriorityCreateInfo.pNext = nullptr;
        switch (Properties::contextPriority) {
            case EGL_CONTEXT_PRIORITY_LOW_IMG:
                queuePriorityCreateInfo.globalPriority = VK_QUEUE_GLOBAL_PRIORITY_LOW_EXT;
                break;
            case EGL_CONTEXT_PRIORITY_MEDIUM_IMG:
                queuePriorityCreateInfo.globalPriority = VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT;
                break;
            case EGL_CONTEXT_PRIORITY_HIGH_IMG:
                queuePriorityCreateInfo.globalPriority = VK_QUEUE_GLOBAL_PRIORITY_HIGH_EXT;
                break;
            default:
                LOG_ALWAYS_FATAL("Unsupported context priority");
        }
        queueNextPtr = &queuePriorityCreateInfo;
    }

    const VkDeviceQueueCreateInfo queueInfo = {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,  // sType
            queueNextPtr,                                // pNext
            0,                                           // VkDeviceQueueCreateFlags
            mGraphicsQueueIndex,                         // queueFamilyIndex
            1,                                           // queueCount
            queuePriorities,                             // pQueuePriorities
    };

    const VkDeviceCreateInfo deviceInfo = {
            VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,  // sType
            &features,                             // pNext
            0,                                     // VkDeviceCreateFlags
            1,                                     // queueCreateInfoCount
            &queueInfo,                            // pQueueCreateInfos
            0,                                     // enabledLayerCount
            nullptr,                               // ppEnabledLayerNames
            (uint32_t)mDeviceExtensions.size(),    // enabledExtensionCount
            mDeviceExtensions.data(),              // ppEnabledExtensionNames
            nullptr,                               // pEnabledFeatures
    };

    LOG_ALWAYS_FATAL_IF(mCreateDevice(mPhysicalDevice, &deviceInfo, nullptr, &mDevice));

    GET_DEV_PROC(AllocateCommandBuffers);
    GET_DEV_PROC(BeginCommandBuffer);
    GET_DEV_PROC(CmdPipelineBarrier);
    GET_DEV_PROC(CreateCommandPool);
    GET_DEV_PROC(CreateFence);
    GET_DEV_PROC(CreateSemaphore);
    GET_DEV_PROC(DestroyCommandPool);
    GET_DEV_PROC(DestroyDevice);
    GET_DEV_PROC(DestroyFence);
    GET_DEV_PROC(DestroySemaphore);
    GET_DEV_PROC(DeviceWaitIdle);
    GET_DEV_PROC(EndCommandBuffer);
    GET_DEV_PROC(FreeCommandBuffers);
    GET_DEV_PROC(GetDeviceQueue);
    GET_DEV_PROC(GetSemaphoreFdKHR);
    GET_DEV_PROC(ImportSemaphoreFdKHR);
    GET_DEV_PROC(QueueSubmit);
    GET_DEV_PROC(QueueWaitIdle);
    GET_DEV_PROC(ResetCommandBuffer);
    GET_DEV_PROC(ResetFences);
    GET_DEV_PROC(WaitForFences);
    GET_DEV_PROC(FrameBoundaryANDROID);
}

void VulkanManager::initialize() {
    std::lock_guard _lock{mInitializeLock};

    if (mDevice != VK_NULL_HANDLE) {
        return;
    }

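    // HWUI requires Vulkan 1.1: the code above and below depends on 1.1 core
    // functionality such as vkGetPhysicalDeviceFeatures2 and sampler YCbCr
    // conversion.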
    GET_PROC(EnumerateInstanceVersion);
    uint32_t instanceVersion;
    LOG_ALWAYS_FATAL_IF(mEnumerateInstanceVersion(&instanceVersion));
    LOG_ALWAYS_FATAL_IF(instanceVersion < VK_MAKE_VERSION(1, 1, 0));

    this->setupDevice(mExtensions, mPhysicalDeviceFeatures2);

    mGetDeviceQueue(mDevice, mGraphicsQueueIndex, 0, &mGraphicsQueue);

    if (Properties::enablePartialUpdates && Properties::useBufferAge) {
        mSwapBehavior = SwapBehavior::BufferAge;
    }
}

sk_sp<GrDirectContext> VulkanManager::createContext(const GrContextOptions& options,
                                                    ContextType contextType) {
    GrVkBackendContext backendContext;
    backendContext.fInstance = mInstance;
    backendContext.fPhysicalDevice = mPhysicalDevice;
    backendContext.fDevice = mDevice;
    backendContext.fQueue = mGraphicsQueue;
    backendContext.fGraphicsQueueIndex = mGraphicsQueueIndex;
    backendContext.fMaxAPIVersion = mAPIVersion;
    backendContext.fVkExtensions = &mExtensions;
    backendContext.fDeviceFeatures2 = &mPhysicalDeviceFeatures2;
    backendContext.fGetProc = sSkiaGetProp;

    return GrDirectContext::MakeVulkan(backendContext, options);
}

VkFunctorInitParams VulkanManager::getVkFunctorInitParams() const {
    return VkFunctorInitParams{
            .instance = mInstance,
            .physical_device = mPhysicalDevice,
            .device = mDevice,
            .queue = mGraphicsQueue,
            .graphics_queue_index = mGraphicsQueueIndex,
            .api_version = mAPIVersion,
            .enabled_instance_extension_names = mInstanceExtensions.data(),
            .enabled_instance_extension_names_length =
                    static_cast<uint32_t>(mInstanceExtensions.size()),
            .enabled_device_extension_names = mDeviceExtensions.data(),
            .enabled_device_extension_names_length =
                    static_cast<uint32_t>(mDeviceExtensions.size()),
            .device_features_2 = &mPhysicalDeviceFeatures2,
    };
}

Frame VulkanManager::dequeueNextBuffer(VulkanSurface* surface) {
    VulkanSurface::NativeBufferInfo* bufferInfo = surface->dequeueNativeBuffer();

    if (bufferInfo == nullptr) {
        ALOGE("VulkanSurface::dequeueNativeBuffer called with an invalid surface!");
        return Frame(-1, -1, 0);
    }

    LOG_ALWAYS_FATAL_IF(!bufferInfo->dequeued);

    if (bufferInfo->dequeue_fence != -1) {
        struct sync_file_info* finfo = sync_file_info(bufferInfo->dequeue_fence);
        bool isSignalPending = false;
        if (finfo != NULL) {
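            // finfo->status is 1 once the fence has signaled, 0 while it is still
            // active, and negative on error; only route the wait through the GPU
            // when a signal is still outstanding.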
            isSignalPending = finfo->status != 1;
            sync_file_info_free(finfo);
        }
        if (isSignalPending) {
            int fence_clone = dup(bufferInfo->dequeue_fence);
            if (fence_clone == -1) {
                ALOGE("dup(fence) failed, stalling until signalled: %s (%d)", strerror(errno),
                      errno);
                sync_wait(bufferInfo->dequeue_fence, -1 /* forever */);
            } else {
                VkSemaphoreCreateInfo semaphoreInfo;
                semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
                semaphoreInfo.pNext = nullptr;
                semaphoreInfo.flags = 0;
                VkSemaphore semaphore;
                VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
                if (err != VK_SUCCESS) {
                    ALOGE("Failed to create import semaphore, err: %d", err);
                    close(fence_clone);
                    sync_wait(bufferInfo->dequeue_fence, -1 /* forever */);
                } else {
                    VkImportSemaphoreFdInfoKHR importInfo;
                    importInfo.sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR;
                    importInfo.pNext = nullptr;
                    importInfo.semaphore = semaphore;
                    importInfo.flags = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT;
                    importInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
                    importInfo.fd = fence_clone;

                    err = mImportSemaphoreFdKHR(mDevice, &importInfo);
                    if (err != VK_SUCCESS) {
                        ALOGE("Failed to import semaphore, err: %d", err);
                        mDestroySemaphore(mDevice, semaphore, nullptr);
                        close(fence_clone);
                        sync_wait(bufferInfo->dequeue_fence, -1 /* forever */);
                    } else {
                        GrBackendSemaphore backendSemaphore;
                        backendSemaphore.initVulkan(semaphore);
                        // Skia will take ownership of the VkSemaphore and delete it once the wait
                        // has finished. The VkSemaphore also owns the imported fd, so it will
                        // close the fd when it is deleted.
                        bufferInfo->skSurface->wait(1, &backendSemaphore);
                        // The following flush blocks the GPU immediately instead of waiting for
                        // other drawing ops. It seems dequeue_fence is not respected otherwise.
                        // TODO: remove the flush after finding why backendSemaphore is not working.
                        bufferInfo->skSurface->flushAndSubmit();
                    }
                }
            }
        }
    }

    int bufferAge = (mSwapBehavior == SwapBehavior::Discard) ? 0 : surface->getCurrentBuffersAge();
    return Frame(surface->logicalWidth(), surface->logicalHeight(), bufferAge);
}

struct DestroySemaphoreInfo {
    PFN_vkDestroySemaphore mDestroyFunction;
    VkDevice mDevice;
    VkSemaphore mSemaphore;
    // We need to make sure we don't delete the VkSemaphore until it is done being used by both Skia
    // (including by the GPU) and inside the VulkanManager. So we always start with two refs, one
    // owned by Skia and one owned by the VulkanManager. The refs are decremented each time
    // destroy_semaphore is called with this object. Skia will call destroy_semaphore once it is
    // done with the semaphore and the GPU has finished work on the semaphore. The VulkanManager
    // calls destroy_semaphore after sending the semaphore to Skia and exporting it if need be.
    int mRefs = 2;

    DestroySemaphoreInfo(PFN_vkDestroySemaphore destroyFunction, VkDevice device,
                         VkSemaphore semaphore)
            : mDestroyFunction(destroyFunction), mDevice(device), mSemaphore(semaphore) {}
};

static void destroy_semaphore(void* context) {
    DestroySemaphoreInfo* info = reinterpret_cast<DestroySemaphoreInfo*>(context);
    --info->mRefs;
    if (!info->mRefs) {
        info->mDestroyFunction(info->mDevice, info->mSemaphore, nullptr);
        delete info;
    }
}

nsecs_t VulkanManager::finishFrame(SkSurface* surface) {
    ATRACE_NAME("Vulkan finish frame");
    ALOGE_IF(mSwapSemaphore != VK_NULL_HANDLE || mDestroySemaphoreContext != nullptr,
             "finishFrame already has an outstanding semaphore");

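    // Create a semaphore that can be exported as a SYNC_FD. The flush below signals
    // it on the GPU; swapBuffers() then exports it as a fence fd that is handed to
    // the windowing system as the present fence.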
    VkExportSemaphoreCreateInfo exportInfo;
    exportInfo.sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO;
    exportInfo.pNext = nullptr;
    exportInfo.handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;

    VkSemaphoreCreateInfo semaphoreInfo;
    semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    semaphoreInfo.pNext = &exportInfo;
    semaphoreInfo.flags = 0;
    VkSemaphore semaphore;
    VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
    ALOGE_IF(VK_SUCCESS != err, "VulkanManager::finishFrame(): Failed to create semaphore");

    GrBackendSemaphore backendSemaphore;
    backendSemaphore.initVulkan(semaphore);

    GrFlushInfo flushInfo;
    if (err == VK_SUCCESS) {
        mDestroySemaphoreContext = new DestroySemaphoreInfo(mDestroySemaphore, mDevice, semaphore);
        flushInfo.fNumSemaphores = 1;
        flushInfo.fSignalSemaphores = &backendSemaphore;
        flushInfo.fFinishedProc = destroy_semaphore;
        flushInfo.fFinishedContext = mDestroySemaphoreContext;
    } else {
        semaphore = VK_NULL_HANDLE;
    }
    GrSemaphoresSubmitted submitted =
            surface->flush(SkSurface::BackendSurfaceAccess::kPresent, flushInfo);
    GrDirectContext* context = GrAsDirectContext(surface->recordingContext());
    ALOGE_IF(!context, "Surface is not backed by gpu");
    context->submit();
    const nsecs_t submissionTime = systemTime();
    if (semaphore != VK_NULL_HANDLE) {
        if (submitted == GrSemaphoresSubmitted::kYes) {
            mSwapSemaphore = semaphore;
            if (mFrameBoundaryANDROID) {
                // retrieve VkImage used as render target
                VkImage image = VK_NULL_HANDLE;
                GrBackendRenderTarget backendRenderTarget =
                        surface->getBackendRenderTarget(SkSurface::kFlushRead_BackendHandleAccess);
                if (backendRenderTarget.isValid()) {
                    GrVkImageInfo info;
                    if (backendRenderTarget.getVkImageInfo(&info)) {
                        image = info.fImage;
                    } else {
                        ALOGE("Frame boundary: backend is not vulkan");
                    }
                } else {
                    ALOGE("Frame boundary: invalid backend render target");
                }
                // frameBoundaryANDROID needs to know about mSwapSemaphore, but
                // it won't wait on it.
                mFrameBoundaryANDROID(mDevice, mSwapSemaphore, image);
            }
        } else {
            destroy_semaphore(mDestroySemaphoreContext);
            mDestroySemaphoreContext = nullptr;
        }
    }
    skiapipeline::ShaderCache::get().onVkFrameFlushed(context);

    return submissionTime;
}

void VulkanManager::swapBuffers(VulkanSurface* surface, const SkRect& dirtyRect) {
    if (CC_UNLIKELY(Properties::waitForGpuCompletion)) {
        ATRACE_NAME("Finishing GPU work");
        mDeviceWaitIdle(mDevice);
    }

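    // Export the swap semaphore signaled by finishFrame() as a sync fd; the fd
    // signals once the frame's GPU work completes, and presentCurrentBuffer()
    // passes it to the windowing system as the present fence.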
    int fenceFd = -1;
    if (mSwapSemaphore != VK_NULL_HANDLE) {
        VkSemaphoreGetFdInfoKHR getFdInfo;
        getFdInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR;
        getFdInfo.pNext = nullptr;
        getFdInfo.semaphore = mSwapSemaphore;
        getFdInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;

        VkResult err = mGetSemaphoreFdKHR(mDevice, &getFdInfo, &fenceFd);
        ALOGE_IF(VK_SUCCESS != err, "VulkanManager::swapBuffers(): Failed to get semaphore Fd");
    } else {
        ALOGE("VulkanManager::swapBuffers(): Semaphore submission failed");

        std::lock_guard<std::mutex> lock(mGraphicsQueueMutex);
        mQueueWaitIdle(mGraphicsQueue);
    }
    if (mDestroySemaphoreContext) {
        destroy_semaphore(mDestroySemaphoreContext);
    }

    surface->presentCurrentBuffer(dirtyRect, fenceFd);
    mSwapSemaphore = VK_NULL_HANDLE;
    mDestroySemaphoreContext = nullptr;
}

void VulkanManager::destroySurface(VulkanSurface* surface) {
    // Make sure all submit commands have finished before starting to destroy objects.
    if (VK_NULL_HANDLE != mGraphicsQueue) {
        std::lock_guard<std::mutex> lock(mGraphicsQueueMutex);
        mQueueWaitIdle(mGraphicsQueue);
    }
    mDeviceWaitIdle(mDevice);

    delete surface;
}

VulkanSurface* VulkanManager::createSurface(ANativeWindow* window,
                                            ColorMode colorMode,
                                            sk_sp<SkColorSpace> surfaceColorSpace,
                                            SkColorType surfaceColorType,
                                            GrDirectContext* grContext,
                                            uint32_t extraBuffers) {
    LOG_ALWAYS_FATAL_IF(!hasVkContext(), "Not initialized");
    if (!window) {
        return nullptr;
    }

    return VulkanSurface::Create(window, colorMode, surfaceColorType, surfaceColorSpace, grContext,
                                 *this, extraBuffers);
}

status_t VulkanManager::fenceWait(int fence, GrDirectContext* grContext) {
    if (!hasVkContext()) {
        ALOGE("VulkanManager::fenceWait: VkDevice not initialized");
        return INVALID_OPERATION;
    }

    // Block GPU on the fence.
    int fenceFd = ::dup(fence);
    if (fenceFd == -1) {
        ALOGE("VulkanManager::fenceWait: error dup'ing fence fd: %d", errno);
        return -errno;
    }

    VkSemaphoreCreateInfo semaphoreInfo;
    semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    semaphoreInfo.pNext = nullptr;
    semaphoreInfo.flags = 0;
    VkSemaphore semaphore;
    VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
    if (VK_SUCCESS != err) {
        close(fenceFd);
        ALOGE("Failed to create import semaphore, err: %d", err);
        return UNKNOWN_ERROR;
    }
    VkImportSemaphoreFdInfoKHR importInfo;
    importInfo.sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR;
    importInfo.pNext = nullptr;
    importInfo.semaphore = semaphore;
    importInfo.flags = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT;
    importInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
    importInfo.fd = fenceFd;

    err = mImportSemaphoreFdKHR(mDevice, &importInfo);
    if (VK_SUCCESS != err) {
        mDestroySemaphore(mDevice, semaphore, nullptr);
        close(fenceFd);
        ALOGE("Failed to import semaphore, err: %d", err);
        return UNKNOWN_ERROR;
    }

    GrBackendSemaphore beSemaphore;
    beSemaphore.initVulkan(semaphore);

    // Skia will take ownership of the VkSemaphore and delete it once the wait has finished. The
    // VkSemaphore also owns the imported fd, so it will close the fd when it is deleted.
    grContext->wait(1, &beSemaphore);
    grContext->flushAndSubmit();

    return OK;
}

status_t VulkanManager::createReleaseFence(int* nativeFence, GrDirectContext* grContext) {
    *nativeFence = -1;
    if (!hasVkContext()) {
        ALOGE("VulkanManager::createReleaseFence: VkDevice not initialized");
        return INVALID_OPERATION;
    }

    VkExportSemaphoreCreateInfo exportInfo;
    exportInfo.sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO;
    exportInfo.pNext = nullptr;
    exportInfo.handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;

    VkSemaphoreCreateInfo semaphoreInfo;
    semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    semaphoreInfo.pNext = &exportInfo;
    semaphoreInfo.flags = 0;
    VkSemaphore semaphore;
    VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
    if (VK_SUCCESS != err) {
        ALOGE("VulkanManager::createReleaseFence: Failed to create semaphore");
        return INVALID_OPERATION;
    }

    GrBackendSemaphore backendSemaphore;
    backendSemaphore.initVulkan(semaphore);

    DestroySemaphoreInfo* destroyInfo =
            new DestroySemaphoreInfo(mDestroySemaphore, mDevice, semaphore);
    // Even if Skia fails to submit the semaphore, it will still call the destroy_semaphore callback
    // which will remove its ref to the semaphore. The VulkanManager must still release its ref when
    // it is done with the semaphore.
    GrFlushInfo flushInfo;
    flushInfo.fNumSemaphores = 1;
    flushInfo.fSignalSemaphores = &backendSemaphore;
    flushInfo.fFinishedProc = destroy_semaphore;
    flushInfo.fFinishedContext = destroyInfo;
    GrSemaphoresSubmitted submitted = grContext->flush(flushInfo);
    grContext->submit();

    if (submitted == GrSemaphoresSubmitted::kNo) {
        ALOGE("VulkanManager::createReleaseFence: Failed to submit semaphore");
        destroy_semaphore(destroyInfo);
        return INVALID_OPERATION;
    }

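    // Export the semaphore as a sync fd that signals once the submitted GPU work
    // completes. The caller takes ownership of the returned fd and is responsible
    // for closing it.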
    VkSemaphoreGetFdInfoKHR getFdInfo;
    getFdInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR;
    getFdInfo.pNext = nullptr;
    getFdInfo.semaphore = semaphore;
    getFdInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;

    int fenceFd = 0;

    err = mGetSemaphoreFdKHR(mDevice, &getFdInfo, &fenceFd);
    destroy_semaphore(destroyInfo);
    if (VK_SUCCESS != err) {
        ALOGE("VulkanManager::createReleaseFence: Failed to get semaphore Fd");
        return INVALID_OPERATION;
    }
    *nativeFence = fenceFd;

    return OK;
}

} /* namespace renderthread */
} /* namespace uirenderer */
} /* namespace android */