/*
 * Copyright (c) 2023 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "platform/ohos/backend/rs_vulkan_context.h"
#include <memory>
#include <mutex>
#include <set>
#include <dlfcn.h>
#include <vector>
#include "common/rs_optional_trace.h"
#include "platform/common/rs_log.h"
#include "render_context/memory_handler.h"
#include "include/gpu/vk/GrVkExtensions.h"
#include "unistd.h"
#include "vulkan/vulkan_core.h"
#include "vulkan/vulkan_ohos.h"
#include "sync_fence.h"

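// Resolves the Vulkan entry point vk<name> through AcquireProc and stores it in the
// matching vk<name> member; a failed lookup is only logged so that optional entry points
// do not abort initialization.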
#define ACQUIRE_PROC(name, context) \
    if (!(vk##name = AcquireProc("vk" #name, context))) { \
        ROSEN_LOGE("Could not acquire proc: vk" #name); \
    }

namespace OHOS {
namespace Rosen {

thread_local std::shared_ptr<Drawing::GPUContext> RsVulkanContext::drawingContext_ = nullptr;
thread_local std::shared_ptr<Drawing::GPUContext> RsVulkanContext::protectedDrawingContext_ = nullptr;
thread_local bool RsVulkanContext::isProtected_ = false;
thread_local VulkanInterfaceType RsVulkanContext::vulkanInterfaceType_ = VulkanInterfaceType::UNI_RENDER;
void* RsVulkanInterface::handle_ = nullptr;

static std::vector<const char*> gInstanceExtensions = {
    VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME,
};

static std::vector<const char*> gDeviceExtensions = {
    VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME,
    VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME,
    VK_KHR_MAINTENANCE1_EXTENSION_NAME,
    VK_KHR_MAINTENANCE2_EXTENSION_NAME,
    VK_KHR_MAINTENANCE3_EXTENSION_NAME,
    VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME,
    VK_KHR_BIND_MEMORY_2_EXTENSION_NAME,
    VK_OHOS_NATIVE_BUFFER_EXTENSION_NAME,
    VK_OHOS_EXTERNAL_MEMORY_EXTENSION_NAME,
};

static const int GR_CACHE_MAX_COUNT = 8192;
static const size_t GR_CACHE_MAX_BYTE_SIZE = 96 * (1 << 20);
static const int32_t CACHE_LIMITS_TIMES = 5; // this will change RS memory!

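// Loads libvulkan, acquires the loader entry points, creates the VkInstance/VkDevice and
// fills the Skia GrVkBackendContext. isProtected requests a protected-memory capable queue
// and device (used for the DRM-protected path guarded by IS_ENABLE_DRM below).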
void RsVulkanInterface::Init(VulkanInterfaceType vulkanInterfaceType, bool isProtected)
{
    acquiredMandatoryProcAddresses_ = false;
    memHandler_ = nullptr;
    acquiredMandatoryProcAddresses_ = OpenLibraryHandle() && SetupLoaderProcAddresses();
    interfaceType_ = vulkanInterfaceType;
    CreateInstance();
    SelectPhysicalDevice(isProtected);
    CreateDevice(isProtected);
    std::unique_lock<std::mutex> lock(vkMutex_);
    CreateSkiaBackendContext(&backendContext_, isProtected);
}

RsVulkanInterface::~RsVulkanInterface()
{
    for (auto&& semaphoreFence : usedSemaphoreFenceList_) {
        if (semaphoreFence.fence != nullptr) {
            semaphoreFence.fence->Wait(-1);
        }
        vkDestroySemaphore(device_, semaphoreFence.semaphore, nullptr);
    }
    usedSemaphoreFenceList_.clear();
    if (protectedMemoryFeatures_) {
        delete protectedMemoryFeatures_;
    }
    CloseLibraryHandle();
}

bool RsVulkanInterface::IsValid() const
{
    return instance_ != VK_NULL_HANDLE && device_ != VK_NULL_HANDLE;
}

bool RsVulkanInterface::SetupLoaderProcAddresses()
{
    if (handle_ == nullptr) {
        return true;
    }
    vkGetInstanceProcAddr = reinterpret_cast<PFN_vkGetInstanceProcAddr>(dlsym(handle_, "vkGetInstanceProcAddr"));
    vkGetDeviceProcAddr = reinterpret_cast<PFN_vkGetDeviceProcAddr>(dlsym(handle_, "vkGetDeviceProcAddr"));
    vkEnumerateInstanceExtensionProperties = reinterpret_cast<PFN_vkEnumerateInstanceExtensionProperties>(
        dlsym(handle_, "vkEnumerateInstanceExtensionProperties"));
    vkCreateInstance = reinterpret_cast<PFN_vkCreateInstance>(dlsym(handle_, "vkCreateInstance"));

    if (!vkGetInstanceProcAddr) {
        ROSEN_LOGE("Could not acquire vkGetInstanceProcAddr");
        return false;
    }

    VkInstance null_instance = VK_NULL_HANDLE;
    ACQUIRE_PROC(EnumerateInstanceLayerProperties, null_instance);
    return true;
}

bool RsVulkanInterface::CreateInstance()
{
    if (!acquiredMandatoryProcAddresses_) {
        return false;
    }

    const VkApplicationInfo info = {
        .sType = VK_STRUCTURE_TYPE_APPLICATION_INFO,
        .pNext = nullptr,
        .pApplicationName = "OHOS",
        .applicationVersion = 0,
        .pEngineName = "Rosen",
        .engineVersion = VK_MAKE_VERSION(1, 0, 0),
        .apiVersion = VK_API_VERSION_1_2,
    };

    const VkInstanceCreateInfo create_info = {
        .sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,
        .pNext = nullptr,
        .flags = 0,
        .pApplicationInfo = &info,
        .enabledLayerCount = 0,
        .ppEnabledLayerNames = nullptr,
        .enabledExtensionCount = static_cast<uint32_t>(gInstanceExtensions.size()),
        .ppEnabledExtensionNames = gInstanceExtensions.data(),
    };
    if (vkCreateInstance(&create_info, nullptr, &instance_) != VK_SUCCESS) {
        ROSEN_LOGE("Could not create vulkan instance");
        return false;
    }

    ACQUIRE_PROC(CreateDevice, instance_);
    ACQUIRE_PROC(DestroyDevice, instance_);
    ACQUIRE_PROC(DestroyInstance, instance_);
    ACQUIRE_PROC(EnumerateDeviceLayerProperties, instance_);
    ACQUIRE_PROC(EnumeratePhysicalDevices, instance_);
    ACQUIRE_PROC(GetPhysicalDeviceFeatures, instance_);
    ACQUIRE_PROC(GetPhysicalDeviceQueueFamilyProperties, instance_);
    ACQUIRE_PROC(GetPhysicalDeviceMemoryProperties, instance_);
    ACQUIRE_PROC(GetPhysicalDeviceMemoryProperties2, instance_);
    ACQUIRE_PROC(GetPhysicalDeviceFeatures2, instance_);

    return true;
}

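// Picks the first enumerated physical device; when isProtected is set, the protected memory
// properties are queried alongside the device properties.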
bool RsVulkanInterface::SelectPhysicalDevice(bool isProtected)
{
    if (!instance_) {
        return false;
    }
    uint32_t deviceCount = 0;
    if (vkEnumeratePhysicalDevices(instance_, &deviceCount, nullptr) != VK_SUCCESS) {
        ROSEN_LOGE("vkEnumeratePhysicalDevices failed");
        return false;
    }
    if (deviceCount == 0) {
        ROSEN_LOGE("vkEnumeratePhysicalDevices returned no devices");
        return false;
    }

    std::vector<VkPhysicalDevice> physicalDevices;
    physicalDevices.resize(deviceCount);
    if (vkEnumeratePhysicalDevices(instance_, &deviceCount, physicalDevices.data()) != VK_SUCCESS) {
        ROSEN_LOGE("vkEnumeratePhysicalDevices failed");
        return false;
    }
    physicalDevice_ = physicalDevices[0];
    VkPhysicalDeviceProperties2 physDevProps = {
        VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2,
        nullptr,
        {},
    };
    VkPhysicalDeviceProtectedMemoryProperties protMemProps = {
        VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_PROPERTIES,
        nullptr,
        {},
    };
    if (isProtected) {
        physDevProps.pNext = &protMemProps;
    }
    vkGetPhysicalDeviceProperties2(physicalDevice_, &physDevProps);
    return true;
}

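// Creates the logical device: selects a graphics-capable queue family, chains the sampler
// YCbCr conversion feature (and the protected memory feature when requested) into
// VkPhysicalDeviceFeatures2, enables the device extensions listed above and fetches the queue.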
bool RsVulkanInterface::CreateDevice(bool isProtected)
{
    if (!physicalDevice_) {
        return false;
    }
    uint32_t queueCount;
    vkGetPhysicalDeviceQueueFamilyProperties(physicalDevice_, &queueCount, nullptr);

    std::vector<VkQueueFamilyProperties> queueProps(queueCount);
    vkGetPhysicalDeviceQueueFamilyProperties(physicalDevice_, &queueCount, queueProps.data());

    for (uint32_t i = 0; i < queueCount; i++) {
        if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
            graphicsQueueFamilyIndex_ = i;
            break;
        }
    }

    if (graphicsQueueFamilyIndex_ == UINT32_MAX) {
        ROSEN_LOGE("graphicsQueueFamilyIndex_ is not valid");
        return false;
    }
    // If multiple queues are needed, queue priorities should be set:
    // values greater than 0.5 indicate high priority, values less than 0.5 indicate low priority.
    const float priorities[1] = {1.0f};
    VkDeviceQueueCreateFlags deviceQueueCreateFlags = isProtected ? VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT : 0;
    std::vector<VkDeviceQueueCreateInfo> queueCreate {{
        .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, .pNext = nullptr,
        .flags = deviceQueueCreateFlags, .queueFamilyIndex = graphicsQueueFamilyIndex_, .queueCount = 1,
        .pQueuePriorities = priorities,
    }};
    ycbcrFeature_.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES;
    ycbcrFeature_.pNext = nullptr;
    physicalDeviceFeatures2_.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
    physicalDeviceFeatures2_.pNext = &ycbcrFeature_;
    void** tailPnext = &ycbcrFeature_.pNext;
    protectedMemoryFeatures_ = new VkPhysicalDeviceProtectedMemoryFeatures;
    if (isProtected) {
        protectedMemoryFeatures_->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES;
        protectedMemoryFeatures_->pNext = nullptr;
        *tailPnext = protectedMemoryFeatures_;
        tailPnext = &protectedMemoryFeatures_->pNext;
    }

    vkGetPhysicalDeviceFeatures2(physicalDevice_, &physicalDeviceFeatures2_);

    const VkDeviceCreateInfo createInfo = {
        .sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, .pNext = &physicalDeviceFeatures2_,
        .flags = 0, .queueCreateInfoCount = static_cast<uint32_t>(queueCreate.size()),
        .pQueueCreateInfos = queueCreate.data(),
        .enabledLayerCount = 0, .ppEnabledLayerNames = nullptr,
        .enabledExtensionCount = static_cast<uint32_t>(gDeviceExtensions.size()),
        .ppEnabledExtensionNames = gDeviceExtensions.data(), .pEnabledFeatures = nullptr,
    };
    if (vkCreateDevice(physicalDevice_, &createInfo, nullptr, &device_) != VK_SUCCESS) {
        ROSEN_LOGE("vkCreateDevice failed");
        return false;
    }
    if (!SetupDeviceProcAddresses(device_)) {
        return false;
    }

    const VkDeviceQueueInfo2 deviceQueueInfo2 = {VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2, nullptr,
        deviceQueueCreateFlags, static_cast<uint32_t>(graphicsQueueFamilyIndex_), 0};
    vkGetDeviceQueue2(device_, &deviceQueueInfo2, &queue_);
    return true;
}

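// Fills the Skia GrVkBackendContext from the objects created above. fOwnsInstanceAndDevice
// is false because RsVulkanInterface keeps ownership of the VkInstance and VkDevice.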
bool RsVulkanInterface::CreateSkiaBackendContext(GrVkBackendContext* context, bool isProtected)
{
    auto getProc = CreateSkiaGetProc();
    if (getProc == nullptr) {
        ROSEN_LOGE("CreateSkiaBackendContext getProc is null");
        return false;
    }

    VkPhysicalDeviceFeatures features;
    vkGetPhysicalDeviceFeatures(physicalDevice_, &features);

    uint32_t fFeatures = 0;
    if (features.geometryShader) {
        fFeatures |= kGeometryShader_GrVkFeatureFlag;
    }
    if (features.dualSrcBlend) {
        fFeatures |= kDualSrcBlend_GrVkFeatureFlag;
    }
    if (features.sampleRateShading) {
        fFeatures |= kSampleRateShading_GrVkFeatureFlag;
    }

    context->fInstance = instance_;
    context->fPhysicalDevice = physicalDevice_;
    context->fDevice = device_;
    context->fQueue = queue_;
    context->fGraphicsQueueIndex = graphicsQueueFamilyIndex_;
    context->fMinAPIVersion = VK_API_VERSION_1_2;

    uint32_t extensionFlags = kKHR_surface_GrVkExtensionFlag;
    extensionFlags |= kKHR_ohos_surface_GrVkExtensionFlag;

    context->fExtensions = extensionFlags;

    skVkExtensions_.init(getProc, instance_, physicalDevice_,
        gInstanceExtensions.size(), gInstanceExtensions.data(),
        gDeviceExtensions.size(), gDeviceExtensions.data());

    context->fVkExtensions = &skVkExtensions_;
    context->fDeviceFeatures2 = &physicalDeviceFeatures2_;
    context->fFeatures = fFeatures;
    context->fGetProc = std::move(getProc);
    context->fOwnsInstanceAndDevice = false;
    context->fProtectedContext = isProtected ? GrProtected::kYes : GrProtected::kNo;

    return true;
}

bool RsVulkanInterface::SetupDeviceProcAddresses(VkDevice device)
{
    ACQUIRE_PROC(AllocateCommandBuffers, device_);
    ACQUIRE_PROC(AllocateMemory, device_);
    ACQUIRE_PROC(BeginCommandBuffer, device_);
    ACQUIRE_PROC(BindImageMemory, device_);
    ACQUIRE_PROC(BindImageMemory2, device_);
    ACQUIRE_PROC(CmdPipelineBarrier, device_);
    ACQUIRE_PROC(CreateCommandPool, device_);
    ACQUIRE_PROC(CreateFence, device_);
    ACQUIRE_PROC(CreateImage, device_);
    ACQUIRE_PROC(CreateImageView, device_);
    ACQUIRE_PROC(CreateSemaphore, device_);
    ACQUIRE_PROC(DestroyCommandPool, device_);
    ACQUIRE_PROC(DestroyFence, device_);
    ACQUIRE_PROC(DestroyImage, device_);
    ACQUIRE_PROC(DestroyImageView, device_);
    ACQUIRE_PROC(DestroySemaphore, device_);
    ACQUIRE_PROC(DeviceWaitIdle, device_);
    ACQUIRE_PROC(EndCommandBuffer, device_);
    ACQUIRE_PROC(FreeCommandBuffers, device_);
    ACQUIRE_PROC(FreeMemory, device_);
    ACQUIRE_PROC(GetDeviceQueue, device_);
    ACQUIRE_PROC(GetImageMemoryRequirements, device_);
    ACQUIRE_PROC(QueueSubmit, device_);
    ACQUIRE_PROC(QueueWaitIdle, device_);
    ACQUIRE_PROC(ResetCommandBuffer, device_);
    ACQUIRE_PROC(ResetFences, device_);
    ACQUIRE_PROC(WaitForFences, device_);
    ACQUIRE_PROC(GetNativeBufferPropertiesOHOS, device_);
    ACQUIRE_PROC(QueueSignalReleaseImageOHOS, device_);
    ACQUIRE_PROC(ImportSemaphoreFdKHR, device_);
    ACQUIRE_PROC(SetFreqAdjustEnable, device_);

    return true;
}

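// dlopen()s libvulkan.so once per process; the handle is shared by all interfaces through
// the static handle_ member.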
bool RsVulkanInterface::OpenLibraryHandle()
{
    if (handle_) {
        ROSEN_LOGI("RsVulkanInterface OpenLibraryHandle: libvulkan.so has already been loaded.");
        return true;
    }
    ROSEN_LOGI("VulkanProcTable OpenLibraryHandle: dlopen libvulkan.so.");
    dlerror();
    handle_ = dlopen("libvulkan.so", RTLD_NOW | RTLD_LOCAL);
    if (handle_ == nullptr) {
        ROSEN_LOGE("Could not open the vulkan library: %{public}s", dlerror());
        return false;
    }
    return true;
}

bool RsVulkanInterface::CloseLibraryHandle()
{
    if (handle_ != nullptr) {
        dlerror();
        if (dlclose(handle_) != 0) {
            ROSEN_LOGE("Could not close the vulkan lib handle. This indicates a leak. %{public}s", dlerror());
        }
        handle_ = nullptr;
    }
    return handle_ == nullptr;
}

PFN_vkVoidFunction RsVulkanInterface::AcquireProc(
    const char* procName,
    const VkInstance& instance) const
{
    if (procName == nullptr || !vkGetInstanceProcAddr) {
        return nullptr;
    }

    return vkGetInstanceProcAddr(instance, procName);
}

PFN_vkVoidFunction RsVulkanInterface::AcquireProc(
    const char* procName,
    const VkDevice& device) const
{
    if (procName == nullptr || !device || !vkGetDeviceProcAddr) {
        return nullptr;
    }
    return vkGetDeviceProcAddr(device, procName);
}

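// Returns the GrVkGetProc lambda Skia uses to resolve Vulkan entry points. Device-level
// lookups for vkQueueSubmit are redirected to RsVulkanContext::HookedVkQueueSubmit so that
// all submissions go through the shared queue mutexes.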
GrVkGetProc RsVulkanInterface::CreateSkiaGetProc() const
{
    if (!IsValid()) {
        return nullptr;
    }

    return [this](const char* procName, VkInstance instance, VkDevice device) {
        if (device != VK_NULL_HANDLE) {
            std::string_view s{procName};
            if (s.find("vkQueueSubmit") == 0) {
                return (PFN_vkVoidFunction)RsVulkanContext::HookedVkQueueSubmit;
            }
            auto result = AcquireProc(procName, device);
            if (result != nullptr) {
                return result;
            }
        }
        return AcquireProc(procName, instance);
    };
}

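// Builds a Drawing::GPUContext on top of backendContext_, configures the shader cache
// through MemoryHandler (cacheDir is forwarded there), and sets the resource cache limits.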
std::shared_ptr<Drawing::GPUContext> RsVulkanInterface::CreateDrawingContext(std::string cacheDir)
{
    std::unique_lock<std::mutex> lock(vkMutex_);

    auto drawingContext = std::make_shared<Drawing::GPUContext>();
    Drawing::GPUContextOptions options;
    memHandler_ = std::make_unique<MemoryHandler>();
    std::string vkVersion = std::to_string(VK_API_VERSION_1_2);
    auto size = vkVersion.size();
    memHandler_->ConfigureContext(&options, vkVersion.c_str(), size, cacheDir);
    drawingContext->BuildFromVK(backendContext_, options);
    int maxResources = 0;
    size_t maxResourcesSize = 0;
    int cacheLimitsTimes = CACHE_LIMITS_TIMES;
    drawingContext->GetResourceCacheLimits(&maxResources, &maxResourcesSize);
    if (maxResourcesSize > 0) {
        drawingContext->SetResourceCacheLimits(cacheLimitsTimes * maxResources,
            cacheLimitsTimes * std::fmin(maxResourcesSize, GR_CACHE_MAX_BYTE_SIZE));
    } else {
        drawingContext->SetResourceCacheLimits(GR_CACHE_MAX_COUNT, GR_CACHE_MAX_BYTE_SIZE);
    }
    return drawingContext;
}

void RsVulkanInterface::DestroyAllSemaphoreFence()
{
    std::lock_guard<std::mutex> lock(semaphoreLock_);
    RS_LOGE("Device lost, clear all semaphore fences, count [%{public}zu]", usedSemaphoreFenceList_.size());
    for (auto&& semaphoreFence : usedSemaphoreFenceList_) {
        vkDestroySemaphore(device_, semaphoreFence.semaphore, nullptr);
    }
    usedSemaphoreFenceList_.clear();
}

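// Creates a binary semaphore for the next submission. Before creating it, semaphores whose
// paired SyncFence has signalled are destroyed, and if the recycle list grows beyond 3000
// entries everything is drained synchronously as a safety valve.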
VkSemaphore RsVulkanInterface::RequireSemaphore()
{
    {
        std::lock_guard<std::mutex> lock(semaphoreLock_);
        // 3000 means too many used semaphore fences
        if (usedSemaphoreFenceList_.size() >= 3000) {
            RS_LOGE("Too many used semaphore fences, count [%{public}zu]", usedSemaphoreFenceList_.size());
            for (auto&& semaphoreFence : usedSemaphoreFenceList_) {
                if (semaphoreFence.fence != nullptr) {
                    semaphoreFence.fence->Wait(-1);
                }
                vkDestroySemaphore(device_, semaphoreFence.semaphore, nullptr);
            }
            usedSemaphoreFenceList_.clear();
        }
        for (auto it = usedSemaphoreFenceList_.begin(); it != usedSemaphoreFenceList_.end();) {
            auto& fence = it->fence;
            if (fence == nullptr || fence->GetStatus() == FenceStatus::SIGNALED) {
                vkDestroySemaphore(device_, it->semaphore, nullptr);
                it->semaphore = VK_NULL_HANDLE;
                it = usedSemaphoreFenceList_.erase(it);
            } else {
                it++;
            }
        }
    }

    VkSemaphoreCreateInfo semaphoreInfo;
    semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    semaphoreInfo.pNext = nullptr;
    semaphoreInfo.flags = 0;
    VkSemaphore semaphore;
    auto err = vkCreateSemaphore(device_, &semaphoreInfo, nullptr, &semaphore);
    if (err != VK_SUCCESS) {
        return VK_NULL_HANDLE;
    }
    return semaphore;
}

void RsVulkanInterface::SendSemaphoreWithFd(VkSemaphore semaphore, int fenceFd)
{
    std::lock_guard<std::mutex> lock(semaphoreLock_);
    auto& semaphoreFence = usedSemaphoreFenceList_.emplace_back();
    semaphoreFence.semaphore = semaphore;
    semaphoreFence.fence = (fenceFd != -1 ? std::make_unique<SyncFence>(fenceFd) : nullptr);
}

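// Builds one RsVulkanInterface per VulkanInterfaceType: UNI_RENDER for the render thread,
// UNPROTECTED_REDRAW for the hardware thread, and (when DRM is enabled) PROTECTED_REDRAW
// backed by a protected-memory device.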
RsVulkanContext::RsVulkanContext(std::string cacheDir)
{
    vulkanInterfaceVec.resize(size_t(VulkanInterfaceType::MAX_INTERFACE_TYPE));
    // Create the vulkan interface for the render thread.
    auto uniRenderVulkanInterface = std::make_shared<RsVulkanInterface>();
    uniRenderVulkanInterface->Init(VulkanInterfaceType::UNI_RENDER, false);
    // Initialize the drawing context the render thread binds to backendContext_.
    drawingContext_ = uniRenderVulkanInterface->CreateDrawingContext(cacheDir);
    // Create the vulkan interface for the hardware thread (unprotected).
    auto unprotectedReDrawVulkanInterface = std::make_shared<RsVulkanInterface>();
    unprotectedReDrawVulkanInterface->Init(VulkanInterfaceType::UNPROTECTED_REDRAW, false);
    vulkanInterfaceVec[size_t(VulkanInterfaceType::UNI_RENDER)] = std::move(uniRenderVulkanInterface);
    vulkanInterfaceVec[size_t(VulkanInterfaceType::UNPROTECTED_REDRAW)] = std::move(unprotectedReDrawVulkanInterface);
#ifdef IS_ENABLE_DRM
    isProtected_ = true;
    auto protectedReDrawVulkanInterface = std::make_shared<RsVulkanInterface>();
    protectedReDrawVulkanInterface->Init(VulkanInterfaceType::PROTECTED_REDRAW, true);
    // DRM needs to adapt vkQueue in the future.
    protectedDrawingContext_ = protectedReDrawVulkanInterface->CreateDrawingContext(cacheDir);
    vulkanInterfaceVec[size_t(VulkanInterfaceType::PROTECTED_REDRAW)] = std::move(protectedReDrawVulkanInterface);
    isProtected_ = false;
#endif
}

RsVulkanContext& RsVulkanContext::GetSingleton()
{
    static RsVulkanContext singleton {};
    return singleton;
}

RsVulkanContext& RsVulkanContext::GetSingletonWithCacheDir(std::string& cacheDir)
{
    static RsVulkanContext singleton = RsVulkanContext(cacheDir);
    return singleton;
}

RsVulkanInterface& RsVulkanContext::GetRsVulkanInterface()
{
    switch (vulkanInterfaceType_) {
        case VulkanInterfaceType::PROTECTED_REDRAW:
            return *(vulkanInterfaceVec[size_t(VulkanInterfaceType::PROTECTED_REDRAW)].get());
        case VulkanInterfaceType::UNPROTECTED_REDRAW:
            return *(vulkanInterfaceVec[size_t(VulkanInterfaceType::UNPROTECTED_REDRAW)].get());
        case VulkanInterfaceType::UNI_RENDER:
        default:
            return *(vulkanInterfaceVec[size_t(VulkanInterfaceType::UNI_RENDER)].get());
    }
}

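// Skia resolves vkQueueSubmit to this hook (see CreateSkiaGetProc). It serializes submissions
// by taking the hardware-thread or render-thread queue mutex of the interface that is current
// on the calling thread before forwarding to the real vkQueueSubmit.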
VKAPI_ATTR VkResult RsVulkanContext::HookedVkQueueSubmit(VkQueue queue, uint32_t submitCount,
    VkSubmitInfo* pSubmits, VkFence fence)
{
    RsVulkanInterface& vkInterface = RsVulkanContext::GetSingleton().GetRsVulkanInterface();
    auto interfaceType = vkInterface.GetInterfaceType();
    if (interfaceType == VulkanInterfaceType::UNPROTECTED_REDRAW ||
        interfaceType == VulkanInterfaceType::PROTECTED_REDRAW) {
        std::lock_guard<std::mutex> lock(vkInterface.hGraphicsQueueMutex_);
        RS_LOGD("%{public}s hardware queue, interfaceType: %{public}d", __func__, static_cast<int>(interfaceType));
        RS_OPTIONAL_TRACE_NAME_FMT("%s hardware queue, interfaceType: %d", __func__, static_cast<int>(interfaceType));
        return vkInterface.vkQueueSubmit(queue, submitCount, pSubmits, fence);
    } else if (interfaceType == VulkanInterfaceType::UNI_RENDER) {
        std::lock_guard<std::mutex> lock(vkInterface.graphicsQueueMutex_);
        RS_LOGD("%{public}s queue", __func__);
        RS_OPTIONAL_TRACE_NAME_FMT("%s queue", __func__);
        return vkInterface.vkQueueSubmit(queue, submitCount, pSubmits, fence);
    }
    RS_LOGE("%{public}s unexpected interface type occurred", __func__);
    return VK_ERROR_UNKNOWN;
}

VKAPI_ATTR VkResult RsVulkanContext::HookedVkQueueSignalReleaseImageOHOS(VkQueue queue, uint32_t waitSemaphoreCount,
    const VkSemaphore* pWaitSemaphores, VkImage image, int32_t* pNativeFenceFd)
{
    RsVulkanInterface& vkInterface = RsVulkanContext::GetSingleton().GetRsVulkanInterface();
    auto interfaceType = vkInterface.GetInterfaceType();
    if (interfaceType == VulkanInterfaceType::UNPROTECTED_REDRAW ||
        interfaceType == VulkanInterfaceType::PROTECTED_REDRAW) {
        std::lock_guard<std::mutex> lock(vkInterface.hGraphicsQueueMutex_);
        RS_LOGD("%{public}s hardware queue, interfaceType: %{public}d", __func__, static_cast<int>(interfaceType));
        RS_OPTIONAL_TRACE_NAME_FMT("%s hardware queue, interfaceType: %d", __func__, static_cast<int>(interfaceType));
        return vkInterface.vkQueueSignalReleaseImageOHOS(queue, waitSemaphoreCount,
            pWaitSemaphores, image, pNativeFenceFd);
    } else if (interfaceType == VulkanInterfaceType::UNI_RENDER) {
        std::lock_guard<std::mutex> lock(vkInterface.graphicsQueueMutex_);
        RS_LOGD("%{public}s queue", __func__);
        RS_OPTIONAL_TRACE_NAME_FMT("%s queue", __func__);
        return vkInterface.vkQueueSignalReleaseImageOHOS(queue,
            waitSemaphoreCount, pWaitSemaphores, image, pNativeFenceFd);
    }
    RS_LOGE("%{public}s unexpected interface type occurred", __func__);
    return VK_ERROR_UNKNOWN;
}

std::shared_ptr<Drawing::GPUContext> RsVulkanContext::CreateDrawingContext()
{
    switch (vulkanInterfaceType_) {
        case VulkanInterfaceType::PROTECTED_REDRAW:
            if (protectedDrawingContext_) {
                return protectedDrawingContext_;
            }
            protectedDrawingContext_ = GetRsVulkanInterface().CreateDrawingContext();
            return protectedDrawingContext_;
        case VulkanInterfaceType::UNI_RENDER:
        case VulkanInterfaceType::UNPROTECTED_REDRAW:
        default:
            if (drawingContext_) {
                return drawingContext_;
            }
            drawingContext_ = GetRsVulkanInterface().CreateDrawingContext();
            return drawingContext_;
    }
}

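// Returns the thread-local drawing context that matches the current protection mode,
// creating it lazily on first use.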
std::shared_ptr<Drawing::GPUContext> RsVulkanContext::GetDrawingContext()
{
    auto& drawingContext = isProtected_ ? protectedDrawingContext_ : drawingContext_;
    if (drawingContext != nullptr) {
        return drawingContext;
    }
    drawingContext = GetRsVulkanInterface().CreateDrawingContext();
    return drawingContext;
}

void RsVulkanContext::SetIsProtected(bool isProtected)
{
    if (isProtected) {
        vulkanInterfaceType_ = VulkanInterfaceType::PROTECTED_REDRAW;
    } else {
        vulkanInterfaceType_ = VulkanInterfaceType::UNPROTECTED_REDRAW;
    }
    if (isProtected_ != isProtected) {
        RS_LOGW("RsVulkanContext switch, isProtected: %{public}d.", isProtected);
        if (isProtected) {
            RS_TRACE_NAME("RsVulkanContext switch to protected GPU context");
        }
        ClearGrContext(isProtected);
    }
}

void RsVulkanContext::ClearGrContext(bool isProtected)
{
    RS_TRACE_NAME("RsVulkanContext ClearGrContext");
    GetDrawingContext()->PurgeUnlockedResources(true);
    isProtected_ = isProtected;
    GetDrawingContext()->ResetContext();
}
} // namespace Rosen
} // namespace OHOS