/*
 * Copyright (c) 2023 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "platform/ohos/backend/rs_vulkan_context.h"
#include <memory>
#include <mutex>
#include <unordered_set>
#include <string_view>
#include <dlfcn.h>
#include <vector>
#include "common/rs_optional_trace.h"
#include "platform/common/rs_log.h"
#include "render_context/memory_handler.h"
#ifdef USE_M133_SKIA
#include "include/gpu/vk/VulkanExtensions.h"
#else
#include "include/gpu/vk/GrVkExtensions.h"
#endif
#include "unistd.h"
#include "utils/system_properties.h"
#include "vulkan/vulkan_core.h"
#include "vulkan/vulkan_ohos.h"
#include "sync_fence.h"

#include "third_party/skia/include/gpu/GrDirectContext.h"
#include "third_party/skia/include/gpu/vk/GrVkBackendContext.h"
#include "third_party/skia/src/gpu/GrDirectContextPriv.h"
#include "third_party/skia/src/gpu/vk/GrVkCommandBuffer.h"
#include "third_party/skia/src/gpu/vk/GrVkGpu.h"
#include "third_party/skia/src/gpu/vk/GrVkSemaphore.h"

#include "hetero_hdr/rs_hdr_pattern_manager.h"

#define ACQUIRE_PROC(name, context)                           \
    if (!(vk##name = AcquireProc("vk" #name, context))) {     \
        ROSEN_LOGE("Could not acquire proc: vk" #name);       \
    }

namespace OHOS {
namespace Rosen {
thread_local bool RsVulkanContext::isProtected_ = false;
thread_local VulkanInterfaceType RsVulkanContext::vulkanInterfaceType_ = VulkanInterfaceType::BASIC_RENDER;
std::map<int, std::pair<std::shared_ptr<Drawing::GPUContext>, bool>> RsVulkanContext::drawingContextMap_;
std::map<int, std::pair<std::shared_ptr<Drawing::GPUContext>, bool>> RsVulkanContext::protectedDrawingContextMap_;
std::mutex RsVulkanContext::drawingContextMutex_;
std::recursive_mutex RsVulkanContext::recyclableSingletonMutex_;
bool RsVulkanContext::isRecyclable_ = true;
std::atomic RsVulkanContext::isRecyclableSingletonValid_ = false;
std::atomic RsVulkanContext::isInited_ = false;
void* RsVulkanInterface::handle_ = nullptr;
VkInstance RsVulkanInterface::instance_ = VK_NULL_HANDLE;

static std::vector<const char*> gInstanceExtensions = {
    VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME,
};

static std::vector<const char*> gMandatoryDeviceExtensions = {
    VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME,
    VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME,
    VK_KHR_MAINTENANCE1_EXTENSION_NAME,
    VK_KHR_MAINTENANCE2_EXTENSION_NAME,
    VK_KHR_MAINTENANCE3_EXTENSION_NAME,
    VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME,
    VK_KHR_BIND_MEMORY_2_EXTENSION_NAME,
    VK_OHOS_NATIVE_BUFFER_EXTENSION_NAME,
    VK_OHOS_EXTERNAL_MEMORY_EXTENSION_NAME,
};

static std::vector<const char*> gOptionalDeviceExtensions = {
    VK_KHR_SYNCHRONIZATION_2_EXTENSION_NAME,
    VK_KHR_TIMELINE_SEMAPHORE_EXTENSION_NAME,
};

// enabled when persist.sys.graphic.openVkImageMemoryDfx is true
static std::vector<const char*> gOptionalDeviceExtensionsDebug = {
    VK_KHR_MAINTENANCE_5_EXTENSION_NAME,
    VK_EXT_DEVICE_FAULT_EXTENSION_NAME,
    VK_EXT_DEVICE_ADDRESS_BINDING_REPORT_EXTENSION_NAME,
};

static const int GR_CHUNK_SIZE = 1048576;
static const int GR_CACHE_MAX_COUNT = 8192;
static const size_t GR_CACHE_MAX_BYTE_SIZE = 96 * (1 << 20);
static const int32_t CACHE_LIMITS_TIMES = 5; // this will change RS memory!
std::atomic<uint64_t> RsVulkanInterface::callbackSemaphoreInfofdDupCnt_ = 0;
std::atomic<uint64_t> RsVulkanInterface::callbackSemaphoreInfoRSDerefCnt_ = 0;
std::atomic<uint64_t> RsVulkanInterface::callbackSemaphoreInfo2DEngineDerefCnt_ = 0;
std::atomic<uint64_t> RsVulkanInterface::callbackSemaphoreInfo2DEngineDefensiveDerefCnt_ = 0;
std::atomic<uint64_t> RsVulkanInterface::callbackSemaphoreInfoFlushCnt_ = 0;
std::atomic<uint64_t> RsVulkanInterface::callbackSemaphoreInfo2DEngineCallCnt_ = 0;

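// Initializes the Vulkan interface end to end: loads libvulkan.so, resolves the loader and
// instance-level procs, creates the VkInstance and VkDevice, then builds the Skia backend
// context from which Drawing::GPUContext instances are later created.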
void RsVulkanInterface::Init(VulkanInterfaceType vulkanInterfaceType, bool isProtected, bool isHtsEnable)
{
    acquiredMandatoryProcAddresses_ = false;
    memHandler_ = nullptr;
    acquiredMandatoryProcAddresses_ = OpenLibraryHandle() && SetupLoaderProcAddresses();
    interfaceType_ = vulkanInterfaceType;
    CreateInstance();
    SelectPhysicalDevice(isProtected);
    CreateDevice(isProtected, isHtsEnable);
    std::unique_lock<std::mutex> lock(vkMutex_);
    CreateSkiaBackendContext(&backendContext_, isProtected);
}

RsVulkanInterface::~RsVulkanInterface()
{
    for (auto&& semaphoreFence : usedSemaphoreFenceList_) {
        if (semaphoreFence.fence != nullptr) {
            semaphoreFence.fence->Wait(-1);
        }
        vkDestroySemaphore(device_, semaphoreFence.semaphore, nullptr);
    }
    usedSemaphoreFenceList_.clear();
    if (protectedMemoryFeatures_) {
        delete protectedMemoryFeatures_;
        protectedMemoryFeatures_ = nullptr;
    }

    if (device_ != VK_NULL_HANDLE) {
        vkDeviceWaitIdle(device_);
        vkDestroyDevice(device_, nullptr);
        device_ = VK_NULL_HANDLE;
    }
    CloseLibraryHandle();
}

bool RsVulkanInterface::IsValid() const
{
    return instance_ != VK_NULL_HANDLE && device_ != VK_NULL_HANDLE;
}

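// Resolves the loader-level entry points (vkGetInstanceProcAddr and friends) from the
// dlopen()ed library handle; all remaining procs are acquired via ACQUIRE_PROC afterwards.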
bool RsVulkanInterface::SetupLoaderProcAddresses()
{
    if (handle_ == nullptr) {
        return true;
    }
    vkGetInstanceProcAddr = reinterpret_cast<PFN_vkGetInstanceProcAddr>(dlsym(handle_, "vkGetInstanceProcAddr"));
    vkGetDeviceProcAddr = reinterpret_cast<PFN_vkGetDeviceProcAddr>(dlsym(handle_, "vkGetDeviceProcAddr"));
    vkEnumerateInstanceExtensionProperties = reinterpret_cast<PFN_vkEnumerateInstanceExtensionProperties>(
        dlsym(handle_, "vkEnumerateInstanceExtensionProperties"));
    vkCreateInstance = reinterpret_cast<PFN_vkCreateInstance>(dlsym(handle_, "vkCreateInstance"));

    if (!vkGetInstanceProcAddr) {
        ROSEN_LOGE("Could not acquire vkGetInstanceProcAddr");
        return false;
    }

    VkInstance null_instance = VK_NULL_HANDLE;
    ACQUIRE_PROC(EnumerateInstanceLayerProperties, null_instance);
    return true;
}

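// Creates the process-wide VkInstance on first use (instance_ is a static shared by all
// RsVulkanInterface objects) and resolves the instance-level procs needed for device setup.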
bool RsVulkanInterface::CreateInstance()
{
    if (!acquiredMandatoryProcAddresses_) {
        return false;
    }

    if (instance_ == VK_NULL_HANDLE) {
        const VkApplicationInfo info = {
            .sType = VK_STRUCTURE_TYPE_APPLICATION_INFO,
            .pNext = nullptr,
            .pApplicationName = "OHOS",
            .applicationVersion = 0,
            .pEngineName = "Rosen",
            .engineVersion = VK_MAKE_VERSION(1, 0, 0),
            .apiVersion = VK_API_VERSION_1_2,
        };

        const VkInstanceCreateInfo create_info = {
            .sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,
            .pNext = nullptr,
            .flags = 0,
            .pApplicationInfo = &info,
            .enabledLayerCount = 0,
            .ppEnabledLayerNames = nullptr,
            .enabledExtensionCount = static_cast<uint32_t>(gInstanceExtensions.size()),
            .ppEnabledExtensionNames = gInstanceExtensions.data(),
        };
        if (vkCreateInstance(&create_info, nullptr, &instance_) != VK_SUCCESS) {
            ROSEN_LOGE("Could not create vulkan instance");
            return false;
        }
    }

    ACQUIRE_PROC(CreateDevice, instance_);
    ACQUIRE_PROC(DestroyDevice, instance_);
    ACQUIRE_PROC(DestroyInstance, instance_);
    ACQUIRE_PROC(EnumerateDeviceExtensionProperties, instance_);
    ACQUIRE_PROC(EnumerateDeviceLayerProperties, instance_);
    ACQUIRE_PROC(EnumeratePhysicalDevices, instance_);
    ACQUIRE_PROC(GetPhysicalDeviceFeatures, instance_);
    ACQUIRE_PROC(GetPhysicalDeviceQueueFamilyProperties, instance_);
    ACQUIRE_PROC(GetPhysicalDeviceMemoryProperties, instance_);
    ACQUIRE_PROC(GetPhysicalDeviceMemoryProperties2, instance_);
    ACQUIRE_PROC(GetPhysicalDeviceFeatures2, instance_);

    return true;
}

bool RsVulkanInterface::SelectPhysicalDevice(bool isProtected)
{
    if (!instance_) {
        return false;
    }
    uint32_t deviceCount = 0;
    if (vkEnumeratePhysicalDevices(instance_, &deviceCount, nullptr) != VK_SUCCESS) {
        ROSEN_LOGE("vkEnumeratePhysicalDevices failed");
        return false;
    }
    if (deviceCount == 0) {
        ROSEN_LOGE("vkEnumeratePhysicalDevices returned no devices");
        return false;
    }

    std::vector<VkPhysicalDevice> physicalDevices;
    physicalDevices.resize(deviceCount);
    if (vkEnumeratePhysicalDevices(instance_, &deviceCount, physicalDevices.data()) != VK_SUCCESS) {
        ROSEN_LOGE("vkEnumeratePhysicalDevices failed");
        return false;
    }
    physicalDevice_ = physicalDevices[0];
    VkPhysicalDeviceProperties2 physDevProps = {
        VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2,
        0,
        {},
    };
    VkPhysicalDeviceProtectedMemoryProperties protMemProps = {
        VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_PROPERTIES,
        0,
        {},
    };
    if (isProtected) {
        physDevProps.pNext = &protMemProps;
    }
    vkGetPhysicalDeviceProperties2(physicalDevice_, &physDevProps);
    return true;
}

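// Builds the pNext chain handed to vkGetPhysicalDeviceFeatures2 / VkDeviceCreateInfo:
// features2 -> timeline semaphore -> descriptor indexing -> synchronization2 -> YCbCr
// conversion, with protected-memory features appended at the tail when DRM is requested.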
void RsVulkanInterface::ConfigureFeatures(bool isProtected)
{
    ycbcrFeature_.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES;
    ycbcrFeature_.pNext = nullptr;
    sync2Feature_.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES;
    sync2Feature_.pNext = &ycbcrFeature_;
    bindlessFeature_.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES;
    bindlessFeature_.pNext = &sync2Feature_;
    timelineFeature_.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES;
    timelineFeature_.pNext = &bindlessFeature_;
    physicalDeviceFeatures2_.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
    physicalDeviceFeatures2_.pNext = &timelineFeature_;
    void** tailPnext = &ycbcrFeature_.pNext;
    protectedMemoryFeatures_ = new VkPhysicalDeviceProtectedMemoryFeatures;
    if (isProtected) {
        protectedMemoryFeatures_->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES;
        protectedMemoryFeatures_->pNext = nullptr;
        *tailPnext = protectedMemoryFeatures_;
        tailPnext = &protectedMemoryFeatures_->pNext;
    }
}

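// Assembles the device extension list: mandatory extensions are always requested (a missing
// one is logged but still passed to vkCreateDevice), optional extensions are added only when
// the driver reports them, and the debug set is gated behind a system property.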
void RsVulkanInterface::ConfigureExtensions()
{
    deviceExtensions_ = gMandatoryDeviceExtensions;
    uint32_t count = 0;
    std::vector<VkExtensionProperties> supportedExtensions;
    if (vkEnumerateDeviceExtensionProperties(physicalDevice_, nullptr, &count, nullptr) != VK_SUCCESS) {
        ROSEN_LOGE("Failed to get device extension count, try to create device with mandatory extensions only!");
        return;
    }
    supportedExtensions.resize(count);
    if (vkEnumerateDeviceExtensionProperties(physicalDevice_, nullptr, &count,
        supportedExtensions.data()) != VK_SUCCESS) {
        ROSEN_LOGE("Failed to get device extensions, try to create device with mandatory extensions only!");
        return;
    }
    std::unordered_set<std::string_view> extensionNames;
    for (auto& prop: supportedExtensions) {
        extensionNames.emplace(prop.extensionName);
    }
    for (auto& ext: gOptionalDeviceExtensions) {
        if (extensionNames.find(ext) != extensionNames.end()) {
            deviceExtensions_.emplace_back(ext);
        }
    }
#ifdef ROSEN_OHOS
    if (Drawing::SystemProperties::IsVkImageDfxEnabled()) {
        for (auto& ext: gOptionalDeviceExtensionsDebug) {
            if (extensionNames.find(ext) == extensionNames.end()) {
                ROSEN_LOGE("Optional device extension %{public}s not found! Skip it.", ext);
                continue;
            }
            deviceExtensions_.emplace_back(ext);
        }
    }
#endif
    for (auto& ext: gMandatoryDeviceExtensions) {
        if (extensionNames.find(ext) == extensionNames.end()) {
            ROSEN_LOGE("Mandatory device extension %{public}s not found! Try to enable it anyway.", ext);
        }
    }
}

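// Picks the first graphics-capable queue family, requests a single queue on it (protected
// when required), enables the configured features/extensions, and creates the logical device.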
bool RsVulkanInterface::CreateDevice(bool isProtected, bool isHtsEnable)
{
    if (!physicalDevice_) {
        return false;
    }
    uint32_t queueCount = 0;
    vkGetPhysicalDeviceQueueFamilyProperties(physicalDevice_, &queueCount, nullptr);

    std::vector<VkQueueFamilyProperties> queueProps(queueCount);
    vkGetPhysicalDeviceQueueFamilyProperties(physicalDevice_, &queueCount, queueProps.data());

    for (uint32_t i = 0; i < queueCount; i++) {
        if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
            graphicsQueueFamilyIndex_ = i;
            break;
        }
    }

    if (graphicsQueueFamilyIndex_ == UINT32_MAX) {
        ROSEN_LOGE("graphicsQueueFamilyIndex_ is not valid");
        return false;
    }
    // If multiple queues are needed, each queue requires its own priority:
    // values greater than 0.5 indicate high priority, values less than 0.5 low priority.
    const float priorities[1] = {1.0f};
    VkDeviceQueueCreateFlags deviceQueueCreateFlags = isProtected ? VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT : 0;
    std::vector<VkDeviceQueueCreateInfo> queueCreate {{
        .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, .pNext = nullptr,
        .flags = deviceQueueCreateFlags, .queueFamilyIndex = graphicsQueueFamilyIndex_, .queueCount = 1,
        .pQueuePriorities = priorities,
    }};
    ConfigureExtensions();
    ConfigureFeatures(isProtected);

    vkGetPhysicalDeviceFeatures2(physicalDevice_, &physicalDeviceFeatures2_);

    VkDeviceCreateFlags deviceCreateFlags = isHtsEnable ? VK_DEVICE_CREATE_HTS_ENABLE_BIT : 0;

    const VkDeviceCreateInfo createInfo = {
        .sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, .pNext = &physicalDeviceFeatures2_,
        .flags = deviceCreateFlags,
        .queueCreateInfoCount = static_cast<uint32_t>(queueCreate.size()),
        .pQueueCreateInfos = queueCreate.data(),
        .enabledLayerCount = 0, .ppEnabledLayerNames = nullptr,
        .enabledExtensionCount = static_cast<uint32_t>(deviceExtensions_.size()),
        .ppEnabledExtensionNames = deviceExtensions_.data(), .pEnabledFeatures = nullptr,
    };
    if (vkCreateDevice(physicalDevice_, &createInfo, nullptr, &device_) != VK_SUCCESS) {
        ROSEN_LOGE("vkCreateDevice failed");
        SetVulkanDeviceStatus(VulkanDeviceStatus::CREATE_FAIL);
        return false;
    }
    SetVulkanDeviceStatus(VulkanDeviceStatus::CREATE_SUCCESS);
    if (!SetupDeviceProcAddresses(device_)) {
        return false;
    }

    const VkDeviceQueueInfo2 deviceQueueInfo2 = {VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2, nullptr,
        deviceQueueCreateFlags, static_cast<uint32_t>(graphicsQueueFamilyIndex_), 0};
    vkGetDeviceQueue2(device_, &deviceQueueInfo2, &queue_);
    return true;
}

#ifdef USE_M133_SKIA
bool RsVulkanInterface::CreateSkiaBackendContext(skgpu::VulkanBackendContext* context, bool isProtected)
#else
bool RsVulkanInterface::CreateSkiaBackendContext(GrVkBackendContext* context, bool isProtected)
#endif
{
    auto getProc = CreateSkiaGetProc();
    if (getProc == nullptr) {
        ROSEN_LOGE("CreateSkiaBackendContext getProc is null");
        return false;
    }
#ifndef USE_M133_SKIA
    VkPhysicalDeviceFeatures features;
    vkGetPhysicalDeviceFeatures(physicalDevice_, &features);

    uint32_t fFeatures = 0;
    if (features.geometryShader) {
        fFeatures |= kGeometryShader_GrVkFeatureFlag;
    }
    if (features.dualSrcBlend) {
        fFeatures |= kDualSrcBlend_GrVkFeatureFlag;
    }
    if (features.sampleRateShading) {
        fFeatures |= kSampleRateShading_GrVkFeatureFlag;
    }
#endif

    context->fInstance = instance_;
    context->fPhysicalDevice = physicalDevice_;
    context->fDevice = device_;
    context->fQueue = queue_;
    context->fGraphicsQueueIndex = graphicsQueueFamilyIndex_;
#ifndef USE_M133_SKIA
    context->fMinAPIVersion = VK_API_VERSION_1_2;

    uint32_t extensionFlags = kKHR_surface_GrVkExtensionFlag;
    extensionFlags |= kKHR_ohos_surface_GrVkExtensionFlag;

    context->fExtensions = extensionFlags;
#else
    context->fMaxAPIVersion = VK_API_VERSION_1_2;
#endif

    skVkExtensions_.init(getProc, instance_, physicalDevice_,
        gInstanceExtensions.size(), gInstanceExtensions.data(),
        deviceExtensions_.size(), deviceExtensions_.data());

    context->fVkExtensions = &skVkExtensions_;
    context->fDeviceFeatures2 = &physicalDeviceFeatures2_;
#ifndef USE_M133_SKIA
    context->fFeatures = fFeatures;
#endif
    context->fGetProc = std::move(getProc);
#ifdef USE_M133_SKIA
    context->fProtectedContext = isProtected ? skgpu::Protected::kYes : skgpu::Protected::kNo;
#else
    context->fOwnsInstanceAndDevice = false;
    context->fProtectedContext = isProtected ? GrProtected::kYes : GrProtected::kNo;
#endif

    return true;
}

bool RsVulkanInterface::SetupDeviceProcAddresses(VkDevice device)
{
    ACQUIRE_PROC(AllocateCommandBuffers, device_);
    ACQUIRE_PROC(AllocateMemory, device_);
    ACQUIRE_PROC(BeginCommandBuffer, device_);
    ACQUIRE_PROC(BindImageMemory, device_);
    ACQUIRE_PROC(BindImageMemory2, device_);
    ACQUIRE_PROC(CmdPipelineBarrier, device_);
    ACQUIRE_PROC(CreateCommandPool, device_);
    ACQUIRE_PROC(CreateFence, device_);
    ACQUIRE_PROC(CreateImage, device_);
    ACQUIRE_PROC(CreateImageView, device_);
    ACQUIRE_PROC(CreateSemaphore, device_);
    ACQUIRE_PROC(DestroyCommandPool, device_);
    ACQUIRE_PROC(DestroyFence, device_);
    ACQUIRE_PROC(DestroyImage, device_);
    ACQUIRE_PROC(DestroyImageView, device_);
    ACQUIRE_PROC(DestroySemaphore, device_);
    ACQUIRE_PROC(DeviceWaitIdle, device_);
    ACQUIRE_PROC(EndCommandBuffer, device_);
    ACQUIRE_PROC(FreeCommandBuffers, device_);
    ACQUIRE_PROC(FreeMemory, device_);
    ACQUIRE_PROC(GetDeviceQueue, device_);
    ACQUIRE_PROC(GetImageMemoryRequirements, device_);
    ACQUIRE_PROC(QueueSubmit, device_);
    ACQUIRE_PROC(QueueWaitIdle, device_);
    ACQUIRE_PROC(ResetCommandBuffer, device_);
    ACQUIRE_PROC(ResetFences, device_);
    ACQUIRE_PROC(WaitForFences, device_);
    ACQUIRE_PROC(GetNativeBufferPropertiesOHOS, device_);
    ACQUIRE_PROC(QueueSignalReleaseImageOHOS, device_);
    ACQUIRE_PROC(ImportSemaphoreFdKHR, device_);
    ACQUIRE_PROC(SetFreqAdjustEnable, device_);
    ACQUIRE_PROC(GetSemaphoreFdKHR, device_);

    return true;
}

bool RsVulkanInterface::OpenLibraryHandle()
{
    if (handle_) {
        ROSEN_LOGI("RsVulkanInterface OpenLibraryHandle: vk so has already been loaded.");
        return true;
    }
    ROSEN_LOGI("VulkanProcTable OpenLibraryHandle: dlopen libvulkan.so.");
    dlerror();
    handle_ = dlopen("libvulkan.so", RTLD_NOW | RTLD_LOCAL);
    if (handle_ == nullptr) {
        ROSEN_LOGE("Could not open the vulkan library: %{public}s", dlerror());
        return false;
    }
    return true;
}

bool RsVulkanInterface::CloseLibraryHandle()
{
    if (handle_ != nullptr) {
        dlerror();
        if (dlclose(handle_) != 0) {
            ROSEN_LOGE("Could not close the vulkan lib handle. This indicates a leak. %{public}s", dlerror());
        }
        handle_ = nullptr;
    }
    return handle_ == nullptr;
}

PFN_vkVoidFunction RsVulkanInterface::AcquireProc(
    const char* procName,
    const VkInstance& instance) const
{
    if (procName == nullptr || !vkGetInstanceProcAddr) {
        return nullptr;
    }

    return vkGetInstanceProcAddr(instance, procName);
}

PFN_vkVoidFunction RsVulkanInterface::AcquireProc(
    const char* procName,
    const VkDevice& device) const
{
    if (procName == nullptr || !device || !vkGetDeviceProcAddr) {
        return nullptr;
    }
    return vkGetDeviceProcAddr(device, procName);
}

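// Returns the GetProc callback Skia uses to resolve Vulkan entry points. Device-level
// lookups whose name begins with "vkQueueSubmit" are redirected to HookedVkQueueSubmit so
// that every submission Skia makes goes through the per-interface queue mutex.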
#ifdef USE_M133_SKIA
skgpu::VulkanGetProc RsVulkanInterface::CreateSkiaGetProc() const
#else
GrVkGetProc RsVulkanInterface::CreateSkiaGetProc() const
#endif
{
    if (!IsValid()) {
        return nullptr;
    }

    return [this](const char* procName, VkInstance instance, VkDevice device) {
        if (device != VK_NULL_HANDLE) {
            std::string_view s{procName};
            if (s.find("vkQueueSubmit") == 0) {
                return (PFN_vkVoidFunction)RsVulkanContext::HookedVkQueueSubmit;
            }
            auto result = AcquireProc(procName, device);
            if (result != nullptr) {
                return result;
            }
        }
        return AcquireProc(procName, instance);
    };
}

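// Builds a Drawing::GPUContext from the shared Skia backend context, points the shader
// cache at cacheDir, and raises the resource cache limits (CACHE_LIMITS_TIMES over the
// driver defaults, with the byte size capped at GR_CACHE_MAX_BYTE_SIZE).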
std::shared_ptr<Drawing::GPUContext> RsVulkanInterface::DoCreateDrawingContext(std::string cacheDir)
{
    std::unique_lock<std::mutex> lock(vkMutex_);

    auto drawingContext = std::make_shared<Drawing::GPUContext>();
    Drawing::GPUContextOptions options;
    memHandler_ = std::make_unique<MemoryHandler>();
    std::string vkVersion = std::to_string(VK_API_VERSION_1_2);
    auto size = vkVersion.size();
    memHandler_->ConfigureContext(&options, vkVersion.c_str(), size, cacheDir);
    drawingContext->BuildFromVK(backendContext_, options);
    int maxResources = 0;
    size_t maxResourcesSize = 0;
    int cacheLimitsTimes = CACHE_LIMITS_TIMES;
    drawingContext->GetResourceCacheLimits(&maxResources, &maxResourcesSize);
    if (maxResourcesSize > 0) {
        drawingContext->SetResourceCacheLimits(cacheLimitsTimes * maxResources,
            cacheLimitsTimes * std::fmin(maxResourcesSize, GR_CACHE_MAX_BYTE_SIZE));
    } else {
        drawingContext->SetResourceCacheLimits(GR_CACHE_MAX_COUNT, GR_CACHE_MAX_BYTE_SIZE);
    }
    return drawingContext;
}

std::shared_ptr<Drawing::GPUContext> RsVulkanInterface::CreateDrawingContext(std::string cacheDir)
{
    auto drawingContext = DoCreateDrawingContext(cacheDir);
    RsVulkanContext::SaveNewDrawingContext(gettid(), drawingContext);
    return drawingContext;
}

void RsVulkanInterface::DestroyAllSemaphoreFence()
{
    std::lock_guard<std::mutex> lock(semaphoreLock_);
    RS_LOGE("Device lost clear all semaphore fences, count [%{public}zu] ", usedSemaphoreFenceList_.size());
    for (auto&& semaphoreFence : usedSemaphoreFenceList_) {
        vkDestroySemaphore(device_, semaphoreFence.semaphore, nullptr);
    }
    usedSemaphoreFenceList_.clear();
}

void RsVulkanInterface::SetVulkanDeviceStatus(VulkanDeviceStatus status)
{
    deviceStatus_ = status;
}

VulkanDeviceStatus RsVulkanInterface::GetVulkanDeviceStatus()
{
    return deviceStatus_.load();
}

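// Creates a binary semaphore for queue signalling. Before allocating, it garbage-collects
// semaphores whose paired SyncFence has signalled, force-drains the list once it exceeds
// 3000 entries, and periodically logs the semaphore/fence bookkeeping counters.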
VkSemaphore RsVulkanInterface::RequireSemaphore()
{
    {
        std::lock_guard<std::mutex> lock(semaphoreLock_);
        // 3000 means too many used semaphore fences
        if (usedSemaphoreFenceList_.size() >= 3000) {
            RS_LOGE("Too many used semaphore fences, count [%{public}zu] ", usedSemaphoreFenceList_.size());
            for (auto&& semaphoreFence : usedSemaphoreFenceList_) {
                if (semaphoreFence.fence != nullptr) {
                    semaphoreFence.fence->Wait(-1);
                }
                vkDestroySemaphore(device_, semaphoreFence.semaphore, nullptr);
            }
            usedSemaphoreFenceList_.clear();
        }
        for (auto it = usedSemaphoreFenceList_.begin(); it != usedSemaphoreFenceList_.end();) {
            auto& fence = it->fence;
            if (fence == nullptr || fence->GetStatus() == FenceStatus::SIGNALED) {
                vkDestroySemaphore(device_, it->semaphore, nullptr);
                it->semaphore = VK_NULL_HANDLE;
                it = usedSemaphoreFenceList_.erase(it);
            } else {
                it++;
            }
        }
        // 7200 : print once every 1min at most.
        if (RsVulkanInterface::callbackSemaphoreInfofdDupCnt_.load(std::memory_order_relaxed) % 7200 == 0) {
            RS_LOGI("used fences, surface flush count[%{public}" PRIu64 "],"
                "dup fence count[%{public}" PRIu64 "], rs deref count[%{public}" PRIu64 "],"
                "call 2DEngineDeref count[%{public}" PRIu64 "], 2DEngine deref count[%{public}" PRIu64 "],"
                "Defensive 2DEngine deref count[%{public}" PRIu64 "], wait close fence count[%{public}zu]",
                RsVulkanInterface::callbackSemaphoreInfoFlushCnt_.load(std::memory_order_relaxed),
                RsVulkanInterface::callbackSemaphoreInfofdDupCnt_.load(std::memory_order_relaxed),
                RsVulkanInterface::callbackSemaphoreInfoRSDerefCnt_.load(std::memory_order_relaxed),
                RsVulkanInterface::callbackSemaphoreInfo2DEngineCallCnt_.load(std::memory_order_relaxed),
                RsVulkanInterface::callbackSemaphoreInfo2DEngineDerefCnt_.load(std::memory_order_relaxed),
                RsVulkanInterface::callbackSemaphoreInfo2DEngineDefensiveDerefCnt_.load(std::memory_order_relaxed),
                usedSemaphoreFenceList_.size());
        }
    }

    VkSemaphoreCreateInfo semaphoreInfo;
    semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    semaphoreInfo.pNext = nullptr;
    semaphoreInfo.flags = 0;
    VkSemaphore semaphore;
    auto err = vkCreateSemaphore(device_, &semaphoreInfo, nullptr, &semaphore);
    if (err != VK_SUCCESS) {
        return VK_NULL_HANDLE;
    }
    return semaphore;
}

void RsVulkanInterface::SendSemaphoreWithFd(VkSemaphore semaphore, int fenceFd)
{
    std::lock_guard<std::mutex> lock(semaphoreLock_);
    auto& semaphoreFence = usedSemaphoreFenceList_.emplace_back();
    semaphoreFence.semaphore = semaphore;
    semaphoreFence.fence = (fenceFd != -1 ? std::make_unique<SyncFence>(fenceFd) : nullptr);
}

RsVulkanContext::RsVulkanContext(std::string cacheDir)
{
    vulkanInterfaceVec_.resize(size_t(VulkanInterfaceType::MAX_INTERFACE_TYPE));
    if (RsVulkanContext::IsRecyclable()) {
        InitVulkanContextForHybridRender(cacheDir);
    } else {
        InitVulkanContextForUniRender(cacheDir);
    }
    RsVulkanContext::isInited_ = true;
    RsVulkanContext::isRecyclableSingletonValid_ = true;
}

RsVulkanContext::~RsVulkanContext()
{
    std::lock_guard<std::mutex> lock(drawingContextMutex_);
    drawingContextMap_.clear();
    protectedDrawingContextMap_.clear();
    RsVulkanContext::isRecyclableSingletonValid_ = false;
}

void RsVulkanContext::InitVulkanContextForHybridRender(const std::string& cacheDir)
{
    if (cacheDir.empty()) {
        RS_TRACE_NAME("Init hybrid render vk context without cache dir, this may cause redundant shader compiling.");
    }
    auto vulkanInterface = std::make_shared<RsVulkanInterface>();
    vulkanInterface->Init(VulkanInterfaceType::BASIC_RENDER, false);
    // init the drawing context for the RT thread, bound to backendContext_.
    vulkanInterface->CreateDrawingContext(cacheDir);

    vulkanInterfaceVec_[size_t(VulkanInterfaceType::BASIC_RENDER)] = std::move(vulkanInterface);
}

void RsVulkanContext::InitVulkanContextForUniRender(const std::string& cacheDir)
{
    // create vulkan interface for the render thread.
    auto uniRenderVulkanInterface = std::make_shared<RsVulkanInterface>();
    uniRenderVulkanInterface->Init(VulkanInterfaceType::BASIC_RENDER, false, true);
    // init the drawing context for the RT thread, bound to backendContext_.
    uniRenderVulkanInterface->CreateDrawingContext(cacheDir);
    // create vulkan interface for the hardware thread (unprotected).
    auto unprotectedReDrawVulkanInterface = std::make_shared<RsVulkanInterface>();
    unprotectedReDrawVulkanInterface->Init(VulkanInterfaceType::UNPROTECTED_REDRAW, false, false);
    vulkanInterfaceVec_[size_t(VulkanInterfaceType::BASIC_RENDER)] = std::move(uniRenderVulkanInterface);
    vulkanInterfaceVec_[size_t(VulkanInterfaceType::UNPROTECTED_REDRAW)] = std::move(unprotectedReDrawVulkanInterface);
#ifdef IS_ENABLE_DRM
    isProtected_ = true;
    auto protectedReDrawVulkanInterface = std::make_shared<RsVulkanInterface>();
    protectedReDrawVulkanInterface->Init(VulkanInterfaceType::PROTECTED_REDRAW, true, false);
    // DRM needs to adapt vkQueue in the future.
    protectedReDrawVulkanInterface->CreateDrawingContext(cacheDir);
    vulkanInterfaceVec_[size_t(VulkanInterfaceType::PROTECTED_REDRAW)] = std::move(protectedReDrawVulkanInterface);
    isProtected_ = false;
#endif
}

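// The recyclable singleton lets the whole Vulkan context be torn down and rebuilt at
// runtime (see ReleaseRecyclableSingleton); the non-recyclable path in GetSingleton keeps a
// classic function-local static alive for the life of the process.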
std::unique_ptr<RsVulkanContext>& RsVulkanContext::GetRecyclableSingletonPtr(const std::string& cacheDir)
{
    std::lock_guard<std::recursive_mutex> lock(recyclableSingletonMutex_);
    static std::unique_ptr<RsVulkanContext> recyclableSingleton = std::make_unique<RsVulkanContext>(cacheDir);
    return recyclableSingleton;
}

RsVulkanContext& RsVulkanContext::GetRecyclableSingleton(const std::string& cacheDir)
{
    std::lock_guard<std::recursive_mutex> lock(recyclableSingletonMutex_);
    static std::string cacheDirInit = cacheDir;
    std::unique_ptr<RsVulkanContext>& recyclableSingleton = GetRecyclableSingletonPtr(cacheDirInit);
    if (recyclableSingleton == nullptr) {
        recyclableSingleton = std::make_unique<RsVulkanContext>(cacheDirInit);
    }
    return *recyclableSingleton;
}

RsVulkanContext& RsVulkanContext::GetSingleton(const std::string& cacheDir)
{
    if (isRecyclable_) {
        return RsVulkanContext::GetRecyclableSingleton(cacheDir);
    }
    static RsVulkanContext singleton = RsVulkanContext(cacheDir);
    return singleton;
}

bool RsVulkanContext::CheckDrawingContextRecyclable()
{
    std::lock_guard<std::mutex> lock(drawingContextMutex_);
    for (const auto& iter : RsVulkanContext::drawingContextMap_) {
        // the tag is only set to true in GetRecyclableDrawingContext
        if (!iter.second.second) {
            return false;
        }
    }
    for (const auto& iter : RsVulkanContext::protectedDrawingContextMap_) {
        // the tag is only set to true in GetRecyclableDrawingContext
        if (!iter.second.second) {
            return false;
        }
    }
    return true;
}

void RsVulkanContext::ReleaseRecyclableSingleton()
{
    if (!isRecyclable_) {
        return;
    }
    if (!CheckDrawingContextRecyclable()) {
        ReleaseRecyclableDrawingContext();
        return;
    }
    ReleaseDrawingContextMap();
    {
        std::lock_guard<std::recursive_mutex> lock(recyclableSingletonMutex_);
        auto& recyclableSingleton = GetRecyclableSingletonPtr();
        recyclableSingleton.reset();
    }
}

std::shared_ptr<Drawing::GPUContext> RsVulkanContext::GetRecyclableDrawingContext(const std::string& cacheDir)
{
    // 1. get or create the drawing context and save it in the map
    auto drawingContext = RsVulkanContext::GetDrawingContext(cacheDir);

    // 2. set the recyclable tag for drawingContext when it's valid (i.e. it's in the map)
    static thread_local int tidForRecyclable = gettid();
    auto& drawingContextMap = isProtected_ ?
        RsVulkanContext::protectedDrawingContextMap_ : RsVulkanContext::drawingContextMap_;
    std::lock_guard<std::mutex> lock(drawingContextMutex_);
    auto iter = drawingContextMap.find(tidForRecyclable);
    if (iter != drawingContextMap.end()) {
        iter->second.second = true;
    }
    return drawingContext;
}

void RsVulkanContext::ReleaseDrawingContextMap()
{
    std::lock_guard<std::mutex> lock(drawingContextMutex_);
    for (auto& iter : drawingContextMap_) {
        auto context = iter.second.first;
        if (context == nullptr) {
            continue;
        }
        context->FlushAndSubmit(true);
    }
    drawingContextMap_.clear();

    for (auto& protectedIter : protectedDrawingContextMap_) {
        auto protectedContext = protectedIter.second.first;
        if (protectedContext == nullptr) {
            continue;
        }
        protectedContext->FlushAndSubmit(true);
    }
    protectedDrawingContextMap_.clear();
}

void RsVulkanContext::ReleaseRecyclableDrawingContext()
{
    auto& drawingContextMap = isProtected_ ?
        RsVulkanContext::protectedDrawingContextMap_ : RsVulkanContext::drawingContextMap_;
    std::lock_guard<std::mutex> lock(drawingContextMutex_);
    for (auto iter = drawingContextMap.begin(); iter != drawingContextMap.end();) {
        if (iter->second.second) {
            iter = drawingContextMap.erase(iter);
        } else {
            ++iter;
        }
    }
}

void RsVulkanContext::ReleaseDrawingContextForThread(int tid)
{
    std::lock_guard<std::mutex> lock(drawingContextMutex_);
    drawingContextMap_.erase(tid);
    protectedDrawingContextMap_.erase(tid);
}

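// Registers a thread's drawing context. The thread_local DrawContextHolder ties cleanup to
// thread exit: when the holder is destroyed it is expected to run the callback that erases
// this tid's entries from both maps.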
void RsVulkanContext::SaveNewDrawingContext(int tid, std::shared_ptr<Drawing::GPUContext> drawingContext)
{
    std::lock_guard<std::mutex> lock(drawingContextMutex_);
    static thread_local auto func = [tid]() {
        RsVulkanContext::ReleaseDrawingContextForThread(tid);
    };
    static thread_local auto drawContextHolder = std::make_shared<DrawContextHolder>(func);
    if (isProtected_) {
        protectedDrawingContextMap_[tid] = std::make_pair(drawingContext, false);
    } else {
        drawingContextMap_[tid] = std::make_pair(drawingContext, false);
    }
}

bool RsVulkanContext::GetIsInited()
{
    return isInited_.load();
}

bool RsVulkanContext::IsRecyclableSingletonValid()
{
    return isRecyclableSingletonValid_.load();
}

RsVulkanInterface& RsVulkanContext::GetRsVulkanInterface()
{
    switch (vulkanInterfaceType_) {
        case VulkanInterfaceType::PROTECTED_REDRAW:
            return *(vulkanInterfaceVec_[size_t(VulkanInterfaceType::PROTECTED_REDRAW)].get());
        case VulkanInterfaceType::UNPROTECTED_REDRAW:
            return *(vulkanInterfaceVec_[size_t(VulkanInterfaceType::UNPROTECTED_REDRAW)].get());
        case VulkanInterfaceType::BASIC_RENDER:
        default:
            return *(vulkanInterfaceVec_[size_t(VulkanInterfaceType::BASIC_RENDER)].get());
    }
}

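// Skia's vkQueueSubmit calls are rerouted here by CreateSkiaGetProc. Submissions are
// serialized with the hardware-queue mutex for the redraw interfaces and with the graphics
// queue mutex for basic rendering, since multiple threads share the same VkQueue.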
VKAPI_ATTR VkResult RsVulkanContext::HookedVkQueueSubmit(VkQueue queue, uint32_t submitCount,
    VkSubmitInfo* pSubmits, VkFence fence)
{
    RsVulkanInterface& vkInterface = RsVulkanContext::GetSingleton().GetRsVulkanInterface();
    auto interfaceType = vkInterface.GetInterfaceType();
    if (interfaceType == VulkanInterfaceType::UNPROTECTED_REDRAW ||
        interfaceType == VulkanInterfaceType::PROTECTED_REDRAW) {
        std::lock_guard<std::mutex> lock(vkInterface.hGraphicsQueueMutex_);
        RS_LOGD("%{public}s hardware queue, interfaceType: %{public}d", __func__, static_cast<int>(interfaceType));
        RS_OPTIONAL_TRACE_NAME_FMT("%s hardware queue, interfaceType: %d", __func__, static_cast<int>(interfaceType));
        return vkInterface.vkQueueSubmit(queue, submitCount, pSubmits, fence);
    } else if (interfaceType == VulkanInterfaceType::BASIC_RENDER) {
        std::lock_guard<std::mutex> lock(vkInterface.graphicsQueueMutex_);
        RS_LOGD("%{public}s queue", __func__);
        RS_OPTIONAL_TRACE_NAME_FMT("%s queue", __func__);
        return vkInterface.vkQueueSubmit(queue, submitCount, pSubmits, fence);
    }
    RS_LOGE("%{public}s abnormal queue occurred", __func__);
    return VK_ERROR_UNKNOWN;
}

VKAPI_ATTR VkResult RsVulkanContext::HookedVkQueueSignalReleaseImageOHOS(VkQueue queue, uint32_t waitSemaphoreCount,
    const VkSemaphore* pWaitSemaphores, VkImage image, int32_t* pNativeFenceFd)
{
    RsVulkanInterface& vkInterface = RsVulkanContext::GetSingleton().GetRsVulkanInterface();
    auto interfaceType = vkInterface.GetInterfaceType();
    if (interfaceType == VulkanInterfaceType::UNPROTECTED_REDRAW ||
        interfaceType == VulkanInterfaceType::PROTECTED_REDRAW) {
        std::lock_guard<std::mutex> lock(vkInterface.hGraphicsQueueMutex_);
        RS_LOGD("%{public}s hardware queue, interfaceType: %{public}d", __func__, static_cast<int>(interfaceType));
        RS_OPTIONAL_TRACE_NAME_FMT("%s hardware queue, interfaceType: %d", __func__, static_cast<int>(interfaceType));
        return vkInterface.vkQueueSignalReleaseImageOHOS(queue, waitSemaphoreCount,
            pWaitSemaphores, image, pNativeFenceFd);
    } else if (interfaceType == VulkanInterfaceType::BASIC_RENDER) {
        std::lock_guard<std::mutex> lock(vkInterface.graphicsQueueMutex_);
        RS_LOGD("%{public}s queue", __func__);
        RS_OPTIONAL_TRACE_NAME_FMT("%s queue", __func__);
        return vkInterface.vkQueueSignalReleaseImageOHOS(queue,
            waitSemaphoreCount, pWaitSemaphores, image, pNativeFenceFd);
    }
    RS_LOGE("%{public}s abnormal queue occurred", __func__);
    return VK_ERROR_UNKNOWN;
}

std::shared_ptr<Drawing::GPUContext> RsVulkanContext::CreateDrawingContext()
{
    static thread_local int tidForRecyclable = gettid();
    {
        std::lock_guard<std::mutex> lock(drawingContextMutex_);
        switch (vulkanInterfaceType_) {
            case VulkanInterfaceType::PROTECTED_REDRAW: {
                // protectedDrawingContextMap_ : <tid, <drawingContext, isRecyclable>>
                auto protectedIter = protectedDrawingContextMap_.find(tidForRecyclable);
                if (protectedIter != protectedDrawingContextMap_.end() && protectedIter->second.first != nullptr) {
                    return protectedIter->second.first;
                }
                break;
            }
            case VulkanInterfaceType::BASIC_RENDER:
            case VulkanInterfaceType::UNPROTECTED_REDRAW:
            default: {
                // drawingContextMap_ : <tid, <drawingContext, isRecyclable>>
                auto iter = drawingContextMap_.find(tidForRecyclable);
                if (iter != drawingContextMap_.end() && iter->second.first != nullptr) {
                    return iter->second.first;
                }
                break;
            }
        }
    }
    return GetRsVulkanInterface().CreateDrawingContext();
}

std::shared_ptr<Drawing::GPUContext> RsVulkanContext::GetDrawingContext(const std::string& cacheDir)
{
    static thread_local int tidForRecyclable = gettid();
    {
        std::lock_guard<std::mutex> lock(drawingContextMutex_);
        if (isProtected_) {
            // protectedDrawingContextMap_ : <tid, <drawingContext, isRecyclable>>
            auto protectedIter = protectedDrawingContextMap_.find(tidForRecyclable);
            if (protectedIter != protectedDrawingContextMap_.end() && protectedIter->second.first != nullptr) {
                return protectedIter->second.first;
            }
        } else {
            // drawingContextMap_ : <tid, <drawingContext, isRecyclable>>
            auto iter = drawingContextMap_.find(tidForRecyclable);
            if (iter != drawingContextMap_.end() && iter->second.first != nullptr) {
                return iter->second.first;
            }
        }
    }
    return GetRsVulkanInterface().CreateDrawingContext(cacheDir);
}

bool RsVulkanContext::GetIsProtected() const
{
    return isProtected_;
}

void RsVulkanContext::SetIsProtected(bool isProtected)
{
    if (isProtected) {
        vulkanInterfaceType_ = VulkanInterfaceType::PROTECTED_REDRAW;
    } else {
        vulkanInterfaceType_ = VulkanInterfaceType::UNPROTECTED_REDRAW;
    }
    if (isProtected_ != isProtected) {
        RS_LOGW("RsVulkanContext switch, isProtected: %{public}d.", isProtected);
        if (isProtected) {
            RS_TRACE_NAME("RsVulkanContext switch to protected GPU context");
        }
        ClearGrContext(isProtected);
    }
}

bool RsVulkanContext::IsRecyclable()
{
    return isRecyclable_;
}

void RsVulkanContext::SetRecyclable(bool isRecyclable)
{
    isRecyclable_ = isRecyclable;
}

void RsVulkanContext::ClearGrContext(bool isProtected)
{
    RS_TRACE_NAME("RsVulkanContext ClearGrContext");
    GetDrawingContext()->PurgeUnlockedResources(true);
    isProtected_ = isProtected;
    GetDrawingContext()->ResetContext();
}
} // namespace Rosen
} // namespace OHOS