/*
 * Copyright (c) 2023 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "platform/ohos/backend/rs_vulkan_context.h"
#include <set>
#include <dlfcn.h>
#include <vector>
#include <cmath>
#include "platform/common/rs_log.h"
#include "render_context/memory_handler.h"
#include "include/gpu/vk/GrVkExtensions.h"
#include "unistd.h"
#include "vulkan/vulkan_core.h"
#include "vulkan/vulkan_ohos.h"

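// Resolves a Vulkan entry point via AcquireProc and stores it in the matching
// vk<Name> member; logs an error if the symbol cannot be resolved.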
#define ACQUIRE_PROC(name, context) \
    if (!(vk##name = AcquireProc("vk" #name, context))) { \
        ROSEN_LOGE("Could not acquire proc: vk" #name); \
    }

namespace OHOS {
namespace Rosen {

#ifndef USE_ROSEN_DRAWING
thread_local sk_sp<GrDirectContext> RsVulkanContext::skContext_ = nullptr;
#else
thread_local std::shared_ptr<Drawing::GPUContext> RsVulkanContext::drawingContext_ = nullptr;
#endif

static std::vector<const char*> gInstanceExtensions = {
    VK_KHR_SURFACE_EXTENSION_NAME,
    VK_OHOS_SURFACE_EXTENSION_NAME,
    VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME,
};

static std::vector<const char*> gDeviceExtensions = {
    VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME,
    VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME,
    VK_KHR_MAINTENANCE1_EXTENSION_NAME,
    VK_KHR_MAINTENANCE2_EXTENSION_NAME,
    VK_KHR_MAINTENANCE3_EXTENSION_NAME,
    VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME,
    VK_KHR_BIND_MEMORY_2_EXTENSION_NAME
};

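// Skia GPU resource cache tuning: the queried limits are scaled by CACHE_LIMITS_TIMES
// (byte size capped at 96 MB before scaling); if no byte limit is reported, the
// GR_CACHE_MAX_* defaults are applied instead.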
static const int GR_CACHE_MAX_COUNT = 8192;
static const size_t GR_CACHE_MAX_BYTE_SIZE = 96 * (1 << 20);
static const int32_t CACHE_LIMITS_TIMES = 3;

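// Opens libvulkan, creates the VkInstance and VkDevice, then builds the Skia backend
// context that render threads use to create their GPU contexts.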
RsVulkanContext::RsVulkanContext()
    : handle_(nullptr), acquiredMandatoryProcAddresses_(false), memHandler_(nullptr)
{
    acquiredMandatoryProcAddresses_ = OpenLibraryHandle() && SetupLoaderProcAddresses();
    CreateInstance();
    SelectPhysicalDevice();
    CreateDevice();
    std::unique_lock<std::mutex> lock(vkMutex_);
    CreateSkiaBackendContext(&backendContext_);
}

RsVulkanContext::~RsVulkanContext()
{
    CloseLibraryHandle();
}

bool RsVulkanContext::IsValid() const
{
    return instance_ != VK_NULL_HANDLE && device_ != VK_NULL_HANDLE;
}

RsVulkanContext& RsVulkanContext::GetSingleton()
{
    static RsVulkanContext singleton {};
    return singleton;
}

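// vkQueueSubmit calls coming through the Skia get-proc are routed here so that
// submissions to the shared graphics queue are serialized with graphicsQueueMutex_;
// the dedicated hardware queue is submitted to without taking that lock.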
VKAPI_ATTR VkResult RsVulkanContext::HookedVkQueueSubmit(VkQueue queue,
    uint32_t submitCount,
    const VkSubmitInfo* pSubmits, VkFence fence)
{
    RsVulkanContext& vkContext = RsVulkanContext::GetSingleton();
    if (queue == vkContext.GetHardwareQueue()) {
        return vkContext.vkQueueSubmit(queue, submitCount, pSubmits, fence);
    }
    std::lock_guard<std::mutex> lock(vkContext.graphicsQueueMutex_);
    return vkContext.vkQueueSubmit(queue, submitCount, pSubmits, fence);
}

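// The OHOS release-image signal follows the same locking policy as the submit hook.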
VKAPI_ATTR VkResult RsVulkanContext::HookedVkQueueSignalReleaseImageOHOS(
    VkQueue queue, uint32_t waitSemaphoreCount,
    const VkSemaphore* pWaitSemaphores, VkImage image, int32_t* pNativeFenceFd)
{
    RsVulkanContext& vkContext = RsVulkanContext::GetSingleton();
    if (queue == vkContext.GetHardwareQueue()) {
        return vkContext.vkQueueSignalReleaseImageOHOS(queue, waitSemaphoreCount, pWaitSemaphores,
            image, pNativeFenceFd);
    }
    std::lock_guard<std::mutex> lock(vkContext.graphicsQueueMutex_);
    return vkContext.vkQueueSignalReleaseImageOHOS(queue, waitSemaphoreCount, pWaitSemaphores, image, pNativeFenceFd);
}

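// Resolves the loader-level entry points (vkGetInstanceProcAddr, vkCreateInstance, ...)
// directly from the dlopen handle before any instance exists.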
bool RsVulkanContext::SetupLoaderProcAddresses()
{
    if (handle_ == nullptr) {
        return true;
    }
    vkGetInstanceProcAddr = reinterpret_cast<PFN_vkGetInstanceProcAddr>(dlsym(handle_, "vkGetInstanceProcAddr"));
    vkGetDeviceProcAddr = reinterpret_cast<PFN_vkGetDeviceProcAddr>(dlsym(handle_, "vkGetDeviceProcAddr"));
    vkEnumerateInstanceExtensionProperties = reinterpret_cast<PFN_vkEnumerateInstanceExtensionProperties>(
        dlsym(handle_, "vkEnumerateInstanceExtensionProperties"));
    vkCreateInstance = reinterpret_cast<PFN_vkCreateInstance>(dlsym(handle_, "vkCreateInstance"));

    if (!vkGetInstanceProcAddr) {
        ROSEN_LOGE("Could not acquire vkGetInstanceProcAddr");
        return false;
    }

    VkInstance null_instance = VK_NULL_HANDLE;
    ACQUIRE_PROC(EnumerateInstanceLayerProperties, null_instance);
    return true;
}

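// Creates the VkInstance with the surface extensions needed for OHOS windows and
// resolves the instance-level entry points used later.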
bool RsVulkanContext::CreateInstance()
{
    if (!acquiredMandatoryProcAddresses_) {
        return false;
    }

    const VkApplicationInfo info = {
        .sType = VK_STRUCTURE_TYPE_APPLICATION_INFO,
        .pNext = nullptr,
        .pApplicationName = "OHOS",
        .applicationVersion = 0,
        .pEngineName = "Rosen",
        .engineVersion = VK_MAKE_VERSION(1, 0, 0),
        .apiVersion = VK_API_VERSION_1_2,
    };

    const VkInstanceCreateInfo create_info = {
        .sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,
        .pNext = nullptr,
        .flags = 0,
        .pApplicationInfo = &info,
        .enabledLayerCount = 0,
        .ppEnabledLayerNames = nullptr,
        .enabledExtensionCount = static_cast<uint32_t>(gInstanceExtensions.size()),
        .ppEnabledExtensionNames = gInstanceExtensions.data(),
    };
    if (vkCreateInstance(&create_info, nullptr, &instance_) != VK_SUCCESS) {
        ROSEN_LOGE("Could not create vulkan instance");
        return false;
    }

    ACQUIRE_PROC(CreateDevice, instance_);
    ACQUIRE_PROC(DestroyDevice, instance_);
    ACQUIRE_PROC(DestroyInstance, instance_);
    ACQUIRE_PROC(EnumerateDeviceLayerProperties, instance_);
    ACQUIRE_PROC(EnumeratePhysicalDevices, instance_);
    ACQUIRE_PROC(GetPhysicalDeviceFeatures, instance_);
    ACQUIRE_PROC(GetPhysicalDeviceQueueFamilyProperties, instance_);
    ACQUIRE_PROC(GetPhysicalDeviceSurfaceCapabilitiesKHR, instance_);
    ACQUIRE_PROC(GetPhysicalDeviceSurfaceFormatsKHR, instance_);
    ACQUIRE_PROC(GetPhysicalDeviceSurfacePresentModesKHR, instance_);
    ACQUIRE_PROC(GetPhysicalDeviceSurfaceSupportKHR, instance_);
    ACQUIRE_PROC(DestroySurfaceKHR, instance_);
    ACQUIRE_PROC(CreateSurfaceOHOS, instance_);
    ACQUIRE_PROC(GetPhysicalDeviceMemoryProperties, instance_);
    ACQUIRE_PROC(GetPhysicalDeviceMemoryProperties2, instance_);
    ACQUIRE_PROC(GetPhysicalDeviceFeatures2, instance_);

    return true;
}

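// Selects the first enumerated physical device; no suitability scoring is done.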
bool RsVulkanContext::SelectPhysicalDevice()
{
    if (!instance_) {
        return false;
    }
    uint32_t deviceCount = 0;
    if (vkEnumeratePhysicalDevices(instance_, &deviceCount, nullptr) != VK_SUCCESS) {
        ROSEN_LOGE("vkEnumeratePhysicalDevices failed");
        return false;
    }

    std::vector<VkPhysicalDevice> physicalDevices;
    physicalDevices.resize(deviceCount);
    if (vkEnumeratePhysicalDevices(instance_, &deviceCount, physicalDevices.data()) != VK_SUCCESS) {
        ROSEN_LOGE("vkEnumeratePhysicalDevices failed");
        return false;
    }
    physicalDevice_ = physicalDevices[0];
    return true;
}

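// Creates the logical device with two queues from the graphics family (queue_ and
// hardwareQueue_) and chains the Ycbcr sampler conversion feature into the device features.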
bool RsVulkanContext::CreateDevice()
{
    if (!physicalDevice_) {
        return false;
    }
    uint32_t queueCount;
    vkGetPhysicalDeviceQueueFamilyProperties(physicalDevice_, &queueCount, nullptr);

    std::vector<VkQueueFamilyProperties> queueProps(queueCount);
    vkGetPhysicalDeviceQueueFamilyProperties(physicalDevice_, &queueCount, queueProps.data());

    for (uint32_t i = 0; i < queueCount; i++) {
        if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
            graphicsQueueFamilyIndex_ = i;
            break;
        }
    }

    if (graphicsQueueFamilyIndex_ == UINT32_MAX) {
        ROSEN_LOGE("graphicsQueueFamilyIndex_ is not valid");
        return false;
    }
    const float priorities[] = {1.0f, 1.0f};
    std::vector<VkDeviceQueueCreateInfo> queueCreate {{
        .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, .pNext = nullptr,
        .flags = 0, .queueFamilyIndex = graphicsQueueFamilyIndex_, .queueCount = 2,
        .pQueuePriorities = priorities,
    }};
    ycbcrFeature_.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES;
    ycbcrFeature_.pNext = nullptr;
    physicalDeviceFeatures2_.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
    physicalDeviceFeatures2_.pNext = &ycbcrFeature_;
    vkGetPhysicalDeviceFeatures2(physicalDevice_, &physicalDeviceFeatures2_);

    const VkDeviceCreateInfo createInfo = {
        .sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, .pNext = &physicalDeviceFeatures2_,
        .flags = 0, .queueCreateInfoCount = static_cast<uint32_t>(queueCreate.size()),
        .pQueueCreateInfos = queueCreate.data(),
        .enabledLayerCount = 0, .ppEnabledLayerNames = nullptr,
        .enabledExtensionCount = static_cast<uint32_t>(gDeviceExtensions.size()),
        .ppEnabledExtensionNames = gDeviceExtensions.data(), .pEnabledFeatures = nullptr,
    };
    if (vkCreateDevice(physicalDevice_, &createInfo, nullptr, &device_) != VK_SUCCESS) {
        ROSEN_LOGE("vkCreateDevice failed");
        return false;
    }

    if (!SetupDeviceProcAddresses(device_)) {
        return false;
    }

    vkGetDeviceQueue(device_, graphicsQueueFamilyIndex_, 0, &queue_);
    vkGetDeviceQueue(device_, graphicsQueueFamilyIndex_, 1, &hardwareQueue_);
    return true;
}

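// Fills the GrVkBackendContext that Skia consumes. When createNew is true the context
// is bound to hardwareQueue_, otherwise to the shared queue_; the instance and device
// remain owned by RsVulkanContext (fOwnsInstanceAndDevice = false).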
bool RsVulkanContext::CreateSkiaBackendContext(GrVkBackendContext* context, bool createNew)
{
    auto getProc = CreateSkiaGetProc();
    if (getProc == nullptr) {
        ROSEN_LOGE("CreateSkiaBackendContext getProc is null");
        return false;
    }

    VkPhysicalDeviceFeatures features;
    vkGetPhysicalDeviceFeatures(physicalDevice_, &features);

    uint32_t fFeatures = 0;
    if (features.geometryShader) {
        fFeatures |= kGeometryShader_GrVkFeatureFlag;
    }
    if (features.dualSrcBlend) {
        fFeatures |= kDualSrcBlend_GrVkFeatureFlag;
    }
    if (features.sampleRateShading) {
        fFeatures |= kSampleRateShading_GrVkFeatureFlag;
    }

    context->fInstance = instance_;
    context->fPhysicalDevice = physicalDevice_;
    context->fDevice = device_;
    if (createNew) {
        context->fQueue = hardwareQueue_;
    } else {
        context->fQueue = queue_;
    }
    context->fGraphicsQueueIndex = graphicsQueueFamilyIndex_;
    context->fMinAPIVersion = VK_API_VERSION_1_2;

    uint32_t extensionFlags = kKHR_surface_GrVkExtensionFlag;
    extensionFlags |= kKHR_ohos_surface_GrVkExtensionFlag;

    context->fExtensions = extensionFlags;

    skVkExtensions_.init(getProc, instance_, physicalDevice_,
        gInstanceExtensions.size(), gInstanceExtensions.data(),
        gDeviceExtensions.size(), gDeviceExtensions.data());

    context->fVkExtensions = &skVkExtensions_;
    context->fDeviceFeatures2 = &physicalDeviceFeatures2_;
    context->fFeatures = fFeatures;
    context->fGetProc = std::move(getProc);
    context->fOwnsInstanceAndDevice = false;

    return true;
}

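// Resolves the device-level entry points once the VkDevice has been created.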
bool RsVulkanContext::SetupDeviceProcAddresses(VkDevice device)
{
    ACQUIRE_PROC(AllocateCommandBuffers, device_);
    ACQUIRE_PROC(AllocateMemory, device_);
    ACQUIRE_PROC(BeginCommandBuffer, device_);
    ACQUIRE_PROC(BindImageMemory, device_);
    ACQUIRE_PROC(BindImageMemory2, device_);
    ACQUIRE_PROC(CmdPipelineBarrier, device_);
    ACQUIRE_PROC(CreateCommandPool, device_);
    ACQUIRE_PROC(CreateFence, device_);
    ACQUIRE_PROC(CreateImage, device_);
    ACQUIRE_PROC(CreateImageView, device_);
    ACQUIRE_PROC(CreateSemaphore, device_);
    ACQUIRE_PROC(DestroyCommandPool, device_);
    ACQUIRE_PROC(DestroyFence, device_);
    ACQUIRE_PROC(DestroyImage, device_);
    ACQUIRE_PROC(DestroyImageView, device_);
    ACQUIRE_PROC(DestroySemaphore, device_);
    ACQUIRE_PROC(DeviceWaitIdle, device_);
    ACQUIRE_PROC(EndCommandBuffer, device_);
    ACQUIRE_PROC(FreeCommandBuffers, device_);
    ACQUIRE_PROC(FreeMemory, device_);
    ACQUIRE_PROC(GetDeviceQueue, device_);
    ACQUIRE_PROC(GetImageMemoryRequirements, device_);
    ACQUIRE_PROC(QueueSubmit, device_);
    ACQUIRE_PROC(QueueWaitIdle, device_);
    ACQUIRE_PROC(ResetCommandBuffer, device_);
    ACQUIRE_PROC(ResetFences, device_);
    ACQUIRE_PROC(WaitForFences, device_);
    ACQUIRE_PROC(AcquireNextImageKHR, device_);
    ACQUIRE_PROC(CreateSwapchainKHR, device_);
    ACQUIRE_PROC(DestroySwapchainKHR, device_);
    ACQUIRE_PROC(GetSwapchainImagesKHR, device_);
    ACQUIRE_PROC(QueuePresentKHR, device_);
    ACQUIRE_PROC(GetNativeBufferPropertiesOHOS, device_);
    ACQUIRE_PROC(QueueSignalReleaseImageOHOS, device_);
    ACQUIRE_PROC(ImportSemaphoreFdKHR, device_);

    return true;
}

bool RsVulkanContext::OpenLibraryHandle()
{
    ROSEN_LOGI("VulkanProcTable OpenLibraryHandle: dlopen libvulkan.so.");
    dlerror();
    handle_ = dlopen("/system/lib64/libvulkan.so", RTLD_NOW | RTLD_LOCAL);
    if (handle_ == nullptr) {
        ROSEN_LOGE("Could not open the vulkan library: %{public}s", dlerror());
        return false;
    }
    return true;
}

bool RsVulkanContext::CloseLibraryHandle()
{
    if (handle_ != nullptr) {
        dlerror();
        if (dlclose(handle_) != 0) {
            ROSEN_LOGE("Could not close the vulkan lib handle. This indicates a leak. %{public}s", dlerror());
        }
        handle_ = nullptr;
    }
    return handle_ == nullptr;
}

PFN_vkVoidFunction RsVulkanContext::AcquireProc(
    const char* procName,
    const VkInstance& instance) const
{
    if (procName == nullptr || !vkGetInstanceProcAddr) {
        return nullptr;
    }

    return vkGetInstanceProcAddr(instance, procName);
}

PFN_vkVoidFunction RsVulkanContext::AcquireProc(
    const char* procName,
    const VkDevice& device) const
{
    if (procName == nullptr || !device || !vkGetDeviceProcAddr) {
        return nullptr;
    }
    return vkGetDeviceProcAddr(device, procName);
}

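// Builds the GrVkGetProc lambda Skia uses to resolve functions; vkQueueSubmit is
// redirected to HookedVkQueueSubmit so submissions go through the mutex guard above.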
GrVkGetProc RsVulkanContext::CreateSkiaGetProc() const
{
    if (!IsValid()) {
        return nullptr;
    }

    return [this](const char* procName, VkInstance instance, VkDevice device) {
        if (device != VK_NULL_HANDLE) {
            std::string_view s{procName};
            if (s.find("vkQueueSubmit") == 0) {
                return (PFN_vkVoidFunction)RsVulkanContext::HookedVkQueueSubmit;
            }
            auto result = AcquireProc(procName, device);
            if (result != nullptr) {
                return result;
            }
        }
        return AcquireProc(procName, instance);
    };
}

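// Per-thread GPU context creation. The context is cached thread-locally; when
// independentContext is requested a fresh context is built on the hardware-queue
// backend instead. Resource cache limits are scaled by CACHE_LIMITS_TIMES, falling
// back to the GR_CACHE_MAX_* defaults when no byte limit is reported.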
#ifndef USE_ROSEN_DRAWING
sk_sp<GrDirectContext> RsVulkanContext::CreateSkContext(bool independentContext)
{
    std::unique_lock<std::mutex> lock(vkMutex_);
    if (independentContext) {
        return CreateNewSkContext();
    }
    if (skContext_ != nullptr) {
        return skContext_;
    }

    skContext_ = GrDirectContext::MakeVulkan(backendContext_);
    int maxResources = 0;
    size_t maxResourcesSize = 0;
    int cacheLimitsTimes = CACHE_LIMITS_TIMES;
    skContext_->getResourceCacheLimits(&maxResources, &maxResourcesSize);
    if (maxResourcesSize > 0) {
        skContext_->setResourceCacheLimits(cacheLimitsTimes * maxResources,
            cacheLimitsTimes * std::fmin(maxResourcesSize, GR_CACHE_MAX_BYTE_SIZE));
    } else {
        skContext_->setResourceCacheLimits(GR_CACHE_MAX_COUNT, GR_CACHE_MAX_BYTE_SIZE);
    }
    RS_LOGE("skContext_:%{public}p %{public}p", skContext_.get(), backendContext_.fQueue);
    return skContext_;
}
#else
std::shared_ptr<Drawing::GPUContext> RsVulkanContext::CreateDrawingContext(bool independentContext)
{
    std::unique_lock<std::mutex> lock(vkMutex_);
    if (independentContext) {
        return CreateNewDrawingContext();
    }
    if (drawingContext_ != nullptr) {
        return drawingContext_;
    }

    drawingContext_ = std::make_shared<Drawing::GPUContext>();
    Drawing::GPUContextOptions options;
    memHandler_ = std::make_shared<MemoryHandler>();
    std::string vkVersion = std::to_string(VK_API_VERSION_1_2);
    auto size = vkVersion.size();
    memHandler_->ConfigureContext(&options, vkVersion.c_str(), size);
    drawingContext_->BuildFromVK(backendContext_, options);
    int maxResources = 0;
    size_t maxResourcesSize = 0;
    int cacheLimitsTimes = CACHE_LIMITS_TIMES;
    drawingContext_->GetResourceCacheLimits(&maxResources, &maxResourcesSize);
    if (maxResourcesSize > 0) {
        drawingContext_->SetResourceCacheLimits(cacheLimitsTimes * maxResources,
            cacheLimitsTimes * std::fmin(maxResourcesSize, GR_CACHE_MAX_BYTE_SIZE));
    } else {
        drawingContext_->SetResourceCacheLimits(GR_CACHE_MAX_COUNT, GR_CACHE_MAX_BYTE_SIZE);
    }
    return drawingContext_;
}
#endif
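// Independent contexts are built on hbackendContext_, which wraps the dedicated
// hardware queue, and the resulting context is also retained in hcontext_.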
#ifndef USE_ROSEN_DRAWING
sk_sp<GrDirectContext> RsVulkanContext::CreateNewSkContext()
{
    CreateSkiaBackendContext(&hbackendContext_, true);
    skContext_ = GrDirectContext::MakeVulkan(hbackendContext_);
    int maxResources = 0;
    size_t maxResourcesSize = 0;
    int cacheLimitsTimes = CACHE_LIMITS_TIMES;
    skContext_->getResourceCacheLimits(&maxResources, &maxResourcesSize);
    if (maxResourcesSize > 0) {
        skContext_->setResourceCacheLimits(cacheLimitsTimes * maxResources, cacheLimitsTimes *
            std::fmin(maxResourcesSize, GR_CACHE_MAX_BYTE_SIZE));
    } else {
        skContext_->setResourceCacheLimits(GR_CACHE_MAX_COUNT, GR_CACHE_MAX_BYTE_SIZE);
    }
    hcontext_ = skContext_;
    RS_LOGD("new skContext_:%{public}p %{public}p", skContext_.get(), hbackendContext_.fQueue);
    return skContext_;
}
#else
std::shared_ptr<Drawing::GPUContext> RsVulkanContext::CreateNewDrawingContext()
{
    CreateSkiaBackendContext(&hbackendContext_, true);
    drawingContext_ = std::make_shared<Drawing::GPUContext>();
    Drawing::GPUContextOptions options;
    memHandler_ = std::make_shared<MemoryHandler>();
    std::string vkVersion = std::to_string(VK_API_VERSION_1_2);
    auto size = vkVersion.size();
    memHandler_->ConfigureContext(&options, vkVersion.c_str(), size);
    drawingContext_->BuildFromVK(hbackendContext_, options);
    int maxResources = 0;
    size_t maxResourcesSize = 0;
    int cacheLimitsTimes = CACHE_LIMITS_TIMES;
    drawingContext_->GetResourceCacheLimits(&maxResources, &maxResourcesSize);
    if (maxResourcesSize > 0) {
        drawingContext_->SetResourceCacheLimits(cacheLimitsTimes * maxResources, cacheLimitsTimes *
            std::fmin(maxResourcesSize, GR_CACHE_MAX_BYTE_SIZE));
    } else {
        drawingContext_->SetResourceCacheLimits(GR_CACHE_MAX_COUNT, GR_CACHE_MAX_BYTE_SIZE);
    }
    hcontext_ = drawingContext_;
    return drawingContext_;
}
#endif

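// Lazy getters: return the thread-local context if it already exists, otherwise
// create it with the default (shared-queue) backend context.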
#ifndef USE_ROSEN_DRAWING
sk_sp<GrDirectContext> RsVulkanContext::GetSkContext()
{
    if (skContext_ != nullptr) {
        return skContext_;
    }
    CreateSkContext();
    return skContext_;
}
#else
std::shared_ptr<Drawing::GPUContext> RsVulkanContext::GetDrawingContext()
{
    if (drawingContext_ != nullptr) {
        return drawingContext_;
    }
    CreateDrawingContext();
    return drawingContext_;
}
#endif
} // namespace Rosen
} // namespace OHOS