/*
 * Copyright 2022 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/graphite/vk/VulkanSharedContext.h"

#include "include/gpu/graphite/ContextOptions.h"
#include "include/gpu/vk/VulkanBackendContext.h"
#include "include/private/base/SkMutex.h"
#include "src/gpu/graphite/Log.h"
#include "src/gpu/graphite/ResourceTypes.h"
#include "src/gpu/graphite/vk/VulkanBuffer.h"
#include "src/gpu/graphite/vk/VulkanCaps.h"
#include "src/gpu/graphite/vk/VulkanResourceProvider.h"
#include "src/gpu/vk/VulkanInterface.h"
#include "src/gpu/vk/VulkanUtilsPriv.h"

#if defined(SK_USE_VMA)
#include "src/gpu/vk/VulkanAMDMemoryAllocator.h"
#endif

namespace skgpu::graphite {

sk_sp<SharedContext> VulkanSharedContext::Make(const VulkanBackendContext& context,
                                               const ContextOptions& options) {
    if (context.fInstance == VK_NULL_HANDLE ||
        context.fPhysicalDevice == VK_NULL_HANDLE ||
        context.fDevice == VK_NULL_HANDLE ||
        context.fQueue == VK_NULL_HANDLE) {
        SKGPU_LOG_E("Failed to create VulkanSharedContext because either fInstance, "
                    "fPhysicalDevice, fDevice, or fQueue in the VulkanBackendContext is "
                    "VK_NULL_HANDLE.");
        return nullptr;
    }
    if (!context.fGetProc) {
        SKGPU_LOG_E("Failed to create VulkanSharedContext because there is no valid VulkanGetProc"
                    " on the VulkanBackendContext.");
        return nullptr;
    }

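    // vkEnumerateInstanceVersion is resolved through the loader (no instance or device handle)
    // and only exists on Vulkan 1.1+ loaders; if it cannot be found, assume a 1.0 instance.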
    PFN_vkEnumerateInstanceVersion localEnumerateInstanceVersion =
            reinterpret_cast<PFN_vkEnumerateInstanceVersion>(
                    context.fGetProc("vkEnumerateInstanceVersion", VK_NULL_HANDLE, VK_NULL_HANDLE));
    uint32_t instanceVersion = 0;
    if (!localEnumerateInstanceVersion) {
        instanceVersion = VK_MAKE_VERSION(1, 0, 0);
    } else {
        VkResult err = localEnumerateInstanceVersion(&instanceVersion);
        if (err) {
            SKGPU_LOG_E("Failed to enumerate instance version. Err: %d\n", err);
            return nullptr;
        }
    }

    PFN_vkGetPhysicalDeviceProperties localGetPhysicalDeviceProperties =
            reinterpret_cast<PFN_vkGetPhysicalDeviceProperties>(
                    context.fGetProc("vkGetPhysicalDeviceProperties",
                                     context.fInstance,
                                     VK_NULL_HANDLE));

    if (!localGetPhysicalDeviceProperties) {
        SKGPU_LOG_E("Failed to get function pointer to vkGetPhysicalDeviceProperties.");
        return nullptr;
    }
    VkPhysicalDeviceProperties physDeviceProperties;
    localGetPhysicalDeviceProperties(context.fPhysicalDevice, &physDeviceProperties);
    uint32_t physDevVersion = physDeviceProperties.apiVersion;

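    // Clamp both the instance version and the physical-device version to the client's
    // fMaxAPIVersion (defaulting to the instance version), so nothing newer than what the client
    // requested is assumed to be available.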
    uint32_t apiVersion = context.fMaxAPIVersion ? context.fMaxAPIVersion : instanceVersion;

    instanceVersion = std::min(instanceVersion, apiVersion);
    physDevVersion = std::min(physDevVersion, apiVersion);

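    // Build the table of instance- and device-level entry points for the resolved versions, then
    // verify that every proc required by those versions and the enabled extensions was found.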
    sk_sp<const skgpu::VulkanInterface> interface(
            new skgpu::VulkanInterface(context.fGetProc,
                                       context.fInstance,
                                       context.fDevice,
                                       instanceVersion,
                                       physDevVersion,
                                       context.fVkExtensions));
    if (!interface->validate(instanceVersion, physDevVersion, context.fVkExtensions)) {
        SKGPU_LOG_E("Failed to validate VulkanInterface.");
        return nullptr;
    }

    VkPhysicalDeviceFeatures2 features;
    const VkPhysicalDeviceFeatures2* featuresPtr;
    // If fDeviceFeatures2 is not null, then we ignore fDeviceFeatures. If both are null, we assume
    // no features are enabled.
    if (!context.fDeviceFeatures2 && context.fDeviceFeatures) {
        features.pNext = nullptr;
        features.features = *context.fDeviceFeatures;
        featuresPtr = &features;
    } else {
        featuresPtr = context.fDeviceFeatures2;
    }

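    // The caps consult the (possibly null) feature set together with the enabled extensions to
    // decide which optional Vulkan functionality Graphite may rely on.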
    std::unique_ptr<const VulkanCaps> caps(new VulkanCaps(options,
                                                          interface.get(),
                                                          context.fPhysicalDevice,
                                                          physDevVersion,
                                                          featuresPtr,
                                                          context.fVkExtensions,
                                                          context.fProtectedContext));

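    // Prefer a client-supplied memory allocator. Otherwise, when SK_USE_VMA is enabled, fall back
    // to the bundled VMA-backed allocator; it only needs its own internal locking if the client
    // has not promised to externally synchronize all threads.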
    sk_sp<skgpu::VulkanMemoryAllocator> memoryAllocator = context.fMemoryAllocator;
#if defined(SK_USE_VMA)
    if (!memoryAllocator) {
        // We were not given a memory allocator at creation time, so create one here.
        skgpu::ThreadSafe threadSafe = options.fClientWillExternallySynchronizeAllThreads
                                               ? skgpu::ThreadSafe::kNo
                                               : skgpu::ThreadSafe::kYes;
        memoryAllocator = skgpu::VulkanAMDMemoryAllocator::Make(context.fInstance,
                                                                context.fPhysicalDevice,
                                                                context.fDevice,
                                                                physDevVersion,
                                                                context.fVkExtensions,
                                                                interface.get(),
                                                                threadSafe);
    }
#endif
    if (!memoryAllocator) {
        SKGPU_LOG_E("No Vulkan memory allocator was supplied and one could not be created "
                    "internally.");
        return nullptr;
    }

    return sk_sp<SharedContext>(new VulkanSharedContext(context,
                                                        std::move(interface),
                                                        std::move(memoryAllocator),
                                                        std::move(caps)));
}

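// A minimal sketch of how a client might drive Make() above, assuming the client has already
// created its own VkInstance, VkPhysicalDevice, VkDevice, and queue, and has a working
// skgpu::VulkanGetProc. The variable names are illustrative only; in practice clients typically
// reach this code through Skia's graphite Context factory rather than calling Make() directly.
//
//     skgpu::VulkanBackendContext backendContext;
//     backendContext.fInstance           = instance;
//     backendContext.fPhysicalDevice     = physicalDevice;
//     backendContext.fDevice             = device;
//     backendContext.fQueue              = graphicsQueue;
//     backendContext.fGraphicsQueueIndex = graphicsQueueIndex;
//     backendContext.fGetProc            = getProc;
//
//     skgpu::graphite::ContextOptions options;
//     sk_sp<skgpu::graphite::SharedContext> sharedContext =
//             skgpu::graphite::VulkanSharedContext::Make(backendContext, options);
//
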
VulkanSharedContext::VulkanSharedContext(const VulkanBackendContext& backendContext,
                                         sk_sp<const skgpu::VulkanInterface> interface,
                                         sk_sp<skgpu::VulkanMemoryAllocator> memoryAllocator,
                                         std::unique_ptr<const VulkanCaps> caps)
        : skgpu::graphite::SharedContext(std::move(caps), BackendApi::kVulkan)
        , fInterface(std::move(interface))
        , fMemoryAllocator(std::move(memoryAllocator))
        , fDevice(std::move(backendContext.fDevice))
        , fQueueIndex(backendContext.fGraphicsQueueIndex)
        , fDeviceLostContext(backendContext.fDeviceLostContext)
        , fDeviceLostProc(backendContext.fDeviceLostProc) {}

VulkanSharedContext::~VulkanSharedContext() {
    // Need to clear out resources before the allocator is removed.
    this->globalCache()->deleteResources();
}

std::unique_ptr<ResourceProvider> VulkanSharedContext::makeResourceProvider(
        SingleOwner* singleOwner,
        uint32_t recorderID,
        size_t resourceBudget) {
    // Establish a uniform buffer that can be updated across multiple render passes and cmd buffers
    size_t alignedIntrinsicConstantSize =
            std::max(VulkanResourceProvider::kIntrinsicConstantSize,
                     this->vulkanCaps().requiredUniformBufferAlignment());
    sk_sp<Buffer> intrinsicConstantBuffer = VulkanBuffer::Make(
            this, alignedIntrinsicConstantSize, BufferType::kUniform, AccessPattern::kGpuOnly);
    if (!intrinsicConstantBuffer) {
        SKGPU_LOG_E("Failed to create intrinsic constant uniform buffer");
        return nullptr;
    }
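    // The intrinsic constants are expected to be written with transfer commands, so the buffer
    // must have been created with TRANSFER_DST usage.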
    SkASSERT(static_cast<VulkanBuffer*>(intrinsicConstantBuffer.get())->bufferUsageFlags()
             & VK_BUFFER_USAGE_TRANSFER_DST_BIT);
    intrinsicConstantBuffer->setLabel("IntrinsicConstantBuffer");

    // Establish a vertex buffer that can be updated across multiple render passes and cmd buffers
    // for loading MSAA from resolve
    sk_sp<Buffer> loadMSAAVertexBuffer =
            VulkanBuffer::Make(this,
                               VulkanResourceProvider::kLoadMSAAVertexBufferSize,
                               BufferType::kVertex,
                               AccessPattern::kGpuOnly);
    if (!loadMSAAVertexBuffer) {
        SKGPU_LOG_E("Failed to create vertex buffer for loading MSAA from resolve");
        return nullptr;
    }
    SkASSERT(static_cast<VulkanBuffer*>(loadMSAAVertexBuffer.get())->bufferUsageFlags()
             & VK_BUFFER_USAGE_VERTEX_BUFFER_BIT);
    loadMSAAVertexBuffer->setLabel("LoadMSAAVertexBuffer");

    return std::unique_ptr<ResourceProvider>(
            new VulkanResourceProvider(this,
                                       singleOwner,
                                       recorderID,
                                       resourceBudget,
                                       std::move(intrinsicConstantBuffer),
                                       std::move(loadMSAAVertexBuffer)));
}

bool VulkanSharedContext::checkVkResult(VkResult result) const {
    switch (result) {
    case VK_SUCCESS:
        return true;
    case VK_ERROR_DEVICE_LOST:
        {
            SkAutoMutexExclusive lock(fDeviceIsLostMutex);
            if (fDeviceIsLost) {
                return false;
            }
            fDeviceIsLost = true;
            // Fall through to InvokeDeviceLostCallback (on the first VK_ERROR_DEVICE_LOST) only
            // after releasing fDeviceIsLostMutex; otherwise clients could deadlock by calling
            // isDeviceLost() from the callback.
        }
        skgpu::InvokeDeviceLostCallback(interface(),
                                        device(),
                                        fDeviceLostContext,
                                        fDeviceLostProc,
                                        vulkanCaps().supportsDeviceFaultInfo());
        return false;
    case VK_ERROR_OUT_OF_DEVICE_MEMORY:
    case VK_ERROR_OUT_OF_HOST_MEMORY:
        // TODO: determine how we'll track this in a thread-safe manner
        //this->setOOMed();
        return false;
    default:
        return false;
    }
}

} // namespace skgpu::graphite