//
// Copyright 2023 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// ShareGroupVk.cpp:
//    Implements the class methods for ShareGroupVk.
//

#include "libANGLE/renderer/vulkan/ShareGroupVk.h"

#include "common/debug.h"
#include "common/system_utils.h"
#include "libANGLE/Context.h"
#include "libANGLE/Display.h"
#include "libANGLE/renderer/vulkan/BufferVk.h"
#include "libANGLE/renderer/vulkan/ContextVk.h"
#include "libANGLE/renderer/vulkan/DeviceVk.h"
#include "libANGLE/renderer/vulkan/ImageVk.h"
#include "libANGLE/renderer/vulkan/SurfaceVk.h"
#include "libANGLE/renderer/vulkan/SyncVk.h"
#include "libANGLE/renderer/vulkan/TextureVk.h"
#include "libANGLE/renderer/vulkan/VkImageImageSiblingVk.h"
#include "libANGLE/renderer/vulkan/vk_renderer.h"

namespace rx
{

namespace
{
// How often monolithic pipelines should be created, if preferMonolithicPipelinesOverLibraries is
// enabled.  Pipeline creation is typically O(hundreds of microseconds).  A value of 2ms is chosen
// arbitrarily; it ensures that there is always at most a single pipeline job in progress, while
// maintaining a high throughput of 500 pipelines / second for heavier applications.
constexpr double kMonolithicPipelineJobPeriod = 0.002;

// Time interval in seconds at which the default buffer pools should be pruned.
constexpr double kTimeElapsedForPruneDefaultBufferPool = 0.25;

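// Returns true if every context in the map has the given (valid) priority.  Used to assert that
// all contexts in a share group share a single priority.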
bool ValidateIdenticalPriority(const egl::ContextMap &contexts, egl::ContextPriority sharedPriority)
{
    if (sharedPriority == egl::ContextPriority::InvalidEnum)
    {
        return false;
    }

    for (auto context : contexts)
    {
        const ContextVk *contextVk = vk::GetImpl(context.second);
        if (contextVk->getPriority() != sharedPriority)
        {
            return false;
        }
    }

    return true;
}
}  // namespace

// Set to true to log buffer pool stats to the INFO stream.
#define ANGLE_ENABLE_BUFFER_POOL_STATS_LOGGING false

ShareGroupVk::ShareGroupVk(const egl::ShareGroupState &state)
    : ShareGroupImpl(state),
      mContextsPriority(egl::ContextPriority::InvalidEnum),
      mIsContextsPriorityLocked(false),
      mLastMonolithicPipelineJobTime(0)
{
    mLastPruneTime = angle::GetCurrentSystemTime();
}

void ShareGroupVk::onContextAdd()
{
    ASSERT(ValidateIdenticalPriority(getContexts(), mContextsPriority));
}

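// Makes the priority of a newly added context consistent with the share group: the first context
// establishes the group priority; later contexts either take on the group priority or, if they
// request a higher priority and the group priority is not locked, the whole group is raised to
// match.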
angle::Result ShareGroupVk::unifyContextsPriority(ContextVk *newContextVk)
{
    const egl::ContextPriority newContextPriority = newContextVk->getPriority();
    ASSERT(newContextPriority != egl::ContextPriority::InvalidEnum);

    if (mContextsPriority == egl::ContextPriority::InvalidEnum)
    {
        ASSERT(!mIsContextsPriorityLocked);
        ASSERT(getContexts().empty());
        mContextsPriority = newContextPriority;
        return angle::Result::Continue;
    }

    static_assert(egl::ContextPriority::Low < egl::ContextPriority::Medium);
    static_assert(egl::ContextPriority::Medium < egl::ContextPriority::High);
    if (mContextsPriority >= newContextPriority || mIsContextsPriorityLocked)
    {
        newContextVk->setPriority(mContextsPriority);
        return angle::Result::Continue;
    }

    ANGLE_TRY(updateContextsPriority(newContextVk, newContextPriority));

    return angle::Result::Continue;
}

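// Switches the share group to the default (Medium) priority, if needed, and locks the priority so
// later changes are disallowed.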
angle::Result ShareGroupVk::lockDefaultContextsPriority(ContextVk *contextVk)
{
    constexpr egl::ContextPriority kDefaultPriority = egl::ContextPriority::Medium;
    if (!mIsContextsPriorityLocked)
    {
        if (mContextsPriority != kDefaultPriority)
        {
            ANGLE_TRY(updateContextsPriority(contextVk, kDefaultPriority));
        }
        mIsContextsPriorityLocked = true;
    }
    ASSERT(mContextsPriority == kDefaultPriority);
    return angle::Result::Continue;
}

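// Changes the priority of every context in the share group.  Before switching, a dependency is
// submitted through submitPriorityDependency() to order work already queued at the old priority
// against work that will be queued at the new priority.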
angle::Result ShareGroupVk::updateContextsPriority(ContextVk *contextVk,
                                                   egl::ContextPriority newPriority)
{
    ASSERT(!mIsContextsPriorityLocked);
    ASSERT(newPriority != egl::ContextPriority::InvalidEnum);
    ASSERT(newPriority != mContextsPriority);
    if (mContextsPriority == egl::ContextPriority::InvalidEnum)
    {
        ASSERT(getContexts().empty());
        mContextsPriority = newPriority;
        return angle::Result::Continue;
    }

    vk::ProtectionTypes protectionTypes;
    protectionTypes.set(contextVk->getProtectionType());
    for (auto context : getContexts())
    {
        protectionTypes.set(vk::GetImpl(context.second)->getProtectionType());
    }

    {
        vk::ScopedQueueSerialIndex index;
        vk::Renderer *renderer = contextVk->getRenderer();
        ANGLE_TRY(renderer->allocateScopedQueueSerialIndex(&index));
        ANGLE_TRY(renderer->submitPriorityDependency(contextVk, protectionTypes, mContextsPriority,
                                                     newPriority, index.get()));
    }

    for (auto context : getContexts())
    {
        ContextVk *sharedContextVk = vk::GetImpl(context.second);

        ASSERT(sharedContextVk->getPriority() == mContextsPriority);
        sharedContextVk->setPriority(newPriority);
    }
    mContextsPriority = newPriority;

    return angle::Result::Continue;
}

void ShareGroupVk::onDestroy(const egl::Display *display)
{
    DisplayVk *displayVk   = vk::GetImpl(display);
    vk::Renderer *renderer = displayVk->getRenderer();

    mRefCountedEventsGarbageRecycler.destroy(renderer);

    for (std::unique_ptr<vk::BufferPool> &pool : mDefaultBufferPools)
    {
        if (pool)
        {
            // If any context uses the display texture share group, a BufferBlock may still be in
            // use by textures that outlived the ShareGroup.  Such non-empty BufferBlocks are put
            // on the Renderer's orphan list instead of being destroyed here.
            pool->destroy(renderer, mState.hasAnyContextWithDisplayTextureShareGroup());
        }
    }

    mPipelineLayoutCache.destroy(renderer);
    mDescriptorSetLayoutCache.destroy(renderer);

    mMetaDescriptorPools[DescriptorSetIndex::UniformsAndXfb].destroy(renderer);
    mMetaDescriptorPools[DescriptorSetIndex::Texture].destroy(renderer);
    mMetaDescriptorPools[DescriptorSetIndex::ShaderResource].destroy(renderer);

    mFramebufferCache.destroy(renderer);
    resetPrevTexture();

    mVertexInputGraphicsPipelineCache.destroy(displayVk);
    mFragmentOutputGraphicsPipelineCache.destroy(displayVk);
}

angle::Result ShareGroupVk::onMutableTextureUpload(ContextVk *contextVk, TextureVk *newTexture)
{
    return mTextureUpload.onMutableTextureUpload(contextVk, newTexture);
}

void ShareGroupVk::onTextureRelease(TextureVk *textureVk)
{
    mTextureUpload.onTextureRelease(textureVk);
}

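// Posts a worker-thread task that creates a monolithic pipeline, used when
// preferMonolithicPipelinesOverLibraries is enabled.  At most one such task is outstanding at a
// time, and postings are rate-limited by kMonolithicPipelineJobPeriod.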
angle::Result ShareGroupVk::scheduleMonolithicPipelineCreationTask(
    ContextVk *contextVk,
    vk::WaitableMonolithicPipelineCreationTask *taskOut)
{
    ASSERT(contextVk->getFeatures().preferMonolithicPipelinesOverLibraries.enabled);

    // Limit to a single task to avoid hogging all the cores.
    if (mMonolithicPipelineCreationEvent && !mMonolithicPipelineCreationEvent->isReady())
    {
        return angle::Result::Continue;
    }

    // Additionally, rate limit the job postings.
    double currentTime = angle::GetCurrentSystemTime();
    if (currentTime - mLastMonolithicPipelineJobTime < kMonolithicPipelineJobPeriod)
    {
        return angle::Result::Continue;
    }

    mLastMonolithicPipelineJobTime = currentTime;

    const vk::RenderPass *compatibleRenderPass = nullptr;
    // Pull in a compatible RenderPass to be used by the task.  This is done at the last minute,
    // just before the task is scheduled, to minimize the time this reference to the render pass
    // cache is held.  If the render pass cache needs to be cleared, the main thread will wait for
    // the job to complete.
    ANGLE_TRY(contextVk->getCompatibleRenderPass(taskOut->getTask()->getRenderPassDesc(),
                                                 &compatibleRenderPass));
    taskOut->setRenderPass(compatibleRenderPass);

    mMonolithicPipelineCreationEvent =
        contextVk->getRenderer()->getGlobalOps()->postMultiThreadWorkerTask(taskOut->getTask());

    taskOut->onSchedule(mMonolithicPipelineCreationEvent);

    return angle::Result::Continue;
}

void ShareGroupVk::waitForCurrentMonolithicPipelineCreationTask()
{
    if (mMonolithicPipelineCreationEvent)
    {
        mMonolithicPipelineCreationEvent->wait();
    }
}

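// Called when data is uploaded to a mutable texture.  When the application switches from one
// mutable texture to another, the previously updated texture's image is fully initialized
// (flushing its staged updates) if it has been consistently specified.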
angle::Result TextureUpload::onMutableTextureUpload(ContextVk *contextVk, TextureVk *newTexture)
{
    // This feature is currently disabled in the case of display-level texture sharing.
    ASSERT(!contextVk->hasDisplayTextureShareGroup());
    ASSERT(!newTexture->isImmutable());
    ASSERT(mPrevUploadedMutableTexture == nullptr || !mPrevUploadedMutableTexture->isImmutable());

    // If the previous texture is null, it should be set to the current texture. We also have to
    // make sure that the previous texture pointer is still a mutable texture. Otherwise, we skip
    // the optimization.
    if (mPrevUploadedMutableTexture == nullptr)
    {
        mPrevUploadedMutableTexture = newTexture;
        return angle::Result::Continue;
    }

    // Skip the optimization if we have not switched to a new texture yet.
    if (mPrevUploadedMutableTexture == newTexture)
    {
        return angle::Result::Continue;
    }

    // If the mutable texture is consistently specified, we initialize a full mip chain for it.
    if (mPrevUploadedMutableTexture->isMutableTextureConsistentlySpecifiedForFlush())
    {
        ANGLE_TRY(mPrevUploadedMutableTexture->ensureImageInitialized(
            contextVk, ImageMipLevels::EnabledLevels));
        contextVk->getPerfCounters().mutableTexturesUploaded++;
    }

    // Update the mutable texture pointer with the new pointer for the next potential flush.
    mPrevUploadedMutableTexture = newTexture;

    return angle::Result::Continue;
}

void TextureUpload::onTextureRelease(TextureVk *textureVk)
{
    if (mPrevUploadedMutableTexture == textureVk)
    {
        resetPrevTexture();
    }
}

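// Returns the default BufferPool for the given memory type index, creating it on first use.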
vk::BufferPool *ShareGroupVk::getDefaultBufferPool(vk::Renderer *renderer,
                                                   VkDeviceSize size,
                                                   uint32_t memoryTypeIndex,
                                                   BufferUsageType usageType)
{
    if (!mDefaultBufferPools[memoryTypeIndex])
    {
        const vk::Allocator &allocator = renderer->getAllocator();
        VkBufferUsageFlags usageFlags  = GetDefaultBufferUsageFlags(renderer);

        VkMemoryPropertyFlags memoryPropertyFlags;
        allocator.getMemoryTypeProperties(memoryTypeIndex, &memoryPropertyFlags);

        std::unique_ptr<vk::BufferPool> pool  = std::make_unique<vk::BufferPool>();
        vma::VirtualBlockCreateFlags vmaFlags = vma::VirtualBlockCreateFlagBits::GENERAL;
        pool->initWithFlags(renderer, vmaFlags, usageFlags, 0, memoryTypeIndex,
                            memoryPropertyFlags);
        mDefaultBufferPools[memoryTypeIndex] = std::move(pool);
    }

    return mDefaultBufferPools[memoryTypeIndex].get();
}

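// Frees empty buffer blocks in the default pools.  Called periodically (see
// isDueForBufferPoolPrune) so that memory backing destroyed suballocations is released.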
void ShareGroupVk::pruneDefaultBufferPools(vk::Renderer *renderer)
{
    mLastPruneTime = angle::GetCurrentSystemTime();

    // Bail out if no suballocations have been destroyed since the last prune.
    if (renderer->getSuballocationDestroyedSize() == 0)
    {
        return;
    }

    for (std::unique_ptr<vk::BufferPool> &pool : mDefaultBufferPools)
    {
        if (pool)
        {
            pool->pruneEmptyBuffers(renderer);
        }
    }

    renderer->onBufferPoolPrune();

#if ANGLE_ENABLE_BUFFER_POOL_STATS_LOGGING
    logBufferPools();
#endif
}

bool ShareGroupVk::isDueForBufferPoolPrune(vk::Renderer *renderer)
{
    // Ensure we periodically prune to maintain the heuristic information.
    double timeElapsed = angle::GetCurrentSystemTime() - mLastPruneTime;
    if (timeElapsed > kTimeElapsedForPruneDefaultBufferPool)
    {
        return true;
    }

    // If we have destroyed a lot of memory, also prune to ensure memory gets freed as soon as
    // possible.
    if (renderer->getSuballocationDestroyedSize() >= kMaxTotalEmptyBufferBytes)
    {
        return true;
    }

    return false;
}

void ShareGroupVk::calculateTotalBufferCount(size_t *bufferCount, VkDeviceSize *totalSize) const
{
    *bufferCount = 0;
    *totalSize   = 0;
    for (const std::unique_ptr<vk::BufferPool> &pool : mDefaultBufferPools)
    {
        if (pool)
        {
            *bufferCount += pool->getBufferCount();
            *totalSize += pool->getMemorySize();
        }
    }
}

void ShareGroupVk::logBufferPools() const
{
    for (size_t i = 0; i < mDefaultBufferPools.size(); i++)
    {
        const std::unique_ptr<vk::BufferPool> &pool = mDefaultBufferPools[i];
        if (pool && pool->getBufferCount() > 0)
        {
            std::ostringstream log;
            pool->addStats(&log);
            INFO() << "Pool[" << i << "]:" << log.str();
        }
    }
}
}  // namespace rx