//
// Copyright 2023 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// ShareGroupVk.cpp:
//    Implements the class methods for ShareGroupVk.
//

#include "libANGLE/renderer/vulkan/ShareGroupVk.h"

#include "common/debug.h"
#include "common/system_utils.h"
#include "libANGLE/Context.h"
#include "libANGLE/Display.h"
#include "libANGLE/renderer/vulkan/BufferVk.h"
#include "libANGLE/renderer/vulkan/ContextVk.h"
#include "libANGLE/renderer/vulkan/DeviceVk.h"
#include "libANGLE/renderer/vulkan/ImageVk.h"
#include "libANGLE/renderer/vulkan/SurfaceVk.h"
#include "libANGLE/renderer/vulkan/SyncVk.h"
#include "libANGLE/renderer/vulkan/TextureVk.h"
#include "libANGLE/renderer/vulkan/VkImageImageSiblingVk.h"
#include "libANGLE/renderer/vulkan/vk_renderer.h"

namespace rx
{

namespace
{
// How often monolithic pipelines should be created, if preferMonolithicPipelinesOverLibraries is
// enabled. Pipeline creation is typically O(hundreds of microseconds). A value of 2ms is chosen
// arbitrarily; it ensures that there is always at most one pipeline job in progress, while
// maintaining a throughput of up to 500 pipelines per second for heavier applications.
constexpr double kMonolithicPipelineJobPeriod = 0.002;

// Time interval in seconds after which the default buffer pools should be pruned.
constexpr double kTimeElapsedForPruneDefaultBufferPool = 0.25;

bool ValidateIdenticalPriority(const egl::ContextMap &contexts,
                               egl::ContextPriority sharedPriority)
{
    if (sharedPriority == egl::ContextPriority::InvalidEnum)
    {
        return false;
    }

    for (auto context : contexts)
    {
        const ContextVk *contextVk = vk::GetImpl(context.second);
        if (contextVk->getPriority() != sharedPriority)
        {
            return false;
        }
    }

    return true;
}
}  // namespace

// Set to true to log buffer pool stats into the INFO stream.
#define ANGLE_ENABLE_BUFFER_POOL_STATS_LOGGING false

ShareGroupVk::ShareGroupVk(const egl::ShareGroupState &state, vk::Renderer *renderer)
    : ShareGroupImpl(state),
      mRenderer(renderer),
      mCurrentFrameCount(0),
      mContextsPriority(egl::ContextPriority::InvalidEnum),
      mIsContextsPriorityLocked(false),
      mLastMonolithicPipelineJobTime(0)
{
    mLastPruneTime = angle::GetCurrentSystemTime();
}

void ShareGroupVk::onContextAdd()
{
    ASSERT(ValidateIdenticalPriority(getContexts(), mContextsPriority));
}

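// Brings a newly created context's priority in line with the rest of the share group. The first
// context to arrive establishes the group's priority. After that, a context requesting an equal
// or lower priority (or any context while the priority is locked) is simply assigned the group's
// priority, while a higher-priority request triggers updateContextsPriority() to raise every
// context in the group. For example, if the group runs at Medium and a new context requests High,
// all contexts are switched to High; if it requests Low, the new context is set to Medium.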
angle::Result ShareGroupVk::unifyContextsPriority(ContextVk *newContextVk)
{
    const egl::ContextPriority newContextPriority = newContextVk->getPriority();
    ASSERT(newContextPriority != egl::ContextPriority::InvalidEnum);

    if (mContextsPriority == egl::ContextPriority::InvalidEnum)
    {
        ASSERT(!mIsContextsPriorityLocked);
        ASSERT(getContexts().empty());
        mContextsPriority = newContextPriority;
        return angle::Result::Continue;
    }

    static_assert(egl::ContextPriority::Low < egl::ContextPriority::Medium);
    static_assert(egl::ContextPriority::Medium < egl::ContextPriority::High);
    if (mContextsPriority >= newContextPriority || mIsContextsPriorityLocked)
    {
        newContextVk->setPriority(mContextsPriority);
        return angle::Result::Continue;
    }

    ANGLE_TRY(updateContextsPriority(newContextVk, newContextPriority));

    return angle::Result::Continue;
}

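// Switches the share group to the default (Medium) priority if needed and locks it there, so that
// contexts added afterwards can no longer change the group's priority.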
angle::Result ShareGroupVk::lockDefaultContextsPriority(ContextVk *contextVk)
{
    constexpr egl::ContextPriority kDefaultPriority = egl::ContextPriority::Medium;
    if (!mIsContextsPriorityLocked)
    {
        if (mContextsPriority != kDefaultPriority)
        {
            ANGLE_TRY(updateContextsPriority(contextVk, kDefaultPriority));
        }
        mIsContextsPriorityLocked = true;
    }
    ASSERT(mContextsPriority == kDefaultPriority);
    return angle::Result::Continue;
}

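// Changes the priority of every context in the share group to |newPriority|. A priority
// dependency covering all protection types used by the contexts is submitted first, using a
// temporarily allocated queue serial index, before the contexts themselves are switched over.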
angle::Result ShareGroupVk::updateContextsPriority(ContextVk *contextVk,
                                                   egl::ContextPriority newPriority)
{
    ASSERT(!mIsContextsPriorityLocked);
    ASSERT(newPriority != egl::ContextPriority::InvalidEnum);
    ASSERT(newPriority != mContextsPriority);
    if (mContextsPriority == egl::ContextPriority::InvalidEnum)
    {
        ASSERT(getContexts().empty());
        mContextsPriority = newPriority;
        return angle::Result::Continue;
    }

    vk::ProtectionTypes protectionTypes;
    protectionTypes.set(contextVk->getProtectionType());
    for (auto context : getContexts())
    {
        protectionTypes.set(vk::GetImpl(context.second)->getProtectionType());
    }

    {
        vk::ScopedQueueSerialIndex index;
        ANGLE_TRY(mRenderer->allocateScopedQueueSerialIndex(&index));
        ANGLE_TRY(mRenderer->submitPriorityDependency(contextVk, protectionTypes, mContextsPriority,
                                                      newPriority, index.get()));
    }

    for (auto context : getContexts())
    {
        ContextVk *sharedContextVk = vk::GetImpl(context.second);

        ASSERT(sharedContextVk->getPriority() == mContextsPriority);
        sharedContextVk->setPriority(newPriority);
    }
    mContextsPriority = newPriority;

    return angle::Result::Continue;
}

void ShareGroupVk::onDestroy(const egl::Display *display)
{
    mRefCountedEventsGarbageRecycler.destroy(mRenderer);

    for (std::unique_ptr<vk::BufferPool> &pool : mDefaultBufferPools)
    {
        if (pool)
        {
            // If any context uses the display texture share group, it is expected that a
            // BufferBlock may still be in use by textures that outlive the ShareGroup. The
            // non-empty BufferBlock will be put into the Renderer's orphan list instead.
            pool->destroy(mRenderer, mState.hasAnyContextWithDisplayTextureShareGroup());
        }
    }

    mPipelineLayoutCache.destroy(mRenderer);
    mDescriptorSetLayoutCache.destroy(mRenderer);

    mMetaDescriptorPools[DescriptorSetIndex::UniformsAndXfb].destroy(mRenderer);
    mMetaDescriptorPools[DescriptorSetIndex::Texture].destroy(mRenderer);
    mMetaDescriptorPools[DescriptorSetIndex::ShaderResource].destroy(mRenderer);

    mFramebufferCache.destroy(mRenderer);
    resetPrevTexture();
}

angle::Result ShareGroupVk::onMutableTextureUpload(ContextVk *contextVk, TextureVk *newTexture)
{
    return mTextureUpload.onMutableTextureUpload(contextVk, newTexture);
}

void ShareGroupVk::onTextureRelease(TextureVk *textureVk)
{
    mTextureUpload.onTextureRelease(textureVk);
}

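// Posts a worker-thread task that creates a monolithic pipeline for a pipeline that was initially
// built out of pipeline libraries. At most one such task is in flight at any time, and postings
// are additionally rate-limited by kMonolithicPipelineJobPeriod.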
angle::Result ShareGroupVk::scheduleMonolithicPipelineCreationTask(
    ContextVk *contextVk,
    vk::WaitableMonolithicPipelineCreationTask *taskOut)
{
    ASSERT(contextVk->getFeatures().preferMonolithicPipelinesOverLibraries.enabled);

    // Limit to a single task to avoid hogging all the cores.
    if (mMonolithicPipelineCreationEvent && !mMonolithicPipelineCreationEvent->isReady())
    {
        return angle::Result::Continue;
    }

    // Additionally, rate limit the job postings.
    double currentTime = angle::GetCurrentSystemTime();
    if (currentTime - mLastMonolithicPipelineJobTime < kMonolithicPipelineJobPeriod)
    {
        return angle::Result::Continue;
    }

    mLastMonolithicPipelineJobTime = currentTime;

    const vk::RenderPass *compatibleRenderPass = nullptr;
    // Pull in a compatible RenderPass to be used by the task. This is done at the last minute,
    // just before the task is scheduled, to minimize the time this reference to the render pass
    // cache is held. If the render pass cache needs to be cleared, the main thread will wait
    // for the job to complete.
    ANGLE_TRY(contextVk->getCompatibleRenderPass(taskOut->getTask()->getRenderPassDesc(),
                                                 &compatibleRenderPass));
    taskOut->setRenderPass(compatibleRenderPass);

    mMonolithicPipelineCreationEvent =
        mRenderer->getGlobalOps()->postMultiThreadWorkerTask(taskOut->getTask());

    taskOut->onSchedule(mMonolithicPipelineCreationEvent);

    return angle::Result::Continue;
}

void ShareGroupVk::waitForCurrentMonolithicPipelineCreationTask()
{
    if (mMonolithicPipelineCreationEvent)
    {
        mMonolithicPipelineCreationEvent->wait();
    }
}

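// Heuristic for flushing mutable texture uploads early: when the application switches from
// specifying one mutable texture to another, and the previous texture reports that it was
// consistently specified (isMutableTextureConsistentlySpecifiedForFlush()), its image is
// initialized with the enabled mip levels before tracking moves on to the new texture.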
angle::Result TextureUpload::onMutableTextureUpload(ContextVk *contextVk, TextureVk *newTexture)
{
    // This feature is currently disabled in the case of display-level texture sharing.
    ASSERT(!contextVk->hasDisplayTextureShareGroup());
    ASSERT(!newTexture->isImmutable());
    ASSERT(mPrevUploadedMutableTexture == nullptr || !mPrevUploadedMutableTexture->isImmutable());

    // If the previous texture is null, it should be set to the current texture. We also have to
    // make sure that the previous texture pointer is still a mutable texture. Otherwise, we skip
    // the optimization.
    if (mPrevUploadedMutableTexture == nullptr)
    {
        mPrevUploadedMutableTexture = newTexture;
        return angle::Result::Continue;
    }

    // Skip the optimization if we have not switched to a new texture yet.
    if (mPrevUploadedMutableTexture == newTexture)
    {
        return angle::Result::Continue;
    }

    // If the mutable texture is consistently specified, we initialize a full mip chain for it.
    if (mPrevUploadedMutableTexture->isMutableTextureConsistentlySpecifiedForFlush())
    {
        ANGLE_TRY(mPrevUploadedMutableTexture->ensureImageInitialized(
            contextVk, ImageMipLevels::EnabledLevels));
        contextVk->getPerfCounters().mutableTexturesUploaded++;
    }

    // Update the mutable texture pointer with the new pointer for the next potential flush.
    mPrevUploadedMutableTexture = newTexture;

    return angle::Result::Continue;
}

void TextureUpload::onTextureRelease(TextureVk *textureVk)
{
    if (mPrevUploadedMutableTexture == textureVk)
    {
        resetPrevTexture();
    }
}

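// Called at every framebuffer boundary: prunes the default buffer pools when due, cleans up
// refcounted-event garbage, and advances the frame counter.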
void ShareGroupVk::onFramebufferBoundary()
{
    if (isDueForBufferPoolPrune())
    {
        pruneDefaultBufferPools();
    }

    // Always clean up event garbage and destroy the excessive free list at the frame boundary.
    cleanupRefCountedEventGarbage();

    mCurrentFrameCount++;
}

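// Returns the default BufferPool associated with |memoryTypeIndex|, lazily creating it on first
// use with the default buffer usage flags and the memory properties of that memory type.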
vk::BufferPool *ShareGroupVk::getDefaultBufferPool(VkDeviceSize size,
                                                   uint32_t memoryTypeIndex,
                                                   BufferUsageType usageType)
{
    if (!mDefaultBufferPools[memoryTypeIndex])
    {
        const vk::Allocator &allocator = mRenderer->getAllocator();
        VkBufferUsageFlags usageFlags  = GetDefaultBufferUsageFlags(mRenderer);

        VkMemoryPropertyFlags memoryPropertyFlags;
        allocator.getMemoryTypeProperties(memoryTypeIndex, &memoryPropertyFlags);

        std::unique_ptr<vk::BufferPool> pool  = std::make_unique<vk::BufferPool>();
        vma::VirtualBlockCreateFlags vmaFlags = vma::VirtualBlockCreateFlagBits::GENERAL;
        pool->initWithFlags(mRenderer, vmaFlags, usageFlags, 0, memoryTypeIndex,
                            memoryPropertyFlags);
        mDefaultBufferPools[memoryTypeIndex] = std::move(pool);
    }

    return mDefaultBufferPools[memoryTypeIndex].get();
}

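// Releases empty buffer blocks from all default pools. This is skipped entirely if no
// suballocations have been destroyed since the last prune; otherwise the renderer is notified
// once pruning is done.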
void ShareGroupVk::pruneDefaultBufferPools()
{
    mLastPruneTime = angle::GetCurrentSystemTime();

    // Bail out if no suballocations have been destroyed since the last prune.
    if (mRenderer->getSuballocationDestroyedSize() == 0)
    {
        return;
    }

    for (std::unique_ptr<vk::BufferPool> &pool : mDefaultBufferPools)
    {
        if (pool)
        {
            pool->pruneEmptyBuffers(mRenderer);
        }
    }

    mRenderer->onBufferPoolPrune();

#if ANGLE_ENABLE_BUFFER_POOL_STATS_LOGGING
    logBufferPools();
#endif
}

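// A prune is due either when kTimeElapsedForPruneDefaultBufferPool has passed since the last
// prune, or when the total size of destroyed suballocations reaches kMaxTotalEmptyBufferBytes.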
bool ShareGroupVk::isDueForBufferPoolPrune()
{
    // Ensure we periodically prune to maintain the heuristic information.
    double timeElapsed = angle::GetCurrentSystemTime() - mLastPruneTime;
    if (timeElapsed > kTimeElapsedForPruneDefaultBufferPool)
    {
        return true;
    }

    // If we have destroyed a lot of memory, also prune to ensure memory gets freed as soon as
    // possible.
    if (mRenderer->getSuballocationDestroyedSize() >= kMaxTotalEmptyBufferBytes)
    {
        return true;
    }

    return false;
}

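// Sums the buffer count and total memory size across all default buffer pools.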
void ShareGroupVk::calculateTotalBufferCount(size_t *bufferCount, VkDeviceSize *totalSize) const
{
    *bufferCount = 0;
    *totalSize   = 0;
    for (const std::unique_ptr<vk::BufferPool> &pool : mDefaultBufferPools)
    {
        if (pool)
        {
            *bufferCount += pool->getBufferCount();
            *totalSize += pool->getMemorySize();
        }
    }
}

void ShareGroupVk::logBufferPools() const
{
    for (size_t i = 0; i < mDefaultBufferPools.size(); i++)
    {
        const std::unique_ptr<vk::BufferPool> &pool = mDefaultBufferPools[i];
        if (pool && pool->getBufferCount() > 0)
        {
            std::ostringstream log;
            pool->addStats(&log);
            INFO() << "Pool[" << i << "]:" << log.str();
        }
    }
}
}  // namespace rx