//
// Copyright 2024 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// RefCountedEvent:
//    Manages reference count of VkEvent and its associated functions.
//
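// A typical lifecycle, sketched for illustration (hypothetical call site; the stage
// value and surrounding code are not verbatim):
//
//   RefCountedEvent event;
//   event.init(context, eventStage);   // fetched from the recycler, or newly created
//   // ... record vkCmdSetEvent / vkCmdWaitEvents against event.getEvent() ...
//   event.release(context);            // the last release hands the VkEvent to the
//                                      // garbage recycler for reset and reuse
//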

#include "libANGLE/renderer/vulkan/vk_ref_counted_event.h"
#include "libANGLE/renderer/vulkan/vk_helpers.h"
#include "libANGLE/renderer/vulkan/vk_renderer.h"

namespace rx
{
namespace vk
{
namespace
{
void DestroyRefCountedEvents(VkDevice device, RefCountedEventCollector &events)
{
    while (!events.empty())
    {
        events.back().destroy(device);
        events.pop_back();
    }
}
}  // namespace

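// Initializes the event, preferring a recycled VkEvent over creating a new one. Returns
// false only if VkEvent creation fails even after a garbage cleanup and retry.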
bool RefCountedEvent::init(Context *context, EventStage eventStage)
{
    ASSERT(mHandle == nullptr);
    ASSERT(eventStage != EventStage::InvalidEnum);

    // First try the recycler. We must issue VkCmdResetEvent before VkCmdSetEvent, so
    // recycled events have already been reset.
    if (context->getRefCountedEventsGarbageRecycler()->fetch(context->getRenderer(), this))
    {
        ASSERT(valid());
        ASSERT(!mHandle->isReferenced());
    }
    else
    {
        // If we failed to fetch from the recycler, create a new event.
        mHandle = new RefCounted<EventAndStage>;
        VkEventCreateInfo createInfo = {};
        createInfo.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
        // Use a device-only event for performance reasons.
        createInfo.flags = context->getFeatures().supportsSynchronization2.enabled
                               ? VK_EVENT_CREATE_DEVICE_ONLY_BIT_KHR
                               : 0;
        VkResult result = mHandle->get().event.init(context->getDevice(), createInfo);
        if (result != VK_SUCCESS)
        {
            WARN() << "event.init failed. Cleaning up garbage and retrying";
            // Proactively clean up garbage and retry.
            context->getRefCountedEventsGarbageRecycler()->cleanup(context->getRenderer());
            result = mHandle->get().event.init(context->getDevice(), createInfo);
            if (result != VK_SUCCESS)
            {
                // Drivers can usually allocate a huge number of VkEvents, and we should never
                // need that many under normal circumstances. If allocation fails, there is a
                // good chance we have a leak somewhere. This macro should help us catch such
                // potential bugs on the bots if that happens.
                UNREACHABLE();
                // If creation still fails, just return. An invalid event will trigger the
                // pipelineBarrier code path.
                return false;
            }
        }
    }

    mHandle->addRef();
    mHandle->get().eventStage = eventStage;
    return true;
}

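// Releases one reference. On the last release the event is not destroyed, but handed to
// the context's garbage recycler so that it can later be reset and reused.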
void RefCountedEvent::release(Context *context)
{
    if (mHandle != nullptr)
    {
        releaseImpl(context->getRenderer(), context->getRefCountedEventsGarbageRecycler());
    }
}

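// Same as above, but recycles through the renderer-level recycler, for call sites that
// do not have a Context.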
void RefCountedEvent::release(Renderer *renderer)
{
    if (mHandle != nullptr)
    {
        releaseImpl(renderer, renderer->getRefCountedEventRecycler());
    }
}

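// Common implementation behind both release() overloads.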
template <typename RecyclerT>
void RefCountedEvent::releaseImpl(Renderer *renderer, RecyclerT *recycler)
{
    ASSERT(mHandle != nullptr);
    // This should never be called from the async cleanup thread, since the refcount is
    // not atomic. It is expected to be called only under the context share lock.
    ASSERT(std::this_thread::get_id() != renderer->getCleanUpThreadId());

    const bool isLastReference = mHandle->getAndReleaseRef() == 1;
    if (isLastReference)
    {
        ASSERT(recycler != nullptr);
        recycler->recycle(std::move(*this));
        ASSERT(mHandle == nullptr);
    }
    else
    {
        mHandle = nullptr;
    }
}

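// Destroys the underlying VkEvent. Only valid to call once no references remain.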
void RefCountedEvent::destroy(VkDevice device)
{
    ASSERT(mHandle != nullptr);
    ASSERT(!mHandle->isReferenced());
    mHandle->get().event.destroy(device);
    SafeDelete(mHandle);
}

VkPipelineStageFlags RefCountedEvent::getPipelineStageMask(Renderer *renderer) const
{
    return renderer->getPipelineStageMask(getEventStage());
}

// RefCountedEventArray implementation.
void RefCountedEventArray::release(Renderer *renderer)
{
    for (EventStage eventStage : mBitMask)
    {
        ASSERT(mEvents[eventStage].valid());
        mEvents[eventStage].release(renderer);
    }
    mBitMask.reset();
}

void RefCountedEventArray::release(Context *context)
{
    for (EventStage eventStage : mBitMask)
    {
        ASSERT(mEvents[eventStage].valid());
        mEvents[eventStage].release(context);
    }
    mBitMask.reset();
}

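// Moves every tracked event into the given collector (for deferred release once the
// associated queue serial completes) and clears the array.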
void RefCountedEventArray::releaseToEventCollector(RefCountedEventCollector *eventCollector)
{
    for (EventStage eventStage : mBitMask)
    {
        eventCollector->emplace_back(std::move(mEvents[eventStage]));
    }
    mBitMask.reset();
}

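// Ensures an event exists for the given stage; the event is created on first use and
// reused afterwards.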
bool RefCountedEventArray::initEventAtStage(Context *context, EventStage eventStage)
{
    if (mBitMask[eventStage])
    {
        return true;
    }

    // Create the event if we have not done so yet. Otherwise the already-created event
    // is used.
    if (!mEvents[eventStage].init(context, eventStage))
    {
        return false;
    }
    mBitMask.set(eventStage);
    return true;
}

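// Records vkCmdSetEvent into the command buffer for every tracked event, using the
// pipeline stage mask derived from the event's stage.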
template <typename CommandBufferT>
void RefCountedEventArray::flushSetEvents(Renderer *renderer, CommandBufferT *commandBuffer) const
{
    for (EventStage eventStage : mBitMask)
    {
        VkPipelineStageFlags pipelineStageFlags = renderer->getPipelineStageMask(eventStage);
        commandBuffer->setEvent(mEvents[eventStage].getEvent().getHandle(), pipelineStageFlags);
    }
}

template void RefCountedEventArray::flushSetEvents<VulkanSecondaryCommandBuffer>(
    Renderer *renderer,
    VulkanSecondaryCommandBuffer *commandBuffer) const;
template void RefCountedEventArray::flushSetEvents<priv::SecondaryCommandBuffer>(
    Renderer *renderer,
    priv::SecondaryCommandBuffer *commandBuffer) const;
template void RefCountedEventArray::flushSetEvents<priv::CommandBuffer>(
    Renderer *renderer,
    priv::CommandBuffer *commandBuffer) const;

// EventArray implementation.
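// Copies the raw VkEvent handles and per-stage pipeline masks out of a
// RefCountedEventArray, without taking references.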
void EventArray::init(Renderer *renderer, const RefCountedEventArray &refCountedEventArray)
{
    mBitMask = refCountedEventArray.getBitMask();
    for (EventStage eventStage : mBitMask)
    {
        ASSERT(refCountedEventArray.getEvent(eventStage).valid());
        mEvents[eventStage] = refCountedEventArray.getEvent(eventStage).getEvent().getHandle();
        mPipelineStageFlags[eventStage] = renderer->getPipelineStageMask(eventStage);
    }
}

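// Records vkCmdSetEvent for each pending handle into the primary command buffer, then
// clears the array.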
void EventArray::flushSetEvents(PrimaryCommandBuffer *primary)
{
    for (EventStage eventStage : mBitMask)
    {
        ASSERT(mEvents[eventStage] != VK_NULL_HANDLE);
        primary->setEvent(mEvents[eventStage], mPipelineStageFlags[eventStage]);
        mEvents[eventStage] = VK_NULL_HANDLE;
    }
    mBitMask.reset();
}

// RefCountedEventsGarbage implementation.
void RefCountedEventsGarbage::destroy(Renderer *renderer)
{
    ASSERT(renderer->hasQueueSerialFinished(mQueueSerial));
    while (!mRefCountedEvents.empty())
    {
        ASSERT(mRefCountedEvents.back().valid());
        mRefCountedEvents.back().release(renderer);
        mRefCountedEvents.pop_back();
    }
}

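// If the garbage's queue serial has finished, releases every event into the recycler and
// returns true. Otherwise leaves the garbage intact and returns false.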
bool RefCountedEventsGarbage::releaseIfComplete(Renderer *renderer,
                                                RefCountedEventsGarbageRecycler *recycler)
{
    if (!renderer->hasQueueSerialFinished(mQueueSerial))
    {
        return false;
    }

    while (!mRefCountedEvents.empty())
    {
        ASSERT(mRefCountedEvents.back().valid());
        mRefCountedEvents.back().releaseImpl(renderer, recycler);
        ASSERT(!mRefCountedEvents.back().valid());
        mRefCountedEvents.pop_back();
    }
    return true;
}

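// Same completeness check, but moves the whole collector into releasedBucket instead of
// releasing the events one by one.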
bool RefCountedEventsGarbage::moveIfComplete(Renderer *renderer,
                                             std::deque<RefCountedEventCollector> *releasedBucket)
{
    if (!renderer->hasQueueSerialFinished(mQueueSerial))
    {
        return false;
    }

    releasedBucket->emplace_back(std::move(mRefCountedEvents));
    return true;
}

// RefCountedEventRecycler implementation.
void RefCountedEventRecycler::destroy(VkDevice device)
{
    std::lock_guard<angle::SimpleMutex> lock(mMutex);

    while (!mEventsToReset.empty())
    {
        DestroyRefCountedEvents(device, mEventsToReset.back());
        mEventsToReset.pop_back();
    }

    ASSERT(mResettingQueue.empty());

    while (!mEventsToReuse.empty())
    {
        DestroyRefCountedEvents(device, mEventsToReuse.back());
        mEventsToReuse.pop_back();
    }
}

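// Records vkCmdResetEvent for every collected event into the primary command buffer and
// tags each batch with the given queue serial; once that serial finishes on the GPU, the
// events become safe to reuse.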
void RefCountedEventRecycler::resetEvents(ErrorContext *context,
                                          const QueueSerial queueSerial,
                                          PrimaryCommandBuffer *commandbuffer)
{
    std::lock_guard<angle::SimpleMutex> lock(mMutex);

    if (mEventsToReset.empty())
    {
        return;
    }

    Renderer *renderer = context->getRenderer();
    while (!mEventsToReset.empty())
    {
        RefCountedEventCollector &events = mEventsToReset.back();
        ASSERT(!events.empty());
        for (const RefCountedEvent &refCountedEvent : events)
        {
            VkPipelineStageFlags stageMask = refCountedEvent.getPipelineStageMask(renderer);
            commandbuffer->resetEvent(refCountedEvent.getEvent().getHandle(), stageMask);
        }
        mResettingQueue.emplace(queueSerial, std::move(events));
        mEventsToReset.pop_back();
    }
}

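// Walks the resetting queue in submission order, moving each fully-reset batch to the
// reuse list. Stops at the first batch whose queue serial has not finished yet.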
size_t RefCountedEventRecycler::cleanupResettingEvents(Renderer *renderer)
{
    size_t eventsReleased = 0;
    std::lock_guard<angle::SimpleMutex> lock(mMutex);
    while (!mResettingQueue.empty())
    {
        bool released = mResettingQueue.front().moveIfComplete(renderer, &mEventsToReuse);
        if (released)
        {
            mResettingQueue.pop();
            ++eventsReleased;
        }
        else
        {
            break;
        }
    }
    return eventsReleased;
}

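// Hands one batch of already-reset events to the caller. Returns false if no batch is
// available.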
bool RefCountedEventRecycler::fetchEventsToReuse(RefCountedEventCollector *eventsToReuseOut)
{
    ASSERT(eventsToReuseOut != nullptr);
    ASSERT(eventsToReuseOut->empty());
    std::lock_guard<angle::SimpleMutex> lock(mMutex);
    if (mEventsToReuse.empty())
    {
        return false;
    }
    eventsToReuseOut->swap(mEventsToReuse.back());
    mEventsToReuse.pop_back();
    return true;
}

// RefCountedEventsGarbageRecycler implementation.
RefCountedEventsGarbageRecycler::~RefCountedEventsGarbageRecycler()
{
    ASSERT(mEventsToReset.empty());
    ASSERT(mGarbageQueue.empty());
    ASSERT(mEventsToReuse.empty());
    ASSERT(mGarbageCount == 0);
}

void RefCountedEventsGarbageRecycler::destroy(Renderer *renderer)
{
    VkDevice device = renderer->getDevice();
    DestroyRefCountedEvents(device, mEventsToReset);
    ASSERT(mGarbageQueue.empty());
    ASSERT(mGarbageCount == 0);
    mEventsToReuse.destroy(device);
}

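// Drains completed garbage: events whose queue serial has finished are released into
// mEventsToReset, and the whole batch is then forwarded to the renderer's recycler so
// the events can be reset on the GPU.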
void RefCountedEventsGarbageRecycler::cleanup(Renderer *renderer)
{
    // First clean up already-completed events and add them to mEventsToReset.
    while (!mGarbageQueue.empty())
    {
        size_t count = mGarbageQueue.front().size();
        bool released = mGarbageQueue.front().releaseIfComplete(renderer, this);
        if (released)
        {
            mGarbageCount -= count;
            mGarbageQueue.pop();
        }
        else
        {
            break;
        }
    }

    // Move mEventsToReset to the renderer so that the events can be reset.
    if (!mEventsToReset.empty())
    {
        renderer->getRefCountedEventRecycler()->recycle(std::move(mEventsToReset));
    }
}

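// Fetches a reusable event, refilling the local free list from the renderer's recycler
// when it runs dry.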
bool RefCountedEventsGarbageRecycler::fetch(Renderer *renderer, RefCountedEvent *outObject)
{
    if (mEventsToReuse.empty())
    {
        // Retrieve a list of ready-to-reuse events from the renderer.
        RefCountedEventCollector events;
        if (!renderer->getRefCountedEventRecycler()->fetchEventsToReuse(&events))
        {
            return false;
        }
        mEventsToReuse.refill(std::move(events));
        ASSERT(!mEventsToReuse.empty());
    }
    mEventsToReuse.fetch(outObject);
    return true;
}

// EventBarrier implementation.
void EventBarrier::addDiagnosticsString(std::ostringstream &out) const
{
    if (mMemoryBarrierSrcAccess != 0 || mMemoryBarrierDstAccess != 0)
    {
        out << "Src: 0x" << std::hex << mMemoryBarrierSrcAccess << " → Dst: 0x" << std::hex
            << mMemoryBarrierDstAccess << std::endl;
    }
}

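// Records the vkCmdWaitEvents call for this barrier, including its memory barrier and
// the optional image barrier.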
void EventBarrier::execute(PrimaryCommandBuffer *primary)
{
    if (isEmpty())
    {
        return;
    }
    ASSERT(mEvent != VK_NULL_HANDLE);
    ASSERT(mImageMemoryBarrierCount == 0 ||
           (mImageMemoryBarrierCount == 1 && mImageMemoryBarrier.image != VK_NULL_HANDLE));

    // Issue the vkCmdWaitEvents call.
    VkMemoryBarrier memoryBarrier = {};
    memoryBarrier.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER;
    memoryBarrier.srcAccessMask = mMemoryBarrierSrcAccess;
    memoryBarrier.dstAccessMask = mMemoryBarrierDstAccess;

    primary->waitEvents(1, &mEvent, mSrcStageMask, mDstStageMask, 1, &memoryBarrier, 0, nullptr,
                        mImageMemoryBarrierCount,
                        mImageMemoryBarrierCount == 0 ? nullptr : &mImageMemoryBarrier);
}

// EventBarrierArray implementation.
void EventBarrierArray::addAdditionalStageAccess(const RefCountedEvent &waitEvent,
                                                 VkPipelineStageFlags dstStageMask,
                                                 VkAccessFlags dstAccess)
{
    for (EventBarrier &barrier : mBarriers)
    {
        if (barrier.hasEvent(waitEvent.getEvent().getHandle()))
        {
            barrier.addAdditionalStageAccess(dstStageMask, dstAccess);
            return;
        }
    }
    UNREACHABLE();
}

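// Appends a barrier that waits on the given event with a plain memory barrier.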
void EventBarrierArray::addEventMemoryBarrier(Renderer *renderer,
                                              const RefCountedEvent &waitEvent,
                                              VkAccessFlags srcAccess,
                                              VkPipelineStageFlags dstStageMask,
                                              VkAccessFlags dstAccess)
{
    ASSERT(waitEvent.valid());
    VkPipelineStageFlags srcStageFlags = waitEvent.getPipelineStageMask(renderer);
    mBarriers.emplace_back(srcStageFlags, dstStageMask, srcAccess, dstAccess,
                           waitEvent.getEvent().getHandle());
}

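// Appends a barrier that waits on the given event with the provided image memory
// barrier.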
void EventBarrierArray::addEventImageBarrier(Renderer *renderer,
                                             const RefCountedEvent &waitEvent,
                                             VkPipelineStageFlags dstStageMask,
                                             const VkImageMemoryBarrier &imageMemoryBarrier)
{
    ASSERT(waitEvent.valid());
    VkPipelineStageFlags srcStageFlags = waitEvent.getPipelineStageMask(renderer);
    mBarriers.emplace_back(srcStageFlags, dstStageMask, waitEvent.getEvent().getHandle(),
                           imageMemoryBarrier);
}

void EventBarrierArray::execute(Renderer *renderer, PrimaryCommandBuffer *primary)
{
    while (!mBarriers.empty())
    {
        mBarriers.back().execute(primary);
        mBarriers.pop_back();
    }
    reset();
}

void EventBarrierArray::addDiagnosticsString(std::ostringstream &out) const
{
    out << "Event Barrier: ";
    for (const EventBarrier &barrier : mBarriers)
    {
        barrier.addDiagnosticsString(out);
    }
    out << "\\l";
}
}  // namespace vk
}  // namespace rx