/*
 * Copyright 2022 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
7 
8 #include "src/gpu/graphite/QueueManager.h"
9 
10 #include "include/gpu/graphite/Recording.h"
11 #include "src/core/SkTraceEvent.h"
12 #include "src/gpu/RefCntedCallback.h"
13 #include "src/gpu/graphite/Buffer.h"
14 #include "src/gpu/graphite/Caps.h"
15 #include "src/gpu/graphite/CommandBuffer.h"
16 #include "src/gpu/graphite/ContextPriv.h"
17 #include "src/gpu/graphite/GpuWorkSubmission.h"
18 #include "src/gpu/graphite/Log.h"
19 #include "src/gpu/graphite/RecordingPriv.h"
20 #include "src/gpu/graphite/Surface_Graphite.h"
21 #include "src/gpu/graphite/UploadBufferManager.h"
22 #include "src/gpu/graphite/task/Task.h"
23 
24 namespace skgpu::graphite {
25 
26 // This constant determines how many OutstandingSubmissions are allocated together as a block in
27 // the deque. As such it needs to balance allocating too much memory vs. incurring
28 // allocation/deallocation thrashing. It should roughly correspond to the max number of outstanding
29 // submissions we expect to see.
30 static constexpr int kDefaultOutstandingAllocCnt = 8;
31 
QueueManager(const SharedContext * sharedContext)32 QueueManager::QueueManager(const SharedContext* sharedContext)
33         : fSharedContext(sharedContext)
34         , fOutstandingSubmissions(sizeof(OutstandingSubmission), kDefaultOutstandingAllocCnt) {
35 }
36 
~QueueManager()37 QueueManager::~QueueManager() {
38     if (fSharedContext->caps()->allowCpuSync()) {
39         this->checkForFinishedWork(SyncToCpu::kYes);
40     } else if (!fOutstandingSubmissions.empty()) {
41         SKGPU_LOG_F("When ContextOptions::fNeverYieldToWebGPU is specified all GPU work must be "
42                     "finished before destroying Context.");
43     }
44 }
45 
setupCommandBuffer(ResourceProvider * resourceProvider)46 bool QueueManager::setupCommandBuffer(ResourceProvider* resourceProvider) {
47     if (!fCurrentCommandBuffer) {
48         if (fAvailableCommandBuffers.size()) {
49             fCurrentCommandBuffer = std::move(fAvailableCommandBuffers.back());
50             fAvailableCommandBuffers.pop_back();
51             if (!fCurrentCommandBuffer->setNewCommandBufferResources()) {
52                 fCurrentCommandBuffer.reset();
53             }
54         }
55     }
56     if (!fCurrentCommandBuffer) {
57         fCurrentCommandBuffer = this->getNewCommandBuffer(resourceProvider);
58     }
59     if (!fCurrentCommandBuffer) {
60         return false;
61     }
62 
63     return true;
64 }
65 
addRecording(const InsertRecordingInfo & info,Context * context)66 bool QueueManager::addRecording(const InsertRecordingInfo& info, Context* context) {
67     TRACE_EVENT0("skia.gpu", TRACE_FUNC);
68 
69     sk_sp<RefCntedCallback> callback;
70     if (info.fFinishedProc) {
71         callback = RefCntedCallback::Make(info.fFinishedProc, info.fFinishedContext);
72     }
73 
74     SkASSERT(info.fRecording);
75     if (!info.fRecording) {
76         if (callback) {
77             callback->setFailureResult();
78         }
79         SKGPU_LOG_E("No valid Recording passed into addRecording call");
80         return false;
81     }
82 
83     if (this->fSharedContext->caps()->requireOrderedRecordings()) {
84         uint32_t* recordingID = fLastAddedRecordingIDs.find(info.fRecording->priv().recorderID());
85         if (recordingID &&
86             info.fRecording->priv().uniqueID() != *recordingID+1) {
87             if (callback) {
88                 callback->setFailureResult();
89             }
90             SKGPU_LOG_E("Recordings are expected to be replayed in order");
91             return false;
92         }
93 
94         // Note the new Recording ID.
95         fLastAddedRecordingIDs.set(info.fRecording->priv().recorderID(),
96                                    info.fRecording->priv().uniqueID());
97     }
98 
99     if (info.fTargetSurface &&
100         !static_cast<const SkSurface_Base*>(info.fTargetSurface)->isGraphiteBacked()) {
101         if (callback) {
102             callback->setFailureResult();
103         }
104         info.fRecording->priv().setFailureResultForFinishedProcs();
105         SKGPU_LOG_E("Target surface passed into addRecording call is not graphite-backed");
106         return false;
107     }
108 
109     auto resourceProvider = context->priv().resourceProvider();
110     if (!this->setupCommandBuffer(resourceProvider)) {
111         if (callback) {
112             callback->setFailureResult();
113         }
114         info.fRecording->priv().setFailureResultForFinishedProcs();
115         SKGPU_LOG_E("CommandBuffer creation failed");
116         return false;
117     }
118 
119     if (info.fRecording->priv().hasNonVolatileLazyProxies()) {
120         if (!info.fRecording->priv().instantiateNonVolatileLazyProxies(resourceProvider)) {
121             if (callback) {
122                 callback->setFailureResult();
123             }
124             info.fRecording->priv().setFailureResultForFinishedProcs();
125             SKGPU_LOG_E("Non-volatile PromiseImage instantiation has failed");
126             return false;
127         }
128     }
129 
130     if (info.fRecording->priv().hasVolatileLazyProxies()) {
131         if (!info.fRecording->priv().instantiateVolatileLazyProxies(resourceProvider)) {
132             if (callback) {
133                 callback->setFailureResult();
134             }
135             info.fRecording->priv().setFailureResultForFinishedProcs();
136             info.fRecording->priv().deinstantiateVolatileLazyProxies();
137             SKGPU_LOG_E("Volatile PromiseImage instantiation has failed");
138             return false;
139         }
140     }
141 
142     fCurrentCommandBuffer->addWaitSemaphores(info.fNumWaitSemaphores, info.fWaitSemaphores);
143     if (!info.fRecording->priv().addCommands(context,
144                                              fCurrentCommandBuffer.get(),
145                                              static_cast<Surface*>(info.fTargetSurface),
146                                              info.fTargetTranslation)) {
147         if (callback) {
148             callback->setFailureResult();
149         }
150         info.fRecording->priv().setFailureResultForFinishedProcs();
151         info.fRecording->priv().deinstantiateVolatileLazyProxies();
152         SKGPU_LOG_E("Adding Recording commands to the CommandBuffer has failed");
153         return false;
154     }
155     fCurrentCommandBuffer->addSignalSemaphores(info.fNumSignalSemaphores, info.fSignalSemaphores);
156     if (info.fTargetTextureState) {
157         fCurrentCommandBuffer->prepareSurfaceForStateUpdate(info.fTargetSurface,
158                                                             info.fTargetTextureState);
159     }
160 
161     if (callback) {
162         fCurrentCommandBuffer->addFinishedProc(std::move(callback));
163     }
164 
165     info.fRecording->priv().deinstantiateVolatileLazyProxies();
166 
167     return true;
168 }
169 
addTask(Task * task,Context * context)170 bool QueueManager::addTask(Task* task,
171                            Context* context) {
172     SkASSERT(task);
173     if (!task) {
174         SKGPU_LOG_E("No valid Task passed into addTask call");
175         return false;
176     }
177 
178     if (!this->setupCommandBuffer(context->priv().resourceProvider())) {
179         SKGPU_LOG_E("CommandBuffer creation failed");
180         return false;
181     }
182 
183     if (task->addCommands(context, fCurrentCommandBuffer.get(), {}) == Task::Status::kFail) {
184         SKGPU_LOG_E("Adding Task commands to the CommandBuffer has failed");
185         return false;
186     }
187 
188     return true;
189 }
190 
addFinishInfo(const InsertFinishInfo & info,ResourceProvider * resourceProvider,SkSpan<const sk_sp<Buffer>> buffersToAsyncMap)191 bool QueueManager::addFinishInfo(const InsertFinishInfo& info,
192                                  ResourceProvider* resourceProvider,
193                                  SkSpan<const sk_sp<Buffer>> buffersToAsyncMap) {
194     sk_sp<RefCntedCallback> callback;
195     if (info.fFinishedProc) {
196         callback = RefCntedCallback::Make(info.fFinishedProc, info.fFinishedContext);
197     }
198 
199     if (!this->setupCommandBuffer(resourceProvider)) {
200         if (callback) {
201             callback->setFailureResult();
202         }
203         SKGPU_LOG_E("CommandBuffer creation failed");
204         return false;
205     }
206 
207     if (callback) {
208         fCurrentCommandBuffer->addFinishedProc(std::move(callback));
209     }
210     fCurrentCommandBuffer->addBuffersToAsyncMapOnSubmit(buffersToAsyncMap);
211 
212     return true;
213 }
214 
submitToGpu()215 bool QueueManager::submitToGpu() {
216     TRACE_EVENT0("skia.gpu", TRACE_FUNC);
217 
218     if (!fCurrentCommandBuffer) {
219         // We warn because this probably representative of a bad client state, where they don't
220         // need to submit but didn't notice, but technically the submit itself is fine (no-op), so
221         // we return true.
222         SKGPU_LOG_D("Submit called with no active command buffer!");
223         return true;
224     }
225 
226 #ifdef SK_DEBUG
227     if (!fCurrentCommandBuffer->hasWork()) {
228         SKGPU_LOG_D("Submitting empty command buffer!");
229     }
230 #endif
231 
232     auto submission = this->onSubmitToGpu();
233     if (!submission) {
234         return false;
235     }
236 
237     new (fOutstandingSubmissions.push_back()) OutstandingSubmission(std::move(submission));
238     return true;
239 }
240 
hasUnfinishedGpuWork()241 bool QueueManager::hasUnfinishedGpuWork() { return !fOutstandingSubmissions.empty(); }
242 
checkForFinishedWork(SyncToCpu sync)243 void QueueManager::checkForFinishedWork(SyncToCpu sync) {
244     TRACE_EVENT1("skia.gpu", TRACE_FUNC, "sync", sync == SyncToCpu::kYes);
245 
246     if (sync == SyncToCpu::kYes) {
247         SkASSERT(fSharedContext->caps()->allowCpuSync());
248         // wait for the last submission to finish
249         OutstandingSubmission* back = (OutstandingSubmission*)fOutstandingSubmissions.back();
250         if (back) {
251             (*back)->waitUntilFinished(fSharedContext);
252         }
253     }
254 
255     // Iterate over all the outstanding submissions to see if any have finished. The work
256     // submissions are in order from oldest to newest, so we start at the front to check if they
257     // have finished. If so we pop it off and move onto the next.
258     // Repeat till we find a submission that has not finished yet (and all others afterwards are
259     // also guaranteed to not have finished).
260     OutstandingSubmission* front = (OutstandingSubmission*)fOutstandingSubmissions.front();
261     while (front && (*front)->isFinished(fSharedContext)) {
262         // Make sure we remove before deleting as deletion might try to kick off another submit
263         // (though hopefully *not* in Graphite).
264         fOutstandingSubmissions.pop_front();
265         // Since we used placement new we are responsible for calling the destructor manually.
266         front->~OutstandingSubmission();
267         front = (OutstandingSubmission*)fOutstandingSubmissions.front();
268     }
269     SkASSERT(sync == SyncToCpu::kNo || fOutstandingSubmissions.empty());
270 }
271 
returnCommandBuffer(std::unique_ptr<CommandBuffer> commandBuffer)272 void QueueManager::returnCommandBuffer(std::unique_ptr<CommandBuffer> commandBuffer) {
273     fAvailableCommandBuffers.push_back(std::move(commandBuffer));
274 }
275 
addUploadBufferManagerRefs(UploadBufferManager * uploadManager)276 void QueueManager::addUploadBufferManagerRefs(UploadBufferManager* uploadManager) {
277     SkASSERT(fCurrentCommandBuffer);
278     uploadManager->transferToCommandBuffer(fCurrentCommandBuffer.get());
279 }
280 
281 
282 } // namespace skgpu::graphite
283