1 /*
2 * Copyright 2021 Google LLC
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
#include "include/gpu/graphite/Context.h"

#include "include/core/SkPathTypes.h"
#include "include/effects/SkRuntimeEffect.h"
#include "include/gpu/graphite/BackendTexture.h"
#include "include/gpu/graphite/Recorder.h"
#include "include/gpu/graphite/Recording.h"
#include "include/gpu/graphite/TextureInfo.h"
#include "src/gpu/RefCntedCallback.h"
#include "src/gpu/graphite/BufferManager.h"
#include "src/gpu/graphite/Caps.h"
#include "src/gpu/graphite/ClientMappedBufferManager.h"
#include "src/gpu/graphite/CommandBuffer.h"
#include "src/gpu/graphite/ContextPriv.h"
#include "src/gpu/graphite/CopyTask.h"
#include "src/gpu/graphite/DrawAtlas.h"
#include "src/gpu/graphite/GlobalCache.h"
#include "src/gpu/graphite/GraphicsPipeline.h"
#include "src/gpu/graphite/GraphicsPipelineDesc.h"
#include "src/gpu/graphite/Image_Graphite.h"
#include "src/gpu/graphite/KeyContext.h"
#include "src/gpu/graphite/Log.h"
#include "src/gpu/graphite/QueueManager.h"
#include "src/gpu/graphite/RecorderPriv.h"
#include "src/gpu/graphite/RecordingPriv.h"
#include "src/gpu/graphite/Renderer.h"
#include "src/gpu/graphite/RendererProvider.h"
#include "src/gpu/graphite/ResourceProvider.h"
#include "src/gpu/graphite/RuntimeEffectDictionary.h"
#include "src/gpu/graphite/ShaderCodeDictionary.h"
#include "src/gpu/graphite/SharedContext.h"
#include "src/gpu/graphite/Surface_Graphite.h"
#include "src/gpu/graphite/SynchronizeToCpuTask.h"
#include "src/gpu/graphite/TextureProxyView.h"
#include "src/gpu/graphite/UploadTask.h"

#include <algorithm>
43
44 namespace skgpu::graphite {
45
46 #define ASSERT_SINGLE_OWNER SKGPU_ASSERT_SINGLE_OWNER(this->singleOwner())
47
Next()48 Context::ContextID Context::ContextID::Next() {
49 static std::atomic<uint32_t> nextID{1};
50 uint32_t id;
51 do {
52 id = nextID.fetch_add(1, std::memory_order_relaxed);
53 } while (id == SK_InvalidUniqueID);
54 return ContextID(id);
55 }
56
57 //--------------------------------------------------------------------------------------------------
// Constructs a Context over an already-created SharedContext and QueueManager.
// Callers go through ContextCtorAccessor::MakeContext, which also runs
// finishInitialization() before handing the Context out.
Context::Context(sk_sp<SharedContext> sharedContext,
                 std::unique_ptr<QueueManager> queueManager,
                 const ContextOptions& options)
        : fSharedContext(std::move(sharedContext))
        , fQueueManager(std::move(queueManager))
#if GRAPHITE_TEST_UTILS
        , fStoreContextRefInRecorder(options.fStoreContextRefInRecorder)
#endif
        , fContextID(ContextID::Next()) {
    // We have to create this outside the initializer list because we need to pass in the Context's
    // SingleOwner object and it is declared last
    fResourceProvider = fSharedContext->makeResourceProvider(&fSingleOwner);
    fMappedBufferManager = std::make_unique<ClientMappedBufferManager>(this->contextID());
    fPlotUploadTracker = std::make_unique<PlotUploadTracker>();
}
73
Context::~Context() {
#if GRAPHITE_TEST_UTILS
    ASSERT_SINGLE_OWNER
    // Testing-only: Recorders created while fStoreContextRefInRecorder was set hold a raw
    // back-pointer to this Context; null it out so they don't dangle after we're destroyed.
    for (auto& recorder : fTrackedRecorders) {
        recorder->priv().setContext(nullptr);
    }
#endif
}
82
// Second stage of Context creation: builds the RendererProvider, which records any static
// vertex/index data it needs into a StaticBufferManager, then finalizes (and, if there was
// work, submits) that data before the Context can be used. Returns false if the Context
// should be considered failed and destroyed.
bool Context::finishInitialization() {
    SkASSERT(!fSharedContext->rendererProvider()); // Can only initialize once

    StaticBufferManager bufferManager{fResourceProvider.get(), fSharedContext->caps()};
    std::unique_ptr<RendererProvider> renderers{
            new RendererProvider(fSharedContext->caps(), &bufferManager)};

    // finalize() reports kFailure, kSuccess (work was recorded), or kNoWork.
    auto result = bufferManager.finalize(this, fQueueManager.get(), fSharedContext->globalCache());
    if (result == StaticBufferManager::FinishResult::kFailure) {
        // If something went wrong filling out the static vertex buffers, any Renderer that would
        // use it will draw incorrectly, so it's better to fail the Context creation.
        return false;
    }
    if (result == StaticBufferManager::FinishResult::kSuccess &&
        !fQueueManager->submitToGpu()) {
        SKGPU_LOG_W("Failed to submit initial command buffer for Context creation.\n");
        return false;
    } // else result was kNoWork so skip submitting to the GPU
    fSharedContext->setRendererProvider(std::move(renderers));
    return true;
}
104
backend() const105 BackendApi Context::backend() const { return fSharedContext->backend(); }
106
// Creates a new Recorder bound to this Context's SharedContext, configured by 'options'.
std::unique_ptr<Recorder> Context::makeRecorder(const RecorderOptions& options) {
    ASSERT_SINGLE_OWNER

    auto recorder = std::unique_ptr<Recorder>(new Recorder(fSharedContext, options));
#if GRAPHITE_TEST_UTILS
    // Testing-only: give the Recorder a back-pointer to this Context (cleared again in
    // ~Context and via ContextPriv::deregisterRecorder).
    if (fStoreContextRefInRecorder) {
        recorder->priv().setContext(this);
    }
#endif
    return recorder;
}
118
// Hands the Recording described by 'info' to the QueueManager for eventual GPU submission.
// Returns false if the QueueManager rejected it.
bool Context::insertRecording(const InsertRecordingInfo& info) {
    ASSERT_SINGLE_OWNER

    return fQueueManager->addRecording(info, this);
}
124
submit(SyncToCpu syncToCpu)125 bool Context::submit(SyncToCpu syncToCpu) {
126 ASSERT_SINGLE_OWNER
127
128 bool success = fQueueManager->submitToGpu();
129 fQueueManager->checkForFinishedWork(syncToCpu);
130 return success;
131 }
132
asyncReadPixels(const SkImage * image,const SkColorInfo & dstColorInfo,const SkIRect & srcRect,SkImage::ReadPixelsCallback callback,SkImage::ReadPixelsContext callbackContext)133 void Context::asyncReadPixels(const SkImage* image,
134 const SkColorInfo& dstColorInfo,
135 const SkIRect& srcRect,
136 SkImage::ReadPixelsCallback callback,
137 SkImage::ReadPixelsContext callbackContext) {
138 if (!as_IB(image)->isGraphiteBacked()) {
139 callback(callbackContext, nullptr);
140 return;
141 }
142 auto graphiteImage = reinterpret_cast<const skgpu::graphite::Image*>(image);
143 TextureProxyView proxyView = graphiteImage->textureProxyView();
144
145 this->asyncReadPixels(proxyView.proxy(),
146 image->imageInfo(),
147 dstColorInfo,
148 srcRect,
149 callback,
150 callbackContext);
151 }
152
asyncReadPixels(const SkSurface * surface,const SkColorInfo & dstColorInfo,const SkIRect & srcRect,SkImage::ReadPixelsCallback callback,SkImage::ReadPixelsContext callbackContext)153 void Context::asyncReadPixels(const SkSurface* surface,
154 const SkColorInfo& dstColorInfo,
155 const SkIRect& srcRect,
156 SkImage::ReadPixelsCallback callback,
157 SkImage::ReadPixelsContext callbackContext) {
158 if (!static_cast<const SkSurface_Base*>(surface)->isGraphiteBacked()) {
159 callback(callbackContext, nullptr);
160 return;
161 }
162 auto graphiteSurface = reinterpret_cast<const skgpu::graphite::Surface*>(surface);
163 TextureProxyView proxyView = graphiteSurface->readSurfaceView();
164
165 this->asyncReadPixels(proxyView.proxy(),
166 surface->imageInfo(),
167 dstColorInfo,
168 srcRect,
169 callback,
170 callbackContext);
171 }
172
// Shared implementation for the image- and surface-based overloads: validates the request,
// records a GPU->CPU transfer of 'srcRect' from 'proxy', and registers a finished-proc that
// packages the mapped buffer into an AsyncReadResult for the client. The callback is always
// invoked exactly once, with nullptr on any failure.
void Context::asyncReadPixels(const TextureProxy* proxy,
                              const SkImageInfo& srcImageInfo,
                              const SkColorInfo& dstColorInfo,
                              const SkIRect& srcRect,
                              SkImage::ReadPixelsCallback callback,
                              SkImage::ReadPixelsContext callbackContext) {
    if (!proxy) {
        callback(callbackContext, nullptr);
        return;
    }

    if (!SkImageInfoIsValid(srcImageInfo) || !SkColorInfoIsValid(dstColorInfo)) {
        callback(callbackContext, nullptr);
        return;
    }

    // The requested rectangle must lie entirely within the source's bounds.
    if (!SkIRect::MakeSize(srcImageInfo.dimensions()).contains(srcRect)) {
        callback(callbackContext, nullptr);
        return;
    }

    const Caps* caps = fSharedContext->caps();
    if (!caps->supportsReadPixels(proxy->textureInfo())) {
        // TODO: try to copy to a readable texture instead
        callback(callbackContext, nullptr);
        return;
    }

    // Records the copy-to-buffer and CPU-sync tasks; fTransferBuffer is null on failure.
    PixelTransferResult transferResult = this->transferPixels(proxy, srcImageInfo,
                                                              dstColorInfo, srcRect);

    if (!transferResult.fTransferBuffer) {
        // TODO: try to do a synchronous readPixels instead
        callback(callbackContext, nullptr);
        return;
    }

    using AsyncReadResult = skgpu::TAsyncReadResult<Buffer, ContextID, PixelTransferResult>;
    // Heap-allocated state that must outlive this call; it is owned (and deleted) by the
    // finish callback below, which runs once the GPU signals completion.
    struct FinishContext {
        SkImage::ReadPixelsCallback* fClientCallback;
        SkImage::ReadPixelsContext fClientContext;
        SkISize fSize;
        size_t fRowBytes;
        ClientMappedBufferManager* fMappedBufferManager;
        PixelTransferResult fTransferResult;
    };
    size_t rowBytes = fSharedContext->caps()->getAlignedTextureDataRowBytes(
            srcRect.width() * SkColorTypeBytesPerPixel(dstColorInfo.colorType()));
    auto* finishContext = new FinishContext{callback,
                                            callbackContext,
                                            srcRect.size(),
                                            rowBytes,
                                            fMappedBufferManager.get(),
                                            std::move(transferResult)};
    GpuFinishedProc finishCallback = [](GpuFinishedContext c, CallbackResult status) {
        const auto* context = reinterpret_cast<const FinishContext*>(c);
        if (status == CallbackResult::kSuccess) {
            ClientMappedBufferManager* manager = context->fMappedBufferManager;
            auto result = std::make_unique<AsyncReadResult>(manager->ownerID());
            if (!result->addTransferResult(context->fTransferResult, context->fSize,
                                           context->fRowBytes, manager)) {
                // Mapping the transfer buffer failed; the client gets nullptr instead.
                result.reset();
            }
            (*context->fClientCallback)(context->fClientContext, std::move(result));
        } else {
            (*context->fClientCallback)(context->fClientContext, nullptr);
        }
        delete context;
    };

    InsertFinishInfo info;
    info.fFinishedContext = finishContext;
    info.fFinishedProc = finishCallback;
    // If addFinishInfo() fails, it invokes the finish callback automatically, which handles all the
    // required clean up for us, just log an error message.
    if (!fQueueManager->addFinishInfo(info, fResourceProvider.get())) {
        SKGPU_LOG_E("Failed to register finish callbacks for asyncReadPixels.");
    }
}
252
// Records the GPU tasks needed to copy 'srcRect' of 'proxy' into a freshly allocated
// GPU-to-CPU transfer buffer. On success the returned result owns that buffer and, when the
// source and destination color infos differ, a converter the caller runs on the CPU after
// mapping. Returns an empty result (null fTransferBuffer) on any failure.
Context::PixelTransferResult Context::transferPixels(const TextureProxy* proxy,
                                                     const SkImageInfo& srcImageInfo,
                                                     const SkColorInfo& dstColorInfo,
                                                     const SkIRect& srcRect) {
    SkASSERT(srcImageInfo.bounds().contains(srcRect));

    const Caps* caps = fSharedContext->caps();
    // Ask the backend which color type this texture can actually be read back as.
    SkColorType supportedColorType =
            caps->supportedReadPixelsColorType(srcImageInfo.colorType(),
                                               proxy->textureInfo(),
                                               dstColorInfo.colorType());
    if (supportedColorType == kUnknown_SkColorType) {
        return {};
    }

    // Fail if read color type does not have all of dstCT's color channels and those missing color
    // channels are in the src.
    uint32_t dstChannels = SkColorTypeChannelFlags(dstColorInfo.colorType());
    uint32_t legalReadChannels = SkColorTypeChannelFlags(supportedColorType);
    uint32_t srcChannels = SkColorTypeChannelFlags(srcImageInfo.colorType());
    if ((~legalReadChannels & dstChannels) & srcChannels) {
        return {};
    }

    // Size the buffer honoring both the backend's row alignment and its overall
    // transfer-buffer alignment requirements.
    size_t rowBytes = caps->getAlignedTextureDataRowBytes(
            SkColorTypeBytesPerPixel(supportedColorType) * srcRect.width());
    size_t size = SkAlignTo(rowBytes * srcRect.height(), caps->requiredTransferBufferAlignment());
    sk_sp<Buffer> buffer = fResourceProvider->findOrCreateBuffer(
            size,
            BufferType::kXferGpuToCpu,
            PrioritizeGpuReads::kNo);
    if (!buffer) {
        return {};
    }

    // Set up copy task. Since we always use a new buffer the offset can be 0 and we don't need to
    // worry about aligning it to the required transfer buffer alignment.
    sk_sp<CopyTextureToBufferTask> copyTask = CopyTextureToBufferTask::Make(sk_ref_sp(proxy),
                                                                            srcRect,
                                                                            buffer,
                                                                            /*bufferOffset=*/0,
                                                                            rowBytes);
    if (!copyTask || !fQueueManager->addTask(copyTask.get(), this)) {
        return {};
    }
    // Follow the copy with a task that makes the buffer contents visible to the CPU.
    sk_sp<SynchronizeToCpuTask> syncTask = SynchronizeToCpuTask::Make(buffer);
    if (!syncTask || !fQueueManager->addTask(syncTask.get(), this)) {
        return {};
    }

    PixelTransferResult result;
    result.fTransferBuffer = std::move(buffer);
    if (srcImageInfo.colorInfo() != dstColorInfo) {
        // The mapped data arrives in the source's color format; give the caller a CPU-side
        // converter into the requested destination format.
        result.fPixelConverter = [dims = srcRect.size(), dstColorInfo, srcImageInfo, rowBytes](
                void* dst, const void* src) {
            SkImageInfo srcInfo = SkImageInfo::Make(dims, srcImageInfo.colorInfo());
            SkImageInfo dstInfo = SkImageInfo::Make(dims, dstColorInfo);
            SkAssertResult(SkConvertPixels(dstInfo, dst, dstInfo.minRowBytes(),
                                           srcInfo, src, rowBytes));
        };
    }

    return result;
}
317
318
// Checks for GPU work that has finished, with SyncToCpu::kNo (i.e. without forcing a CPU
// sync), so that pending finished-callbacks such as async readback results can fire.
void Context::checkAsyncWorkCompletion() {
    ASSERT_SINGLE_OWNER

    fQueueManager->checkForFinishedWork(SyncToCpu::kNo);
}
324
deleteBackendTexture(BackendTexture & texture)325 void Context::deleteBackendTexture(BackendTexture& texture) {
326 ASSERT_SINGLE_OWNER
327
328 if (!texture.isValid() || texture.backend() != this->backend()) {
329 return;
330 }
331 fResourceProvider->deleteBackendTexture(texture);
332 }
333
334 ///////////////////////////////////////////////////////////////////////////////////
335
336 #if GRAPHITE_TEST_UTILS
// Test-only synchronous readback helper: issues an asyncReadPixels for the pixmap-sized rect
// at (srcX, srcY) and, if the callback has not yet fired, submits with SyncToCpu::kYes to
// force completion. Copies the result into 'pm'; returns false if the read failed.
bool ContextPriv::readPixels(const SkPixmap& pm,
                             const TextureProxy* textureProxy,
                             const SkImageInfo& srcImageInfo,
                             int srcX, int srcY) {
    auto rect = SkIRect::MakeXYWH(srcX, srcY, pm.width(), pm.height());
    // Stack-allocated is safe here: we block below until the callback has run.
    struct AsyncContext {
        bool fCalled = false;
        std::unique_ptr<const SkImage::AsyncReadResult> fResult;
    } asyncContext;
    fContext->asyncReadPixels(textureProxy, srcImageInfo, pm.info().colorInfo(), rect,
                              [](void* c, std::unique_ptr<const SkImage::AsyncReadResult> result) {
                                  auto context = static_cast<AsyncContext*>(c);
                                  context->fResult = std::move(result);
                                  context->fCalled = true;
                              },
                              &asyncContext);

    if (!asyncContext.fCalled) {
        // The callback can fire synchronously on failure; otherwise submit and wait for the
        // GPU so it runs before we return.
        fContext->submit(SyncToCpu::kYes);
    }
    SkASSERT(asyncContext.fCalled);
    if (!asyncContext.fResult) {
        return false;
    }
    SkRectMemcpy(pm.writable_addr(), pm.rowBytes(), asyncContext.fResult->data(0),
                 asyncContext.fResult->rowBytes(0), pm.info().minRowBytes(),
                 pm.height());
    return true;
}
366
deregisterRecorder(const Recorder * recorder)367 void ContextPriv::deregisterRecorder(const Recorder* recorder) {
368 SKGPU_ASSERT_SINGLE_OWNER(fContext->singleOwner())
369 for (auto it = fContext->fTrackedRecorders.begin();
370 it != fContext->fTrackedRecorders.end();
371 it++) {
372 if (*it == recorder) {
373 fContext->fTrackedRecorders.erase(it);
374 return;
375 }
376 }
377 }
378
379 #endif
380
381 ///////////////////////////////////////////////////////////////////////////////////
382
MakeContext(sk_sp<SharedContext> sharedContext,std::unique_ptr<QueueManager> queueManager,const ContextOptions & options)383 std::unique_ptr<Context> ContextCtorAccessor::MakeContext(
384 sk_sp<SharedContext> sharedContext,
385 std::unique_ptr<QueueManager> queueManager,
386 const ContextOptions& options) {
387 auto context = std::unique_ptr<Context>(new Context(std::move(sharedContext),
388 std::move(queueManager),
389 options));
390 if (context && context->finishInitialization()) {
391 return context;
392 } else {
393 return nullptr;
394 }
395 }
396
397 } // namespace skgpu::graphite
398