/*
 * Copyright 2021 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "include/gpu/graphite/Context.h"

#include "include/core/SkColorSpace.h"
#include "include/core/SkPathTypes.h"
#include "include/core/SkTraceMemoryDump.h"
#include "include/effects/SkRuntimeEffect.h"
#include "include/gpu/graphite/BackendTexture.h"
#include "include/gpu/graphite/PrecompileContext.h"
#include "include/gpu/graphite/Recorder.h"
#include "include/gpu/graphite/Recording.h"
#include "include/gpu/graphite/Surface.h"
#include "include/gpu/graphite/TextureInfo.h"
#include "include/private/base/SkOnce.h"
#include "src/base/SkRectMemcpy.h"
#include "src/core/SkAutoPixmapStorage.h"
#include "src/core/SkColorFilterPriv.h"
#include "src/core/SkConvertPixels.h"
#include "src/core/SkTraceEvent.h"
#include "src/core/SkYUVMath.h"
#include "src/gpu/RefCntedCallback.h"
#include "src/gpu/graphite/AtlasProvider.h"
#include "src/gpu/graphite/BufferManager.h"
#include "src/gpu/graphite/Caps.h"
#include "src/gpu/graphite/ClientMappedBufferManager.h"
#include "src/gpu/graphite/CommandBuffer.h"
#include "src/gpu/graphite/ContextPriv.h"
#include "src/gpu/graphite/DrawAtlas.h"
#include "src/gpu/graphite/GlobalCache.h"
#include "src/gpu/graphite/GraphicsPipeline.h"
#include "src/gpu/graphite/GraphicsPipelineDesc.h"
#include "src/gpu/graphite/Image_Base_Graphite.h"
#include "src/gpu/graphite/Image_Graphite.h"
#include "src/gpu/graphite/KeyContext.h"
#include "src/gpu/graphite/Log.h"
#include "src/gpu/graphite/QueueManager.h"
#include "src/gpu/graphite/RecorderPriv.h"
#include "src/gpu/graphite/RecordingPriv.h"
#include "src/gpu/graphite/Renderer.h"
#include "src/gpu/graphite/RendererProvider.h"
#include "src/gpu/graphite/ResourceProvider.h"
#include "src/gpu/graphite/RuntimeEffectDictionary.h"
#include "src/gpu/graphite/ShaderCodeDictionary.h"
#include "src/gpu/graphite/SharedContext.h"
#include "src/gpu/graphite/Surface_Graphite.h"
#include "src/gpu/graphite/TextureProxyView.h"
#include "src/gpu/graphite/TextureUtils.h"
#include "src/gpu/graphite/task/CopyTask.h"
#include "src/gpu/graphite/task/SynchronizeToCpuTask.h"
#include "src/gpu/graphite/task/UploadTask.h"
#include "src/image/SkSurface_Base.h"
#include "src/sksl/SkSLGraphiteModules.h"

#if defined(GPU_TEST_UTILS)
#include "src/gpu/graphite/ContextOptionsPriv.h"
#endif

namespace skgpu::graphite {

#define ASSERT_SINGLE_OWNER SKGPU_ASSERT_SINGLE_OWNER(this->singleOwner())
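
// Context's public API is not thread safe; ASSERT_SINGLE_OWNER is a debug-only check that the
// client has externally synchronized all calls into this Context.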

Context::ContextID Context::ContextID::Next() {
    static std::atomic<uint32_t> nextID{1};
    uint32_t id;
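    // fetch_add can wrap back around to 0, so loop until we have a value that is not
    // SK_InvalidUniqueID.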
    do {
        id = nextID.fetch_add(1, std::memory_order_relaxed);
    } while (id == SK_InvalidUniqueID);
    return ContextID(id);
}

//--------------------------------------------------------------------------------------------------
Context::Context(sk_sp<SharedContext> sharedContext,
                 std::unique_ptr<QueueManager> queueManager,
                 const ContextOptions& options)
        : fSharedContext(std::move(sharedContext))
        , fQueueManager(std::move(queueManager))
        , fContextID(ContextID::Next()) {
    // We need to move the Graphite SkSL code into the central SkSL data loader at least once
    // (but preferably only once) before we try to use it. We assume that there's no way to
    // use the SkSL code without making a Context, so we initialize it here.
    static SkOnce once;
    once([] { SkSL::Loader::SetGraphiteModuleData(SkSL::Loader::GetGraphiteModules()); });
    // We have to create this outside the initializer list because we need to pass in the Context's
    // SingleOwner object, and it is declared last.
    fResourceProvider = fSharedContext->makeResourceProvider(&fSingleOwner,
                                                             SK_InvalidGenID,
                                                             options.fGpuBudgetInBytes);
    fMappedBufferManager = std::make_unique<ClientMappedBufferManager>(this->contextID());
#if defined(GPU_TEST_UTILS)
    if (options.fOptionsPriv) {
        fStoreContextRefInRecorder = options.fOptionsPriv->fStoreContextRefInRecorder;
    }
#endif

    fSharedContext->globalCache()->setPipelineCallback(options.fPipelineCallback,
                                                       options.fPipelineCallbackContext);
}

Context::~Context() {
#if defined(GPU_TEST_UTILS)
    SkAutoMutexExclusive lock(fTestingLock);
    for (auto& recorder : fTrackedRecorders) {
        recorder->priv().setContext(nullptr);
    }
#endif
}

bool Context::finishInitialization() {
    SkASSERT(!fSharedContext->rendererProvider()); // Can only initialize once

    StaticBufferManager bufferManager{fResourceProvider.get(), fSharedContext->caps()};
    std::unique_ptr<RendererProvider> renderers{
            new RendererProvider(fSharedContext->caps(), &bufferManager)};

    auto result = bufferManager.finalize(this, fQueueManager.get(), fSharedContext->globalCache());
    if (result == StaticBufferManager::FinishResult::kFailure) {
        // If something went wrong filling out the static vertex buffers, any Renderer that would
        // use them will draw incorrectly, so it's better to fail Context creation.
        return false;
    }
    if (result == StaticBufferManager::FinishResult::kSuccess &&
        !fQueueManager->submitToGpu()) {
        SKGPU_LOG_W("Failed to submit initial command buffer for Context creation.\n");
        return false;
    } // else result was kNoWork, so skip submitting to the GPU
    fSharedContext->setRendererProvider(std::move(renderers));
    return true;
}

BackendApi Context::backend() const { return fSharedContext->backend(); }

std::unique_ptr<Recorder> Context::makeRecorder(const RecorderOptions& options) {
    ASSERT_SINGLE_OWNER

    // This is a client-owned Recorder, so pass a null Context; the Recorder will create its own
    // ResourceProvider.
    auto recorder = std::unique_ptr<Recorder>(new Recorder(fSharedContext, options, nullptr));
#if defined(GPU_TEST_UTILS)
    if (fStoreContextRefInRecorder) {
        recorder->priv().setContext(this);
    }
#endif
    return recorder;
}

std::unique_ptr<PrecompileContext> Context::makePrecompileContext() {
    ASSERT_SINGLE_OWNER

    return std::unique_ptr<PrecompileContext>(new PrecompileContext(fSharedContext));
}

std::unique_ptr<Recorder> Context::makeInternalRecorder() const {
    ASSERT_SINGLE_OWNER

    // Unlike makeRecorder(), this Recorder is meant to be short-lived and go
    // away before a Context public API function returns to the caller. As such,
    // it shares the Context's resource provider (no separate budget) and does
    // not get tracked. The internal drawing performed with an internal recorder
    // should not require a client image provider.
    return std::unique_ptr<Recorder>(new Recorder(fSharedContext, {}, this));
}

bool Context::insertRecording(const InsertRecordingInfo& info) {
    ASSERT_SINGLE_OWNER

    return fQueueManager->addRecording(info, this);
}
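
// A typical client flow, as a sketch (surface creation and drawing happen through the public
// SkSurfaces/SkCanvas APIs, not in this file):
//   std::unique_ptr<Recorder> recorder = context->makeRecorder();
//   ... draw to an SkCanvas from a Surface created with this recorder ...
//   std::unique_ptr<Recording> recording = recorder->snap();
//   InsertRecordingInfo info;
//   info.fRecording = recording.get();
//   context->insertRecording(info);
//   context->submit();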

bool Context::submit(SyncToCpu syncToCpu) {
    ASSERT_SINGLE_OWNER

    if (syncToCpu == SyncToCpu::kYes && !fSharedContext->caps()->allowCpuSync()) {
        SKGPU_LOG_E("SyncToCpu::kYes not supported with ContextOptions::fNeverYieldToWebGPU. "
                    "The parameter is ignored and no synchronization will occur.");
        syncToCpu = SyncToCpu::kNo;
    }
    bool success = fQueueManager->submitToGpu();
    this->checkForFinishedWork(syncToCpu);
    return success;
}

bool Context::hasUnfinishedGpuWork() const { return fQueueManager->hasUnfinishedGpuWork(); }

template <typename SrcPixels>
struct Context::AsyncParams {
    const SrcPixels* fSrcImage;
    SkIRect fSrcRect;
    SkImageInfo fDstImageInfo;

    SkImage::ReadPixelsCallback* fCallback;
    SkImage::ReadPixelsContext fCallbackContext;

    template <typename S>
    AsyncParams<S> withNewSource(const S* newPixels, const SkIRect& newSrcRect) const {
        return AsyncParams<S>{newPixels, newSrcRect,
                              fDstImageInfo, fCallback, fCallbackContext};
    }

    void fail() const {
        (*fCallback)(fCallbackContext, nullptr);
    }

    bool validate() const {
        if (!fSrcImage) {
            return false;
        }
        // SkImage::isProtected() -> bool, TextureProxy::isProtected() -> Protected enum.
        // The explicit cast makes this function work for both template instantiations since
        // the Protected enum is backed by a bool.
        if ((bool) fSrcImage->isProtected()) {
            return false;
        }
        if (!SkIRect::MakeSize(fSrcImage->dimensions()).contains(fSrcRect)) {
            return false;
        }
        if (!SkImageInfoIsValid(fDstImageInfo)) {
            return false;
        }
        return true;
    }
};

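// Shared driver for the async read entry points: 'asyncRead' selects the member function that
// performs the actual readback (RGBA or YUV420), and 'extraParams' forwards any additional
// arguments that function needs (e.g. the SkYUVColorSpace).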
template <typename ReadFn, typename... ExtraArgs>
void Context::asyncRescaleAndReadImpl(ReadFn Context::* asyncRead,
                                      SkImage::RescaleGamma rescaleGamma,
                                      SkImage::RescaleMode rescaleMode,
                                      const AsyncParams<SkImage>& params,
                                      ExtraArgs... extraParams) {
    if (!params.validate()) {
        return params.fail();
    }

    if (params.fSrcRect.size() == params.fDstImageInfo.dimensions()) {
        // No need to rescale, so do a direct readback
        return (this->*asyncRead)(/*recorder=*/nullptr, params, extraParams...);
    }

    // Make a recorder to collect the rescale drawing commands and the copy commands
    std::unique_ptr<Recorder> recorder = this->makeInternalRecorder();
    sk_sp<SkImage> scaledImage = RescaleImage(recorder.get(),
                                              params.fSrcImage,
                                              params.fSrcRect,
                                              params.fDstImageInfo,
                                              rescaleGamma,
                                              rescaleMode);
    if (!scaledImage) {
        SKGPU_LOG_W("AsyncRead failed because rescaling failed");
        return params.fail();
    }
    (this->*asyncRead)(std::move(recorder),
                       params.withNewSource(scaledImage.get(), params.fDstImageInfo.bounds()),
                       extraParams...);
}

void Context::asyncRescaleAndReadPixels(const SkImage* src,
                                        const SkImageInfo& dstImageInfo,
                                        const SkIRect& srcRect,
                                        SkImage::RescaleGamma rescaleGamma,
                                        SkImage::RescaleMode rescaleMode,
                                        SkImage::ReadPixelsCallback callback,
                                        SkImage::ReadPixelsContext callbackContext) {
    this->asyncRescaleAndReadImpl(&Context::asyncReadPixels,
                                  rescaleGamma, rescaleMode,
                                  {src, srcRect, dstImageInfo, callback, callbackContext});
}

void Context::asyncRescaleAndReadPixels(const SkSurface* src,
                                        const SkImageInfo& dstImageInfo,
                                        const SkIRect& srcRect,
                                        SkImage::RescaleGamma rescaleGamma,
                                        SkImage::RescaleMode rescaleMode,
                                        SkImage::ReadPixelsCallback callback,
                                        SkImage::ReadPixelsContext callbackContext) {
    sk_sp<SkImage> surfaceImage = SkSurfaces::AsImage(sk_ref_sp(src));
    if (!surfaceImage) {
        // The source surface is not texturable, so the only supported readback is one with
        // no rescaling.
        if (src && asConstSB(src)->isGraphiteBacked() &&
            srcRect.size() == dstImageInfo.dimensions()) {
            TextureProxy* proxy = static_cast<const Surface*>(src)->backingTextureProxy();
            return this->asyncReadTexture(/*recorder=*/nullptr,
                                          {proxy, srcRect, dstImageInfo, callback, callbackContext},
                                          src->imageInfo().colorInfo());
        }
        // else fall through and let asyncRescaleAndReadPixels() invoke the callback when it
        // detects the null image.
    }
    this->asyncRescaleAndReadPixels(surfaceImage.get(),
                                    dstImageInfo,
                                    srcRect,
                                    rescaleGamma,
                                    rescaleMode,
                                    callback,
                                    callbackContext);
}

void Context::asyncReadPixels(std::unique_ptr<Recorder> recorder,
                              const AsyncParams<SkImage>& params) {
    TRACE_EVENT2("skia.gpu", TRACE_FUNC,
                 "width", params.fSrcRect.width(),
                 "height", params.fSrcRect.height());
    SkASSERT(params.validate()); // all paths to here are already validated
    SkASSERT(params.fSrcRect.size() == params.fDstImageInfo.dimensions());

    const Caps* caps = fSharedContext->caps();
    TextureProxyView view = AsView(params.fSrcImage);
    if (!view || !caps->supportsReadPixels(view.proxy()->textureInfo())) {
        // This is either a YUVA image (null view) or the texture can't be read directly, so
        // perform a draw into a compatible texture format and/or flatten any YUVA planes to RGBA.
        if (!recorder) {
            recorder = this->makeInternalRecorder();
        }
        sk_sp<SkImage> flattened = CopyAsDraw(recorder.get(),
                                              params.fSrcImage,
                                              params.fSrcRect,
                                              params.fDstImageInfo.colorInfo(),
                                              Budgeted::kYes,
                                              Mipmapped::kNo,
                                              SkBackingFit::kApprox,
                                              "AsyncReadPixelsFallbackTexture");
        if (!flattened) {
            SKGPU_LOG_W("AsyncRead failed because copy-as-drawing into a readable format failed");
            return params.fail();
        }
        // Use the original fSrcRect's size and not flattened's dimensions, since it's approx-fit.
        return this->asyncReadPixels(std::move(recorder),
                                     params.withNewSource(flattened.get(),
                                                          SkIRect::MakeSize(params.fSrcRect.size())));
    }

    // Can copy directly from the image's texture
    this->asyncReadTexture(std::move(recorder), params.withNewSource(view.proxy(), params.fSrcRect),
                           params.fSrcImage->imageInfo().colorInfo());
}

void Context::asyncReadTexture(std::unique_ptr<Recorder> recorder,
                               const AsyncParams<TextureProxy>& params,
                               const SkColorInfo& srcColorInfo) {
    SkASSERT(params.fSrcRect.size() == params.fDstImageInfo.dimensions());

    // We can get here directly from surface or testing-only read pixels, so re-validate
    if (!params.validate()) {
        return params.fail();
    }
    PixelTransferResult transferResult = this->transferPixels(recorder.get(),
                                                              params.fSrcImage,
                                                              srcColorInfo,
                                                              params.fDstImageInfo.colorInfo(),
                                                              params.fSrcRect);

    if (!transferResult.fTransferBuffer) {
        // TODO: try to do a synchronous readPixels instead
        return params.fail();
    }

    this->finalizeAsyncReadPixels(std::move(recorder),
                                  {&transferResult, 1},
                                  params.fCallback,
                                  params.fCallbackContext);
}

void Context::asyncRescaleAndReadPixelsYUV420(const SkImage* src,
                                              SkYUVColorSpace yuvColorSpace,
                                              sk_sp<SkColorSpace> dstColorSpace,
                                              const SkIRect& srcRect,
                                              const SkISize& dstSize,
                                              SkImage::RescaleGamma rescaleGamma,
                                              SkImage::RescaleMode rescaleMode,
                                              SkImage::ReadPixelsCallback callback,
                                              SkImage::ReadPixelsContext callbackContext) {
    // Use kOpaque alpha type to signal that we don't read back the alpha channel
    SkImageInfo dstImageInfo = SkImageInfo::Make(dstSize,
                                                 kRGBA_8888_SkColorType,
                                                 kOpaque_SkAlphaType,
                                                 std::move(dstColorSpace));
    this->asyncRescaleAndReadImpl(&Context::asyncReadPixelsYUV420,
                                  rescaleGamma, rescaleMode,
                                  {src, srcRect, dstImageInfo, callback, callbackContext},
                                  yuvColorSpace);
}

void Context::asyncRescaleAndReadPixelsYUV420(const SkSurface* src,
                                              SkYUVColorSpace yuvColorSpace,
                                              sk_sp<SkColorSpace> dstColorSpace,
                                              const SkIRect& srcRect,
                                              const SkISize& dstSize,
                                              SkImage::RescaleGamma rescaleGamma,
                                              SkImage::RescaleMode rescaleMode,
                                              SkImage::ReadPixelsCallback callback,
                                              SkImage::ReadPixelsContext callbackContext) {
    // YUV[A] readback requires the surface to be texturable, since the plane conversion is
    // performed by draws. If AsImage() returns null, the image version of
    // asyncRescaleAndReadPixelsYUV420 will automatically fail.
    // TODO: Is it worth performing an extra copy from 'src' into a texture in order to succeed?
    sk_sp<SkImage> surfaceImage = SkSurfaces::AsImage(sk_ref_sp(src));
    this->asyncRescaleAndReadPixelsYUV420(surfaceImage.get(),
                                          yuvColorSpace,
                                          dstColorSpace,
                                          srcRect,
                                          dstSize,
                                          rescaleGamma,
                                          rescaleMode,
                                          callback,
                                          callbackContext);
}

void Context::asyncRescaleAndReadPixelsYUVA420(const SkImage* src,
                                               SkYUVColorSpace yuvColorSpace,
                                               sk_sp<SkColorSpace> dstColorSpace,
                                               const SkIRect& srcRect,
                                               const SkISize& dstSize,
                                               SkImage::RescaleGamma rescaleGamma,
                                               SkImage::RescaleMode rescaleMode,
                                               SkImage::ReadPixelsCallback callback,
                                               SkImage::ReadPixelsContext callbackContext) {
    SkImageInfo dstImageInfo = SkImageInfo::Make(dstSize,
                                                 kRGBA_8888_SkColorType,
                                                 kPremul_SkAlphaType,
                                                 std::move(dstColorSpace));
    this->asyncRescaleAndReadImpl(&Context::asyncReadPixelsYUV420,
                                  rescaleGamma, rescaleMode,
                                  {src, srcRect, dstImageInfo, callback, callbackContext},
                                  yuvColorSpace);
}

void Context::asyncRescaleAndReadPixelsYUVA420(const SkSurface* src,
                                               SkYUVColorSpace yuvColorSpace,
                                               sk_sp<SkColorSpace> dstColorSpace,
                                               const SkIRect& srcRect,
                                               const SkISize& dstSize,
                                               SkImage::RescaleGamma rescaleGamma,
                                               SkImage::RescaleMode rescaleMode,
                                               SkImage::ReadPixelsCallback callback,
                                               SkImage::ReadPixelsContext callbackContext) {
    sk_sp<SkImage> surfaceImage = SkSurfaces::AsImage(sk_ref_sp(src));
    this->asyncRescaleAndReadPixelsYUVA420(surfaceImage.get(),
                                           yuvColorSpace,
                                           dstColorSpace,
                                           srcRect,
                                           dstSize,
                                           rescaleGamma,
                                           rescaleMode,
                                           callback,
                                           callbackContext);
}

void Context::asyncReadPixelsYUV420(std::unique_ptr<Recorder> recorder,
                                    const AsyncParams<SkImage>& params,
                                    SkYUVColorSpace yuvColorSpace) {
    TRACE_EVENT2("skia.gpu", TRACE_FUNC,
                 "width", params.fSrcRect.width(),
                 "height", params.fSrcRect.height());
    // This is only called by asyncRescaleAndReadImpl, which already validates its parameters
    SkASSERT(params.validate());
    SkASSERT(params.fSrcRect.size() == params.fDstImageInfo.dimensions());

    // The planes are always extracted via drawing, so create the Recorder if there isn't one yet.
    if (!recorder) {
        recorder = this->makeInternalRecorder();
    }

    // copyPlane renders the source image into an A8 surface and sets up a transfer stored in
    // 'result'.
    auto copyPlane = [&](SkImageInfo planeInfo,
                         std::string_view label,
                         float rgb2yuv[20],
                         const SkMatrix& texMatrix,
                         PixelTransferResult* result) {
        sk_sp<Surface> dstSurface = Surface::MakeScratch(recorder.get(),
                                                         planeInfo,
                                                         std::move(label),
                                                         Budgeted::kYes,
                                                         Mipmapped::kNo,
                                                         SkBackingFit::kApprox);
        if (!dstSurface) {
            return false;
        }

        // Render the plane defined by rgb2yuv from srcImage into dstSurface
        SkPaint paint;
        const SkSamplingOptions sampling(SkFilterMode::kLinear, SkMipmapMode::kNone);
        sk_sp<SkShader> imgShader = params.fSrcImage->makeShader(
                SkTileMode::kClamp, SkTileMode::kClamp, sampling, texMatrix);
        paint.setShader(std::move(imgShader));
        paint.setBlendMode(SkBlendMode::kSrc);

        if (rgb2yuv) {
            // NOTE: The dstSurface's color space is set to the requested RGB dstColorSpace, so
            // the rendered image is automatically converted to that RGB color space before the
            // RGB->YUV color filter is evaluated, putting the plane data into the alpha channel.
            paint.setColorFilter(SkColorFilters::Matrix(rgb2yuv));
        }

        SkCanvas* canvas = dstSurface->getCanvas();
        canvas->drawPaint(paint);

        // Manually flush the surface before transferPixels() is called to ensure the rendering
        // operations run before the CopyTextureToBuffer task.
        Flush(dstSurface);
        // Must use planeInfo.bounds() for srcRect since dstSurface is kApprox-fit.
        *result = this->transferPixels(recorder.get(),
                                       dstSurface->backingTextureProxy(),
                                       dstSurface->imageInfo().colorInfo(),
                                       planeInfo.colorInfo(),
                                       planeInfo.bounds());
        return SkToBool(result->fTransferBuffer);
    };

    // Set up draws and transfers. This interleaves the drawing to a plane and the copy to the
    // transfer buffer, which will allow the scratch A8 surface to be reused for each plane.
    // TODO: Use one transfer buffer for all three planes to reduce map/unmap cost?
    const bool readAlpha = params.fDstImageInfo.colorInfo().alphaType() != kOpaque_SkAlphaType;
    SkImageInfo yaInfo = params.fDstImageInfo.makeColorType(kAlpha_8_SkColorType)
                                             .makeAlphaType(kPremul_SkAlphaType);
    SkImageInfo uvInfo = yaInfo.makeWH(yaInfo.width()/2, yaInfo.height()/2);
    PixelTransferResult transfers[4];

    float baseM[20];
    SkColorMatrix_RGB2YUV(yuvColorSpace, baseM);
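    // baseM is a 4x5 row-major color matrix: rows 0-2 hold the Y, U, and V coefficients, and row 3
    // (offset 15) is the alpha passthrough row asserted below. Each plane copy below writes one of
    // those rows into the alpha row of an otherwise-zero matrix, so the plane's value lands in the
    // scratch A8 surface's alpha channel.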
    SkMatrix texMatrix = SkMatrix::Translate(-params.fSrcRect.fLeft, -params.fSrcRect.fTop);

    // This matrix generates (r,g,b,a) = (0, 0, 0, y)
    float yM[20];
    std::fill_n(yM, 15, 0.f);
    std::copy_n(baseM + 0, 5, yM + 15);
    if (!copyPlane(yaInfo, "AsyncReadPixelsYPlane", yM, texMatrix, &transfers[0])) {
        return params.fail();
    }

    // No matrix, straight copy of alpha channel
    SkASSERT(baseM[15] == 0 &&
             baseM[16] == 0 &&
             baseM[17] == 0 &&
             baseM[18] == 1 &&
             baseM[19] == 0);
    if (readAlpha &&
        !copyPlane(yaInfo, "AsyncReadPixelsAPlane", nullptr, texMatrix, &transfers[3])) {
        return params.fail();
    }

    // The UV planes are at half resolution compared to Y and A in 4:2:0
    texMatrix.postScale(0.5f, 0.5f);

    // This matrix generates (r,g,b,a) = (0, 0, 0, u)
    float uM[20];
    std::fill_n(uM, 15, 0.f);
    std::copy_n(baseM + 5, 5, uM + 15);
    if (!copyPlane(uvInfo, "AsyncReadPixelsUPlane", uM, texMatrix, &transfers[1])) {
        return params.fail();
    }

    // This matrix generates (r,g,b,a) = (0, 0, 0, v)
    float vM[20];
    std::fill_n(vM, 15, 0.f);
    std::copy_n(baseM + 10, 5, vM + 15);
    if (!copyPlane(uvInfo, "AsyncReadPixelsVPlane", vM, texMatrix, &transfers[2])) {
        return params.fail();
    }

    this->finalizeAsyncReadPixels(std::move(recorder),
                                  {transfers, readAlpha ? 4 : 3},
                                  params.fCallback,
                                  params.fCallbackContext);
}

void Context::finalizeAsyncReadPixels(std::unique_ptr<Recorder> recorder,
                                      SkSpan<PixelTransferResult> transferResults,
                                      SkImage::ReadPixelsCallback callback,
                                      SkImage::ReadPixelsContext callbackContext) {
    // If the async readback work required a Recorder, insert the recording with all of the
    // accumulated work (which includes any copies). Otherwise, for pure copy readbacks,
    // transferPixels() already added the tasks directly to the QueueManager.
    if (recorder) {
        std::unique_ptr<Recording> recording = recorder->snap();
        if (!recording) {
            callback(callbackContext, nullptr);
            return;
        }
        InsertRecordingInfo recordingInfo;
        recordingInfo.fRecording = recording.get();
        if (!this->insertRecording(recordingInfo)) {
            callback(callbackContext, nullptr);
            return;
        }
    }

    // Set up FinishContext and add transfer commands to queue
    struct AsyncReadFinishContext {
        SkImage::ReadPixelsCallback* fClientCallback;
        SkImage::ReadPixelsContext fClientContext;
        ClientMappedBufferManager* fMappedBufferManager;
        std::array<PixelTransferResult, 4> fTransferResults;
    };

    auto finishContext = std::make_unique<AsyncReadFinishContext>();
    finishContext->fClientCallback = callback;
    finishContext->fClientContext = callbackContext;
    finishContext->fMappedBufferManager = fMappedBufferManager.get();

    SkASSERT(transferResults.size() <= std::size(finishContext->fTransferResults));
    skia_private::STArray<4, sk_sp<Buffer>> buffersToAsyncMap;
    for (size_t i = 0; i < transferResults.size(); ++i) {
        finishContext->fTransferResults[i] = std::move(transferResults[i]);
        if (fSharedContext->caps()->bufferMapsAreAsync()) {
            buffersToAsyncMap.push_back(finishContext->fTransferResults[i].fTransferBuffer);
        }
    }

    InsertFinishInfo info;
    info.fFinishedContext = finishContext.release();
    info.fFinishedProc = [](GpuFinishedContext c, CallbackResult status) {
        std::unique_ptr<const AsyncReadFinishContext> context(
                reinterpret_cast<const AsyncReadFinishContext*>(c));
        using AsyncReadResult = skgpu::TAsyncReadResult<Buffer, ContextID, PixelTransferResult>;

        ClientMappedBufferManager* manager = context->fMappedBufferManager;
        std::unique_ptr<AsyncReadResult> result;
        if (status == CallbackResult::kSuccess) {
            result = std::make_unique<AsyncReadResult>(manager->ownerID());
        }
        for (const auto& r : context->fTransferResults) {
            if (!r.fTransferBuffer) {
                break;
            }
            if (result && !result->addTransferResult(r, r.fSize, r.fRowBytes, manager)) {
                result.reset();
            }
            // If we didn't get this buffer into the mapped buffer manager then make sure it gets
            // unmapped if it has a pending or completed async map.
            if (!result && r.fTransferBuffer->isUnmappable()) {
                r.fTransferBuffer->unmap();
            }
        }
        (*context->fClientCallback)(context->fClientContext, std::move(result));
    };

    // If addFinishInfo() fails, it invokes the finish callback automatically, which handles all
    // the required cleanup for us; we just log an error message. The buffers will never be mapped
    // and thus don't need an unmap.
    if (!fQueueManager->addFinishInfo(info, fResourceProvider.get(), buffersToAsyncMap)) {
        SKGPU_LOG_E("Failed to register finish callbacks for asyncReadPixels.");
        return;
    }
}

Context::PixelTransferResult Context::transferPixels(Recorder* recorder,
                                                     const TextureProxy* srcProxy,
                                                     const SkColorInfo& srcColorInfo,
                                                     const SkColorInfo& dstColorInfo,
                                                     const SkIRect& srcRect) {
    SkASSERT(SkIRect::MakeSize(srcProxy->dimensions()).contains(srcRect));
    SkASSERT(SkColorInfoIsValid(dstColorInfo));

    const Caps* caps = fSharedContext->caps();
    if (!srcProxy || !caps->supportsReadPixels(srcProxy->textureInfo())) {
        return {};
    }

    const SkColorType srcColorType = srcColorInfo.colorType();
    SkColorType supportedColorType;
    bool isRGB888Format;
    std::tie(supportedColorType, isRGB888Format) =
            caps->supportedReadPixelsColorType(srcColorType,
                                               srcProxy->textureInfo(),
                                               dstColorInfo.colorType());
    if (supportedColorType == kUnknown_SkColorType) {
        return {};
    }

    // Fail if the read color type does not have all of dstCT's color channels and those missing
    // color channels are present in the src.
    uint32_t dstChannels = SkColorTypeChannelFlags(dstColorInfo.colorType());
    uint32_t legalReadChannels = SkColorTypeChannelFlags(supportedColorType);
    uint32_t srcChannels = SkColorTypeChannelFlags(srcColorType);
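    // E.g. if the src and dst are kRGBA_8888 but the supported read type is kRGB_888x: alpha
    // exists in the source and is requested by the destination but cannot be read back, so fail
    // rather than silently returning wrong alpha.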
    if ((~legalReadChannels & dstChannels) & srcChannels) {
        return {};
    }

    int bpp = isRGB888Format ? 3 : SkColorTypeBytesPerPixel(supportedColorType);
    size_t rowBytes = caps->getAlignedTextureDataRowBytes(bpp * srcRect.width());
    size_t size = SkAlignTo(rowBytes * srcRect.height(), caps->requiredTransferBufferAlignment());
    sk_sp<Buffer> buffer = fResourceProvider->findOrCreateBuffer(
            size, BufferType::kXferGpuToCpu, AccessPattern::kHostVisible, "TransferToCpu");
    if (!buffer) {
        return {};
    }

    // Set up the copy task. Since we always use a new buffer, the offset can be 0 and we don't
    // need to worry about aligning it to the required transfer buffer alignment.
    sk_sp<CopyTextureToBufferTask> copyTask = CopyTextureToBufferTask::Make(sk_ref_sp(srcProxy),
                                                                            srcRect,
                                                                            buffer,
                                                                            /*bufferOffset=*/0,
                                                                            rowBytes);
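    // With no Recorder this is a pure copy readback, so tasks go straight to the QueueManager;
    // otherwise they are collected on the Recorder so they execute along with the drawing that
    // prepared the copied textures.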
    const bool addTasksDirectly = !SkToBool(recorder);
    Protected contextIsProtected = fSharedContext->isProtected();
    if (!copyTask || (addTasksDirectly && !fQueueManager->addTask(copyTask.get(),
                                                                  this,
                                                                  contextIsProtected))) {
        return {};
    } else if (!addTasksDirectly) {
        recorder->priv().add(std::move(copyTask));
    }
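    // SynchronizeToCpuTask makes the completed GPU->CPU transfer visible to the host before the
    // finish callbacks read from the mapped buffer.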
    sk_sp<SynchronizeToCpuTask> syncTask = SynchronizeToCpuTask::Make(buffer);
    if (!syncTask || (addTasksDirectly && !fQueueManager->addTask(syncTask.get(),
                                                                  this,
                                                                  contextIsProtected))) {
        return {};
    } else if (!addTasksDirectly) {
        recorder->priv().add(std::move(syncTask));
    }

    PixelTransferResult result;
    result.fTransferBuffer = std::move(buffer);
    result.fSize = srcRect.size();
    // srcColorInfo describes the texture; readColorInfo describes the result of the
    // copy-to-buffer, which may differ; dstColorInfo is what we have to transform it into when
    // invoking the async callbacks.
    SkColorInfo readColorInfo = srcColorInfo.makeColorType(supportedColorType);
    if (readColorInfo != dstColorInfo || isRGB888Format) {
        SkISize dims = srcRect.size();
        SkImageInfo srcInfo = SkImageInfo::Make(dims, readColorInfo);
        SkImageInfo dstInfo = SkImageInfo::Make(dims, dstColorInfo);
        result.fRowBytes = dstInfo.minRowBytes();
        result.fPixelConverter = [dstInfo, srcInfo, rowBytes, isRGB888Format](
                void* dst, const void* src) {
            SkAutoPixmapStorage temp;
            size_t srcRowBytes = rowBytes;
            if (isRGB888Format) {
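                // The buffer holds packed 3-byte RGB pixels, which SkConvertPixels can't consume
                // directly. Expand each row into a temporary 4-byte-per-pixel pixmap, filling in
                // an opaque alpha byte, and convert from that instead.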
                temp.alloc(srcInfo);
                size_t tRowBytes = temp.rowBytes();
                auto* sRow = reinterpret_cast<const char*>(src);
                auto* tRow = reinterpret_cast<char*>(temp.writable_addr());
                for (int y = 0; y < srcInfo.height(); ++y, sRow += srcRowBytes, tRow += tRowBytes) {
                    for (int x = 0; x < srcInfo.width(); ++x) {
                        auto s = sRow + x*3;
                        auto t = tRow + x*sizeof(uint32_t);
                        memcpy(t, s, 3);
                        t[3] = static_cast<char>(0xFF);
                    }
                }
                src = temp.addr();
                srcRowBytes = tRowBytes;
            }
            SkAssertResult(SkConvertPixels(dstInfo, dst, dstInfo.minRowBytes(),
                                           srcInfo, src, srcRowBytes));
        };
    } else {
        result.fRowBytes = rowBytes;
    }

    return result;
}

void Context::checkForFinishedWork(SyncToCpu syncToCpu) {
    ASSERT_SINGLE_OWNER

    fQueueManager->checkForFinishedWork(syncToCpu);
    fMappedBufferManager->process();
    // Process the return queue periodically to make sure it doesn't get too big
    fResourceProvider->forceProcessReturnedResources();
}

void Context::checkAsyncWorkCompletion() {
    this->checkForFinishedWork(SyncToCpu::kNo);
}

void Context::deleteBackendTexture(const BackendTexture& texture) {
    ASSERT_SINGLE_OWNER

    if (!texture.isValid() || texture.backend() != this->backend()) {
        return;
    }
    fResourceProvider->deleteBackendTexture(texture);
}

void Context::freeGpuResources() {
    ASSERT_SINGLE_OWNER

    this->checkAsyncWorkCompletion();

    fResourceProvider->freeGpuResources();
}

void Context::performDeferredCleanup(std::chrono::milliseconds msNotUsed) {
    ASSERT_SINGLE_OWNER

    this->checkAsyncWorkCompletion();

    auto purgeTime = skgpu::StdSteadyClock::now() - msNotUsed;
    fResourceProvider->purgeResourcesNotUsedSince(purgeTime);
}

size_t Context::currentBudgetedBytes() const {
    ASSERT_SINGLE_OWNER
    return fResourceProvider->getResourceCacheCurrentBudgetedBytes();
}

size_t Context::currentPurgeableBytes() const {
    ASSERT_SINGLE_OWNER
    return fResourceProvider->getResourceCacheCurrentPurgeableBytes();
}

size_t Context::maxBudgetedBytes() const {
    ASSERT_SINGLE_OWNER
    return fResourceProvider->getResourceCacheLimit();
}

void Context::setMaxBudgetedBytes(size_t bytes) {
    ASSERT_SINGLE_OWNER
    fResourceProvider->setResourceCacheLimit(bytes);
}

void Context::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const {
    ASSERT_SINGLE_OWNER
    fResourceProvider->dumpMemoryStatistics(traceMemoryDump);
    // TODO: What is the Graphite equivalent of the text blob cache, and how do we print out its
    // used bytes here (see the Ganesh implementation)?
}

bool Context::isDeviceLost() const {
    return fSharedContext->isDeviceLost();
}

int Context::maxTextureSize() const {
    return fSharedContext->caps()->maxTextureSize();
}

bool Context::supportsProtectedContent() const {
    return fSharedContext->isProtected() == Protected::kYes;
}

GpuStatsFlags Context::supportedGpuStats() const {
    return fSharedContext->caps()->supportedGpuStats();
}

///////////////////////////////////////////////////////////////////////////////////

#if defined(GPU_TEST_UTILS)
void Context::deregisterRecorder(const Recorder* recorder) {
    SkAutoMutexExclusive lock(fTestingLock);
    for (auto it = fTrackedRecorders.begin();
         it != fTrackedRecorders.end();
         it++) {
        if (*it == recorder) {
            fTrackedRecorders.erase(it);
            return;
        }
    }
}

bool ContextPriv::readPixels(const SkPixmap& pm,
                             const TextureProxy* textureProxy,
                             const SkImageInfo& srcImageInfo,
                             int srcX, int srcY) {
    auto rect = SkIRect::MakeXYWH(srcX, srcY, pm.width(), pm.height());
    struct AsyncContext {
        bool fCalled = false;
        std::unique_ptr<const SkImage::AsyncReadResult> fResult;
    } asyncContext;

    auto asyncCallback = [](void* c, std::unique_ptr<const SkImage::AsyncReadResult> out) {
        auto context = static_cast<AsyncContext*>(c);
        context->fResult = std::move(out);
        context->fCalled = true;
    };

    const SkColorInfo& srcColorInfo = srcImageInfo.colorInfo();

    // This is roughly equivalent to the logic used in asyncRescaleAndRead(SkSurface) to either
    // try the image-based readback (with copy-as-draw fallbacks) or read the texture directly
    // if it supports reading.
    if (!fContext->fSharedContext->caps()->supportsReadPixels(textureProxy->textureInfo())) {
        // Since this is a synchronous testing-only API, callers should have flushed any pending
        // work that modifies this texture proxy already. This means we don't have to worry about
        // re-wrapping the proxy in a new Image (which wouldn't be connected to any Device, etc.).
        sk_sp<SkImage> image{new Image(TextureProxyView(sk_ref_sp(textureProxy)), srcColorInfo)};
        Context::AsyncParams<SkImage> params {image.get(), rect, pm.info(),
                                              asyncCallback, &asyncContext};
        if (!params.validate()) {
            params.fail();
        } else {
            fContext->asyncReadPixels(/*recorder=*/nullptr, params);
        }
    } else {
        fContext->asyncReadTexture(/*recorder=*/nullptr,
                                   {textureProxy, rect, pm.info(), asyncCallback, &asyncContext},
                                   srcImageInfo.colorInfo());
    }

    if (fContext->fSharedContext->caps()->allowCpuSync()) {
        fContext->submit(SyncToCpu::kYes);
    } else {
        fContext->submit(SyncToCpu::kNo);
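        // When CPU sync is disallowed (e.g. ContextOptions::fNeverYieldToWebGPU on Dawn), poll
        // deviceTick() until the async readback callback fires.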
        if (fContext->fSharedContext->backend() == BackendApi::kDawn) {
            while (!asyncContext.fCalled) {
                fContext->fSharedContext->deviceTick(fContext);
            }
        } else {
            SK_ABORT("Only Dawn supports non-syncing contexts.");
        }
    }
    SkASSERT(asyncContext.fCalled);
    if (!asyncContext.fResult) {
        return false;
    }
    SkRectMemcpy(pm.writable_addr(), pm.rowBytes(), asyncContext.fResult->data(0),
                 asyncContext.fResult->rowBytes(0), pm.info().minRowBytes(),
                 pm.height());
    return true;
}

bool ContextPriv::supportsPathRendererStrategy(PathRendererStrategy strategy) {
    AtlasProvider::PathAtlasFlagsBitMask pathAtlasFlags =
            AtlasProvider::QueryPathAtlasSupport(this->caps());
    switch (strategy) {
        case PathRendererStrategy::kDefault:
            return true;
        case PathRendererStrategy::kComputeAnalyticAA:
        case PathRendererStrategy::kComputeMSAA16:
        case PathRendererStrategy::kComputeMSAA8:
            return SkToBool(pathAtlasFlags & AtlasProvider::PathAtlasFlags::kCompute);
        case PathRendererStrategy::kRasterAA:
            return SkToBool(pathAtlasFlags & AtlasProvider::PathAtlasFlags::kRaster);
        case PathRendererStrategy::kTessellation:
            return true;
    }

    return false;
}

#endif // GPU_TEST_UTILS

///////////////////////////////////////////////////////////////////////////////////

std::unique_ptr<Context> ContextCtorAccessor::MakeContext(
        sk_sp<SharedContext> sharedContext,
        std::unique_ptr<QueueManager> queueManager,
        const ContextOptions& options) {
    auto context = std::unique_ptr<Context>(new Context(std::move(sharedContext),
                                                        std::move(queueManager),
                                                        options));
    if (context && context->finishInitialization()) {
        return context;
    } else {
        return nullptr;
    }
}

} // namespace skgpu::graphite