/*
 * Copyright 2021 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "include/gpu/graphite/Context.h"

#include "include/core/SkColorSpace.h"
#include "include/core/SkPathTypes.h"
#include "include/core/SkTraceMemoryDump.h"
#include "include/effects/SkRuntimeEffect.h"
#include "include/gpu/graphite/BackendTexture.h"
#include "include/gpu/graphite/PrecompileContext.h"
#include "include/gpu/graphite/Recorder.h"
#include "include/gpu/graphite/Recording.h"
#include "include/gpu/graphite/Surface.h"
#include "include/gpu/graphite/TextureInfo.h"
#include "include/private/base/SkOnce.h"
#include "src/base/SkRectMemcpy.h"
#include "src/core/SkAutoPixmapStorage.h"
#include "src/core/SkColorFilterPriv.h"
#include "src/core/SkConvertPixels.h"
#include "src/core/SkTraceEvent.h"
#include "src/core/SkYUVMath.h"
#include "src/gpu/RefCntedCallback.h"
#include "src/gpu/graphite/AtlasProvider.h"
#include "src/gpu/graphite/BufferManager.h"
#include "src/gpu/graphite/Caps.h"
#include "src/gpu/graphite/ClientMappedBufferManager.h"
#include "src/gpu/graphite/CommandBuffer.h"
#include "src/gpu/graphite/ContextPriv.h"
#include "src/gpu/graphite/DrawAtlas.h"
#include "src/gpu/graphite/GlobalCache.h"
#include "src/gpu/graphite/GraphicsPipeline.h"
#include "src/gpu/graphite/GraphicsPipelineDesc.h"
#include "src/gpu/graphite/Image_Base_Graphite.h"
#include "src/gpu/graphite/Image_Graphite.h"
#include "src/gpu/graphite/KeyContext.h"
#include "src/gpu/graphite/Log.h"
#include "src/gpu/graphite/QueueManager.h"
#include "src/gpu/graphite/RecorderPriv.h"
#include "src/gpu/graphite/RecordingPriv.h"
#include "src/gpu/graphite/Renderer.h"
#include "src/gpu/graphite/RendererProvider.h"
#include "src/gpu/graphite/ResourceProvider.h"
#include "src/gpu/graphite/RuntimeEffectDictionary.h"
#include "src/gpu/graphite/ShaderCodeDictionary.h"
#include "src/gpu/graphite/SharedContext.h"
#include "src/gpu/graphite/Surface_Graphite.h"
#include "src/gpu/graphite/TextureProxyView.h"
#include "src/gpu/graphite/TextureUtils.h"
#include "src/gpu/graphite/task/CopyTask.h"
#include "src/gpu/graphite/task/SynchronizeToCpuTask.h"
#include "src/gpu/graphite/task/UploadTask.h"
#include "src/image/SkSurface_Base.h"
#include "src/sksl/SkSLGraphiteModules.h"

#if defined(GPU_TEST_UTILS)
#include "src/gpu/graphite/ContextOptionsPriv.h"
#endif

namespace skgpu::graphite {

#define ASSERT_SINGLE_OWNER SKGPU_ASSERT_SINGLE_OWNER(this->singleOwner())

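// ContextIDs are non-zero and unique across all Contexts in the process. The retry loop below
// skips SK_InvalidUniqueID in case the 32-bit counter ever wraps around.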
Context::ContextID Context::ContextID::Next() {
    static std::atomic<uint32_t> nextID{1};
    uint32_t id;
    do {
        id = nextID.fetch_add(1, std::memory_order_relaxed);
    } while (id == SK_InvalidUniqueID);
    return ContextID(id);
}

//--------------------------------------------------------------------------------------------------
Context::Context(sk_sp<SharedContext> sharedContext,
                 std::unique_ptr<QueueManager> queueManager,
                 const ContextOptions& options)
        : fSharedContext(std::move(sharedContext))
        , fQueueManager(std::move(queueManager))
        , fContextID(ContextID::Next()) {
    // We need to move the Graphite SkSL code into the central SkSL data loader at least once
    // (but preferably only once) before we try to use it. We assume that there's no way to
    // use the SkSL code without making a context, so we initialize it here.
    static SkOnce once;
    once([] { SkSL::Loader::SetGraphiteModuleData(SkSL::Loader::GetGraphiteModules()); });

    // We have to create this outside the initializer list because we need to pass in the
    // Context's SingleOwner object, and it is declared last.
    fResourceProvider = fSharedContext->makeResourceProvider(&fSingleOwner,
                                                             SK_InvalidGenID,
                                                             options.fGpuBudgetInBytes);
    fMappedBufferManager = std::make_unique<ClientMappedBufferManager>(this->contextID());
#if defined(GPU_TEST_UTILS)
    if (options.fOptionsPriv) {
        fStoreContextRefInRecorder = options.fOptionsPriv->fStoreContextRefInRecorder;
    }
#endif
}

Context::~Context() {
#if defined(GPU_TEST_UTILS)
    SkAutoMutexExclusive lock(fTestingLock);
    for (auto& recorder : fTrackedRecorders) {
        recorder->priv().setContext(nullptr);
    }
#endif
}

bool Context::finishInitialization() {
    SkASSERT(!fSharedContext->rendererProvider()); // Can only initialize once

    StaticBufferManager bufferManager{fResourceProvider.get(), fSharedContext->caps()};
    std::unique_ptr<RendererProvider> renderers{
            new RendererProvider(fSharedContext->caps(), &bufferManager)};

    auto result = bufferManager.finalize(this, fQueueManager.get(), fSharedContext->globalCache());
    if (result == StaticBufferManager::FinishResult::kFailure) {
        // If something went wrong filling out the static vertex buffers, any Renderer that would
        // use them will draw incorrectly, so it's better to fail the Context creation.
        return false;
    }
    if (result == StaticBufferManager::FinishResult::kSuccess &&
        !fQueueManager->submitToGpu()) {
        SKGPU_LOG_W("Failed to submit initial command buffer for Context creation.\n");
        return false;
    } // else result was kNoWork so skip submitting to the GPU
    fSharedContext->setRendererProvider(std::move(renderers));
    return true;
}

BackendApi Context::backend() const { return fSharedContext->backend(); }

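// Typical client flow (a sketch, not the only valid usage): create a Recorder from the Context,
// record draws through surfaces made with that Recorder, snap a Recording, and hand it back to
// the Context for submission:
//
//     std::unique_ptr<Recorder> recorder = context->makeRecorder();
//     // ... draw into SkSurfaces created with this recorder ...
//     std::unique_ptr<Recording> recording = recorder->snap();
//     InsertRecordingInfo info;
//     info.fRecording = recording.get();
//     context->insertRecording(info);
//     context->submit();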
std::unique_ptr<Recorder> Context::makeRecorder(const RecorderOptions& options) {
    ASSERT_SINGLE_OWNER

    // This is a client-owned Recorder so pass a null context so it creates its own ResourceProvider
    auto recorder = std::unique_ptr<Recorder>(new Recorder(fSharedContext, options, nullptr));
#if defined(GPU_TEST_UTILS)
    if (fStoreContextRefInRecorder) {
        recorder->priv().setContext(this);
    }
#endif
    return recorder;
}

std::unique_ptr<PrecompileContext> Context::makePrecompileContext() {
    ASSERT_SINGLE_OWNER

    return std::unique_ptr<PrecompileContext>(new PrecompileContext(fSharedContext));
}

std::unique_ptr<Recorder> Context::makeInternalRecorder() const {
    ASSERT_SINGLE_OWNER

    // Unlike makeRecorder(), this Recorder is meant to be short-lived and go
    // away before a Context public API function returns to the caller. As such
    // it shares the Context's resource provider (no separate budget) and does
    // not get tracked. The internal drawing performed with an internal recorder
    // should not require a client image provider.
    return std::unique_ptr<Recorder>(new Recorder(fSharedContext, {}, this));
}

bool Context::insertRecording(const InsertRecordingInfo& info) {
    ASSERT_SINGLE_OWNER

    return fQueueManager->addRecording(info, this);
}

bool Context::submit(SyncToCpu syncToCpu) {
    ASSERT_SINGLE_OWNER

    if (syncToCpu == SyncToCpu::kYes && !fSharedContext->caps()->allowCpuSync()) {
        SKGPU_LOG_E("SyncToCpu::kYes not supported with ContextOptions::fNeverYieldToWebGPU. "
                    "The parameter is ignored and no synchronization will occur.");
        syncToCpu = SyncToCpu::kNo;
    }
    bool success = fQueueManager->submitToGpu();
    this->checkForFinishedWork(syncToCpu);
    return success;
}

bool Context::hasUnfinishedGpuWork() const { return fQueueManager->hasUnfinishedGpuWork(); }

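// Bundles the source (an SkImage or a TextureProxy), the rectangle to read, the desired
// destination SkImageInfo, and the client callback for the async-readback entry points, so the
// rescale/copy/readback stages can pass the same parameters down the pipeline.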
template <typename SrcPixels>
struct Context::AsyncParams {
    const SrcPixels* fSrcImage;
    SkIRect fSrcRect;
    SkImageInfo fDstImageInfo;

    SkImage::ReadPixelsCallback* fCallback;
    SkImage::ReadPixelsContext fCallbackContext;

    template <typename S>
    AsyncParams<S> withNewSource(const S* newPixels, const SkIRect& newSrcRect) const {
        return AsyncParams<S>{newPixels, newSrcRect,
                              fDstImageInfo, fCallback, fCallbackContext};
    }

    void fail() const {
        (*fCallback)(fCallbackContext, nullptr);
    }

    bool validate() const {
        if (!fSrcImage) {
            return false;
        }
        if (fSrcImage->isProtected()) {
            return false;
        }
        if (!SkIRect::MakeSize(fSrcImage->dimensions()).contains(fSrcRect)) {
            return false;
        }
        if (!SkImageInfoIsValid(fDstImageInfo)) {
            return false;
        }
        return true;
    }
};

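// Shared driver for the asyncRescaleAndRead* entry points: validate the parameters, read back
// directly when no rescale is needed, and otherwise rescale with an internal Recorder before
// forwarding to the format-specific read function ('asyncRead').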
template <typename ReadFn, typename... ExtraArgs>
void Context::asyncRescaleAndReadImpl(ReadFn Context::* asyncRead,
                                      SkImage::RescaleGamma rescaleGamma,
                                      SkImage::RescaleMode rescaleMode,
                                      const AsyncParams<SkImage>& params,
                                      ExtraArgs... extraParams) {
    if (!params.validate()) {
        return params.fail();
    }

    if (params.fSrcRect.size() == params.fDstImageInfo.dimensions()) {
        // No need to rescale so do a direct readback
        return (this->*asyncRead)(/*recorder=*/nullptr, params, extraParams...);
    }

    // Make a recorder to collect the rescale drawing commands and the copy commands
    std::unique_ptr<Recorder> recorder = this->makeInternalRecorder();
    sk_sp<SkImage> scaledImage = RescaleImage(recorder.get(),
                                              params.fSrcImage,
                                              params.fSrcRect,
                                              params.fDstImageInfo,
                                              rescaleGamma,
                                              rescaleMode);
    if (!scaledImage) {
        SKGPU_LOG_W("AsyncRead failed because rescaling failed");
        return params.fail();
    }
    (this->*asyncRead)(std::move(recorder),
                       params.withNewSource(scaledImage.get(), params.fDstImageInfo.bounds()),
                       extraParams...);
}

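// Example client usage (a sketch; 'image', 'dstInfo', 'srcRect', and the callback are assumed to
// be supplied by the caller, and error handling is elided). The callback is invoked during a
// later submit() or checkAsyncWorkCompletion() once the GPU work finishes:
//
//     context->asyncRescaleAndReadPixels(image.get(), dstInfo, srcRect,
//                                        SkImage::RescaleGamma::kSrc,
//                                        SkImage::RescaleMode::kLinear,
//                                        myReadPixelsCallback, myCallbackContext);
//     context->submit();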
void Context::asyncRescaleAndReadPixels(const SkImage* src,
                                        const SkImageInfo& dstImageInfo,
                                        const SkIRect& srcRect,
                                        SkImage::RescaleGamma rescaleGamma,
                                        SkImage::RescaleMode rescaleMode,
                                        SkImage::ReadPixelsCallback callback,
                                        SkImage::ReadPixelsContext callbackContext) {
    this->asyncRescaleAndReadImpl(&Context::asyncReadPixels,
                                  rescaleGamma, rescaleMode,
                                  {src, srcRect, dstImageInfo, callback, callbackContext});
}

void Context::asyncRescaleAndReadPixels(const SkSurface* src,
                                        const SkImageInfo& dstImageInfo,
                                        const SkIRect& srcRect,
                                        SkImage::RescaleGamma rescaleGamma,
                                        SkImage::RescaleMode rescaleMode,
                                        SkImage::ReadPixelsCallback callback,
                                        SkImage::ReadPixelsContext callbackContext) {
    sk_sp<SkImage> surfaceImage = SkSurfaces::AsImage(sk_ref_sp(src));
    if (!surfaceImage) {
        // The source surface is not texturable, so the only supported readback is if there's
        // no rescaling
        if (src && asConstSB(src)->isGraphiteBacked() &&
            srcRect.size() == dstImageInfo.dimensions()) {
            TextureProxy* proxy = static_cast<const Surface*>(src)->backingTextureProxy();
            return this->asyncReadTexture(/*recorder=*/nullptr,
                                          {proxy, srcRect, dstImageInfo, callback, callbackContext},
                                          src->imageInfo().colorInfo());
        }
        // else fall through and let asyncRescaleAndReadPixels() invoke the callback when it
        // detects the null image.
    }
    this->asyncRescaleAndReadPixels(surfaceImage.get(),
                                    dstImageInfo,
                                    srcRect,
                                    rescaleGamma,
                                    rescaleMode,
                                    callback,
                                    callbackContext);
}

void Context::asyncReadPixels(std::unique_ptr<Recorder> recorder,
                              const AsyncParams<SkImage>& params) {
    TRACE_EVENT2("skia.gpu", TRACE_FUNC,
                 "width", params.fSrcRect.width(),
                 "height", params.fSrcRect.height());
    SkASSERT(params.validate()); // all paths to here are already validated
    SkASSERT(params.fSrcRect.size() == params.fDstImageInfo.dimensions());

    const Caps* caps = fSharedContext->caps();
    TextureProxyView view = AsView(params.fSrcImage);
    if (!view || !caps->supportsReadPixels(view.proxy()->textureInfo())) {
        // This is either a YUVA image (null view) or the texture can't be read directly, so
        // perform a draw into a compatible texture format and/or flatten any YUVA planes to RGBA.
        if (!recorder) {
            recorder = this->makeInternalRecorder();
        }
        sk_sp<SkImage> flattened = CopyAsDraw(recorder.get(),
                                              params.fSrcImage,
                                              params.fSrcRect,
                                              params.fDstImageInfo.colorInfo(),
                                              Budgeted::kYes,
                                              Mipmapped::kNo,
                                              SkBackingFit::kApprox,
                                              "AsyncReadPixelsFallbackTexture");
        if (!flattened) {
            SKGPU_LOG_W("AsyncRead failed because copy-as-drawing into a readable format failed");
            return params.fail();
        }
        // Use the original fSrcRect and not flattened's size since it's approx-fit.
        return this->asyncReadPixels(std::move(recorder),
                                     params.withNewSource(flattened.get(),
                                                          SkIRect::MakeSize(params.fSrcRect.size())));
    }

    // Can copy directly from the image's texture
    this->asyncReadTexture(std::move(recorder), params.withNewSource(view.proxy(), params.fSrcRect),
                           params.fSrcImage->imageInfo().colorInfo());
}

void Context::asyncReadTexture(std::unique_ptr<Recorder> recorder,
                               const AsyncParams<TextureProxy>& params,
                               const SkColorInfo& srcColorInfo) {
    SkASSERT(params.fSrcRect.size() == params.fDstImageInfo.dimensions());

    // We can get here directly from surface or testing-only read pixels, so re-validate
    if (!params.validate()) {
        return params.fail();
    }
    PixelTransferResult transferResult = this->transferPixels(recorder.get(),
                                                              params.fSrcImage,
                                                              srcColorInfo,
                                                              params.fDstImageInfo.colorInfo(),
                                                              params.fSrcRect);

    if (!transferResult.fTransferBuffer) {
        // TODO: try to do a synchronous readPixels instead
        return params.fail();
    }

    this->finalizeAsyncReadPixels(std::move(recorder),
                                  {&transferResult, 1},
                                  params.fCallback,
                                  params.fCallbackContext);
}

void Context::asyncRescaleAndReadPixelsYUV420(const SkImage* src,
                                              SkYUVColorSpace yuvColorSpace,
                                              sk_sp<SkColorSpace> dstColorSpace,
                                              const SkIRect& srcRect,
                                              const SkISize& dstSize,
                                              SkImage::RescaleGamma rescaleGamma,
                                              SkImage::RescaleMode rescaleMode,
                                              SkImage::ReadPixelsCallback callback,
                                              SkImage::ReadPixelsContext callbackContext) {
    // Use kOpaque alpha type to signal that we don't read back the alpha channel
    SkImageInfo dstImageInfo = SkImageInfo::Make(dstSize,
                                                 kRGBA_8888_SkColorType,
                                                 kOpaque_SkAlphaType,
                                                 std::move(dstColorSpace));
    this->asyncRescaleAndReadImpl(&Context::asyncReadPixelsYUV420,
                                  rescaleGamma, rescaleMode,
                                  {src, srcRect, dstImageInfo, callback, callbackContext},
                                  yuvColorSpace);
}

void Context::asyncRescaleAndReadPixelsYUV420(const SkSurface* src,
                                              SkYUVColorSpace yuvColorSpace,
                                              sk_sp<SkColorSpace> dstColorSpace,
                                              const SkIRect& srcRect,
                                              const SkISize& dstSize,
                                              SkImage::RescaleGamma rescaleGamma,
                                              SkImage::RescaleMode rescaleMode,
                                              SkImage::ReadPixelsCallback callback,
                                              SkImage::ReadPixelsContext callbackContext) {
    // YUV[A] readback requires the surface to be texturable since the plane conversion is
    // performed by draws. If AsImage() returns null, the image version of
    // asyncRescaleAndReadPixelsYUV420 will automatically fail.
    // TODO: Is it worth performing an extra copy from 'surface' into a texture in order to
    // succeed?
    sk_sp<SkImage> surfaceImage = SkSurfaces::AsImage(sk_ref_sp(src));
    this->asyncRescaleAndReadPixelsYUV420(surfaceImage.get(),
                                          yuvColorSpace,
                                          dstColorSpace,
                                          srcRect,
                                          dstSize,
                                          rescaleGamma,
                                          rescaleMode,
                                          callback,
                                          callbackContext);
}

void Context::asyncRescaleAndReadPixelsYUVA420(const SkImage* src,
                                               SkYUVColorSpace yuvColorSpace,
                                               sk_sp<SkColorSpace> dstColorSpace,
                                               const SkIRect& srcRect,
                                               const SkISize& dstSize,
                                               SkImage::RescaleGamma rescaleGamma,
                                               SkImage::RescaleMode rescaleMode,
                                               SkImage::ReadPixelsCallback callback,
                                               SkImage::ReadPixelsContext callbackContext) {
    SkImageInfo dstImageInfo = SkImageInfo::Make(dstSize,
                                                 kRGBA_8888_SkColorType,
                                                 kPremul_SkAlphaType,
                                                 std::move(dstColorSpace));
    this->asyncRescaleAndReadImpl(&Context::asyncReadPixelsYUV420,
                                  rescaleGamma, rescaleMode,
                                  {src, srcRect, dstImageInfo, callback, callbackContext},
                                  yuvColorSpace);
}

void Context::asyncRescaleAndReadPixelsYUVA420(const SkSurface* src,
                                               SkYUVColorSpace yuvColorSpace,
                                               sk_sp<SkColorSpace> dstColorSpace,
                                               const SkIRect& srcRect,
                                               const SkISize& dstSize,
                                               SkImage::RescaleGamma rescaleGamma,
                                               SkImage::RescaleMode rescaleMode,
                                               SkImage::ReadPixelsCallback callback,
                                               SkImage::ReadPixelsContext callbackContext) {
    sk_sp<SkImage> surfaceImage = SkSurfaces::AsImage(sk_ref_sp(src));
    this->asyncRescaleAndReadPixelsYUVA420(surfaceImage.get(),
                                           yuvColorSpace,
                                           dstColorSpace,
                                           srcRect,
                                           dstSize,
                                           rescaleGamma,
                                           rescaleMode,
                                           callback,
                                           callbackContext);
}

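// Extracts Y, U, V (and optionally A) planes by drawing the source image into scratch A8
// surfaces with RGB->YUV color-matrix filters (the A plane is a straight alpha copy), then
// transferring each plane to a CPU-mapped buffer. The U and V planes are rendered at half
// resolution, per 4:2:0 chroma subsampling.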
void Context::asyncReadPixelsYUV420(std::unique_ptr<Recorder> recorder,
                                    const AsyncParams<SkImage>& params,
                                    SkYUVColorSpace yuvColorSpace) {
    TRACE_EVENT2("skia.gpu", TRACE_FUNC,
                 "width", params.fSrcRect.width(),
                 "height", params.fSrcRect.height());
    // This is only called by asyncRescaleAndReadImpl which already validates its parameters
    SkASSERT(params.validate());
    SkASSERT(params.fSrcRect.size() == params.fDstImageInfo.dimensions());

    // The planes are always extracted via drawing, so create the Recorder if there isn't one yet.
    if (!recorder) {
        recorder = this->makeInternalRecorder();
    }

    // copyPlane renders the source image into an A8 image and sets up a transfer stored in 'result'
    auto copyPlane = [&](SkImageInfo planeInfo,
                         std::string_view label,
                         float rgb2yuv[20],
                         const SkMatrix& texMatrix,
                         PixelTransferResult* result) {
        sk_sp<Surface> dstSurface = Surface::MakeScratch(recorder.get(),
                                                         planeInfo,
                                                         std::move(label),
                                                         Budgeted::kYes,
                                                         Mipmapped::kNo,
                                                         SkBackingFit::kApprox);
        if (!dstSurface) {
            return false;
        }

        // Render the plane defined by rgb2yuv from srcImage into dstSurface
        SkPaint paint;
        const SkSamplingOptions sampling(SkFilterMode::kLinear, SkMipmapMode::kNone);
        sk_sp<SkShader> imgShader = params.fSrcImage->makeShader(
                SkTileMode::kClamp, SkTileMode::kClamp, sampling, texMatrix);
        paint.setShader(std::move(imgShader));
        paint.setBlendMode(SkBlendMode::kSrc);

        if (rgb2yuv) {
            // NOTE: The dstSurface's color space is set to the requested RGB dstColorSpace, so
            // the rendered image is automatically converted to that RGB color space before the
            // RGB->YUV color filter is evaluated, putting the plane data into the alpha channel.
            paint.setColorFilter(SkColorFilters::Matrix(rgb2yuv));
        }

        SkCanvas* canvas = dstSurface->getCanvas();
        canvas->drawPaint(paint);

        // Manually flush the surface before transferPixels() is called to ensure the rendering
        // operations run before the CopyTextureToBuffer task.
        Flush(dstSurface);
        // Must use planeInfo.bounds() for srcRect since dstSurface is kApprox-fit.
        *result = this->transferPixels(recorder.get(),
                                       dstSurface->backingTextureProxy(),
                                       dstSurface->imageInfo().colorInfo(),
                                       planeInfo.colorInfo(),
                                       planeInfo.bounds());
        return SkToBool(result->fTransferBuffer);
    };

    // Set up draws and transfers. This interleaves the drawing to a plane and the copy to the
    // transfer buffer, which will allow the scratch A8 surface to be reused for each plane.
    // TODO: Use one transfer buffer for all three planes to reduce map/unmap cost?
    const bool readAlpha = params.fDstImageInfo.colorInfo().alphaType() != kOpaque_SkAlphaType;
    SkImageInfo yaInfo = params.fDstImageInfo.makeColorType(kAlpha_8_SkColorType)
                                             .makeAlphaType(kPremul_SkAlphaType);
    SkImageInfo uvInfo = yaInfo.makeWH(yaInfo.width()/2, yaInfo.height()/2);
    PixelTransferResult transfers[4];

    float baseM[20];
    SkColorMatrix_RGB2YUV(yuvColorSpace, baseM);
    SkMatrix texMatrix = SkMatrix::Translate(-params.fSrcRect.fLeft, -params.fSrcRect.fTop);

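    // baseM is a row-major 4x5 color matrix: rows of (R, G, B, A, bias) coefficients producing
    // Y, U, V, and A respectively. Each per-plane matrix below copies one of those rows into the
    // alpha-output row (offset 15) so the plane's value lands in the A8 render target.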
    // This matrix generates (r,g,b,a) = (0, 0, 0, y)
    float yM[20];
    std::fill_n(yM, 15, 0.f);
    std::copy_n(baseM + 0, 5, yM + 15);
    if (!copyPlane(yaInfo, "AsyncReadPixelsYPlane", yM, texMatrix, &transfers[0])) {
        return params.fail();
    }

    // No matrix, straight copy of alpha channel
    SkASSERT(baseM[15] == 0 &&
             baseM[16] == 0 &&
             baseM[17] == 0 &&
             baseM[18] == 1 &&
             baseM[19] == 0);
    if (readAlpha &&
        !copyPlane(yaInfo, "AsyncReadPixelsAPlane", nullptr, texMatrix, &transfers[3])) {
        return params.fail();
    }

    // The UV planes are at half resolution compared to Y and A in 4:2:0
    texMatrix.postScale(0.5f, 0.5f);

    // This matrix generates (r,g,b,a) = (0, 0, 0, u)
    float uM[20];
    std::fill_n(uM, 15, 0.f);
    std::copy_n(baseM + 5, 5, uM + 15);
    if (!copyPlane(uvInfo, "AsyncReadPixelsUPlane", uM, texMatrix, &transfers[1])) {
        return params.fail();
    }

    // This matrix generates (r,g,b,a) = (0, 0, 0, v)
    float vM[20];
    std::fill_n(vM, 15, 0.f);
    std::copy_n(baseM + 10, 5, vM + 15);
    if (!copyPlane(uvInfo, "AsyncReadPixelsVPlane", vM, texMatrix, &transfers[2])) {
        return params.fail();
    }

    this->finalizeAsyncReadPixels(std::move(recorder),
                                  {transfers, readAlpha ? 4 : 3},
                                  params.fCallback,
                                  params.fCallbackContext);
}

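// Common tail for all async reads: submit any recorded work, then register a finish callback
// that wraps the mapped transfer buffers in an AsyncReadResult and hands it to the client.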
void Context::finalizeAsyncReadPixels(std::unique_ptr<Recorder> recorder,
                                      SkSpan<PixelTransferResult> transferResults,
                                      SkImage::ReadPixelsCallback callback,
                                      SkImage::ReadPixelsContext callbackContext) {
    // If the async readback work required a Recorder, insert the recording with all of the
    // accumulated work (which includes any copies). Otherwise, for pure copy readbacks,
    // transferPixels() already added the tasks directly to the QueueManager.
    if (recorder) {
        std::unique_ptr<Recording> recording = recorder->snap();
        if (!recording) {
            callback(callbackContext, nullptr);
            return;
        }
        InsertRecordingInfo recordingInfo;
        recordingInfo.fRecording = recording.get();
        if (!this->insertRecording(recordingInfo)) {
            callback(callbackContext, nullptr);
            return;
        }
    }

    // Set up FinishContext and add transfer commands to queue
    struct AsyncReadFinishContext {
        SkImage::ReadPixelsCallback* fClientCallback;
        SkImage::ReadPixelsContext fClientContext;
        ClientMappedBufferManager* fMappedBufferManager;
        std::array<PixelTransferResult, 4> fTransferResults;
    };

    auto finishContext = std::make_unique<AsyncReadFinishContext>();
    finishContext->fClientCallback = callback;
    finishContext->fClientContext = callbackContext;
    finishContext->fMappedBufferManager = fMappedBufferManager.get();

    SkASSERT(transferResults.size() <= std::size(finishContext->fTransferResults));
    skia_private::STArray<4, sk_sp<Buffer>> buffersToAsyncMap;
    for (size_t i = 0; i < transferResults.size(); ++i) {
        finishContext->fTransferResults[i] = std::move(transferResults[i]);
        if (fSharedContext->caps()->bufferMapsAreAsync()) {
            buffersToAsyncMap.push_back(finishContext->fTransferResults[i].fTransferBuffer);
        }
    }

    InsertFinishInfo info;
    info.fFinishedContext = finishContext.release();
    info.fFinishedProc = [](GpuFinishedContext c, CallbackResult status) {
        std::unique_ptr<const AsyncReadFinishContext> context(
                reinterpret_cast<const AsyncReadFinishContext*>(c));
        using AsyncReadResult = skgpu::TAsyncReadResult<Buffer, ContextID, PixelTransferResult>;

        ClientMappedBufferManager* manager = context->fMappedBufferManager;
        std::unique_ptr<AsyncReadResult> result;
        if (status == CallbackResult::kSuccess) {
            result = std::make_unique<AsyncReadResult>(manager->ownerID());
        }
        for (const auto& r : context->fTransferResults) {
            if (!r.fTransferBuffer) {
                break;
            }
            if (result && !result->addTransferResult(r, r.fSize, r.fRowBytes, manager)) {
                result.reset();
            }
            // If we didn't get this buffer into the mapped buffer manager then make sure it gets
            // unmapped if it has a pending or completed async map.
            if (!result && r.fTransferBuffer->isUnmappable()) {
                r.fTransferBuffer->unmap();
            }
        }
        (*context->fClientCallback)(context->fClientContext, std::move(result));
    };

    // If addFinishInfo() fails, it invokes the finish callback automatically, which handles all
    // of the required cleanup for us, so we just log an error message. The buffers will never be
    // mapped and thus don't need an unmap.
    if (!fQueueManager->addFinishInfo(info, fResourceProvider.get(), buffersToAsyncMap)) {
        SKGPU_LOG_E("Failed to register finish callbacks for asyncReadPixels.");
        return;
    }
}

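// Records a GPU->CPU transfer of 'srcRect' from 'srcProxy': picks a supported read-pixels color
// type, allocates a host-visible transfer buffer, enqueues CopyTextureToBuffer and
// SynchronizeToCpu tasks (on the Recorder when present, otherwise directly on the QueueManager),
// and returns the buffer plus an optional pixel-conversion step for the async callback.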
Context::PixelTransferResult Context::transferPixels(Recorder* recorder,
                                                     const TextureProxy* srcProxy,
                                                     const SkColorInfo& srcColorInfo,
                                                     const SkColorInfo& dstColorInfo,
                                                     const SkIRect& srcRect) {
    SkASSERT(SkIRect::MakeSize(srcProxy->dimensions()).contains(srcRect));
    SkASSERT(SkColorInfoIsValid(dstColorInfo));

    const Caps* caps = fSharedContext->caps();
    if (!srcProxy || !caps->supportsReadPixels(srcProxy->textureInfo())) {
        return {};
    }

    const SkColorType srcColorType = srcColorInfo.colorType();
    SkColorType supportedColorType;
    bool isRGB888Format;
    std::tie(supportedColorType, isRGB888Format) =
            caps->supportedReadPixelsColorType(srcColorType,
                                               srcProxy->textureInfo(),
                                               dstColorInfo.colorType());
    if (supportedColorType == kUnknown_SkColorType) {
        return {};
    }

    // Fail if read color type does not have all of dstCT's color channels and those missing color
    // channels are in the src.
    uint32_t dstChannels = SkColorTypeChannelFlags(dstColorInfo.colorType());
    uint32_t legalReadChannels = SkColorTypeChannelFlags(supportedColorType);
    uint32_t srcChannels = SkColorTypeChannelFlags(srcColorType);
    if ((~legalReadChannels & dstChannels) & srcChannels) {
        return {};
    }

    int bpp = isRGB888Format ? 3 : SkColorTypeBytesPerPixel(supportedColorType);
    size_t rowBytes = caps->getAlignedTextureDataRowBytes(bpp * srcRect.width());
    size_t size = SkAlignTo(rowBytes * srcRect.height(), caps->requiredTransferBufferAlignment());
    sk_sp<Buffer> buffer = fResourceProvider->findOrCreateBuffer(
            size, BufferType::kXferGpuToCpu, AccessPattern::kHostVisible, "TransferToCpu");
    if (!buffer) {
        return {};
    }

    // Set up copy task. Since we always use a new buffer the offset can be 0 and we don't need to
    // worry about aligning it to the required transfer buffer alignment.
    sk_sp<CopyTextureToBufferTask> copyTask = CopyTextureToBufferTask::Make(sk_ref_sp(srcProxy),
                                                                            srcRect,
                                                                            buffer,
                                                                            /*bufferOffset=*/0,
                                                                            rowBytes);
    const bool addTasksDirectly = !SkToBool(recorder);
    Protected contextIsProtected = fSharedContext->isProtected();
    if (!copyTask || (addTasksDirectly && !fQueueManager->addTask(copyTask.get(),
                                                                  this,
                                                                  contextIsProtected))) {
        return {};
    } else if (!addTasksDirectly) {
        // Add the task to the Recorder instead of the QueueManager if that's been required for
        // collecting tasks to prepare the copied textures.
        recorder->priv().add(std::move(copyTask));
    }
    sk_sp<SynchronizeToCpuTask> syncTask = SynchronizeToCpuTask::Make(buffer);
    if (!syncTask || (addTasksDirectly && !fQueueManager->addTask(syncTask.get(),
                                                                  this,
                                                                  contextIsProtected))) {
        return {};
    } else if (!addTasksDirectly) {
        recorder->priv().add(std::move(syncTask));
    }

    PixelTransferResult result;
    result.fTransferBuffer = std::move(buffer);
    result.fSize = srcRect.size();
    // srcColorInfo describes the texture; readColorInfo describes the result of the
    // copy-to-buffer, which may be different; dstColorInfo is what we have to transform it into
    // when invoking the async callbacks.
    SkColorInfo readColorInfo = srcColorInfo.makeColorType(supportedColorType);
    if (readColorInfo != dstColorInfo || isRGB888Format) {
        SkISize dims = srcRect.size();
        SkImageInfo srcInfo = SkImageInfo::Make(dims, readColorInfo);
        SkImageInfo dstInfo = SkImageInfo::Make(dims, dstColorInfo);
        result.fRowBytes = dstInfo.minRowBytes();
        result.fPixelConverter = [dstInfo, srcInfo, rowBytes, isRGB888Format](
                void* dst, const void* src) {
            SkAutoPixmapStorage temp;
            size_t srcRowBytes = rowBytes;
            if (isRGB888Format) {
                temp.alloc(srcInfo);
                size_t tRowBytes = temp.rowBytes();
                auto* sRow = reinterpret_cast<const char*>(src);
                auto* tRow = reinterpret_cast<char*>(temp.writable_addr());
                for (int y = 0; y < srcInfo.height(); ++y, sRow += srcRowBytes, tRow += tRowBytes) {
                    for (int x = 0; x < srcInfo.width(); ++x) {
                        auto s = sRow + x*3;
                        auto t = tRow + x*sizeof(uint32_t);
                        memcpy(t, s, 3);
                        t[3] = static_cast<char>(0xFF);
                    }
                }
                src = temp.addr();
                srcRowBytes = tRowBytes;
            }
            SkAssertResult(SkConvertPixels(dstInfo, dst, dstInfo.minRowBytes(),
                                           srcInfo, src, srcRowBytes));
        };
    } else {
        result.fRowBytes = rowBytes;
    }

    return result;
}

void Context::checkForFinishedWork(SyncToCpu syncToCpu) {
    ASSERT_SINGLE_OWNER

    fQueueManager->checkForFinishedWork(syncToCpu);
    fMappedBufferManager->process();
}

void Context::checkAsyncWorkCompletion() {
    this->checkForFinishedWork(SyncToCpu::kNo);
}

void Context::deleteBackendTexture(const BackendTexture& texture) {
    ASSERT_SINGLE_OWNER

    if (!texture.isValid() || texture.backend() != this->backend()) {
        return;
    }
    fResourceProvider->deleteBackendTexture(texture);
}

void Context::freeGpuResources() {
    ASSERT_SINGLE_OWNER

    this->checkAsyncWorkCompletion();

    fResourceProvider->freeGpuResources();
}

void Context::performDeferredCleanup(std::chrono::milliseconds msNotUsed) {
    ASSERT_SINGLE_OWNER

    this->checkAsyncWorkCompletion();

    auto purgeTime = skgpu::StdSteadyClock::now() - msNotUsed;
    fResourceProvider->purgeResourcesNotUsedSince(purgeTime);
}

size_t Context::currentBudgetedBytes() const {
    ASSERT_SINGLE_OWNER
    return fResourceProvider->getResourceCacheCurrentBudgetedBytes();
}

size_t Context::currentPurgeableBytes() const {
    ASSERT_SINGLE_OWNER
    return fResourceProvider->getResourceCacheCurrentPurgeableBytes();
}

size_t Context::maxBudgetedBytes() const {
    ASSERT_SINGLE_OWNER
    return fResourceProvider->getResourceCacheLimit();
}

void Context::setMaxBudgetedBytes(size_t bytes) {
    ASSERT_SINGLE_OWNER
    fResourceProvider->setResourceCacheLimit(bytes);
}

void Context::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const {
    ASSERT_SINGLE_OWNER
    fResourceProvider->dumpMemoryStatistics(traceMemoryDump);
    // TODO: What is the Graphite equivalent of the text blob cache, and how do we report its
    // used bytes here? (See the Ganesh implementation.)
}

bool Context::isDeviceLost() const {
    return fSharedContext->isDeviceLost();
}

int Context::maxTextureSize() const {
    return fSharedContext->caps()->maxTextureSize();
}

bool Context::supportsProtectedContent() const {
    return fSharedContext->isProtected() == Protected::kYes;
}

GpuStatsFlags Context::supportedGpuStats() const {
    return fSharedContext->caps()->supportedGpuStats();
}

///////////////////////////////////////////////////////////////////////////////////

#if defined(GPU_TEST_UTILS)
void Context::deregisterRecorder(const Recorder* recorder) {
    SkAutoMutexExclusive lock(fTestingLock);
    for (auto it = fTrackedRecorders.begin();
         it != fTrackedRecorders.end();
         it++) {
        if (*it == recorder) {
            fTrackedRecorders.erase(it);
            return;
        }
    }
}

bool ContextPriv::readPixels(const SkPixmap& pm,
                             const TextureProxy* textureProxy,
                             const SkImageInfo& srcImageInfo,
                             int srcX, int srcY) {
    auto rect = SkIRect::MakeXYWH(srcX, srcY, pm.width(), pm.height());
    struct AsyncContext {
        bool fCalled = false;
        std::unique_ptr<const SkImage::AsyncReadResult> fResult;
    } asyncContext;

    auto asyncCallback = [](void* c, std::unique_ptr<const SkImage::AsyncReadResult> out) {
        auto context = static_cast<AsyncContext*>(c);
        context->fResult = std::move(out);
        context->fCalled = true;
    };

    const SkColorInfo& srcColorInfo = srcImageInfo.colorInfo();

    // This is roughly equivalent to the logic taken in asyncRescaleAndRead(SkSurface) to either
    // try the image-based readback (with copy-as-draw fallbacks) or read the texture directly
    // if it supports reading.
    if (!fContext->fSharedContext->caps()->supportsReadPixels(textureProxy->textureInfo())) {
        // Since this is a synchronous testing-only API, callers should have flushed any pending
        // work that modifies this texture proxy already. This means we don't have to worry about
        // re-wrapping the proxy in a new Image (that wouldn't be connected to any Device, etc.).
        sk_sp<SkImage> image{new Image(TextureProxyView(sk_ref_sp(textureProxy)), srcColorInfo)};
        Context::AsyncParams<SkImage> params {image.get(), rect, pm.info(),
                                              asyncCallback, &asyncContext};
        if (!params.validate()) {
            params.fail();
        } else {
            fContext->asyncReadPixels(/*recorder=*/nullptr, params);
        }
    } else {
        fContext->asyncReadTexture(/*recorder=*/nullptr,
                                   {textureProxy, rect, pm.info(), asyncCallback, &asyncContext},
                                   srcImageInfo.colorInfo());
    }

    if (fContext->fSharedContext->caps()->allowCpuSync()) {
        fContext->submit(SyncToCpu::kYes);
    } else {
        fContext->submit(SyncToCpu::kNo);
        if (fContext->fSharedContext->backend() == BackendApi::kDawn) {
            while (!asyncContext.fCalled) {
                fContext->fSharedContext->deviceTick(fContext);
            }
        } else {
            SK_ABORT("Only Dawn supports non-syncing contexts.");
        }
    }
    SkASSERT(asyncContext.fCalled);
    if (!asyncContext.fResult) {
        return false;
    }
    SkRectMemcpy(pm.writable_addr(), pm.rowBytes(), asyncContext.fResult->data(0),
                 asyncContext.fResult->rowBytes(0), pm.info().minRowBytes(),
                 pm.height());
    return true;
}

bool ContextPriv::supportsPathRendererStrategy(PathRendererStrategy strategy) {
    AtlasProvider::PathAtlasFlagsBitMask pathAtlasFlags =
            AtlasProvider::QueryPathAtlasSupport(this->caps());
    switch (strategy) {
        case PathRendererStrategy::kDefault:
            return true;
        case PathRendererStrategy::kComputeAnalyticAA:
        case PathRendererStrategy::kComputeMSAA16:
        case PathRendererStrategy::kComputeMSAA8:
            return SkToBool(pathAtlasFlags & AtlasProvider::PathAtlasFlags::kCompute);
        case PathRendererStrategy::kRasterAA:
            return SkToBool(pathAtlasFlags & AtlasProvider::PathAtlasFlags::kRaster);
        case PathRendererStrategy::kTessellation:
            return true;
    }

    return false;
}

#endif // GPU_TEST_UTILS

///////////////////////////////////////////////////////////////////////////////////

std::unique_ptr<Context> ContextCtorAccessor::MakeContext(
        sk_sp<SharedContext> sharedContext,
        std::unique_ptr<QueueManager> queueManager,
        const ContextOptions& options) {
    auto context = std::unique_ptr<Context>(new Context(std::move(sharedContext),
                                                        std::move(queueManager),
                                                        options));
    if (context && context->finishInitialization()) {
        return context;
    } else {
        return nullptr;
    }
}

} // namespace skgpu::graphite