/*
 * Copyright 2011 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrGpu_DEFINED
#define GrGpu_DEFINED

#include "include/core/SkData.h"
#include "include/core/SkRect.h"
#include "include/core/SkRefCnt.h"
#include "include/core/SkSpan.h"
#include "include/core/SkTypes.h"
#include "include/gpu/GpuTypes.h"
#include "include/gpu/ganesh/GrBackendSurface.h"
#include "include/gpu/ganesh/GrDirectContext.h"
#include "include/gpu/ganesh/GrTypes.h"
#include "include/private/base/SkTArray.h"
#include "include/private/gpu/ganesh/GrTypesPriv.h"
#include "src/gpu/ganesh/GrCaps.h"
#include "src/gpu/ganesh/GrGpuBuffer.h" // IWYU pragma: keep
#include "src/gpu/ganesh/GrOpsRenderPass.h"
#include "src/gpu/ganesh/GrSamplerState.h"
#include "src/gpu/ganesh/GrXferProcessor.h"

#include <array>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <optional>
#include <string_view>

class GrAttachment;
class GrBackendSemaphore;
class GrDirectContext;
class GrGLContext;
class GrProgramDesc;
class GrProgramInfo;
class GrRenderTarget;
class GrRingBuffer;
class GrSemaphore;
class GrStagingBufferManager;
class GrSurface;
class GrSurfaceProxy;
class GrTexture;
class GrThreadSafePipelineBuilder;
class SkJSONWriter;
class SkString;
enum class SkTextureCompressionType;
struct GrVkDrawableInfo;
struct OH_NativeBuffer;
struct SkISize;
struct SkImageInfo;

namespace SkSurfaces {
enum class BackendSurfaceAccess;
}
namespace skgpu {
class AutoCallback;
class MutableTextureState;
class RefCntedCallback;
} // namespace skgpu

// This is sufficient for the GL implementation (which is all we have now). It can become a
// "Backend" SkAnySubclass type to cover other backends in the future.
struct GrTimerQuery {
    uint32_t query;
};

class SK_API GrGpu {
public:
    GrGpu(GrDirectContext* direct);
    virtual ~GrGpu();

    GrDirectContext* getContext() { return fContext; }
    const GrDirectContext* getContext() const { return fContext; }

    void setCurrentGrResourceTag(const GrGpuResourceTag& tag) {
        if (fContext) {
            fContext->setCurrentGrResourceTag(tag);
        }
    }

    void popGrResourceTag() {
        if (fContext) {
            fContext->popGrResourceTag();
        }
    }

    /**
     * Gets the capabilities of the draw target.
     */
    const GrCaps* caps() const { return fCaps.get(); }
    sk_sp<const GrCaps> refCaps() const { return fCaps; }

    virtual GrStagingBufferManager* stagingBufferManager() { return nullptr; }

    virtual GrRingBuffer* uniformsRingBuffer() { return nullptr; }

    enum class DisconnectType {
        // No cleanup should be attempted, immediately cease making backend API calls
        kAbandon,
        // Free allocated resources (not known by GrResourceCache) before returning and
        // ensure no backend 3D API calls will be made after disconnect() returns.
        kCleanup,
    };

    // Called by the context when the underlying backend context is already destroyed or will be
    // destroyed before the GrDirectContext.
    virtual void disconnect(DisconnectType);

    virtual GrThreadSafePipelineBuilder* pipelineBuilder() = 0;
    virtual sk_sp<GrThreadSafePipelineBuilder> refPipelineBuilder() = 0;

    // Called by GrDirectContext::isContextLost. Returns true if the backend Gpu object has gotten
    // into an unrecoverable, lost state.
    virtual bool isDeviceLost() const { return false; }

    /**
     * The GrGpu object normally assumes that no outsider is setting state
     * within the underlying 3D API's context/device/whatever. This call informs
     * the GrGpu that the state was modified and it shouldn't make assumptions
     * about the state.
     */
    void markContextDirty(uint32_t state = kAll_GrBackendState) { fResetBits |= state; }
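
    // For example, a client that issues raw backend API calls between Skia draws might do the
    // following (a sketch only; which dirty bits apply depends on what state was touched, and
    // kTextureBinding_GrGLBackendState is GL-specific):
    //
    //   // ... client changes GL texture bindings behind Skia's back ...
    //   gpu->markContextDirty(kTextureBinding_GrGLBackendState);
    //   // GrGpu will call handleDirtyContext() before issuing its next backend commands.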

    /**
     * Creates a texture object. If renderable is kYes then the returned texture can
     * be used as a render target by calling GrTexture::asRenderTarget(). Not all
     * pixel configs can be used as render targets. Support for configs as textures
     * or render targets can be checked using GrCaps.
     *
     * @param dimensions      dimensions of the texture to be created.
     * @param format          the format for the texture (not currently used).
     * @param renderable      should the resulting texture be renderable
     * @param renderTargetSampleCnt The number of samples to use for rendering if renderable is
     *                        kYes. If renderable is kNo then this must be 1.
     * @param budgeted        does this texture count against the resource cache budget?
     * @param isProtected     should the texture be created as protected.
     * @param texels          array of mipmap levels containing texel data to load.
     *                        If level i has pixels then it is assumed that its dimensions are
     *                        max(1, floor(dimensions.fWidth / 2^i)) by
     *                        max(1, floor(dimensions.fHeight / 2^i)).
     *                        If texels[i].fPixels == nullptr for all i < mipLevelCount or
     *                        mipLevelCount is 0 then the texture's contents are uninitialized.
     *                        If a level has non-null pixels, its row bytes must be a multiple of
     *                        the config's bytes-per-pixel. The row bytes must be tight to the
     *                        level width if !caps->writePixelsRowBytesSupport().
     *                        If mipLevelCount > 1 and texels[i].fPixels != nullptr for any i > 0
     *                        then all levels must have non-null pixels. All levels must have
     *                        non-null pixels if GrCaps::createTextureMustSpecifyAllLevels() is
     *                        true.
     * @param textureColorType The color type interpretation of the texture for the purpose
     *                        of uploading texel data.
     * @param srcColorType    The color type of data in texels[].
     * @param texelLevelCount the number of levels in 'texels'. May be 0, 1, or
     *                        floor(log2(max(dimensions.fWidth, dimensions.fHeight))) + 1. It
     *                        must be the latter if GrCaps::createTextureMustSpecifyAllLevels()
     *                        is true.
     * @return  The texture object if successful, otherwise nullptr.
     */
    sk_sp<GrTexture> createTexture(SkISize dimensions,
                                   const GrBackendFormat& format,
                                   GrTextureType textureType,
                                   GrRenderable renderable,
                                   int renderTargetSampleCnt,
                                   skgpu::Budgeted budgeted,
                                   GrProtected isProtected,
                                   GrColorType textureColorType,
                                   GrColorType srcColorType,
                                   const GrMipLevel texels[],
                                   int texelLevelCount,
                                   std::string_view label);
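
    // A minimal usage sketch (illustrative only; 'gpu', 'format', 'pixels', and 'rowBytes' are
    // assumed to be supplied by the caller): create a budgeted, non-renderable texture with a
    // single initialized level.
    //
    //   GrMipLevel level = {pixels, rowBytes, nullptr};
    //   sk_sp<GrTexture> tex = gpu->createTexture({256, 256},
    //                                             format,
    //                                             GrTextureType::k2D,
    //                                             GrRenderable::kNo,
    //                                             /*renderTargetSampleCnt=*/1,
    //                                             skgpu::Budgeted::kYes,
    //                                             GrProtected::kNo,
    //                                             GrColorType::kRGBA_8888,
    //                                             GrColorType::kRGBA_8888,
    //                                             &level,
    //                                             /*texelLevelCount=*/1,
    //                                             /*label=*/"ExampleTexture");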

    /**
     * Simplified createTexture() interface for when there is no initial texel data to upload.
     */
    sk_sp<GrTexture> createTexture(SkISize dimensions,
                                   const GrBackendFormat& format,
                                   GrTextureType textureType,
                                   GrRenderable renderable,
                                   int renderTargetSampleCnt,
                                   skgpu::Mipmapped mipmapped,
                                   skgpu::Budgeted budgeted,
                                   GrProtected isProtected,
                                   std::string_view label);

    sk_sp<GrTexture> createCompressedTexture(SkISize dimensions,
                                             const GrBackendFormat& format,
                                             skgpu::Budgeted budgeted,
                                             skgpu::Mipmapped mipmapped,
                                             GrProtected isProtected,
                                             const void* data,
                                             size_t dataSize);

    sk_sp<GrTexture> createCompressedTexture(SkISize dimensions,
                                             const GrBackendFormat& format,
                                             skgpu::Budgeted budgeted,
                                             skgpu::Mipmapped mipmapped,
                                             GrProtected isProtected,
                                             OH_NativeBuffer* nativeBuffer,
                                             size_t bufferSize);

    /**
     * Implements GrResourceProvider::wrapBackendTexture
     */
    sk_sp<GrTexture> wrapBackendTexture(const GrBackendTexture&,
                                        GrWrapOwnership,
                                        GrWrapCacheable,
                                        GrIOType);

    sk_sp<GrTexture> wrapCompressedBackendTexture(const GrBackendTexture&,
                                                  GrWrapOwnership,
                                                  GrWrapCacheable);

    /**
     * Implements GrResourceProvider::wrapRenderableBackendTexture
     */
    sk_sp<GrTexture> wrapRenderableBackendTexture(const GrBackendTexture&,
                                                  int sampleCnt,
                                                  GrWrapOwnership,
                                                  GrWrapCacheable);

    /**
     * Implements GrResourceProvider::wrapBackendRenderTarget
     */
    sk_sp<GrRenderTarget> wrapBackendRenderTarget(const GrBackendRenderTarget&);

    /**
     * Implements GrResourceProvider::wrapVulkanSecondaryCBAsRenderTarget
     */
    sk_sp<GrRenderTarget> wrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo&,
                                                              const GrVkDrawableInfo&);

    /**
     * Creates a buffer in GPU memory. For a client-side buffer use GrBuffer::CreateCPUBacked.
     *
     * @param size          size of buffer to create.
     * @param intendedType  hint to the graphics subsystem about what the buffer will be used for.
     * @param accessPattern hint to the graphics subsystem about how the data will be accessed.
     *
     * @return the buffer if successful, otherwise nullptr.
     */
    sk_sp<GrGpuBuffer> createBuffer(size_t size,
                                    GrGpuBufferType intendedType,
                                    GrAccessPattern accessPattern);
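
    // Sketch of a typical call (illustrative; the vertex data and its size are assumed):
    //
    //   sk_sp<GrGpuBuffer> vbo = gpu->createBuffer(vertexDataSize,
    //                                              GrGpuBufferType::kVertex,
    //                                              kStatic_GrAccessPattern);
    //   if (!vbo) { /* allocation failed */ }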

    /**
     * Resolves MSAA. The resolveRect must already be in the native destination space.
     */
    void resolveRenderTarget(GrRenderTarget*, const SkIRect& resolveRect);

    /**
     * Uses the base of the texture to recompute the contents of the other levels.
     */
    bool regenerateMipMapLevels(GrTexture*);

    /**
     * If the backend API has stateful texture bindings, this resets them back to defaults.
     */
    void resetTextureBindings();

    /**
     * Reads a rectangle of pixels from a render target. No sRGB/linear conversions are performed.
     *
     * @param surface          the surface to read from
     * @param rect             the rectangle of pixels to read
     * @param surfaceColorType the color type for this use of the surface.
     * @param dstColorType     the color type of the destination buffer.
     * @param buffer           memory to read the rectangle into.
     * @param rowBytes         the number of bytes between consecutive rows. Must be a multiple of
     *                         dstColorType's bytes-per-pixel. Must be tight to width if
     *                         !caps->readPixelsRowBytesSupport().
     *
     * @return true if the read succeeded, false if not. The read can fail because the surface
     *         doesn't support reading, the color type is not allowed for the format of the
     *         surface, or the rectangle to read is not contained in the surface.
     */
    bool readPixels(GrSurface* surface,
                    SkIRect rect,
                    GrColorType surfaceColorType,
                    GrColorType dstColorType,
                    void* buffer,
                    size_t rowBytes);
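
    // Sketch of a tight-row-bytes readback (illustrative; error handling elided):
    //
    //   size_t bpp = GrColorTypeBytesPerPixel(dstColorType);
    //   size_t rowBytes = rect.width() * bpp;
    //   SkAutoMalloc storage(rowBytes * rect.height());
    //   bool ok = gpu->readPixels(surface, rect, surfaceColorType, dstColorType,
    //                             storage.get(), rowBytes);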

    /**
     * Updates the pixels in a rectangle of a surface. No sRGB/linear conversions are performed.
     *
     * @param surface            the surface to write to.
     * @param rect               the rectangle of pixels to overwrite
     * @param surfaceColorType   the color type for this use of the surface.
     * @param srcColorType       the color type of the source buffer.
     * @param texels             array of mipmap levels containing texture data. Row bytes must be
     *                           a multiple of srcColorType's bytes-per-pixel. Must be tight to
     *                           level width if !caps->writePixelsRowBytesSupport().
     * @param mipLevelCount      number of levels in 'texels'
     * @param prepForTexSampling After doing write pixels should the surface be prepared for
     *                           texture sampling. This is currently only used by Vulkan for inline
     *                           uploads to set that layout back to sampled after doing the upload.
     *                           Inline uploads currently can happen between draws in a single op
     *                           so it is not trivial to break up the OpsTask into two tasks when
     *                           we see an inline upload. However, once we are able to support
     *                           doing that we can remove this parameter.
     *
     * @return true if the write succeeded, false if not. The write can fail because the surface
     *         doesn't support writing (e.g. it is read-only), the color type is not allowed for
     *         the format of the surface, or the rectangle written is not contained in the
     *         surface.
     */
    bool writePixels(GrSurface* surface,
                     SkIRect rect,
                     GrColorType surfaceColorType,
                     GrColorType srcColorType,
                     const GrMipLevel texels[],
                     int mipLevelCount,
                     bool prepForTexSampling = false);

    /**
     * Helper for the case of a single level.
     */
    bool writePixels(GrSurface* surface,
                     SkIRect rect,
                     GrColorType surfaceColorType,
                     GrColorType srcColorType,
                     const void* buffer,
                     size_t rowBytes,
                     bool prepForTexSampling = false) {
        GrMipLevel mipLevel = {buffer, rowBytes, nullptr};
        return this->writePixels(surface,
                                 rect,
                                 surfaceColorType,
                                 srcColorType,
                                 &mipLevel,
                                 1,
                                 prepForTexSampling);
    }

    /**
     * Transfer bytes from one GPU buffer to another. The src buffer must have type kXferCpuToGpu
     * and the dst buffer must not. Neither buffer may currently be mapped. The offsets and size
     * must be aligned to GrCaps::transferFromBufferToBufferAlignment.
     *
     * @param src       the buffer to read from
     * @param srcOffset the aligned offset in the src at which the transfer begins.
     * @param dst       the buffer to write to
     * @param dstOffset the aligned offset in the dst at which the transfer begins
     * @param size      the aligned number of bytes to transfer.
     */
    bool transferFromBufferToBuffer(sk_sp<GrGpuBuffer> src,
                                    size_t srcOffset,
                                    sk_sp<GrGpuBuffer> dst,
                                    size_t dstOffset,
                                    size_t size);
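
    // Sketch of a valid transfer (assumes both buffers were created by this GrGpu and that 'src'
    // has type kXferCpuToGpu):
    //
    //   size_t align = gpu->caps()->transferFromBufferToBufferAlignment();
    //   SkASSERT(srcOffset % align == 0 && dstOffset % align == 0 && size % align == 0);
    //   bool ok = gpu->transferFromBufferToBuffer(std::move(src), srcOffset,
    //                                             std::move(dst), dstOffset, size);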

    /**
     * Updates the pixels in a rectangle of a texture using a buffer. If the texture is MIP mapped,
     * the base level is written to.
     *
     * @param texture          the texture to write to.
     * @param rect             the rectangle of pixels in the texture to overwrite
     * @param textureColorType the color type for this use of the surface.
     * @param bufferColorType  the color type of the transfer buffer's pixel data
     * @param transferBuffer   GrBuffer to read pixels from (type must be "kXferCpuToGpu")
     * @param offset           offset from the start of the buffer
     * @param rowBytes         number of bytes between consecutive rows in the buffer. Must be a
     *                         multiple of bufferColorType's bytes-per-pixel. Must be tight to
     *                         rect.width() if !caps->writePixelsRowBytesSupport().
     */
    bool transferPixelsTo(GrTexture* texture,
                          SkIRect rect,
                          GrColorType textureColorType,
                          GrColorType bufferColorType,
                          sk_sp<GrGpuBuffer> transferBuffer,
                          size_t offset,
                          size_t rowBytes);
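
    // Sketch: staging pixel data through an upload buffer (illustrative; the map/unmap and row
    // packing steps are elided):
    //
    //   auto xferBuf = gpu->createBuffer(dataSize, GrGpuBufferType::kXferCpuToGpu,
    //                                    kDynamic_GrAccessPattern);
    //   // ... map xferBuf, copy pixel rows into it, unmap ...
    //   bool ok = gpu->transferPixelsTo(texture, rect, textureColorType,
    //                                   bufferColorType, std::move(xferBuf),
    //                                   /*offset=*/0, rowBytes);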

    /**
     * Reads the pixels from a rectangle of a surface into a buffer. Use
     * GrCaps::SupportedRead::fOffsetAlignmentForTransferBuffer to determine the requirements for
     * the buffer offset alignment. If the surface is a MIP mapped texture, the base level is read.
     *
     * If successful, the row bytes in the buffer are always:
     *   GrColorTypeBytesPerPixel(bufferColorType) * rect.width()
     *
     * Asserts that the caller has passed a properly aligned offset and that the buffer is
     * large enough to hold the result.
     *
     * @param surface          the surface to read from.
     * @param rect             the rectangle of pixels to read
     * @param surfaceColorType the color type for this use of the surface.
     * @param bufferColorType  the color type of the transfer buffer's pixel data
     * @param transferBuffer   GrBuffer to write pixels to (type must be "kXferGpuToCpu")
     * @param offset           offset from the start of the buffer
     */
    bool transferPixelsFrom(GrSurface* surface,
                            SkIRect rect,
                            GrColorType surfaceColorType,
                            GrColorType bufferColorType,
                            sk_sp<GrGpuBuffer> transferBuffer,
                            size_t offset);
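
    // Sketch of a readback into a transfer buffer (illustrative only):
    //
    //   size_t rowBytes = GrColorTypeBytesPerPixel(bufferColorType) * rect.width();
    //   auto xferBuf = gpu->createBuffer(rowBytes * rect.height(),
    //                                    GrGpuBufferType::kXferGpuToCpu,
    //                                    kDynamic_GrAccessPattern);
    //   bool ok = gpu->transferPixelsFrom(surface, rect, surfaceColorType,
    //                                     bufferColorType, std::move(xferBuf),
    //                                     /*offset=*/0);
    //   // Map the buffer to read the pixels only after the GPU work completes.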

    // Called to perform a surface-to-surface copy. Fallbacks that issue a draw from the src to
    // the dst take place at higher levels; this function implements the faster copy paths. The
    // src and dst rects are pre-clipped. The src rect and dst rect are guaranteed to be within
    // the src/dst bounds and non-empty. They must also be in their exact device space coords,
    // including already being transformed for origin if need be. If canDiscardOutsideDstRect is
    // set to true then we don't need to preserve any data on the dst surface outside of the copy.
    //
    // Backends may or may not support src and dst rects with differing dimensions. The
    // implementation may assume that GrCaps.canCopySurface() returned true for these surfaces
    // and rects.
    bool copySurface(GrSurface* dst, const SkIRect& dstRect,
                     GrSurface* src, const SkIRect& srcRect,
                     GrSamplerState::Filter filter);

    // Returns a GrOpsRenderPass which OpsTasks send draw commands to instead of directly
    // to the Gpu object. The 'bounds' rect is the content rect of the renderTarget.
    // If a 'stencil' is provided it will be the one bound to 'renderTarget'. If one is not
    // provided but 'renderTarget' has a stencil buffer then that is a signal that the
    // render target's stencil buffer should be ignored.
    GrOpsRenderPass* getOpsRenderPass(
            GrRenderTarget* renderTarget,
            bool useMSAASurface,
            GrAttachment* stencil,
            GrSurfaceOrigin,
            const SkIRect& bounds,
            const GrOpsRenderPass::LoadAndStoreInfo&,
            const GrOpsRenderPass::StencilLoadAndStoreInfo&,
            const skia_private::TArray<GrSurfaceProxy*, true>& sampledProxies,
            GrXferBarrierFlags renderPassXferBarriers);

    // Called by GrDrawingManager when flushing.
    // Provides a hook for post-flush actions (e.g. Vulkan command buffer submits). This will also
    // insert any semaphores requested in the GrFlushInfo on the gpu and set its backendSemaphores
    // to match the inserted semaphores.
    void executeFlushInfo(SkSpan<GrSurfaceProxy*>,
                          SkSurfaces::BackendSurfaceAccess access,
                          const GrFlushInfo&,
                          std::optional<GrTimerQuery> timerQuery,
                          const skgpu::MutableTextureState* newState);

    // Called before render tasks are executed during a flush.
    virtual void willExecute() {}

    bool submitToGpu() {
        return this->submitToGpu(GrSubmitInfo());
    }
    bool submitToGpu(const GrSubmitInfo& info);
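
    // Sketch of a synchronizing submit (a sketch only; fields of GrSubmitInfo other than fSync
    // are assumed to keep their defaults):
    //
    //   GrSubmitInfo info;
    //   info.fSync = GrSyncCpu::kYes;  // block the CPU until the GPU work is done
    //   bool success = gpu->submitToGpu(info);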

    virtual void submit(GrOpsRenderPass*) = 0;

    [[nodiscard]] virtual std::unique_ptr<GrSemaphore> makeSemaphore(bool isOwned = true) = 0;
    virtual std::unique_ptr<GrSemaphore> wrapBackendSemaphore(const GrBackendSemaphore&,
                                                              GrSemaphoreWrapType,
                                                              GrWrapOwnership) = 0;
    virtual void insertSemaphore(GrSemaphore* semaphore) = 0;
    virtual void waitSemaphore(GrSemaphore* semaphore) = 0;

    virtual std::optional<GrTimerQuery> startTimerQuery() { return {}; }

    virtual void addFinishedCallback(skgpu::AutoCallback, std::optional<GrTimerQuery> = {}) = 0;
    virtual void checkFinishedCallbacks() = 0;
    virtual void finishOutstandingGpuWork() = 0;

    // NOLINTNEXTLINE(performance-unnecessary-value-param)
    virtual void takeOwnershipOfBuffer(sk_sp<GrGpuBuffer>) {}

    /**
     * Checks if we detected an OOM from the underlying 3D API and if so returns true and resets
     * the internal OOM state to false. Otherwise, returns false.
     */
    bool checkAndResetOOMed();
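
    // One plausible response at a higher level (a sketch, not a prescribed policy; 'direct' is
    // the owning GrDirectContext):
    //
    //   if (gpu->checkAndResetOOMed()) {
    //       direct->freeGpuResources();
    //   }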

    /**
     * Put this texture in a safe and known state for use across multiple contexts. Depending on
     * the backend, this may return a GrSemaphore. If so, other contexts should wait on that
     * semaphore before using this texture.
     */
    virtual std::unique_ptr<GrSemaphore> prepareTextureForCrossContextUsage(GrTexture*) = 0;

    /**
     * Frees any backend-specific objects that are not currently in use by the GPU. This is called
     * when the client is trying to free up as much GPU memory as possible. We will not release
     * resources connected to programs/pipelines since the cost to recreate those is significantly
     * higher than for other resources.
     */
    virtual void releaseUnlockedBackendObjects() {}

    ///////////////////////////////////////////////////////////////////////////
    // Debugging and Stats

    class Stats {
    public:
#if GR_GPU_STATS
        Stats() = default;

        void reset() { *this = {}; }

        int textureCreates() const { return fTextureCreates; }
        void incTextureCreates() { fTextureCreates++; }

        int textureUploads() const { return fTextureUploads; }
        void incTextureUploads() { fTextureUploads++; }

        int transfersToTexture() const { return fTransfersToTexture; }
        void incTransfersToTexture() { fTransfersToTexture++; }

        int transfersFromSurface() const { return fTransfersFromSurface; }
        void incTransfersFromSurface() { fTransfersFromSurface++; }

        void incBufferTransfers() { fBufferTransfers++; }
        int bufferTransfers() const { return fBufferTransfers; }

        int stencilAttachmentCreates() const { return fStencilAttachmentCreates; }
        void incStencilAttachmentCreates() { fStencilAttachmentCreates++; }

        int msaaAttachmentCreates() const { return fMSAAAttachmentCreates; }
        void incMSAAAttachmentCreates() { fMSAAAttachmentCreates++; }

        int numDraws() const { return fNumDraws; }
        void incNumDraws() { fNumDraws++; }

        int numFailedDraws() const { return fNumFailedDraws; }
        void incNumFailedDraws() { ++fNumFailedDraws; }

        int numSubmitToGpus() const { return fNumSubmitToGpus; }
        void incNumSubmitToGpus() { ++fNumSubmitToGpus; }

        int numScratchTexturesReused() const { return fNumScratchTexturesReused; }
        void incNumScratchTexturesReused() { ++fNumScratchTexturesReused; }

        int numScratchMSAAAttachmentsReused() const { return fNumScratchMSAAAttachmentsReused; }
        void incNumScratchMSAAAttachmentsReused() { ++fNumScratchMSAAAttachmentsReused; }

        int renderPasses() const { return fRenderPasses; }
        void incRenderPasses() { fRenderPasses++; }

        int numReorderedDAGsOverBudget() const { return fNumReorderedDAGsOverBudget; }
        void incNumReorderedDAGsOverBudget() { fNumReorderedDAGsOverBudget++; }

#if defined(GPU_TEST_UTILS)
        void dump(SkString*);
        void dumpKeyValuePairs(skia_private::TArray<SkString>* keys,
                               skia_private::TArray<double>* values);
#endif
    private:
        int fTextureCreates = 0;
        int fTextureUploads = 0;
        int fTransfersToTexture = 0;
        int fTransfersFromSurface = 0;
        int fBufferTransfers = 0;
        int fStencilAttachmentCreates = 0;
        int fMSAAAttachmentCreates = 0;
        int fNumDraws = 0;
        int fNumFailedDraws = 0;
        int fNumSubmitToGpus = 0;
        int fNumScratchTexturesReused = 0;
        int fNumScratchMSAAAttachmentsReused = 0;
        int fRenderPasses = 0;
        int fNumReorderedDAGsOverBudget = 0;

#else // !GR_GPU_STATS

#if defined(GPU_TEST_UTILS)
        void dump(SkString*) {}
        void dumpKeyValuePairs(skia_private::TArray<SkString>*, skia_private::TArray<double>*) {}
#endif
        void incTextureCreates() {}
        void incTextureUploads() {}
        void incTransfersToTexture() {}
        void incBufferTransfers() {}
        void incTransfersFromSurface() {}
        void incStencilAttachmentCreates() {}
        void incMSAAAttachmentCreates() {}
        void incNumDraws() {}
        void incNumFailedDraws() {}
        void incNumSubmitToGpus() {}
        void incNumScratchTexturesReused() {}
        void incNumScratchMSAAAttachmentsReused() {}
        void incRenderPasses() {}
        void incNumReorderedDAGsOverBudget() {}
#endif
    };

    Stats* stats() { return &fStats; }
    void dumpJSON(SkJSONWriter*) const;

    /**
     * Creates a texture directly in the backend API without wrapping it in a GrTexture.
     * Must be matched with a call to deleteBackendTexture().
     *
     * If data is null the texture is uninitialized.
     *
     * If data represents a color then all texture levels are cleared to that color.
     *
     * If data represents pixmaps then it must have either one pixmap or, if mipmapping
     * is specified, a complete MIP hierarchy of pixmaps. Additionally, if provided, the mip
     * levels must be sized correctly according to the MIP sizes implied by dimensions. They
     * must all have the same color type and that color type must be compatible with the
     * texture format.
     */
    GrBackendTexture createBackendTexture(SkISize dimensions,
                                          const GrBackendFormat&,
                                          GrRenderable,
                                          skgpu::Mipmapped,
                                          GrProtected,
                                          std::string_view label);

    bool clearBackendTexture(const GrBackendTexture&,
                             sk_sp<skgpu::RefCntedCallback> finishedCallback,
                             std::array<float, 4> color);
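
    // Sketch of the create/clear/delete lifecycle (illustrative; 'format' is assumed to be a
    // valid, texturable backend format for this GrGpu):
    //
    //   GrBackendTexture bet = gpu->createBackendTexture({64, 64}, format,
    //                                                    GrRenderable::kNo,
    //                                                    skgpu::Mipmapped::kNo,
    //                                                    GrProtected::kNo,
    //                                                    /*label=*/"ExampleBET");
    //   if (bet.isValid()) {
    //       gpu->clearBackendTexture(bet, /*finishedCallback=*/nullptr,
    //                                {0.f, 0.f, 0.f, 1.f});
    //       // ... share or wrap the texture, then eventually:
    //       gpu->deleteBackendTexture(bet);
    //   }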

    /**
     * Same as the createBackendTexture case except compressed backend textures can
     * never be renderable.
     */
    GrBackendTexture createCompressedBackendTexture(SkISize dimensions,
                                                    const GrBackendFormat&,
                                                    skgpu::Mipmapped,
                                                    GrProtected);

    bool updateCompressedBackendTexture(const GrBackendTexture&,
                                        sk_sp<skgpu::RefCntedCallback> finishedCallback,
                                        const void* data,
                                        size_t length);

    virtual bool setBackendTextureState(const GrBackendTexture&,
                                        const skgpu::MutableTextureState&,
                                        skgpu::MutableTextureState* previousState,
                                        // NOLINTNEXTLINE(performance-unnecessary-value-param)
                                        sk_sp<skgpu::RefCntedCallback> finishedCallback) {
        return false;
    }

    virtual bool setBackendRenderTargetState(const GrBackendRenderTarget&,
                                             const skgpu::MutableTextureState&,
                                             skgpu::MutableTextureState* previousState,
                                             // NOLINTNEXTLINE(performance-unnecessary-value-param)
                                             sk_sp<skgpu::RefCntedCallback> finishedCallback) {
        return false;
    }

    /**
     * Frees a texture created by createBackendTexture(). If ownership of the backend
     * texture has been transferred to a context using adopt semantics this should not be called.
     */
    virtual void deleteBackendTexture(const GrBackendTexture&) = 0;

    /**
     * In this case we have a program descriptor and a program info but no render target.
     */
    virtual bool compile(const GrProgramDesc&, const GrProgramInfo&) = 0;

    virtual bool precompileShader(const SkData& key, const SkData& data) { return false; }

#if defined(GPU_TEST_UTILS)
    /** Check a handle represents an actual texture in the backend API that has not been freed. */
    virtual bool isTestingOnlyBackendTexture(const GrBackendTexture&) const = 0;

    /**
     * Creates a GrBackendRenderTarget that can be wrapped using
     * SkSurfaces::WrapBackendRenderTarget. Ideally this is a non-textureable allocation to
     * differentiate from testing with SkSurfaces::WrapBackendTexture. When sampleCnt > 1 this
     * is used to test client wrapped allocations with MSAA where Skia does not allocate a
     * separate buffer for resolving. If the color is non-null the backing store should be
     * cleared to the passed in color.
     */
    virtual GrBackendRenderTarget createTestingOnlyBackendRenderTarget(
            SkISize dimensions,
            GrColorType,
            int sampleCount = 1,
            GrProtected = GrProtected::kNo) = 0;

    /**
     * Deletes a GrBackendRenderTarget allocated with the above. Synchronization to make this safe
     * is up to the caller.
     */
    virtual void deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget&) = 0;

    // This is only to be used in GL-specific tests.
    virtual const GrGLContext* glContextForTesting() const { return nullptr; }

    // This is only to be used by testing code.
    virtual void resetShaderCacheForTesting() const {}

    /**
     * Inserted as a pair around a block of code to do a GPU frame capture.
     * Currently only works with the Metal backend.
     */
    virtual void testingOnly_startCapture() {}
    virtual void testingOnly_stopCapture() {}
#endif

    // width and height may be larger than rt (if underlying API allows it).
    // Returns nullptr if a compatible stencil attachment could not be created, otherwise the
    // caller owns the ref on the GrAttachment.
    virtual sk_sp<GrAttachment> makeStencilAttachment(const GrBackendFormat& colorFormat,
                                                      SkISize dimensions,
                                                      int numStencilSamples) = 0;

    virtual GrBackendFormat getPreferredStencilFormat(const GrBackendFormat&) = 0;

    // Creates an MSAA surface to be used as an MSAA attachment on a framebuffer.
    virtual sk_sp<GrAttachment> makeMSAAAttachment(SkISize dimensions,
                                                   const GrBackendFormat& format,
                                                   int numSamples,
                                                   GrProtected isProtected,
                                                   GrMemoryless isMemoryless) = 0;

    void handleDirtyContext() {
        if (fResetBits) {
            this->resetContext();
        }
    }

    virtual void storeVkPipelineCacheData() {}

    // Called before certain draws in order to guarantee coherent results from dst reads.
    virtual void xferBarrier(GrRenderTarget*, GrXferBarrierType) = 0;

    virtual void vmaDefragment() {}
    virtual void dumpVmaStats(SkString* out) {}

protected:
    static bool CompressedDataIsCorrect(SkISize dimensions,
                                        SkTextureCompressionType,
                                        skgpu::Mipmapped,
                                        const void* data,
                                        size_t length);

    // If the surface is a texture this marks its mipmaps as dirty.
    void didWriteToSurface(GrSurface* surface,
                           GrSurfaceOrigin origin,
                           const SkIRect* bounds,
                           uint32_t mipLevels = 1) const;

    void setOOMed() { fOOMed = true; }

    Stats fStats;

    // Subclass must call this to initialize caps in its constructor.
    void initCaps(sk_sp<const GrCaps> caps);

private:
    virtual void endTimerQuery(const GrTimerQuery&) { SK_ABORT("timer query not supported."); }

    virtual GrBackendTexture onCreateBackendTexture(SkISize dimensions,
                                                    const GrBackendFormat&,
                                                    GrRenderable,
                                                    skgpu::Mipmapped,
                                                    GrProtected,
                                                    std::string_view label) = 0;

    virtual GrBackendTexture onCreateCompressedBackendTexture(SkISize dimensions,
                                                              const GrBackendFormat&,
                                                              skgpu::Mipmapped,
                                                              GrProtected) = 0;

    virtual bool onClearBackendTexture(const GrBackendTexture&,
                                       sk_sp<skgpu::RefCntedCallback> finishedCallback,
                                       std::array<float, 4> color) = 0;

    virtual bool onUpdateCompressedBackendTexture(const GrBackendTexture&,
                                                  sk_sp<skgpu::RefCntedCallback> finishedCallback,
                                                  const void* data,
                                                  size_t length) = 0;

    // called when the 3D context state is unknown. Subclass should emit any
    // assumed 3D context state and dirty any state cache.
    virtual void onResetContext(uint32_t resetBits) {}

    // Implementation of resetTextureBindings.
    virtual void onResetTextureBindings() {}

    // overridden by backend-specific derived class to create objects.
    // Texture size, renderability, format support, and sample count will have already been
    // validated in the base class before onCreateTexture is called.
    // If the ith bit is set in levelClearMask then the ith MIP level should be cleared.
    virtual sk_sp<GrTexture> onCreateTexture(SkISize dimensions,
                                             const GrBackendFormat&,
                                             GrRenderable,
                                             int renderTargetSampleCnt,
                                             skgpu::Budgeted,
                                             GrProtected,
                                             int mipLevelCount,
                                             uint32_t levelClearMask,
                                             std::string_view label) = 0;
    virtual sk_sp<GrTexture> onCreateCompressedTexture(SkISize dimensions,
                                                       const GrBackendFormat&,
                                                       skgpu::Budgeted,
                                                       skgpu::Mipmapped,
                                                       GrProtected,
                                                       const void* data,
                                                       size_t dataSize) = 0;
    virtual sk_sp<GrTexture> onCreateCompressedTexture(SkISize dimensions,
                                                       const GrBackendFormat&,
                                                       skgpu::Budgeted,
                                                       skgpu::Mipmapped,
                                                       GrProtected,
                                                       OH_NativeBuffer* nativeBuffer,
                                                       size_t bufferSize) = 0;
    virtual sk_sp<GrTexture> onWrapBackendTexture(const GrBackendTexture&,
                                                  GrWrapOwnership,
                                                  GrWrapCacheable,
                                                  GrIOType) = 0;

    virtual sk_sp<GrTexture> onWrapCompressedBackendTexture(const GrBackendTexture&,
                                                            GrWrapOwnership,
                                                            GrWrapCacheable) = 0;

    virtual sk_sp<GrTexture> onWrapRenderableBackendTexture(const GrBackendTexture&,
                                                            int sampleCnt,
                                                            GrWrapOwnership,
                                                            GrWrapCacheable) = 0;
    virtual sk_sp<GrRenderTarget> onWrapBackendRenderTarget(const GrBackendRenderTarget&) = 0;
    virtual sk_sp<GrRenderTarget> onWrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo&,
                                                                        const GrVkDrawableInfo&);

    virtual sk_sp<GrGpuBuffer> onCreateBuffer(size_t size,
                                              GrGpuBufferType intendedType,
                                              GrAccessPattern) = 0;

    // overridden by backend-specific derived class to perform the surface read
    virtual bool onReadPixels(GrSurface*,
                              SkIRect,
                              GrColorType surfaceColorType,
                              GrColorType dstColorType,
                              void*,
                              size_t rowBytes) = 0;

    // overridden by backend-specific derived class to perform the surface write
    virtual bool onWritePixels(GrSurface*,
                               SkIRect,
                               GrColorType surfaceColorType,
                               GrColorType srcColorType,
                               const GrMipLevel[],
                               int mipLevelCount,
                               bool prepForTexSampling) = 0;

    // overridden by backend-specific derived class to perform the buffer transfer
    virtual bool onTransferFromBufferToBuffer(sk_sp<GrGpuBuffer> src,
                                              size_t srcOffset,
                                              sk_sp<GrGpuBuffer> dst,
                                              size_t dstOffset,
                                              size_t size) = 0;

    // overridden by backend-specific derived class to perform the texture transfer
    virtual bool onTransferPixelsTo(GrTexture*,
                                    SkIRect,
                                    GrColorType textureColorType,
                                    GrColorType bufferColorType,
                                    sk_sp<GrGpuBuffer> transferBuffer,
                                    size_t offset,
                                    size_t rowBytes) = 0;

    // overridden by backend-specific derived class to perform the surface transfer
    virtual bool onTransferPixelsFrom(GrSurface*,
                                      SkIRect,
                                      GrColorType surfaceColorType,
                                      GrColorType bufferColorType,
                                      sk_sp<GrGpuBuffer> transferBuffer,
                                      size_t offset) = 0;

    // overridden by backend-specific derived class to perform the resolve
    virtual void onResolveRenderTarget(GrRenderTarget* target, const SkIRect& resolveRect) = 0;

    // overridden by backend-specific derived class to perform mip map level regeneration.
    virtual bool onRegenerateMipMapLevels(GrTexture*) = 0;

    // overridden by backend-specific derived class to perform the copy surface
    virtual bool onCopySurface(GrSurface* dst, const SkIRect& dstRect,
                               GrSurface* src, const SkIRect& srcRect,
                               GrSamplerState::Filter) = 0;

    virtual GrOpsRenderPass* onGetOpsRenderPass(
            GrRenderTarget* renderTarget,
            bool useMSAASurface,
            GrAttachment* stencil,
            GrSurfaceOrigin,
            const SkIRect& bounds,
            const GrOpsRenderPass::LoadAndStoreInfo&,
            const GrOpsRenderPass::StencilLoadAndStoreInfo&,
            const skia_private::TArray<GrSurfaceProxy*, true>& sampledProxies,
            GrXferBarrierFlags renderPassXferBarriers) = 0;

    virtual void prepareSurfacesForBackendAccessAndStateUpdates(
            SkSpan<GrSurfaceProxy*> proxies,
            SkSurfaces::BackendSurfaceAccess access,
            const skgpu::MutableTextureState* newState) {}

    virtual bool onSubmitToGpu(const GrSubmitInfo& info) = 0;

    void reportSubmitHistograms();
    virtual void onReportSubmitHistograms() {}

#ifdef SK_ENABLE_DUMP_GPU
    virtual void onDumpJSON(SkJSONWriter*) const {}
#endif

    sk_sp<GrTexture> createTextureCommon(SkISize,
                                         const GrBackendFormat&,
                                         GrTextureType textureType,
                                         GrRenderable,
                                         int renderTargetSampleCnt,
                                         skgpu::Budgeted,
                                         GrProtected,
                                         int mipLevelCnt,
                                         uint32_t levelClearMask,
                                         std::string_view label);

    void resetContext() {
        this->onResetContext(fResetBits);
        fResetBits = 0;
    }

    void callSubmittedProcs(bool success);

    sk_sp<const GrCaps> fCaps;

    uint32_t fResetBits;
    // The context owns us, not vice-versa, so this ptr is not ref'ed by Gpu.
    GrDirectContext* fContext;

    struct SubmittedProc {
        SubmittedProc(GrGpuSubmittedProc proc, GrGpuSubmittedContext context)
                : fProc(proc), fContext(context) {}

        GrGpuSubmittedProc fProc;
        GrGpuSubmittedContext fContext;
    };
    skia_private::STArray<4, SubmittedProc> fSubmittedProcs;

    bool fOOMed = false;

#if SK_HISTOGRAMS_ENABLED
    int fCurrentSubmitRenderPassCount = 0;
#endif

    using INHERITED = SkRefCnt;
};

#endif