• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright 2011 Google Inc.
3  *
4  * Use of this source code is governed by a BSD-style license that can be
5  * found in the LICENSE file.
6  */
7 
8 #ifndef GrGpu_DEFINED
9 #define GrGpu_DEFINED
10 
11 #include "include/core/SkData.h"
12 #include "include/core/SkRect.h"
13 #include "include/core/SkRefCnt.h"
14 #include "include/core/SkSpan.h"
15 #include "include/core/SkTypes.h"
16 #include "include/gpu/GpuTypes.h"
17 #include "include/gpu/ganesh/GrBackendSurface.h"
18 #include "include/gpu/ganesh/GrTypes.h"
19 #include "include/private/base/SkTArray.h"
20 #include "include/private/gpu/ganesh/GrTypesPriv.h"
21 #include "src/gpu/ganesh/GrCaps.h"
22 #include "src/gpu/ganesh/GrGpuBuffer.h"  // IWYU pragma: keep
23 #include "src/gpu/ganesh/GrOpsRenderPass.h"
24 #include "src/gpu/ganesh/GrSamplerState.h"
25 #include "src/gpu/ganesh/GrXferProcessor.h"
26 
27 #include <array>
28 #include <cstddef>
29 #include <cstdint>
30 #include <memory>
31 #include <optional>
32 #include <string_view>
33 
34 class GrAttachment;
35 class GrBackendSemaphore;
36 class GrDirectContext;
37 class GrGLContext;
38 class GrProgramDesc;
39 class GrProgramInfo;
40 class GrRenderTarget;
41 class GrRingBuffer;
42 class GrSemaphore;
43 class GrStagingBufferManager;
44 class GrSurface;
45 class GrSurfaceProxy;
46 class GrTexture;
47 class GrThreadSafePipelineBuilder;
48 class SkJSONWriter;
49 class SkString;
50 enum class SkTextureCompressionType;
51 struct GrVkDrawableInfo;
52 struct SkISize;
53 struct SkImageInfo;
54 
55 namespace SkSurfaces {
56 enum class BackendSurfaceAccess;
57 }
58 namespace skgpu {
59 class AutoCallback;
60 class MutableTextureState;
61 class RefCntedCallback;
62 }  // namespace skgpu
63 
// Holds a backend timer-query handle. A single uint32_t is sufficient for the GL
// implementation (the only one today); this could become a "Backend" SkAnySubclass
// type to cover other backends in the future.
struct GrTimerQuery {
    uint32_t query;
};
69 
70 class GrGpu {
71 public:
72     GrGpu(GrDirectContext* direct);
73     virtual ~GrGpu();
74 
getContext()75     GrDirectContext* getContext() { return fContext; }
getContext()76     const GrDirectContext* getContext() const { return fContext; }
77 
78     /**
79      * Gets the capabilities of the draw target.
80      */
caps()81     const GrCaps* caps() const { return fCaps.get(); }
refCaps()82     sk_sp<const GrCaps> refCaps() const { return fCaps; }
83 
stagingBufferManager()84     virtual GrStagingBufferManager* stagingBufferManager() { return nullptr; }
85 
uniformsRingBuffer()86     virtual GrRingBuffer* uniformsRingBuffer() { return nullptr; }
87 
88     enum class DisconnectType {
89         // No cleanup should be attempted, immediately cease making backend API calls
90         kAbandon,
91         // Free allocated resources (not known by GrResourceCache) before returning and
92         // ensure no backend backend 3D API calls will be made after disconnect() returns.
93         kCleanup,
94     };
95 
96     // Called by context when the underlying backend context is already or will be destroyed
97     // before GrDirectContext.
98     virtual void disconnect(DisconnectType);
99 
100     virtual GrThreadSafePipelineBuilder* pipelineBuilder() = 0;
101     virtual sk_sp<GrThreadSafePipelineBuilder> refPipelineBuilder() = 0;
102 
103     // Called by GrDirectContext::isContextLost. Returns true if the backend Gpu object has gotten
104     // into an unrecoverable, lost state.
isDeviceLost()105     virtual bool isDeviceLost() const { return false; }
106 
107     /**
108      * The GrGpu object normally assumes that no outsider is setting state
109      * within the underlying 3D API's context/device/whatever. This call informs
110      * the GrGpu that the state was modified and it shouldn't make assumptions
111      * about the state.
112      */
113     void markContextDirty(uint32_t state = kAll_GrBackendState) { fResetBits |= state; }
114 
115     /**
116      * Creates a texture object. If renderable is kYes then the returned texture can
117      * be used as a render target by calling GrTexture::asRenderTarget(). Not all
118      * pixel configs can be used as render targets. Support for configs as textures
119      * or render targets can be checked using GrCaps.
120      *
121      * @param dimensions     dimensions of the texture to be created.
122      * @param format         the format for the texture (not currently used).
123      * @param renderable     should the resulting texture be renderable
124      * @param renderTargetSampleCnt The number of samples to use for rendering if renderable is
125      *                       kYes. If renderable is kNo then this must be 1.
126      * @param budgeted       does this texture count against the resource cache budget?
127      * @param isProtected    should the texture be created as protected.
128      * @param texels         array of mipmap levels containing texel data to load.
 129      *                       If level i has pixels then it is assumed that its dimensions are
 130      *                       max(1, floor(dimensions.fWidth / 2^i)) by
 131      *                       max(1, floor(dimensions.fHeight / 2^i)).
132      *                       If texels[i].fPixels == nullptr for all i <= mipLevelCount or
133      *                       mipLevelCount is 0 then the texture's contents are uninitialized.
134      *                       If a level has non-null pixels, its row bytes must be a multiple of the
135      *                       config's bytes-per-pixel. The row bytes must be tight to the
136      *                       level width if !caps->writePixelsRowBytesSupport().
137      *                       If mipLevelCount > 1 and texels[i].fPixels != nullptr for any i > 0
138      *                       then all levels must have non-null pixels. All levels must have
139      *                       non-null pixels if GrCaps::createTextureMustSpecifyAllLevels() is true.
 140      * @param textureColorType The color type interpretation of the texture for the purpose
 141      *                       of uploading texel data.
142      * @param srcColorType   The color type of data in texels[].
143      * @param texelLevelCount the number of levels in 'texels'. May be 0, 1, or
144      *                       floor(max((log2(dimensions.fWidth), log2(dimensions.fHeight)))). It
145      *                       must be the latter if GrCaps::createTextureMustSpecifyAllLevels() is
146      *                       true.
147      * @return  The texture object if successful, otherwise nullptr.
148      */
149     sk_sp<GrTexture> createTexture(SkISize dimensions,
150                                    const GrBackendFormat& format,
151                                    GrTextureType textureType,
152                                    GrRenderable renderable,
153                                    int renderTargetSampleCnt,
154                                    skgpu::Budgeted budgeted,
155                                    GrProtected isProtected,
156                                    GrColorType textureColorType,
157                                    GrColorType srcColorType,
158                                    const GrMipLevel texels[],
159                                    int texelLevelCount,
160                                    std::string_view label);
161 
162     /**
163      * Simplified createTexture() interface for when there is no initial texel data to upload.
164      */
165     sk_sp<GrTexture> createTexture(SkISize dimensions,
166                                    const GrBackendFormat& format,
167                                    GrTextureType textureType,
168                                    GrRenderable renderable,
169                                    int renderTargetSampleCnt,
170                                    skgpu::Mipmapped mipmapped,
171                                    skgpu::Budgeted budgeted,
172                                    GrProtected isProtected,
173                                    std::string_view label);
174 
175     sk_sp<GrTexture> createCompressedTexture(SkISize dimensions,
176                                              const GrBackendFormat& format,
177                                              skgpu::Budgeted budgeted,
178                                              skgpu::Mipmapped mipmapped,
179                                              GrProtected isProtected,
180                                              const void* data,
181                                              size_t dataSize);
182 
183     /**
184      * Implements GrResourceProvider::wrapBackendTexture
185      */
186     sk_sp<GrTexture> wrapBackendTexture(const GrBackendTexture&,
187                                         GrWrapOwnership,
188                                         GrWrapCacheable,
189                                         GrIOType);
190 
191     sk_sp<GrTexture> wrapCompressedBackendTexture(const GrBackendTexture&,
192                                                   GrWrapOwnership,
193                                                   GrWrapCacheable);
194 
195     /**
196      * Implements GrResourceProvider::wrapRenderableBackendTexture
197      */
198     sk_sp<GrTexture> wrapRenderableBackendTexture(const GrBackendTexture&,
199                                                   int sampleCnt,
200                                                   GrWrapOwnership,
201                                                   GrWrapCacheable);
202 
203     /**
204      * Implements GrResourceProvider::wrapBackendRenderTarget
205      */
206     sk_sp<GrRenderTarget> wrapBackendRenderTarget(const GrBackendRenderTarget&);
207 
208     /**
209      * Implements GrResourceProvider::wrapVulkanSecondaryCBAsRenderTarget
210      */
211     sk_sp<GrRenderTarget> wrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo&,
212                                                               const GrVkDrawableInfo&);
213 
214     /**
215      * Creates a buffer in GPU memory. For a client-side buffer use GrBuffer::CreateCPUBacked.
216      *
217      * @param size            size of buffer to create.
218      * @param intendedType    hint to the graphics subsystem about what the buffer will be used for.
219      * @param accessPattern   hint to the graphics subsystem about how the data will be accessed.
220      *
221      * @return the buffer if successful, otherwise nullptr.
222      */
223     sk_sp<GrGpuBuffer> createBuffer(size_t size,
224                                     GrGpuBufferType intendedType,
225                                     GrAccessPattern accessPattern);
226 
227     /**
228      * Resolves MSAA. The resolveRect must already be in the native destination space.
229      */
230     void resolveRenderTarget(GrRenderTarget*, const SkIRect& resolveRect);
231 
232     /**
233      * Uses the base of the texture to recompute the contents of the other levels.
234      */
235     bool regenerateMipMapLevels(GrTexture*);
236 
237     /**
238      * If the backend API has stateful texture bindings, this resets them back to defaults.
239      */
240     void resetTextureBindings();
241 
242     /**
243      * Reads a rectangle of pixels from a render target. No sRGB/linear conversions are performed.
244      *
245      * @param surface           the surface to read from
246      * @param rect              the rectangle of pixels to read
247      * @param surfaceColorType  the color type for this use of the surface.
248      * @param dstColorType      the color type of the destination buffer.
249      * @param buffer            memory to read the rectangle into.
250      * @param rowBytes          the number of bytes between consecutive rows. Must be a multiple of
251      *                          dstColorType's bytes-per-pixel. Must be tight to width if
252      *                          !caps->readPixelsRowBytesSupport().
253      *
 254      * @return true if the read succeeded, false if not. The read can fail
 255      *              because the surface doesn't support reading, the color type
 256      *              is not allowed for the format of the surface, or the rectangle
 257      *              read is not contained in the surface.
258      */
259     bool readPixels(GrSurface* surface,
260                     SkIRect rect,
261                     GrColorType surfaceColorType,
262                     GrColorType dstColorType,
263                     void* buffer,
264                     size_t rowBytes);
265 
266     /**
267      * Updates the pixels in a rectangle of a surface.  No sRGB/linear conversions are performed.
268      *
269      * @param surface            the surface to write to.
270      * @param rect               the rectangle of pixels to overwrite
271      * @param surfaceColorType   the color type for this use of the surface.
272      * @param srcColorType       the color type of the source buffer.
273      * @param texels             array of mipmap levels containing texture data. Row bytes must be a
274      *                           multiple of srcColorType's bytes-per-pixel. Must be tight to level
275      *                           width if !caps->writePixelsRowBytesSupport().
276      * @param mipLevelCount      number of levels in 'texels'
277      * @param prepForTexSampling After doing write pixels should the surface be prepared for texture
278      *                           sampling. This is currently only used by Vulkan for inline uploads
279      *                           to set that layout back to sampled after doing the upload. Inline
280      *                           uploads currently can happen between draws in a single op so it is
281      *                           not trivial to break up the OpsTask into two tasks when we see
282      *                           an inline upload. However, once we are able to support doing that
283      *                           we can remove this parameter.
284      *
 285      * @return true if the write succeeded, false if not. The write can fail
 286      *              because the surface doesn't support writing (e.g. read only),
 287      *              the color type is not allowed for the format of the surface, or
 288      *              the rectangle written is not contained in the surface.
289      */
290     bool writePixels(GrSurface* surface,
291                      SkIRect rect,
292                      GrColorType surfaceColorType,
293                      GrColorType srcColorType,
294                      const GrMipLevel texels[],
295                      int mipLevelCount,
296                      bool prepForTexSampling = false);
297 
298     /**
299      * Helper for the case of a single level.
300      */
301     bool writePixels(GrSurface* surface,
302                      SkIRect rect,
303                      GrColorType surfaceColorType,
304                      GrColorType srcColorType,
305                      const void* buffer,
306                      size_t rowBytes,
307                      bool prepForTexSampling = false) {
308         GrMipLevel mipLevel = {buffer, rowBytes, nullptr};
309         return this->writePixels(surface,
310                                  rect,
311                                  surfaceColorType,
312                                  srcColorType,
313                                  &mipLevel,
314                                  1,
315                                  prepForTexSampling);
316     }
317 
318     /**
319      * Transfer bytes from one GPU buffer to another. The src buffer must have type kXferCpuToGpu
320      * and the dst buffer must not. Neither buffer may currently be mapped. The offsets and size
321      * must be aligned to GrCaps::transferFromBufferToBufferAlignment.
322      *
323      * @param src        the buffer to read from
324      * @param srcOffset  the aligned offset at the src at which the transfer begins.
325      * @param dst        the buffer to write to
326      * @param dstOffset  the aligned offset in the dst at which the transfer begins
 327      * @param size       the aligned number of bytes to transfer.
328      */
329     bool transferFromBufferToBuffer(sk_sp<GrGpuBuffer> src,
330                                     size_t srcOffset,
331                                     sk_sp<GrGpuBuffer> dst,
332                                     size_t dstOffset,
333                                     size_t size);
334 
335     /**
336      * Updates the pixels in a rectangle of a texture using a buffer. If the texture is MIP mapped,
337      * the base level is written to.
338      *
339      * @param texture          the texture to write to.
340      * @param rect             the rectangle of pixels in the texture to overwrite
341      * @param textureColorType the color type for this use of the surface.
342      * @param bufferColorType  the color type of the transfer buffer's pixel data
343      * @param transferBuffer   GrBuffer to read pixels from (type must be "kXferCpuToGpu")
344      * @param offset           offset from the start of the buffer
345      * @param rowBytes         number of bytes between consecutive rows in the buffer. Must be a
346      *                         multiple of bufferColorType's bytes-per-pixel. Must be tight to
347      *                         rect.width() if !caps->writePixelsRowBytesSupport().
348      */
349     bool transferPixelsTo(GrTexture* texture,
350                           SkIRect rect,
351                           GrColorType textureColorType,
352                           GrColorType bufferColorType,
353                           sk_sp<GrGpuBuffer> transferBuffer,
354                           size_t offset,
355                           size_t rowBytes);
356 
357     /**
358      * Reads the pixels from a rectangle of a surface into a buffer. Use
359      * GrCaps::SupportedRead::fOffsetAlignmentForTransferBuffer to determine the requirements for
360      * the buffer offset alignment. If the surface is a MIP mapped texture, the base level is read.
361      *
362      * If successful the row bytes in the buffer is always:
363      *   GrColorTypeBytesPerPixel(bufferColorType) * rect.width()
364      *
365      * Asserts that the caller has passed a properly aligned offset and that the buffer is
366      * large enough to hold the result
367      *
368      * @param surface          the surface to read from.
369      * @param rect             the rectangle of pixels to read
370      * @param surfaceColorType the color type for this use of the surface.
371      * @param bufferColorType  the color type of the transfer buffer's pixel data
372      * @param transferBuffer   GrBuffer to write pixels to (type must be "kXferGpuToCpu")
373      * @param offset           offset from the start of the buffer
374      */
375     bool transferPixelsFrom(GrSurface* surface,
376                             SkIRect rect,
377                             GrColorType surfaceColorType,
378                             GrColorType bufferColorType,
379                             sk_sp<GrGpuBuffer> transferBuffer,
380                             size_t offset);
381 
382     // Called to perform a surface to surface copy. Fallbacks to issuing a draw from the src to dst
 383     // take place at higher levels and this function implements faster copy paths. The src and dst
384     // rects are pre-clipped. The src rect and dst rect are guaranteed to be within the
385     // src/dst bounds and non-empty. They must also be in their exact device space coords, including
386     // already being transformed for origin if need be. If canDiscardOutsideDstRect is set to true
387     // then we don't need to preserve any data on the dst surface outside of the copy.
388     //
389     // Backends may or may not support src and dst rects with differing dimensions. This can assume
390     // that GrCaps.canCopySurface() returned true for these surfaces and rects.
391     bool copySurface(GrSurface* dst, const SkIRect& dstRect,
392                      GrSurface* src, const SkIRect& srcRect,
393                      GrSamplerState::Filter filter);
394 
395     // Returns a GrOpsRenderPass which OpsTasks send draw commands to instead of directly
396     // to the Gpu object. The 'bounds' rect is the content rect of the renderTarget.
397     // If a 'stencil' is provided it will be the one bound to 'renderTarget'. If one is not
398     // provided but 'renderTarget' has a stencil buffer then that is a signal that the
399     // render target's stencil buffer should be ignored.
400     GrOpsRenderPass* getOpsRenderPass(
401             GrRenderTarget* renderTarget,
402             bool useMSAASurface,
403             GrAttachment* stencil,
404             GrSurfaceOrigin,
405             const SkIRect& bounds,
406             const GrOpsRenderPass::LoadAndStoreInfo&,
407             const GrOpsRenderPass::StencilLoadAndStoreInfo&,
408             const skia_private::TArray<GrSurfaceProxy*, true>& sampledProxies,
409             GrXferBarrierFlags renderPassXferBarriers);
410 
getCurrentSubmitRenderPassCount()411     int getCurrentSubmitRenderPassCount() const { return fCurrentSubmitRenderPassCount; }
412 
413     // Called by GrDrawingManager when flushing.
414     // Provides a hook for post-flush actions (e.g. Vulkan command buffer submits). This will also
415     // insert any numSemaphore semaphores on the gpu and set the backendSemaphores to match the
416     // inserted semaphores.
417     void executeFlushInfo(SkSpan<GrSurfaceProxy*>,
418                           SkSurfaces::BackendSurfaceAccess access,
419                           const GrFlushInfo&,
420                           std::optional<GrTimerQuery> timerQuery,
421                           const skgpu::MutableTextureState* newState);
422 
423     // Called before render tasks are executed during a flush.
willExecute()424     virtual void willExecute() {}
425 
submitToGpu()426     bool submitToGpu() {
427         return this->submitToGpu(GrSubmitInfo());
428     }
429     bool submitToGpu(const GrSubmitInfo& info);
430 
431     virtual void submit(GrOpsRenderPass*) = 0;
432 
433     [[nodiscard]] virtual std::unique_ptr<GrSemaphore> makeSemaphore(bool isOwned = true) = 0;
434     virtual std::unique_ptr<GrSemaphore> wrapBackendSemaphore(const GrBackendSemaphore&,
435                                                               GrSemaphoreWrapType,
436                                                               GrWrapOwnership) = 0;
437     virtual void insertSemaphore(GrSemaphore* semaphore) = 0;
438     virtual void waitSemaphore(GrSemaphore* semaphore) = 0;
439 
startTimerQuery()440     virtual std::optional<GrTimerQuery> startTimerQuery() { return {}; }
441 
442     virtual void addFinishedCallback(skgpu::AutoCallback, std::optional<GrTimerQuery> = {}) = 0;
443     virtual void checkFinishedCallbacks() = 0;
444     virtual void finishOutstandingGpuWork() = 0;
445 
446     // NOLINTNEXTLINE(performance-unnecessary-value-param)
takeOwnershipOfBuffer(sk_sp<GrGpuBuffer>)447     virtual void takeOwnershipOfBuffer(sk_sp<GrGpuBuffer>) {}
448 
449     /**
450      * Checks if we detected an OOM from the underlying 3D API and if so returns true and resets
451      * the internal OOM state to false. Otherwise, returns false.
452      */
453     bool checkAndResetOOMed();
454 
455     /**
456      *  Put this texture in a safe and known state for use across multiple contexts. Depending on
457      *  the backend, this may return a GrSemaphore. If so, other contexts should wait on that
458      *  semaphore before using this texture.
459      */
460     virtual std::unique_ptr<GrSemaphore> prepareTextureForCrossContextUsage(GrTexture*) = 0;
461 
462     /**
463      * Frees any backend specific objects that are not currently in use by the GPU. This is called
464      * when the client is trying to free up as much GPU memory as possible. We will not release
465      * resources connected to programs/pipelines since the cost to recreate those is significantly
466      * higher that other resources.
467      */
releaseUnlockedBackendObjects()468     virtual void releaseUnlockedBackendObjects() {}
469 
470     ///////////////////////////////////////////////////////////////////////////
471     // Debugging and Stats
472 
473     class Stats {
474     public:
475 #if GR_GPU_STATS
476         Stats() = default;
477 
reset()478         void reset() { *this = {}; }
479 
textureCreates()480         int textureCreates() const { return fTextureCreates; }
incTextureCreates()481         void incTextureCreates() { fTextureCreates++; }
482 
textureUploads()483         int textureUploads() const { return fTextureUploads; }
incTextureUploads()484         void incTextureUploads() { fTextureUploads++; }
485 
transfersToTexture()486         int transfersToTexture() const { return fTransfersToTexture; }
incTransfersToTexture()487         void incTransfersToTexture() { fTransfersToTexture++; }
488 
transfersFromSurface()489         int transfersFromSurface() const { return fTransfersFromSurface; }
incTransfersFromSurface()490         void incTransfersFromSurface() { fTransfersFromSurface++; }
491 
incBufferTransfers()492         void incBufferTransfers() { fBufferTransfers++; }
bufferTransfers()493         int bufferTransfers() const { return fBufferTransfers; }
494 
stencilAttachmentCreates()495         int stencilAttachmentCreates() const { return fStencilAttachmentCreates; }
incStencilAttachmentCreates()496         void incStencilAttachmentCreates() { fStencilAttachmentCreates++; }
497 
msaaAttachmentCreates()498         int msaaAttachmentCreates() const { return fMSAAAttachmentCreates; }
incMSAAAttachmentCreates()499         void incMSAAAttachmentCreates() { fMSAAAttachmentCreates++; }
500 
numDraws()501         int numDraws() const { return fNumDraws; }
incNumDraws()502         void incNumDraws() { fNumDraws++; }
503 
numFailedDraws()504         int numFailedDraws() const { return fNumFailedDraws; }
incNumFailedDraws()505         void incNumFailedDraws() { ++fNumFailedDraws; }
506 
numSubmitToGpus()507         int numSubmitToGpus() const { return fNumSubmitToGpus; }
incNumSubmitToGpus()508         void incNumSubmitToGpus() { ++fNumSubmitToGpus; }
509 
numScratchTexturesReused()510         int numScratchTexturesReused() const { return fNumScratchTexturesReused; }
incNumScratchTexturesReused()511         void incNumScratchTexturesReused() { ++fNumScratchTexturesReused; }
512 
numScratchMSAAAttachmentsReused()513         int numScratchMSAAAttachmentsReused() const { return fNumScratchMSAAAttachmentsReused; }
incNumScratchMSAAAttachmentsReused()514         void incNumScratchMSAAAttachmentsReused() { ++fNumScratchMSAAAttachmentsReused; }
515 
renderPasses()516         int renderPasses() const { return fRenderPasses; }
incRenderPasses()517         void incRenderPasses() { fRenderPasses++; }
518 
numReorderedDAGsOverBudget()519         int numReorderedDAGsOverBudget() const { return fNumReorderedDAGsOverBudget; }
incNumReorderedDAGsOverBudget()520         void incNumReorderedDAGsOverBudget() { fNumReorderedDAGsOverBudget++; }
521 
522 #if defined(GPU_TEST_UTILS)
523         void dump(SkString*);
524         void dumpKeyValuePairs(
525                 skia_private::TArray<SkString>* keys, skia_private::TArray<double>* values);
526 #endif
527     private:
528         int fTextureCreates = 0;
529         int fTextureUploads = 0;
530         int fTransfersToTexture = 0;
531         int fTransfersFromSurface = 0;
532         int fBufferTransfers = 0;
533         int fStencilAttachmentCreates = 0;
534         int fMSAAAttachmentCreates = 0;
535         int fNumDraws = 0;
536         int fNumFailedDraws = 0;
537         int fNumSubmitToGpus = 0;
538         int fNumScratchTexturesReused = 0;
539         int fNumScratchMSAAAttachmentsReused = 0;
540         int fRenderPasses = 0;
541         int fNumReorderedDAGsOverBudget = 0;
542 
543 #else  // !GR_GPU_STATS
544 
545 #if defined(GPU_TEST_UTILS)
546         void dump(SkString*) {}
547         void dumpKeyValuePairs(skia_private::TArray<SkString>*, skia_private::TArray<double>*) {}
548 #endif
549         void incTextureCreates() {}
550         void incTextureUploads() {}
551         void incTransfersToTexture() {}
552         void incBufferTransfers() {}
553         void incTransfersFromSurface() {}
554         void incStencilAttachmentCreates() {}
555         void incMSAAAttachmentCreates() {}
556         void incNumDraws() {}
557         void incNumFailedDraws() {}
558         void incNumSubmitToGpus() {}
559         void incNumScratchTexturesReused() {}
560         void incNumScratchMSAAAttachmentsReused() {}
561         void incRenderPasses() {}
562         void incNumReorderedDAGsOverBudget() {}
563 #endif
564     };
565 
stats()566     Stats* stats() { return &fStats; }
567     void dumpJSON(SkJSONWriter*) const;
568 
569 
570     /**
571      * Creates a texture directly in the backend API without wrapping it in a GrTexture.
572      * Must be matched with a call to deleteBackendTexture().
573      *
574      * If data is null the texture is uninitialized.
575      *
576      * If data represents a color then all texture levels are cleared to that color.
577      *
578      * If data represents pixmaps then it must have a either one pixmap or, if mipmapping
579      * is specified, a complete MIP hierarchy of pixmaps. Additionally, if provided, the mip
580      * levels must be sized correctly according to the MIP sizes implied by dimensions. They
581      * must all have the same color type and that color type must be compatible with the
582      * texture format.
583      */
584     GrBackendTexture createBackendTexture(SkISize dimensions,
585                                           const GrBackendFormat&,
586                                           GrRenderable,
587                                           skgpu::Mipmapped,
588                                           GrProtected,
589                                           std::string_view label);
590 
591     bool clearBackendTexture(const GrBackendTexture&,
592                              sk_sp<skgpu::RefCntedCallback> finishedCallback,
593                              std::array<float, 4> color);
594 
595     /**
596      * Same as the createBackendTexture case except compressed backend textures can
597      * never be renderable.
598      */
599     GrBackendTexture createCompressedBackendTexture(SkISize dimensions,
600                                                     const GrBackendFormat&,
601                                                     skgpu::Mipmapped,
602                                                     GrProtected);
603 
604     bool updateCompressedBackendTexture(const GrBackendTexture&,
605                                         sk_sp<skgpu::RefCntedCallback> finishedCallback,
606                                         const void* data,
607                                         size_t length);
608 
setBackendTextureState(const GrBackendTexture &,const skgpu::MutableTextureState &,skgpu::MutableTextureState * previousState,sk_sp<skgpu::RefCntedCallback> finishedCallback)609     virtual bool setBackendTextureState(const GrBackendTexture&,
610                                         const skgpu::MutableTextureState&,
611                                         skgpu::MutableTextureState* previousState,
612                                         // NOLINTNEXTLINE(performance-unnecessary-value-param)
613                                         sk_sp<skgpu::RefCntedCallback> finishedCallback) {
614         return false;
615     }
616 
setBackendRenderTargetState(const GrBackendRenderTarget &,const skgpu::MutableTextureState &,skgpu::MutableTextureState * previousState,sk_sp<skgpu::RefCntedCallback> finishedCallback)617     virtual bool setBackendRenderTargetState(const GrBackendRenderTarget&,
618                                              const skgpu::MutableTextureState&,
619                                              skgpu::MutableTextureState* previousState,
620                                             // NOLINTNEXTLINE(performance-unnecessary-value-param)
621                                              sk_sp<skgpu::RefCntedCallback> finishedCallback) {
622         return false;
623     }
624 
625     /**
626      * Frees a texture created by createBackendTexture(). If ownership of the backend
627      * texture has been transferred to a context using adopt semantics this should not be called.
628      */
629     virtual void deleteBackendTexture(const GrBackendTexture&) = 0;
630 
631     /**
632      * In this case we have a program descriptor and a program info but no render target.
633      */
634     virtual bool compile(const GrProgramDesc&, const GrProgramInfo&) = 0;
635 
precompileShader(const SkData & key,const SkData & data)636     virtual bool precompileShader(const SkData& key, const SkData& data) { return false; }
637 
#if defined(GPU_TEST_UTILS)
    /** Check a handle represents an actual texture in the backend API that has not been freed. */
    virtual bool isTestingOnlyBackendTexture(const GrBackendTexture&) const = 0;

    /**
     * Creates a GrBackendRenderTarget that can be wrapped using
     * SkSurfaces::WrapBackendRenderTarget. Ideally this is a non-textureable allocation to
     * differentiate from testing with SkSurfaces::WrapBackendTexture. When sampleCnt > 1 this
     * is used to test client wrapped allocations with MSAA where Skia does not allocate a separate
     * buffer for resolving. If the color is non-null the backing store should be cleared to the
     * passed in color.
     */
    virtual GrBackendRenderTarget createTestingOnlyBackendRenderTarget(
            SkISize dimensions,
            GrColorType,
            int sampleCount = 1,
            GrProtected = GrProtected::kNo) = 0;

    /**
     * Deletes a GrBackendRenderTarget allocated with the above. Synchronization to make this safe
     * is up to the caller.
     */
    virtual void deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget&) = 0;

    // This is only to be used in GL-specific tests.
    virtual const GrGLContext* glContextForTesting() const { return nullptr; }

    // This is only to be used by testing code
    virtual void resetShaderCacheForTesting() const {}

    /**
     * Inserted as a pair around a block of code to do a GPU frame capture.
     * Currently only works with the Metal backend.
     */
    virtual void testingOnly_startCapture() {}
    virtual void testingOnly_stopCapture() {}
#endif

676     // width and height may be larger than rt (if underlying API allows it).
677     // Returns nullptr if compatible sb could not be created, otherwise the caller owns the ref on
678     // the GrAttachment.
679     virtual sk_sp<GrAttachment> makeStencilAttachment(const GrBackendFormat& colorFormat,
680                                                       SkISize dimensions,
681                                                       int numStencilSamples) = 0;
682 
683     virtual GrBackendFormat getPreferredStencilFormat(const GrBackendFormat&) = 0;
684 
685     // Creates an MSAA surface to be used as an MSAA attachment on a framebuffer.
686     virtual sk_sp<GrAttachment> makeMSAAAttachment(SkISize dimensions,
687                                                    const GrBackendFormat& format,
688                                                    int numSamples,
689                                                    GrProtected isProtected,
690                                                    GrMemoryless isMemoryless) = 0;
691 
handleDirtyContext()692     void handleDirtyContext() {
693         if (fResetBits) {
694             this->resetContext();
695         }
696     }
697 
storeVkPipelineCacheData()698     virtual void storeVkPipelineCacheData() {}
699 
700     // Called before certain draws in order to guarantee coherent results from dst reads.
701     virtual void xferBarrier(GrRenderTarget*, GrXferBarrierType) = 0;
702 
703 protected:
704     static bool CompressedDataIsCorrect(SkISize dimensions,
705                                         SkTextureCompressionType,
706                                         skgpu::Mipmapped,
707                                         const void* data,
708                                         size_t length);
709 
710     // If the surface is a texture this marks its mipmaps as dirty.
711     void didWriteToSurface(GrSurface* surface,
712                            GrSurfaceOrigin origin,
713                            const SkIRect* bounds,
714                            uint32_t mipLevels = 1) const;
715 
setOOMed()716     void setOOMed() { fOOMed = true; }
717 
718     Stats                            fStats;
719 
720     // Subclass must call this to initialize caps in its constructor.
721     void initCaps(sk_sp<const GrCaps> caps);
722 
723 private:
endTimerQuery(const GrTimerQuery &)724     virtual void endTimerQuery(const GrTimerQuery&) { SK_ABORT("timer query not supported."); }
725 
726     virtual GrBackendTexture onCreateBackendTexture(SkISize dimensions,
727                                                     const GrBackendFormat&,
728                                                     GrRenderable,
729                                                     skgpu::Mipmapped,
730                                                     GrProtected,
731                                                     std::string_view label) = 0;
732 
733     virtual GrBackendTexture onCreateCompressedBackendTexture(SkISize dimensions,
734                                                               const GrBackendFormat&,
735                                                               skgpu::Mipmapped,
736                                                               GrProtected) = 0;
737 
738     virtual bool onClearBackendTexture(const GrBackendTexture&,
739                                        sk_sp<skgpu::RefCntedCallback> finishedCallback,
740                                        std::array<float, 4> color) = 0;
741 
742     virtual bool onUpdateCompressedBackendTexture(const GrBackendTexture&,
743                                                   sk_sp<skgpu::RefCntedCallback> finishedCallback,
744                                                   const void* data,
745                                                   size_t length) = 0;
746 
747     // called when the 3D context state is unknown. Subclass should emit any
748     // assumed 3D context state and dirty any state cache.
onResetContext(uint32_t resetBits)749     virtual void onResetContext(uint32_t resetBits) {}
750 
751     // Implementation of resetTextureBindings.
onResetTextureBindings()752     virtual void onResetTextureBindings() {}
753 
754     // overridden by backend-specific derived class to create objects.
755     // Texture size, renderablility, format support, sample count will have already been validated
756     // in base class before onCreateTexture is called.
757     // If the ith bit is set in levelClearMask then the ith MIP level should be cleared.
758     virtual sk_sp<GrTexture> onCreateTexture(SkISize dimensions,
759                                              const GrBackendFormat&,
760                                              GrRenderable,
761                                              int renderTargetSampleCnt,
762                                              skgpu::Budgeted,
763                                              GrProtected,
764                                              int mipLevelCoont,
765                                              uint32_t levelClearMask,
766                                              std::string_view label) = 0;
767     virtual sk_sp<GrTexture> onCreateCompressedTexture(SkISize dimensions,
768                                                        const GrBackendFormat&,
769                                                        skgpu::Budgeted,
770                                                        skgpu::Mipmapped,
771                                                        GrProtected,
772                                                        const void* data,
773                                                        size_t dataSize) = 0;
774     virtual sk_sp<GrTexture> onWrapBackendTexture(const GrBackendTexture&,
775                                                   GrWrapOwnership,
776                                                   GrWrapCacheable,
777                                                   GrIOType) = 0;
778 
779     virtual sk_sp<GrTexture> onWrapCompressedBackendTexture(const GrBackendTexture&,
780                                                             GrWrapOwnership,
781                                                             GrWrapCacheable) = 0;
782 
783     virtual sk_sp<GrTexture> onWrapRenderableBackendTexture(const GrBackendTexture&,
784                                                             int sampleCnt,
785                                                             GrWrapOwnership,
786                                                             GrWrapCacheable) = 0;
787     virtual sk_sp<GrRenderTarget> onWrapBackendRenderTarget(const GrBackendRenderTarget&) = 0;
788     virtual sk_sp<GrRenderTarget> onWrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo&,
789                                                                         const GrVkDrawableInfo&);
790 
791     virtual sk_sp<GrGpuBuffer> onCreateBuffer(size_t size,
792                                               GrGpuBufferType intendedType,
793                                               GrAccessPattern) = 0;
794 
795     // overridden by backend-specific derived class to perform the surface read
796     virtual bool onReadPixels(GrSurface*,
797                               SkIRect,
798                               GrColorType surfaceColorType,
799                               GrColorType dstColorType,
800                               void*,
801                               size_t rowBytes) = 0;
802 
803     // overridden by backend-specific derived class to perform the surface write
804     virtual bool onWritePixels(GrSurface*,
805                                SkIRect,
806                                GrColorType surfaceColorType,
807                                GrColorType srcColorType,
808                                const GrMipLevel[],
809                                int mipLevelCount,
810                                bool prepForTexSampling) = 0;
811 
812     // overridden by backend-specific derived class to perform the buffer transfer
813     virtual bool onTransferFromBufferToBuffer(sk_sp<GrGpuBuffer> src,
814                                               size_t srcOffset,
815                                               sk_sp<GrGpuBuffer> dst,
816                                               size_t dstOffset,
817                                               size_t size) = 0;
818 
819     // overridden by backend-specific derived class to perform the texture transfer
820     virtual bool onTransferPixelsTo(GrTexture*,
821                                     SkIRect,
822                                     GrColorType textureColorType,
823                                     GrColorType bufferColorType,
824                                     sk_sp<GrGpuBuffer> transferBuffer,
825                                     size_t offset,
826                                     size_t rowBytes) = 0;
827 
828     // overridden by backend-specific derived class to perform the surface transfer
829     virtual bool onTransferPixelsFrom(GrSurface*,
830                                       SkIRect,
831                                       GrColorType surfaceColorType,
832                                       GrColorType bufferColorType,
833                                       sk_sp<GrGpuBuffer> transferBuffer,
834                                       size_t offset) = 0;
835 
836     // overridden by backend-specific derived class to perform the resolve
837     virtual void onResolveRenderTarget(GrRenderTarget* target, const SkIRect& resolveRect) = 0;
838 
839     // overridden by backend specific derived class to perform mip map level regeneration.
840     virtual bool onRegenerateMipMapLevels(GrTexture*) = 0;
841 
842     // overridden by backend specific derived class to perform the copy surface
843     virtual bool onCopySurface(GrSurface* dst, const SkIRect& dstRect,
844                                GrSurface* src, const SkIRect& srcRect,
845                                GrSamplerState::Filter) = 0;
846 
847     virtual GrOpsRenderPass* onGetOpsRenderPass(
848             GrRenderTarget* renderTarget,
849             bool useMSAASurface,
850             GrAttachment* stencil,
851             GrSurfaceOrigin,
852             const SkIRect& bounds,
853             const GrOpsRenderPass::LoadAndStoreInfo&,
854             const GrOpsRenderPass::StencilLoadAndStoreInfo&,
855             const skia_private::TArray<GrSurfaceProxy*, true>& sampledProxies,
856             GrXferBarrierFlags renderPassXferBarriers) = 0;
857 
prepareSurfacesForBackendAccessAndStateUpdates(SkSpan<GrSurfaceProxy * > proxies,SkSurfaces::BackendSurfaceAccess access,const skgpu::MutableTextureState * newState)858     virtual void prepareSurfacesForBackendAccessAndStateUpdates(
859             SkSpan<GrSurfaceProxy*> proxies,
860             SkSurfaces::BackendSurfaceAccess access,
861             const skgpu::MutableTextureState* newState) {}
862 
863     virtual bool onSubmitToGpu(const GrSubmitInfo& info) = 0;
864 
865     void reportSubmitHistograms();
onReportSubmitHistograms()866     virtual void onReportSubmitHistograms() {}
867 
868 #ifdef SK_ENABLE_DUMP_GPU
onDumpJSON(SkJSONWriter *)869     virtual void onDumpJSON(SkJSONWriter*) const {}
870 #endif
871 
872     sk_sp<GrTexture> createTextureCommon(SkISize,
873                                          const GrBackendFormat&,
874                                          GrTextureType textureType,
875                                          GrRenderable,
876                                          int renderTargetSampleCnt,
877                                          skgpu::Budgeted,
878                                          GrProtected,
879                                          int mipLevelCnt,
880                                          uint32_t levelClearMask,
881                                          std::string_view label);
882 
resetContext()883     void resetContext() {
884         this->onResetContext(fResetBits);
885         fResetBits = 0;
886     }
887 
888     void callSubmittedProcs(bool success);
889 
890     sk_sp<const GrCaps>             fCaps;
891 
892     uint32_t fResetBits;
893     // The context owns us, not vice-versa, so this ptr is not ref'ed by Gpu.
894     GrDirectContext* fContext;
895 
896     struct SubmittedProc {
SubmittedProcSubmittedProc897         SubmittedProc(GrGpuSubmittedProc proc, GrGpuSubmittedContext context)
898                 : fProc(proc), fContext(context) {}
899 
900         GrGpuSubmittedProc fProc;
901         GrGpuSubmittedContext fContext;
902     };
903     skia_private::STArray<4, SubmittedProc> fSubmittedProcs;
904 
905     bool fOOMed = false;
906 
907     int fCurrentSubmitRenderPassCount = 0;
908 
909     using INHERITED = SkRefCnt;
910 };
911 
912 #endif
913