/*
 * Copyright 2011 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrGpu_DEFINED
#define GrGpu_DEFINED

#include "include/core/SkData.h"
#include "include/core/SkRect.h"
#include "include/core/SkRefCnt.h"
#include "include/core/SkSpan.h"
#include "include/core/SkTypes.h"
#include "include/gpu/GpuTypes.h"
#include "include/gpu/GrBackendSurface.h"
#include "include/gpu/GrTypes.h"
#include "include/private/base/SkTArray.h"
#include "include/private/gpu/ganesh/GrTypesPriv.h"
#include "src/gpu/ganesh/GrCaps.h"
#include "src/gpu/ganesh/GrGpuBuffer.h"  // IWYU pragma: keep
#include "src/gpu/ganesh/GrOpsRenderPass.h"
#include "src/gpu/ganesh/GrSamplerState.h"
#include "src/gpu/ganesh/GrXferProcessor.h"

#include <array>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string_view>

class GrAttachment;
class GrBackendSemaphore;
class GrDirectContext;
class GrGLContext;
class GrProgramDesc;
class GrProgramInfo;
class GrRenderTarget;
class GrRingBuffer;
class GrSemaphore;
class GrStagingBufferManager;
class GrSurface;
class GrSurfaceProxy;
class GrTexture;
class GrThreadSafePipelineBuilder;
class SkJSONWriter;
class SkString;
enum class SkTextureCompressionType;
struct GrVkDrawableInfo;
struct SkISize;
struct SkImageInfo;

namespace SkSurfaces {
enum class BackendSurfaceAccess;
}
namespace skgpu {
class MutableTextureState;
class RefCntedCallback;
}  // namespace skgpu

class GrGpu {
public:
    GrGpu(GrDirectContext* direct);
    virtual ~GrGpu();

    GrDirectContext* getContext() { return fContext; }
    const GrDirectContext* getContext() const { return fContext; }

    /**
     * Gets the capabilities of the draw target.
     */
    const GrCaps* caps() const { return fCaps.get(); }
    sk_sp<const GrCaps> refCaps() const { return fCaps; }

    virtual GrStagingBufferManager* stagingBufferManager() { return nullptr; }

    virtual GrRingBuffer* uniformsRingBuffer() { return nullptr; }

    enum class DisconnectType {
        // No cleanup should be attempted, immediately cease making backend API calls
        kAbandon,
        // Free allocated resources (not known by GrResourceCache) before returning and
        // ensure no backend 3D API calls will be made after disconnect() returns.
        kCleanup,
    };

    // Called by context when the underlying backend context is already or will be destroyed
    // before GrDirectContext.
    virtual void disconnect(DisconnectType);

    virtual GrThreadSafePipelineBuilder* pipelineBuilder() = 0;
    virtual sk_sp<GrThreadSafePipelineBuilder> refPipelineBuilder() = 0;

    // Called by GrDirectContext::isContextLost. Returns true if the backend Gpu object has gotten
    // into an unrecoverable, lost state.
    virtual bool isDeviceLost() const { return false; }

    /**
     * The GrGpu object normally assumes that no outsider is setting state
     * within the underlying 3D API's context/device/whatever. This call informs
     * the GrGpu that the state was modified and it shouldn't make assumptions
     * about the state.
     */
    void markContextDirty(uint32_t state = kAll_GrBackendState) { fResetBits |= state; }

    /**
     * Creates a texture object. If renderable is kYes then the returned texture can
     * be used as a render target by calling GrTexture::asRenderTarget(). Not all
     * pixel configs can be used as render targets. Support for configs as textures
     * or render targets can be checked using GrCaps.
     *
     * @param dimensions     dimensions of the texture to be created.
     * @param format         the format for the texture (not currently used).
     * @param renderable     should the resulting texture be renderable
     * @param renderTargetSampleCnt The number of samples to use for rendering if renderable is
     *                       kYes. If renderable is kNo then this must be 1.
     * @param budgeted       does this texture count against the resource cache budget?
     * @param isProtected    should the texture be created as protected.
     * @param texels         array of mipmap levels containing texel data to load.
     *                       If level i has pixels then it is assumed that its dimensions are
     *                       max(1, floor(dimensions.fWidth / 2^i)) by
     *                       max(1, floor(dimensions.fHeight / 2^i)).
     *                       If texels[i].fPixels == nullptr for all i < mipLevelCount or
     *                       mipLevelCount is 0 then the texture's contents are uninitialized.
     *                       If a level has non-null pixels, its row bytes must be a multiple of the
     *                       config's bytes-per-pixel. The row bytes must be tight to the
     *                       level width if !caps->writePixelsRowBytesSupport().
     *                       If mipLevelCount > 1 and texels[i].fPixels != nullptr for any i > 0
     *                       then all levels must have non-null pixels. All levels must have
     *                       non-null pixels if GrCaps::createTextureMustSpecifyAllLevels() is true.
     * @param textureColorType The color type interpretation of the texture for the purpose
     *                       of uploading texel data.
     * @param srcColorType   The color type of data in texels[].
     * @param texelLevelCount the number of levels in 'texels'. May be 0, 1, or
     *                       floor(max(log2(dimensions.fWidth), log2(dimensions.fHeight))). It
     *                       must be the latter if GrCaps::createTextureMustSpecifyAllLevels() is
     *                       true.
     * @return  The texture object if successful, otherwise nullptr.
     */
    sk_sp<GrTexture> createTexture(SkISize dimensions,
                                   const GrBackendFormat& format,
                                   GrTextureType textureType,
                                   GrRenderable renderable,
                                   int renderTargetSampleCnt,
                                   skgpu::Budgeted budgeted,
                                   GrProtected isProtected,
                                   GrColorType textureColorType,
                                   GrColorType srcColorType,
                                   const GrMipLevel texels[],
                                   int texelLevelCount,
                                   std::string_view label);
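    // Illustrative usage sketch (not part of the API): uploading a single base level through the
    // overload above. `gpu`, `format`, and the pixel data are assumptions supplied by the caller;
    // row bytes are kept tight to the level width.
    //
    //     uint32_t basePixels[8 * 8] = {};   // 8x8, tightly packed RGBA_8888
    //     GrMipLevel texels[1] = {{basePixels, 8 * sizeof(uint32_t), nullptr}};
    //     sk_sp<GrTexture> tex = gpu->createTexture({8, 8}, format, GrTextureType::k2D,
    //                                               GrRenderable::kNo,
    //                                               /*renderTargetSampleCnt=*/1,
    //                                               skgpu::Budgeted::kYes, GrProtected::kNo,
    //                                               GrColorType::kRGBA_8888,   // textureColorType
    //                                               GrColorType::kRGBA_8888,   // srcColorType
    //                                               texels, /*texelLevelCount=*/1,
    //                                               /*label=*/"ExampleTexture");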

    /**
     * Simplified createTexture() interface for when there is no initial texel data to upload.
     */
    sk_sp<GrTexture> createTexture(SkISize dimensions,
                                   const GrBackendFormat& format,
                                   GrTextureType textureType,
                                   GrRenderable renderable,
                                   int renderTargetSampleCnt,
                                   skgpu::Mipmapped mipmapped,
                                   skgpu::Budgeted budgeted,
                                   GrProtected isProtected,
                                   std::string_view label);
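    // Illustrative usage sketch (not part of the API): allocating an uninitialized, renderable
    // texture with the simplified overload above. `gpu` and `format` are assumed to be a valid
    // GrGpu* and a caps-supported GrBackendFormat.
    //
    //     sk_sp<GrTexture> rtTex = gpu->createTexture({256, 256},
    //                                                 format,
    //                                                 GrTextureType::k2D,
    //                                                 GrRenderable::kYes,
    //                                                 /*renderTargetSampleCnt=*/1,
    //                                                 skgpu::Mipmapped::kNo,
    //                                                 skgpu::Budgeted::kYes,
    //                                                 GrProtected::kNo,
    //                                                 /*label=*/"ExampleRenderableTexture");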

    sk_sp<GrTexture> createCompressedTexture(SkISize dimensions,
                                             const GrBackendFormat& format,
                                             skgpu::Budgeted budgeted,
                                             skgpu::Mipmapped mipmapped,
                                             GrProtected isProtected,
                                             const void* data,
                                             size_t dataSize);

    /**
     * Implements GrResourceProvider::wrapBackendTexture
     */
    sk_sp<GrTexture> wrapBackendTexture(const GrBackendTexture&,
                                        GrWrapOwnership,
                                        GrWrapCacheable,
                                        GrIOType);

    sk_sp<GrTexture> wrapCompressedBackendTexture(const GrBackendTexture&,
                                                  GrWrapOwnership,
                                                  GrWrapCacheable);

    /**
     * Implements GrResourceProvider::wrapRenderableBackendTexture
     */
    sk_sp<GrTexture> wrapRenderableBackendTexture(const GrBackendTexture&,
                                                  int sampleCnt,
                                                  GrWrapOwnership,
                                                  GrWrapCacheable);

    /**
     * Implements GrResourceProvider::wrapBackendRenderTarget
     */
    sk_sp<GrRenderTarget> wrapBackendRenderTarget(const GrBackendRenderTarget&);

    /**
     * Implements GrResourceProvider::wrapVulkanSecondaryCBAsRenderTarget
     */
    sk_sp<GrRenderTarget> wrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo&,
                                                              const GrVkDrawableInfo&);

    /**
     * Creates a buffer in GPU memory. For a client-side buffer use GrBuffer::CreateCPUBacked.
     *
     * @param size            size of buffer to create.
     * @param intendedType    hint to the graphics subsystem about what the buffer will be used for.
     * @param accessPattern   hint to the graphics subsystem about how the data will be accessed.
     *
     * @return the buffer if successful, otherwise nullptr.
     */
    sk_sp<GrGpuBuffer> createBuffer(size_t size,
                                    GrGpuBufferType intendedType,
                                    GrAccessPattern accessPattern);
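    // Illustrative usage sketch (not part of the API): a static vertex buffer and a dynamic
    // CPU-to-GPU staging buffer. Sizes are arbitrary assumptions.
    //
    //     sk_sp<GrGpuBuffer> vertexBuf = gpu->createBuffer(64 * 1024,
    //                                                      GrGpuBufferType::kVertex,
    //                                                      kStatic_GrAccessPattern);
    //     sk_sp<GrGpuBuffer> stagingBuf = gpu->createBuffer(64 * 1024,
    //                                                       GrGpuBufferType::kXferCpuToGpu,
    //                                                       kDynamic_GrAccessPattern);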

    /**
     * Resolves MSAA. The resolveRect must already be in the native destination space.
     */
    void resolveRenderTarget(GrRenderTarget*, const SkIRect& resolveRect);

    /**
     * Uses the base of the texture to recompute the contents of the other levels.
     */
    bool regenerateMipMapLevels(GrTexture*);

    /**
     * If the backend API has stateful texture bindings, this resets them back to defaults.
     */
    void resetTextureBindings();

    /**
     * Reads a rectangle of pixels from a render target. No sRGB/linear conversions are performed.
     *
     * @param surface           the surface to read from
     * @param rect              the rectangle of pixels to read
     * @param surfaceColorType  the color type for this use of the surface.
     * @param dstColorType      the color type of the destination buffer.
     * @param buffer            memory to read the rectangle into.
     * @param rowBytes          the number of bytes between consecutive rows. Must be a multiple of
     *                          dstColorType's bytes-per-pixel. Must be tight to width if
     *                          !caps->readPixelsRowBytesSupport().
     *
     * @return true if the read succeeded, false if not. The read can fail
     *              because the surface doesn't support reading, the color type
     *              is not allowed for the format of the surface, or the rectangle
     *              read is not contained in the surface.
     */
    bool readPixels(GrSurface* surface,
                    SkIRect rect,
                    GrColorType surfaceColorType,
                    GrColorType dstColorType,
                    void* buffer,
                    size_t rowBytes);
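    // Illustrative usage sketch (not part of the API): reading a 64x64 block as RGBA_8888 with
    // tight row bytes. `gpu` and `surface` are assumptions; the surface is assumed readable
    // per GrCaps.
    //
    //     SkIRect rect = SkIRect::MakeWH(64, 64);
    //     size_t rowBytes = rect.width() * GrColorTypeBytesPerPixel(GrColorType::kRGBA_8888);
    //     std::vector<uint8_t> pixels(rowBytes * rect.height());
    //     bool ok = gpu->readPixels(surface, rect,
    //                               GrColorType::kRGBA_8888,   // surfaceColorType
    //                               GrColorType::kRGBA_8888,   // dstColorType
    //                               pixels.data(), rowBytes);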

    /**
     * Updates the pixels in a rectangle of a surface.  No sRGB/linear conversions are performed.
     *
     * @param surface            the surface to write to.
     * @param rect               the rectangle of pixels to overwrite
     * @param surfaceColorType   the color type for this use of the surface.
     * @param srcColorType       the color type of the source buffer.
     * @param texels             array of mipmap levels containing texture data. Row bytes must be a
     *                           multiple of srcColorType's bytes-per-pixel. Must be tight to level
     *                           width if !caps->writePixelsRowBytesSupport().
     * @param mipLevelCount      number of levels in 'texels'
     * @param prepForTexSampling After doing write pixels should the surface be prepared for texture
     *                           sampling. This is currently only used by Vulkan for inline uploads
     *                           to set that layout back to sampled after doing the upload. Inline
     *                           uploads currently can happen between draws in a single op so it is
     *                           not trivial to break up the OpsTask into two tasks when we see
     *                           an inline upload. However, once we are able to support doing that
     *                           we can remove this parameter.
     *
     * @return true if the write succeeded, false if not. The write can fail
     *              because the surface doesn't support writing (e.g. it is read only),
     *              the color type is not allowed for the format of the surface, or
     *              the rectangle written is not contained in the surface.
     */
    bool writePixels(GrSurface* surface,
                     SkIRect rect,
                     GrColorType surfaceColorType,
                     GrColorType srcColorType,
                     const GrMipLevel texels[],
                     int mipLevelCount,
                     bool prepForTexSampling = false);

    /**
     * Helper for the case of a single level.
     */
    bool writePixels(GrSurface* surface,
                     SkIRect rect,
                     GrColorType surfaceColorType,
                     GrColorType srcColorType,
                     const void* buffer,
                     size_t rowBytes,
                     bool prepForTexSampling = false) {
        GrMipLevel mipLevel = {buffer, rowBytes, nullptr};
        return this->writePixels(surface,
                                 rect,
                                 surfaceColorType,
                                 srcColorType,
                                 &mipLevel,
                                 1,
                                 prepForTexSampling);
    }
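    // Illustrative usage sketch (not part of the API): overwriting the full surface with a single
    // level of tightly packed pixels via the helper above. `gpu`, `surface`, and `srcPixels` are
    // assumptions supplied by the caller.
    //
    //     SkIRect rect = SkIRect::MakeSize(surface->dimensions());
    //     size_t rowBytes = rect.width() * GrColorTypeBytesPerPixel(GrColorType::kRGBA_8888);
    //     bool ok = gpu->writePixels(surface, rect,
    //                                GrColorType::kRGBA_8888,   // surfaceColorType
    //                                GrColorType::kRGBA_8888,   // srcColorType
    //                                srcPixels, rowBytes);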

    /**
     * Transfer bytes from one GPU buffer to another. The src buffer must have type kXferCpuToGpu
     * and the dst buffer must not. Neither buffer may currently be mapped. The offsets and size
     * must be aligned to GrCaps::transferFromBufferToBufferAlignment.
     *
     * @param src        the buffer to read from
     * @param srcOffset  the aligned offset in the src at which the transfer begins.
     * @param dst        the buffer to write to
     * @param dstOffset  the aligned offset in the dst at which the transfer begins.
     * @param size       the aligned number of bytes to transfer.
     */
    bool transferFromBufferToBuffer(sk_sp<GrGpuBuffer> src,
                                    size_t srcOffset,
                                    sk_sp<GrGpuBuffer> dst,
                                    size_t dstOffset,
                                    size_t size);
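    // Illustrative usage sketch (not part of the API): copying `size` bytes between buffers,
    // checking the alignment requirement referenced above. `srcBuffer`, `dstBuffer`, and the
    // offsets are assumptions supplied by the caller.
    //
    //     size_t align = gpu->caps()->transferFromBufferToBufferAlignment();
    //     SkASSERT(srcOffset % align == 0 && dstOffset % align == 0 && size % align == 0);
    //     bool ok = gpu->transferFromBufferToBuffer(srcBuffer, srcOffset,
    //                                               dstBuffer, dstOffset, size);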

    /**
     * Updates the pixels in a rectangle of a texture using a buffer. If the texture is MIP mapped,
     * the base level is written to.
     *
     * @param texture          the texture to write to.
     * @param rect             the rectangle of pixels in the texture to overwrite
     * @param textureColorType the color type for this use of the surface.
     * @param bufferColorType  the color type of the transfer buffer's pixel data
     * @param transferBuffer   GrBuffer to read pixels from (type must be "kXferCpuToGpu")
     * @param offset           offset from the start of the buffer
     * @param rowBytes         number of bytes between consecutive rows in the buffer. Must be a
     *                         multiple of bufferColorType's bytes-per-pixel. Must be tight to
     *                         rect.width() if !caps->writePixelsRowBytesSupport().
     */
    bool transferPixelsTo(GrTexture* texture,
                          SkIRect rect,
                          GrColorType textureColorType,
                          GrColorType bufferColorType,
                          sk_sp<GrGpuBuffer> transferBuffer,
                          size_t offset,
                          size_t rowBytes);
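    // Illustrative usage sketch (not part of the API): uploading a 32x32 block from a
    // kXferCpuToGpu buffer with tight row bytes. `gpu`, `texture`, and `xferBuffer` (already
    // filled by the caller) are assumptions.
    //
    //     SkIRect rect = SkIRect::MakeXYWH(0, 0, 32, 32);
    //     size_t rowBytes = rect.width() * GrColorTypeBytesPerPixel(GrColorType::kRGBA_8888);
    //     bool ok = gpu->transferPixelsTo(texture, rect,
    //                                     GrColorType::kRGBA_8888,   // textureColorType
    //                                     GrColorType::kRGBA_8888,   // bufferColorType
    //                                     xferBuffer, /*offset=*/0, rowBytes);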

    /**
     * Reads the pixels from a rectangle of a surface into a buffer. Use
     * GrCaps::SupportedRead::fOffsetAlignmentForTransferBuffer to determine the requirements for
     * the buffer offset alignment. If the surface is a MIP mapped texture, the base level is read.
     *
     * If successful the row bytes in the buffer is always:
     *   GrColorTypeBytesPerPixel(bufferColorType) * rect.width()
     *
     * Asserts that the caller has passed a properly aligned offset and that the buffer is
     * large enough to hold the result.
     *
     * @param surface          the surface to read from.
     * @param rect             the rectangle of pixels to read
     * @param surfaceColorType the color type for this use of the surface.
     * @param bufferColorType  the color type of the transfer buffer's pixel data
     * @param transferBuffer   GrBuffer to write pixels to (type must be "kXferGpuToCpu")
     * @param offset           offset from the start of the buffer
     */
    bool transferPixelsFrom(GrSurface* surface,
                            SkIRect rect,
                            GrColorType surfaceColorType,
                            GrColorType bufferColorType,
                            sk_sp<GrGpuBuffer> transferBuffer,
                            size_t offset);
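    // Illustrative usage sketch (not part of the API): reading back into a kXferGpuToCpu buffer.
    // Per the row-bytes formula above, the buffer must hold at least rowBytes * rect.height()
    // bytes past `offset`. `gpu`, `surface`, `xferBuffer`, and `offset` are assumptions.
    //
    //     SkIRect rect = SkIRect::MakeWH(16, 16);
    //     size_t rowBytes = GrColorTypeBytesPerPixel(GrColorType::kRGBA_8888) * rect.width();
    //     SkASSERT(xferBuffer->size() >= offset + rowBytes * rect.height());
    //     bool ok = gpu->transferPixelsFrom(surface, rect,
    //                                       GrColorType::kRGBA_8888,   // surfaceColorType
    //                                       GrColorType::kRGBA_8888,   // bufferColorType
    //                                       xferBuffer, offset);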

    // Called to perform a surface-to-surface copy. Fallbacks to issuing a draw from the src to dst
    // take place at higher levels; this function implements faster copy paths. The src and dst
    // rects are pre-clipped. The src rect and dst rect are guaranteed to be within the
    // src/dst bounds and non-empty. They must also be in their exact device space coords, including
    // already being transformed for origin if need be. If canDiscardOutsideDstRect is set to true
    // then we don't need to preserve any data on the dst surface outside of the copy.
    //
    // Backends may or may not support src and dst rects with differing dimensions. This can assume
    // that GrCaps.canCopySurface() returned true for these surfaces and rects.
    bool copySurface(GrSurface* dst, const SkIRect& dstRect,
                     GrSurface* src, const SkIRect& srcRect,
                     GrSamplerState::Filter filter);
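    // Illustrative usage sketch (not part of the API): a same-size blit of a 32x32 block, assuming
    // higher-level code has already verified GrCaps::canCopySurface() for this combination.
    // `gpu`, `dst`, and `src` are assumptions supplied by the caller.
    //
    //     SkIRect srcRect = SkIRect::MakeXYWH(0, 0, 32, 32);
    //     SkIRect dstRect = SkIRect::MakeXYWH(8, 8, 32, 32);
    //     bool ok = gpu->copySurface(dst, dstRect, src, srcRect,
    //                                GrSamplerState::Filter::kNearest);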

    // Returns a GrOpsRenderPass which OpsTasks send draw commands to instead of directly
    // to the Gpu object. The 'bounds' rect is the content rect of the renderTarget.
    // If a 'stencil' is provided it will be the one bound to 'renderTarget'. If one is not
    // provided but 'renderTarget' has a stencil buffer then that is a signal that the
    // render target's stencil buffer should be ignored.
    GrOpsRenderPass* getOpsRenderPass(
            GrRenderTarget* renderTarget,
            bool useMSAASurface,
            GrAttachment* stencil,
            GrSurfaceOrigin,
            const SkIRect& bounds,
            const GrOpsRenderPass::LoadAndStoreInfo&,
            const GrOpsRenderPass::StencilLoadAndStoreInfo&,
            const skia_private::TArray<GrSurfaceProxy*, true>& sampledProxies,
            GrXferBarrierFlags renderPassXferBarriers);

    // Called by GrDrawingManager when flushing.
    // Provides a hook for post-flush actions (e.g. Vulkan command buffer submits). This will also
    // insert any numSemaphores semaphores on the gpu and set the backendSemaphores to match the
    // inserted semaphores.
    void executeFlushInfo(SkSpan<GrSurfaceProxy*>,
                          SkSurfaces::BackendSurfaceAccess access,
                          const GrFlushInfo&,
                          const skgpu::MutableTextureState* newState);

    // Called before render tasks are executed during a flush.
    virtual void willExecute() {}

    bool submitToGpu(GrSyncCpu sync);

    virtual void submit(GrOpsRenderPass*) = 0;

    [[nodiscard]] virtual std::unique_ptr<GrSemaphore> makeSemaphore(bool isOwned = true) = 0;
    virtual std::unique_ptr<GrSemaphore> wrapBackendSemaphore(const GrBackendSemaphore&,
                                                              GrSemaphoreWrapType,
                                                              GrWrapOwnership) = 0;
    virtual void insertSemaphore(GrSemaphore* semaphore) = 0;
    virtual void waitSemaphore(GrSemaphore* semaphore) = 0;

    virtual void addFinishedProc(GrGpuFinishedProc finishedProc,
                                 GrGpuFinishedContext finishedContext) = 0;
    virtual void checkFinishProcs() = 0;
    virtual void finishOutstandingGpuWork() = 0;

    // NOLINTNEXTLINE(performance-unnecessary-value-param)
    virtual void takeOwnershipOfBuffer(sk_sp<GrGpuBuffer>) {}

    /**
     * Checks if we detected an OOM from the underlying 3D API and if so returns true and resets
     * the internal OOM state to false. Otherwise, returns false.
     */
    bool checkAndResetOOMed();

    /**
     *  Put this texture in a safe and known state for use across multiple contexts. Depending on
     *  the backend, this may return a GrSemaphore. If so, other contexts should wait on that
     *  semaphore before using this texture.
     */
    virtual std::unique_ptr<GrSemaphore> prepareTextureForCrossContextUsage(GrTexture*) = 0;

    /**
     * Frees any backend specific objects that are not currently in use by the GPU. This is called
     * when the client is trying to free up as much GPU memory as possible. We will not release
     * resources connected to programs/pipelines since the cost to recreate those is significantly
     * higher than for other resources.
     */
    virtual void releaseUnlockedBackendObjects() {}

    ///////////////////////////////////////////////////////////////////////////
    // Debugging and Stats

    class Stats {
    public:
#if GR_GPU_STATS
        Stats() = default;

        void reset() { *this = {}; }

        int textureCreates() const { return fTextureCreates; }
        void incTextureCreates() { fTextureCreates++; }

        int textureUploads() const { return fTextureUploads; }
        void incTextureUploads() { fTextureUploads++; }

        int transfersToTexture() const { return fTransfersToTexture; }
        void incTransfersToTexture() { fTransfersToTexture++; }

        int transfersFromSurface() const { return fTransfersFromSurface; }
        void incTransfersFromSurface() { fTransfersFromSurface++; }

        void incBufferTransfers() { fBufferTransfers++; }
        int bufferTransfers() const { return fBufferTransfers; }

        int stencilAttachmentCreates() const { return fStencilAttachmentCreates; }
        void incStencilAttachmentCreates() { fStencilAttachmentCreates++; }

        int msaaAttachmentCreates() const { return fMSAAAttachmentCreates; }
        void incMSAAAttachmentCreates() { fMSAAAttachmentCreates++; }

        int numDraws() const { return fNumDraws; }
        void incNumDraws() { fNumDraws++; }

        int numFailedDraws() const { return fNumFailedDraws; }
        void incNumFailedDraws() { ++fNumFailedDraws; }

        int numSubmitToGpus() const { return fNumSubmitToGpus; }
        void incNumSubmitToGpus() { ++fNumSubmitToGpus; }

        int numScratchTexturesReused() const { return fNumScratchTexturesReused; }
        void incNumScratchTexturesReused() { ++fNumScratchTexturesReused; }

        int numScratchMSAAAttachmentsReused() const { return fNumScratchMSAAAttachmentsReused; }
        void incNumScratchMSAAAttachmentsReused() { ++fNumScratchMSAAAttachmentsReused; }

        int renderPasses() const { return fRenderPasses; }
        void incRenderPasses() { fRenderPasses++; }

        int numReorderedDAGsOverBudget() const { return fNumReorderedDAGsOverBudget; }
        void incNumReorderedDAGsOverBudget() { fNumReorderedDAGsOverBudget++; }

#if defined(GR_TEST_UTILS)
        void dump(SkString*);
        void dumpKeyValuePairs(
                skia_private::TArray<SkString>* keys, skia_private::TArray<double>* values);
#endif
    private:
        int fTextureCreates = 0;
        int fTextureUploads = 0;
        int fTransfersToTexture = 0;
        int fTransfersFromSurface = 0;
        int fBufferTransfers = 0;
        int fStencilAttachmentCreates = 0;
        int fMSAAAttachmentCreates = 0;
        int fNumDraws = 0;
        int fNumFailedDraws = 0;
        int fNumSubmitToGpus = 0;
        int fNumScratchTexturesReused = 0;
        int fNumScratchMSAAAttachmentsReused = 0;
        int fRenderPasses = 0;
        int fNumReorderedDAGsOverBudget = 0;

#else  // !GR_GPU_STATS

#if defined(GR_TEST_UTILS)
        void dump(SkString*) {}
        void dumpKeyValuePairs(skia_private::TArray<SkString>*, skia_private::TArray<double>*) {}
#endif
        void incTextureCreates() {}
        void incTextureUploads() {}
        void incTransfersToTexture() {}
        void incBufferTransfers() {}
        void incTransfersFromSurface() {}
        void incStencilAttachmentCreates() {}
        void incMSAAAttachmentCreates() {}
        void incNumDraws() {}
        void incNumFailedDraws() {}
        void incNumSubmitToGpus() {}
        void incNumScratchTexturesReused() {}
        void incNumScratchMSAAAttachmentsReused() {}
        void incRenderPasses() {}
        void incNumReorderedDAGsOverBudget() {}
#endif
    };

    Stats* stats() { return &fStats; }
    void dumpJSON(SkJSONWriter*) const;


    /**
     * Creates a texture directly in the backend API without wrapping it in a GrTexture.
     * Must be matched with a call to deleteBackendTexture().
     *
     * If data is null the texture is uninitialized.
     *
     * If data represents a color then all texture levels are cleared to that color.
     *
     * If data represents pixmaps then it must have either one pixmap or, if mipmapping
     * is specified, a complete MIP hierarchy of pixmaps. Additionally, if provided, the mip
     * levels must be sized correctly according to the MIP sizes implied by dimensions. They
     * must all have the same color type and that color type must be compatible with the
     * texture format.
     */
    GrBackendTexture createBackendTexture(SkISize dimensions,
                                          const GrBackendFormat&,
                                          GrRenderable,
                                          skgpu::Mipmapped,
                                          GrProtected,
                                          std::string_view label);

    bool clearBackendTexture(const GrBackendTexture&,
                             sk_sp<skgpu::RefCntedCallback> finishedCallback,
                             std::array<float, 4> color);
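    // Illustrative usage sketch (not part of the API): creating, clearing, and eventually deleting
    // a backend texture. `gpu` and `format` are assumptions; `format` must be caps-supported.
    //
    //     GrBackendTexture beTex = gpu->createBackendTexture({128, 128}, format,
    //                                                        GrRenderable::kNo,
    //                                                        skgpu::Mipmapped::kNo,
    //                                                        GrProtected::kNo,
    //                                                        /*label=*/"ExampleBackendTexture");
    //     if (beTex.isValid()) {
    //         gpu->clearBackendTexture(beTex, /*finishedCallback=*/nullptr,
    //                                  /*color=*/{0.f, 0.f, 0.f, 1.f});
    //         // ... use the texture, then release it with deleteBackendTexture(beTex).
    //     }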

    /**
     * Same as the createBackendTexture case except compressed backend textures can
     * never be renderable.
     */
    GrBackendTexture createCompressedBackendTexture(SkISize dimensions,
                                                    const GrBackendFormat&,
                                                    skgpu::Mipmapped,
                                                    GrProtected);

    bool updateCompressedBackendTexture(const GrBackendTexture&,
                                        sk_sp<skgpu::RefCntedCallback> finishedCallback,
                                        const void* data,
                                        size_t length);

    virtual bool setBackendTextureState(const GrBackendTexture&,
                                        const skgpu::MutableTextureState&,
                                        skgpu::MutableTextureState* previousState,
                                        // NOLINTNEXTLINE(performance-unnecessary-value-param)
                                        sk_sp<skgpu::RefCntedCallback> finishedCallback) {
        return false;
    }

    virtual bool setBackendRenderTargetState(const GrBackendRenderTarget&,
                                             const skgpu::MutableTextureState&,
                                             skgpu::MutableTextureState* previousState,
                                             // NOLINTNEXTLINE(performance-unnecessary-value-param)
                                             sk_sp<skgpu::RefCntedCallback> finishedCallback) {
        return false;
    }

    /**
     * Frees a texture created by createBackendTexture(). If ownership of the backend
     * texture has been transferred to a context using adopt semantics this should not be called.
     */
    virtual void deleteBackendTexture(const GrBackendTexture&) = 0;

    /**
     * In this case we have a program descriptor and a program info but no render target.
     */
    virtual bool compile(const GrProgramDesc&, const GrProgramInfo&) = 0;

    virtual bool precompileShader(const SkData& key, const SkData& data) { return false; }

#if defined(GR_TEST_UTILS)
    /** Check a handle represents an actual texture in the backend API that has not been freed. */
    virtual bool isTestingOnlyBackendTexture(const GrBackendTexture&) const = 0;

    /**
     * Creates a GrBackendRenderTarget that can be wrapped using
     * SkSurfaces::WrapBackendRenderTarget. Ideally this is a non-textureable allocation to
     * differentiate from testing with SkSurfaces::WrapBackendTexture. When sampleCnt > 1 this
     * is used to test client wrapped allocations with MSAA where Skia does not allocate a separate
     * buffer for resolving. If the color is non-null the backing store should be cleared to the
     * passed in color.
     */
    virtual GrBackendRenderTarget createTestingOnlyBackendRenderTarget(
            SkISize dimensions,
            GrColorType,
            int sampleCount = 1,
            GrProtected = GrProtected::kNo) = 0;

    /**
     * Deletes a GrBackendRenderTarget allocated with the above. Synchronization to make this safe
     * is up to the caller.
     */
    virtual void deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget&) = 0;

    // This is only to be used in GL-specific tests.
    virtual const GrGLContext* glContextForTesting() const { return nullptr; }

    // This is only to be used by testing code
    virtual void resetShaderCacheForTesting() const {}

    /**
     * Inserted as a pair around a block of code to do a GPU frame capture.
     * Currently only works with the Metal backend.
     */
    virtual void testingOnly_startCapture() {}
    virtual void testingOnly_stopCapture() {}
#endif

    // width and height may be larger than rt (if underlying API allows it).
    // Returns nullptr if a compatible stencil attachment could not be created, otherwise the
    // caller owns the ref on the GrAttachment.
    virtual sk_sp<GrAttachment> makeStencilAttachment(const GrBackendFormat& colorFormat,
                                                      SkISize dimensions,
                                                      int numStencilSamples) = 0;

    virtual GrBackendFormat getPreferredStencilFormat(const GrBackendFormat&) = 0;

    // Creates an MSAA surface to be used as an MSAA attachment on a framebuffer.
    virtual sk_sp<GrAttachment> makeMSAAAttachment(SkISize dimensions,
                                                   const GrBackendFormat& format,
                                                   int numSamples,
                                                   GrProtected isProtected,
                                                   GrMemoryless isMemoryless) = 0;

    void handleDirtyContext() {
        if (fResetBits) {
            this->resetContext();
        }
    }

    virtual void storeVkPipelineCacheData() {}

    // Called before certain draws in order to guarantee coherent results from dst reads.
    virtual void xferBarrier(GrRenderTarget*, GrXferBarrierType) = 0;

protected:
    static bool CompressedDataIsCorrect(SkISize dimensions,
                                        SkTextureCompressionType,
                                        skgpu::Mipmapped,
                                        const void* data,
                                        size_t length);

    // If the surface is a texture this marks its mipmaps as dirty.
    void didWriteToSurface(GrSurface* surface,
                           GrSurfaceOrigin origin,
                           const SkIRect* bounds,
                           uint32_t mipLevels = 1) const;

    void setOOMed() { fOOMed = true; }

    Stats                            fStats;

    // Subclass must call this to initialize caps in its constructor.
    void initCaps(sk_sp<const GrCaps> caps);

private:
    virtual GrBackendTexture onCreateBackendTexture(SkISize dimensions,
                                                    const GrBackendFormat&,
                                                    GrRenderable,
                                                    skgpu::Mipmapped,
                                                    GrProtected,
                                                    std::string_view label) = 0;

    virtual GrBackendTexture onCreateCompressedBackendTexture(SkISize dimensions,
                                                              const GrBackendFormat&,
                                                              skgpu::Mipmapped,
                                                              GrProtected) = 0;

    virtual bool onClearBackendTexture(const GrBackendTexture&,
                                       sk_sp<skgpu::RefCntedCallback> finishedCallback,
                                       std::array<float, 4> color) = 0;

    virtual bool onUpdateCompressedBackendTexture(const GrBackendTexture&,
                                                  sk_sp<skgpu::RefCntedCallback> finishedCallback,
                                                  const void* data,
                                                  size_t length) = 0;

    // called when the 3D context state is unknown. Subclass should emit any
    // assumed 3D context state and dirty any state cache.
    virtual void onResetContext(uint32_t resetBits) {}

    // Implementation of resetTextureBindings.
    virtual void onResetTextureBindings() {}

    // overridden by backend-specific derived class to create objects.
    // Texture size, renderability, format support, and sample count will have already been
    // validated in the base class before onCreateTexture is called.
    // If the ith bit is set in levelClearMask then the ith MIP level should be cleared.
    virtual sk_sp<GrTexture> onCreateTexture(SkISize dimensions,
                                             const GrBackendFormat&,
                                             GrRenderable,
                                             int renderTargetSampleCnt,
                                             skgpu::Budgeted,
                                             GrProtected,
                                             int mipLevelCount,
                                             uint32_t levelClearMask,
                                             std::string_view label) = 0;
    virtual sk_sp<GrTexture> onCreateCompressedTexture(SkISize dimensions,
                                                       const GrBackendFormat&,
                                                       skgpu::Budgeted,
                                                       skgpu::Mipmapped,
                                                       GrProtected,
                                                       const void* data,
                                                       size_t dataSize) = 0;
    virtual sk_sp<GrTexture> onWrapBackendTexture(const GrBackendTexture&,
                                                  GrWrapOwnership,
                                                  GrWrapCacheable,
                                                  GrIOType) = 0;

    virtual sk_sp<GrTexture> onWrapCompressedBackendTexture(const GrBackendTexture&,
                                                            GrWrapOwnership,
                                                            GrWrapCacheable) = 0;

    virtual sk_sp<GrTexture> onWrapRenderableBackendTexture(const GrBackendTexture&,
                                                            int sampleCnt,
                                                            GrWrapOwnership,
                                                            GrWrapCacheable) = 0;
    virtual sk_sp<GrRenderTarget> onWrapBackendRenderTarget(const GrBackendRenderTarget&) = 0;
    virtual sk_sp<GrRenderTarget> onWrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo&,
                                                                        const GrVkDrawableInfo&);

    virtual sk_sp<GrGpuBuffer> onCreateBuffer(size_t size,
                                              GrGpuBufferType intendedType,
                                              GrAccessPattern) = 0;

    // overridden by backend-specific derived class to perform the surface read
    virtual bool onReadPixels(GrSurface*,
                              SkIRect,
                              GrColorType surfaceColorType,
                              GrColorType dstColorType,
                              void*,
                              size_t rowBytes) = 0;

    // overridden by backend-specific derived class to perform the surface write
    virtual bool onWritePixels(GrSurface*,
                               SkIRect,
                               GrColorType surfaceColorType,
                               GrColorType srcColorType,
                               const GrMipLevel[],
                               int mipLevelCount,
                               bool prepForTexSampling) = 0;

    // overridden by backend-specific derived class to perform the buffer transfer
    virtual bool onTransferFromBufferToBuffer(sk_sp<GrGpuBuffer> src,
                                              size_t srcOffset,
                                              sk_sp<GrGpuBuffer> dst,
                                              size_t dstOffset,
                                              size_t size) = 0;

    // overridden by backend-specific derived class to perform the texture transfer
    virtual bool onTransferPixelsTo(GrTexture*,
                                    SkIRect,
                                    GrColorType textureColorType,
                                    GrColorType bufferColorType,
                                    sk_sp<GrGpuBuffer> transferBuffer,
                                    size_t offset,
                                    size_t rowBytes) = 0;

    // overridden by backend-specific derived class to perform the surface transfer
    virtual bool onTransferPixelsFrom(GrSurface*,
                                      SkIRect,
                                      GrColorType surfaceColorType,
                                      GrColorType bufferColorType,
                                      sk_sp<GrGpuBuffer> transferBuffer,
                                      size_t offset) = 0;

    // overridden by backend-specific derived class to perform the resolve
    virtual void onResolveRenderTarget(GrRenderTarget* target, const SkIRect& resolveRect) = 0;

    // overridden by backend specific derived class to perform mip map level regeneration.
    virtual bool onRegenerateMipMapLevels(GrTexture*) = 0;

    // overridden by backend specific derived class to perform the copy surface
    virtual bool onCopySurface(GrSurface* dst, const SkIRect& dstRect,
                               GrSurface* src, const SkIRect& srcRect,
                               GrSamplerState::Filter) = 0;

    virtual GrOpsRenderPass* onGetOpsRenderPass(
            GrRenderTarget* renderTarget,
            bool useMSAASurface,
            GrAttachment* stencil,
            GrSurfaceOrigin,
            const SkIRect& bounds,
            const GrOpsRenderPass::LoadAndStoreInfo&,
            const GrOpsRenderPass::StencilLoadAndStoreInfo&,
            const skia_private::TArray<GrSurfaceProxy*, true>& sampledProxies,
            GrXferBarrierFlags renderPassXferBarriers) = 0;

    virtual void prepareSurfacesForBackendAccessAndStateUpdates(
            SkSpan<GrSurfaceProxy*> proxies,
            SkSurfaces::BackendSurfaceAccess access,
            const skgpu::MutableTextureState* newState) {}

    virtual bool onSubmitToGpu(GrSyncCpu sync) = 0;

    void reportSubmitHistograms();
    virtual void onReportSubmitHistograms() {}

#ifdef SK_ENABLE_DUMP_GPU
    virtual void onDumpJSON(SkJSONWriter*) const {}
#endif

    sk_sp<GrTexture> createTextureCommon(SkISize,
                                         const GrBackendFormat&,
                                         GrTextureType textureType,
                                         GrRenderable,
                                         int renderTargetSampleCnt,
                                         skgpu::Budgeted,
                                         GrProtected,
                                         int mipLevelCnt,
                                         uint32_t levelClearMask,
                                         std::string_view label);

    void resetContext() {
        this->onResetContext(fResetBits);
        fResetBits = 0;
    }

    void callSubmittedProcs(bool success);

    sk_sp<const GrCaps>             fCaps;

    uint32_t fResetBits;
    // The context owns us, not vice-versa, so this ptr is not ref'ed by Gpu.
    GrDirectContext* fContext;

    struct SubmittedProc {
        SubmittedProc(GrGpuSubmittedProc proc, GrGpuSubmittedContext context)
                : fProc(proc), fContext(context) {}

        GrGpuSubmittedProc fProc;
        GrGpuSubmittedContext fContext;
    };
    skia_private::STArray<4, SubmittedProc> fSubmittedProcs;

    bool fOOMed = false;

#if SK_HISTOGRAMS_ENABLED
    int fCurrentSubmitRenderPassCount = 0;
#endif

    using INHERITED = SkRefCnt;
};

#endif