• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright 2011 Google Inc.
3  *
4  * Use of this source code is governed by a BSD-style license that can be
5  * found in the LICENSE file.
6  */
7 
8 #ifndef GrGpu_DEFINED
9 #define GrGpu_DEFINED
10 
11 #include "include/core/SkPath.h"
12 #include "include/core/SkSpan.h"
13 #include "include/core/SkSurface.h"
14 #include "include/gpu/GrDirectContext.h"
15 #include "include/gpu/GrTypes.h"
16 #include "include/private/SkTArray.h"
17 #include "src/core/SkTInternalLList.h"
18 #include "src/gpu/GrAttachment.h"
19 #include "src/gpu/GrCaps.h"
20 #include "src/gpu/GrOpsRenderPass.h"
21 #include "src/gpu/GrPixmap.h"
22 #include "src/gpu/GrSwizzle.h"
23 #include "src/gpu/GrXferProcessor.h"
24 
25 class GrAttachment;
26 class GrBackendRenderTarget;
27 class GrBackendSemaphore;
28 struct GrContextOptions;
29 class GrDirectContext;
30 class GrGpuBuffer;
31 class GrGLContext;
32 class GrPath;
33 class GrPathRenderer;
34 class GrPathRendererChain;
35 class GrPipeline;
36 class GrGeometryProcessor;
37 class GrRenderTarget;
38 class GrRingBuffer;
39 class GrSemaphore;
40 class GrStagingBufferManager;
41 class GrStencilSettings;
42 class GrSurface;
43 class GrTexture;
44 class GrThreadSafePipelineBuilder;
45 struct GrVkDrawableInfo;
46 class SkJSONWriter;
47 
48 namespace SkSL {
49     class Compiler;
50 }
51 
52 class GrGpu : public SkRefCnt {
53 public:
54     GrGpu(GrDirectContext* direct);
55     ~GrGpu() override;
56 
    // The GrDirectContext this GrGpu was created with (see the constructor).
    GrDirectContext* getContext() { return fContext; }
    const GrDirectContext* getContext() const { return fContext; }
59 
setCurrentGrResourceTag(const GrGpuResourceTag tag)60 void setCurrentGrResourceTag(const GrGpuResourceTag tag) {
61     if (fContext) {
62         fContext->setCurrentGrResourceTag(tag);
63     }
64 }
65 
    /**
     * Gets the capabilities of the draw target.
     */
    const GrCaps* caps() const { return fCaps.get(); }
    // Ref-counted reference to the same GrCaps object.
    sk_sp<const GrCaps> refCaps() const { return fCaps; }
71 
stagingBufferManager()72     virtual GrStagingBufferManager* stagingBufferManager() { return nullptr; }
73 
uniformsRingBuffer()74     virtual GrRingBuffer* uniformsRingBuffer() { return nullptr; }
75 
shaderCompiler()76     SkSL::Compiler* shaderCompiler() const { return fCompiler.get(); }
77 
    enum class DisconnectType {
        // No cleanup should be attempted, immediately cease making backend API calls
        kAbandon,
        // Free allocated resources (not known by GrResourceCache) before returning and
        // ensure no backend 3D API calls will be made after disconnect() returns.
        kCleanup,
    };
85 
86     // Called by context when the underlying backend context is already or will be destroyed
87     // before GrDirectContext.
88     virtual void disconnect(DisconnectType);
89 
90     virtual GrThreadSafePipelineBuilder* pipelineBuilder() = 0;
91     virtual sk_sp<GrThreadSafePipelineBuilder> refPipelineBuilder() = 0;
92 
    // Called by GrDirectContext::isContextLost. Returns true if the backend Gpu object has gotten
    // into an unrecoverable, lost state. The base implementation never reports a lost
    // device; backends that can detect device loss override this.
    virtual bool isDeviceLost() const { return false; }
96 
    /**
     * The GrGpu object normally assumes that no outsider is setting state
     * within the underlying 3D API's context/device/whatever. This call informs
     * the GrGpu that the state was modified and it shouldn't make assumptions
     * about the state.
     *
     * @param state  bitmask of GrBackendState bits to invalidate; defaults to all
     *               state. Bits are OR-ed into fResetBits, which is checked by
     *               handleDirtyContext().
     */
    void markContextDirty(uint32_t state = kAll_GrBackendState) { fResetBits |= state; }
104 
105     /**
106      * Creates a texture object. If renderable is kYes then the returned texture can
107      * be used as a render target by calling GrTexture::asRenderTarget(). Not all
108      * pixel configs can be used as render targets. Support for configs as textures
109      * or render targets can be checked using GrCaps.
110      *
111      * @param dimensions     dimensions of the texture to be created.
112      * @param format         the format for the texture (not currently used).
113      * @param renderable     should the resulting texture be renderable
114      * @param renderTargetSampleCnt The number of samples to use for rendering if renderable is
115      *                       kYes. If renderable is kNo then this must be 1.
116      * @param budgeted       does this texture count against the resource cache budget?
117      * @param isProtected    should the texture be created as protected.
118      * @param texels         array of mipmap levels containing texel data to load.
119      *                       If level i has pixels then it is assumed that its dimensions are
     *                       max(1, floor(dimensions.fWidth / 2^i)) by
     *                       max(1, floor(dimensions.fHeight / 2^i)).
     *                       If texels[i].fPixels == nullptr for all i < mipLevelCount or
123      *                       mipLevelCount is 0 then the texture's contents are uninitialized.
124      *                       If a level has non-null pixels, its row bytes must be a multiple of the
125      *                       config's bytes-per-pixel. The row bytes must be tight to the
126      *                       level width if !caps->writePixelsRowBytesSupport().
127      *                       If mipLevelCount > 1 and texels[i].fPixels != nullptr for any i > 0
128      *                       then all levels must have non-null pixels. All levels must have
129      *                       non-null pixels if GrCaps::createTextureMustSpecifyAllLevels() is true.
     * @param textureColorType The color type interpretation of the texture for the purpose
     *                       of uploading texel data.
132      * @param srcColorType   The color type of data in texels[].
133      * @param texelLevelCount the number of levels in 'texels'. May be 0, 1, or
134      *                       floor(max((log2(dimensions.fWidth), log2(dimensions.fHeight)))). It
135      *                       must be the latter if GrCaps::createTextureMustSpecifyAllLevels() is
136      *                       true.
137      * @return  The texture object if successful, otherwise nullptr.
138      */
139     sk_sp<GrTexture> createTexture(SkISize dimensions,
140                                    const GrBackendFormat& format,
141                                    GrTextureType textureType,
142                                    GrRenderable renderable,
143                                    int renderTargetSampleCnt,
144                                    SkBudgeted budgeted,
145                                    GrProtected isProtected,
146                                    GrColorType textureColorType,
147                                    GrColorType srcColorType,
148                                    const GrMipLevel texels[],
149                                    int texelLevelCount);
150 
151     /**
152      * Simplified createTexture() interface for when there is no initial texel data to upload.
153      */
154     sk_sp<GrTexture> createTexture(SkISize dimensions,
155                                    const GrBackendFormat& format,
156                                    GrTextureType textureType,
157                                    GrRenderable renderable,
158                                    int renderTargetSampleCnt,
159                                    GrMipmapped mipMapped,
160                                    SkBudgeted budgeted,
161                                    GrProtected isProtected);
162 
163     sk_sp<GrTexture> createCompressedTexture(SkISize dimensions,
164                                              const GrBackendFormat& format,
165                                              SkBudgeted budgeted,
166                                              GrMipmapped mipMapped,
167                                              GrProtected isProtected,
168                                              const void* data, size_t dataSize);
169 
170     /**
171      * Implements GrResourceProvider::wrapBackendTexture
172      */
173     sk_sp<GrTexture> wrapBackendTexture(const GrBackendTexture&,
174                                         GrWrapOwnership,
175                                         GrWrapCacheable,
176                                         GrIOType);
177 
178     sk_sp<GrTexture> wrapCompressedBackendTexture(const GrBackendTexture&,
179                                                   GrWrapOwnership,
180                                                   GrWrapCacheable);
181 
182     /**
183      * Implements GrResourceProvider::wrapRenderableBackendTexture
184      */
185     sk_sp<GrTexture> wrapRenderableBackendTexture(const GrBackendTexture&,
186                                                   int sampleCnt,
187                                                   GrWrapOwnership,
188                                                   GrWrapCacheable);
189 
190     /**
191      * Implements GrResourceProvider::wrapBackendRenderTarget
192      */
193     sk_sp<GrRenderTarget> wrapBackendRenderTarget(const GrBackendRenderTarget&);
194 
195     /**
196      * Implements GrResourceProvider::wrapVulkanSecondaryCBAsRenderTarget
197      */
198     sk_sp<GrRenderTarget> wrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo&,
199                                                               const GrVkDrawableInfo&);
200 
201     /**
202      * Creates a buffer in GPU memory. For a client-side buffer use GrBuffer::CreateCPUBacked.
203      *
204      * @param size            size of buffer to create.
205      * @param intendedType    hint to the graphics subsystem about what the buffer will be used for.
206      * @param accessPattern   hint to the graphics subsystem about how the data will be accessed.
207      * @param data            optional data with which to initialize the buffer.
208      *
209      * @return the buffer if successful, otherwise nullptr.
210      */
211     sk_sp<GrGpuBuffer> createBuffer(size_t size, GrGpuBufferType intendedType,
212                                     GrAccessPattern accessPattern, const void* data = nullptr);
213 
214     /**
215      * Resolves MSAA. The resolveRect must already be in the native destination space.
216      */
217     void resolveRenderTarget(GrRenderTarget*, const SkIRect& resolveRect);
218 
219     /**
220      * Uses the base of the texture to recompute the contents of the other levels.
221      */
222     bool regenerateMipMapLevels(GrTexture*);
223 
224     /**
225      * If the backend API has stateful texture bindings, this resets them back to defaults.
226      */
227     void resetTextureBindings();
228 
229     /**
230      * Reads a rectangle of pixels from a render target. No sRGB/linear conversions are performed.
231      *
232      * @param surface           the surface to read from
233      * @param rect              the rectangle of pixels to read
234      * @param surfaceColorType  the color type for this use of the surface.
235      * @param dstColorType      the color type of the destination buffer.
236      * @param buffer            memory to read the rectangle into.
237      * @param rowBytes          the number of bytes between consecutive rows. Must be a multiple of
238      *                          dstColorType's bytes-per-pixel. Must be tight to width if
239      *                          !caps->readPixelsRowBytesSupport().
240      *
241      * @return true if the read succeeded, false if not. The read can fail
     *              because the surface doesn't support reading, the color type
243      *              is not allowed for the format of the surface or if the rectangle
244      *              read is not contained in the surface.
245      */
246     bool readPixels(GrSurface* surface,
247                     SkIRect rect,
248                     GrColorType surfaceColorType,
249                     GrColorType dstColorType,
250                     void* buffer,
251                     size_t rowBytes);
252 
253     /**
254      * Updates the pixels in a rectangle of a surface.  No sRGB/linear conversions are performed.
255      *
256      * @param surface            the surface to write to.
257      * @param rect               the rectangle of pixels to overwrite
258      * @param surfaceColorType   the color type for this use of the surface.
259      * @param srcColorType       the color type of the source buffer.
260      * @param texels             array of mipmap levels containing texture data. Row bytes must be a
261      *                           multiple of srcColorType's bytes-per-pixel. Must be tight to level
262      *                           width if !caps->writePixelsRowBytesSupport().
263      * @param mipLevelCount      number of levels in 'texels'
264      * @param prepForTexSampling After doing write pixels should the surface be prepared for texture
265      *                           sampling. This is currently only used by Vulkan for inline uploads
266      *                           to set that layout back to sampled after doing the upload. Inline
267      *                           uploads currently can happen between draws in a single op so it is
268      *                           not trivial to break up the OpsTask into two tasks when we see
269      *                           an inline upload. However, once we are able to support doing that
270      *                           we can remove this parameter.
271      *
     * @return true if the write succeeded, false if not. The write can fail
     *              because the surface doesn't support writing (e.g. read only),
274      *              the color type is not allowed for the format of the surface or
275      *              if the rectangle written is not contained in the surface.
276      */
277     bool writePixels(GrSurface* surface,
278                      SkIRect rect,
279                      GrColorType surfaceColorType,
280                      GrColorType srcColorType,
281                      const GrMipLevel texels[],
282                      int mipLevelCount,
283                      bool prepForTexSampling = false);
284 
285     /**
286      * Helper for the case of a single level.
287      */
288     bool writePixels(GrSurface* surface,
289                      SkIRect rect,
290                      GrColorType surfaceColorType,
291                      GrColorType srcColorType,
292                      const void* buffer,
293                      size_t rowBytes,
294                      bool prepForTexSampling = false) {
295         GrMipLevel mipLevel = {buffer, rowBytes, nullptr};
296         return this->writePixels(surface,
297                                  rect,
298                                  surfaceColorType,
299                                  srcColorType,
300                                  &mipLevel,
301                                  1,
302                                  prepForTexSampling);
303     }
304 
305     /**
306      * Updates the pixels in a rectangle of a texture using a buffer. If the texture is MIP mapped,
307      * the base level is written to.
308      *
309      * @param texture          the texture to write to.
310      * @param rect             the rectangle of pixels in the texture to overwrite
311      * @param textureColorType the color type for this use of the surface.
312      * @param bufferColorType  the color type of the transfer buffer's pixel data
313      * @param transferBuffer   GrBuffer to read pixels from (type must be "kXferCpuToGpu")
314      * @param offset           offset from the start of the buffer
315      * @param rowBytes         number of bytes between consecutive rows in the buffer. Must be a
316      *                         multiple of bufferColorType's bytes-per-pixel. Must be tight to
317      *                         rect.width() if !caps->writePixelsRowBytesSupport().
318      */
319     bool transferPixelsTo(GrTexture* texture,
320                           SkIRect rect,
321                           GrColorType textureColorType,
322                           GrColorType bufferColorType,
323                           sk_sp<GrGpuBuffer> transferBuffer,
324                           size_t offset,
325                           size_t rowBytes);
326 
327     /**
328      * Reads the pixels from a rectangle of a surface into a buffer. Use
329      * GrCaps::SupportedRead::fOffsetAlignmentForTransferBuffer to determine the requirements for
330      * the buffer offset alignment. If the surface is a MIP mapped texture, the base level is read.
331      *
332      * If successful the row bytes in the buffer is always:
333      *   GrColorTypeBytesPerPixel(bufferColorType) * rect.width()
334      *
335      * Asserts that the caller has passed a properly aligned offset and that the buffer is
336      * large enough to hold the result
337      *
338      * @param surface          the surface to read from.
339      * @param rect             the rectangle of pixels to read
340      * @param surfaceColorType the color type for this use of the surface.
341      * @param bufferColorType  the color type of the transfer buffer's pixel data
342      * @param transferBuffer   GrBuffer to write pixels to (type must be "kXferGpuToCpu")
343      * @param offset           offset from the start of the buffer
344      */
345     bool transferPixelsFrom(GrSurface* surface,
346                             SkIRect rect,
347                             GrColorType surfaceColorType,
348                             GrColorType bufferColorType,
349                             sk_sp<GrGpuBuffer> transferBuffer,
350                             size_t offset);
351 
352     // Called to perform a surface to surface copy. Fallbacks to issuing a draw from the src to dst
    // take place at higher levels and this function implements faster copy paths. The rect
354     // and point are pre-clipped. The src rect and implied dst rect are guaranteed to be within the
355     // src/dst bounds and non-empty. They must also be in their exact device space coords, including
356     // already being transformed for origin if need be. If canDiscardOutsideDstRect is set to true
357     // then we don't need to preserve any data on the dst surface outside of the copy.
358     bool copySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
359                      const SkIPoint& dstPoint);
360 
361     // Returns a GrOpsRenderPass which OpsTasks send draw commands to instead of directly
362     // to the Gpu object. The 'bounds' rect is the content rect of the renderTarget.
363     // If a 'stencil' is provided it will be the one bound to 'renderTarget'. If one is not
364     // provided but 'renderTarget' has a stencil buffer then that is a signal that the
365     // render target's stencil buffer should be ignored.
366     GrOpsRenderPass* getOpsRenderPass(GrRenderTarget* renderTarget,
367                                       bool useMSAASurface,
368                                       GrAttachment* stencil,
369                                       GrSurfaceOrigin,
370                                       const SkIRect& bounds,
371                                       const GrOpsRenderPass::LoadAndStoreInfo&,
372                                       const GrOpsRenderPass::StencilLoadAndStoreInfo&,
373                                       const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
374                                       GrXferBarrierFlags renderPassXferBarriers);
375 
376     // Called by GrDrawingManager when flushing.
377     // Provides a hook for post-flush actions (e.g. Vulkan command buffer submits). This will also
378     // insert any numSemaphore semaphores on the gpu and set the backendSemaphores to match the
379     // inserted semaphores.
380     void executeFlushInfo(SkSpan<GrSurfaceProxy*>,
381                           SkSurface::BackendSurfaceAccess access,
382                           const GrFlushInfo&,
383                           const GrBackendSurfaceMutableState* newState);
384 
385     bool submitToGpu(bool syncCpu);
386 
387     virtual void submit(GrOpsRenderPass*) = 0;
388 
389     virtual GrFence SK_WARN_UNUSED_RESULT insertFence() = 0;
390     virtual bool waitFence(GrFence) = 0;
391     virtual void deleteFence(GrFence) const = 0;
392 
393     virtual std::unique_ptr<GrSemaphore> SK_WARN_UNUSED_RESULT makeSemaphore(
394             bool isOwned = true) = 0;
395     virtual std::unique_ptr<GrSemaphore> wrapBackendSemaphore(const GrBackendSemaphore&,
396                                                               GrSemaphoreWrapType,
397                                                               GrWrapOwnership) = 0;
398     virtual void insertSemaphore(GrSemaphore* semaphore) = 0;
399     virtual void waitSemaphore(GrSemaphore* semaphore) = 0;
400 
401     virtual void addFinishedProc(GrGpuFinishedProc finishedProc,
402                                  GrGpuFinishedContext finishedContext) = 0;
403     virtual void checkFinishProcs() = 0;
404     virtual void finishOutstandingGpuWork() = 0;
405 
takeOwnershipOfBuffer(sk_sp<GrGpuBuffer>)406     virtual void takeOwnershipOfBuffer(sk_sp<GrGpuBuffer>) {}
407 
408     /**
409      * Checks if we detected an OOM from the underlying 3D API and if so returns true and resets
410      * the internal OOM state to false. Otherwise, returns false.
411      */
412     bool checkAndResetOOMed();
413 
414     /**
415      *  Put this texture in a safe and known state for use across multiple contexts. Depending on
416      *  the backend, this may return a GrSemaphore. If so, other contexts should wait on that
417      *  semaphore before using this texture.
418      */
419     virtual std::unique_ptr<GrSemaphore> prepareTextureForCrossContextUsage(GrTexture*) = 0;
420 
421     /**
422      * Frees any backend specific objects that are not currently in use by the GPU. This is called
423      * when the client is trying to free up as much GPU memory as possible. We will not release
424      * resources connected to programs/pipelines since the cost to recreate those is significantly
     * higher than other resources.
426      */
releaseUnlockedBackendObjects()427     virtual void releaseUnlockedBackendObjects() {}
428 
429     ///////////////////////////////////////////////////////////////////////////
430     // Debugging and Stats
431 
    // Per-GrGpu event counters (texture creates, uploads, draws, submits, ...)
    // used for debugging and profiling. When GR_GPU_STATS is 0 every increment
    // method compiles to a no-op, so call sites never need their own #ifdefs.
    class Stats {
    public:
#if GR_GPU_STATS
        Stats() = default;

        // Zeroes all counters.
        void reset() { *this = {}; }

        int textureCreates() const { return fTextureCreates; }
        void incTextureCreates() { fTextureCreates++; }

        int textureUploads() const { return fTextureUploads; }
        void incTextureUploads() { fTextureUploads++; }

        int transfersToTexture() const { return fTransfersToTexture; }
        void incTransfersToTexture() { fTransfersToTexture++; }

        int transfersFromSurface() const { return fTransfersFromSurface; }
        void incTransfersFromSurface() { fTransfersFromSurface++; }

        int stencilAttachmentCreates() const { return fStencilAttachmentCreates; }
        void incStencilAttachmentCreates() { fStencilAttachmentCreates++; }

        int msaaAttachmentCreates() const { return fMSAAAttachmentCreates; }
        void incMSAAAttachmentCreates() { fMSAAAttachmentCreates++; }

        int numDraws() const { return fNumDraws; }
        void incNumDraws() { fNumDraws++; }

        int numFailedDraws() const { return fNumFailedDraws; }
        void incNumFailedDraws() { ++fNumFailedDraws; }

        int numSubmitToGpus() const { return fNumSubmitToGpus; }
        void incNumSubmitToGpus() { ++fNumSubmitToGpus; }

        int numScratchTexturesReused() const { return fNumScratchTexturesReused; }
        void incNumScratchTexturesReused() { ++fNumScratchTexturesReused; }

        int numScratchMSAAAttachmentsReused() const { return fNumScratchMSAAAttachmentsReused; }
        void incNumScratchMSAAAttachmentsReused() { ++fNumScratchMSAAAttachmentsReused; }

        int renderPasses() const { return fRenderPasses; }
        void incRenderPasses() { fRenderPasses++; }

        int numReorderedDAGsOverBudget() const { return fNumReorderedDAGsOverBudget; }
        void incNumReorderedDAGsOverBudget() { fNumReorderedDAGsOverBudget++; }

#if GR_TEST_UTILS
        // Test-only: human-readable and key/value dumps of all counters
        // (defined out of line, so member names below must not change).
        void dump(SkString*);
        void dumpKeyValuePairs(SkTArray<SkString>* keys, SkTArray<double>* values);
#endif
    private:
        int fTextureCreates = 0;
        int fTextureUploads = 0;
        int fTransfersToTexture = 0;
        int fTransfersFromSurface = 0;
        int fStencilAttachmentCreates = 0;
        int fMSAAAttachmentCreates = 0;
        int fNumDraws = 0;
        int fNumFailedDraws = 0;
        int fNumSubmitToGpus = 0;
        int fNumScratchTexturesReused = 0;
        int fNumScratchMSAAAttachmentsReused = 0;
        int fRenderPasses = 0;
        int fNumReorderedDAGsOverBudget = 0;

#else  // !GR_GPU_STATS

#if GR_TEST_UTILS
        void dump(SkString*) {}
        void dumpKeyValuePairs(SkTArray<SkString>*, SkTArray<double>*) {}
#endif
        // No-op stubs so call sites compile unchanged when stats are disabled.
        void incTextureCreates() {}
        void incTextureUploads() {}
        void incTransfersToTexture() {}
        void incTransfersFromSurface() {}
        void incStencilAttachmentCreates() {}
        void incMSAAAttachmentCreates() {}
        void incNumDraws() {}
        void incNumFailedDraws() {}
        void incNumSubmitToGpus() {}
        void incNumScratchTexturesReused() {}
        void incNumScratchMSAAAttachmentsReused() {}
        void incRenderPasses() {}
        void incNumReorderedDAGsOverBudget() {}
#endif
    };
518 
stats()519     Stats* stats() { return &fStats; }
520     void dumpJSON(SkJSONWriter*) const;
521 
522 
523     /**
524      * Creates a texture directly in the backend API without wrapping it in a GrTexture.
525      * Must be matched with a call to deleteBackendTexture().
526      *
527      * If data is null the texture is uninitialized.
528      *
529      * If data represents a color then all texture levels are cleared to that color.
530      *
531      * If data represents pixmaps then it must have a either one pixmap or, if mipmapping
532      * is specified, a complete MIP hierarchy of pixmaps. Additionally, if provided, the mip
533      * levels must be sized correctly according to the MIP sizes implied by dimensions. They
534      * must all have the same color type and that color type must be compatible with the
535      * texture format.
536      */
537     GrBackendTexture createBackendTexture(SkISize dimensions,
538                                           const GrBackendFormat&,
539                                           GrRenderable,
540                                           GrMipmapped,
541                                           GrProtected);
542 
543     bool clearBackendTexture(const GrBackendTexture&,
544                              sk_sp<GrRefCntedCallback> finishedCallback,
545                              std::array<float, 4> color);
546 
547     /**
548      * Same as the createBackendTexture case except compressed backend textures can
549      * never be renderable.
550      */
551     GrBackendTexture createCompressedBackendTexture(SkISize dimensions,
552                                                     const GrBackendFormat&,
553                                                     GrMipmapped,
554                                                     GrProtected);
555 
556     bool updateCompressedBackendTexture(const GrBackendTexture&,
557                                         sk_sp<GrRefCntedCallback> finishedCallback,
558                                         const void* data,
559                                         size_t length);
560 
    // Attempts to apply a backend-specific mutable state change to the texture;
    // 'previousState', if non-null, presumably receives the state prior to the
    // change (TODO confirm against overriding backends). The base implementation
    // does not support mutable state and reports failure.
    virtual bool setBackendTextureState(const GrBackendTexture&,
                                        const GrBackendSurfaceMutableState&,
                                        GrBackendSurfaceMutableState* previousState,
                                        sk_sp<GrRefCntedCallback> finishedCallback) {
        return false;
    }
567 
    // Render-target counterpart of setBackendTextureState(). The base
    // implementation does not support mutable state and reports failure.
    virtual bool setBackendRenderTargetState(const GrBackendRenderTarget&,
                                             const GrBackendSurfaceMutableState&,
                                             GrBackendSurfaceMutableState* previousState,
                                             sk_sp<GrRefCntedCallback> finishedCallback) {
        return false;
    }
574 
575     /**
576      * Frees a texture created by createBackendTexture(). If ownership of the backend
577      * texture has been transferred to a context using adopt semantics this should not be called.
578      */
579     virtual void deleteBackendTexture(const GrBackendTexture&) = 0;
580 
581     /**
582      * In this case we have a program descriptor and a program info but no render target.
583      */
584     virtual bool compile(const GrProgramDesc&, const GrProgramInfo&) = 0;
585 
precompileShader(const SkData & key,const SkData & data)586     virtual bool precompileShader(const SkData& key, const SkData& data) { return false; }
587 
#if GR_TEST_UTILS
    /** Check a handle represents an actual texture in the backend API that has not been freed. */
    virtual bool isTestingOnlyBackendTexture(const GrBackendTexture&) const = 0;

    /**
     * Creates a GrBackendRenderTarget that can be wrapped using
     * SkSurface::MakeFromBackendRenderTarget. Ideally this is a non-textureable allocation to
     * differentiate from testing with SkSurface::MakeFromBackendTexture. When sampleCnt > 1 this
     * is used to test client wrapped allocations with MSAA where Skia does not allocate a separate
     * buffer for resolving. If the color is non-null the backing store should be cleared to the
     * passed in color.
     *
     * NOTE(review): this signature has no color parameter, so the last sentence above appears
     * stale — confirm against the current callers.
     */
    virtual GrBackendRenderTarget createTestingOnlyBackendRenderTarget(
            SkISize dimensions,
            GrColorType,
            int sampleCount = 1,
            GrProtected = GrProtected::kNo) = 0;

    /**
     * Deletes a GrBackendRenderTarget allocated with the above. Synchronization to make this safe
     * is up to the caller.
     */
    virtual void deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget&) = 0;

    // This is only to be used in GL-specific tests. The base implementation returns nullptr.
    virtual const GrGLContext* glContextForTesting() const { return nullptr; }

    // This is only to be used by testing code. The base implementation is a no-op.
    virtual void resetShaderCacheForTesting() const {}

    /**
     * Inserted as a pair around a block of code to do a GPU frame capture.
     * Currently only works with the Metal backend. The base implementations are no-ops.
     */
    virtual void testingOnly_startCapture() {}
    virtual void testingOnly_endCapture() {}
#endif
625 
    // width and height may be larger than rt (if underlying API allows it).
    // Returns nullptr if compatible sb could not be created, otherwise the caller owns the ref on
    // the GrAttachment.
    virtual sk_sp<GrAttachment> makeStencilAttachment(const GrBackendFormat& colorFormat,
                                                      SkISize dimensions,
                                                      int numStencilSamples) = 0;

    // Returns the stencil format the backend prefers to pair with the given color format.
    virtual GrBackendFormat getPreferredStencilFormat(const GrBackendFormat&) = 0;

    // Creates an MSAA surface to be used as an MSAA attachment on a framebuffer.
    virtual sk_sp<GrAttachment> makeMSAAAttachment(SkISize dimensions,
                                                   const GrBackendFormat& format,
                                                   int numSamples,
                                                   GrProtected isProtected,
                                                   GrMemoryless isMemoryless) = 0;
641 
handleDirtyContext()642     void handleDirtyContext() {
643         if (fResetBits) {
644             this->resetContext();
645         }
646     }
647 
    // No-op in the base class; presumably overridden by the Vulkan backend to persist its
    // pipeline cache — confirm against GrVkGpu.
    virtual void storeVkPipelineCacheData() {}
649 
    // Workaround for http://skbug.com/9739. Only legal to call on backends whose caps report
    // requiresManualFBBarrierAfterTessellatedStencilDraw(); the base implementation asserts
    // that and aborts.
    virtual void insertManualFramebufferBarrier() {
        SkASSERT(!this->caps()->requiresManualFBBarrierAfterTessellatedStencilDraw());
        SK_ABORT("Manual framebuffer barrier not supported.");
    }

    // Called before certain draws in order to guarantee coherent results from dst reads.
    virtual void xferBarrier(GrRenderTarget*, GrXferBarrierType) = 0;
658 
protected:
    // Returns whether the supplied compressed data has the correct length for the given
    // dimensions, compression type, and mip status.
    static bool CompressedDataIsCorrect(SkISize dimensions,
                                        SkImage::CompressionType,
                                        GrMipmapped,
                                        const void* data,
                                        size_t length);

    // Handles cases where a surface will be updated without a call to flushRenderTarget.
    void didWriteToSurface(GrSurface* surface, GrSurfaceOrigin origin, const SkIRect* bounds,
                           uint32_t mipLevels = 1) const;

    // Sets the sticky out-of-memory flag (fOOMed); subclasses call this when a backend
    // allocation fails.
    void setOOMed() { fOOMed = true; }

    Stats                            fStats;

    // Subclass must call this to initialize caps & compiler in its constructor.
    void initCapsAndCompiler(sk_sp<const GrCaps> caps);
676 
private:
    // Backend implementation of createBackendTexture(); creates an uninitialized texture.
    virtual GrBackendTexture onCreateBackendTexture(SkISize dimensions,
                                                    const GrBackendFormat&,
                                                    GrRenderable,
                                                    GrMipmapped,
                                                    GrProtected) = 0;

    // Backend implementation of the compressed-texture variant of the above.
    virtual GrBackendTexture onCreateCompressedBackendTexture(
            SkISize dimensions, const GrBackendFormat&, GrMipmapped, GrProtected) = 0;

    // Clears the backend texture to 'color'. 'finishedCallback' signals when the GPU work is
    // done. Returns false on failure.
    virtual bool onClearBackendTexture(const GrBackendTexture&,
                                       sk_sp<GrRefCntedCallback> finishedCallback,
                                       std::array<float, 4> color) = 0;

    // Uploads 'length' bytes of compressed data into the backend texture. 'finishedCallback'
    // signals when the GPU work is done.
    virtual bool onUpdateCompressedBackendTexture(const GrBackendTexture&,
                                                  sk_sp<GrRefCntedCallback> finishedCallback,
                                                  const void* data,
                                                  size_t length) = 0;

    // called when the 3D context state is unknown. Subclass should emit any
    // assumed 3D context state and dirty any state cache.
    virtual void onResetContext(uint32_t resetBits) {}

    // Implementation of resetTextureBindings. Base implementation is a no-op.
    virtual void onResetTextureBindings() {}
702 
703     // overridden by backend-specific derived class to create objects.
704     // Texture size, renderablility, format support, sample count will have already been validated
705     // in base class before onCreateTexture is called.
706     // If the ith bit is set in levelClearMask then the ith MIP level should be cleared.
707     virtual sk_sp<GrTexture> onCreateTexture(SkISize dimensions,
708                                              const GrBackendFormat&,
709                                              GrRenderable,
710                                              int renderTargetSampleCnt,
711                                              SkBudgeted,
712                                              GrProtected,
713                                              int mipLevelCoont,
714                                              uint32_t levelClearMask) = 0;
    // Overridden by backend-specific derived class to create a compressed texture initialized
    // with 'dataSize' bytes of compressed data.
    virtual sk_sp<GrTexture> onCreateCompressedTexture(SkISize dimensions,
                                                       const GrBackendFormat&,
                                                       SkBudgeted,
                                                       GrMipmapped,
                                                       GrProtected,
                                                       const void* data, size_t dataSize) = 0;
    // Backend implementations of the wrap* entry points: each wraps an externally created
    // backend object per the ownership/cacheable arguments.
    virtual sk_sp<GrTexture> onWrapBackendTexture(const GrBackendTexture&,
                                                  GrWrapOwnership,
                                                  GrWrapCacheable,
                                                  GrIOType) = 0;

    virtual sk_sp<GrTexture> onWrapCompressedBackendTexture(const GrBackendTexture&,
                                                            GrWrapOwnership,
                                                            GrWrapCacheable) = 0;

    virtual sk_sp<GrTexture> onWrapRenderableBackendTexture(const GrBackendTexture&,
                                                            int sampleCnt,
                                                            GrWrapOwnership,
                                                            GrWrapCacheable) = 0;
    virtual sk_sp<GrRenderTarget> onWrapBackendRenderTarget(const GrBackendRenderTarget&) = 0;
    // Not pure: has an out-of-line default (defined elsewhere); presumably only the Vulkan
    // backend overrides it — confirm against GrVkGpu.
    virtual sk_sp<GrRenderTarget> onWrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo&,
                                                                        const GrVkDrawableInfo&);

    // Creates a GPU buffer of 'size' bytes for the intended usage, optionally initialized
    // from 'data'.
    virtual sk_sp<GrGpuBuffer> onCreateBuffer(size_t size, GrGpuBufferType intendedType,
                                              GrAccessPattern, const void* data) = 0;
740 
    // overridden by backend-specific derived class to perform the surface read.
    // 'rowBytes' is the row stride of the destination pixel buffer.
    virtual bool onReadPixels(GrSurface*,
                              SkIRect,
                              GrColorType surfaceColorType,
                              GrColorType dstColorType,
                              void*,
                              size_t rowBytes) = 0;

    // overridden by backend-specific derived class to perform the surface write.
    // 'mipLevelCount' is the number of entries in the GrMipLevel array.
    virtual bool onWritePixels(GrSurface*,
                               SkIRect,
                               GrColorType surfaceColorType,
                               GrColorType srcColorType,
                               const GrMipLevel[],
                               int mipLevelCount,
                               bool prepForTexSampling) = 0;
757 
758     // overridden by backend-specific derived class to perform the texture transfer
759     virtual bool onTransferPixelsTo(GrTexture*,
760                                     SkIRect,
761                                     GrColorType textiueColorType,
762                                     GrColorType bufferColorType,
763                                     sk_sp<GrGpuBuffer> transferBuffer,
764                                     size_t offset,
765                                     size_t rowBytes) = 0;
766 
    // overridden by backend-specific derived class to perform the surface transfer:
    // copies pixels from the surface into 'transferBuffer' starting at 'offset'.
    virtual bool onTransferPixelsFrom(GrSurface*,
                                      SkIRect,
                                      GrColorType surfaceColorType,
                                      GrColorType bufferColorType,
                                      sk_sp<GrGpuBuffer> transferBuffer,
                                      size_t offset) = 0;

    // overridden by backend-specific derived class to perform the resolve of 'resolveRect'
    // on the MSAA render target.
    virtual void onResolveRenderTarget(GrRenderTarget* target, const SkIRect& resolveRect) = 0;

    // overridden by backend specific derived class to perform mip map level regeneration.
    virtual bool onRegenerateMipMapLevels(GrTexture*) = 0;

    // overridden by backend specific derived class to perform the copy surface:
    // copies 'srcRect' of 'src' to 'dst' at 'dstPoint'.
    virtual bool onCopySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
                               const SkIPoint& dstPoint) = 0;
784 
    // Backend implementation of getOpsRenderPass(): returns the render pass used to record
    // draw commands targeting 'renderTarget'. Ownership of the returned pointer is not
    // transferred here — confirm lifetime against the public entry point.
    virtual GrOpsRenderPass* onGetOpsRenderPass(
            GrRenderTarget* renderTarget,
            bool useMSAASurface,
            GrAttachment* stencil,
            GrSurfaceOrigin,
            const SkIRect& bounds,
            const GrOpsRenderPass::LoadAndStoreInfo&,
            const GrOpsRenderPass::StencilLoadAndStoreInfo&,
            const SkTArray<GrSurfaceProxy*, true>& sampledProxies,
            GrXferBarrierFlags renderPassXferBarriers) = 0;
795 
    // Hook for backends to prepare 'proxies' for external client access and/or apply a
    // mutable-state change ('newState') as part of a flush. Base implementation is a no-op.
    virtual void prepareSurfacesForBackendAccessAndStateUpdates(
            SkSpan<GrSurfaceProxy*> proxies,
            SkSurface::BackendSurfaceAccess access,
            const GrBackendSurfaceMutableState* newState) {}
800 
    // Backend implementation of submit: sends recorded work to the GPU. 'syncCpu' presumably
    // requests a CPU-side wait for completion — confirm in the backend implementations.
    virtual bool onSubmitToGpu(bool syncCpu) = 0;

    // Records submit-related histograms; the backend-specific portion is supplied by
    // onReportSubmitHistograms() (no-op by default).
    void reportSubmitHistograms();
    virtual void onReportSubmitHistograms() {}

#ifdef SK_ENABLE_DUMP_GPU
    // Backend hook to append its state to a JSON dump; no-op by default.
    virtual void onDumpJSON(SkJSONWriter*) const {}
#endif
809 
    // Shared implementation behind the public texture-creation entry points; performs the
    // base-class validation noted on onCreateTexture() before forwarding to it.
    sk_sp<GrTexture> createTextureCommon(SkISize,
                                         const GrBackendFormat&,
                                         GrTextureType textureType,
                                         GrRenderable,
                                         int renderTargetSampleCnt,
                                         SkBudgeted,
                                         GrProtected,
                                         int mipLevelCnt,
                                         uint32_t levelClearMask);
819 
    // Performs the deferred context reset: notifies the subclass of the dirty-state bits and
    // then clears them. Note: fResetBits is cleared only after onResetContext() returns.
    void resetContext() {
        this->onResetContext(fResetBits);
        fResetBits = 0;
    }

    // Invokes each queued SubmittedProc with 'success'.
    void callSubmittedProcs(bool success);
826 
    // Capabilities of the backend; set via initCapsAndCompiler().
    sk_sp<const GrCaps>             fCaps;
    // Compiler used for compiling SkSL into backend shader code. We only want to create the
    // compiler once, as there is significant overhead to the first compile.
    std::unique_ptr<SkSL::Compiler> fCompiler;

    // Dirty-state bits consumed by resetContext()/onResetContext().
    uint32_t fResetBits;
    // The context owns us, not vice-versa, so this ptr is not ref'ed by Gpu.
    GrDirectContext* fContext;
835 
    // A client-supplied callback (plus its opaque context pointer) to be invoked when a
    // submit completes.
    struct SubmittedProc {
        SubmittedProc(GrGpuSubmittedProc proc, GrGpuSubmittedContext context)
                : fProc(proc), fContext(context) {}

        GrGpuSubmittedProc fProc;
        GrGpuSubmittedContext fContext;
    };
    // Pending callbacks; invoked via callSubmittedProcs().
    SkSTArray<4, SubmittedProc> fSubmittedProcs;

    // Sticky out-of-memory flag set via setOOMed().
    bool fOOMed = false;

#if SK_HISTOGRAMS_ENABLED
    // Render-pass count for the current submit; presumably fed into the submit histograms —
    // see reportSubmitHistograms().
    int fCurrentSubmitRenderPassCount = 0;
#endif
850 
851     friend class GrPathRendering;
852     using INHERITED = SkRefCnt;
853 };
854 
855 #endif
856