/*
 * Copyright 2013 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
#ifndef GrCaps_DEFINED
#define GrCaps_DEFINED

#include "include/core/SkImageInfo.h"
#include "include/core/SkRefCnt.h"
#include "include/core/SkString.h"
#include "include/gpu/GrDriverBugWorkarounds.h"
#include "include/private/GrTypesPriv.h"
#include "src/core/SkCompressedDataUtils.h"
#include "src/gpu/GrBlend.h"
#include "src/gpu/GrSamplerState.h"
#include "src/gpu/GrShaderCaps.h"
#include "src/gpu/GrSurfaceProxy.h"
#include "src/gpu/GrSwizzle.h"

class GrBackendFormat;
class GrBackendRenderTarget;
class GrBackendTexture;
struct GrContextOptions;
class GrProcessorKeyBuilder;
class GrProgramDesc;
class GrProgramInfo;
class GrRenderTargetProxy;
class GrSurface;
class SkJSONWriter;

/**
 * Represents the capabilities of a GrContext.
 */
class GrCaps : public SkRefCnt {
public:
    GrCaps(const GrContextOptions&);

    void dumpJSON(SkJSONWriter*) const;

    const GrShaderCaps* shaderCaps() const { return fShaderCaps.get(); }

    bool npotTextureTileSupport() const { return fNPOTTextureTileSupport; }
    /** To avoid as-yet-unnecessary complexity we don't allow any partial support of MIP Maps (e.g.
        only for POT textures) */
    bool mipmapSupport() const { return fMipmapSupport; }

    bool gpuTracingSupport() const { return fGpuTracingSupport; }
    bool oversizedStencilSupport() const { return fOversizedStencilSupport; }
    bool textureBarrierSupport() const { return fTextureBarrierSupport; }
    bool sampleLocationsSupport() const { return fSampleLocationsSupport; }
    bool drawInstancedSupport() const { return fDrawInstancedSupport; }
    // Is there hardware support for indirect draws? (Ganesh always supports indirect draws as long
    // as it can polyfill them with instanced calls, but this cap tells us if they are supported
    // natively.)
    bool nativeDrawIndirectSupport() const { return fNativeDrawIndirectSupport; }
    bool useClientSideIndirectBuffers() const {
#ifdef SK_DEBUG
        if (!fNativeDrawIndirectSupport || fNativeDrawIndexedIndirectIsBroken) {
            // We might implement indirect draws with a polyfill, so the commands need to reside in
            // CPU memory.
            SkASSERT(fUseClientSideIndirectBuffers);
        }
#endif
        return fUseClientSideIndirectBuffers;
    }
    bool conservativeRasterSupport() const { return fConservativeRasterSupport; }
    bool wireframeSupport() const { return fWireframeSupport; }
    // This flag indicates that we never have to resolve MSAA. In practice, it means that we have
    // an MSAA-render-to-texture extension: Any render target we create internally will use the
    // extension, and any wrapped render target is the client's responsibility.
    bool msaaResolvesAutomatically() const { return fMSAAResolvesAutomatically; }
    bool halfFloatVertexAttributeSupport() const { return fHalfFloatVertexAttributeSupport; }

    // Primitive restart functionality is core in ES 3.0, but using it will cause slowdowns on some
    // systems. This cap is only set if primitive restart will improve performance.
    bool usePrimitiveRestart() const { return fUsePrimitiveRestart; }

    bool preferClientSideDynamicBuffers() const { return fPreferClientSideDynamicBuffers; }

    // On tilers, an initial fullscreen clear is an OPTIMIZATION. It allows the hardware to
    // initialize each tile with a constant value rather than loading each pixel from memory.
    bool preferFullscreenClears() const { return fPreferFullscreenClears; }

    // Should we discard stencil values after a render pass? (Tilers get better performance if we
    // always load stencil buffers with a "clear" op, and then discard the content when finished.)
    bool discardStencilValuesAfterRenderPass() const {
        // b/160958008
        return false;
#if 0
        // This method is actually just a duplicate of preferFullscreenClears(), with a descriptive
        // name for the sake of readability.
        return this->preferFullscreenClears();
#endif
    }

    // D3D does not allow the refs or masks to differ on a two-sided stencil draw.
    bool twoSidedStencilRefsAndMasksMustMatch() const {
        return fTwoSidedStencilRefsAndMasksMustMatch;
    }

    bool preferVRAMUseOverFlushes() const { return fPreferVRAMUseOverFlushes; }

    bool avoidStencilBuffers() const { return fAvoidStencilBuffers; }

    bool avoidWritePixelsFastPath() const { return fAvoidWritePixelsFastPath; }

    // http://skbug.com/9739
    bool requiresManualFBBarrierAfterTessellatedStencilDraw() const {
        return fRequiresManualFBBarrierAfterTessellatedStencilDraw;
    }

    // glDrawElementsIndirect fails GrMeshTest on every Win10 Intel bot.
    bool nativeDrawIndexedIndirectIsBroken() const { return fNativeDrawIndexedIndirectIsBroken; }

    /**
     * Indicates the capabilities of the fixed function blend unit.
     */
    enum BlendEquationSupport {
        kBasic_BlendEquationSupport,             //!< Support to select the operator that
                                                 //   combines src and dst terms.
        kAdvanced_BlendEquationSupport,          //!< Additional fixed function support for specific
                                                 //   SVG/PDF blend modes. Requires blend barriers.
        kAdvancedCoherent_BlendEquationSupport,  //!< Advanced blend equation support that does not
                                                 //   require blend barriers, and permits overlap.

        kLast_BlendEquationSupport = kAdvancedCoherent_BlendEquationSupport
    };

    BlendEquationSupport blendEquationSupport() const { return fBlendEquationSupport; }

    bool advancedBlendEquationSupport() const {
        return fBlendEquationSupport >= kAdvanced_BlendEquationSupport;
    }

    bool advancedCoherentBlendEquationSupport() const {
        return kAdvancedCoherent_BlendEquationSupport == fBlendEquationSupport;
    }

    bool isAdvancedBlendEquationDisabled(GrBlendEquation equation) const {
        SkASSERT(GrBlendEquationIsAdvanced(equation));
        SkASSERT(this->advancedBlendEquationSupport());
        return SkToBool(fAdvBlendEqDisableFlags & (1 << equation));
    }

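    // Illustrative usage sketch: one way a caller might decide whether a given advanced blend
    // equation is usable and whether a blend barrier is needed. Assumes `caps` is a GrCaps* and
    // `eq` is an advanced GrBlendEquation.
    //
    //     bool canUseAdvanced = caps->advancedBlendEquationSupport() &&
    //                           !caps->isAdvancedBlendEquationDisabled(eq);
    //     bool needsBlendBarrier = canUseAdvanced &&
    //                              !caps->advancedCoherentBlendEquationSupport();
    //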
    // On some GPUs it is a performance win to disable blending instead of doing src-over with a src
    // alpha equal to 1. To disable blending we collapse src-over to src and the backends will
    // handle the disabling of blending.
    bool shouldCollapseSrcOverToSrcWhenAble() const {
        return fShouldCollapseSrcOverToSrcWhenAble;
    }

    // When abandoning the GrDirectContext, do we need to sync the GPU before we start abandoning
    // resources?
    bool mustSyncGpuDuringAbandon() const {
        return fMustSyncGpuDuringAbandon;
    }

    // Shortcut for shaderCaps()->reducedShaderMode().
    bool reducedShaderMode() const { return this->shaderCaps()->reducedShaderMode(); }

    /**
     * Indicates whether GPU->CPU memory mapping for GPU resources such as vertex buffers and
     * textures allows partial mappings or full mappings.
     */
    enum MapFlags {
        kNone_MapFlags      = 0x0,   //!< Cannot map the resource.

        kCanMap_MapFlag     = 0x1,   //!< The resource can be mapped. Must be set for any of
                                     //   the other flags to have meaning.
        kSubset_MapFlag     = 0x2,   //!< The resource can be partially mapped.
        kAsyncRead_MapFlag  = 0x4,   //!< Maps for reading are asynchronous with respect to
                                     //   GrOpsRenderPasses submitted to GrGpu.
    };

    // This returns the general mapping support for the GPU. However, even if this returns a flag
    // that says buffers can be mapped, it does NOT mean that every buffer will be mappable. Thus
    // callers of map() should still check whether a valid pointer was returned from the map call
    // and handle fallbacks appropriately. If this returns kNone_MapFlags then all calls to map() on
    // any buffer will fail.
    uint32_t mapBufferFlags() const { return fMapBufferFlags; }

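    // Illustrative usage sketch: gating buffer mapping on these flags while still handling a null
    // pointer from the map call. Assumes `caps` is a GrCaps* and `buffer` is a mappable GPU buffer
    // object exposing map()/unmap() (hypothetical names used purely for illustration).
    //
    //     if (caps->mapBufferFlags() & GrCaps::kCanMap_MapFlag) {
    //         if (void* ptr = buffer->map()) {
    //             // ... write data through ptr ...
    //             buffer->unmap();
    //         } else {
    //             // Mapping this particular buffer failed; fall back to a staging upload.
    //         }
    //     } else {
    //         // Mapping is unsupported; always use the fallback upload path.
    //     }
    //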
    // Scratch textures not being reused means that those scratch textures
    // that we upload to (i.e., don't have a render target) will not be
    // recycled in the texture cache. This is to prevent ghosting by drivers
    // (in particular for deferred architectures).
    bool reuseScratchTextures() const { return fReuseScratchTextures; }
    bool reuseScratchBuffers() const { return fReuseScratchBuffers; }

    /// maximum number of attribute values per vertex
    int maxVertexAttributes() const { return fMaxVertexAttributes; }

    int maxRenderTargetSize() const { return fMaxRenderTargetSize; }

    /** This is the largest render target size that can be used without incurring extra performance
        cost. It is usually the max RT size, unless larger render targets are known to be slower. */
    int maxPreferredRenderTargetSize() const { return fMaxPreferredRenderTargetSize; }

    int maxTextureSize() const { return fMaxTextureSize; }

    int maxWindowRectangles() const { return fMaxWindowRectangles; }

    // Returns whether window rectangles are supported for the given backend render target.
    bool isWindowRectanglesSupportedForRT(const GrBackendRenderTarget& rt) const {
        return this->maxWindowRectangles() > 0 && this->onIsWindowRectanglesSupportedForRT(rt);
    }

    // Hardware tessellation seems to have a fixed upfront cost. If there is a somewhat small number
    // of verbs, we seem to be faster emulating tessellation with instanced draws instead.
    int minPathVerbsForHwTessellation() const { return fMinPathVerbsForHwTessellation; }
    int minStrokeVerbsForHwTessellation() const { return fMinStrokeVerbsForHwTessellation; }

    uint32_t maxPushConstantsSize() const { return fMaxPushConstantsSize; }

    size_t transferBufferAlignment() const { return fTransferBufferAlignment; }

    virtual bool isFormatSRGB(const GrBackendFormat&) const = 0;

    bool isFormatCompressed(const GrBackendFormat& format) const;

    // Can a texture be made with the GrBackendFormat and texture type, and then be bound and
    // sampled in a shader?
    virtual bool isFormatTexturable(const GrBackendFormat&, GrTextureType) const = 0;

    // Returns whether a texture of the given format can be copied to a texture of the same format.
    virtual bool isFormatCopyable(const GrBackendFormat&) const = 0;

    // Returns the maximum supported sample count for a format. 0 means the format is not
    // renderable; 1 means the format is renderable but doesn't support MSAA.
    virtual int maxRenderTargetSampleCount(const GrBackendFormat&) const = 0;

    // Returns the number of samples to use when performing draws to the given config with internal
    // MSAA. If 0, Ganesh should not attempt to use internal multisampling.
    int internalMultisampleCount(const GrBackendFormat& format) const {
        return std::min(fInternalMultisampleCount, this->maxRenderTargetSampleCount(format));
    }

    virtual bool isFormatAsColorTypeRenderable(GrColorType ct, const GrBackendFormat& format,
                                               int sampleCount = 1) const = 0;

    virtual bool isFormatRenderable(const GrBackendFormat& format, int sampleCount) const = 0;

    // Finds a sample count greater than or equal to the requested count which is supported for a
    // render target of the given format, or 0 if no such sample count is supported. If the
    // requested sample count is 1 then 1 will be returned if non-MSAA rendering is supported,
    // otherwise 0. For historical reasons requestedCount==0 is handled identically to
    // requestedCount==1.
    virtual int getRenderTargetSampleCount(int requestedCount, const GrBackendFormat&) const = 0;

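    // Illustrative usage sketch: choosing an MSAA sample count with a non-MSAA fallback.
    // Assumes `caps` is a GrCaps* and `format` is a GrBackendFormat.
    //
    //     int sampleCnt = caps->getRenderTargetSampleCount(4, format);
    //     if (!sampleCnt) {
    //         sampleCnt = caps->getRenderTargetSampleCount(1, format);  // fall back to non-MSAA
    //     }
    //     if (!sampleCnt) {
    //         // The format is not renderable at all.
    //     }
    //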
    /**
     * Backends may have restrictions on what types of surfaces support GrGpu::writePixels().
     * If this returns false then the caller should implement a fallback where a temporary texture
     * is created, pixels are written to it, and then that is copied or drawn into the surface.
     */
    bool surfaceSupportsWritePixels(const GrSurface*) const;

    /**
     * Indicates whether a surface supports GrGpu::readPixels, must be copied, or cannot be read.
     */
    enum class SurfaceReadPixelsSupport {
        /** GrGpu::readPixels is supported by the surface. */
        kSupported,
        /**
         * GrGpu::readPixels is not supported by this surface but this surface can be drawn
         * or copied to a Ganesh-created GrTextureType::kTexture2D and then that surface will be
         * readable.
         */
        kCopyToTexture2D,
        /**
         * Not supported
         */
        kUnsupported,
    };
    /**
     * Backends may have restrictions on what types of surfaces support GrGpu::readPixels(). We may
     * either be able to read directly from the surface, read from a copy of the surface, or not
     * read at all.
     */
    virtual SurfaceReadPixelsSupport surfaceSupportsReadPixels(const GrSurface*) const = 0;

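    // Illustrative usage sketch: handling the three read-pixels support cases.
    // Assumes `caps` is a GrCaps* and `surface` is a GrSurface*.
    //
    //     switch (caps->surfaceSupportsReadPixels(surface)) {
    //         case GrCaps::SurfaceReadPixelsSupport::kSupported:
    //             // Read directly from the surface.
    //             break;
    //         case GrCaps::SurfaceReadPixelsSupport::kCopyToTexture2D:
    //             // Copy or draw into a Ganesh-created texture, then read from that.
    //             break;
    //         case GrCaps::SurfaceReadPixelsSupport::kUnsupported:
    //             // Report the read as failed.
    //             break;
    //     }
    //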
    struct SupportedWrite {
        GrColorType fColorType;
        // If the write is occurring using GrGpu::transferPixelsTo then this provides the
        // minimum alignment of the offset into the transfer buffer.
        size_t fOffsetAlignmentForTransferBuffer;
    };

    /**
     * Given a dst pixel config and a src color type, what color type must the caller coax the
     * data into in order to use GrGpu::writePixels()?
     */
    virtual SupportedWrite supportedWritePixelsColorType(GrColorType surfaceColorType,
                                                         const GrBackendFormat& surfaceFormat,
                                                         GrColorType srcColorType) const = 0;

    struct SupportedRead {
        GrColorType fColorType;
        // If the read is occurring using GrGpu::transferPixelsFrom then this provides the
        // minimum alignment of the offset into the transfer buffer.
        size_t fOffsetAlignmentForTransferBuffer;
    };

    /**
     * Given a src surface's color type and its backend format as well as a color type the caller
     * would like read into, this provides a legal color type that the caller may pass to
     * GrGpu::readPixels(). The returned color type may differ from the passed dstColorType, in
     * which case the caller must convert the read pixel data (see GrConvertPixels). When converting
     * to dstColorType the swizzle in the returned struct should be applied. The caller must check
     * the returned color type for kUnknown.
     */
    SupportedRead supportedReadPixelsColorType(GrColorType srcColorType,
                                               const GrBackendFormat& srcFormat,
                                               GrColorType dstColorType) const;

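    // Illustrative usage sketch: checking for kUnknown and converting when the supported read
    // color type differs from the one requested. Assumes `caps`, `srcCT`, `srcFormat`, and `dstCT`
    // are provided by the caller.
    //
    //     GrCaps::SupportedRead read = caps->supportedReadPixelsColorType(srcCT, srcFormat, dstCT);
    //     if (read.fColorType == GrColorType::kUnknown) {
    //         // Reading from this surface/format combination is not possible.
    //     } else if (read.fColorType != dstCT) {
    //         // Read as read.fColorType, then convert to dstCT (see GrConvertPixels).
    //     }
    //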
    /**
     * Does GrGpu::writePixels() support a src buffer where the row bytes is not equal to bpp * w?
     */
    bool writePixelsRowBytesSupport() const { return fWritePixelsRowBytesSupport; }

    /**
     * Does GrGpu::transferPixelsTo() support a src buffer where the row bytes is not equal to
     * bpp * w?
     */
    bool transferPixelsToRowBytesSupport() const { return fTransferPixelsToRowBytesSupport; }

    /**
     * Does GrGpu::readPixels() support a dst buffer where the row bytes is not equal to bpp * w?
     */
    bool readPixelsRowBytesSupport() const { return fReadPixelsRowBytesSupport; }

    bool transferFromSurfaceToBufferSupport() const { return fTransferFromSurfaceToBufferSupport; }
    bool transferFromBufferToTextureSupport() const { return fTransferFromBufferToTextureSupport; }

    bool suppressPrints() const { return fSuppressPrints; }

    size_t bufferMapThreshold() const {
        SkASSERT(fBufferMapThreshold >= 0);
        return fBufferMapThreshold;
    }

    /** True in environments that will issue errors if memory uploaded to buffers
        is not initialized (even if not read by draw calls). */
    bool mustClearUploadedBufferData() const { return fMustClearUploadedBufferData; }

    /** For some environments, there is a performance or safety concern with not
        initializing textures. For example, with WebGL and Firefox, there is a large
        performance hit to not doing it.
     */
    bool shouldInitializeTextures() const { return fShouldInitializeTextures; }

    /** Returns true if the given backend supports importing AHardwareBuffers via the
     * GrAHardwarebufferImageGenerator. This will only ever be supported on Android devices with API
     * level >= 26.
     */
    bool supportsAHardwareBufferImages() const { return fSupportsAHardwareBufferImages; }

    bool wireframeMode() const { return fWireframeMode; }

    /** Supports using GrFence. */
    bool fenceSyncSupport() const { return fFenceSyncSupport; }

    /** Supports using GrSemaphore. */
    bool semaphoreSupport() const { return fSemaphoreSupport; }

    bool crossContextTextureSupport() const { return fCrossContextTextureSupport; }
    /**
     * Returns whether or not we will be able to do a copy given the passed in params.
     */
    bool canCopySurface(const GrSurfaceProxy* dst, const GrSurfaceProxy* src,
                        const SkIRect& srcRect, const SkIPoint& dstPoint) const;

    bool dynamicStateArrayGeometryProcessorTextureSupport() const {
        return fDynamicStateArrayGeometryProcessorTextureSupport;
    }

    // Not all backends support clearing with a scissor test (e.g. Metal); this will always
    // return true if performColorClearsAsDraws() returns true.
    bool performPartialClearsAsDraws() const {
        return fPerformColorClearsAsDraws || fPerformPartialClearsAsDraws;
    }

    // Many drivers have issues with color clears.
    bool performColorClearsAsDraws() const { return fPerformColorClearsAsDraws; }

    bool avoidLargeIndexBufferDraws() const { return fAvoidLargeIndexBufferDraws; }

    /// Adreno 4xx devices experience an issue when there are a large number of stencil clip bit
    /// clears. The minimal repro steps are not precisely known but drawing a rect with a stencil
    /// op instead of using glClear seems to resolve the issue.
    bool performStencilClearsAsDraws() const { return fPerformStencilClearsAsDraws; }

    // Should we disable the clip mask atlas due to a faulty driver?
    bool driverDisableMSAAClipAtlas() const { return fDriverDisableMSAAClipAtlas; }

    // Should we disable TessellationPathRenderer due to a faulty driver?
    bool disableTessellationPathRenderer() const { return fDisableTessellationPathRenderer; }

    // Returns how to sample the dst values for the passed in GrRenderTargetProxy.
    GrDstSampleFlags getDstSampleFlagsForProxy(const GrRenderTargetProxy*, bool drawUsesMSAA) const;

    /**
     * This is used to try to ensure a successful copy of a dst in order to perform shader-based
     * blending.
     *
     * fRectsMustMatch will be set to true if the copy operation must ensure that the src and dest
     * rects are identical.
     *
     * fMustCopyWholeSrc will be set to true if the copy rect must equal src's bounds.
     *
     * Caller will detect cases when the copy cannot succeed and try copy-as-draw as a fallback.
     */
    struct DstCopyRestrictions {
        GrSurfaceProxy::RectsMustMatch fRectsMustMatch = GrSurfaceProxy::RectsMustMatch::kNo;
        bool fMustCopyWholeSrc = false;
    };
    virtual DstCopyRestrictions getDstCopyRestrictions(const GrRenderTargetProxy* src,
                                                       GrColorType ct) const {
        return {};
    }

    bool validateSurfaceParams(const SkISize&, const GrBackendFormat&, GrRenderable renderable,
                               int renderTargetSampleCnt, GrMipmapped, GrTextureType) const;

    bool areColorTypeAndFormatCompatible(GrColorType grCT, const GrBackendFormat& format) const;

    /** These are used when creating a new texture internally. */
    GrBackendFormat getDefaultBackendFormat(GrColorType, GrRenderable) const;

    virtual GrBackendFormat getBackendFormatFromCompressionType(SkImage::CompressionType) const = 0;

    /**
     * The CLAMP_TO_BORDER wrap mode for texture coordinates was added to desktop GL in 1.3 and
     * GLES 3.2, but is also available in extensions. Vulkan and Metal always have support.
     */
    bool clampToBorderSupport() const { return fClampToBorderSupport; }

    /**
     * Returns the GrSwizzle to use when sampling or reading back from a texture with the passed in
     * GrBackendFormat and GrColorType.
     */
    GrSwizzle getReadSwizzle(const GrBackendFormat& format, GrColorType colorType) const;

    /**
     * Returns the GrSwizzle to use when writing colors to a surface with the passed in
     * GrBackendFormat and GrColorType.
     */
    virtual GrSwizzle getWriteSwizzle(const GrBackendFormat&, GrColorType) const = 0;

    virtual uint64_t computeFormatKey(const GrBackendFormat&) const = 0;

    const GrDriverBugWorkarounds& workarounds() const { return fDriverBugWorkarounds; }

    /**
     * Adds fields to the key to represent the sampler that will be created for the passed
     * in parameters. Currently this extra keying is only needed when building a Vulkan pipeline
     * with immutable samplers.
     */
    virtual void addExtraSamplerKey(GrProcessorKeyBuilder*,
                                    GrSamplerState,
                                    const GrBackendFormat&) const {}

    enum class ProgramDescOverrideFlags {
        kNone = 0,
        // If using discardable msaa surfaces in vulkan, when we break up a render pass for an
        // inline upload, we must do a load msaa subpass for the second render pass. However, if the
        // original render pass did not have this load subpass (e.g. clear or discard load op), then
        // all the GrProgramInfos for draws that end up in the second render pass will have been
        // recorded thinking they will be in a render pass with only 1 subpass. Thus we add an
        // override flag to the makeDesc call to force the actual VkPipeline that gets created to
        // be created using a render pass with 2 subpasses. We do miss on the pre-compile with this
        // approach, but inline uploads are very rare and already slow.
        kVulkanHasResolveLoadSubpass = 0x1,
    };
    GR_DECL_BITFIELD_CLASS_OPS_FRIENDS(ProgramDescOverrideFlags);


    virtual GrProgramDesc makeDesc(
            GrRenderTarget*, const GrProgramInfo&,
            ProgramDescOverrideFlags overrideFlags = ProgramDescOverrideFlags::kNone) const = 0;

    // This method specifies, for each backend, the extra properties of a RT when Ganesh creates one
    // internally. For example, for Vulkan, Ganesh always creates RTs that can be used as input
    // attachments.
    virtual GrInternalSurfaceFlags getExtraSurfaceFlagsForDeferredRT() const {
        return GrInternalSurfaceFlags::kNone;
    }

    bool supportsDynamicMSAA(const GrRenderTargetProxy*) const;

    virtual bool dmsaaResolveCanBeUsedAsTextureInSameRenderPass() const { return true; }

    // skbug.com/11935. Task reordering is disabled for some GPUs on GL due to driver bugs.
    bool avoidReorderingRenderTasks() const {
        return fAvoidReorderingRenderTasks;
    }

    bool avoidDithering() const {
        return fAvoidDithering;
    }

    /**
     * Checks whether the passed color type is renderable. If so, the same color type is passed
     * back along with the default format used for the color type. If not, provides an alternative
     * (perhaps lower bit depth and/or unorm instead of float) color type that is supported,
     * along with its default format, or kUnknown if there is no renderable fallback format.
     */
    std::tuple<GrColorType, GrBackendFormat> getFallbackColorTypeAndFormat(GrColorType,
                                                                           int sampleCount) const;

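    // Illustrative usage sketch: querying a renderable fallback and checking for failure.
    // Assumes `caps` is a GrCaps* and `ct` is the desired GrColorType; the kUnknown check below is
    // one reasonable interpretation of the "no renderable fallback" case described above.
    //
    //     auto [fallbackCT, fallbackFormat] =
    //             caps->getFallbackColorTypeAndFormat(ct, /*sampleCount=*/1);
    //     if (fallbackCT == GrColorType::kUnknown) {
    //         // No renderable fallback exists for this color type.
    //     }
    //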
#if GR_TEST_UTILS
    struct TestFormatColorTypeCombination {
        GrColorType fColorType;
        GrBackendFormat fFormat;
    };

    virtual std::vector<TestFormatColorTypeCombination> getTestingCombinations() const = 0;
#endif

protected:
    // Subclasses must call this at the end of their init method in order to do final processing on
    // the caps (including overrides requested by the client).
    // NOTE: this method will only reduce the caps, never expand them.
    void finishInitialization(const GrContextOptions& options);

    virtual bool onSupportsDynamicMSAA(const GrRenderTargetProxy*) const { return false; }

    std::unique_ptr<GrShaderCaps> fShaderCaps;

    bool fNPOTTextureTileSupport                     : 1;
    bool fMipmapSupport                              : 1;
    bool fReuseScratchTextures                       : 1;
    bool fReuseScratchBuffers                        : 1;
    bool fGpuTracingSupport                          : 1;
    bool fOversizedStencilSupport                    : 1;
    bool fTextureBarrierSupport                      : 1;
    bool fSampleLocationsSupport                     : 1;
    bool fDrawInstancedSupport                       : 1;
    bool fNativeDrawIndirectSupport                  : 1;
    bool fUseClientSideIndirectBuffers               : 1;
    bool fConservativeRasterSupport                  : 1;
    bool fWireframeSupport                           : 1;
    bool fMSAAResolvesAutomatically                  : 1;
    bool fUsePrimitiveRestart                        : 1;
    bool fPreferClientSideDynamicBuffers             : 1;
    bool fPreferFullscreenClears                     : 1;
    bool fTwoSidedStencilRefsAndMasksMustMatch       : 1;
    bool fMustClearUploadedBufferData                : 1;
    bool fShouldInitializeTextures                   : 1;
    bool fSupportsAHardwareBufferImages              : 1;
    bool fHalfFloatVertexAttributeSupport            : 1;
    bool fClampToBorderSupport                       : 1;
    bool fPerformPartialClearsAsDraws                : 1;
    bool fPerformColorClearsAsDraws                  : 1;
    bool fAvoidLargeIndexBufferDraws                 : 1;
    bool fPerformStencilClearsAsDraws                : 1;
    bool fTransferFromBufferToTextureSupport         : 1;
    bool fTransferFromSurfaceToBufferSupport         : 1;
    bool fWritePixelsRowBytesSupport                 : 1;
    bool fTransferPixelsToRowBytesSupport            : 1;
    bool fReadPixelsRowBytesSupport                  : 1;
    bool fShouldCollapseSrcOverToSrcWhenAble         : 1;
    bool fMustSyncGpuDuringAbandon                   : 1;

    // Driver workaround
    bool fDriverDisableMSAAClipAtlas                 : 1;
    bool fDisableTessellationPathRenderer            : 1;
    bool fAvoidStencilBuffers                        : 1;
    bool fAvoidWritePixelsFastPath                   : 1;
    bool fRequiresManualFBBarrierAfterTessellatedStencilDraw : 1;
    bool fNativeDrawIndexedIndirectIsBroken          : 1;
    bool fAvoidReorderingRenderTasks                 : 1;
    bool fAvoidDithering                             : 1;

    // ANGLE performance workaround
    bool fPreferVRAMUseOverFlushes                   : 1;

    bool fFenceSyncSupport                           : 1;
    bool fSemaphoreSupport                           : 1;

    // Requires fence sync support in GL.
    bool fCrossContextTextureSupport                 : 1;

    // Not (yet) implemented in VK backend.
    bool fDynamicStateArrayGeometryProcessorTextureSupport : 1;

    BlendEquationSupport fBlendEquationSupport;
    uint32_t fAdvBlendEqDisableFlags;
    static_assert(kLast_GrBlendEquation < 32);

    uint32_t fMapBufferFlags;
    int fBufferMapThreshold;

    int fMaxRenderTargetSize;
    int fMaxPreferredRenderTargetSize;
    int fMaxVertexAttributes;
    int fMaxTextureSize;
    int fMaxWindowRectangles;
    int fInternalMultisampleCount;
    int fMinPathVerbsForHwTessellation = 25;
    int fMinStrokeVerbsForHwTessellation = 50;
    uint32_t fMaxPushConstantsSize = 0;
    size_t fTransferBufferAlignment = 1;

    GrDriverBugWorkarounds fDriverBugWorkarounds;

private:
    void applyOptionsOverrides(const GrContextOptions& options);

    virtual void onApplyOptionsOverrides(const GrContextOptions&) {}
    virtual void onDumpJSON(SkJSONWriter*) const {}
    virtual bool onSurfaceSupportsWritePixels(const GrSurface*) const = 0;
    virtual bool onCanCopySurface(const GrSurfaceProxy* dst, const GrSurfaceProxy* src,
                                  const SkIRect& srcRect, const SkIPoint& dstPoint) const = 0;
    virtual GrBackendFormat onGetDefaultBackendFormat(GrColorType) const = 0;

    // Backends should implement this if they have any extra requirements for use of window
    // rectangles for a specific GrBackendRenderTarget outside of basic support.
    virtual bool onIsWindowRectanglesSupportedForRT(const GrBackendRenderTarget&) const {
        return true;
    }

    virtual bool onAreColorTypeAndFormatCompatible(GrColorType, const GrBackendFormat&) const = 0;

    virtual SupportedRead onSupportedReadPixelsColorType(GrColorType srcColorType,
                                                         const GrBackendFormat& srcFormat,
                                                         GrColorType dstColorType) const = 0;

    virtual GrSwizzle onGetReadSwizzle(const GrBackendFormat&, GrColorType) const = 0;

    virtual GrDstSampleFlags onGetDstSampleFlagsForProxy(const GrRenderTargetProxy*) const {
        return GrDstSampleFlags::kNone;
    }

    bool fSuppressPrints : 1;
    bool fWireframeMode  : 1;

    using INHERITED = SkRefCnt;
};

GR_MAKE_BITFIELD_CLASS_OPS(GrCaps::ProgramDescOverrideFlags)

#endif