/*
 * Copyright 2022 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef skgpu_graphite_RendererProvider_DEFINED
#define skgpu_graphite_RendererProvider_DEFINED

#include "include/core/SkPathTypes.h"
#include "include/core/SkVertices.h"
#include "src/gpu/AtlasTypes.h"
#include "src/gpu/graphite/Renderer.h"

#include <vector>

namespace skgpu::graphite {

class Caps;
class StaticBufferManager;

#ifdef SK_ENABLE_VELLO_SHADERS
class VelloRenderer;
#endif

/**
 * Graphite defines a limited set of renderers in order to increase the likelihood of batching
 * across draw calls and to reduce the number of shader permutations required. These Renderers are
 * stateless singletons and remain alive for the life of the Context and its Recorders.
 *
 * Because Renderers are immutable and the defined Renderers are created at context initialization,
 * RendererProvider is trivially thread-safe.
 */
class RendererProvider {
public:
    static bool IsVelloRendererSupported(const Caps*);

    ~RendererProvider();

    // TODO: Add configuration options to disable "optimization" renderers in favor of the more
    // general case, or renderers that won't be used by the application. When that's added, these
    // functions could return null.

    // Path rendering for fills and strokes
    const Renderer* stencilTessellatedCurvesAndTris(SkPathFillType type) const {
        return &fStencilTessellatedCurves[(int) type];
    }
    const Renderer* stencilTessellatedWedges(SkPathFillType type) const {
        return &fStencilTessellatedWedges[(int) type];
    }
    const Renderer* convexTessellatedWedges() const { return &fConvexTessellatedWedges; }
    const Renderer* tessellatedStrokes() const { return &fTessellatedStrokes; }

    // Coverage mask rendering
    const Renderer* coverageMask() const { return &fCoverageMask; }

    // Atlased text rendering
    const Renderer* bitmapText(bool useLCDText, skgpu::MaskFormat format) const {
        // We use 565 here to represent all LCD rendering, regardless of texture format
        if (useLCDText) {
            return &fBitmapText[(int) skgpu::MaskFormat::kA565];
        }
        SkASSERT(format != skgpu::MaskFormat::kA565);
        return &fBitmapText[(int) format];
    }
    const Renderer* sdfText(bool useLCDText) const { return &fSDFText[useLCDText]; }

    // Mesh rendering
    const Renderer* vertices(SkVertices::VertexMode mode, bool hasColors, bool hasTexCoords) const {
        SkASSERT(mode != SkVertices::kTriangleFan_VertexMode); // Should be converted to kTriangles
        bool triStrip = mode == SkVertices::kTriangleStrip_VertexMode;
        return &fVertices[4*triStrip + 2*hasColors + hasTexCoords];
    }

    // Filled and stroked [r]rects
    const Renderer* analyticRRect() const { return &fAnalyticRRect; }

    // Per-edge AA quadrilaterals
    const Renderer* perEdgeAAQuad() const { return &fPerEdgeAAQuad; }

    // Non-AA bounds filling (can handle inverse "fills" but will touch every pixel within the clip)
    const Renderer* nonAABounds() const { return &fNonAABoundsFill; }

    // Circular arcs
    const Renderer* circularArc() const { return &fCircularArc; }
    const Renderer* analyticBlur() const { return &fAnalyticBlur; }

    // TODO: May need to add support for inverse filled strokes (need to check SVG spec if this is a
    // real thing).

    // Iterate over all available Renderers to combine with specified paint combinations when
    // pre-compiling pipelines.
    SkSpan<const Renderer* const> renderers() const {
        return {fRenderers.data(), fRenderers.size()};
    }

    const RenderStep* lookup(RenderStep::RenderStepID renderStepID) const {
        return fRenderSteps[(int) renderStepID].get();
    }

#ifdef SK_ENABLE_VELLO_SHADERS
    // Compute shader-based path renderer and compositor.
    const VelloRenderer* velloRenderer() const { return fVelloRenderer.get(); }
#endif

private:
    static constexpr int kPathTypeCount = 4;
    static constexpr int kVerticesCount = 8; // 2 modes * 2 color configs * 2 tex coord configs

    friend class Context; // for ctor

    // TODO: Take in caps that determines which Renderers to use for each category
    RendererProvider(const Caps*, StaticBufferManager* bufferManager);

    // Cannot be moved or copied
    RendererProvider(const RendererProvider&) = delete;
    RendererProvider(RendererProvider&&) = delete;

    RenderStep* assumeOwnership(std::unique_ptr<RenderStep> renderStep) {
        int index = (int) renderStep->renderStepID();
        SkASSERT(!fRenderSteps[index]);
        fRenderSteps[index] = std::move(renderStep);
        return fRenderSteps[index].get();
    }

    // Renderers are composed of 1+ steps, and some steps can be shared by multiple Renderers.
    // Renderers don't keep their RenderSteps alive so RendererProvider holds them here.
    std::unique_ptr<RenderStep> fRenderSteps[RenderStep::kNumRenderSteps];

    // NOTE: Keep all Renderers dense to support automatically completing 'fRenderers'.
    Renderer fStencilTessellatedCurves[kPathTypeCount];
    Renderer fStencilTessellatedWedges[kPathTypeCount];
    Renderer fConvexTessellatedWedges;
    Renderer fTessellatedStrokes;

    Renderer fCoverageMask;

    Renderer fBitmapText[3]; // int variant
    Renderer fSDFText[2];    // bool isLCD

    Renderer fAnalyticRRect;
    Renderer fPerEdgeAAQuad;
    Renderer fNonAABoundsFill;
    Renderer fCircularArc;

    Renderer fAnalyticBlur;

    Renderer fVertices[kVerticesCount];

    // Aggregate of all enabled Renderers for convenient iteration when pre-compiling
    std::vector<const Renderer*> fRenderers;

#ifdef SK_ENABLE_VELLO_SHADERS
    std::unique_ptr<VelloRenderer> fVelloRenderer;
#endif
};

} // namespace skgpu::graphite

#endif // skgpu_graphite_RendererProvider_DEFINED
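
// Illustrative usage sketch (not part of this header). Assuming a caller already holds a
// `const RendererProvider* provider` (how that pointer is obtained is outside this file's scope),
// it could pick the Renderer for a non-indexed triangle mesh with per-vertex colors but no
// texture coordinates, and enumerate every enabled Renderer when pre-compiling pipelines:
//
//     const Renderer* meshRenderer =
//             provider->vertices(SkVertices::kTriangles_VertexMode,
//                                /*hasColors=*/true,
//                                /*hasTexCoords=*/false);
//
//     for (const Renderer* r : provider->renderers()) {
//         // combine 'r' with the paint combinations being pre-compiled
//     }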