/*
 * Copyright 2022 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef sktext_gpu_SubRunContainer_DEFINED
#define sktext_gpu_SubRunContainer_DEFINED

#include "include/core/SkPoint.h"
#include "include/core/SkRefCnt.h"
#include "src/core/SkDevice.h"
#include "src/gpu/AtlasTypes.h"
#include "src/text/gpu/SubRunAllocator.h"

class SkMatrix;
class SkMatrixProvider;
class SkPaint;
class SkReadBuffer;
class SkStrikeClient;
class SkWriteBuffer;

namespace sktext {
class GlyphRunList;
class StrikeForGPUCacheInterface;
namespace gpu {
class Glyph;
class StrikeCache;
}
}

#if defined(SK_GANESH)  // Ganesh support
#include "src/gpu/ganesh/GrColor.h"
#include "src/gpu/ganesh/ops/GrOp.h"

class GrAtlasManager;
class GrDeferredUploadTarget;
class GrMeshDrawTarget;
class GrClip;
namespace skgpu::v1 { class SurfaceDrawContext; }
#endif

#if defined(SK_GRAPHITE)
#include "src/gpu/graphite/geom/Rect.h"
#include "src/gpu/graphite/geom/SubRunData.h"
#include "src/gpu/graphite/geom/Transform_graphite.h"

namespace skgpu {
enum class MaskFormat : int;
}

namespace skgpu::graphite {
class DrawWriter;
class Recorder;
class Renderer;
class RendererProvider;
}
#endif

namespace sktext::gpu {
// -- AtlasSubRun --------------------------------------------------------------------------------
// AtlasSubRun is the API that AtlasTextOp uses to generate vertex data for drawing.
// There are three different ways AtlasSubRun is specialized.
// * DirectMaskSubRun* - this is by far the most common type of SubRun. The mask pixels are
//   in 1:1 correspondence with the pixels on the device. The destination rectangles in this
//   SubRun are in device space. This SubRun handles color glyphs.
// * TransformedMaskSubRun* - handles glyphs where the image in the atlas needs to be
//   transformed to the screen. It is usually used for large color glyphs which can't be
//   drawn with paths or scaled distance fields, but it is also used to draw bitmap glyphs
//   to the screen when the matrix does not map 1:1 to the screen. The destination rectangles
//   are in source space.
// * SDFTSubRun* - scaled distance field text handles largish single color glyphs that still
//   fit in the atlas; the sizes fall between those of direct SubRuns and path SubRuns. The
//   destination rectangles are in source space.
class AtlasSubRun {
public:
    virtual ~AtlasSubRun() = default;

    virtual int glyphCount() const = 0;
    virtual skgpu::MaskFormat maskFormat() const = 0;

#if defined(SK_GANESH)
    virtual size_t vertexStride(const SkMatrix& drawMatrix) const = 0;

    virtual std::tuple<const GrClip*, GrOp::Owner>
    makeAtlasTextOp(
            const GrClip*,
            const SkMatrixProvider& viewMatrix,
            SkPoint drawOrigin,
            const SkPaint&,
            sk_sp<SkRefCnt>&& subRunStorage,
            skgpu::v1::SurfaceDrawContext*) const = 0;

    virtual void fillVertexData(
            void* vertexDst, int offset, int count,
            GrColor color,
            const SkMatrix& drawMatrix,
            SkPoint drawOrigin,
            SkIRect clip) const = 0;

    // This call is not thread safe. It should only be called from GrDrawOp::onPrepare, which
    // is single threaded.
    virtual std::tuple<bool, int> regenerateAtlas(
            int begin, int end, GrMeshDrawTarget* target) const = 0;
#endif

#if defined(SK_GRAPHITE)
    virtual std::tuple<bool, int> regenerateAtlas(
            int begin, int end, skgpu::graphite::Recorder*) const = 0;

    // Returns the bounds of the stored data and the matrix to transform it to device space.
    virtual std::tuple<skgpu::graphite::Rect, skgpu::graphite::Transform> boundsAndDeviceMatrix(
            const skgpu::graphite::Transform& localToDevice, SkPoint drawOrigin) const = 0;

    virtual const skgpu::graphite::Renderer* renderer(
            const skgpu::graphite::RendererProvider*) const = 0;

    virtual void fillInstanceData(
            skgpu::graphite::DrawWriter*,
            int offset, int count,
            int ssboIndex,
            SkScalar depth) const = 0;
#endif

    virtual void testingOnly_packedGlyphIDToGlyph(StrikeCache* cache) const = 0;

protected:
#if defined(SK_GRAPHITE)
    void draw(skgpu::graphite::Device*,
              SkPoint drawOrigin,
              const SkPaint&,
              sk_sp<SkRefCnt> subRunStorage) const;
#endif
};
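
// A rough usage sketch (not part of the API): during GrDrawOp::onPrepare an op would first
// ask the AtlasSubRun to upload its glyphs to the atlas and then emit vertex data for however
// many glyphs were regenerated. 'target', 'vertices', 'color', 'drawMatrix', 'drawOrigin', and
// 'clip' below are hypothetical stand-ins for state the op itself would own.
//
//     auto [ok, glyphsRegenerated] =
//             subRun->regenerateAtlas(0, subRun->glyphCount(), target);
//     if (ok) {
//         subRun->fillVertexData(vertices, /*offset=*/0, glyphsRegenerated,
//                                color, drawMatrix, drawOrigin, clip);
//     }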

// -- SubRun -------------------------------------------------------------------------------------
// SubRun defines the most basic functionality of a SubRun: the ability to draw, and the
// ability to be in a list.
class SubRun;
using SubRunOwner = std::unique_ptr<SubRun, SubRunAllocator::Destroyer>;
class SubRun {
public:
    virtual ~SubRun();
#if defined(SK_GANESH)
    // Produce GPU ops for this subRun or just draw them.
    virtual void draw(SkCanvas*,
                      const GrClip*,
                      const SkMatrixProvider& viewMatrix,
                      SkPoint drawOrigin,
                      const SkPaint&,
                      sk_sp<SkRefCnt> subRunStorage,
                      skgpu::v1::SurfaceDrawContext*) const = 0;
#endif
#if defined(SK_GRAPHITE)
    // Produce uploads and draws for this subRun.
    virtual void draw(SkCanvas*,
                      SkPoint drawOrigin,
                      const SkPaint&,
                      sk_sp<SkRefCnt> subRunStorage,
                      skgpu::graphite::Device*) const = 0;
#endif

    void flatten(SkWriteBuffer& buffer) const;
    static SubRunOwner MakeFromBuffer(const SkMatrix& initialPositionMatrix,
                                      SkReadBuffer& buffer,
                                      sktext::gpu::SubRunAllocator* alloc,
                                      const SkStrikeClient* client);

    // Size hint for unflattening this run. If this is accurate, it will help with the
    // allocation of the slug. If it's off, then more allocations may be needed to unflatten.
    virtual int unflattenSize() const = 0;

    // Given an already cached subRun, can this subRun handle this combination of paint,
    // matrix, and position?
    virtual bool canReuse(const SkPaint& paint, const SkMatrix& positionMatrix) const = 0;

    // Return the underlying atlas SubRun if it exists. Otherwise, return nullptr.
    // * Don't use this API. It is only to support testing.
    virtual const AtlasSubRun* testingOnly_atlasSubRun() const = 0;

protected:
    enum SubRunType : int;
    virtual SubRunType subRunType() const = 0;
    virtual void doFlatten(SkWriteBuffer& buffer) const = 0;

private:
    friend class SubRunList;
    SubRunOwner fNext;
};
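
// A minimal round-trip sketch (assumptions noted): flatten() serializes this SubRun (the
// subclass data comes from doFlatten()), and MakeFromBuffer() reconstitutes it into a
// SubRunAllocator. 'writeBuffer', 'readBuffer', 'alloc', and 'client' below are hypothetical
// objects owned by the caller.
//
//     subRun.flatten(writeBuffer);
//     ...
//     SubRunOwner restored = SubRun::MakeFromBuffer(initialPositionMatrix, readBuffer,
//                                                   &alloc, client);
//
// canReuse() is the check that lets an already cached SubRun be replayed under a new paint
// and position matrix instead of being rebuilt.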

// -- SubRunList ---------------------------------------------------------------------------------
class SubRunList {
public:
    class Iterator {
    public:
        using value_type = SubRun;
        using difference_type = ptrdiff_t;
        using pointer = value_type*;
        using reference = value_type&;
        using iterator_category = std::input_iterator_tag;
        Iterator(SubRun* subRun) : fPtr{subRun} { }
        Iterator& operator++() { fPtr = fPtr->fNext.get(); return *this; }
        Iterator operator++(int) { Iterator tmp(*this); operator++(); return tmp; }
        bool operator==(const Iterator& rhs) const { return fPtr == rhs.fPtr; }
        bool operator!=(const Iterator& rhs) const { return fPtr != rhs.fPtr; }
        reference operator*() { return *fPtr; }

    private:
        SubRun* fPtr;
    };

    void append(SubRunOwner subRun) {
        SubRunOwner* newTail = &subRun->fNext;
        *fTail = std::move(subRun);
        fTail = newTail;
    }
    bool isEmpty() const { return fHead == nullptr; }
    Iterator begin() { return Iterator{fHead.get()}; }
    Iterator end() { return Iterator{nullptr}; }
    Iterator begin() const { return Iterator{fHead.get()}; }
    Iterator end() const { return Iterator{nullptr}; }
    SubRun& front() const { return *fHead; }

private:
    SubRunOwner fHead{nullptr};
    SubRunOwner* fTail{&fHead};
};
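
// A minimal usage sketch: SubRunList is an intrusive, singly linked list. append() splices
// the new SubRun onto the tail through its fNext pointer, and Iterator satisfies just enough
// of the input-iterator requirements for range-based iteration. 'subRunOwner' below is a
// hypothetical SubRunOwner produced elsewhere (e.g. by a SubRunAllocator).
//
//     SubRunList list;
//     list.append(std::move(subRunOwner));
//     for (SubRun& subRun : list) {
//         // ... visit each SubRun in insertion order ...
//     }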

// -- SubRunContainer ----------------------------------------------------------------------------
class SubRunContainer;
using SubRunContainerOwner = std::unique_ptr<SubRunContainer, SubRunAllocator::Destroyer>;
class SubRunContainer {
public:
    explicit SubRunContainer(const SkMatrix& initialPositionMatrix);
    SubRunContainer() = delete;
    SubRunContainer(const SubRunContainer&) = delete;
    SubRunContainer& operator=(const SubRunContainer&) = delete;

    // Delete the move operations because the SubRuns contain pointers to fInitialPositionMatrix.
    SubRunContainer(SubRunContainer&&) = delete;
    SubRunContainer& operator=(SubRunContainer&&) = delete;

    void flattenAllocSizeHint(SkWriteBuffer& buffer) const;
    static int AllocSizeHintFromBuffer(SkReadBuffer& buffer);

    void flattenRuns(SkWriteBuffer& buffer) const;
    static SubRunContainerOwner MakeFromBufferInAlloc(SkReadBuffer& buffer,
                                                      const SkStrikeClient* client,
                                                      SubRunAllocator* alloc);

    enum SubRunCreationBehavior {kAddSubRuns, kStrikeCalculationsOnly};
    // The returned SubRunContainerOwner will never be null. If subRunCreation ==
    // kStrikeCalculationsOnly, then the returned container will be empty.
    static SK_WARN_UNUSED_RESULT SubRunContainerOwner MakeInAlloc(
            const GlyphRunList& glyphRunList,
            const SkMatrix& positionMatrix,
            const SkPaint& runPaint,
            SkStrikeDeviceInfo strikeDeviceInfo,
            StrikeForGPUCacheInterface* strikeCache,
            sktext::gpu::SubRunAllocator* alloc,
            SubRunCreationBehavior creationBehavior,
            const char* tag);

    static size_t EstimateAllocSize(const GlyphRunList& glyphRunList);

#if defined(SK_GANESH)
    void draw(SkCanvas* canvas,
              const GrClip* clip,
              const SkMatrixProvider& viewMatrix,
              SkPoint drawOrigin,
              const SkPaint& paint,
              const SkRefCnt* subRunStorage,
              skgpu::v1::SurfaceDrawContext* sdc) const;
#endif
#if defined(SK_GRAPHITE)
    void draw(SkCanvas*,
              SkPoint drawOrigin,
              const SkPaint&,
              const SkRefCnt* subRunStorage,
              skgpu::graphite::Device*) const;
#endif

    const SkMatrix& initialPosition() const { return fInitialPositionMatrix; }
    bool isEmpty() const { return fSubRuns.isEmpty(); }
    bool canReuse(const SkPaint& paint, const SkMatrix& positionMatrix) const;

private:
    friend struct SubRunContainerPeer;
    const SkMatrix fInitialPositionMatrix;
    SubRunList fSubRuns;
};
}  // namespace sktext::gpu

#endif  // sktext_gpu_SubRunContainer_DEFINED