1 /*
2  * Copyright 2017 Google Inc.
3  *
4  * Use of this source code is governed by a BSD-style license that can be
5  * found in the LICENSE file.
6  */
7 
8 #include <new>
9 
10 #include "include/core/SkPoint.h"
11 #include "include/core/SkPoint3.h"
12 #include "include/gpu/GrRecordingContext.h"
13 #include "include/private/SkFloatingPoint.h"
14 #include "include/private/SkTo.h"
15 #include "src/core/SkMathPriv.h"
16 #include "src/core/SkMatrixPriv.h"
17 #include "src/core/SkRectPriv.h"
18 #include "src/gpu/GrAppliedClip.h"
19 #include "src/gpu/GrCaps.h"
20 #include "src/gpu/GrDrawOpTest.h"
21 #include "src/gpu/GrGeometryProcessor.h"
22 #include "src/gpu/GrGpu.h"
23 #include "src/gpu/GrMemoryPool.h"
24 #include "src/gpu/GrOpFlushState.h"
25 #include "src/gpu/GrOpsTypes.h"
26 #include "src/gpu/GrRecordingContextPriv.h"
27 #include "src/gpu/GrResourceProvider.h"
28 #include "src/gpu/GrResourceProviderPriv.h"
29 #include "src/gpu/GrShaderCaps.h"
30 #include "src/gpu/GrTexture.h"
31 #include "src/gpu/GrTextureProxy.h"
32 #include "src/gpu/SkGr.h"
33 #include "src/gpu/effects/GrBlendFragmentProcessor.h"
34 #include "src/gpu/effects/GrTextureEffect.h"
35 #include "src/gpu/geometry/GrQuad.h"
36 #include "src/gpu/geometry/GrQuadBuffer.h"
37 #include "src/gpu/geometry/GrQuadUtils.h"
38 #include "src/gpu/geometry/GrRect.h"
39 #include "src/gpu/glsl/GrGLSLVarying.h"
40 #include "src/gpu/ops/FillRectOp.h"
41 #include "src/gpu/ops/GrMeshDrawOp.h"
42 #include "src/gpu/ops/GrSimpleMeshDrawOpHelper.h"
43 #include "src/gpu/ops/QuadPerEdgeAA.h"
44 #include "src/gpu/ops/TextureOp.h"
45 #include "src/gpu/v1/SurfaceDrawContext_v1.h"
46 
47 namespace {
48 
49 using Subset = skgpu::v1::QuadPerEdgeAA::Subset;
50 using VertexSpec = skgpu::v1::QuadPerEdgeAA::VertexSpec;
51 using ColorType = skgpu::v1::QuadPerEdgeAA::ColorType;
52 
53 // Extracts lengths of vertical and horizontal edges of axis-aligned quad. "width" is the edge
54 // between v0 and v2 (or v1 and v3), "height" is the edge between v0 and v1 (or v2 and v3).
55 SkSize axis_aligned_quad_size(const GrQuad& quad) {
56     SkASSERT(quad.quadType() == GrQuad::Type::kAxisAligned);
57     // Simplification of regular edge length equation, since it's axis aligned and can avoid sqrt
58     float dw = sk_float_abs(quad.x(2) - quad.x(0)) + sk_float_abs(quad.y(2) - quad.y(0));
59     float dh = sk_float_abs(quad.x(1) - quad.x(0)) + sk_float_abs(quad.y(1) - quad.y(0));
60     return {dw, dh};
61 }
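// For illustration (values derived from the formula above): the quad made from SkRect{0, 0, 10, 5}
// stores v0=(0,0), v1=(0,5), v2=(10,0), v3=(10,5), so dw = |10-0| + |0-0| = 10 and
// dh = |0-0| + |5-0| = 5, i.e. {10, 5}. The same sums hold for a 90-degree rotated axis-aligned
// quad, where the other absolute-value term carries the edge length instead.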
62 
63 std::tuple<bool /* filter */,
64            bool /* mipmap */>
65 filter_and_mm_have_effect(const GrQuad& srcQuad, const GrQuad& dstQuad) {
66     // If not axis-aligned in src or dst, then always say it has an effect
67     if (srcQuad.quadType() != GrQuad::Type::kAxisAligned ||
68         dstQuad.quadType() != GrQuad::Type::kAxisAligned) {
69         return {true, true};
70     }
71 
72     SkRect srcRect;
73     SkRect dstRect;
74     if (srcQuad.asRect(&srcRect) && dstQuad.asRect(&dstRect)) {
75         // Disable filtering when there is no scaling (width and height are the same), and the
76         // top-left corners have the same fraction (so src and dst snap to the pixel grid
77         // identically).
78         SkASSERT(srcRect.isSorted());
79         bool filter = srcRect.width() != dstRect.width() || srcRect.height() != dstRect.height() ||
80                       SkScalarFraction(srcRect.fLeft) != SkScalarFraction(dstRect.fLeft) ||
81                       SkScalarFraction(srcRect.fTop)  != SkScalarFraction(dstRect.fTop);
82         bool mm = srcRect.width() > dstRect.width() || srcRect.height() > dstRect.height();
83         return {filter, mm};
84     }
85     // Extract edge lengths
86     SkSize srcSize = axis_aligned_quad_size(srcQuad);
87     SkSize dstSize = axis_aligned_quad_size(dstQuad);
88     // Although the quads are axis-aligned, the local coordinate system is transformed such
89     // that fractionally-aligned sample centers will not align with the device coordinate system.
90     // So only disable filtering when the edges are the same length and the 0th vertices of both
91     // srcQuad and dstQuad are integer aligned.
92     bool filter = srcSize != dstSize ||
93                   !SkScalarIsInt(srcQuad.x(0)) ||
94                   !SkScalarIsInt(srcQuad.y(0)) ||
95                   !SkScalarIsInt(dstQuad.x(0)) ||
96                   !SkScalarIsInt(dstQuad.y(0));
97     bool mm = srcSize.fWidth > dstSize.fWidth || srcSize.fHeight > dstSize.fHeight;
98     return {filter, mm};
99 }
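// For illustration: drawing src rect (0,0,100,100) into dst rect (30,40,130,140) needs neither
// filtering nor mipmaps (same size, same fractional offsets), while drawing it into (0,0,50,50)
// needs both, since the image is scaled down.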
100 
101 // Describes function for normalizing src coords: [x * iw, y * ih + yOffset] can represent
102 // regular and rectangular textures, w/ or w/o origin correction.
103 struct NormalizationParams {
104     float fIW; // 1 / width of texture, or 1.0 for texture rectangles
105     float fInvH; // 1 / height of texture, or 1.0 for tex rects, X -1 if bottom-left origin
106     float fYOffset; // 0 for top-left origin, height of [normalized] tex if bottom-left
107 };
108 NormalizationParams proxy_normalization_params(const GrSurfaceProxy* proxy,
109                                                GrSurfaceOrigin origin) {
110     // Whether or not the proxy is instantiated, this is the size its texture will be, so we can
111     // normalize the src coordinates up front.
112     SkISize dimensions = proxy->backingStoreDimensions();
113     float iw, ih, h;
114     if (proxy->backendFormat().textureType() == GrTextureType::kRectangle) {
115         iw = ih = 1.f;
116         h = dimensions.height();
117     } else {
118         iw = 1.f / dimensions.width();
119         ih = 1.f / dimensions.height();
120         h = 1.f;
121     }
122 
123     if (origin == kBottomLeft_GrSurfaceOrigin) {
124         return {iw, -ih, h};
125     } else {
126         return {iw, ih, 0.0f};
127     }
128 }
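// For illustration: a 256x128 non-rectangle proxy yields {1/256, 1/128, 0} for a top-left origin
// and {1/256, -1/128, 1} for a bottom-left origin, so a src y of 0 maps to 1 and a src y of 128
// maps to 0 after normalize_src_quad(). A kRectangle texture keeps unnormalized coords
// (iw = ih = 1) and uses the pixel height as the flip offset.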
129 
130 // Normalize the subset. If 'subsetRect' is null, it is assumed no subset constraint is desired,
131 // so a sufficiently large rect is returned even if the quad ends up batched with an op that uses
132 // subsets overall. When there is a subset it will be inset based on the filter mode. Normalization
133 // and y-flipping are applied as indicated by NormalizationParams.
134 SkRect normalize_and_inset_subset(GrSamplerState::Filter filter,
135                                   const NormalizationParams& params,
136                                   const SkRect* subsetRect) {
137     static constexpr SkRect kLargeRect = {-100000, -100000, 1000000, 1000000};
138     if (!subsetRect) {
139         // Either the quad has no subset constraint and is batched with a subset constrained op
140         // (in which case we want a subset that doesn't restrict normalized tex coords), or the
141         // entire op doesn't use the subset, in which case the returned value is ignored.
142         return kLargeRect;
143     }
144 
145     auto ltrb = skvx::Vec<4, float>::Load(subsetRect);
146     auto flipHi = skvx::Vec<4, float>({1.f, 1.f, -1.f, -1.f});
147     if (filter == GrSamplerState::Filter::kNearest) {
148         // Make sure our insetting puts us at pixel centers.
149         ltrb = skvx::floor(ltrb*flipHi)*flipHi;
150     }
151     // Inset with pin to the rect center.
152     ltrb += skvx::Vec<4, float>({.5f, .5f, -.5f, -.5f});
153     auto mid = (skvx::shuffle<2, 3, 0, 1>(ltrb) + ltrb)*0.5f;
154     ltrb = skvx::min(ltrb*flipHi, mid*flipHi)*flipHi;
155 
156     // Normalize and offset
157     ltrb = ltrb * skvx::Vec<4, float>{params.fIW, params.fInvH, params.fIW, params.fInvH} +
158                skvx::Vec<4, float>{0.f, params.fYOffset, 0.f, params.fYOffset};
159     if (params.fInvH < 0.f) {
160         // Flip top and bottom to keep the rect sorted when loaded back to SkRect.
161         ltrb = skvx::shuffle<0, 3, 2, 1>(ltrb);
162     }
163 
164     SkRect out;
165     ltrb.store(&out);
166     return out;
167 }
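// For illustration: with kNearest filtering, a subset of {10, 10, 20, 20} on a 64x64
// top-left-origin texture is inset to {10.5, 10.5, 19.5, 19.5} and then scaled by 1/64; a subset
// less than one pixel wide or tall collapses to its center because of the pin step above.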
168 
169 // Normalizes logical src coords and corrects for origin
170 void normalize_src_quad(const NormalizationParams& params,
171                         GrQuad* srcQuad) {
172     // The src quad should not have any perspective
173     SkASSERT(!srcQuad->hasPerspective());
174     skvx::Vec<4, float> xs = srcQuad->x4f() * params.fIW;
175     skvx::Vec<4, float> ys = srcQuad->y4f() * params.fInvH + params.fYOffset;
176     xs.store(srcQuad->xs());
177     ys.store(srcQuad->ys());
178 }
179 
180 // Count the number of proxy runs in the entry set. This usually is already computed by
181 // SkGpuDevice, but when the BatchLengthLimiter chops the set up it must determine a new proxy count
182 // for each split.
183 int proxy_run_count(const GrTextureSetEntry set[], int count) {
184     int actualProxyRunCount = 0;
185     const GrSurfaceProxy* lastProxy = nullptr;
186     for (int i = 0; i < count; ++i) {
187         if (set[i].fProxyView.proxy() != lastProxy) {
188             actualProxyRunCount++;
189             lastProxy = set[i].fProxyView.proxy();
190         }
191     }
192     return actualProxyRunCount;
193 }
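// For illustration: a set whose entries reference proxies [A, A, B, A] contains four quads but
// three proxy runs, since the run count only advances when consecutive entries switch proxies.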
194 
195 bool safe_to_ignore_subset_rect(GrAAType aaType, GrSamplerState::Filter filter,
196                                 const DrawQuad& quad, const SkRect& subsetRect) {
197     // If both the device and local quads are axis-aligned, and filtering is off, the local quad
198     // can push all the way up to the edges of the subset rect and the sampler shouldn't
199     // overshoot. Unfortunately, antialiasing adds enough jitter that we can only rely on this in
200     // the non-antialiased case.
201     SkRect localBounds = quad.fLocal.bounds();
202     if (aaType == GrAAType::kNone &&
203         filter == GrSamplerState::Filter::kNearest &&
204         quad.fDevice.quadType() == GrQuad::Type::kAxisAligned &&
205         quad.fLocal.quadType() == GrQuad::Type::kAxisAligned &&
206         subsetRect.contains(localBounds)) {
207 
208         return true;
209     }
210 
211     // If the local quad is inset by at least 0.5 pixels into the subset rect's bounds, the
212     // sampler shouldn't overshoot, even when antialiasing and filtering is taken into account.
213     if (subsetRect.makeInset(0.5f, 0.5f).contains(localBounds)) {
214         return true;
215     }
216 
217     // The subset rect cannot be ignored safely.
218     return false;
219 }
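// For illustration: local bounds of (0.5, 0.5, 99.5, 99.5) within a subset of (0, 0, 100, 100)
// pass the half-pixel inset test, so the subset can be dropped even with AA and linear filtering.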
220 
221 /**
222  * Op that implements TextureOp::Make. It draws textured quads. Each quad can modulate the
223  * texture by a color. The blend with the destination is always src-over. The edges are non-AA.
224  */
225 class TextureOpImpl final : public GrMeshDrawOp {
226 public:
227     using Saturate = skgpu::v1::TextureOp::Saturate;
228 
229     static GrOp::Owner Make(GrRecordingContext* context,
230                             GrSurfaceProxyView proxyView,
231                             sk_sp<GrColorSpaceXform> textureXform,
232                             GrSamplerState::Filter filter,
233                             GrSamplerState::MipmapMode mm,
234                             const SkPMColor4f& color,
235                             Saturate saturate,
236                             GrAAType aaType,
237                             DrawQuad* quad,
238 #ifdef SK_ENABLE_STENCIL_CULLING_OHOS
239                             const SkRect* subset,
240 #ifdef SUPPORT_OPAQUE_OPTIMIZATION
241                             uint32_t stencilRef,
242                             bool supportOpaqueOpt = false) {
243         return GrOp::Make<TextureOpImpl>(context, std::move(proxyView), std::move(textureXform),
244                                          filter, mm, color, saturate, aaType, quad, subset, stencilRef, supportOpaqueOpt);
245 #else
246                             uint32_t stencilRef) {
247         return GrOp::Make<TextureOpImpl>(context, std::move(proxyView), std::move(textureXform),
248                                          filter, mm, color, saturate, aaType, quad, subset, stencilRef);
249 #endif
250 #else
251 #ifdef SUPPORT_OPAQUE_OPTIMIZATION
252                             const SkRect* subset,
253                             bool supportOpaqueOpt = false) {
254         return GrOp::Make<TextureOpImpl>(context, std::move(proxyView), std::move(textureXform),
255                                          filter, mm, color, saturate, aaType, quad, subset, supportOpaqueOpt);
256 #else
257                             const SkRect* subset) {
258         return GrOp::Make<TextureOpImpl>(context, std::move(proxyView), std::move(textureXform),
259                                          filter, mm, color, saturate, aaType, quad, subset);
260 #endif
261 #endif
262     }
263 
264     static GrOp::Owner Make(GrRecordingContext* context,
265                             GrTextureSetEntry set[],
266                             int cnt,
267                             int proxyRunCnt,
268                             GrSamplerState::Filter filter,
269                             GrSamplerState::MipmapMode mm,
270                             Saturate saturate,
271                             GrAAType aaType,
272                             SkCanvas::SrcRectConstraint constraint,
273                             const SkMatrix& viewMatrix,
274                             sk_sp<GrColorSpaceXform> textureColorSpaceXform) {
275         // Allocate size based on proxyRunCnt, since that determines number of ViewCountPairs.
276         SkASSERT(proxyRunCnt <= cnt);
277         return GrOp::MakeWithExtraMemory<TextureOpImpl>(
278                 context, sizeof(ViewCountPair) * (proxyRunCnt - 1),
279                 set, cnt, proxyRunCnt, filter, mm, saturate, aaType, constraint,
280                 viewMatrix, std::move(textureColorSpaceXform));
281     }
282 
283     ~TextureOpImpl() override {
284         for (unsigned p = 1; p < fMetadata.fProxyCount; ++p) {
285             fViewCountPairs[p].~ViewCountPair();
286         }
287     }
288 
289     const char* name() const override { return "TextureOp"; }
290 
291     void visitProxies(const GrVisitProxyFunc& func) const override {
292         bool mipped = (fMetadata.mipmapMode() != GrSamplerState::MipmapMode::kNone);
293         for (unsigned p = 0; p <  fMetadata.fProxyCount; ++p) {
294             func(fViewCountPairs[p].fProxy.get(), GrMipmapped(mipped));
295         }
296         if (fDesc && fDesc->fProgramInfo) {
297             fDesc->fProgramInfo->visitFPProxies(func);
298         }
299     }
300 
301 #ifdef SK_DEBUG
302     static void ValidateResourceLimits() {
303         // The op implementation has an upper bound on the number of quads that it can represent.
304         // However, the resource manager imposes its own limit on the number of quads, which should
305         // always be lower than the numerical limit this op can hold.
306         using CountStorage = decltype(Metadata::fTotalQuadCount);
307         CountStorage maxQuadCount = std::numeric_limits<CountStorage>::max();
308         // GrResourceProvider::Max...() is typed as int, so don't compare across signed/unsigned.
309         int resourceLimit = SkTo<int>(maxQuadCount);
310         SkASSERT(GrResourceProvider::MaxNumAAQuads() <= resourceLimit &&
311                  GrResourceProvider::MaxNumNonAAQuads() <= resourceLimit);
312     }
313 #endif
314 
315     GrProcessorSet::Analysis finalize(const GrCaps& caps, const GrAppliedClip* clip,
316                                       GrClampType clampType) override {
317         SkASSERT(fMetadata.colorType() == ColorType::kNone);
318         auto iter = fQuads.metadata();
319         while(iter.next()) {
320             auto colorType = skgpu::v1::QuadPerEdgeAA::MinColorType(iter->fColor);
321             colorType = std::max(static_cast<ColorType>(fMetadata.fColorType),
322                                  colorType);
323             if (caps.reducedShaderMode()) {
324                 colorType = std::max(colorType, ColorType::kByte);
325             }
326             fMetadata.fColorType = static_cast<uint16_t>(colorType);
327 #ifdef SUPPORT_OPAQUE_OPTIMIZATION
328             fRRect = clip->getRRect();
329 #endif
330         }
331         return GrProcessorSet::EmptySetAnalysis();
332     }
333 
334     FixedFunctionFlags fixedFunctionFlags() const override {
335 #ifdef SK_ENABLE_STENCIL_CULLING_OHOS
336         if (fStencilRef != UINT32_MAX) {
337             return FixedFunctionFlags::kUsesStencil;
338         }
339 #endif
340         return fMetadata.aaType() == GrAAType::kMSAA ? FixedFunctionFlags::kUsesHWAA
341                                                      : FixedFunctionFlags::kNone;
342     }
343 
344 #ifdef SK_ENABLE_STENCIL_CULLING_OHOS
345     bool isStencilCullingOp() override {
346         return fStencilRef != UINT32_MAX;
347     }
348 #endif
349 
350     DEFINE_OP_CLASS_ID
351 
352 private:
353     friend class ::GrOp;
354 
355     struct ColorSubsetAndAA {
356         ColorSubsetAndAA(const SkPMColor4f& color, const SkRect& subsetRect, GrQuadAAFlags aaFlags)
357                 : fColor(color)
358                 , fSubsetRect(subsetRect)
359                 , fAAFlags(static_cast<uint16_t>(aaFlags)) {
360             SkASSERT(fAAFlags == static_cast<uint16_t>(aaFlags));
361         }
362 
363         SkPMColor4f fColor;
364         // If the op doesn't use subsets, this is ignored. If the op uses subsets and the specific
365         // entry does not, this rect will equal kLargeRect, so it automatically has no effect.
366         SkRect fSubsetRect;
367         unsigned fAAFlags : 4;
368 
369         GrQuadAAFlags aaFlags() const { return static_cast<GrQuadAAFlags>(fAAFlags); }
370     };
371 
372     struct ViewCountPair {
373         // Normally this would be a GrSurfaceProxyView, but TextureOp applies the GrOrigin right
374         // away so it doesn't need to be stored, and all ViewCountPairs in an op have the same
375         // swizzle so that is stored in the op metadata.
376         sk_sp<GrSurfaceProxy> fProxy;
377         int fQuadCnt;
378     };
379 
380     // TextureOp and ViewCountPair are 8 byte aligned. This is packed into 8 bytes to minimally
381     // increase the size of the op; increasing the op size can have a surprising impact on
382     // performance (since texture ops are among the most commonly used ops in an app).
383     struct Metadata {
384         // AAType must be filled after initialization; ColorType is determined in finalize()
385         Metadata(const GrSwizzle& swizzle,
386                  GrSamplerState::Filter filter,
387                  GrSamplerState::MipmapMode mm,
388                  Subset subset,
389                  Saturate saturate)
390             : fSwizzle(swizzle)
391             , fProxyCount(1)
392             , fTotalQuadCount(1)
393             , fFilter(static_cast<uint16_t>(filter))
394             , fMipmapMode(static_cast<uint16_t>(mm))
395             , fAAType(static_cast<uint16_t>(GrAAType::kNone))
396             , fColorType(static_cast<uint16_t>(ColorType::kNone))
397             , fSubset(static_cast<uint16_t>(subset))
398             , fSaturate(static_cast<uint16_t>(saturate)) {}
399 
400         GrSwizzle fSwizzle; // sizeof(GrSwizzle) == uint16_t
401         uint16_t  fProxyCount;
402         // This will be >= fProxyCount, since a proxy may be drawn multiple times
403         uint16_t  fTotalQuadCount;
404 
405         // These must be based on uint16_t to help MSVC pack the bitfields optimally
406         uint16_t  fFilter     : 2; // GrSamplerState::Filter
407         uint16_t  fMipmapMode : 2; // GrSamplerState::MipmapMode
408         uint16_t  fAAType     : 2; // GrAAType
409         uint16_t  fColorType  : 2; // GrQuadPerEdgeAA::ColorType
410         uint16_t  fSubset     : 1; // bool
411         uint16_t  fSaturate   : 1; // bool
412         uint16_t  fUnused     : 6; // # of bits left before Metadata exceeds 8 bytes
413 
414         GrSamplerState::Filter filter() const {
415             return static_cast<GrSamplerState::Filter>(fFilter);
416         }
417         GrSamplerState::MipmapMode mipmapMode() const {
418             return static_cast<GrSamplerState::MipmapMode>(fMipmapMode);
419         }
420         GrAAType aaType() const { return static_cast<GrAAType>(fAAType); }
421         ColorType colorType() const { return static_cast<ColorType>(fColorType); }
422         Subset subset() const { return static_cast<Subset>(fSubset); }
423         Saturate saturate() const { return static_cast<Saturate>(fSaturate); }
424 
425         static_assert(GrSamplerState::kFilterCount <= 4);
426         static_assert(kGrAATypeCount <= 4);
427         static_assert(skgpu::v1::QuadPerEdgeAA::kColorTypeCount <= 4);
428     };
429     static_assert(sizeof(Metadata) == 8);
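    // The 8 bytes break down as 2 (fSwizzle, per the note above) + 2 (fProxyCount) +
    // 2 (fTotalQuadCount) + 2 (the sixteen bits of bitfields: 2+2+2+2+1+1+6).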
430 
431     // This descriptor is used to store the draw info we decide on during on(Pre)PrepareDraws. We
432     // store the data in a separate struct in order to minimize the size of the TextureOp.
433     // Historically, increasing the TextureOp's size has caused surprising perf regressions, but we
434     // may want to re-evaluate whether this is still necessary.
435     //
436     // In the onPrePrepareDraws case it is allocated in the creation-time opData arena, and
437     // allocatePrePreparedVertices is also called.
438     //
439     // In the onPrepareDraws case this descriptor is allocated in the flush-time arena (i.e., as
440     // part of the flushState).
441     struct Desc {
442         VertexSpec fVertexSpec;
443         int fNumProxies = 0;
444         int fNumTotalQuads = 0;
445 
446         // This member variable is only used by 'onPrePrepareDraws'.
447         char* fPrePreparedVertices = nullptr;
448 
449         GrProgramInfo* fProgramInfo = nullptr;
450 
451         sk_sp<const GrBuffer> fIndexBuffer;
452         sk_sp<const GrBuffer> fVertexBuffer;
453         int fBaseVertex;
454 
455         // How big should 'fVertices' be to hold all the vertex data?
456         size_t totalSizeInBytes() const {
457             return this->totalNumVertices() * fVertexSpec.vertexSize();
458         }
459 
460         int totalNumVertices() const {
461             return fNumTotalQuads * fVertexSpec.verticesPerQuad();
462         }
463 
464         void allocatePrePreparedVertices(SkArenaAlloc* arena) {
465             fPrePreparedVertices = arena->makeArrayDefault<char>(this->totalSizeInBytes());
466         }
467     };
468     // If subsetRect is not null it will be used to apply a strict src rect-style constraint.
469     TextureOpImpl(GrSurfaceProxyView proxyView,
470                   sk_sp<GrColorSpaceXform> textureColorSpaceXform,
471                   GrSamplerState::Filter filter,
472                   GrSamplerState::MipmapMode mm,
473                   const SkPMColor4f& color,
474                   Saturate saturate,
475                   GrAAType aaType,
476                   DrawQuad* quad,
477 #ifdef SK_ENABLE_STENCIL_CULLING_OHOS
478                   const SkRect* subsetRect,
479 #ifdef SUPPORT_OPAQUE_OPTIMIZATION
480                   uint32_t stencilRef = UINT32_MAX,
481                   bool supportOpaqueOpt = false)
482 #else
483                   uint32_t stencilRef = UINT32_MAX)
484 #endif
485             : INHERITED(ClassID())
486             , fQuads(1, true /* includes locals */)
487             , fTextureColorSpaceXform(std::move(textureColorSpaceXform))
488             , fDesc(nullptr)
489             , fMetadata(proxyView.swizzle(), filter, mm, Subset(!!subsetRect), saturate)
490             , fStencilRef(stencilRef) {
491 #else
492 #ifdef SUPPORT_OPAQUE_OPTIMIZATION
493                   const SkRect* subsetRect,
494                   bool supportOpaqueOpt = false)
495 #else
496                   const SkRect* subsetRect)
497 #endif
498             : INHERITED(ClassID())
499             , fQuads(1, true /* includes locals */)
500             , fTextureColorSpaceXform(std::move(textureColorSpaceXform))
501             , fDesc(nullptr)
502             , fMetadata(proxyView.swizzle(), filter, mm, Subset(!!subsetRect), saturate) {
503 #endif
504         // Clean up disparities between the overall aa type and edge configuration and apply
505         // optimizations based on the rect and matrix when appropriate
506 #ifdef SUPPORT_OPAQUE_OPTIMIZATION
507         fSupportOpaqueOpt = ((color.fA < 1.0 - 1e-7) || (subsetRect != nullptr))
508                 ? false
509                 : supportOpaqueOpt;
510 #endif
511         GrQuadUtils::ResolveAAType(aaType, quad->fEdgeFlags, quad->fDevice,
512                                    &aaType, &quad->fEdgeFlags);
513         fMetadata.fAAType = static_cast<uint16_t>(aaType);
514 
515         // We expect our caller to have already caught this optimization.
516         SkASSERT(!subsetRect ||
517                  !subsetRect->contains(proxyView.proxy()->backingStoreBoundsRect()));
518 
519         // We may have had a strict constraint with nearest filter solely due to possible AA bloat.
520         // Try to identify cases where the subsetting isn't actually necessary, and skip it.
521         if (subsetRect) {
522             if (safe_to_ignore_subset_rect(aaType, filter, *quad, *subsetRect)) {
523                 subsetRect = nullptr;
524                 fMetadata.fSubset = static_cast<uint16_t>(Subset::kNo);
525             }
526         }
527 
528         // Normalize src coordinates and the subset (if set)
529         NormalizationParams params = proxy_normalization_params(proxyView.proxy(),
530                                                                 proxyView.origin());
531         normalize_src_quad(params, &quad->fLocal);
532         SkRect subset = normalize_and_inset_subset(filter, params, subsetRect);
533 
534         // Set bounds before clipping so we don't have to worry about unioning the bounds of
535         // the two potential quads (GrQuad::bounds() is perspective-safe).
536         bool hairline = GrQuadUtils::WillUseHairline(quad->fDevice, aaType, quad->fEdgeFlags);
537         this->setBounds(quad->fDevice.bounds(), HasAABloat(aaType == GrAAType::kCoverage),
538                         hairline ? IsHairline::kYes : IsHairline::kNo);
539         int quadCount = this->appendQuad(quad, color, subset);
540         fViewCountPairs[0] = {proxyView.detachProxy(), quadCount};
541     }
542 
543     TextureOpImpl(GrTextureSetEntry set[],
544                   int cnt,
545                   int proxyRunCnt,
546                   const GrSamplerState::Filter filter,
547                   const GrSamplerState::MipmapMode mm,
548                   const Saturate saturate,
549                   const GrAAType aaType,
550                   const SkCanvas::SrcRectConstraint constraint,
551                   const SkMatrix& viewMatrix,
552                   sk_sp<GrColorSpaceXform> textureColorSpaceXform)
553             : INHERITED(ClassID())
554             , fQuads(cnt, true /* includes locals */)
555             , fTextureColorSpaceXform(std::move(textureColorSpaceXform))
556             , fDesc(nullptr)
557             , fMetadata(set[0].fProxyView.swizzle(),
558                         GrSamplerState::Filter::kNearest,
559                         GrSamplerState::MipmapMode::kNone,
560                         Subset::kNo,
561                         saturate) {
562         // Update counts to reflect the batch op
563         fMetadata.fProxyCount = SkToUInt(proxyRunCnt);
564         fMetadata.fTotalQuadCount = SkToUInt(cnt);
565 
566         SkRect bounds = SkRectPriv::MakeLargestInverted();
567 
568         GrAAType netAAType = GrAAType::kNone; // aa type maximally compatible with all dst rects
569         Subset netSubset = Subset::kNo;
570         GrSamplerState::Filter netFilter = GrSamplerState::Filter::kNearest;
571         GrSamplerState::MipmapMode netMM = GrSamplerState::MipmapMode::kNone;
572         bool hasSubpixel = false;
573 
574         const GrSurfaceProxy* curProxy = nullptr;
575 
576         // 'q' is the index in 'set' and fQuadBuffer; 'p' is the index in fViewCountPairs and only
577         // increases when set[q]'s proxy changes.
578         int p = 0;
579         for (int q = 0; q < cnt; ++q) {
580             SkASSERT(mm == GrSamplerState::MipmapMode::kNone ||
581                      (set[0].fProxyView.proxy()->asTextureProxy()->mipmapped() ==
582                       GrMipmapped::kYes));
583             if (q == 0) {
584                 // We do not placement new the first ViewCountPair since that one is allocated and
585                 // initialized as part of the TextureOp creation.
586                 fViewCountPairs[0].fProxy = set[0].fProxyView.detachProxy();
587                 fViewCountPairs[0].fQuadCnt = 0;
588                 curProxy = fViewCountPairs[0].fProxy.get();
589             } else if (set[q].fProxyView.proxy() != curProxy) {
590                 // We must placement new the ViewCountPairs here so that the sk_sps in the
591                 // GrSurfaceProxyView get initialized properly.
592                 new(&fViewCountPairs[++p])ViewCountPair({set[q].fProxyView.detachProxy(), 0});
593 
594                 curProxy = fViewCountPairs[p].fProxy.get();
595                 SkASSERT(GrTextureProxy::ProxiesAreCompatibleAsDynamicState(
596                         curProxy, fViewCountPairs[0].fProxy.get()));
597                 SkASSERT(fMetadata.fSwizzle == set[q].fProxyView.swizzle());
598             } // else another quad referencing the same proxy
599 
600             SkMatrix ctm = viewMatrix;
601             if (set[q].fPreViewMatrix) {
602                 ctm.preConcat(*set[q].fPreViewMatrix);
603             }
604 
605             // Use dstRect/srcRect unless dstClip is provided, in which case derive new source
606             // coordinates by mapping dstClipQuad by the dstRect to srcRect transform.
607             DrawQuad quad;
608             if (set[q].fDstClipQuad) {
609                 quad.fDevice = GrQuad::MakeFromSkQuad(set[q].fDstClipQuad, ctm);
610 
611                 SkPoint srcPts[4];
612                 GrMapRectPoints(set[q].fDstRect, set[q].fSrcRect, set[q].fDstClipQuad, srcPts, 4);
613                 quad.fLocal = GrQuad::MakeFromSkQuad(srcPts, SkMatrix::I());
614             } else {
615                 quad.fDevice = GrQuad::MakeFromRect(set[q].fDstRect, ctm);
616                 quad.fLocal = GrQuad(set[q].fSrcRect);
617             }
618 
619             // This may be reduced per-quad from the requested aggregate filtering level, and used
620             // to determine if the subset is needed for the entry as well.
621             GrSamplerState::Filter filterForQuad = filter;
622             if (netFilter != filter || netMM != mm) {
623                 // The only way netFilter != filter is if linear is requested and we haven't yet
624                 // found a quad that requires linear (so net is still nearest). Similar for mip
625                 // mapping.
626                 SkASSERT(filter == netFilter ||
627                          (netFilter == GrSamplerState::Filter::kNearest && filter > netFilter));
628                 SkASSERT(mm == netMM ||
629                          (netMM == GrSamplerState::MipmapMode::kNone && mm > netMM));
630                 auto [mustFilter, mustMM] = filter_and_mm_have_effect(quad.fLocal, quad.fDevice);
631                 if (filter != GrSamplerState::Filter::kNearest) {
632                     if (mustFilter) {
633                         netFilter = filter; // upgrade batch to higher filter level
634                     } else {
635                         filterForQuad = GrSamplerState::Filter::kNearest; // downgrade entry
636                     }
637                 }
638                 if (mustMM && mm != GrSamplerState::MipmapMode::kNone) {
639                     netMM = mm;
640                 }
641             }
642 
643             // Determine the AA type for the quad, then merge with net AA type
644             GrAAType aaForQuad;
645             GrQuadUtils::ResolveAAType(aaType, set[q].fAAFlags, quad.fDevice,
646                                        &aaForQuad, &quad.fEdgeFlags);
647             // Update overall bounds of the op as the union of all quads
648             bounds.joinPossiblyEmptyRect(quad.fDevice.bounds());
649             hasSubpixel |= GrQuadUtils::WillUseHairline(quad.fDevice, aaForQuad, quad.fEdgeFlags);
650 
651             // ResolveAAType() sets aaForQuad to aaType or kNone; it never switches between AA methods.
652             SkASSERT(aaForQuad == GrAAType::kNone || aaForQuad == aaType);
653             if (netAAType == GrAAType::kNone && aaForQuad != GrAAType::kNone) {
654                 netAAType = aaType;
655             }
656 
657             // Calculate metadata for the entry
658             const SkRect* subsetForQuad = nullptr;
659             if (constraint == SkCanvas::kStrict_SrcRectConstraint) {
660                 // Check (briefly) if the subset rect is actually needed for this set entry.
661                 SkRect* subsetRect = &set[q].fSrcRect;
662                 if (!subsetRect->contains(curProxy->backingStoreBoundsRect())) {
663                     if (!safe_to_ignore_subset_rect(aaForQuad, filterForQuad, quad, *subsetRect)) {
664                         netSubset = Subset::kYes;
665                         subsetForQuad = subsetRect;
666                     }
667                 }
668             }
669 
670             // Normalize the src quads and apply origin
671             NormalizationParams proxyParams = proxy_normalization_params(
672                     curProxy, set[q].fProxyView.origin());
673             normalize_src_quad(proxyParams, &quad.fLocal);
674 
675             // This subset may represent a no-op, otherwise it will have the origin and dimensions
676             // of the texture applied to it.
677             SkRect subset = normalize_and_inset_subset(filter, proxyParams, subsetForQuad);
678 
679             // Always append a quad (or 2 if perspective clipped); it may just refer back to a
680             // prior ViewCountPair (this frequently happens when Chrome draws 9-patches).
681             fViewCountPairs[p].fQuadCnt += this->appendQuad(&quad, set[q].fColor, subset);
682         }
683         // The # of proxy switches should match what was provided (+1 because we incremented p
684         // when a new proxy was encountered).
685         SkASSERT((p + 1) == fMetadata.fProxyCount);
686         SkASSERT(fQuads.count() == fMetadata.fTotalQuadCount);
687 
688         fMetadata.fAAType = static_cast<uint16_t>(netAAType);
689         fMetadata.fFilter = static_cast<uint16_t>(netFilter);
690         fMetadata.fSubset = static_cast<uint16_t>(netSubset);
691 
692         this->setBounds(bounds, HasAABloat(netAAType == GrAAType::kCoverage),
693                         hasSubpixel ? IsHairline::kYes : IsHairline::kNo);
694     }
695 
696     int appendQuad(DrawQuad* quad, const SkPMColor4f& color, const SkRect& subset) {
697         DrawQuad extra;
698         // Always clip to W0 to stay consistent with GrQuad::bounds
699         int quadCount = GrQuadUtils::ClipToW0(quad, &extra);
700         if (quadCount == 0) {
701             // We can't discard the op at this point, but disable AA flags so it won't go through
702             // inset/outset processing
703             quad->fEdgeFlags = GrQuadAAFlags::kNone;
704             quadCount = 1;
705         }
706         fQuads.append(quad->fDevice, {color, subset, quad->fEdgeFlags},  &quad->fLocal);
707         if (quadCount > 1) {
708             fQuads.append(extra.fDevice, {color, subset, extra.fEdgeFlags}, &extra.fLocal);
709             fMetadata.fTotalQuadCount++;
710         }
711         return quadCount;
712     }
713 
714     GrProgramInfo* programInfo() override {
715         // Although this Op implements its own onPrePrepareDraws it calls GrMeshDrawOps' version so
716         // this entry point will be called.
717         return (fDesc) ? fDesc->fProgramInfo : nullptr;
718     }
719 
720     void onCreateProgramInfo(const GrCaps* caps,
721                              SkArenaAlloc* arena,
722                              const GrSurfaceProxyView& writeView,
723                              bool usesMSAASurface,
724                              GrAppliedClip&& appliedClip,
725                              const GrDstProxyView& dstProxyView,
726                              GrXferBarrierFlags renderPassXferBarriers,
727                              GrLoadOp colorLoadOp) override {
728         SkASSERT(fDesc);
729 
730         GrGeometryProcessor* gp;
731 
732         {
733             const GrBackendFormat& backendFormat =
734                     fViewCountPairs[0].fProxy->backendFormat();
735 
736             GrSamplerState samplerState = GrSamplerState(GrSamplerState::WrapMode::kClamp,
737                                                          fMetadata.filter());
738 
739             gp = skgpu::v1::QuadPerEdgeAA::MakeTexturedProcessor(
740                     arena, fDesc->fVertexSpec, *caps->shaderCaps(), backendFormat, samplerState,
741                     fMetadata.fSwizzle, std::move(fTextureColorSpaceXform), fMetadata.saturate());
742 
743             SkASSERT(fDesc->fVertexSpec.vertexSize() == gp->vertexStride());
744         }
745 #ifdef SK_ENABLE_STENCIL_CULLING_OHOS
746         const GrUserStencilSettings* st = &GrUserStencilSettings::kUnused;
747         if (fStencilRef != UINT32_MAX && !fShouldDisableStencilCulling && fStencilRef < kStencilLayersMax) {
748             TRACE_EVENT0("skia.gpu", "StencilCullingOpt TextureOpImpl::onCreateProgramInfo with stencil");
749             st = GrUserStencilSettings::kGE[fStencilRef];
750         }
751         fDesc->fProgramInfo = GrSimpleMeshDrawOpHelper::CreateProgramInfo(
752                 caps, arena, writeView, usesMSAASurface, std::move(appliedClip), dstProxyView, gp,
753                 GrProcessorSet::MakeEmptySet(), fDesc->fVertexSpec.primitiveType(),
754                 renderPassXferBarriers, colorLoadOp, GrPipeline::InputFlags::kNone, st);
755 #else
756         fDesc->fProgramInfo = GrSimpleMeshDrawOpHelper::CreateProgramInfo(
757                 caps, arena, writeView, usesMSAASurface, std::move(appliedClip), dstProxyView, gp,
758                 GrProcessorSet::MakeEmptySet(), fDesc->fVertexSpec.primitiveType(),
759                 renderPassXferBarriers, colorLoadOp, GrPipeline::InputFlags::kNone);
760 #endif
761     }
762 
763     void onPrePrepareDraws(GrRecordingContext* context,
764                            const GrSurfaceProxyView& writeView,
765                            GrAppliedClip* clip,
766                            const GrDstProxyView& dstProxyView,
767                            GrXferBarrierFlags renderPassXferBarriers,
768                            GrLoadOp colorLoadOp) override {
769         TRACE_EVENT0("skia.gpu", TRACE_FUNC);
770 
771         SkDEBUGCODE(this->validate();)
772         SkASSERT(!fDesc);
773 
774         SkArenaAlloc* arena = context->priv().recordTimeAllocator();
775 
776         fDesc = arena->make<Desc>();
777         this->characterize(fDesc);
778         fDesc->allocatePrePreparedVertices(arena);
779         FillInVertices(*context->priv().caps(), this, fDesc, fDesc->fPrePreparedVertices);
780 
781         // This will call onCreateProgramInfo and register the created program with the DDL.
782         this->INHERITED::onPrePrepareDraws(context, writeView, clip, dstProxyView,
783                                            renderPassXferBarriers, colorLoadOp);
784     }
785 
786     static void FillInVertices(const GrCaps& caps,
787                                TextureOpImpl* texOp,
788                                Desc* desc,
789                                char* vertexData) {
790         SkASSERT(vertexData);
791 
792         SkDEBUGCODE(int totQuadsSeen = 0;)
793         SkDEBUGCODE(int totVerticesSeen = 0;)
794         SkDEBUGCODE(const size_t vertexSize = desc->fVertexSpec.vertexSize());
795 
796         skgpu::v1::QuadPerEdgeAA::Tessellator tessellator(desc->fVertexSpec, vertexData);
797         for (const auto& op : ChainRange<TextureOpImpl>(texOp)) {
798             auto iter = op.fQuads.iterator();
799             for (unsigned p = 0; p < op.fMetadata.fProxyCount; ++p) {
800                 const int quadCnt = op.fViewCountPairs[p].fQuadCnt;
801                 SkDEBUGCODE(int meshVertexCnt = quadCnt * desc->fVertexSpec.verticesPerQuad());
802 
803                 for (int i = 0; i < quadCnt && iter.next(); ++i) {
804                     SkASSERT(iter.isLocalValid());
805                     const ColorSubsetAndAA& info = iter.metadata();
806 
807                     tessellator.append(iter.deviceQuad(), iter.localQuad(), info.fColor,
808                                        info.fSubsetRect, info.aaFlags());
809                 }
810 
811                 SkASSERT((totVerticesSeen + meshVertexCnt) * vertexSize
812                          == (size_t)(tessellator.vertices() - vertexData));
813 
814                 SkDEBUGCODE(totQuadsSeen += quadCnt;)
815                 SkDEBUGCODE(totVerticesSeen += meshVertexCnt);
816                 SkASSERT(totQuadsSeen * desc->fVertexSpec.verticesPerQuad() == totVerticesSeen);
817             }
818 
819             // If quad counts per proxy were calculated correctly, the entire iterator
820             // should have been consumed.
821             SkASSERT(!iter.next());
822         }
823 
824         SkASSERT(desc->totalSizeInBytes() == (size_t)(tessellator.vertices() - vertexData));
825         SkASSERT(totQuadsSeen == desc->fNumTotalQuads);
826         SkASSERT(totVerticesSeen == desc->totalNumVertices());
827     }
828 
829 #ifdef SK_DEBUG
830     static int validate_op(GrTextureType textureType,
831                            GrAAType aaType,
832                            GrSwizzle swizzle,
833                            const TextureOpImpl* op) {
834         SkASSERT(op->fMetadata.fSwizzle == swizzle);
835 
836         int quadCount = 0;
837         for (unsigned p = 0; p < op->fMetadata.fProxyCount; ++p) {
838             auto* proxy = op->fViewCountPairs[p].fProxy->asTextureProxy();
839             quadCount += op->fViewCountPairs[p].fQuadCnt;
840             SkASSERT(proxy);
841             SkASSERT(proxy->textureType() == textureType);
842         }
843 
844         SkASSERT(aaType == op->fMetadata.aaType());
845         return quadCount;
846     }
847 
848     void validate() const override {
849         // NOTE: Since this is debug-only code, we use the virtual asTextureProxy()
850         auto textureType = fViewCountPairs[0].fProxy->asTextureProxy()->textureType();
851         GrAAType aaType = fMetadata.aaType();
852         GrSwizzle swizzle = fMetadata.fSwizzle;
853 
854         int quadCount = validate_op(textureType, aaType, swizzle, this);
855 
856         for (const GrOp* tmp = this->prevInChain(); tmp; tmp = tmp->prevInChain()) {
857             quadCount += validate_op(textureType, aaType, swizzle,
858                                      static_cast<const TextureOpImpl*>(tmp));
859         }
860 
861         for (const GrOp* tmp = this->nextInChain(); tmp; tmp = tmp->nextInChain()) {
862             quadCount += validate_op(textureType, aaType, swizzle,
863                                      static_cast<const TextureOpImpl*>(tmp));
864         }
865 
866         SkASSERT(quadCount == this->numChainedQuads());
867     }
868 
869 #endif
870 
871 #if GR_TEST_UTILS
872     int numQuads() const final { return this->totNumQuads(); }
873 #endif
874 
875     void characterize(Desc* desc) const {
876         SkDEBUGCODE(this->validate();)
877 
878         GrQuad::Type quadType = GrQuad::Type::kAxisAligned;
879         ColorType colorType = ColorType::kNone;
880         GrQuad::Type srcQuadType = GrQuad::Type::kAxisAligned;
881         Subset subset = Subset::kNo;
882         GrAAType overallAAType = fMetadata.aaType();
883 
884         desc->fNumProxies = 0;
885         desc->fNumTotalQuads = 0;
886         int maxQuadsPerMesh = 0;
887 
888         for (const auto& op : ChainRange<TextureOpImpl>(this)) {
889             if (op.fQuads.deviceQuadType() > quadType) {
890                 quadType = op.fQuads.deviceQuadType();
891             }
892             if (op.fQuads.localQuadType() > srcQuadType) {
893                 srcQuadType = op.fQuads.localQuadType();
894             }
895             if (op.fMetadata.subset() == Subset::kYes) {
896                 subset = Subset::kYes;
897             }
898             colorType = std::max(colorType, op.fMetadata.colorType());
899             desc->fNumProxies += op.fMetadata.fProxyCount;
900 
901             for (unsigned p = 0; p < op.fMetadata.fProxyCount; ++p) {
902                 maxQuadsPerMesh = std::max(op.fViewCountPairs[p].fQuadCnt, maxQuadsPerMesh);
903             }
904             desc->fNumTotalQuads += op.totNumQuads();
905 
906             if (op.fMetadata.aaType() == GrAAType::kCoverage) {
907                 overallAAType = GrAAType::kCoverage;
908             }
909         }
910 
911         SkASSERT(desc->fNumTotalQuads == this->numChainedQuads());
912 
913         SkASSERT(!CombinedQuadCountWillOverflow(overallAAType, false, desc->fNumTotalQuads));
914 
915         auto indexBufferOption = skgpu::v1::QuadPerEdgeAA::CalcIndexBufferOption(overallAAType,
916                                                                                  maxQuadsPerMesh);
917 
918         desc->fVertexSpec = VertexSpec(quadType, colorType, srcQuadType, /* hasLocal */ true,
919                                        subset, overallAAType, /* alpha as coverage */ true,
920                                        indexBufferOption);
921 
922         SkASSERT(desc->fNumTotalQuads <= skgpu::v1::QuadPerEdgeAA::QuadLimit(indexBufferOption));
923     }
924 
925     int totNumQuads() const {
926 #ifdef SK_DEBUG
927         int tmp = 0;
928         for (unsigned p = 0; p < fMetadata.fProxyCount; ++p) {
929             tmp += fViewCountPairs[p].fQuadCnt;
930         }
931         SkASSERT(tmp == fMetadata.fTotalQuadCount);
932 #endif
933 
934         return fMetadata.fTotalQuadCount;
935     }
936 
937     int numChainedQuads() const {
938         int numChainedQuads = this->totNumQuads();
939 
940         for (const GrOp* tmp = this->prevInChain(); tmp; tmp = tmp->prevInChain()) {
941             numChainedQuads += ((const TextureOpImpl*)tmp)->totNumQuads();
942         }
943 
944         for (const GrOp* tmp = this->nextInChain(); tmp; tmp = tmp->nextInChain()) {
945             numChainedQuads += ((const TextureOpImpl*)tmp)->totNumQuads();
946         }
947 
948         return numChainedQuads;
949     }
950 
951     // onPrePrepareDraws may or may not have been called at this point
952     void onPrepareDraws(GrMeshDrawTarget* target) override {
953         TRACE_EVENT0("skia.gpu", TRACE_FUNC);
954 
955         SkDEBUGCODE(this->validate();)
956 
957         SkASSERT(!fDesc || fDesc->fPrePreparedVertices);
958 
959         if (!fDesc) {
960             SkArenaAlloc* arena = target->allocator();
961             fDesc = arena->make<Desc>();
962             this->characterize(fDesc);
963             SkASSERT(!fDesc->fPrePreparedVertices);
964         }
965 
966         size_t vertexSize = fDesc->fVertexSpec.vertexSize();
967 
968         void* vdata = target->makeVertexSpace(vertexSize, fDesc->totalNumVertices(),
969                                               &fDesc->fVertexBuffer, &fDesc->fBaseVertex);
970         if (!vdata) {
971             SkDebugf("Could not allocate vertices\n");
972             return;
973         }
974 
975         if (fDesc->fVertexSpec.needsIndexBuffer()) {
976             fDesc->fIndexBuffer = skgpu::v1::QuadPerEdgeAA::GetIndexBuffer(
977                     target, fDesc->fVertexSpec.indexBufferOption());
978             if (!fDesc->fIndexBuffer) {
979                 SkDebugf("Could not allocate indices\n");
980                 return;
981             }
982         }
983 
984         if (fDesc->fPrePreparedVertices) {
985             memcpy(vdata, fDesc->fPrePreparedVertices, fDesc->totalSizeInBytes());
986         } else {
987             FillInVertices(target->caps(), this, fDesc, (char*) vdata);
988         }
989     }
990 
991 #ifdef SUPPORT_OPAQUE_OPTIMIZATION
992     bool canUseOpaqueRegion(const TextureOpImpl& op, GrOpFlushState* flushState) {
993         bool isUseOpaqueRegion = flushState->caps().supportsOpaqueRegion() &&
994                                                     op.fSupportOpaqueOpt &&
995                                                     !(op.fRRect.isEmpty()) &&
996                                                     (op.fQuads.count() == 1) &&
997                                                     (op.fMetadata.fProxyCount == 1);
998         HITRACE_OHOS_NAME_FMT_LEVEL(DebugTraceLevel::DETAIL,
999             "OpaqueRegion: %d [devSupport %d, isOpaque %d, hasRRect %d, QuadCount %d %d]",
1000                                                             isUseOpaqueRegion,
1001                                                             flushState->caps().supportsOpaqueRegion(),
1002                                                             op.fSupportOpaqueOpt,
1003                                                             !(op.fRRect.isEmpty()),
1004                                                             (op.fQuads.count() == 1),
1005                                                             (op.fMetadata.fProxyCount == 1));
1006         return isUseOpaqueRegion;
1007     }
1008 
1009     inline int32_t computeLowEdge(SkScalar rawEdge, SkScalar rrect1, SkScalar rrect2) {
1010         return ceil(rawEdge + fmax(rrect1, rrect2));
1011     }
1012 
1013     inline int32_t computeHighEdge(SkScalar rawEdge, SkScalar rrect1, SkScalar rrect2) {
1014         return floor(rawEdge - fmax(rrect1, rrect2));
1015     }
1016 
1017     SkIRect computeOpaqueRegion(const TextureOpImpl& op) {
1018         SkRect rawRegion = op.bounds();
1019         SkIRect opaqueRegion;
1020         opaqueRegion.fLeft = computeLowEdge(rawRegion.fLeft
1021                             , op.fRRect.radii(SkRRect::Corner::kUpperLeft_Corner).x()
1022                             , op.fRRect.radii(SkRRect::Corner::kLowerLeft_Corner).x());
1023         opaqueRegion.fTop = computeLowEdge(rawRegion.fTop
1024                             , op.fRRect.radii(SkRRect::Corner::kUpperLeft_Corner).y()
1025                             , op.fRRect.radii(SkRRect::Corner::kUpperRight_Corner).y());
1026         opaqueRegion.fRight = computeHighEdge(rawRegion.fRight
1027                             , op.fRRect.radii(SkRRect::Corner::kUpperRight_Corner).x()
1028                             , op.fRRect.radii(SkRRect::Corner::kLowerRight_Corner).x());
1029         opaqueRegion.fBottom = computeHighEdge(rawRegion.fBottom
1030                             , op.fRRect.radii(SkRRect::Corner::kLowerRight_Corner).y()
1031                             , op.fRRect.radii(SkRRect::Corner::kLowerLeft_Corner).y());
1032         return opaqueRegion.width() > 0 && opaqueRegion.height() > 0 ? opaqueRegion : SkIRect::MakeEmpty();
1033     }
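    // For illustration: bounds of (0, 0, 100, 100) with all four corner radii equal to 10 give an
    // opaque region of {10, 10, 90, 90}; if the rounded corners consume the whole rect the region
    // is empty.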
1034 #endif
1035 
1036     void onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) override {
1037         if (!fDesc->fVertexBuffer) {
1038             return;
1039         }
1040 
1041         if (fDesc->fVertexSpec.needsIndexBuffer() && !fDesc->fIndexBuffer) {
1042             return;
1043         }
1044 
1045         if (!fDesc->fProgramInfo) {
1046             this->createProgramInfo(flushState);
1047             SkASSERT(fDesc->fProgramInfo);
1048         }
1049 
1050         flushState->bindPipelineAndScissorClip(*fDesc->fProgramInfo, chainBounds);
1051         flushState->bindBuffers(std::move(fDesc->fIndexBuffer), nullptr,
1052                                 std::move(fDesc->fVertexBuffer));
1053 
1054         int totQuadsSeen = 0;
1055         SkDEBUGCODE(int numDraws = 0;)
1056         for (const auto& op : ChainRange<TextureOpImpl>(this)) {
1057             for (unsigned p = 0; p < op.fMetadata.fProxyCount; ++p) {
1058                 const int quadCnt = op.fViewCountPairs[p].fQuadCnt;
1059                 SkASSERT(numDraws < fDesc->fNumProxies);
1060 #ifdef SUPPORT_OPAQUE_OPTIMIZATION
1061                 bool isUseOpaqueRegion = canUseOpaqueRegion(op, flushState);
1062                 if (isUseOpaqueRegion) {
1063                     SkIRect opaqueRegion = computeOpaqueRegion(op);
1064                     flushState->setOpaqueRegion(1, &opaqueRegion);
1065                 }
1066 #endif
1067                 flushState->bindTextures(fDesc->fProgramInfo->geomProc(),
1068                                          *op.fViewCountPairs[p].fProxy,
1069                                          fDesc->fProgramInfo->pipeline());
1070                 skgpu::v1::QuadPerEdgeAA::IssueDraw(flushState->caps(), flushState->opsRenderPass(),
1071                                                     fDesc->fVertexSpec, totQuadsSeen, quadCnt,
1072                                                     fDesc->totalNumVertices(), fDesc->fBaseVertex);
1073                 totQuadsSeen += quadCnt;
1074                 SkDEBUGCODE(++numDraws;)
1075 #ifdef SUPPORT_OPAQUE_OPTIMIZATION
1076                 if (isUseOpaqueRegion) {
1077                     flushState->setOpaqueRegion(0, nullptr);
1078                 }
1079 #endif
1080             }
1081         }
1082 
1083         SkASSERT(totQuadsSeen == fDesc->fNumTotalQuads);
1084         SkASSERT(numDraws == fDesc->fNumProxies);
1085     }
1086 
1087     void propagateCoverageAAThroughoutChain() {
1088         fMetadata.fAAType = static_cast<uint16_t>(GrAAType::kCoverage);
1089 
1090         for (GrOp* tmp = this->prevInChain(); tmp; tmp = tmp->prevInChain()) {
1091             auto tex = static_cast<TextureOpImpl*>(tmp);
1092             SkASSERT(tex->fMetadata.aaType() == GrAAType::kCoverage ||
1093                      tex->fMetadata.aaType() == GrAAType::kNone);
1094             tex->fMetadata.fAAType = static_cast<uint16_t>(GrAAType::kCoverage);
1095         }
1096 
1097         for (GrOp* tmp = this->nextInChain(); tmp; tmp = tmp->nextInChain()) {
1098             auto tex = static_cast<TextureOpImpl*>(tmp);
1099             SkASSERT(tex->fMetadata.aaType() == GrAAType::kCoverage ||
1100                      tex->fMetadata.aaType() == GrAAType::kNone);
1101             tex->fMetadata.fAAType = static_cast<uint16_t>(GrAAType::kCoverage);
1102         }
1103     }
1104 
1105     CombineResult onCombineIfPossible(GrOp* t, SkArenaAlloc*, const GrCaps& caps) override {
1106         TRACE_EVENT0("skia.gpu", TRACE_FUNC);
1107         auto that = t->cast<TextureOpImpl>();
1108 
1109         SkDEBUGCODE(this->validate();)
1110         SkDEBUGCODE(that->validate();)
1111 
1112         if (fDesc || that->fDesc) {
1113             // This should never happen (since only DDL recorded ops should be prePrepared)
1114             // but, in any case, we should never combine ops that have been prePrepared.
1115             return CombineResult::kCannotCombine;
1116         }
1117 
1118 #ifdef SK_ENABLE_STENCIL_CULLING_OHOS
1119         if (fStencilRef != that->fStencilRef) {
1120             return CombineResult::kCannotCombine;
1121         }
1122 #endif
1123 
1124         if (fMetadata.subset() != that->fMetadata.subset()) {
1125             // It is technically possible to combine operations across subset modes, but performance
1126             // testing suggests it's better to make more draw calls where some take advantage of
1127             // the more optimal shader path without coordinate clamping.
1128             return CombineResult::kCannotCombine;
1129         }
1130         if (!GrColorSpaceXform::Equals(fTextureColorSpaceXform.get(),
1131                                        that->fTextureColorSpaceXform.get())) {
1132             return CombineResult::kCannotCombine;
1133         }
1134 
1135         bool upgradeToCoverageAAOnMerge = false;
1136         if (fMetadata.aaType() != that->fMetadata.aaType()) {
1137             if (!CanUpgradeAAOnMerge(fMetadata.aaType(), that->fMetadata.aaType())) {
1138                 return CombineResult::kCannotCombine;
1139             }
1140             upgradeToCoverageAAOnMerge = true;
1141         }
1142 
1143         if (CombinedQuadCountWillOverflow(fMetadata.aaType(), upgradeToCoverageAAOnMerge,
1144                                           this->numChainedQuads() + that->numChainedQuads())) {
1145             return CombineResult::kCannotCombine;
1146         }
1147 
1148         if (fMetadata.saturate() != that->fMetadata.saturate()) {
1149             return CombineResult::kCannotCombine;
1150         }
1151         if (fMetadata.filter() != that->fMetadata.filter()) {
1152             return CombineResult::kCannotCombine;
1153         }
1154         if (fMetadata.mipmapMode() != that->fMetadata.mipmapMode()) {
1155             return CombineResult::kCannotCombine;
1156         }
1157         if (fMetadata.fSwizzle != that->fMetadata.fSwizzle) {
1158             return CombineResult::kCannotCombine;
1159         }
1160         const auto* thisProxy = fViewCountPairs[0].fProxy.get();
1161         const auto* thatProxy = that->fViewCountPairs[0].fProxy.get();
1162         if (fMetadata.fProxyCount > 1 || that->fMetadata.fProxyCount > 1 ||
1163             thisProxy != thatProxy) {
1164             // We can't merge across different proxies. Check if 'this' can be chained with 'that'.
1165             if (GrTextureProxy::ProxiesAreCompatibleAsDynamicState(thisProxy, thatProxy) &&
1166                 caps.dynamicStateArrayGeometryProcessorTextureSupport() &&
1167                 fMetadata.aaType() == that->fMetadata.aaType()) {
1168                 // We only allow chaining when the aaTypes match bc otherwise the AA type
1169                 // reported by the chain can be inconsistent. That is, since chaining doesn't
1170                 // propagate revised AA information throughout the chain, the head of the chain
1171                 // could have an AA setting of kNone while the chain as a whole could have a
1172                 // setting of kCoverage. This inconsistency would then interfere with the validity
1173                 // of the CombinedQuadCountWillOverflow calls.
1174                 // This problem doesn't occur w/ merging bc we do propagate the AA information
1175                 // (in propagateCoverageAAThroughoutChain) below.
1176                 return CombineResult::kMayChain;
1177             }
1178             return CombineResult::kCannotCombine;
1179         }
1180 
1181         fMetadata.fSubset |= that->fMetadata.fSubset;
1182         fMetadata.fColorType = std::max(fMetadata.fColorType, that->fMetadata.fColorType);
1183 
1184         // Concatenate quad lists together
1185         fQuads.concat(that->fQuads);
1186         fViewCountPairs[0].fQuadCnt += that->fQuads.count();
1187         fMetadata.fTotalQuadCount += that->fQuads.count();
1188 
1189         if (upgradeToCoverageAAOnMerge) {
1190             // This merger may be the start of a concatenation of two chains. When one
1191             // of the chains mutates its AA the other must follow suit or else the above AA
1192             // check may prevent later ops from chaining together. A specific example of this is
1193             // when chain2 is prepended onto chain1:
1194             //  chain1 (that): opA (non-AA/mergeable) opB (non-AA/non-mergeable)
1195             //  chain2 (this): opC (cov-AA/non-mergeable) opD (cov-AA/mergeable)
1196             // W/o this propagation, after opD & opA merge, opB and opC would say they couldn't
1197             // chain - which would stop the concatenation process.
1198             this->propagateCoverageAAThroughoutChain();
1199             that->propagateCoverageAAThroughoutChain();
1200         }
1201 
1202         SkDEBUGCODE(this->validate();)
1203 
1204         return CombineResult::kMerged;
1205     }
1206 
1207 #if GR_TEST_UTILS
1208     SkString onDumpInfo() const override {
1209         SkString str = SkStringPrintf("# draws: %d\n", fQuads.count());
1210         auto iter = fQuads.iterator();
1211         for (unsigned p = 0; p < fMetadata.fProxyCount; ++p) {
1212             SkString proxyStr = fViewCountPairs[p].fProxy->dump();
1213             str.append(proxyStr);
1214             str.appendf(", Filter: %d, MM: %d\n",
1215                         static_cast<int>(fMetadata.fFilter),
1216                         static_cast<int>(fMetadata.fMipmapMode));
1217             for (int i = 0; i < fViewCountPairs[p].fQuadCnt && iter.next(); ++i) {
1218                 const GrQuad* quad = iter.deviceQuad();
1219                 GrQuad uv = iter.isLocalValid() ? *(iter.localQuad()) : GrQuad();
1220                 const ColorSubsetAndAA& info = iter.metadata();
1221                 str.appendf(
1222                         "%d: Color: 0x%08x, Subset(%d): [L: %.2f, T: %.2f, R: %.2f, B: %.2f]\n"
1223                         "  UVs  [(%.2f, %.2f), (%.2f, %.2f), (%.2f, %.2f), (%.2f, %.2f)]\n"
1224                         "  Quad [(%.2f, %.2f), (%.2f, %.2f), (%.2f, %.2f), (%.2f, %.2f)]\n",
1225                         i, info.fColor.toBytes_RGBA(), fMetadata.fSubset, info.fSubsetRect.fLeft,
1226                         info.fSubsetRect.fTop, info.fSubsetRect.fRight, info.fSubsetRect.fBottom,
1227                         quad->point(0).fX, quad->point(0).fY, quad->point(1).fX, quad->point(1).fY,
1228                         quad->point(2).fX, quad->point(2).fY, quad->point(3).fX, quad->point(3).fY,
1229                         uv.point(0).fX, uv.point(0).fY, uv.point(1).fX, uv.point(1).fY,
1230                         uv.point(2).fX, uv.point(2).fY, uv.point(3).fX, uv.point(3).fY);
1231             }
1232         }
1233         return str;
1234     }
1235 #endif
1236 
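    // Device-space and (optional) local-space coordinates for every quad, along with each
    // quad's color, subset rect and AA flags, packed into one compact buffer.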
1237     GrQuadBuffer<ColorSubsetAndAA> fQuads;
1238     sk_sp<GrColorSpaceXform> fTextureColorSpaceXform;
1239     // Most state of TextureOp is packed into these two fields to minimize the op's size.
1240     // Historically, increasing the size of TextureOp has caused surprising perf regressions, so
1241     // consider/measure changes with care.
1242     Desc* fDesc;
1243     Metadata fMetadata;
1244 
1245 #ifdef SUPPORT_OPAQUE_OPTIMIZATION
1246     SkRRect fRRect;
1247     bool fSupportOpaqueOpt;
1248 #endif
1249 #ifdef SK_ENABLE_STENCIL_CULLING_OHOS
1250     // Stencil reference value used for stencil culling
1251     uint32_t fStencilRef = UINT32_MAX;
1252 #endif
1253     using INHERITED = GrMeshDrawOp;
1254 
1255     // This field must go last. When allocating this op, we will allocate extra space to hold
1256     // additional ViewCountPairs immediately after the op's allocation so we can treat this
1257     // as an fProxyCnt-length array.
1258     ViewCountPair fViewCountPairs[1];
1259 };
1260 
1261 }  // anonymous namespace
1262 
1263 namespace skgpu::v1 {
1264 
1265 #if GR_TEST_UTILS
1266 uint32_t TextureOp::ClassID() {
1267     return TextureOpImpl::ClassID();
1268 }
1269 #endif
1270 
1271 GrOp::Owner TextureOp::Make(GrRecordingContext* context,
1272                             GrSurfaceProxyView proxyView,
1273                             SkAlphaType alphaType,
1274                             sk_sp<GrColorSpaceXform> textureXform,
1275                             GrSamplerState::Filter filter,
1276                             GrSamplerState::MipmapMode mm,
1277                             const SkPMColor4f& color,
1278                             Saturate saturate,
1279                             SkBlendMode blendMode,
1280                             GrAAType aaType,
1281                             DrawQuad* quad,
1282 #ifdef SK_ENABLE_STENCIL_CULLING_OHOS
1283                             const SkRect* subset,
1284 #ifdef SUPPORT_OPAQUE_OPTIMIZATION
1285                             uint32_t stencilRef,
1286                             bool supportOpaqueOpt) {
1287 #else
1288                             uint32_t stencilRef) {
1289 #endif
1290 #else
1291 #ifdef SUPPORT_OPAQUE_OPTIMIZATION
1292                             const SkRect* subset,
1293                             bool supportOpaqueOpt) {
1294 #else
1295                             const SkRect* subset) {
1296 #endif
1297 #endif
1298     // Apply optimizations that are valid whether TextureOp or FillRectOp ends up being used
1299     if (subset && subset->contains(proxyView.proxy()->backingStoreBoundsRect())) {
1300         // No need for a shader-based subset if hardware clamping achieves the same effect
1301         subset = nullptr;
1302     }
1303 
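    // filter_and_mm_have_effect() reports whether filtering / mip selection can actually
    // change the rendered result for this local->device mapping; when they cannot, drop to
    // the cheaper kNearest / kNone sampling state.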
1304     if (filter != GrSamplerState::Filter::kNearest || mm != GrSamplerState::MipmapMode::kNone) {
1305         auto [mustFilter, mustMM] = filter_and_mm_have_effect(quad->fLocal, quad->fDevice);
1306         if (!mustFilter) {
1307             filter = GrSamplerState::Filter::kNearest;
1308         }
1309         if (!mustMM) {
1310             mm = GrSamplerState::MipmapMode::kNone;
1311         }
1312     }
1313 
1314     if (blendMode == SkBlendMode::kSrcOver) {
1315         return TextureOpImpl::Make(context, std::move(proxyView), std::move(textureXform), filter,
1316 #ifdef SK_ENABLE_STENCIL_CULLING_OHOS
1317 #ifdef SUPPORT_OPAQUE_OPTIMIZATION
1318                                    mm, color, saturate, aaType, std::move(quad), subset, stencilRef, supportOpaqueOpt);
1319 #else
1320                                    mm, color, saturate, aaType, std::move(quad), subset, stencilRef);
1321 #endif
1322 #else
1323 #ifdef SUPPORT_OPAQUE_OPTIMIZATION
1324                                    mm, color, saturate, aaType, std::move(quad), subset, supportOpaqueOpt);
1325 #else
1326                                    mm, color, saturate, aaType, std::move(quad), subset);
1327 #endif
1328 #endif
1329     } else {
1330         // Emulate complex blending using FillRectOp
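        // The texture is sampled through a hand-built fragment-processor chain: a texture
        // effect (clamped to 'subset' when one survives the earlier optimization), then the
        // color-space transform, then a modulate against the paint color, and finally an
        // output clamp when Saturate::kYes. The requested blend mode itself is handled by
        // the XP factory set on the paint below.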
1331         GrSamplerState samplerState(GrSamplerState::WrapMode::kClamp, filter, mm);
1332         GrPaint paint;
1333         paint.setColor4f(color);
1334         paint.setXPFactory(SkBlendMode_AsXPFactory(blendMode));
1335 
1336         std::unique_ptr<GrFragmentProcessor> fp;
1337         const auto& caps = *context->priv().caps();
1338         if (subset) {
1339             SkRect localRect;
1340             if (quad->fLocal.asRect(&localRect)) {
1341                 fp = GrTextureEffect::MakeSubset(std::move(proxyView), alphaType, SkMatrix::I(),
1342                                                  samplerState, *subset, localRect, caps);
1343             } else {
1344                 fp = GrTextureEffect::MakeSubset(std::move(proxyView), alphaType, SkMatrix::I(),
1345                                                  samplerState, *subset, caps);
1346             }
1347         } else {
1348             fp = GrTextureEffect::Make(std::move(proxyView), alphaType, SkMatrix::I(), samplerState,
1349                                        caps);
1350         }
1351         fp = GrColorSpaceXformEffect::Make(std::move(fp), std::move(textureXform));
1352         fp = GrBlendFragmentProcessor::Make(std::move(fp), nullptr, SkBlendMode::kModulate);
1353         if (saturate == Saturate::kYes) {
1354             fp = GrFragmentProcessor::ClampOutput(std::move(fp));
1355         }
1356         paint.setColorFragmentProcessor(std::move(fp));
1357         return FillRectOp::Make(context, std::move(paint), aaType, quad);
1358     }
1359 }
1360 
1361 // A helper class that assists in breaking up bulk API quad draws into manageable chunks.
1362 class TextureOp::BatchSizeLimiter {
1363 public:
1364     BatchSizeLimiter(SurfaceDrawContext* sdc,
1365                      const GrClip* clip,
1366                      GrRecordingContext* rContext,
1367                      int numEntries,
1368                      GrSamplerState::Filter filter,
1369                      GrSamplerState::MipmapMode mm,
1370                      Saturate saturate,
1371                      SkCanvas::SrcRectConstraint constraint,
1372                      const SkMatrix& viewMatrix,
1373                      sk_sp<GrColorSpaceXform> textureColorSpaceXform)
1374             : fSDC(sdc)
1375             , fClip(clip)
1376             , fContext(rContext)
1377             , fFilter(filter)
1378             , fMipmapMode(mm)
1379             , fSaturate(saturate)
1380             , fConstraint(constraint)
1381             , fViewMatrix(viewMatrix)
1382             , fTextureColorSpaceXform(textureColorSpaceXform)
1383             , fNumLeft(numEntries) {}
1384 
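    // Emits one TextureOpImpl for the next 'clumpSize' entries, starting at the current
    // clump offset, and advances the remaining / clumped counters.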
1385     void createOp(GrTextureSetEntry set[], int clumpSize, GrAAType aaType) {
1386 
1387         int clumpProxyCount = proxy_run_count(&set[fNumClumped], clumpSize);
1388         GrOp::Owner op = TextureOpImpl::Make(fContext,
1389                                              &set[fNumClumped],
1390                                              clumpSize,
1391                                              clumpProxyCount,
1392                                              fFilter,
1393                                              fMipmapMode,
1394                                              fSaturate,
1395                                              aaType,
1396                                              fConstraint,
1397                                              fViewMatrix,
1398                                              fTextureColorSpaceXform);
1399         fSDC->addDrawOp(fClip, std::move(op));
1400 
1401         fNumLeft -= clumpSize;
1402         fNumClumped += clumpSize;
1403     }
1404 
1405     int numLeft() const { return fNumLeft;  }
1406     int baseIndex() const { return fNumClumped; }
1407 
1408 private:
1409     SurfaceDrawContext*         fSDC;
1410     const GrClip*               fClip;
1411     GrRecordingContext*         fContext;
1412     GrSamplerState::Filter      fFilter;
1413     GrSamplerState::MipmapMode  fMipmapMode;
1414     Saturate                    fSaturate;
1415     SkCanvas::SrcRectConstraint fConstraint;
1416     const SkMatrix&             fViewMatrix;
1417     sk_sp<GrColorSpaceXform>    fTextureColorSpaceXform;
1418 
1419     int                         fNumLeft;
1420     int                         fNumClumped = 0; // also the offset for the start of the next clump
1421 };
1422 
1423 // Greedily clump quad draws together until the index buffer limit is exceeded.
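// Three cases are handled below: (1) if the blend mode is not src-over or the backend lacks
// dynamic texture-state support, each entry becomes its own op (possibly a FillRectOp via
// Make()); (2) if the whole set fits under both index-buffer limits, a single op is emitted;
// (3) otherwise entries are clumped into runs via BatchSizeLimiter. For instance, if
// MaxNumNonAAQuads() were 4096, a 10000-entry non-AA set would be split into clumps of
// 4096, 4096 and 1808 quads.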
1424 void TextureOp::AddTextureSetOps(SurfaceDrawContext* sdc,
1425                                  const GrClip* clip,
1426                                  GrRecordingContext* context,
1427                                  GrTextureSetEntry set[],
1428                                  int cnt,
1429                                  int proxyRunCnt,
1430                                  GrSamplerState::Filter filter,
1431                                  GrSamplerState::MipmapMode mm,
1432                                  Saturate saturate,
1433                                  SkBlendMode blendMode,
1434                                  GrAAType aaType,
1435                                  SkCanvas::SrcRectConstraint constraint,
1436                                  const SkMatrix& viewMatrix,
1437                                  sk_sp<GrColorSpaceXform> textureColorSpaceXform) {
1438     // Ensure that the index buffer limits are lower than the proxy and quad count limits of
1439     // the op's metadata so we don't need to worry about overflow.
1440     SkDEBUGCODE(TextureOpImpl::ValidateResourceLimits();)
1441     SkASSERT(proxy_run_count(set, cnt) == proxyRunCnt);
1442 
1443     // First check if we can support batches as a single op
1444     if (blendMode != SkBlendMode::kSrcOver ||
1445         !context->priv().caps()->dynamicStateArrayGeometryProcessorTextureSupport()) {
1446         // Append each entry as its own op; these may still be GrTextureOps if the blend mode is
1447         // src-over but the backend doesn't support dynamic state changes. Otherwise Make()
1448         // automatically creates the appropriate FillRectOp to emulate TextureOp.
1449         SkMatrix ctm;
1450         for (int i = 0; i < cnt; ++i) {
1451             ctm = viewMatrix;
1452             if (set[i].fPreViewMatrix) {
1453                 ctm.preConcat(*set[i].fPreViewMatrix);
1454             }
1455 
1456             DrawQuad quad;
1457             quad.fEdgeFlags = set[i].fAAFlags;
1458             if (set[i].fDstClipQuad) {
1459                 quad.fDevice = GrQuad::MakeFromSkQuad(set[i].fDstClipQuad, ctm);
1460 
1461                 SkPoint srcPts[4];
1462                 GrMapRectPoints(set[i].fDstRect, set[i].fSrcRect, set[i].fDstClipQuad, srcPts, 4);
1463                 quad.fLocal = GrQuad::MakeFromSkQuad(srcPts, SkMatrix::I());
1464             } else {
1465                 quad.fDevice = GrQuad::MakeFromRect(set[i].fDstRect, ctm);
1466                 quad.fLocal = GrQuad(set[i].fSrcRect);
1467             }
1468 
1469             const SkRect* subset = constraint == SkCanvas::kStrict_SrcRectConstraint
1470                     ? &set[i].fSrcRect : nullptr;
1471 
1472             auto op = Make(context, set[i].fProxyView, set[i].fSrcAlphaType, textureColorSpaceXform,
1473                            filter, mm, set[i].fColor, saturate, blendMode, aaType, &quad, subset);
1474             sdc->addDrawOp(clip, std::move(op));
1475         }
1476         return;
1477     }
1478 
1479     // Second check if we can always just make a single op and avoid the extra iteration
1480     // needed to clump things together.
1481     if (cnt <= std::min(GrResourceProvider::MaxNumNonAAQuads(),
1482                       GrResourceProvider::MaxNumAAQuads())) {
1483         auto op = TextureOpImpl::Make(context, set, cnt, proxyRunCnt, filter, mm, saturate, aaType,
1484                                       constraint, viewMatrix, std::move(textureColorSpaceXform));
1485         sdc->addDrawOp(clip, std::move(op));
1486         return;
1487     }
1488 
1489     BatchSizeLimiter state(sdc, clip, context, cnt, filter, mm, saturate, constraint, viewMatrix,
1490                            std::move(textureColorSpaceXform));
1491 
1492     // kNone and kMSAA never get altered
1493     if (aaType == GrAAType::kNone || aaType == GrAAType::kMSAA) {
1494         // Clump these into series of MaxNumNonAAQuads-sized GrTextureOps
1495         while (state.numLeft() > 0) {
1496             int clumpSize = std::min(state.numLeft(), GrResourceProvider::MaxNumNonAAQuads());
1497 
1498             state.createOp(set, clumpSize, aaType);
1499         }
1500     } else {
1501         // kCoverage can be downgraded to kNone. Note that the following is conservative. kCoverage
1502         // can also get downgraded to kNone if all the quads are on integer coordinates and
1503         // axis-aligned.
1504         SkASSERT(aaType == GrAAType::kCoverage);
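        // Scan forward from the current base index, tracking whether any quad so far needs
        // coverage AA. A run is cut when it reaches MaxNumAAQuads() while needing AA, or
        // MaxNumNonAAQuads() while still AA-free; whatever remains after the scan is flushed
        // as a final clump. For example, if the first AA quad appears at index i and
        // i >= MaxNumAAQuads(), the preceding i quads are calved off as a non-AA op and the
        // scan restarts at that AA quad.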
1505 
1506         while (state.numLeft() > 0) {
1507             GrAAType runningAA = GrAAType::kNone;
1508             bool clumped = false;
1509 
1510             for (int i = 0; i < state.numLeft(); ++i) {
1511                 int absIndex = state.baseIndex() + i;
1512 
1513                 if (set[absIndex].fAAFlags != GrQuadAAFlags::kNone ||
1514                     runningAA == GrAAType::kCoverage) {
1515 
1516                     if (i >= GrResourceProvider::MaxNumAAQuads()) {
1517                         // Here we either need to boost the AA type to kCoverage, but doing so with
1518                         // all the accumulated quads would overflow, or we have a set of AA quads
1519                         // that has just gotten too large. In either case, calve off the existing
1520                         // quads as their own TextureOp.
1521                         state.createOp(
1522                             set,
1523                             runningAA == GrAAType::kNone ? i : GrResourceProvider::MaxNumAAQuads(),
1524                             runningAA); // maybe downgrading AA here
1525                         clumped = true;
1526                         break;
1527                     }
1528 
1529                     runningAA = GrAAType::kCoverage;
1530                 } else if (runningAA == GrAAType::kNone) {
1531 
1532                     if (i >= GrResourceProvider::MaxNumNonAAQuads()) {
1533                         // Here we've found a consistent batch of non-AA quads that has gotten too
1534                         // large. Calve it off as its own TextureOp.
1535                         state.createOp(set, GrResourceProvider::MaxNumNonAAQuads(),
1536                                        GrAAType::kNone); // definitely downgrading AA here
1537                         clumped = true;
1538                         break;
1539                     }
1540                 }
1541             }
1542 
1543             if (!clumped) {
1544                 // We ran through the above loop w/o hitting a limit. Spit out this last clump of
1545                 // quads and call it a day.
1546                 state.createOp(set, state.numLeft(), runningAA); // maybe downgrading AA here
1547             }
1548         }
1549     }
1550 }
1551 
1552 } // namespace skgpu::v1
1553 
1554 #if GR_TEST_UTILS
1555 #include "include/gpu/GrRecordingContext.h"
1556 #include "src/gpu/GrProxyProvider.h"
1557 #include "src/gpu/GrRecordingContextPriv.h"
1558 
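// Test-only factory for GrDrawOpTest: builds a TextureOp with randomized proxy dimensions,
// origin, mipmap state, sampler filter/mipmap mode, AA type and edge flags, alpha type, and
// an optional subset rect.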
1559 GR_DRAW_OP_TEST_DEFINE(TextureOpImpl) {
1560     SkISize dims;
1561     dims.fHeight = random->nextULessThan(90) + 10;
1562     dims.fWidth = random->nextULessThan(90) + 10;
1563     auto origin = random->nextBool() ? kTopLeft_GrSurfaceOrigin : kBottomLeft_GrSurfaceOrigin;
1564     GrMipmapped mipMapped = random->nextBool() ? GrMipmapped::kYes : GrMipmapped::kNo;
1565     SkBackingFit fit = SkBackingFit::kExact;
1566     if (mipMapped == GrMipmapped::kNo) {
1567         fit = random->nextBool() ? SkBackingFit::kApprox : SkBackingFit::kExact;
1568     }
1569     const GrBackendFormat format =
1570             context->priv().caps()->getDefaultBackendFormat(GrColorType::kRGBA_8888,
1571                                                             GrRenderable::kNo);
1572     GrProxyProvider* proxyProvider = context->priv().proxyProvider();
1573     sk_sp<GrTextureProxy> proxy = proxyProvider->createProxy(
1574             format, dims, GrRenderable::kNo, 1, mipMapped, fit, SkBudgeted::kNo, GrProtected::kNo,
1575             GrInternalSurfaceFlags::kNone);
1576 
1577     SkRect rect = GrTest::TestRect(random);
1578     SkRect srcRect;
1579     srcRect.fLeft = random->nextRangeScalar(0.f, proxy->width() / 2.f);
1580     srcRect.fRight = random->nextRangeScalar(0.f, proxy->width()) + proxy->width() / 2.f;
1581     srcRect.fTop = random->nextRangeScalar(0.f, proxy->height() / 2.f);
1582     srcRect.fBottom = random->nextRangeScalar(0.f, proxy->height()) + proxy->height() / 2.f;
1583     SkMatrix viewMatrix = GrTest::TestMatrixPreservesRightAngles(random);
1584     SkPMColor4f color = SkPMColor4f::FromBytes_RGBA(SkColorToPremulGrColor(random->nextU()));
1585     GrSamplerState::Filter filter = (GrSamplerState::Filter)random->nextULessThan(
1586             static_cast<uint32_t>(GrSamplerState::Filter::kLast) + 1);
1587     GrSamplerState::MipmapMode mm = GrSamplerState::MipmapMode::kNone;
1588     if (mipMapped == GrMipmapped::kYes) {
1589         mm = (GrSamplerState::MipmapMode)random->nextULessThan(
1590                 static_cast<uint32_t>(GrSamplerState::MipmapMode::kLast) + 1);
1591     }
1592 
1593     auto texXform = GrTest::TestColorXform(random);
1594     GrAAType aaType = GrAAType::kNone;
1595     if (random->nextBool()) {
1596         aaType = (numSamples > 1) ? GrAAType::kMSAA : GrAAType::kCoverage;
1597     }
1598     GrQuadAAFlags aaFlags = GrQuadAAFlags::kNone;
1599     aaFlags |= random->nextBool() ? GrQuadAAFlags::kLeft : GrQuadAAFlags::kNone;
1600     aaFlags |= random->nextBool() ? GrQuadAAFlags::kTop : GrQuadAAFlags::kNone;
1601     aaFlags |= random->nextBool() ? GrQuadAAFlags::kRight : GrQuadAAFlags::kNone;
1602     aaFlags |= random->nextBool() ? GrQuadAAFlags::kBottom : GrQuadAAFlags::kNone;
1603     bool useSubset = random->nextBool();
1604     auto saturate = random->nextBool() ? skgpu::v1::TextureOp::Saturate::kYes
1605                                        : skgpu::v1::TextureOp::Saturate::kNo;
1606     GrSurfaceProxyView proxyView(
1607             std::move(proxy), origin,
1608             context->priv().caps()->getReadSwizzle(format, GrColorType::kRGBA_8888));
1609     auto alphaType = static_cast<SkAlphaType>(
1610             random->nextRangeU(kUnknown_SkAlphaType + 1, kLastEnum_SkAlphaType));
1611 
1612     DrawQuad quad = {GrQuad::MakeFromRect(rect, viewMatrix), GrQuad(srcRect), aaFlags};
1613     return skgpu::v1::TextureOp::Make(context, std::move(proxyView), alphaType,
1614                                       std::move(texXform), filter, mm, color, saturate,
1615                                       SkBlendMode::kSrcOver, aaType, &quad,
1616                                       useSubset ? &srcRect : nullptr);
1617 }
1618 
1619 #endif // GR_TEST_UTILS
1620