1 /*
2 * Copyright 2017 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include <new>
9
10 #include "include/core/SkPoint.h"
11 #include "include/core/SkPoint3.h"
12 #include "include/gpu/GrRecordingContext.h"
13 #include "include/private/SkFloatingPoint.h"
14 #include "include/private/SkTo.h"
15 #include "src/core/SkMathPriv.h"
16 #include "src/core/SkMatrixPriv.h"
17 #include "src/core/SkRectPriv.h"
18 #include "src/gpu/GrAppliedClip.h"
19 #include "src/gpu/GrCaps.h"
20 #include "src/gpu/GrDrawOpTest.h"
21 #include "src/gpu/GrGeometryProcessor.h"
22 #include "src/gpu/GrGpu.h"
23 #include "src/gpu/GrMemoryPool.h"
24 #include "src/gpu/GrOpFlushState.h"
25 #include "src/gpu/GrRecordingContextPriv.h"
26 #include "src/gpu/GrResourceProvider.h"
27 #include "src/gpu/GrResourceProviderPriv.h"
28 #include "src/gpu/GrShaderCaps.h"
29 #include "src/gpu/GrTexture.h"
30 #include "src/gpu/GrTextureProxy.h"
31 #include "src/gpu/SkGr.h"
32 #include "src/gpu/effects/GrBlendFragmentProcessor.h"
33 #include "src/gpu/effects/generated/GrClampFragmentProcessor.h"
34 #include "src/gpu/geometry/GrQuad.h"
35 #include "src/gpu/geometry/GrQuadBuffer.h"
36 #include "src/gpu/geometry/GrQuadUtils.h"
37 #include "src/gpu/glsl/GrGLSLVarying.h"
38 #include "src/gpu/ops/GrFillRectOp.h"
39 #include "src/gpu/ops/GrMeshDrawOp.h"
40 #include "src/gpu/ops/GrQuadPerEdgeAA.h"
41 #include "src/gpu/ops/GrSimpleMeshDrawOpHelper.h"
42 #include "src/gpu/ops/GrTextureOp.h"
43
44 namespace {
45
46 using Subset = GrQuadPerEdgeAA::Subset;
47 using VertexSpec = GrQuadPerEdgeAA::VertexSpec;
48 using ColorType = GrQuadPerEdgeAA::ColorType;
49
50 // Extracts lengths of vertical and horizontal edges of an axis-aligned quad. "width" is the edge
51 // between v0 and v2 (or v1 and v3), "height" is the edge between v0 and v1 (or v2 and v3).
52 static SkSize axis_aligned_quad_size(const GrQuad& quad) {
53 SkASSERT(quad.quadType() == GrQuad::Type::kAxisAligned);
54 // Simplification of the regular edge-length equation: since the quad is axis-aligned, one delta per edge is zero, so |dx| + |dy| gives the edge length without a sqrt
55 float dw = sk_float_abs(quad.x(2) - quad.x(0)) + sk_float_abs(quad.y(2) - quad.y(0));
56 float dh = sk_float_abs(quad.x(1) - quad.x(0)) + sk_float_abs(quad.y(1) - quad.y(0));
57 return {dw, dh};
58 }
59
60 static std::tuple<bool /* filter */,
61 bool /* mipmap */>
62 filter_and_mm_have_effect(const GrQuad& srcQuad, const GrQuad& dstQuad) {
63 // If not axis-aligned in src or dst, then always say it has an effect
64 if (srcQuad.quadType() != GrQuad::Type::kAxisAligned ||
65 dstQuad.quadType() != GrQuad::Type::kAxisAligned) {
66 return {true, true};
67 }
68
69 SkRect srcRect;
70 SkRect dstRect;
71 if (srcQuad.asRect(&srcRect) && dstQuad.asRect(&dstRect)) {
72 // Disable filtering when there is no scaling (width and height are the same), and the
73 // top-left corners have the same fraction (so src and dst snap to the pixel grid
74 // identically).
75 SkASSERT(srcRect.isSorted());
76 bool filter = srcRect.width() != dstRect.width() || srcRect.height() != dstRect.height() ||
77 SkScalarFraction(srcRect.fLeft) != SkScalarFraction(dstRect.fLeft) ||
78 SkScalarFraction(srcRect.fTop) != SkScalarFraction(dstRect.fTop);
79 bool mm = srcRect.width() > dstRect.width() || srcRect.height() > dstRect.height();
80 return {filter, mm};
81 }
82 // Extract edge lengths
83 SkSize srcSize = axis_aligned_quad_size(srcQuad);
84 SkSize dstSize = axis_aligned_quad_size(dstQuad);
85 // Although the quads are axis-aligned, the local coordinate system is transformed such
86 // that fractionally-aligned sample centers will not align with the device coordinate system.
87 // So only disable filtering when the edges are the same length and the 0th vertices of both
88 // srcQuad and dstQuad are integer aligned.
89 bool filter = srcSize != dstSize ||
90 !SkScalarIsInt(srcQuad.x(0)) ||
91 !SkScalarIsInt(srcQuad.y(0)) ||
92 !SkScalarIsInt(dstQuad.x(0)) ||
93 !SkScalarIsInt(dstQuad.y(0));
94 bool mm = srcSize.fWidth > dstSize.fWidth || srcSize.fHeight > dstSize.fHeight;
95 return {filter, mm};
96 }
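// For illustration: drawing a 64x64 src rect to a 64x64 dst rect whose corners sit on integer
// coordinates yields {false, false}, so both filtering and mipmapping can be dropped; shrinking
// the dst to 32x32 yields {true, true}, since minification requires filtering and can use mips.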
97
98 // Describes function for normalizing src coords: [x * iw, y * ih + yOffset] can represent
99 // regular and rectangular textures, w/ or w/o origin correction.
100 struct NormalizationParams {
101 float fIW; // 1 / width of texture, or 1.0 for texture rectangles
102 float fInvH; // 1 / height of texture, or 1.0 for tex rects; negated if bottom-left origin
103 float fYOffset; // 0 for top-left origin, height of [normalized] tex if bottom-left
104 };
105 static NormalizationParams proxy_normalization_params(const GrSurfaceProxy* proxy,
106 GrSurfaceOrigin origin) {
107 // Whether or not the proxy is instantiated, this is the size its texture will be, so we can
108 // normalize the src coordinates up front.
109 SkISize dimensions = proxy->backingStoreDimensions();
110 float iw, ih, h;
111 if (proxy->backendFormat().textureType() == GrTextureType::kRectangle) {
112 iw = ih = 1.f;
113 h = dimensions.height();
114 } else {
115 iw = 1.f / dimensions.width();
116 ih = 1.f / dimensions.height();
117 h = 1.f;
118 }
119
120 if (origin == kBottomLeft_GrSurfaceOrigin) {
121 return {iw, -ih, h};
122 } else {
123 return {iw, ih, 0.0f};
124 }
125 }
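// Worked example: a non-rectangle 256x128 texture with a bottom-left origin yields
// {fIW = 1/256, fInvH = -1/128, fYOffset = 1}, so src y = 0 maps to 1.0 and y = 128 maps to 0.0,
// i.e. normalization and the y-flip happen in a single multiply-add.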
126
127 // Normalize the subset. If 'subsetRect' is null, it is assumed no subset constraint is desired,
128 // so a sufficiently large rect is returned even if the quad ends up batched with an op that uses
129 // subsets overall. When there is a subset it will be inset based on the filter mode. Normalization
130 // and y-flipping are applied as indicated by NormalizationParams.
131 static SkRect normalize_and_inset_subset(GrSamplerState::Filter filter,
132 const NormalizationParams& params,
133 const SkRect* subsetRect) {
134 static constexpr SkRect kLargeRect = {-100000, -100000, 1000000, 1000000};
135 if (!subsetRect) {
136 // Either the quad has no subset constraint and is batched with a subset constrained op
137 // (in which case we want a subset that doesn't restrict normalized tex coords), or the
138 // entire op doesn't use the subset, in which case the returned value is ignored.
139 return kLargeRect;
140 }
141
142 auto ltrb = skvx::Vec<4, float>::Load(subsetRect);
143 auto flipHi = skvx::Vec<4, float>({1.f, 1.f, -1.f, -1.f});
144 if (filter == GrSamplerState::Filter::kNearest) {
145 // Make sure our insetting puts us at pixel centers.
146 ltrb = skvx::floor(ltrb*flipHi)*flipHi;
147 }
148 // Inset with pin to the rect center.
149 ltrb += skvx::Vec<4, float>({.5f, .5f, -.5f, -.5f});
150 auto mid = (skvx::shuffle<2, 3, 0, 1>(ltrb) + ltrb)*0.5f;
151 ltrb = skvx::min(ltrb*flipHi, mid*flipHi)*flipHi;
152
153 // Normalize and offset
154 ltrb = ltrb * skvx::Vec<4, float>{params.fIW, params.fInvH, params.fIW, params.fInvH} +
155 skvx::Vec<4, float>{0.f, params.fYOffset, 0.f, params.fYOffset};
156 if (params.fInvH < 0.f) {
157 // Flip top and bottom to keep the rect sorted when loaded back to SkRect.
158 ltrb = skvx::shuffle<0, 3, 2, 1>(ltrb);
159 }
160
161 SkRect out;
162 ltrb.store(&out);
163 return out;
164 }
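// Note on the flipHi trick above: multiplying LTRB by {1, 1, -1, -1} lets one min() clamp L/T
// upward and R/B downward toward the rect center. E.g. with a linear filter, a subset of
// {10, 10, 10.6, 20} insets to {10.5, 10.5, 10.1, 19.5}, which is unsorted in x; pinning to the
// center yields {10.3, 10.5, 10.3, 19.5} (before normalization), so the rect never inverts.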
165
166 // Normalizes logical src coords and corrects for origin
167 static void normalize_src_quad(const NormalizationParams& params,
168 GrQuad* srcQuad) {
169 // The src quad should not have any perspective
170 SkASSERT(!srcQuad->hasPerspective());
171 skvx::Vec<4, float> xs = srcQuad->x4f() * params.fIW;
172 skvx::Vec<4, float> ys = srcQuad->y4f() * params.fInvH + params.fYOffset;
173 xs.store(srcQuad->xs());
174 ys.store(srcQuad->ys());
175 }
176
177 // Count the number of proxy runs in the entry set. This usually is already computed by
178 // SkGpuDevice, but when the BatchSizeLimiter chops the set up it must determine a new proxy count
179 // for each split.
180 static int proxy_run_count(const GrSurfaceDrawContext::TextureSetEntry set[], int count) {
181 int actualProxyRunCount = 0;
182 const GrSurfaceProxy* lastProxy = nullptr;
183 for (int i = 0; i < count; ++i) {
184 if (set[i].fProxyView.proxy() != lastProxy) {
185 actualProxyRunCount++;
186 lastProxy = set[i].fProxyView.proxy();
187 }
188 }
189 return actualProxyRunCount;
190 }
191
192 static bool safe_to_ignore_subset_rect(GrAAType aaType, GrSamplerState::Filter filter,
193 const DrawQuad& quad, const SkRect& subsetRect) {
194 // If the device and local quads are both axis-aligned, and filtering is off, the local quad
195 // can push all the way up to the edges of the subset rect and the sampler shouldn't
196 // overshoot. Unfortunately, antialiasing adds enough jitter that we can only rely on this in
197 // the non-antialiased case.
198 SkRect localBounds = quad.fLocal.bounds();
199 if (aaType == GrAAType::kNone &&
200 filter == GrSamplerState::Filter::kNearest &&
201 quad.fDevice.quadType() == GrQuad::Type::kAxisAligned &&
202 quad.fLocal.quadType() == GrQuad::Type::kAxisAligned &&
203 subsetRect.contains(localBounds)) {
204
205 return true;
206 }
207
208 // If the subset rect is inset by at least 0.5 pixels into the local quad's bounds, the
209 // sampler shouldn't overshoot, even when antialiasing and filtering is taken into account.
210 if (subsetRect.makeInset(0.5f, 0.5f).contains(localBounds)) {
211 return true;
212 }
213
214 // The subset rect cannot be ignored safely.
215 return false;
216 }
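// Roughly speaking, the 0.5px inset works because a bilinear fetch only reads texels within half
// a pixel of the sample coordinate, so a subset that far inside the local bounds should not be
// overshot.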
217
218 /**
219 * Op that implements GrTextureOp::Make. It draws textured quads. Each quad can modulate the
220 * texture by a color. The blend with the destination is always src-over. The edges are non-AA.
221 */
222 class TextureOp final : public GrMeshDrawOp {
223 public:
224 static GrOp::Owner Make(GrRecordingContext* context,
225 GrSurfaceProxyView proxyView,
226 sk_sp<GrColorSpaceXform> textureXform,
227 GrSamplerState::Filter filter,
228 GrSamplerState::MipmapMode mm,
229 const SkPMColor4f& color,
230 GrTextureOp::Saturate saturate,
231 GrAAType aaType,
232 DrawQuad* quad,
233 const SkRect* subset) {
234
235 return GrOp::Make<TextureOp>(context, std::move(proxyView), std::move(textureXform),
236 filter, mm, color, saturate, aaType, quad, subset);
237 }
238
239 static GrOp::Owner Make(GrRecordingContext* context,
240 GrSurfaceDrawContext::TextureSetEntry set[],
241 int cnt,
242 int proxyRunCnt,
243 GrSamplerState::Filter filter,
244 GrSamplerState::MipmapMode mm,
245 GrTextureOp::Saturate saturate,
246 GrAAType aaType,
247 SkCanvas::SrcRectConstraint constraint,
248 const SkMatrix& viewMatrix,
249 sk_sp<GrColorSpaceXform> textureColorSpaceXform) {
250 // Allocate size based on proxyRunCnt, since that determines number of ViewCountPairs.
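// The op stores one ViewCountPair inline (fViewCountPairs[1] at the end of the class is the
// start of a trailing array), so only (proxyRunCnt - 1) extra pairs need additional memory.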
251 SkASSERT(proxyRunCnt <= cnt);
252 return GrOp::MakeWithExtraMemory<TextureOp>(
253 context, sizeof(ViewCountPair) * (proxyRunCnt - 1),
254 set, cnt, proxyRunCnt, filter, mm, saturate, aaType, constraint,
255 viewMatrix, std::move(textureColorSpaceXform));
256 }
257
258 ~TextureOp() override {
259 for (unsigned p = 1; p < fMetadata.fProxyCount; ++p) {
260 fViewCountPairs[p].~ViewCountPair();
261 }
262 }
263
264 const char* name() const override { return "TextureOp"; }
265
266 void visitProxies(const VisitProxyFunc& func) const override {
267 bool mipped = (fMetadata.mipmapMode() != GrSamplerState::MipmapMode::kNone);
268 for (unsigned p = 0; p < fMetadata.fProxyCount; ++p) {
269 func(fViewCountPairs[p].fProxy.get(), GrMipmapped(mipped));
270 }
271 if (fDesc && fDesc->fProgramInfo) {
272 fDesc->fProgramInfo->visitFPProxies(func);
273 }
274 }
275
276 #ifdef SK_DEBUG
277 static void ValidateResourceLimits() {
278 // The op implementation has an upper bound on the number of quads that it can represent.
279 // However, the resource manager imposes its own limit on the number of quads, which should
280 // always be lower than the numerical limit this op can hold.
281 using CountStorage = decltype(Metadata::fTotalQuadCount);
282 CountStorage maxQuadCount = std::numeric_limits<CountStorage>::max();
283 // GrResourceProvider::Max...() is typed as int, so don't compare across signed/unsigned.
284 int resourceLimit = SkTo<int>(maxQuadCount);
285 SkASSERT(GrResourceProvider::MaxNumAAQuads() <= resourceLimit &&
286 GrResourceProvider::MaxNumNonAAQuads() <= resourceLimit);
287 }
288 #endif
289
290 GrProcessorSet::Analysis finalize(const GrCaps& caps, const GrAppliedClip*,
291 GrClampType clampType) override {
292 SkASSERT(fMetadata.colorType() == ColorType::kNone);
293 auto iter = fQuads.metadata();
294 while(iter.next()) {
295 auto colorType = GrQuadPerEdgeAA::MinColorType(iter->fColor);
296 colorType = std::max(static_cast<GrQuadPerEdgeAA::ColorType>(fMetadata.fColorType),
297 colorType);
298 if (caps.reducedShaderMode()) {
299 colorType = std::max(colorType, GrQuadPerEdgeAA::ColorType::kByte);
300 }
301 fMetadata.fColorType = static_cast<uint16_t>(colorType);
302 }
303 return GrProcessorSet::EmptySetAnalysis();
304 }
305
306 FixedFunctionFlags fixedFunctionFlags() const override {
307 return fMetadata.aaType() == GrAAType::kMSAA ? FixedFunctionFlags::kUsesHWAA
308 : FixedFunctionFlags::kNone;
309 }
310
311 DEFINE_OP_CLASS_ID
312
313 private:
314 friend class ::GrOp;
315
316 struct ColorSubsetAndAA {
317 ColorSubsetAndAA(const SkPMColor4f& color, const SkRect& subsetRect, GrQuadAAFlags aaFlags)
318 : fColor(color)
319 , fSubsetRect(subsetRect)
320 , fAAFlags(static_cast<uint16_t>(aaFlags)) {
321 SkASSERT(fAAFlags == static_cast<uint16_t>(aaFlags));
322 }
323
324 SkPMColor4f fColor;
325 // If the op doesn't use subsets, this is ignored. If the op uses subsets and the specific
326 // entry does not, this rect will equal kLargeRect, so it automatically has no effect.
327 SkRect fSubsetRect;
328 unsigned fAAFlags : 4;
329
330 GrQuadAAFlags aaFlags() const { return static_cast<GrQuadAAFlags>(fAAFlags); }
331 };
332
333 struct ViewCountPair {
334 // Normally this would be a GrSurfaceProxyView, but GrTextureOp applies the GrOrigin right
335 // away so it doesn't need to be stored, and all ViewCountPairs in an op have the same
336 // swizzle so that is stored in the op metadata.
337 sk_sp<GrSurfaceProxy> fProxy;
338 int fQuadCnt;
339 };
340
341 // TextureOp and ViewCountPair are 8 byte aligned. This is packed into 8 bytes to minimally
342 // increase the size of the op; increasing the op size can have a surprising impact on
343 // performance (since texture ops are among the most commonly used ops in an app).
344 struct Metadata {
345 // AAType must be filled after initialization; ColorType is determined in finalize()
346 Metadata(const GrSwizzle& swizzle,
347 GrSamplerState::Filter filter,
348 GrSamplerState::MipmapMode mm,
349 GrQuadPerEdgeAA::Subset subset,
350 GrTextureOp::Saturate saturate)
351 : fSwizzle(swizzle)
352 , fProxyCount(1)
353 , fTotalQuadCount(1)
354 , fFilter(static_cast<uint16_t>(filter))
355 , fMipmapMode(static_cast<uint16_t>(mm))
356 , fAAType(static_cast<uint16_t>(GrAAType::kNone))
357 , fColorType(static_cast<uint16_t>(ColorType::kNone))
358 , fSubset(static_cast<uint16_t>(subset))
359 , fSaturate(static_cast<uint16_t>(saturate)) {}
360
361 GrSwizzle fSwizzle; // sizeof(GrSwizzle) == uint16_t
362 uint16_t fProxyCount;
363 // This will be >= fProxyCount, since a proxy may be drawn multiple times
364 uint16_t fTotalQuadCount;
365
366 // These must be based on uint16_t to help MSVC pack the bitfields optimally
367 uint16_t fFilter : 2; // GrSamplerState::Filter
368 uint16_t fMipmapMode : 2; // GrSamplerState::MipmapMode
369 uint16_t fAAType : 2; // GrAAType
370 uint16_t fColorType : 2; // GrQuadPerEdgeAA::ColorType
371 uint16_t fSubset : 1; // bool
372 uint16_t fSaturate : 1; // bool
373 uint16_t fUnused : 6; // # of bits left before Metadata exceeds 8 bytes
374
375 GrSamplerState::Filter filter() const {
376 return static_cast<GrSamplerState::Filter>(fFilter);
377 }
378 GrSamplerState::MipmapMode mipmapMode() const {
379 return static_cast<GrSamplerState::MipmapMode>(fMipmapMode);
380 }
381 GrAAType aaType() const { return static_cast<GrAAType>(fAAType); }
382 ColorType colorType() const { return static_cast<ColorType>(fColorType); }
383 Subset subset() const { return static_cast<Subset>(fSubset); }
384 GrTextureOp::Saturate saturate() const {
385 return static_cast<GrTextureOp::Saturate>(fSaturate);
386 }
387
388 static_assert(GrSamplerState::kFilterCount <= 4);
389 static_assert(kGrAATypeCount <= 4);
390 static_assert(GrQuadPerEdgeAA::kColorTypeCount <= 4);
391 };
392 static_assert(sizeof(Metadata) == 8);
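// For reference, the 8 bytes break down as 2 (fSwizzle) + 2 (fProxyCount) + 2 (fTotalQuadCount)
// + 2 (the 16 bits of bitfields above: 2+2+2+2+1+1 used, 6 spare).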
393
394 // This descriptor is used to store the draw info we decide on during on(Pre)PrepareDraws. We
395 // store the data in a separate struct in order to minimize the size of the TextureOp.
396 // Historically, increasing the TextureOp's size has caused surprising perf regressions, but we
397 // may want to re-evaluate whether this is still necessary.
398 //
399 // In the onPrePrepareDraws case it is allocated in the creation-time opData arena, and
400 // allocatePrePreparedVertices is also called.
401 //
402 // In the onPrepareDraws case this descriptor is allocated in the flush-time arena (i.e., as
403 // part of the flushState).
404 struct Desc {
405 VertexSpec fVertexSpec;
406 int fNumProxies = 0;
407 int fNumTotalQuads = 0;
408
409 // This member variable is only used by 'onPrePrepareDraws'.
410 char* fPrePreparedVertices = nullptr;
411
412 GrProgramInfo* fProgramInfo = nullptr;
413
414 sk_sp<const GrBuffer> fIndexBuffer;
415 sk_sp<const GrBuffer> fVertexBuffer;
416 int fBaseVertex;
417
418 // How big should 'fVertices' be to hold all the vertex data?
419 size_t totalSizeInBytes() const {
420 return this->totalNumVertices() * fVertexSpec.vertexSize();
421 }
422
423 int totalNumVertices() const {
424 return fNumTotalQuads * fVertexSpec.verticesPerQuad();
425 }
426
427 void allocatePrePreparedVertices(SkArenaAlloc* arena) {
428 fPrePreparedVertices = arena->makeArrayDefault<char>(this->totalSizeInBytes());
429 }
430 };
431 // If subsetRect is not null it will be used to apply a strict src rect-style constraint.
432 TextureOp(GrSurfaceProxyView proxyView,
433 sk_sp<GrColorSpaceXform> textureColorSpaceXform,
434 GrSamplerState::Filter filter,
435 GrSamplerState::MipmapMode mm,
436 const SkPMColor4f& color,
437 GrTextureOp::Saturate saturate,
438 GrAAType aaType,
439 DrawQuad* quad,
440 const SkRect* subsetRect)
441 : INHERITED(ClassID())
442 , fQuads(1, true /* includes locals */)
443 , fTextureColorSpaceXform(std::move(textureColorSpaceXform))
444 , fDesc(nullptr)
445 , fMetadata(proxyView.swizzle(), filter, mm, Subset(!!subsetRect), saturate) {
446 // Clean up disparities between the overall aa type and edge configuration and apply
447 // optimizations based on the rect and matrix when appropriate
448 GrQuadUtils::ResolveAAType(aaType, quad->fEdgeFlags, quad->fDevice,
449 &aaType, &quad->fEdgeFlags);
450 fMetadata.fAAType = static_cast<uint16_t>(aaType);
451
452 // We expect our caller to have already caught this optimization.
453 SkASSERT(!subsetRect ||
454 !subsetRect->contains(proxyView.proxy()->backingStoreBoundsRect()));
455
456 // We may have had a strict constraint with nearest filter solely due to possible AA bloat.
457 // Try to identify cases where the subsetting isn't actually necessary, and skip it.
458 if (subsetRect) {
459 if (safe_to_ignore_subset_rect(aaType, filter, *quad, *subsetRect)) {
460 subsetRect = nullptr;
461 fMetadata.fSubset = static_cast<uint16_t>(Subset::kNo);
462 }
463 }
464
465 // Normalize src coordinates and the subset (if set)
466 NormalizationParams params = proxy_normalization_params(proxyView.proxy(),
467 proxyView.origin());
468 normalize_src_quad(params, &quad->fLocal);
469 SkRect subset = normalize_and_inset_subset(filter, params, subsetRect);
470
471 // Set bounds before clipping so we don't have to worry about unioning the bounds of
472 // the two potential quads (GrQuad::bounds() is perspective-safe).
473 this->setBounds(quad->fDevice.bounds(), HasAABloat(aaType == GrAAType::kCoverage),
474 IsHairline::kNo);
475
476 int quadCount = this->appendQuad(quad, color, subset);
477 fViewCountPairs[0] = {proxyView.detachProxy(), quadCount};
478 }
479
480 TextureOp(GrSurfaceDrawContext::TextureSetEntry set[],
481 int cnt,
482 int proxyRunCnt,
483 GrSamplerState::Filter filter,
484 GrSamplerState::MipmapMode mm,
485 GrTextureOp::Saturate saturate,
486 GrAAType aaType,
487 SkCanvas::SrcRectConstraint constraint,
488 const SkMatrix& viewMatrix,
489 sk_sp<GrColorSpaceXform> textureColorSpaceXform)
490 : INHERITED(ClassID())
491 , fQuads(cnt, true /* includes locals */)
492 , fTextureColorSpaceXform(std::move(textureColorSpaceXform))
493 , fDesc(nullptr)
494 , fMetadata(set[0].fProxyView.swizzle(),
495 GrSamplerState::Filter::kNearest,
496 GrSamplerState::MipmapMode::kNone,
497 Subset::kNo,
498 saturate) {
499 // Update counts to reflect the batch op
500 fMetadata.fProxyCount = SkToUInt(proxyRunCnt);
501 fMetadata.fTotalQuadCount = SkToUInt(cnt);
502
503 SkRect bounds = SkRectPriv::MakeLargestInverted();
504
505 GrAAType netAAType = GrAAType::kNone; // aa type maximally compatible with all dst rects
506 Subset netSubset = Subset::kNo;
507 GrSamplerState::Filter netFilter = GrSamplerState::Filter::kNearest;
508 GrSamplerState::MipmapMode netMM = GrSamplerState::MipmapMode::kNone;
509
510 const GrSurfaceProxy* curProxy = nullptr;
511
512 // 'q' is the index in 'set' and fQuadBuffer; 'p' is the index in fViewCountPairs and only
513 // increases when set[q]'s proxy changes.
514 int p = 0;
515 for (int q = 0; q < cnt; ++q) {
516 SkASSERT(mm == GrSamplerState::MipmapMode::kNone ||
517 (set[0].fProxyView.proxy()->asTextureProxy()->mipmapped() ==
518 GrMipmapped::kYes));
519 if (q == 0) {
520 // We do not placement new the first ViewCountPair since that one is allocated and
521 // initialized as part of the GrTextureOp creation.
522 fViewCountPairs[0].fProxy = set[0].fProxyView.detachProxy();
523 fViewCountPairs[0].fQuadCnt = 0;
524 curProxy = fViewCountPairs[0].fProxy.get();
525 } else if (set[q].fProxyView.proxy() != curProxy) {
526 // We must placement new the ViewCountPairs here so that the sk_sps in the
527 // GrSurfaceProxyView get initialized properly.
528 new(&fViewCountPairs[++p])ViewCountPair({set[q].fProxyView.detachProxy(), 0});
529
530 curProxy = fViewCountPairs[p].fProxy.get();
531 SkASSERT(GrTextureProxy::ProxiesAreCompatibleAsDynamicState(
532 curProxy, fViewCountPairs[0].fProxy.get()));
533 SkASSERT(fMetadata.fSwizzle == set[q].fProxyView.swizzle());
534 } // else another quad referencing the same proxy
535
536 SkMatrix ctm = viewMatrix;
537 if (set[q].fPreViewMatrix) {
538 ctm.preConcat(*set[q].fPreViewMatrix);
539 }
540
541 // Use dstRect/srcRect unless dstClip is provided, in which case derive new source
542 // coordinates by mapping dstClipQuad by the dstRect to srcRect transform.
543 DrawQuad quad;
544 if (set[q].fDstClipQuad) {
545 quad.fDevice = GrQuad::MakeFromSkQuad(set[q].fDstClipQuad, ctm);
546
547 SkPoint srcPts[4];
548 GrMapRectPoints(set[q].fDstRect, set[q].fSrcRect, set[q].fDstClipQuad, srcPts, 4);
549 quad.fLocal = GrQuad::MakeFromSkQuad(srcPts, SkMatrix::I());
550 } else {
551 quad.fDevice = GrQuad::MakeFromRect(set[q].fDstRect, ctm);
552 quad.fLocal = GrQuad(set[q].fSrcRect);
553 }
554
555 if (netFilter != filter || netMM != mm) {
556 // The only way netFilter != filter is if linear is requested and we haven't yet
557 // found a quad that requires linear (so net is still nearest). Similar for mip
558 // mapping.
559 SkASSERT(filter == netFilter ||
560 (netFilter == GrSamplerState::Filter::kNearest && filter > netFilter));
561 SkASSERT(mm == netMM ||
562 (netMM == GrSamplerState::MipmapMode::kNone && mm > netMM));
563 auto [mustFilter, mustMM] = filter_and_mm_have_effect(quad.fLocal, quad.fDevice);
564 if (mustFilter && filter != GrSamplerState::Filter::kNearest) {
565 netFilter = filter;
566 }
567 if (mustMM && mm != GrSamplerState::MipmapMode::kNone) {
568 netMM = mm;
569 }
570 }
571
572 // Update overall bounds of the op as the union of all quads
573 bounds.joinPossiblyEmptyRect(quad.fDevice.bounds());
574
575 // Determine the AA type for the quad, then merge with net AA type
576 GrAAType aaForQuad;
577 GrQuadUtils::ResolveAAType(aaType, set[q].fAAFlags, quad.fDevice,
578 &aaForQuad, &quad.fEdgeFlags);
579
580 // Resolve sets aaForQuad to aaType or kNone; it never switches between AA methods
581 SkASSERT(aaForQuad == GrAAType::kNone || aaForQuad == aaType);
582 if (netAAType == GrAAType::kNone && aaForQuad != GrAAType::kNone) {
583 netAAType = aaType;
584 }
585
586 // Calculate metadata for the entry
587 const SkRect* subsetForQuad = nullptr;
588 if (constraint == SkCanvas::kStrict_SrcRectConstraint) {
589 // Check (briefly) if the subset rect is actually needed for this set entry.
590 SkRect* subsetRect = &set[q].fSrcRect;
591 if (!subsetRect->contains(curProxy->backingStoreBoundsRect())) {
592 if (!safe_to_ignore_subset_rect(aaForQuad, filter, quad, *subsetRect)) {
593 netSubset = Subset::kYes;
594 subsetForQuad = subsetRect;
595 }
596 }
597 }
598
599 // Normalize the src quads and apply origin
600 NormalizationParams proxyParams = proxy_normalization_params(
601 curProxy, set[q].fProxyView.origin());
602 normalize_src_quad(proxyParams, &quad.fLocal);
603
604 // This subset may represent a no-op, otherwise it will have the origin and dimensions
605 // of the texture applied to it. Insetting for bilinear filtering is deferred until
606 // on[Pre]Prepare so that the overall filter can be lazily determined.
607 SkRect subset = normalize_and_inset_subset(filter, proxyParams, subsetForQuad);
608
609 // Always append a quad (or 2 if perspective clipped); it may just refer back to a prior
610 // ViewCountPair (this frequently happens when Chrome draws 9-patches).
611 fViewCountPairs[p].fQuadCnt += this->appendQuad(&quad, set[q].fColor, subset);
612 }
613 // The # of proxy switches should match what was provided (+1 because we incremented p
614 // when a new proxy was encountered).
615 SkASSERT((p + 1) == fMetadata.fProxyCount);
616 SkASSERT(fQuads.count() == fMetadata.fTotalQuadCount);
617
618 fMetadata.fAAType = static_cast<uint16_t>(netAAType);
619 fMetadata.fFilter = static_cast<uint16_t>(netFilter);
620 fMetadata.fSubset = static_cast<uint16_t>(netSubset);
621
622 this->setBounds(bounds, HasAABloat(netAAType == GrAAType::kCoverage), IsHairline::kNo);
623 }
624
625 int appendQuad(DrawQuad* quad, const SkPMColor4f& color, const SkRect& subset) {
626 DrawQuad extra;
627 // Only clip when there's anti-aliasing. When non-aa, the GPU clips just fine and there's
628 // no inset/outset math that requires w > 0.
629 int quadCount = quad->fEdgeFlags != GrQuadAAFlags::kNone
630 ? GrQuadUtils::ClipToW0(quad, &extra)
631 : 1;
632 if (quadCount == 0) {
633 // We can't discard the op at this point, but disable AA flags so it won't go through
634 // inset/outset processing
635 quad->fEdgeFlags = GrQuadAAFlags::kNone;
636 quadCount = 1;
637 }
638 fQuads.append(quad->fDevice, {color, subset, quad->fEdgeFlags}, &quad->fLocal);
639 if (quadCount > 1) {
640 fQuads.append(extra.fDevice, {color, subset, extra.fEdgeFlags}, &extra.fLocal);
641 fMetadata.fTotalQuadCount++;
642 }
643 return quadCount;
644 }
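// For example, a perspective quad that crosses w = 0 is split by ClipToW0 into two device-space
// quads; both share the same color/subset metadata and the same ViewCountPair, which is why the
// returned count (1 or 2) is added to fQuadCnt by the callers rather than assumed to be 1.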
645
646 GrProgramInfo* programInfo() override {
647 // Although this Op implements its own onPrePrepareDraws, it calls GrMeshDrawOp's version, so
648 // this entry point will be called.
649 return (fDesc) ? fDesc->fProgramInfo : nullptr;
650 }
651
652 void onCreateProgramInfo(const GrCaps* caps,
653 SkArenaAlloc* arena,
654 const GrSurfaceProxyView& writeView,
655 GrAppliedClip&& appliedClip,
656 const GrXferProcessor::DstProxyView& dstProxyView,
657 GrXferBarrierFlags renderPassXferBarriers,
658 GrLoadOp colorLoadOp) override {
659 SkASSERT(fDesc);
660
661 GrGeometryProcessor* gp;
662
663 {
664 const GrBackendFormat& backendFormat =
665 fViewCountPairs[0].fProxy->backendFormat();
666
667 GrSamplerState samplerState = GrSamplerState(GrSamplerState::WrapMode::kClamp,
668 fMetadata.filter());
669
670 gp = GrQuadPerEdgeAA::MakeTexturedProcessor(
671 arena, fDesc->fVertexSpec, *caps->shaderCaps(), backendFormat, samplerState,
672 fMetadata.fSwizzle, std::move(fTextureColorSpaceXform), fMetadata.saturate());
673
674 SkASSERT(fDesc->fVertexSpec.vertexSize() == gp->vertexStride());
675 }
676
677 auto pipelineFlags = (GrAAType::kMSAA == fMetadata.aaType()) ?
678 GrPipeline::InputFlags::kHWAntialias : GrPipeline::InputFlags::kNone;
679
680 fDesc->fProgramInfo = GrSimpleMeshDrawOpHelper::CreateProgramInfo(
681 caps, arena, writeView, std::move(appliedClip), dstProxyView, gp,
682 GrProcessorSet::MakeEmptySet(), fDesc->fVertexSpec.primitiveType(),
683 renderPassXferBarriers, colorLoadOp, pipelineFlags);
684 }
685
686 void onPrePrepareDraws(GrRecordingContext* context,
687 const GrSurfaceProxyView& writeView,
688 GrAppliedClip* clip,
689 const GrXferProcessor::DstProxyView& dstProxyView,
690 GrXferBarrierFlags renderPassXferBarriers,
691 GrLoadOp colorLoadOp) override {
692 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
693
694 SkDEBUGCODE(this->validate();)
695 SkASSERT(!fDesc);
696
697 SkArenaAlloc* arena = context->priv().recordTimeAllocator();
698
699 fDesc = arena->make<Desc>();
700 this->characterize(fDesc);
701 fDesc->allocatePrePreparedVertices(arena);
702 FillInVertices(*context->priv().caps(), this, fDesc, fDesc->fPrePreparedVertices);
703
704 // This will call onCreateProgramInfo and register the created program with the DDL.
705 this->INHERITED::onPrePrepareDraws(context, writeView, clip, dstProxyView,
706 renderPassXferBarriers, colorLoadOp);
707 }
708
709 static void FillInVertices(const GrCaps& caps, TextureOp* texOp, Desc* desc, char* vertexData) {
710 SkASSERT(vertexData);
711
712 int totQuadsSeen = 0;
713 SkDEBUGCODE(int totVerticesSeen = 0;)
714 SkDEBUGCODE(const size_t vertexSize = desc->fVertexSpec.vertexSize());
715
716 GrQuadPerEdgeAA::Tessellator tessellator(desc->fVertexSpec, vertexData);
717 for (const auto& op : ChainRange<TextureOp>(texOp)) {
718 auto iter = op.fQuads.iterator();
719 for (unsigned p = 0; p < op.fMetadata.fProxyCount; ++p) {
720 const int quadCnt = op.fViewCountPairs[p].fQuadCnt;
721 SkDEBUGCODE(int meshVertexCnt = quadCnt * desc->fVertexSpec.verticesPerQuad());
722
723 for (int i = 0; i < quadCnt && iter.next(); ++i) {
724 SkASSERT(iter.isLocalValid());
725 const ColorSubsetAndAA& info = iter.metadata();
726
727 tessellator.append(iter.deviceQuad(), iter.localQuad(), info.fColor,
728 info.fSubsetRect, info.aaFlags());
729 }
730
731 SkASSERT((totVerticesSeen + meshVertexCnt) * vertexSize
732 == (size_t)(tessellator.vertices() - vertexData));
733
734 totQuadsSeen += quadCnt;
735 SkDEBUGCODE(totVerticesSeen += meshVertexCnt);
736 SkASSERT(totQuadsSeen * desc->fVertexSpec.verticesPerQuad() == totVerticesSeen);
737 }
738
739 // If quad counts per proxy were calculated correctly, the entire iterator
740 // should have been consumed.
741 SkASSERT(!iter.next());
742 }
743
744 SkASSERT(desc->totalSizeInBytes() == (size_t)(tessellator.vertices() - vertexData));
745 SkASSERT(totQuadsSeen == desc->fNumTotalQuads);
746 SkASSERT(totVerticesSeen == desc->totalNumVertices());
747 }
748
749 #ifdef SK_DEBUG
750 static int validate_op(GrTextureType textureType,
751 GrAAType aaType,
752 GrSwizzle swizzle,
753 const TextureOp* op) {
754 SkASSERT(op->fMetadata.fSwizzle == swizzle);
755
756 int quadCount = 0;
757 for (unsigned p = 0; p < op->fMetadata.fProxyCount; ++p) {
758 auto* proxy = op->fViewCountPairs[p].fProxy->asTextureProxy();
759 quadCount += op->fViewCountPairs[p].fQuadCnt;
760 SkASSERT(proxy);
761 SkASSERT(proxy->textureType() == textureType);
762 }
763
764 SkASSERT(aaType == op->fMetadata.aaType());
765 return quadCount;
766 }
767
768 void validate() const override {
769 // NOTE: Since this is debug-only code, we use the virtual asTextureProxy()
770 auto textureType = fViewCountPairs[0].fProxy->asTextureProxy()->textureType();
771 GrAAType aaType = fMetadata.aaType();
772 GrSwizzle swizzle = fMetadata.fSwizzle;
773
774 int quadCount = validate_op(textureType, aaType, swizzle, this);
775
776 for (const GrOp* tmp = this->prevInChain(); tmp; tmp = tmp->prevInChain()) {
777 quadCount += validate_op(textureType, aaType, swizzle,
778 static_cast<const TextureOp*>(tmp));
779 }
780
781 for (const GrOp* tmp = this->nextInChain(); tmp; tmp = tmp->nextInChain()) {
782 quadCount += validate_op(textureType, aaType, swizzle,
783 static_cast<const TextureOp*>(tmp));
784 }
785
786 SkASSERT(quadCount == this->numChainedQuads());
787 }
788
789 #endif
790
791 #if GR_TEST_UTILS
792 int numQuads() const final { return this->totNumQuads(); }
793 #endif
794
795 void characterize(Desc* desc) const {
796 SkDEBUGCODE(this->validate();)
797
798 GrQuad::Type quadType = GrQuad::Type::kAxisAligned;
799 ColorType colorType = ColorType::kNone;
800 GrQuad::Type srcQuadType = GrQuad::Type::kAxisAligned;
801 Subset subset = Subset::kNo;
802 GrAAType overallAAType = fMetadata.aaType();
803
804 desc->fNumProxies = 0;
805 desc->fNumTotalQuads = 0;
806 int maxQuadsPerMesh = 0;
807
808 for (const auto& op : ChainRange<TextureOp>(this)) {
809 if (op.fQuads.deviceQuadType() > quadType) {
810 quadType = op.fQuads.deviceQuadType();
811 }
812 if (op.fQuads.localQuadType() > srcQuadType) {
813 srcQuadType = op.fQuads.localQuadType();
814 }
815 if (op.fMetadata.subset() == Subset::kYes) {
816 subset = Subset::kYes;
817 }
818 colorType = std::max(colorType, op.fMetadata.colorType());
819 desc->fNumProxies += op.fMetadata.fProxyCount;
820
821 for (unsigned p = 0; p < op.fMetadata.fProxyCount; ++p) {
822 maxQuadsPerMesh = std::max(op.fViewCountPairs[p].fQuadCnt, maxQuadsPerMesh);
823 }
824 desc->fNumTotalQuads += op.totNumQuads();
825
826 if (op.fMetadata.aaType() == GrAAType::kCoverage) {
827 overallAAType = GrAAType::kCoverage;
828 }
829 }
830
831 SkASSERT(desc->fNumTotalQuads == this->numChainedQuads());
832
833 SkASSERT(!CombinedQuadCountWillOverflow(overallAAType, false, desc->fNumTotalQuads));
834
835 auto indexBufferOption = GrQuadPerEdgeAA::CalcIndexBufferOption(overallAAType,
836 maxQuadsPerMesh);
837
838 desc->fVertexSpec = VertexSpec(quadType, colorType, srcQuadType, /* hasLocal */ true,
839 subset, overallAAType, /* alpha as coverage */ true,
840 indexBufferOption);
841
842 SkASSERT(desc->fNumTotalQuads <= GrQuadPerEdgeAA::QuadLimit(indexBufferOption));
843 }
844
845 int totNumQuads() const {
846 #ifdef SK_DEBUG
847 int tmp = 0;
848 for (unsigned p = 0; p < fMetadata.fProxyCount; ++p) {
849 tmp += fViewCountPairs[p].fQuadCnt;
850 }
851 SkASSERT(tmp == fMetadata.fTotalQuadCount);
852 #endif
853
854 return fMetadata.fTotalQuadCount;
855 }
856
857 int numChainedQuads() const {
858 int numChainedQuads = this->totNumQuads();
859
860 for (const GrOp* tmp = this->prevInChain(); tmp; tmp = tmp->prevInChain()) {
861 numChainedQuads += ((const TextureOp*)tmp)->totNumQuads();
862 }
863
864 for (const GrOp* tmp = this->nextInChain(); tmp; tmp = tmp->nextInChain()) {
865 numChainedQuads += ((const TextureOp*)tmp)->totNumQuads();
866 }
867
868 return numChainedQuads;
869 }
870
871 // onPrePrepareDraws may or may not have been called at this point
872 void onPrepareDraws(Target* target) override {
873 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
874
875 SkDEBUGCODE(this->validate();)
876
877 SkASSERT(!fDesc || fDesc->fPrePreparedVertices);
878
879 if (!fDesc) {
880 SkArenaAlloc* arena = target->allocator();
881 fDesc = arena->make<Desc>();
882 this->characterize(fDesc);
883 SkASSERT(!fDesc->fPrePreparedVertices);
884 }
885
886 size_t vertexSize = fDesc->fVertexSpec.vertexSize();
887
888 void* vdata = target->makeVertexSpace(vertexSize, fDesc->totalNumVertices(),
889 &fDesc->fVertexBuffer, &fDesc->fBaseVertex);
890 if (!vdata) {
891 SkDebugf("Could not allocate vertices\n");
892 return;
893 }
894
895 if (fDesc->fVertexSpec.needsIndexBuffer()) {
896 fDesc->fIndexBuffer = GrQuadPerEdgeAA::GetIndexBuffer(
897 target, fDesc->fVertexSpec.indexBufferOption());
898 if (!fDesc->fIndexBuffer) {
899 SkDebugf("Could not allocate indices\n");
900 return;
901 }
902 }
903
904 if (fDesc->fPrePreparedVertices) {
905 memcpy(vdata, fDesc->fPrePreparedVertices, fDesc->totalSizeInBytes());
906 } else {
907 FillInVertices(target->caps(), this, fDesc, (char*) vdata);
908 }
909 }
910
911 void onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) override {
912 if (!fDesc->fVertexBuffer) {
913 return;
914 }
915
916 if (fDesc->fVertexSpec.needsIndexBuffer() && !fDesc->fIndexBuffer) {
917 return;
918 }
919
920 if (!fDesc->fProgramInfo) {
921 this->createProgramInfo(flushState);
922 SkASSERT(fDesc->fProgramInfo);
923 }
924
925 flushState->bindPipelineAndScissorClip(*fDesc->fProgramInfo, chainBounds);
926 flushState->bindBuffers(std::move(fDesc->fIndexBuffer), nullptr,
927 std::move(fDesc->fVertexBuffer));
928
929 int totQuadsSeen = 0;
930 SkDEBUGCODE(int numDraws = 0;)
931 for (const auto& op : ChainRange<TextureOp>(this)) {
932 for (unsigned p = 0; p < op.fMetadata.fProxyCount; ++p) {
933 const int quadCnt = op.fViewCountPairs[p].fQuadCnt;
934 SkASSERT(numDraws < fDesc->fNumProxies);
935 flushState->bindTextures(fDesc->fProgramInfo->geomProc(),
936 *op.fViewCountPairs[p].fProxy,
937 fDesc->fProgramInfo->pipeline());
938 GrQuadPerEdgeAA::IssueDraw(flushState->caps(), flushState->opsRenderPass(),
939 fDesc->fVertexSpec, totQuadsSeen, quadCnt,
940 fDesc->totalNumVertices(), fDesc->fBaseVertex);
941 totQuadsSeen += quadCnt;
942 SkDEBUGCODE(++numDraws;)
943 }
944 }
945
946 SkASSERT(totQuadsSeen == fDesc->fNumTotalQuads);
947 SkASSERT(numDraws == fDesc->fNumProxies);
948 }
949
950 void propagateCoverageAAThroughoutChain() {
951 fMetadata.fAAType = static_cast<uint16_t>(GrAAType::kCoverage);
952
953 for (GrOp* tmp = this->prevInChain(); tmp; tmp = tmp->prevInChain()) {
954 TextureOp* tex = static_cast<TextureOp*>(tmp);
955 SkASSERT(tex->fMetadata.aaType() == GrAAType::kCoverage ||
956 tex->fMetadata.aaType() == GrAAType::kNone);
957 tex->fMetadata.fAAType = static_cast<uint16_t>(GrAAType::kCoverage);
958 }
959
960 for (GrOp* tmp = this->nextInChain(); tmp; tmp = tmp->nextInChain()) {
961 TextureOp* tex = static_cast<TextureOp*>(tmp);
962 SkASSERT(tex->fMetadata.aaType() == GrAAType::kCoverage ||
963 tex->fMetadata.aaType() == GrAAType::kNone);
964 tex->fMetadata.fAAType = static_cast<uint16_t>(GrAAType::kCoverage);
965 }
966 }
967
968 CombineResult onCombineIfPossible(GrOp* t, SkArenaAlloc*, const GrCaps& caps) override {
969 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
970 auto* that = t->cast<TextureOp>();
971
972 SkDEBUGCODE(this->validate();)
973 SkDEBUGCODE(that->validate();)
974
975 if (fDesc || that->fDesc) {
976 // This should never happen (since only DDL recorded ops should be prePrepared)
977 // but, in any case, we should never combine ops that have been prePrepared
978 return CombineResult::kCannotCombine;
979 }
980
981 if (fMetadata.subset() != that->fMetadata.subset()) {
982 // It is technically possible to combine operations across subset modes, but performance
983 // testing suggests it's better to make more draw calls where some take advantage of
984 // the more optimal shader path without coordinate clamping.
985 return CombineResult::kCannotCombine;
986 }
987 if (!GrColorSpaceXform::Equals(fTextureColorSpaceXform.get(),
988 that->fTextureColorSpaceXform.get())) {
989 return CombineResult::kCannotCombine;
990 }
991
992 bool upgradeToCoverageAAOnMerge = false;
993 if (fMetadata.aaType() != that->fMetadata.aaType()) {
994 if (!CanUpgradeAAOnMerge(fMetadata.aaType(), that->fMetadata.aaType())) {
995 return CombineResult::kCannotCombine;
996 }
997 upgradeToCoverageAAOnMerge = true;
998 }
999
1000 if (CombinedQuadCountWillOverflow(fMetadata.aaType(), upgradeToCoverageAAOnMerge,
1001 this->numChainedQuads() + that->numChainedQuads())) {
1002 return CombineResult::kCannotCombine;
1003 }
1004
1005 if (fMetadata.saturate() != that->fMetadata.saturate()) {
1006 return CombineResult::kCannotCombine;
1007 }
1008 if (fMetadata.filter() != that->fMetadata.filter()) {
1009 return CombineResult::kCannotCombine;
1010 }
1011 if (fMetadata.mipmapMode() != that->fMetadata.mipmapMode()) {
1012 return CombineResult::kCannotCombine;
1013 }
1014 if (fMetadata.fSwizzle != that->fMetadata.fSwizzle) {
1015 return CombineResult::kCannotCombine;
1016 }
1017 const auto* thisProxy = fViewCountPairs[0].fProxy.get();
1018 const auto* thatProxy = that->fViewCountPairs[0].fProxy.get();
1019 if (fMetadata.fProxyCount > 1 || that->fMetadata.fProxyCount > 1 ||
1020 thisProxy != thatProxy) {
1021 // We can't merge across different proxies. Check if 'this' can be chained with 'that'.
1022 if (GrTextureProxy::ProxiesAreCompatibleAsDynamicState(thisProxy, thatProxy) &&
1023 caps.dynamicStateArrayGeometryProcessorTextureSupport() &&
1024 fMetadata.aaType() == that->fMetadata.aaType()) {
1025 // We only allow chaining when the aaTypes match because otherwise the AA type
1026 // reported by the chain can be inconsistent. That is, since chaining doesn't
1027 // propagate revised AA information throughout the chain, the head of the chain
1028 // could have an AA setting of kNone while the chain as a whole could have a
1029 // setting of kCoverage. This inconsistency would then interfere with the validity
1030 // of the CombinedQuadCountWillOverflow calls.
1031 // This problem doesn't occur with merging because we do propagate the AA information
1032 // (in propagateCoverageAAThroughoutChain) below.
1033 return CombineResult::kMayChain;
1034 }
1035 return CombineResult::kCannotCombine;
1036 }
1037
1038 fMetadata.fSubset |= that->fMetadata.fSubset;
1039 fMetadata.fColorType = std::max(fMetadata.fColorType, that->fMetadata.fColorType);
1040
1041 // Concatenate quad lists together
1042 fQuads.concat(that->fQuads);
1043 fViewCountPairs[0].fQuadCnt += that->fQuads.count();
1044 fMetadata.fTotalQuadCount += that->fQuads.count();
1045
1046 if (upgradeToCoverageAAOnMerge) {
1047 // This merger may be the start of a concatenation of two chains. When one
1048 // of the chains mutates its AA the other must follow suit or else the above AA
1049 // check may prevent later ops from chaining together. A specific example of this is
1050 // when chain2 is prepended onto chain1:
1051 // chain1 (that): opA (non-AA/mergeable) opB (non-AA/non-mergeable)
1052 // chain2 (this): opC (cov-AA/non-mergeable) opD (cov-AA/mergeable)
1053 // W/o this propagation, after opD & opA merge, opB and opC would say they couldn't
1054 // chain - which would stop the concatenation process.
1055 this->propagateCoverageAAThroughoutChain();
1056 that->propagateCoverageAAThroughoutChain();
1057 }
1058
1059 SkDEBUGCODE(this->validate();)
1060
1061 return CombineResult::kMerged;
1062 }
1063
1064 #if GR_TEST_UTILS
1065 SkString onDumpInfo() const override {
1066 SkString str = SkStringPrintf("# draws: %d\n", fQuads.count());
1067 auto iter = fQuads.iterator();
1068 for (unsigned p = 0; p < fMetadata.fProxyCount; ++p) {
1069 SkString proxyStr = fViewCountPairs[p].fProxy->dump();
1070 str.append(proxyStr);
1071 str.appendf(", Filter: %d, MM: %d\n",
1072 static_cast<int>(fMetadata.fFilter),
1073 static_cast<int>(fMetadata.fMipmapMode));
1074 for (int i = 0; i < fViewCountPairs[p].fQuadCnt && iter.next(); ++i) {
1075 const GrQuad* quad = iter.deviceQuad();
1076 GrQuad uv = iter.isLocalValid() ? *(iter.localQuad()) : GrQuad();
1077 const ColorSubsetAndAA& info = iter.metadata();
1078 str.appendf(
1079 "%d: Color: 0x%08x, Subset(%d): [L: %.2f, T: %.2f, R: %.2f, B: %.2f]\n"
1080 " UVs [(%.2f, %.2f), (%.2f, %.2f), (%.2f, %.2f), (%.2f, %.2f)]\n"
1081 " Quad [(%.2f, %.2f), (%.2f, %.2f), (%.2f, %.2f), (%.2f, %.2f)]\n",
1082 i, info.fColor.toBytes_RGBA(), fMetadata.fSubset, info.fSubsetRect.fLeft,
1083 info.fSubsetRect.fTop, info.fSubsetRect.fRight, info.fSubsetRect.fBottom,
1084 quad->point(0).fX, quad->point(0).fY, quad->point(1).fX, quad->point(1).fY,
1085 quad->point(2).fX, quad->point(2).fY, quad->point(3).fX, quad->point(3).fY,
1086 uv.point(0).fX, uv.point(0).fY, uv.point(1).fX, uv.point(1).fY,
1087 uv.point(2).fX, uv.point(2).fY, uv.point(3).fX, uv.point(3).fY);
1088 }
1089 }
1090 return str;
1091 }
1092 #endif
1093
1094 GrQuadBuffer<ColorSubsetAndAA> fQuads;
1095 sk_sp<GrColorSpaceXform> fTextureColorSpaceXform;
1096 // Most state of TextureOp is packed into these two fields to minimize the op's size.
1097 // Historically, increasing the size of TextureOp has caused surprising perf regressions, so
1098 // consider/measure changes with care.
1099 Desc* fDesc;
1100 Metadata fMetadata;
1101
1102 // This field must go last. When allocating this op, we will allocate extra space to hold
1103 // additional ViewCountPairs immediately after the op's allocation so we can treat this
1104 // as an fProxyCnt-length array.
1105 ViewCountPair fViewCountPairs[1];
1106
1107 using INHERITED = GrMeshDrawOp;
1108 };
1109
1110 } // anonymous namespace
1111
1112 #if GR_TEST_UTILS
1113 uint32_t GrTextureOp::ClassID() {
1114 return TextureOp::ClassID();
1115 }
1116 #endif
1117
1118 GrOp::Owner GrTextureOp::Make(GrRecordingContext* context,
1119 GrSurfaceProxyView proxyView,
1120 SkAlphaType alphaType,
1121 sk_sp<GrColorSpaceXform> textureXform,
1122 GrSamplerState::Filter filter,
1123 GrSamplerState::MipmapMode mm,
1124 const SkPMColor4f& color,
1125 Saturate saturate,
1126 SkBlendMode blendMode,
1127 GrAAType aaType,
1128 DrawQuad* quad,
1129 const SkRect* subset) {
1130 // Apply optimizations that are valid whether or not using GrTextureOp or GrFillRectOp
1131 if (subset && subset->contains(proxyView.proxy()->backingStoreBoundsRect())) {
1132 // No need for a shader-based subset if hardware clamping achieves the same effect
1133 subset = nullptr;
1134 }
1135
1136 if (filter != GrSamplerState::Filter::kNearest || mm != GrSamplerState::MipmapMode::kNone) {
1137 auto [mustFilter, mustMM] = filter_and_mm_have_effect(quad->fLocal, quad->fDevice);
1138 if (!mustFilter) {
1139 filter = GrSamplerState::Filter::kNearest;
1140 }
1141 if (!mustMM) {
1142 mm = GrSamplerState::MipmapMode::kNone;
1143 }
1144 }
1145
1146 if (blendMode == SkBlendMode::kSrcOver) {
1147 return TextureOp::Make(context, std::move(proxyView), std::move(textureXform), filter, mm,
1148 color, saturate, aaType, std::move(quad), subset);
1149 } else {
1150 // Emulate complex blending using GrFillRectOp
1151 GrSamplerState samplerState(GrSamplerState::WrapMode::kClamp, filter, mm);
1152 GrPaint paint;
1153 paint.setColor4f(color);
1154 paint.setXPFactory(SkBlendMode_AsXPFactory(blendMode));
1155
1156 std::unique_ptr<GrFragmentProcessor> fp;
1157 const auto& caps = *context->priv().caps();
1158 if (subset) {
1159 SkRect localRect;
1160 if (quad->fLocal.asRect(&localRect)) {
1161 fp = GrTextureEffect::MakeSubset(std::move(proxyView), alphaType, SkMatrix::I(),
1162 samplerState, *subset, localRect, caps);
1163 } else {
1164 fp = GrTextureEffect::MakeSubset(std::move(proxyView), alphaType, SkMatrix::I(),
1165 samplerState, *subset, caps);
1166 }
1167 } else {
1168 fp = GrTextureEffect::Make(std::move(proxyView), alphaType, SkMatrix::I(), samplerState,
1169 caps);
1170 }
1171 fp = GrColorSpaceXformEffect::Make(std::move(fp), std::move(textureXform));
1172 fp = GrBlendFragmentProcessor::Make(std::move(fp), nullptr, SkBlendMode::kModulate);
1173 if (saturate == GrTextureOp::Saturate::kYes) {
1174 fp = GrClampFragmentProcessor::Make(std::move(fp), /*clampToPremul=*/false);
1175 }
1176 paint.setColorFragmentProcessor(std::move(fp));
1177 return GrFillRectOp::Make(context, std::move(paint), aaType, quad);
1178 }
1179 }
1180
1181 // A helper class that assists in breaking up bulk API quad draws into manageable chunks.
1182 class GrTextureOp::BatchSizeLimiter {
1183 public:
1184 BatchSizeLimiter(GrSurfaceDrawContext* rtc,
1185 const GrClip* clip,
1186 GrRecordingContext* context,
1187 int numEntries,
1188 GrSamplerState::Filter filter,
1189 GrSamplerState::MipmapMode mm,
1190 GrTextureOp::Saturate saturate,
1191 SkCanvas::SrcRectConstraint constraint,
1192 const SkMatrix& viewMatrix,
1193 sk_sp<GrColorSpaceXform> textureColorSpaceXform)
1194 : fRTC(rtc)
1195 , fClip(clip)
1196 , fContext(context)
1197 , fFilter(filter)
1198 , fMipmapMode(mm)
1199 , fSaturate(saturate)
1200 , fConstraint(constraint)
1201 , fViewMatrix(viewMatrix)
1202 , fTextureColorSpaceXform(textureColorSpaceXform)
1203 , fNumLeft(numEntries) {}
1204
1205 void createOp(GrSurfaceDrawContext::TextureSetEntry set[],
1206 int clumpSize,
1207 GrAAType aaType) {
1208 int clumpProxyCount = proxy_run_count(&set[fNumClumped], clumpSize);
1209 GrOp::Owner op = TextureOp::Make(fContext,
1210 &set[fNumClumped],
1211 clumpSize,
1212 clumpProxyCount,
1213 fFilter,
1214 fMipmapMode,
1215 fSaturate,
1216 aaType,
1217 fConstraint,
1218 fViewMatrix,
1219 fTextureColorSpaceXform);
1220 fRTC->addDrawOp(fClip, std::move(op));
1221
1222 fNumLeft -= clumpSize;
1223 fNumClumped += clumpSize;
1224 }
1225
1226 int numLeft() const { return fNumLeft; }
1227 int baseIndex() const { return fNumClumped; }
1228
1229 private:
1230 GrSurfaceDrawContext* fRTC;
1231 const GrClip* fClip;
1232 GrRecordingContext* fContext;
1233 GrSamplerState::Filter fFilter;
1234 GrSamplerState::MipmapMode fMipmapMode;
1235 GrTextureOp::Saturate fSaturate;
1236 SkCanvas::SrcRectConstraint fConstraint;
1237 const SkMatrix& fViewMatrix;
1238 sk_sp<GrColorSpaceXform> fTextureColorSpaceXform;
1239
1240 int fNumLeft;
1241 int fNumClumped = 0; // also the offset for the start of the next clump
1242 };
1243
1244 // Greedily clump quad draws together until the index buffer limit is exceeded.
1245 void GrTextureOp::AddTextureSetOps(GrSurfaceDrawContext* rtc,
1246 const GrClip* clip,
1247 GrRecordingContext* context,
1248 GrSurfaceDrawContext::TextureSetEntry set[],
1249 int cnt,
1250 int proxyRunCnt,
1251 GrSamplerState::Filter filter,
1252 GrSamplerState::MipmapMode mm,
1253 Saturate saturate,
1254 SkBlendMode blendMode,
1255 GrAAType aaType,
1256 SkCanvas::SrcRectConstraint constraint,
1257 const SkMatrix& viewMatrix,
1258 sk_sp<GrColorSpaceXform> textureColorSpaceXform) {
1259 // Ensure that the index buffer limits are lower than the proxy and quad count limits of
1260 // the op's metadata so we don't need to worry about overflow.
1261 SkDEBUGCODE(TextureOp::ValidateResourceLimits();)
1262 SkASSERT(proxy_run_count(set, cnt) == proxyRunCnt);
1263
1264 // First check if we can support batches as a single op
1265 if (blendMode != SkBlendMode::kSrcOver ||
1266 !context->priv().caps()->dynamicStateArrayGeometryProcessorTextureSupport()) {
1267 // Append each entry as its own op; these may still be GrTextureOps if the blend mode is
1268 // src-over but the backend doesn't support dynamic state changes. Otherwise Make()
1269 // automatically creates the appropriate GrFillRectOp to emulate GrTextureOp.
1270 SkMatrix ctm;
1271 for (int i = 0; i < cnt; ++i) {
1272 ctm = viewMatrix;
1273 if (set[i].fPreViewMatrix) {
1274 ctm.preConcat(*set[i].fPreViewMatrix);
1275 }
1276
1277 DrawQuad quad;
1278 quad.fEdgeFlags = set[i].fAAFlags;
1279 if (set[i].fDstClipQuad) {
1280 quad.fDevice = GrQuad::MakeFromSkQuad(set[i].fDstClipQuad, ctm);
1281
1282 SkPoint srcPts[4];
1283 GrMapRectPoints(set[i].fDstRect, set[i].fSrcRect, set[i].fDstClipQuad, srcPts, 4);
1284 quad.fLocal = GrQuad::MakeFromSkQuad(srcPts, SkMatrix::I());
1285 } else {
1286 quad.fDevice = GrQuad::MakeFromRect(set[i].fDstRect, ctm);
1287 quad.fLocal = GrQuad(set[i].fSrcRect);
1288 }
1289
1290 const SkRect* subset = constraint == SkCanvas::kStrict_SrcRectConstraint
1291 ? &set[i].fSrcRect : nullptr;
1292
1293 auto op = Make(context, set[i].fProxyView, set[i].fSrcAlphaType, textureColorSpaceXform,
1294 filter, mm, set[i].fColor, saturate, blendMode, aaType, &quad, subset);
1295 rtc->addDrawOp(clip, std::move(op));
1296 }
1297 return;
1298 }
1299
1300 // Second check if we can always just make a single op and avoid the extra iteration
1301 // needed to clump things together.
1302 if (cnt <= std::min(GrResourceProvider::MaxNumNonAAQuads(),
1303 GrResourceProvider::MaxNumAAQuads())) {
1304 auto op = TextureOp::Make(context, set, cnt, proxyRunCnt, filter, mm, saturate, aaType,
1305 constraint, viewMatrix, std::move(textureColorSpaceXform));
1306 rtc->addDrawOp(clip, std::move(op));
1307 return;
1308 }
1309
1310 BatchSizeLimiter state(rtc, clip, context, cnt, filter, mm, saturate, constraint, viewMatrix,
1311 std::move(textureColorSpaceXform));
1312
1313 // kNone and kMSAA never get altered
1314 if (aaType == GrAAType::kNone || aaType == GrAAType::kMSAA) {
1315 // Clump these into series of MaxNumNonAAQuads-sized GrTextureOps
1316 while (state.numLeft() > 0) {
1317 int clumpSize = std::min(state.numLeft(), GrResourceProvider::MaxNumNonAAQuads());
1318
1319 state.createOp(set, clumpSize, aaType);
1320 }
1321 } else {
1322 // kCoverage can be downgraded to kNone. Note that the following is conservative. kCoverage
1323 // can also get downgraded to kNone if all the quads are on integer coordinates and
1324 // axis-aligned.
1325 SkASSERT(aaType == GrAAType::kCoverage);
1326
1327 while (state.numLeft() > 0) {
1328 GrAAType runningAA = GrAAType::kNone;
1329 bool clumped = false;
1330
1331 for (int i = 0; i < state.numLeft(); ++i) {
1332 int absIndex = state.baseIndex() + i;
1333
1334 if (set[absIndex].fAAFlags != GrQuadAAFlags::kNone ||
1335 runningAA == GrAAType::kCoverage) {
1336
1337 if (i >= GrResourceProvider::MaxNumAAQuads()) {
1338 // Here we either need to boost the AA type to kCoverage, but doing so with
1339 // all the accumulated quads would overflow, or we have a set of AA quads
1340 // that has just gotten too large. In either case, calve off the existing
1341 // quads as their own TextureOp.
1342 state.createOp(
1343 set,
1344 runningAA == GrAAType::kNone ? i : GrResourceProvider::MaxNumAAQuads(),
1345 runningAA); // maybe downgrading AA here
1346 clumped = true;
1347 break;
1348 }
1349
1350 runningAA = GrAAType::kCoverage;
1351 } else if (runningAA == GrAAType::kNone) {
1352
1353 if (i >= GrResourceProvider::MaxNumNonAAQuads()) {
1354 // Here we've found a consistent batch of non-AA quads that has gotten too
1355 // large. Calve it off as its own GrTextureOp.
1356 state.createOp(set, GrResourceProvider::MaxNumNonAAQuads(),
1357 GrAAType::kNone); // definitely downgrading AA here
1358 clumped = true;
1359 break;
1360 }
1361 }
1362 }
1363
1364 if (!clumped) {
1365 // We ran through the above loop w/o hitting a limit. Spit out this last clump of
1366 // quads and call it a day.
1367 state.createOp(set, state.numLeft(), runningAA); // maybe downgrading AA here
1368 }
1369 }
1370 }
1371 }
1372
1373 #if GR_TEST_UTILS
1374 #include "include/gpu/GrRecordingContext.h"
1375 #include "src/gpu/GrProxyProvider.h"
1376 #include "src/gpu/GrRecordingContextPriv.h"
1377
1378 GR_DRAW_OP_TEST_DEFINE(TextureOp) {
1379 SkISize dims;
1380 dims.fHeight = random->nextULessThan(90) + 10;
1381 dims.fWidth = random->nextULessThan(90) + 10;
1382 auto origin = random->nextBool() ? kTopLeft_GrSurfaceOrigin : kBottomLeft_GrSurfaceOrigin;
1383 GrMipmapped mipMapped = random->nextBool() ? GrMipmapped::kYes : GrMipmapped::kNo;
1384 SkBackingFit fit = SkBackingFit::kExact;
1385 if (mipMapped == GrMipmapped::kNo) {
1386 fit = random->nextBool() ? SkBackingFit::kApprox : SkBackingFit::kExact;
1387 }
1388 const GrBackendFormat format =
1389 context->priv().caps()->getDefaultBackendFormat(GrColorType::kRGBA_8888,
1390 GrRenderable::kNo);
1391 GrProxyProvider* proxyProvider = context->priv().proxyProvider();
1392 sk_sp<GrTextureProxy> proxy = proxyProvider->createProxy(
1393 format, dims, GrRenderable::kNo, 1, mipMapped, fit, SkBudgeted::kNo, GrProtected::kNo,
1394 GrInternalSurfaceFlags::kNone);
1395
1396 SkRect rect = GrTest::TestRect(random);
1397 SkRect srcRect;
1398 srcRect.fLeft = random->nextRangeScalar(0.f, proxy->width() / 2.f);
1399 srcRect.fRight = random->nextRangeScalar(0.f, proxy->width()) + proxy->width() / 2.f;
1400 srcRect.fTop = random->nextRangeScalar(0.f, proxy->height() / 2.f);
1401 srcRect.fBottom = random->nextRangeScalar(0.f, proxy->height()) + proxy->height() / 2.f;
1402 SkMatrix viewMatrix = GrTest::TestMatrixPreservesRightAngles(random);
1403 SkPMColor4f color = SkPMColor4f::FromBytes_RGBA(SkColorToPremulGrColor(random->nextU()));
1404 GrSamplerState::Filter filter = (GrSamplerState::Filter)random->nextULessThan(
1405 static_cast<uint32_t>(GrSamplerState::Filter::kLast) + 1);
1406 GrSamplerState::MipmapMode mm = GrSamplerState::MipmapMode::kNone;
1407 if (mipMapped == GrMipmapped::kYes) {
1408 mm = (GrSamplerState::MipmapMode)random->nextULessThan(
1409 static_cast<uint32_t>(GrSamplerState::MipmapMode::kLast) + 1);
1410 }
1411
1412 auto texXform = GrTest::TestColorXform(random);
1413 GrAAType aaType = GrAAType::kNone;
1414 if (random->nextBool()) {
1415 aaType = (numSamples > 1) ? GrAAType::kMSAA : GrAAType::kCoverage;
1416 }
1417 GrQuadAAFlags aaFlags = GrQuadAAFlags::kNone;
1418 aaFlags |= random->nextBool() ? GrQuadAAFlags::kLeft : GrQuadAAFlags::kNone;
1419 aaFlags |= random->nextBool() ? GrQuadAAFlags::kTop : GrQuadAAFlags::kNone;
1420 aaFlags |= random->nextBool() ? GrQuadAAFlags::kRight : GrQuadAAFlags::kNone;
1421 aaFlags |= random->nextBool() ? GrQuadAAFlags::kBottom : GrQuadAAFlags::kNone;
1422 bool useSubset = random->nextBool();
1423 auto saturate = random->nextBool() ? GrTextureOp::Saturate::kYes : GrTextureOp::Saturate::kNo;
1424 GrSurfaceProxyView proxyView(
1425 std::move(proxy), origin,
1426 context->priv().caps()->getReadSwizzle(format, GrColorType::kRGBA_8888));
1427 auto alphaType = static_cast<SkAlphaType>(
1428 random->nextRangeU(kUnknown_SkAlphaType + 1, kLastEnum_SkAlphaType));
1429
1430 DrawQuad quad = {GrQuad::MakeFromRect(rect, viewMatrix), GrQuad(srcRect), aaFlags};
1431 return GrTextureOp::Make(context, std::move(proxyView), alphaType, std::move(texXform), filter,
1432 mm, color, saturate, SkBlendMode::kSrcOver, aaType, &quad,
1433 useSubset ? &srcRect : nullptr);
1434 }
1435
1436 #endif
1437