1 /*
2 * Copyright 2017 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include <new>
9
10 #include "include/core/SkPoint.h"
11 #include "include/core/SkPoint3.h"
12 #include "include/gpu/GrRecordingContext.h"
13 #include "include/private/SkFloatingPoint.h"
14 #include "include/private/SkTo.h"
15 #include "src/core/SkMathPriv.h"
16 #include "src/core/SkMatrixPriv.h"
17 #include "src/core/SkRectPriv.h"
18 #include "src/gpu/GrAppliedClip.h"
19 #include "src/gpu/GrCaps.h"
20 #include "src/gpu/GrDrawOpTest.h"
21 #include "src/gpu/GrGeometryProcessor.h"
22 #include "src/gpu/GrGpu.h"
23 #include "src/gpu/GrMemoryPool.h"
24 #include "src/gpu/GrOpFlushState.h"
25 #include "src/gpu/GrOpsTypes.h"
26 #include "src/gpu/GrRecordingContextPriv.h"
27 #include "src/gpu/GrResourceProvider.h"
28 #include "src/gpu/GrResourceProviderPriv.h"
29 #include "src/gpu/GrShaderCaps.h"
30 #include "src/gpu/GrTexture.h"
31 #include "src/gpu/GrTextureProxy.h"
32 #include "src/gpu/SkGr.h"
33 #include "src/gpu/effects/GrBlendFragmentProcessor.h"
34 #include "src/gpu/effects/GrTextureEffect.h"
35 #include "src/gpu/geometry/GrQuad.h"
36 #include "src/gpu/geometry/GrQuadBuffer.h"
37 #include "src/gpu/geometry/GrQuadUtils.h"
38 #include "src/gpu/geometry/GrRect.h"
39 #include "src/gpu/glsl/GrGLSLVarying.h"
40 #include "src/gpu/ops/FillRectOp.h"
41 #include "src/gpu/ops/GrMeshDrawOp.h"
42 #include "src/gpu/ops/GrSimpleMeshDrawOpHelper.h"
43 #include "src/gpu/ops/QuadPerEdgeAA.h"
44 #include "src/gpu/ops/TextureOp.h"
45 #include "src/gpu/v1/SurfaceDrawContext_v1.h"
46
47 namespace {
48
49 using Subset = skgpu::v1::QuadPerEdgeAA::Subset;
50 using VertexSpec = skgpu::v1::QuadPerEdgeAA::VertexSpec;
51 using ColorType = skgpu::v1::QuadPerEdgeAA::ColorType;
52
53 // Extracts lengths of vertical and horizontal edges of axis-aligned quad. "width" is the edge
54 // between v0 and v2 (or v1 and v3), "height" is the edge between v0 and v1 (or v2 and v3).
55 SkSize axis_aligned_quad_size(const GrQuad& quad) {
56 SkASSERT(quad.quadType() == GrQuad::Type::kAxisAligned);
57 // Simplification of regular edge length equation, since it's axis aligned and can avoid sqrt
58 float dw = sk_float_abs(quad.x(2) - quad.x(0)) + sk_float_abs(quad.y(2) - quad.y(0));
59 float dh = sk_float_abs(quad.x(1) - quad.x(0)) + sk_float_abs(quad.y(1) - quad.y(0));
60 return {dw, dh};
61 }
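// For illustration (hypothetical values): an axis-aligned quad built from the rect
// {L=10, T=20, R=40, B=60} stores xs = {10, 10, 40, 40} and ys = {20, 60, 20, 60}, so
// dw = |40 - 10| + |20 - 20| = 30 and dh = |10 - 10| + |60 - 20| = 40. For a 90-degree
// rotated (but still axis-aligned) quad, one term of each sum is zero, so the absolute
// sums still recover the true edge lengths without a sqrt.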
62
63 std::tuple<bool /* filter */,
64 bool /* mipmap */>
65 filter_and_mm_have_effect(const GrQuad& srcQuad, const GrQuad& dstQuad) {
66 // If not axis-aligned in src or dst, then always say it has an effect
67 if (srcQuad.quadType() != GrQuad::Type::kAxisAligned ||
68 dstQuad.quadType() != GrQuad::Type::kAxisAligned) {
69 return {true, true};
70 }
71
72 SkRect srcRect;
73 SkRect dstRect;
74 if (srcQuad.asRect(&srcRect) && dstQuad.asRect(&dstRect)) {
75 // Disable filtering when there is no scaling (width and height are the same), and the
76 // top-left corners have the same fraction (so src and dst snap to the pixel grid
77 // identically).
78 SkASSERT(srcRect.isSorted());
79 bool filter = srcRect.width() != dstRect.width() || srcRect.height() != dstRect.height() ||
80 SkScalarFraction(srcRect.fLeft) != SkScalarFraction(dstRect.fLeft) ||
81 SkScalarFraction(srcRect.fTop) != SkScalarFraction(dstRect.fTop);
82 bool mm = srcRect.width() > dstRect.width() || srcRect.height() > dstRect.height();
83 return {filter, mm};
84 }
85 // Extract edge lengths
86 SkSize srcSize = axis_aligned_quad_size(srcQuad);
87 SkSize dstSize = axis_aligned_quad_size(dstQuad);
88     // Although the quads are axis-aligned, the local coordinate system is transformed such
89     // that fractionally-aligned sample centers will not align with the device coordinate
90     // system. So only disable filtering when the edges are the same length and both srcQuad's
91     // and dstQuad's 0th vertices are integer aligned.
92 bool filter = srcSize != dstSize ||
93 !SkScalarIsInt(srcQuad.x(0)) ||
94 !SkScalarIsInt(srcQuad.y(0)) ||
95 !SkScalarIsInt(dstQuad.x(0)) ||
96 !SkScalarIsInt(dstQuad.y(0));
97 bool mm = srcSize.fWidth > dstSize.fWidth || srcSize.fHeight > dstSize.fHeight;
98 return {filter, mm};
99 }
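// Two hypothetical cases, purely for illustration: drawing src rect {0, 0, 100, 100} to dst
// rect {20, 30, 120, 130} has equal dimensions and matching corner fractions, so the result
// is {filter = false, mm = false} and the draw can stay on the nearest/no-mip path. Drawing
// src {0, 0, 200, 200} to dst {0, 0, 100, 100} minifies, so it returns {true, true} and both
// the requested filter and mipmapping actually matter.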
100
101 // Describes function for normalizing src coords: [x * iw, y * ih + yOffset] can represent
102 // regular and rectangular textures, w/ or w/o origin correction.
103 struct NormalizationParams {
104 float fIW; // 1 / width of texture, or 1.0 for texture rectangles
105 float fInvH; // 1 / height of texture, or 1.0 for tex rects, X -1 if bottom-left origin
106 float fYOffset; // 0 for top-left origin, height of [normalized] tex if bottom-left
107 };
108 NormalizationParams proxy_normalization_params(const GrSurfaceProxy* proxy,
109 GrSurfaceOrigin origin) {
110 // Whether or not the proxy is instantiated, this is the size its texture will be, so we can
111 // normalize the src coordinates up front.
112 SkISize dimensions = proxy->backingStoreDimensions();
113 float iw, ih, h;
114 if (proxy->backendFormat().textureType() == GrTextureType::kRectangle) {
115 iw = ih = 1.f;
116 h = dimensions.height();
117 } else {
118 iw = 1.f / dimensions.width();
119 ih = 1.f / dimensions.height();
120 h = 1.f;
121 }
122
123 if (origin == kBottomLeft_GrSurfaceOrigin) {
124 return {iw, -ih, h};
125 } else {
126 return {iw, ih, 0.0f};
127 }
128 }
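// A minimal worked example (hypothetical sizes): a 256x128 non-rectangle proxy yields
// {1/256, 1/128, 0} with a top-left origin and {1/256, -1/128, 1} with a bottom-left origin,
// so src y = 0 maps to normalized 1.0 and y = 128 maps to 0.0. For a
// GrTextureType::kRectangle proxy the coords stay unnormalized: {1, 1, 0} or
// {1, -1, dimensions.height()}.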
129
130 // Normalize the subset. If 'subsetRect' is null, it is assumed no subset constraint is desired,
131 // so a sufficiently large rect is returned even if the quad ends up batched with an op that uses
132 // subsets overall. When there is a subset it will be inset based on the filter mode. Normalization
133 // and y-flipping are applied as indicated by NormalizationParams.
134 SkRect normalize_and_inset_subset(GrSamplerState::Filter filter,
135 const NormalizationParams& params,
136 const SkRect* subsetRect) {
137 static constexpr SkRect kLargeRect = {-100000, -100000, 1000000, 1000000};
138 if (!subsetRect) {
139 // Either the quad has no subset constraint and is batched with a subset constrained op
140 // (in which case we want a subset that doesn't restrict normalized tex coords), or the
141 // entire op doesn't use the subset, in which case the returned value is ignored.
142 return kLargeRect;
143 }
144
145 auto ltrb = skvx::Vec<4, float>::Load(subsetRect);
146 auto flipHi = skvx::Vec<4, float>({1.f, 1.f, -1.f, -1.f});
147 if (filter == GrSamplerState::Filter::kNearest) {
148 // Make sure our insetting puts us at pixel centers.
149 ltrb = skvx::floor(ltrb*flipHi)*flipHi;
150 }
151 // Inset with pin to the rect center.
152 ltrb += skvx::Vec<4, float>({.5f, .5f, -.5f, -.5f});
153 auto mid = (skvx::shuffle<2, 3, 0, 1>(ltrb) + ltrb)*0.5f;
154 ltrb = skvx::min(ltrb*flipHi, mid*flipHi)*flipHi;
155
156 // Normalize and offset
157 ltrb = ltrb * skvx::Vec<4, float>{params.fIW, params.fInvH, params.fIW, params.fInvH} +
158 skvx::Vec<4, float>{0.f, params.fYOffset, 0.f, params.fYOffset};
159 if (params.fInvH < 0.f) {
160 // Flip top and bottom to keep the rect sorted when loaded back to SkRect.
161 ltrb = skvx::shuffle<0, 3, 2, 1>(ltrb);
162 }
163
164 SkRect out;
165 ltrb.store(&out);
166 return out;
167 }
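// Worked example with hypothetical inputs: a 64x64 top-left proxy (params {1/64, 1/64, 0}),
// nearest filtering, and subsetRect {10, 10, 50, 30}. The rect is already integral, the
// half-pixel inset gives {10.5, 10.5, 49.5, 29.5} (the pin to the center only matters for
// subsets under a pixel wide or tall), and normalization produces
// {10.5/64, 10.5/64, 49.5/64, 29.5/64}, i.e. texel centers just inside the subset.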
168
169 // Normalizes logical src coords and corrects for origin
170 void normalize_src_quad(const NormalizationParams& params,
171 GrQuad* srcQuad) {
172 // The src quad should not have any perspective
173 SkASSERT(!srcQuad->hasPerspective());
174 skvx::Vec<4, float> xs = srcQuad->x4f() * params.fIW;
175 skvx::Vec<4, float> ys = srcQuad->y4f() * params.fInvH + params.fYOffset;
176 xs.store(srcQuad->xs());
177 ys.store(srcQuad->ys());
178 }
179
180 // Count the number of proxy runs in the entry set. This usually is already computed by
181 // SkGpuDevice, but when the BatchSizeLimiter chops the set up it must determine a new proxy count
182 // for each split.
183 int proxy_run_count(const GrTextureSetEntry set[], int count) {
184 int actualProxyRunCount = 0;
185 const GrSurfaceProxy* lastProxy = nullptr;
186 for (int i = 0; i < count; ++i) {
187 if (set[i].fProxyView.proxy() != lastProxy) {
188 actualProxyRunCount++;
189 lastProxy = set[i].fProxyView.proxy();
190 }
191 }
192 return actualProxyRunCount;
193 }
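// For example (hypothetical set), entries referencing proxies [A, A, B, B, A] produce a run
// count of 3 (A-run, B-run, A-run): runs count proxy *switches*, not distinct proxies, which
// matches how the ViewCountPairs are laid out in the op below.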
194
195 bool safe_to_ignore_subset_rect(GrAAType aaType, GrSamplerState::Filter filter,
196 const DrawQuad& quad, const SkRect& subsetRect) {
197     // If the device and local quads are both axis-aligned, and filtering is off, the local quad
198     // can push all the way up to the edges of the subset rect and the sampler shouldn't
199 // overshoot. Unfortunately, antialiasing adds enough jitter that we can only rely on this in
200 // the non-antialiased case.
201 SkRect localBounds = quad.fLocal.bounds();
202 if (aaType == GrAAType::kNone &&
203 filter == GrSamplerState::Filter::kNearest &&
204 quad.fDevice.quadType() == GrQuad::Type::kAxisAligned &&
205 quad.fLocal.quadType() == GrQuad::Type::kAxisAligned &&
206 subsetRect.contains(localBounds)) {
207
208 return true;
209 }
210
211 // If the local quad is inset by at least 0.5 pixels into the subset rect's bounds, the
212     // sampler shouldn't overshoot, even when antialiasing and filtering are taken into account.
213 if (subsetRect.makeInset(0.5f, 0.5f).contains(localBounds)) {
214 return true;
215 }
216
217 // The subset rect cannot be ignored safely.
218 return false;
219 }
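// Illustration (hypothetical numbers): with subsetRect {0, 0, 100, 100} and local quad bounds
// {10, 10, 90, 90}, the inset rect {0.5, 0.5, 99.5, 99.5} still contains the bounds, so the
// subset can be dropped even with AA and linear filtering. If the local bounds touch the
// subset edges exactly, only the non-AA + nearest + axis-aligned fast path above applies.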
220
221 /**
222  * Op that implements TextureOp::Make. It draws textured quads. Each quad can modulate the
223  * texture by a color. The blend with the destination is always src-over. The edges are non-AA.
224 */
225 class TextureOpImpl final : public GrMeshDrawOp {
226 public:
227 using Saturate = skgpu::v1::TextureOp::Saturate;
228
229 static GrOp::Owner Make(GrRecordingContext* context,
230 GrSurfaceProxyView proxyView,
231 sk_sp<GrColorSpaceXform> textureXform,
232 GrSamplerState::Filter filter,
233 GrSamplerState::MipmapMode mm,
234 const SkPMColor4f& color,
235 Saturate saturate,
236 GrAAType aaType,
237 DrawQuad* quad,
238 #ifdef SK_ENABLE_STENCIL_CULLING_OHOS
239 const SkRect* subset,
240 uint32_t stencilRef) {
241 return GrOp::Make<TextureOpImpl>(context, std::move(proxyView), std::move(textureXform),
242 filter, mm, color, saturate, aaType, quad, subset, stencilRef);
243 #else
244 const SkRect* subset) {
245 return GrOp::Make<TextureOpImpl>(context, std::move(proxyView), std::move(textureXform),
246 filter, mm, color, saturate, aaType, quad, subset);
247 #endif
248 }
249
250 static GrOp::Owner Make(GrRecordingContext* context,
251 GrTextureSetEntry set[],
252 int cnt,
253 int proxyRunCnt,
254 GrSamplerState::Filter filter,
255 GrSamplerState::MipmapMode mm,
256 Saturate saturate,
257 GrAAType aaType,
258 SkCanvas::SrcRectConstraint constraint,
259 const SkMatrix& viewMatrix,
260 sk_sp<GrColorSpaceXform> textureColorSpaceXform) {
261 // Allocate size based on proxyRunCnt, since that determines number of ViewCountPairs.
262 SkASSERT(proxyRunCnt <= cnt);
263 return GrOp::MakeWithExtraMemory<TextureOpImpl>(
264 context, sizeof(ViewCountPair) * (proxyRunCnt - 1),
265 set, cnt, proxyRunCnt, filter, mm, saturate, aaType, constraint,
266 viewMatrix, std::move(textureColorSpaceXform));
267 }
268
269 ~TextureOpImpl() override {
270 for (unsigned p = 1; p < fMetadata.fProxyCount; ++p) {
271 fViewCountPairs[p].~ViewCountPair();
272 }
273 }
274
275 const char* name() const override { return "TextureOp"; }
276
277 void visitProxies(const GrVisitProxyFunc& func) const override {
278 bool mipped = (fMetadata.mipmapMode() != GrSamplerState::MipmapMode::kNone);
279 for (unsigned p = 0; p < fMetadata.fProxyCount; ++p) {
280 func(fViewCountPairs[p].fProxy.get(), GrMipmapped(mipped));
281 }
282 if (fDesc && fDesc->fProgramInfo) {
283 fDesc->fProgramInfo->visitFPProxies(func);
284 }
285 }
286
287 #ifdef SK_DEBUG
288 static void ValidateResourceLimits() {
289 // The op implementation has an upper bound on the number of quads that it can represent.
290 // However, the resource manager imposes its own limit on the number of quads, which should
291 // always be lower than the numerical limit this op can hold.
292 using CountStorage = decltype(Metadata::fTotalQuadCount);
293 CountStorage maxQuadCount = std::numeric_limits<CountStorage>::max();
294 // GrResourceProvider::Max...() is typed as int, so don't compare across signed/unsigned.
295 int resourceLimit = SkTo<int>(maxQuadCount);
296 SkASSERT(GrResourceProvider::MaxNumAAQuads() <= resourceLimit &&
297 GrResourceProvider::MaxNumNonAAQuads() <= resourceLimit);
298 }
299 #endif
300
301 GrProcessorSet::Analysis finalize(const GrCaps& caps, const GrAppliedClip*,
302 GrClampType clampType) override {
303 SkASSERT(fMetadata.colorType() == ColorType::kNone);
304 auto iter = fQuads.metadata();
305 while(iter.next()) {
306 auto colorType = skgpu::v1::QuadPerEdgeAA::MinColorType(iter->fColor);
307 colorType = std::max(static_cast<ColorType>(fMetadata.fColorType),
308 colorType);
309 if (caps.reducedShaderMode()) {
310 colorType = std::max(colorType, ColorType::kByte);
311 }
312 fMetadata.fColorType = static_cast<uint16_t>(colorType);
313 }
314 return GrProcessorSet::EmptySetAnalysis();
315 }
316
317 FixedFunctionFlags fixedFunctionFlags() const override {
318 #ifdef SK_ENABLE_STENCIL_CULLING_OHOS
319 if (fStencilRef != UINT32_MAX) {
320 return FixedFunctionFlags::kUsesStencil;
321 }
322 #endif
323 return fMetadata.aaType() == GrAAType::kMSAA ? FixedFunctionFlags::kUsesHWAA
324 : FixedFunctionFlags::kNone;
325 }
326
327 #ifdef SK_ENABLE_STENCIL_CULLING_OHOS
328 bool isStencilCullingOp() override {
329 return fStencilRef != UINT32_MAX;
330 }
331 #endif
332
333 DEFINE_OP_CLASS_ID
334
335 private:
336 friend class ::GrOp;
337
338 struct ColorSubsetAndAA {
339 ColorSubsetAndAA(const SkPMColor4f& color, const SkRect& subsetRect, GrQuadAAFlags aaFlags)
340 : fColor(color)
341 , fSubsetRect(subsetRect)
342 , fAAFlags(static_cast<uint16_t>(aaFlags)) {
343 SkASSERT(fAAFlags == static_cast<uint16_t>(aaFlags));
344 }
345
346 SkPMColor4f fColor;
347 // If the op doesn't use subsets, this is ignored. If the op uses subsets and the specific
348 // entry does not, this rect will equal kLargeRect, so it automatically has no effect.
349 SkRect fSubsetRect;
350 unsigned fAAFlags : 4;
351
352 GrQuadAAFlags aaFlags() const { return static_cast<GrQuadAAFlags>(fAAFlags); }
353 };
354
355 struct ViewCountPair {
356 // Normally this would be a GrSurfaceProxyView, but TextureOp applies the GrOrigin right
357 // away so it doesn't need to be stored, and all ViewCountPairs in an op have the same
358 // swizzle so that is stored in the op metadata.
359 sk_sp<GrSurfaceProxy> fProxy;
360 int fQuadCnt;
361 };
362
363 // TextureOp and ViewCountPair are 8-byte aligned. This struct is packed into 8 bytes to
364 // minimally increase the size of the op; increasing the op size can have a surprising impact
365 // on performance (since texture ops are among the most commonly used ops in an app).
366 struct Metadata {
367 // AAType must be filled after initialization; ColorType is determined in finalize()
368 Metadata(const GrSwizzle& swizzle,
369 GrSamplerState::Filter filter,
370 GrSamplerState::MipmapMode mm,
371 Subset subset,
372 Saturate saturate)
373 : fSwizzle(swizzle)
374 , fProxyCount(1)
375 , fTotalQuadCount(1)
376 , fFilter(static_cast<uint16_t>(filter))
377 , fMipmapMode(static_cast<uint16_t>(mm))
378 , fAAType(static_cast<uint16_t>(GrAAType::kNone))
379 , fColorType(static_cast<uint16_t>(ColorType::kNone))
380 , fSubset(static_cast<uint16_t>(subset))
381 , fSaturate(static_cast<uint16_t>(saturate)) {}
382
383 GrSwizzle fSwizzle; // sizeof(GrSwizzle) == uint16_t
384 uint16_t fProxyCount;
385 // This will be >= fProxyCount, since a proxy may be drawn multiple times
386 uint16_t fTotalQuadCount;
387
388 // These must be based on uint16_t to help MSVC pack the bitfields optimally
389 uint16_t fFilter : 2; // GrSamplerState::Filter
390 uint16_t fMipmapMode : 2; // GrSamplerState::MipmapMode
391 uint16_t fAAType : 2; // GrAAType
392 uint16_t fColorType : 2; // GrQuadPerEdgeAA::ColorType
393 uint16_t fSubset : 1; // bool
394 uint16_t fSaturate : 1; // bool
395 uint16_t fUnused : 6; // # of bits left before Metadata exceeds 8 bytes
396
397 GrSamplerState::Filter filter() const {
398 return static_cast<GrSamplerState::Filter>(fFilter);
399 }
400 GrSamplerState::MipmapMode mipmapMode() const {
401 return static_cast<GrSamplerState::MipmapMode>(fMipmapMode);
402 }
403 GrAAType aaType() const { return static_cast<GrAAType>(fAAType); }
404 ColorType colorType() const { return static_cast<ColorType>(fColorType); }
405 Subset subset() const { return static_cast<Subset>(fSubset); }
406 Saturate saturate() const { return static_cast<Saturate>(fSaturate); }
407
408 static_assert(GrSamplerState::kFilterCount <= 4);
409 static_assert(kGrAATypeCount <= 4);
410 static_assert(skgpu::v1::QuadPerEdgeAA::kColorTypeCount <= 4);
411 };
412 static_assert(sizeof(Metadata) == 8);
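// The 8 bytes break down as: 2 bytes of GrSwizzle + 2 bytes of fProxyCount + 2 bytes of
// fTotalQuadCount + 2 bytes for the six bit-fields (2+2+2+2+1+1+6 = 16 bits), assuming the
// compiler packs the uint16_t-based bit-fields into a single 16-bit unit, which the
// static_assert above verifies.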
413
414 // This descriptor is used to store the draw info we decide on during on(Pre)PrepareDraws. We
415 // store the data in a separate struct in order to minimize the size of the TextureOp.
416 // Historically, increasing the TextureOp's size has caused surprising perf regressions, but we
417 // may want to re-evaluate whether this is still necessary.
418 //
419 // In the onPrePrepareDraws case it is allocated in the creation-time opData arena, and
420 // allocatePrePreparedVertices is also called.
421 //
422 // In the onPrepareDraws case this descriptor is allocated in the flush-time arena (i.e., as
423 // part of the flushState).
424 struct Desc {
425 VertexSpec fVertexSpec;
426 int fNumProxies = 0;
427 int fNumTotalQuads = 0;
428
429 // This member variable is only used by 'onPrePrepareDraws'.
430 char* fPrePreparedVertices = nullptr;
431
432 GrProgramInfo* fProgramInfo = nullptr;
433
434 sk_sp<const GrBuffer> fIndexBuffer;
435 sk_sp<const GrBuffer> fVertexBuffer;
436 int fBaseVertex;
437
438 // How big should 'fVertices' be to hold all the vertex data?
439 size_t totalSizeInBytes() const {
440 return this->totalNumVertices() * fVertexSpec.vertexSize();
441 }
442
443 int totalNumVertices() const {
444 return fNumTotalQuads * fVertexSpec.verticesPerQuad();
445 }
446
447 void allocatePrePreparedVertices(SkArenaAlloc* arena) {
448 fPrePreparedVertices = arena->makeArrayDefault<char>(this->totalSizeInBytes());
449 }
450 };
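// Sizing illustration (hypothetical numbers): with 100 total quads, 4 vertices per quad in
// the chosen VertexSpec, and a 48-byte vertex stride, totalNumVertices() is 400 and
// totalSizeInBytes() is 19,200 bytes, which is exactly what allocatePrePreparedVertices()
// reserves in the record-time arena.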
451 // If subsetRect is not null it will be used to apply a strict src rect-style constraint.
452 TextureOpImpl(GrSurfaceProxyView proxyView,
453 sk_sp<GrColorSpaceXform> textureColorSpaceXform,
454 GrSamplerState::Filter filter,
455 GrSamplerState::MipmapMode mm,
456 const SkPMColor4f& color,
457 Saturate saturate,
458 GrAAType aaType,
459 DrawQuad* quad,
460 #ifdef SK_ENABLE_STENCIL_CULLING_OHOS
461 const SkRect* subsetRect,
462 uint32_t stencilRef = UINT32_MAX)
463 : INHERITED(ClassID())
464 , fQuads(1, true /* includes locals */)
465 , fTextureColorSpaceXform(std::move(textureColorSpaceXform))
466 , fDesc(nullptr)
467 , fMetadata(proxyView.swizzle(), filter, mm, Subset(!!subsetRect), saturate)
468 , fStencilRef(stencilRef) {
469 #else
470 const SkRect* subsetRect)
471 : INHERITED(ClassID())
472 , fQuads(1, true /* includes locals */)
473 , fTextureColorSpaceXform(std::move(textureColorSpaceXform))
474 , fDesc(nullptr)
475 , fMetadata(proxyView.swizzle(), filter, mm, Subset(!!subsetRect), saturate) {
476 #endif
477 // Clean up disparities between the overall aa type and edge configuration and apply
478 // optimizations based on the rect and matrix when appropriate
479 GrQuadUtils::ResolveAAType(aaType, quad->fEdgeFlags, quad->fDevice,
480 &aaType, &quad->fEdgeFlags);
481 fMetadata.fAAType = static_cast<uint16_t>(aaType);
482
483 // We expect our caller to have already caught this optimization.
484 SkASSERT(!subsetRect ||
485 !subsetRect->contains(proxyView.proxy()->backingStoreBoundsRect()));
486
487 // We may have had a strict constraint with nearest filter solely due to possible AA bloat.
488 // Try to identify cases where the subsetting isn't actually necessary, and skip it.
489 if (subsetRect) {
490 if (safe_to_ignore_subset_rect(aaType, filter, *quad, *subsetRect)) {
491 subsetRect = nullptr;
492 fMetadata.fSubset = static_cast<uint16_t>(Subset::kNo);
493 }
494 }
495
496 // Normalize src coordinates and the subset (if set)
497 NormalizationParams params = proxy_normalization_params(proxyView.proxy(),
498 proxyView.origin());
499 normalize_src_quad(params, &quad->fLocal);
500 SkRect subset = normalize_and_inset_subset(filter, params, subsetRect);
501
502 // Set bounds before clipping so we don't have to worry about unioning the bounds of
503 // the two potential quads (GrQuad::bounds() is perspective-safe).
504 bool hairline = GrQuadUtils::WillUseHairline(quad->fDevice, aaType, quad->fEdgeFlags);
505 this->setBounds(quad->fDevice.bounds(), HasAABloat(aaType == GrAAType::kCoverage),
506 hairline ? IsHairline::kYes : IsHairline::kNo);
507 int quadCount = this->appendQuad(quad, color, subset);
508 fViewCountPairs[0] = {proxyView.detachProxy(), quadCount};
509 }
510
511 TextureOpImpl(GrTextureSetEntry set[],
512 int cnt,
513 int proxyRunCnt,
514 const GrSamplerState::Filter filter,
515 const GrSamplerState::MipmapMode mm,
516 const Saturate saturate,
517 const GrAAType aaType,
518 const SkCanvas::SrcRectConstraint constraint,
519 const SkMatrix& viewMatrix,
520 sk_sp<GrColorSpaceXform> textureColorSpaceXform)
521 : INHERITED(ClassID())
522 , fQuads(cnt, true /* includes locals */)
523 , fTextureColorSpaceXform(std::move(textureColorSpaceXform))
524 , fDesc(nullptr)
525 , fMetadata(set[0].fProxyView.swizzle(),
526 GrSamplerState::Filter::kNearest,
527 GrSamplerState::MipmapMode::kNone,
528 Subset::kNo,
529 saturate) {
530 // Update counts to reflect the batch op
531 fMetadata.fProxyCount = SkToUInt(proxyRunCnt);
532 fMetadata.fTotalQuadCount = SkToUInt(cnt);
533
534 SkRect bounds = SkRectPriv::MakeLargestInverted();
535
536 GrAAType netAAType = GrAAType::kNone; // aa type maximally compatible with all dst rects
537 Subset netSubset = Subset::kNo;
538 GrSamplerState::Filter netFilter = GrSamplerState::Filter::kNearest;
539 GrSamplerState::MipmapMode netMM = GrSamplerState::MipmapMode::kNone;
540 bool hasSubpixel = false;
541
542 const GrSurfaceProxy* curProxy = nullptr;
543
544 // 'q' is the index in 'set' and fQuadBuffer; 'p' is the index in fViewCountPairs and only
545 // increases when set[q]'s proxy changes.
546 int p = 0;
547 for (int q = 0; q < cnt; ++q) {
548 SkASSERT(mm == GrSamplerState::MipmapMode::kNone ||
549 (set[0].fProxyView.proxy()->asTextureProxy()->mipmapped() ==
550 GrMipmapped::kYes));
551 if (q == 0) {
552 // We do not placement new the first ViewCountPair since that one is allocated and
553 // initialized as part of the TextureOp creation.
554 fViewCountPairs[0].fProxy = set[0].fProxyView.detachProxy();
555 fViewCountPairs[0].fQuadCnt = 0;
556 curProxy = fViewCountPairs[0].fProxy.get();
557 } else if (set[q].fProxyView.proxy() != curProxy) {
558 // We must placement new the ViewCountPairs here so that the sk_sps in the
559 // GrSurfaceProxyView get initialized properly.
560 new(&fViewCountPairs[++p])ViewCountPair({set[q].fProxyView.detachProxy(), 0});
561
562 curProxy = fViewCountPairs[p].fProxy.get();
563 SkASSERT(GrTextureProxy::ProxiesAreCompatibleAsDynamicState(
564 curProxy, fViewCountPairs[0].fProxy.get()));
565 SkASSERT(fMetadata.fSwizzle == set[q].fProxyView.swizzle());
566 } // else another quad referencing the same proxy
567
568 SkMatrix ctm = viewMatrix;
569 if (set[q].fPreViewMatrix) {
570 ctm.preConcat(*set[q].fPreViewMatrix);
571 }
572
573 // Use dstRect/srcRect unless dstClip is provided, in which case derive new source
574 // coordinates by mapping dstClipQuad by the dstRect to srcRect transform.
575 DrawQuad quad;
576 if (set[q].fDstClipQuad) {
577 quad.fDevice = GrQuad::MakeFromSkQuad(set[q].fDstClipQuad, ctm);
578
579 SkPoint srcPts[4];
580 GrMapRectPoints(set[q].fDstRect, set[q].fSrcRect, set[q].fDstClipQuad, srcPts, 4);
581 quad.fLocal = GrQuad::MakeFromSkQuad(srcPts, SkMatrix::I());
582 } else {
583 quad.fDevice = GrQuad::MakeFromRect(set[q].fDstRect, ctm);
584 quad.fLocal = GrQuad(set[q].fSrcRect);
585 }
586
587 // This may be reduced per-quad from the requested aggregate filtering level, and used
588 // to determine if the subset is needed for the entry as well.
589 GrSamplerState::Filter filterForQuad = filter;
590 if (netFilter != filter || netMM != mm) {
591 // The only way netFilter != filter is if linear is requested and we haven't yet
592 // found a quad that requires linear (so net is still nearest). Similar for mip
593 // mapping.
594 SkASSERT(filter == netFilter ||
595 (netFilter == GrSamplerState::Filter::kNearest && filter > netFilter));
596 SkASSERT(mm == netMM ||
597 (netMM == GrSamplerState::MipmapMode::kNone && mm > netMM));
598 auto [mustFilter, mustMM] = filter_and_mm_have_effect(quad.fLocal, quad.fDevice);
599 if (filter != GrSamplerState::Filter::kNearest) {
600 if (mustFilter) {
601 netFilter = filter; // upgrade batch to higher filter level
602 } else {
603 filterForQuad = GrSamplerState::Filter::kNearest; // downgrade entry
604 }
605 }
606 if (mustMM && mm != GrSamplerState::MipmapMode::kNone) {
607 netMM = mm;
608 }
609 }
610
611 // Determine the AA type for the quad, then merge with net AA type
612 GrAAType aaForQuad;
613 GrQuadUtils::ResolveAAType(aaType, set[q].fAAFlags, quad.fDevice,
614 &aaForQuad, &quad.fEdgeFlags);
615 // Update overall bounds of the op as the union of all quads
616 bounds.joinPossiblyEmptyRect(quad.fDevice.bounds());
617 hasSubpixel |= GrQuadUtils::WillUseHairline(quad.fDevice, aaForQuad, quad.fEdgeFlags);
618
619             // Resolve sets aaForQuad to aaType or None; there is never a change between aa methods
620 SkASSERT(aaForQuad == GrAAType::kNone || aaForQuad == aaType);
621 if (netAAType == GrAAType::kNone && aaForQuad != GrAAType::kNone) {
622 netAAType = aaType;
623 }
624
625 // Calculate metadata for the entry
626 const SkRect* subsetForQuad = nullptr;
627 if (constraint == SkCanvas::kStrict_SrcRectConstraint) {
628 // Check (briefly) if the subset rect is actually needed for this set entry.
629 SkRect* subsetRect = &set[q].fSrcRect;
630 if (!subsetRect->contains(curProxy->backingStoreBoundsRect())) {
631 if (!safe_to_ignore_subset_rect(aaForQuad, filterForQuad, quad, *subsetRect)) {
632 netSubset = Subset::kYes;
633 subsetForQuad = subsetRect;
634 }
635 }
636 }
637
638 // Normalize the src quads and apply origin
639 NormalizationParams proxyParams = proxy_normalization_params(
640 curProxy, set[q].fProxyView.origin());
641 normalize_src_quad(proxyParams, &quad.fLocal);
642
643 // This subset may represent a no-op, otherwise it will have the origin and dimensions
644 // of the texture applied to it.
645 SkRect subset = normalize_and_inset_subset(filter, proxyParams, subsetForQuad);
646
647             // Always append a quad (or 2 if perspective clipped); it just may refer back to a prior
648 // ViewCountPair (this frequently happens when Chrome draws 9-patches).
649 fViewCountPairs[p].fQuadCnt += this->appendQuad(&quad, set[q].fColor, subset);
650 }
651 // The # of proxy switches should match what was provided (+1 because we incremented p
652 // when a new proxy was encountered).
653 SkASSERT((p + 1) == fMetadata.fProxyCount);
654 SkASSERT(fQuads.count() == fMetadata.fTotalQuadCount);
655
656 fMetadata.fAAType = static_cast<uint16_t>(netAAType);
657 fMetadata.fFilter = static_cast<uint16_t>(netFilter);
658 fMetadata.fSubset = static_cast<uint16_t>(netSubset);
659
660 this->setBounds(bounds, HasAABloat(netAAType == GrAAType::kCoverage),
661 hasSubpixel ? IsHairline::kYes : IsHairline::kNo);
662 }
663
664 int appendQuad(DrawQuad* quad, const SkPMColor4f& color, const SkRect& subset) {
665 DrawQuad extra;
666 // Always clip to W0 to stay consistent with GrQuad::bounds
667 int quadCount = GrQuadUtils::ClipToW0(quad, &extra);
668 if (quadCount == 0) {
669 // We can't discard the op at this point, but disable AA flags so it won't go through
670 // inset/outset processing
671 quad->fEdgeFlags = GrQuadAAFlags::kNone;
672 quadCount = 1;
673 }
674 fQuads.append(quad->fDevice, {color, subset, quad->fEdgeFlags}, &quad->fLocal);
675 if (quadCount > 1) {
676 fQuads.append(extra.fDevice, {color, subset, extra.fEdgeFlags}, &extra.fLocal);
677 fMetadata.fTotalQuadCount++;
678 }
679 return quadCount;
680 }
681
682 GrProgramInfo* programInfo() override {
683         // Although this Op implements its own onPrePrepareDraws it calls GrMeshDrawOp's version so
684 // this entry point will be called.
685 return (fDesc) ? fDesc->fProgramInfo : nullptr;
686 }
687
688 void onCreateProgramInfo(const GrCaps* caps,
689 SkArenaAlloc* arena,
690 const GrSurfaceProxyView& writeView,
691 bool usesMSAASurface,
692 GrAppliedClip&& appliedClip,
693 const GrDstProxyView& dstProxyView,
694 GrXferBarrierFlags renderPassXferBarriers,
695 GrLoadOp colorLoadOp) override {
696 SkASSERT(fDesc);
697
698 GrGeometryProcessor* gp;
699
700 {
701 const GrBackendFormat& backendFormat =
702 fViewCountPairs[0].fProxy->backendFormat();
703
704 GrSamplerState samplerState = GrSamplerState(GrSamplerState::WrapMode::kClamp,
705 fMetadata.filter());
706
707 gp = skgpu::v1::QuadPerEdgeAA::MakeTexturedProcessor(
708 arena, fDesc->fVertexSpec, *caps->shaderCaps(), backendFormat, samplerState,
709 fMetadata.fSwizzle, std::move(fTextureColorSpaceXform), fMetadata.saturate());
710
711 SkASSERT(fDesc->fVertexSpec.vertexSize() == gp->vertexStride());
712 }
713 #ifdef SK_ENABLE_STENCIL_CULLING_OHOS
714 const GrUserStencilSettings* st = &GrUserStencilSettings::kUnused;
715 if (fStencilRef != UINT32_MAX && !fShouldDisableStencilCulling && fStencilRef < kStencilLayersMax) {
716 TRACE_EVENT0("skia.gpu", "StencilCullingOpt TextureOpImpl::onCreateProgramInfo with stencil");
717 st = GrUserStencilSettings::kGE[fStencilRef];
718 }
719 fDesc->fProgramInfo = GrSimpleMeshDrawOpHelper::CreateProgramInfo(
720 caps, arena, writeView, usesMSAASurface, std::move(appliedClip), dstProxyView, gp,
721 GrProcessorSet::MakeEmptySet(), fDesc->fVertexSpec.primitiveType(),
722 renderPassXferBarriers, colorLoadOp, GrPipeline::InputFlags::kNone, st);
723 #else
724 fDesc->fProgramInfo = GrSimpleMeshDrawOpHelper::CreateProgramInfo(
725 caps, arena, writeView, usesMSAASurface, std::move(appliedClip), dstProxyView, gp,
726 GrProcessorSet::MakeEmptySet(), fDesc->fVertexSpec.primitiveType(),
727 renderPassXferBarriers, colorLoadOp, GrPipeline::InputFlags::kNone);
728 #endif
729 }
730
731 void onPrePrepareDraws(GrRecordingContext* context,
732 const GrSurfaceProxyView& writeView,
733 GrAppliedClip* clip,
734 const GrDstProxyView& dstProxyView,
735 GrXferBarrierFlags renderPassXferBarriers,
736 GrLoadOp colorLoadOp) override {
737 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
738
739 SkDEBUGCODE(this->validate();)
740 SkASSERT(!fDesc);
741
742 SkArenaAlloc* arena = context->priv().recordTimeAllocator();
743
744 fDesc = arena->make<Desc>();
745 this->characterize(fDesc);
746 fDesc->allocatePrePreparedVertices(arena);
747 FillInVertices(*context->priv().caps(), this, fDesc, fDesc->fPrePreparedVertices);
748
749 // This will call onCreateProgramInfo and register the created program with the DDL.
750 this->INHERITED::onPrePrepareDraws(context, writeView, clip, dstProxyView,
751 renderPassXferBarriers, colorLoadOp);
752 }
753
754 static void FillInVertices(const GrCaps& caps,
755 TextureOpImpl* texOp,
756 Desc* desc,
757 char* vertexData) {
758 SkASSERT(vertexData);
759
760 SkDEBUGCODE(int totQuadsSeen = 0;)
761 SkDEBUGCODE(int totVerticesSeen = 0;)
762 SkDEBUGCODE(const size_t vertexSize = desc->fVertexSpec.vertexSize());
763
764 skgpu::v1::QuadPerEdgeAA::Tessellator tessellator(desc->fVertexSpec, vertexData);
765 for (const auto& op : ChainRange<TextureOpImpl>(texOp)) {
766 auto iter = op.fQuads.iterator();
767 for (unsigned p = 0; p < op.fMetadata.fProxyCount; ++p) {
768 const int quadCnt = op.fViewCountPairs[p].fQuadCnt;
769 SkDEBUGCODE(int meshVertexCnt = quadCnt * desc->fVertexSpec.verticesPerQuad());
770
771 for (int i = 0; i < quadCnt && iter.next(); ++i) {
772 SkASSERT(iter.isLocalValid());
773 const ColorSubsetAndAA& info = iter.metadata();
774
775 tessellator.append(iter.deviceQuad(), iter.localQuad(), info.fColor,
776 info.fSubsetRect, info.aaFlags());
777 }
778
779 SkASSERT((totVerticesSeen + meshVertexCnt) * vertexSize
780 == (size_t)(tessellator.vertices() - vertexData));
781
782 SkDEBUGCODE(totQuadsSeen += quadCnt;)
783 SkDEBUGCODE(totVerticesSeen += meshVertexCnt);
784 SkASSERT(totQuadsSeen * desc->fVertexSpec.verticesPerQuad() == totVerticesSeen);
785 }
786
787 // If quad counts per proxy were calculated correctly, the entire iterator
788 // should have been consumed.
789 SkASSERT(!iter.next());
790 }
791
792 SkASSERT(desc->totalSizeInBytes() == (size_t)(tessellator.vertices() - vertexData));
793 SkASSERT(totQuadsSeen == desc->fNumTotalQuads);
794 SkASSERT(totVerticesSeen == desc->totalNumVertices());
795 }
796
797 #ifdef SK_DEBUG
798 static int validate_op(GrTextureType textureType,
799 GrAAType aaType,
800 GrSwizzle swizzle,
801 const TextureOpImpl* op) {
802 SkASSERT(op->fMetadata.fSwizzle == swizzle);
803
804 int quadCount = 0;
805 for (unsigned p = 0; p < op->fMetadata.fProxyCount; ++p) {
806 auto* proxy = op->fViewCountPairs[p].fProxy->asTextureProxy();
807 quadCount += op->fViewCountPairs[p].fQuadCnt;
808 SkASSERT(proxy);
809 SkASSERT(proxy->textureType() == textureType);
810 }
811
812 SkASSERT(aaType == op->fMetadata.aaType());
813 return quadCount;
814 }
815
816 void validate() const override {
817 // NOTE: Since this is debug-only code, we use the virtual asTextureProxy()
818 auto textureType = fViewCountPairs[0].fProxy->asTextureProxy()->textureType();
819 GrAAType aaType = fMetadata.aaType();
820 GrSwizzle swizzle = fMetadata.fSwizzle;
821
822 int quadCount = validate_op(textureType, aaType, swizzle, this);
823
824 for (const GrOp* tmp = this->prevInChain(); tmp; tmp = tmp->prevInChain()) {
825 quadCount += validate_op(textureType, aaType, swizzle,
826 static_cast<const TextureOpImpl*>(tmp));
827 }
828
829 for (const GrOp* tmp = this->nextInChain(); tmp; tmp = tmp->nextInChain()) {
830 quadCount += validate_op(textureType, aaType, swizzle,
831 static_cast<const TextureOpImpl*>(tmp));
832 }
833
834 SkASSERT(quadCount == this->numChainedQuads());
835 }
836
837 #endif
838
839 #if GR_TEST_UTILS
840 int numQuads() const final { return this->totNumQuads(); }
841 #endif
842
843 void characterize(Desc* desc) const {
844 SkDEBUGCODE(this->validate();)
845
846 GrQuad::Type quadType = GrQuad::Type::kAxisAligned;
847 ColorType colorType = ColorType::kNone;
848 GrQuad::Type srcQuadType = GrQuad::Type::kAxisAligned;
849 Subset subset = Subset::kNo;
850 GrAAType overallAAType = fMetadata.aaType();
851
852 desc->fNumProxies = 0;
853 desc->fNumTotalQuads = 0;
854 int maxQuadsPerMesh = 0;
855
856 for (const auto& op : ChainRange<TextureOpImpl>(this)) {
857 if (op.fQuads.deviceQuadType() > quadType) {
858 quadType = op.fQuads.deviceQuadType();
859 }
860 if (op.fQuads.localQuadType() > srcQuadType) {
861 srcQuadType = op.fQuads.localQuadType();
862 }
863 if (op.fMetadata.subset() == Subset::kYes) {
864 subset = Subset::kYes;
865 }
866 colorType = std::max(colorType, op.fMetadata.colorType());
867 desc->fNumProxies += op.fMetadata.fProxyCount;
868
869 for (unsigned p = 0; p < op.fMetadata.fProxyCount; ++p) {
870 maxQuadsPerMesh = std::max(op.fViewCountPairs[p].fQuadCnt, maxQuadsPerMesh);
871 }
872 desc->fNumTotalQuads += op.totNumQuads();
873
874 if (op.fMetadata.aaType() == GrAAType::kCoverage) {
875 overallAAType = GrAAType::kCoverage;
876 }
877 }
878
879 SkASSERT(desc->fNumTotalQuads == this->numChainedQuads());
880
881 SkASSERT(!CombinedQuadCountWillOverflow(overallAAType, false, desc->fNumTotalQuads));
882
883 auto indexBufferOption = skgpu::v1::QuadPerEdgeAA::CalcIndexBufferOption(overallAAType,
884 maxQuadsPerMesh);
885
886 desc->fVertexSpec = VertexSpec(quadType, colorType, srcQuadType, /* hasLocal */ true,
887 subset, overallAAType, /* alpha as coverage */ true,
888 indexBufferOption);
889
890 SkASSERT(desc->fNumTotalQuads <= skgpu::v1::QuadPerEdgeAA::QuadLimit(indexBufferOption));
891 }
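// For example (hypothetical chain): if one chained op holds axis-aligned, non-subset quads
// with byte colors and another holds perspective quads with a subset, the merged Desc uses
// the perspective device quad type, Subset::kYes, and the wider color type, since the spec
// inputs are accumulated as a max/union across the whole ChainRange above.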
892
893 int totNumQuads() const {
894 #ifdef SK_DEBUG
895 int tmp = 0;
896 for (unsigned p = 0; p < fMetadata.fProxyCount; ++p) {
897 tmp += fViewCountPairs[p].fQuadCnt;
898 }
899 SkASSERT(tmp == fMetadata.fTotalQuadCount);
900 #endif
901
902 return fMetadata.fTotalQuadCount;
903 }
904
905 int numChainedQuads() const {
906 int numChainedQuads = this->totNumQuads();
907
908 for (const GrOp* tmp = this->prevInChain(); tmp; tmp = tmp->prevInChain()) {
909 numChainedQuads += ((const TextureOpImpl*)tmp)->totNumQuads();
910 }
911
912 for (const GrOp* tmp = this->nextInChain(); tmp; tmp = tmp->nextInChain()) {
913 numChainedQuads += ((const TextureOpImpl*)tmp)->totNumQuads();
914 }
915
916 return numChainedQuads;
917 }
918
919 // onPrePrepareDraws may or may not have been called at this point
920 void onPrepareDraws(GrMeshDrawTarget* target) override {
921 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
922
923 SkDEBUGCODE(this->validate();)
924
925 SkASSERT(!fDesc || fDesc->fPrePreparedVertices);
926
927 if (!fDesc) {
928 SkArenaAlloc* arena = target->allocator();
929 fDesc = arena->make<Desc>();
930 this->characterize(fDesc);
931 SkASSERT(!fDesc->fPrePreparedVertices);
932 }
933
934 size_t vertexSize = fDesc->fVertexSpec.vertexSize();
935
936 void* vdata = target->makeVertexSpace(vertexSize, fDesc->totalNumVertices(),
937 &fDesc->fVertexBuffer, &fDesc->fBaseVertex);
938 if (!vdata) {
939 SkDebugf("Could not allocate vertices\n");
940 return;
941 }
942
943 if (fDesc->fVertexSpec.needsIndexBuffer()) {
944 fDesc->fIndexBuffer = skgpu::v1::QuadPerEdgeAA::GetIndexBuffer(
945 target, fDesc->fVertexSpec.indexBufferOption());
946 if (!fDesc->fIndexBuffer) {
947 SkDebugf("Could not allocate indices\n");
948 return;
949 }
950 }
951
952 if (fDesc->fPrePreparedVertices) {
953 memcpy(vdata, fDesc->fPrePreparedVertices, fDesc->totalSizeInBytes());
954 } else {
955 FillInVertices(target->caps(), this, fDesc, (char*) vdata);
956 }
957 }
958
959 void onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) override {
960 if (!fDesc->fVertexBuffer) {
961 return;
962 }
963
964 if (fDesc->fVertexSpec.needsIndexBuffer() && !fDesc->fIndexBuffer) {
965 return;
966 }
967
968 if (!fDesc->fProgramInfo) {
969 this->createProgramInfo(flushState);
970 SkASSERT(fDesc->fProgramInfo);
971 }
972
973 flushState->bindPipelineAndScissorClip(*fDesc->fProgramInfo, chainBounds);
974 flushState->bindBuffers(std::move(fDesc->fIndexBuffer), nullptr,
975 std::move(fDesc->fVertexBuffer));
976
977 int totQuadsSeen = 0;
978 SkDEBUGCODE(int numDraws = 0;)
979 for (const auto& op : ChainRange<TextureOpImpl>(this)) {
980 for (unsigned p = 0; p < op.fMetadata.fProxyCount; ++p) {
981 const int quadCnt = op.fViewCountPairs[p].fQuadCnt;
982 SkASSERT(numDraws < fDesc->fNumProxies);
983 flushState->bindTextures(fDesc->fProgramInfo->geomProc(),
984 *op.fViewCountPairs[p].fProxy,
985 fDesc->fProgramInfo->pipeline());
986 skgpu::v1::QuadPerEdgeAA::IssueDraw(flushState->caps(), flushState->opsRenderPass(),
987 fDesc->fVertexSpec, totQuadsSeen, quadCnt,
988 fDesc->totalNumVertices(), fDesc->fBaseVertex);
989 totQuadsSeen += quadCnt;
990 SkDEBUGCODE(++numDraws;)
991 }
992 }
993
994 SkASSERT(totQuadsSeen == fDesc->fNumTotalQuads);
995 SkASSERT(numDraws == fDesc->fNumProxies);
996 }
997
998 void propagateCoverageAAThroughoutChain() {
999 fMetadata.fAAType = static_cast<uint16_t>(GrAAType::kCoverage);
1000
1001 for (GrOp* tmp = this->prevInChain(); tmp; tmp = tmp->prevInChain()) {
1002 auto tex = static_cast<TextureOpImpl*>(tmp);
1003 SkASSERT(tex->fMetadata.aaType() == GrAAType::kCoverage ||
1004 tex->fMetadata.aaType() == GrAAType::kNone);
1005 tex->fMetadata.fAAType = static_cast<uint16_t>(GrAAType::kCoverage);
1006 }
1007
1008 for (GrOp* tmp = this->nextInChain(); tmp; tmp = tmp->nextInChain()) {
1009 auto tex = static_cast<TextureOpImpl*>(tmp);
1010 SkASSERT(tex->fMetadata.aaType() == GrAAType::kCoverage ||
1011 tex->fMetadata.aaType() == GrAAType::kNone);
1012 tex->fMetadata.fAAType = static_cast<uint16_t>(GrAAType::kCoverage);
1013 }
1014 }
1015
1016 CombineResult onCombineIfPossible(GrOp* t, SkArenaAlloc*, const GrCaps& caps) override {
1017 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
1018 auto that = t->cast<TextureOpImpl>();
1019
1020 SkDEBUGCODE(this->validate();)
1021 SkDEBUGCODE(that->validate();)
1022
1023 if (fDesc || that->fDesc) {
1024 // This should never happen (since only DDL recorded ops should be prePrepared)
1025             // but, in any case, we should never combine ops that have been prePrepared
1026 return CombineResult::kCannotCombine;
1027 }
1028
1029 if (fMetadata.subset() != that->fMetadata.subset()) {
1030 // It is technically possible to combine operations across subset modes, but performance
1031 // testing suggests it's better to make more draw calls where some take advantage of
1032 // the more optimal shader path without coordinate clamping.
1033 return CombineResult::kCannotCombine;
1034 }
1035 if (!GrColorSpaceXform::Equals(fTextureColorSpaceXform.get(),
1036 that->fTextureColorSpaceXform.get())) {
1037 return CombineResult::kCannotCombine;
1038 }
1039
1040 bool upgradeToCoverageAAOnMerge = false;
1041 if (fMetadata.aaType() != that->fMetadata.aaType()) {
1042 if (!CanUpgradeAAOnMerge(fMetadata.aaType(), that->fMetadata.aaType())) {
1043 return CombineResult::kCannotCombine;
1044 }
1045 upgradeToCoverageAAOnMerge = true;
1046 }
1047
1048 if (CombinedQuadCountWillOverflow(fMetadata.aaType(), upgradeToCoverageAAOnMerge,
1049 this->numChainedQuads() + that->numChainedQuads())) {
1050 return CombineResult::kCannotCombine;
1051 }
1052
1053 if (fMetadata.saturate() != that->fMetadata.saturate()) {
1054 return CombineResult::kCannotCombine;
1055 }
1056 if (fMetadata.filter() != that->fMetadata.filter()) {
1057 return CombineResult::kCannotCombine;
1058 }
1059 if (fMetadata.mipmapMode() != that->fMetadata.mipmapMode()) {
1060 return CombineResult::kCannotCombine;
1061 }
1062 if (fMetadata.fSwizzle != that->fMetadata.fSwizzle) {
1063 return CombineResult::kCannotCombine;
1064 }
1065 const auto* thisProxy = fViewCountPairs[0].fProxy.get();
1066 const auto* thatProxy = that->fViewCountPairs[0].fProxy.get();
1067 if (fMetadata.fProxyCount > 1 || that->fMetadata.fProxyCount > 1 ||
1068 thisProxy != thatProxy) {
1069 // We can't merge across different proxies. Check if 'this' can be chained with 'that'.
1070 if (GrTextureProxy::ProxiesAreCompatibleAsDynamicState(thisProxy, thatProxy) &&
1071 caps.dynamicStateArrayGeometryProcessorTextureSupport() &&
1072 fMetadata.aaType() == that->fMetadata.aaType()) {
1073 // We only allow chaining when the aaTypes match bc otherwise the AA type
1074 // reported by the chain can be inconsistent. That is, since chaining doesn't
1075 // propagate revised AA information throughout the chain, the head of the chain
1076 // could have an AA setting of kNone while the chain as a whole could have a
1077 // setting of kCoverage. This inconsistency would then interfere with the validity
1078 // of the CombinedQuadCountWillOverflow calls.
1079 // This problem doesn't occur w/ merging bc we do propagate the AA information
1080 // (in propagateCoverageAAThroughoutChain) below.
1081 return CombineResult::kMayChain;
1082 }
1083 return CombineResult::kCannotCombine;
1084 }
1085
1086 fMetadata.fSubset |= that->fMetadata.fSubset;
1087 fMetadata.fColorType = std::max(fMetadata.fColorType, that->fMetadata.fColorType);
1088
1089 // Concatenate quad lists together
1090 fQuads.concat(that->fQuads);
1091 fViewCountPairs[0].fQuadCnt += that->fQuads.count();
1092 fMetadata.fTotalQuadCount += that->fQuads.count();
1093
1094 if (upgradeToCoverageAAOnMerge) {
1095 // This merger may be the start of a concatenation of two chains. When one
1096 // of the chains mutates its AA the other must follow suit or else the above AA
1097 // check may prevent later ops from chaining together. A specific example of this is
1098 // when chain2 is prepended onto chain1:
1099 // chain1 (that): opA (non-AA/mergeable) opB (non-AA/non-mergeable)
1100 // chain2 (this): opC (cov-AA/non-mergeable) opD (cov-AA/mergeable)
1101 // W/o this propagation, after opD & opA merge, opB and opC would say they couldn't
1102 // chain - which would stop the concatenation process.
1103 this->propagateCoverageAAThroughoutChain();
1104 that->propagateCoverageAAThroughoutChain();
1105 }
1106
1107 SkDEBUGCODE(this->validate();)
1108
1109 return CombineResult::kMerged;
1110 }
1111
1112 #if GR_TEST_UTILS
1113 SkString onDumpInfo() const override {
1114 SkString str = SkStringPrintf("# draws: %d\n", fQuads.count());
1115 auto iter = fQuads.iterator();
1116 for (unsigned p = 0; p < fMetadata.fProxyCount; ++p) {
1117 SkString proxyStr = fViewCountPairs[p].fProxy->dump();
1118 str.append(proxyStr);
1119 str.appendf(", Filter: %d, MM: %d\n",
1120 static_cast<int>(fMetadata.fFilter),
1121 static_cast<int>(fMetadata.fMipmapMode));
1122 for (int i = 0; i < fViewCountPairs[p].fQuadCnt && iter.next(); ++i) {
1123 const GrQuad* quad = iter.deviceQuad();
1124 GrQuad uv = iter.isLocalValid() ? *(iter.localQuad()) : GrQuad();
1125 const ColorSubsetAndAA& info = iter.metadata();
1126 str.appendf(
1127 "%d: Color: 0x%08x, Subset(%d): [L: %.2f, T: %.2f, R: %.2f, B: %.2f]\n"
1128 " UVs [(%.2f, %.2f), (%.2f, %.2f), (%.2f, %.2f), (%.2f, %.2f)]\n"
1129 " Quad [(%.2f, %.2f), (%.2f, %.2f), (%.2f, %.2f), (%.2f, %.2f)]\n",
1130 i, info.fColor.toBytes_RGBA(), fMetadata.fSubset, info.fSubsetRect.fLeft,
1131 info.fSubsetRect.fTop, info.fSubsetRect.fRight, info.fSubsetRect.fBottom,
1132 quad->point(0).fX, quad->point(0).fY, quad->point(1).fX, quad->point(1).fY,
1133 quad->point(2).fX, quad->point(2).fY, quad->point(3).fX, quad->point(3).fY,
1134 uv.point(0).fX, uv.point(0).fY, uv.point(1).fX, uv.point(1).fY,
1135 uv.point(2).fX, uv.point(2).fY, uv.point(3).fX, uv.point(3).fY);
1136 }
1137 }
1138 return str;
1139 }
1140 #endif
1141
1142 GrQuadBuffer<ColorSubsetAndAA> fQuads;
1143 sk_sp<GrColorSpaceXform> fTextureColorSpaceXform;
1144     // Most state of TextureOp is packed into these two fields to minimize the op's size.
1145 // Historically, increasing the size of TextureOp has caused surprising perf regressions, so
1146 // consider/measure changes with care.
1147 Desc* fDesc;
1148 Metadata fMetadata;
1149
1150 // This field must go last. When allocating this op, we will allocate extra space to hold
1151 // additional ViewCountPairs immediately after the op's allocation so we can treat this
1152 // as an fProxyCnt-length array.
1153 ViewCountPair fViewCountPairs[1];
1154
1155 using INHERITED = GrMeshDrawOp;
1156 #ifdef SK_ENABLE_STENCIL_CULLING_OHOS
1157 // Stencil Culling use
1158 uint32_t fStencilRef = UINT32_MAX;
1159 #endif
1160 };
1161
1162 } // anonymous namespace
1163
1164 namespace skgpu::v1 {
1165
1166 #if GR_TEST_UTILS
1167 uint32_t TextureOp::ClassID() {
1168 return TextureOpImpl::ClassID();
1169 }
1170 #endif
1171
1172 GrOp::Owner TextureOp::Make(GrRecordingContext* context,
1173 GrSurfaceProxyView proxyView,
1174 SkAlphaType alphaType,
1175 sk_sp<GrColorSpaceXform> textureXform,
1176 GrSamplerState::Filter filter,
1177 GrSamplerState::MipmapMode mm,
1178 const SkPMColor4f& color,
1179 Saturate saturate,
1180 SkBlendMode blendMode,
1181 GrAAType aaType,
1182 DrawQuad* quad,
1183 #ifdef SK_ENABLE_STENCIL_CULLING_OHOS
1184 const SkRect* subset,
1185 uint32_t stencilRef) {
1186 #else
1187 const SkRect* subset) {
1188 #endif
1189 // Apply optimizations that are valid whether or not using TextureOp or FillRectOp
1190 if (subset && subset->contains(proxyView.proxy()->backingStoreBoundsRect())) {
1191 // No need for a shader-based subset if hardware clamping achieves the same effect
1192 subset = nullptr;
1193 }
1194
1195 if (filter != GrSamplerState::Filter::kNearest || mm != GrSamplerState::MipmapMode::kNone) {
1196 auto [mustFilter, mustMM] = filter_and_mm_have_effect(quad->fLocal, quad->fDevice);
1197 if (!mustFilter) {
1198 filter = GrSamplerState::Filter::kNearest;
1199 }
1200 if (!mustMM) {
1201 mm = GrSamplerState::MipmapMode::kNone;
1202 }
1203 }
1204
1205 if (blendMode == SkBlendMode::kSrcOver) {
1206 return TextureOpImpl::Make(context, std::move(proxyView), std::move(textureXform), filter,
1207 #ifdef SK_ENABLE_STENCIL_CULLING_OHOS
1208 mm, color, saturate, aaType, std::move(quad), subset, stencilRef);
1209 #else
1210 mm, color, saturate, aaType, std::move(quad), subset);
1211 #endif
1212 } else {
1213 // Emulate complex blending using FillRectOp
1214 GrSamplerState samplerState(GrSamplerState::WrapMode::kClamp, filter, mm);
1215 GrPaint paint;
1216 paint.setColor4f(color);
1217 paint.setXPFactory(SkBlendMode_AsXPFactory(blendMode));
1218
1219 std::unique_ptr<GrFragmentProcessor> fp;
1220 const auto& caps = *context->priv().caps();
1221 if (subset) {
1222 SkRect localRect;
1223 if (quad->fLocal.asRect(&localRect)) {
1224 fp = GrTextureEffect::MakeSubset(std::move(proxyView), alphaType, SkMatrix::I(),
1225 samplerState, *subset, localRect, caps);
1226 } else {
1227 fp = GrTextureEffect::MakeSubset(std::move(proxyView), alphaType, SkMatrix::I(),
1228 samplerState, *subset, caps);
1229 }
1230 } else {
1231 fp = GrTextureEffect::Make(std::move(proxyView), alphaType, SkMatrix::I(), samplerState,
1232 caps);
1233 }
1234 fp = GrColorSpaceXformEffect::Make(std::move(fp), std::move(textureXform));
1235 fp = GrBlendFragmentProcessor::Make(std::move(fp), nullptr, SkBlendMode::kModulate);
1236 if (saturate == Saturate::kYes) {
1237 fp = GrFragmentProcessor::ClampOutput(std::move(fp));
1238 }
1239 paint.setColorFragmentProcessor(std::move(fp));
1240 return FillRectOp::Make(context, std::move(paint), aaType, quad);
1241 }
1242 }
1243
1244 // A helper class that assists in breaking up bulk API quad draws into manageable chunks.
1245 class TextureOp::BatchSizeLimiter {
1246 public:
1247 BatchSizeLimiter(SurfaceDrawContext* sdc,
1248 const GrClip* clip,
1249 GrRecordingContext* rContext,
1250 int numEntries,
1251 GrSamplerState::Filter filter,
1252 GrSamplerState::MipmapMode mm,
1253 Saturate saturate,
1254 SkCanvas::SrcRectConstraint constraint,
1255 const SkMatrix& viewMatrix,
1256 sk_sp<GrColorSpaceXform> textureColorSpaceXform)
1257 : fSDC(sdc)
1258 , fClip(clip)
1259 , fContext(rContext)
1260 , fFilter(filter)
1261 , fMipmapMode(mm)
1262 , fSaturate(saturate)
1263 , fConstraint(constraint)
1264 , fViewMatrix(viewMatrix)
1265 , fTextureColorSpaceXform(textureColorSpaceXform)
1266 , fNumLeft(numEntries) {}
1267
1268 void createOp(GrTextureSetEntry set[], int clumpSize, GrAAType aaType) {
1269
1270 int clumpProxyCount = proxy_run_count(&set[fNumClumped], clumpSize);
1271 GrOp::Owner op = TextureOpImpl::Make(fContext,
1272 &set[fNumClumped],
1273 clumpSize,
1274 clumpProxyCount,
1275 fFilter,
1276 fMipmapMode,
1277 fSaturate,
1278 aaType,
1279 fConstraint,
1280 fViewMatrix,
1281 fTextureColorSpaceXform);
1282 fSDC->addDrawOp(fClip, std::move(op));
1283
1284 fNumLeft -= clumpSize;
1285 fNumClumped += clumpSize;
1286 }
1287
1288 int numLeft() const { return fNumLeft; }
1289 int baseIndex() const { return fNumClumped; }
1290
1291 private:
1292 SurfaceDrawContext* fSDC;
1293 const GrClip* fClip;
1294 GrRecordingContext* fContext;
1295 GrSamplerState::Filter fFilter;
1296 GrSamplerState::MipmapMode fMipmapMode;
1297 Saturate fSaturate;
1298 SkCanvas::SrcRectConstraint fConstraint;
1299 const SkMatrix& fViewMatrix;
1300 sk_sp<GrColorSpaceXform> fTextureColorSpaceXform;
1301
1302 int fNumLeft;
1303 int fNumClumped = 0; // also the offset for the start of the next clump
1304 };
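// Usage sketch (hypothetical limits, not the real GrResourceProvider values): if the non-AA
// index-buffer limit were 8 quads, a 10-entry non-AA set handled by the clumping loop below
// would be emitted via createOp() as two clumps of 8 and 2, with numLeft()/baseIndex()
// advancing by clumpSize after each call.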
1305
1306 // Greedily clump quad draws together until the index buffer limit is exceeded.
1307 void TextureOp::AddTextureSetOps(SurfaceDrawContext* sdc,
1308 const GrClip* clip,
1309 GrRecordingContext* context,
1310 GrTextureSetEntry set[],
1311 int cnt,
1312 int proxyRunCnt,
1313 GrSamplerState::Filter filter,
1314 GrSamplerState::MipmapMode mm,
1315 Saturate saturate,
1316 SkBlendMode blendMode,
1317 GrAAType aaType,
1318 SkCanvas::SrcRectConstraint constraint,
1319 const SkMatrix& viewMatrix,
1320 sk_sp<GrColorSpaceXform> textureColorSpaceXform) {
1321 // Ensure that the index buffer limits are lower than the proxy and quad count limits of
1322 // the op's metadata so we don't need to worry about overflow.
1323 SkDEBUGCODE(TextureOpImpl::ValidateResourceLimits();)
1324 SkASSERT(proxy_run_count(set, cnt) == proxyRunCnt);
1325
1326 // First check if we can support batches as a single op
1327 if (blendMode != SkBlendMode::kSrcOver ||
1328 !context->priv().caps()->dynamicStateArrayGeometryProcessorTextureSupport()) {
1329         // Append each entry as its own op; these may still be TextureOps if the blend mode is
1330 // src-over but the backend doesn't support dynamic state changes. Otherwise Make()
1331 // automatically creates the appropriate FillRectOp to emulate TextureOp.
1332 SkMatrix ctm;
1333 for (int i = 0; i < cnt; ++i) {
1334 ctm = viewMatrix;
1335 if (set[i].fPreViewMatrix) {
1336 ctm.preConcat(*set[i].fPreViewMatrix);
1337 }
1338
1339 DrawQuad quad;
1340 quad.fEdgeFlags = set[i].fAAFlags;
1341 if (set[i].fDstClipQuad) {
1342 quad.fDevice = GrQuad::MakeFromSkQuad(set[i].fDstClipQuad, ctm);
1343
1344 SkPoint srcPts[4];
1345 GrMapRectPoints(set[i].fDstRect, set[i].fSrcRect, set[i].fDstClipQuad, srcPts, 4);
1346 quad.fLocal = GrQuad::MakeFromSkQuad(srcPts, SkMatrix::I());
1347 } else {
1348 quad.fDevice = GrQuad::MakeFromRect(set[i].fDstRect, ctm);
1349 quad.fLocal = GrQuad(set[i].fSrcRect);
1350 }
1351
1352 const SkRect* subset = constraint == SkCanvas::kStrict_SrcRectConstraint
1353 ? &set[i].fSrcRect : nullptr;
1354
1355 auto op = Make(context, set[i].fProxyView, set[i].fSrcAlphaType, textureColorSpaceXform,
1356 filter, mm, set[i].fColor, saturate, blendMode, aaType, &quad, subset);
1357 sdc->addDrawOp(clip, std::move(op));
1358 }
1359 return;
1360 }
1361
1362 // Second check if we can always just make a single op and avoid the extra iteration
1363 // needed to clump things together.
1364 if (cnt <= std::min(GrResourceProvider::MaxNumNonAAQuads(),
1365 GrResourceProvider::MaxNumAAQuads())) {
1366 auto op = TextureOpImpl::Make(context, set, cnt, proxyRunCnt, filter, mm, saturate, aaType,
1367 constraint, viewMatrix, std::move(textureColorSpaceXform));
1368 sdc->addDrawOp(clip, std::move(op));
1369 return;
1370 }
1371
1372 BatchSizeLimiter state(sdc, clip, context, cnt, filter, mm, saturate, constraint, viewMatrix,
1373 std::move(textureColorSpaceXform));
1374
1375 // kNone and kMSAA never get altered
1376 if (aaType == GrAAType::kNone || aaType == GrAAType::kMSAA) {
1377         // Clump these into a series of MaxNumNonAAQuads-sized TextureOps
1378 while (state.numLeft() > 0) {
1379 int clumpSize = std::min(state.numLeft(), GrResourceProvider::MaxNumNonAAQuads());
1380
1381 state.createOp(set, clumpSize, aaType);
1382 }
1383 } else {
1384 // kCoverage can be downgraded to kNone. Note that the following is conservative. kCoverage
1385 // can also get downgraded to kNone if all the quads are on integer coordinates and
1386 // axis-aligned.
1387 SkASSERT(aaType == GrAAType::kCoverage);
1388
1389 while (state.numLeft() > 0) {
1390 GrAAType runningAA = GrAAType::kNone;
1391 bool clumped = false;
1392
1393 for (int i = 0; i < state.numLeft(); ++i) {
1394 int absIndex = state.baseIndex() + i;
1395
1396 if (set[absIndex].fAAFlags != GrQuadAAFlags::kNone ||
1397 runningAA == GrAAType::kCoverage) {
1398
1399 if (i >= GrResourceProvider::MaxNumAAQuads()) {
1400 // Here we either need to boost the AA type to kCoverage, but doing so with
1401 // all the accumulated quads would overflow, or we have a set of AA quads
1402 // that has just gotten too large. In either case, calve off the existing
1403 // quads as their own TextureOp.
1404 state.createOp(
1405 set,
1406 runningAA == GrAAType::kNone ? i : GrResourceProvider::MaxNumAAQuads(),
1407 runningAA); // maybe downgrading AA here
1408 clumped = true;
1409 break;
1410 }
1411
1412 runningAA = GrAAType::kCoverage;
1413 } else if (runningAA == GrAAType::kNone) {
1414
1415 if (i >= GrResourceProvider::MaxNumNonAAQuads()) {
1416 // Here we've found a consistent batch of non-AA quads that has gotten too
1417 // large. Calve it off as its own TextureOp.
1418 state.createOp(set, GrResourceProvider::MaxNumNonAAQuads(),
1419 GrAAType::kNone); // definitely downgrading AA here
1420 clumped = true;
1421 break;
1422 }
1423 }
1424 }
1425
1426 if (!clumped) {
1427 // We ran through the above loop w/o hitting a limit. Spit out this last clump of
1428 // quads and call it a day.
1429 state.createOp(set, state.numLeft(), runningAA); // maybe downgrading AA here
1430 }
1431 }
1432 }
1433 }
1434
1435 } // namespace skgpu::v1
1436
1437 #if GR_TEST_UTILS
1438 #include "include/gpu/GrRecordingContext.h"
1439 #include "src/gpu/GrProxyProvider.h"
1440 #include "src/gpu/GrRecordingContextPriv.h"
1441
1442 GR_DRAW_OP_TEST_DEFINE(TextureOpImpl) {
1443 SkISize dims;
1444 dims.fHeight = random->nextULessThan(90) + 10;
1445 dims.fWidth = random->nextULessThan(90) + 10;
1446 auto origin = random->nextBool() ? kTopLeft_GrSurfaceOrigin : kBottomLeft_GrSurfaceOrigin;
1447 GrMipmapped mipMapped = random->nextBool() ? GrMipmapped::kYes : GrMipmapped::kNo;
1448 SkBackingFit fit = SkBackingFit::kExact;
1449 if (mipMapped == GrMipmapped::kNo) {
1450 fit = random->nextBool() ? SkBackingFit::kApprox : SkBackingFit::kExact;
1451 }
1452 const GrBackendFormat format =
1453 context->priv().caps()->getDefaultBackendFormat(GrColorType::kRGBA_8888,
1454 GrRenderable::kNo);
1455 GrProxyProvider* proxyProvider = context->priv().proxyProvider();
1456 sk_sp<GrTextureProxy> proxy = proxyProvider->createProxy(
1457 format, dims, GrRenderable::kNo, 1, mipMapped, fit, SkBudgeted::kNo, GrProtected::kNo,
1458 GrInternalSurfaceFlags::kNone);
1459
1460 SkRect rect = GrTest::TestRect(random);
1461 SkRect srcRect;
1462 srcRect.fLeft = random->nextRangeScalar(0.f, proxy->width() / 2.f);
1463 srcRect.fRight = random->nextRangeScalar(0.f, proxy->width()) + proxy->width() / 2.f;
1464 srcRect.fTop = random->nextRangeScalar(0.f, proxy->height() / 2.f);
1465 srcRect.fBottom = random->nextRangeScalar(0.f, proxy->height()) + proxy->height() / 2.f;
1466 SkMatrix viewMatrix = GrTest::TestMatrixPreservesRightAngles(random);
1467 SkPMColor4f color = SkPMColor4f::FromBytes_RGBA(SkColorToPremulGrColor(random->nextU()));
1468 GrSamplerState::Filter filter = (GrSamplerState::Filter)random->nextULessThan(
1469 static_cast<uint32_t>(GrSamplerState::Filter::kLast) + 1);
1470 GrSamplerState::MipmapMode mm = GrSamplerState::MipmapMode::kNone;
1471 if (mipMapped == GrMipmapped::kYes) {
1472 mm = (GrSamplerState::MipmapMode)random->nextULessThan(
1473 static_cast<uint32_t>(GrSamplerState::MipmapMode::kLast) + 1);
1474 }
1475
1476 auto texXform = GrTest::TestColorXform(random);
1477 GrAAType aaType = GrAAType::kNone;
1478 if (random->nextBool()) {
1479 aaType = (numSamples > 1) ? GrAAType::kMSAA : GrAAType::kCoverage;
1480 }
1481 GrQuadAAFlags aaFlags = GrQuadAAFlags::kNone;
1482 aaFlags |= random->nextBool() ? GrQuadAAFlags::kLeft : GrQuadAAFlags::kNone;
1483 aaFlags |= random->nextBool() ? GrQuadAAFlags::kTop : GrQuadAAFlags::kNone;
1484 aaFlags |= random->nextBool() ? GrQuadAAFlags::kRight : GrQuadAAFlags::kNone;
1485 aaFlags |= random->nextBool() ? GrQuadAAFlags::kBottom : GrQuadAAFlags::kNone;
1486 bool useSubset = random->nextBool();
1487 auto saturate = random->nextBool() ? skgpu::v1::TextureOp::Saturate::kYes
1488 : skgpu::v1::TextureOp::Saturate::kNo;
1489 GrSurfaceProxyView proxyView(
1490 std::move(proxy), origin,
1491 context->priv().caps()->getReadSwizzle(format, GrColorType::kRGBA_8888));
1492 auto alphaType = static_cast<SkAlphaType>(
1493 random->nextRangeU(kUnknown_SkAlphaType + 1, kLastEnum_SkAlphaType));
1494
1495 DrawQuad quad = {GrQuad::MakeFromRect(rect, viewMatrix), GrQuad(srcRect), aaFlags};
1496 return skgpu::v1::TextureOp::Make(context, std::move(proxyView), alphaType,
1497 std::move(texXform), filter, mm, color, saturate,
1498 SkBlendMode::kSrcOver, aaType, &quad,
1499 useSubset ? &srcRect : nullptr);
1500 }
1501
1502 #endif // GR_TEST_UTILS
1503