1 /*
2 * Copyright 2017 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include <new>
9
10 #include "include/core/SkPoint.h"
11 #include "include/core/SkPoint3.h"
12 #include "include/gpu/GrRecordingContext.h"
13 #include "include/private/base/SkFloatingPoint.h"
14 #include "include/private/base/SkTo.h"
15 #include "src/base/SkMathPriv.h"
16 #include "src/core/SkBlendModePriv.h"
17 #include "src/core/SkMatrixPriv.h"
18 #include "src/core/SkRectPriv.h"
19 #include "src/gpu/ganesh/GrAppliedClip.h"
20 #include "src/gpu/ganesh/GrCaps.h"
21 #include "src/gpu/ganesh/GrDrawOpTest.h"
22 #include "src/gpu/ganesh/GrGeometryProcessor.h"
23 #include "src/gpu/ganesh/GrGpu.h"
24 #include "src/gpu/ganesh/GrMemoryPool.h"
25 #include "src/gpu/ganesh/GrOpFlushState.h"
26 #include "src/gpu/ganesh/GrOpsTypes.h"
27 #include "src/gpu/ganesh/GrRecordingContextPriv.h"
28 #include "src/gpu/ganesh/GrResourceProvider.h"
29 #include "src/gpu/ganesh/GrResourceProviderPriv.h"
30 #include "src/gpu/ganesh/GrShaderCaps.h"
31 #include "src/gpu/ganesh/GrTexture.h"
32 #include "src/gpu/ganesh/GrTextureProxy.h"
33 #include "src/gpu/ganesh/GrXferProcessor.h"
34 #include "src/gpu/ganesh/SkGr.h"
35 #include "src/gpu/ganesh/SurfaceDrawContext.h"
36 #include "src/gpu/ganesh/effects/GrBlendFragmentProcessor.h"
37 #include "src/gpu/ganesh/effects/GrTextureEffect.h"
38 #include "src/gpu/ganesh/geometry/GrQuad.h"
39 #include "src/gpu/ganesh/geometry/GrQuadBuffer.h"
40 #include "src/gpu/ganesh/geometry/GrQuadUtils.h"
41 #include "src/gpu/ganesh/geometry/GrRect.h"
42 #include "src/gpu/ganesh/glsl/GrGLSLVarying.h"
43 #include "src/gpu/ganesh/ops/FillRectOp.h"
44 #include "src/gpu/ganesh/ops/GrMeshDrawOp.h"
45 #include "src/gpu/ganesh/ops/GrSimpleMeshDrawOpHelper.h"
46 #include "src/gpu/ganesh/ops/QuadPerEdgeAA.h"
47 #include "src/gpu/ganesh/ops/TextureOp.h"
48
49 #if defined(GR_TEST_UTILS)
50 #include "src/gpu/ganesh/GrProxyProvider.h"
51 #endif
52
53 using namespace skgpu::ganesh;
54
55 namespace {
56
57 using Subset = skgpu::ganesh::QuadPerEdgeAA::Subset;
58 using VertexSpec = skgpu::ganesh::QuadPerEdgeAA::VertexSpec;
59 using ColorType = skgpu::ganesh::QuadPerEdgeAA::ColorType;
60
61 // Extracts lengths of vertical and horizontal edges of axis-aligned quad. "width" is the edge
62 // between v0 and v2 (or v1 and v3), "height" is the edge between v0 and v1 (or v2 and v3).
axis_aligned_quad_size(const GrQuad & quad)63 SkSize axis_aligned_quad_size(const GrQuad& quad) {
64 SkASSERT(quad.quadType() == GrQuad::Type::kAxisAligned);
65 // Simplification of regular edge length equation, since it's axis aligned and can avoid sqrt
66 float dw = std::fabs(quad.x(2) - quad.x(0)) + std::fabs(quad.y(2) - quad.y(0));
67 float dh = std::fabs(quad.x(1) - quad.x(0)) + std::fabs(quad.y(1) - quad.y(0));
68 return {dw, dh};
69 }
70
71 // Describes function for normalizing src coords: [x * iw, y * ih + yOffset] can represent
72 // regular and rectangular textures, w/ or w/o origin correction.
// Describes function for normalizing src coords: [x * iw, y * ih + yOffset] can represent
// regular and rectangular textures, w/ or w/o origin correction.
struct NormalizationParams {
    float fIW;      // 1 / width of texture, or 1.0 for texture rectangles
    float fInvH;    // 1 / height of texture, or 1.0 for tex rects, X -1 if bottom-left origin
    float fYOffset; // 0 for top-left origin, height of [normalized] tex if bottom-left
};
proxy_normalization_params(const GrSurfaceProxy * proxy,GrSurfaceOrigin origin)78 NormalizationParams proxy_normalization_params(const GrSurfaceProxy* proxy,
79 GrSurfaceOrigin origin) {
80 // Whether or not the proxy is instantiated, this is the size its texture will be, so we can
81 // normalize the src coordinates up front.
82 SkISize dimensions = proxy->backingStoreDimensions();
83 float iw, ih, h;
84 if (proxy->backendFormat().textureType() == GrTextureType::kRectangle) {
85 iw = ih = 1.f;
86 h = dimensions.height();
87 } else {
88 iw = 1.f / dimensions.width();
89 ih = 1.f / dimensions.height();
90 h = 1.f;
91 }
92
93 if (origin == kBottomLeft_GrSurfaceOrigin) {
94 return {iw, -ih, h};
95 } else {
96 return {iw, ih, 0.0f};
97 }
98 }
99
100 // Normalize the subset. If 'subsetRect' is null, it is assumed no subset constraint is desired,
101 // so a sufficiently large rect is returned even if the quad ends up batched with an op that uses
102 // subsets overall. When there is a subset it will be inset based on the filter mode. Normalization
103 // and y-flipping are applied as indicated by NormalizationParams.
SkRect normalize_and_inset_subset(GrSamplerState::Filter filter,
                                  const NormalizationParams& params,
                                  const SkRect* subsetRect) {
    // Large enough to never constrain sampling when no subset is wanted.
    static constexpr SkRect kLargeRect = {-100000, -100000, 1000000, 1000000};
    if (!subsetRect) {
        // Either the quad has no subset constraint and is batched with a subset constrained op
        // (in which case we want a subset that doesn't restrict normalized tex coords), or the
        // entire op doesn't use the subset, in which case the returned value is ignored.
        return kLargeRect;
    }

    // Work on (l, t, r, b) as one 4-lane vector. 'flipHi' negates the hi lanes
    // (r, b) so that a single add/min can inset all four edges uniformly.
    auto ltrb = skvx::Vec<4, float>::Load(subsetRect);
    auto flipHi = skvx::Vec<4, float>({1.f, 1.f, -1.f, -1.f});
    if (filter == GrSamplerState::Filter::kNearest) {
        // Make sure our insetting puts us at pixel centers.
        ltrb = skvx::floor(ltrb*flipHi)*flipHi;
    }
    // Inset with pin to the rect center.
    ltrb += skvx::Vec<4, float>({ GrTextureEffect::kLinearInset, GrTextureEffect::kLinearInset,
                                  -GrTextureEffect::kLinearInset, -GrTextureEffect::kLinearInset});
    // 'mid' holds the rect center in every lane pair; the min (in flipped space)
    // pins an over-inset edge to the center instead of crossing it.
    auto mid = (skvx::shuffle<2, 3, 0, 1>(ltrb) + ltrb)*0.5f;
    ltrb = skvx::min(ltrb*flipHi, mid*flipHi)*flipHi;

    // Normalize and offset
    ltrb = ltrb * skvx::Vec<4, float>{params.fIW, params.fInvH, params.fIW, params.fInvH} +
            skvx::Vec<4, float>{0.f, params.fYOffset, 0.f, params.fYOffset};
    if (params.fInvH < 0.f) {
        // Flip top and bottom to keep the rect sorted when loaded back to SkRect.
        ltrb = skvx::shuffle<0, 3, 2, 1>(ltrb);
    }

    SkRect out;
    ltrb.store(&out);
    return out;
}
139
140 // Normalizes logical src coords and corrects for origin
normalize_src_quad(const NormalizationParams & params,GrQuad * srcQuad)141 void normalize_src_quad(const NormalizationParams& params,
142 GrQuad* srcQuad) {
143 // The src quad should not have any perspective
144 SkASSERT(!srcQuad->hasPerspective());
145 skvx::Vec<4, float> xs = srcQuad->x4f() * params.fIW;
146 skvx::Vec<4, float> ys = srcQuad->y4f() * params.fInvH + params.fYOffset;
147 xs.store(srcQuad->xs());
148 ys.store(srcQuad->ys());
149 }
150
151 // Count the number of proxy runs in the entry set. This usually is already computed by
152 // SkGpuDevice, but when the BatchLengthLimiter chops the set up it must determine a new proxy count
153 // for each split.
proxy_run_count(const GrTextureSetEntry set[],int count)154 int proxy_run_count(const GrTextureSetEntry set[], int count) {
155 int actualProxyRunCount = 0;
156 const GrSurfaceProxy* lastProxy = nullptr;
157 for (int i = 0; i < count; ++i) {
158 if (set[i].fProxyView.proxy() != lastProxy) {
159 actualProxyRunCount++;
160 lastProxy = set[i].fProxyView.proxy();
161 }
162 }
163 return actualProxyRunCount;
164 }
165
safe_to_ignore_subset_rect(GrAAType aaType,GrSamplerState::Filter filter,const DrawQuad & quad,const SkRect & subsetRect)166 bool safe_to_ignore_subset_rect(GrAAType aaType, GrSamplerState::Filter filter,
167 const DrawQuad& quad, const SkRect& subsetRect) {
168 // If both the device and local quad are both axis-aligned, and filtering is off, the local quad
169 // can push all the way up to the edges of the the subset rect and the sampler shouldn't
170 // overshoot. Unfortunately, antialiasing adds enough jitter that we can only rely on this in
171 // the non-antialiased case.
172 SkRect localBounds = quad.fLocal.bounds();
173 if (aaType == GrAAType::kNone &&
174 filter == GrSamplerState::Filter::kNearest &&
175 quad.fDevice.quadType() == GrQuad::Type::kAxisAligned &&
176 quad.fLocal.quadType() == GrQuad::Type::kAxisAligned &&
177 subsetRect.contains(localBounds)) {
178
179 return true;
180 }
181
182 // If the local quad is inset by at least 0.5 pixels into the subset rect's bounds, the
183 // sampler shouldn't overshoot, even when antialiasing and filtering is taken into account.
184 if (subsetRect.makeInset(GrTextureEffect::kLinearInset,
185 GrTextureEffect::kLinearInset)
186 .contains(localBounds)) {
187 return true;
188 }
189
190 // The subset rect cannot be ignored safely.
191 return false;
192 }
193
194 /**
195 * Op that implements TextureOp::Make. It draws textured quads. Each quad can modulate against a
196 * the texture by color. The blend with the destination is always src-over. The edges are non-AA.
197 */
198 class TextureOpImpl final : public GrMeshDrawOp {
199 public:
200 using Saturate = TextureOp::Saturate;
201
Make(GrRecordingContext * context,GrSurfaceProxyView proxyView,sk_sp<GrColorSpaceXform> textureXform,GrSamplerState::Filter filter,GrSamplerState::MipmapMode mm,const SkPMColor4f & color,Saturate saturate,GrAAType aaType,DrawQuad * quad,const SkRect * subset)202 static GrOp::Owner Make(GrRecordingContext* context,
203 GrSurfaceProxyView proxyView,
204 sk_sp<GrColorSpaceXform> textureXform,
205 GrSamplerState::Filter filter,
206 GrSamplerState::MipmapMode mm,
207 const SkPMColor4f& color,
208 Saturate saturate,
209 GrAAType aaType,
210 DrawQuad* quad,
211 const SkRect* subset) {
212
213 return GrOp::Make<TextureOpImpl>(context, std::move(proxyView), std::move(textureXform),
214 filter, mm, color, saturate, aaType, quad, subset);
215 }
216
Make(GrRecordingContext * context,GrTextureSetEntry set[],int cnt,int proxyRunCnt,GrSamplerState::Filter filter,GrSamplerState::MipmapMode mm,Saturate saturate,GrAAType aaType,SkCanvas::SrcRectConstraint constraint,const SkMatrix & viewMatrix,sk_sp<GrColorSpaceXform> textureColorSpaceXform)217 static GrOp::Owner Make(GrRecordingContext* context,
218 GrTextureSetEntry set[],
219 int cnt,
220 int proxyRunCnt,
221 GrSamplerState::Filter filter,
222 GrSamplerState::MipmapMode mm,
223 Saturate saturate,
224 GrAAType aaType,
225 SkCanvas::SrcRectConstraint constraint,
226 const SkMatrix& viewMatrix,
227 sk_sp<GrColorSpaceXform> textureColorSpaceXform) {
228 // Allocate size based on proxyRunCnt, since that determines number of ViewCountPairs.
229 SkASSERT(proxyRunCnt <= cnt);
230 return GrOp::MakeWithExtraMemory<TextureOpImpl>(
231 context, sizeof(ViewCountPair) * (proxyRunCnt - 1),
232 set, cnt, proxyRunCnt, filter, mm, saturate, aaType, constraint,
233 viewMatrix, std::move(textureColorSpaceXform));
234 }
235
    ~TextureOpImpl() override {
        // fViewCountPairs[0] is a real member destroyed automatically; pairs 1..N-1
        // were placement-newed into the op's trailing extra memory (see the batched
        // Make/ctor) and must have their destructors invoked explicitly.
        for (unsigned p = 1; p < fMetadata.fProxyCount; ++p) {
            fViewCountPairs[p].~ViewCountPair();
        }
    }
241
name() const242 const char* name() const override { return "TextureOp"; }
243
visitProxies(const GrVisitProxyFunc & func) const244 void visitProxies(const GrVisitProxyFunc& func) const override {
245 bool mipped = (fMetadata.mipmapMode() != GrSamplerState::MipmapMode::kNone);
246 for (unsigned p = 0; p < fMetadata.fProxyCount; ++p) {
247 func(fViewCountPairs[p].fProxy.get(), skgpu::Mipmapped(mipped));
248 }
249 if (fDesc && fDesc->fProgramInfo) {
250 fDesc->fProgramInfo->visitFPProxies(func);
251 }
252 }
253
254 #ifdef SK_DEBUG
ValidateResourceLimits()255 static void ValidateResourceLimits() {
256 // The op implementation has an upper bound on the number of quads that it can represent.
257 // However, the resource manager imposes its own limit on the number of quads, which should
258 // always be lower than the numerical limit this op can hold.
259 using CountStorage = decltype(Metadata::fTotalQuadCount);
260 CountStorage maxQuadCount = std::numeric_limits<CountStorage>::max();
261 // GrResourceProvider::Max...() is typed as int, so don't compare across signed/unsigned.
262 int resourceLimit = SkTo<int>(maxQuadCount);
263 SkASSERT(GrResourceProvider::MaxNumAAQuads() <= resourceLimit &&
264 GrResourceProvider::MaxNumNonAAQuads() <= resourceLimit);
265 }
266 #endif
267
finalize(const GrCaps & caps,const GrAppliedClip *,GrClampType clampType)268 GrProcessorSet::Analysis finalize(const GrCaps& caps, const GrAppliedClip*,
269 GrClampType clampType) override {
270 SkASSERT(fMetadata.colorType() == ColorType::kNone);
271 auto iter = fQuads.metadata();
272 while(iter.next()) {
273 auto colorType = skgpu::ganesh::QuadPerEdgeAA::MinColorType(iter->fColor);
274 colorType = std::max(static_cast<ColorType>(fMetadata.fColorType),
275 colorType);
276 if (caps.reducedShaderMode()) {
277 colorType = std::max(colorType, ColorType::kByte);
278 }
279 fMetadata.fColorType = static_cast<uint16_t>(colorType);
280 }
281 return GrProcessorSet::EmptySetAnalysis();
282 }
283
fixedFunctionFlags() const284 FixedFunctionFlags fixedFunctionFlags() const override {
285 return fMetadata.aaType() == GrAAType::kMSAA ? FixedFunctionFlags::kUsesHWAA
286 : FixedFunctionFlags::kNone;
287 }
288
289 DEFINE_OP_CLASS_ID
290
291 private:
292 friend class ::GrOp;
293
    // Per-quad metadata stored in fQuads: modulation color, the (possibly no-op)
    // subset rect, and the per-edge AA flags packed into 4 bits.
    struct ColorSubsetAndAA {
        ColorSubsetAndAA(const SkPMColor4f& color, const SkRect& subsetRect, GrQuadAAFlags aaFlags)
                : fColor(color)
                , fSubsetRect(subsetRect)
                , fAAFlags(static_cast<uint16_t>(aaFlags)) {
            // Verify the flags survived truncation into the 4-bit field.
            SkASSERT(fAAFlags == static_cast<uint16_t>(aaFlags));
        }

        SkPMColor4f fColor;
        // If the op doesn't use subsets, this is ignored. If the op uses subsets and the specific
        // entry does not, this rect will equal kLargeRect, so it automatically has no effect.
        SkRect fSubsetRect;
        unsigned fAAFlags : 4;

        GrQuadAAFlags aaFlags() const { return static_cast<GrQuadAAFlags>(fAAFlags); }
    };
310
    struct ViewCountPair {
        // Normally this would be a GrSurfaceProxyView, but TextureOp applies the GrOrigin right
        // away so it doesn't need to be stored, and all ViewCountPairs in an op have the same
        // swizzle so that is stored in the op metadata.
        sk_sp<GrSurfaceProxy> fProxy;
        // Number of consecutive quads in fQuads that sample from fProxy.
        int fQuadCnt;
    };
318
319 // TextureOp and ViewCountPair are 8 byte aligned. This is packed into 8 bytes to minimally
320 // increase the size of the op; increasing the op size can have a surprising impact on
321 // performance (since texture ops are one of the most commonly used in an app).
    struct Metadata {
        // AAType must be filled after initialization; ColorType is determined in finalize()
        Metadata(const skgpu::Swizzle& swizzle,
                 GrSamplerState::Filter filter,
                 GrSamplerState::MipmapMode mm,
                 Subset subset,
                 Saturate saturate)
                : fSwizzle(swizzle)
                , fProxyCount(1)
                , fTotalQuadCount(1)
                , fFilter(static_cast<uint16_t>(filter))
                , fMipmapMode(static_cast<uint16_t>(mm))
                , fAAType(static_cast<uint16_t>(GrAAType::kNone))
                , fColorType(static_cast<uint16_t>(ColorType::kNone))
                , fSubset(static_cast<uint16_t>(subset))
                , fSaturate(static_cast<uint16_t>(saturate)) {}

        skgpu::Swizzle fSwizzle; // sizeof(skgpu::Swizzle) == uint16_t
        uint16_t fProxyCount;
        // This will be >= fProxyCount, since a proxy may be drawn multiple times
        uint16_t fTotalQuadCount;

        // These must be based on uint16_t to help MSVC's pack bitfields optimally
        uint16_t fFilter : 2; // GrSamplerState::Filter
        uint16_t fMipmapMode : 2; // GrSamplerState::MipmapMode
        uint16_t fAAType : 2; // GrAAType
        uint16_t fColorType : 2; // GrQuadPerEdgeAA::ColorType
        uint16_t fSubset : 1; // bool
        uint16_t fSaturate : 1; // bool
        uint16_t fUnused : 6; // # of bits left before Metadata exceeds 8 bytes

        // Typed accessors for the packed bitfields above.
        GrSamplerState::Filter filter() const {
            return static_cast<GrSamplerState::Filter>(fFilter);
        }
        GrSamplerState::MipmapMode mipmapMode() const {
            return static_cast<GrSamplerState::MipmapMode>(fMipmapMode);
        }
        GrAAType aaType() const { return static_cast<GrAAType>(fAAType); }
        ColorType colorType() const { return static_cast<ColorType>(fColorType); }
        Subset subset() const { return static_cast<Subset>(fSubset); }
        Saturate saturate() const { return static_cast<Saturate>(fSaturate); }

        // Guard that each enum still fits its 2-bit field above.
        static_assert(GrSamplerState::kFilterCount <= 4);
        static_assert(kGrAATypeCount <= 4);
        static_assert(skgpu::ganesh::QuadPerEdgeAA::kColorTypeCount <= 4);
    };
    // Keeping Metadata at 8 bytes is deliberate; see the comment above the struct.
    static_assert(sizeof(Metadata) == 8);
369
370 // This descriptor is used to store the draw info we decide on during on(Pre)PrepareDraws. We
371 // store the data in a separate struct in order to minimize the size of the TextureOp.
372 // Historically, increasing the TextureOp's size has caused surprising perf regressions, but we
373 // may want to re-evaluate whether this is still necessary.
374 //
375 // In the onPrePrepareDraws case it is allocated in the creation-time opData arena, and
376 // allocatePrePreparedVertices is also called.
377 //
378 // In the onPrepareDraws case this descriptor is allocated in the flush-time arena (i.e., as
379 // part of the flushState).
    struct Desc {
        VertexSpec fVertexSpec;
        int fNumProxies = 0;
        int fNumTotalQuads = 0;

        // This member variable is only used by 'onPrePrepareDraws'.
        char* fPrePreparedVertices = nullptr;

        GrProgramInfo* fProgramInfo = nullptr;

        sk_sp<const GrBuffer> fIndexBuffer;
        sk_sp<const GrBuffer> fVertexBuffer;
        int fBaseVertex;

        // How big should 'fVertices' be to hold all the vertex data?
        size_t totalSizeInBytes() const {
            return this->totalNumVertices() * fVertexSpec.vertexSize();
        }

        // Total vertex count across all quads in the (possibly chained) op.
        int totalNumVertices() const {
            return fNumTotalQuads * fVertexSpec.verticesPerQuad();
        }

        // Reserves totalSizeInBytes() of uninitialized vertex storage from 'arena'
        // (record-time path only).
        void allocatePrePreparedVertices(SkArenaAlloc* arena) {
            fPrePreparedVertices = arena->makeArrayDefault<char>(this->totalSizeInBytes());
        }
    };
407 // If subsetRect is not null it will be used to apply a strict src rect-style constraint.
    // Single-quad constructor. 'quad' is mutated in place (AA resolution, w=0
    // clipping, src normalization); 'subsetRect' may be dropped if provably safe.
    TextureOpImpl(GrSurfaceProxyView proxyView,
                  sk_sp<GrColorSpaceXform> textureColorSpaceXform,
                  GrSamplerState::Filter filter,
                  GrSamplerState::MipmapMode mm,
                  const SkPMColor4f& color,
                  Saturate saturate,
                  GrAAType aaType,
                  DrawQuad* quad,
                  const SkRect* subsetRect)
            : INHERITED(ClassID())
            , fQuads(1, true /* includes locals */)
            , fTextureColorSpaceXform(std::move(textureColorSpaceXform))
            , fDesc(nullptr)
            , fMetadata(proxyView.swizzle(), filter, mm, Subset(!!subsetRect), saturate) {
        // Clean up disparities between the overall aa type and edge configuration and apply
        // optimizations based on the rect and matrix when appropriate
        GrQuadUtils::ResolveAAType(aaType, quad->fEdgeFlags, quad->fDevice,
                                   &aaType, &quad->fEdgeFlags);
        fMetadata.fAAType = static_cast<uint16_t>(aaType);

        // We expect our caller to have already caught this optimization.
        SkASSERT(!subsetRect ||
                 !subsetRect->contains(proxyView.proxy()->backingStoreBoundsRect()));

        // We may have had a strict constraint with nearest filter solely due to possible AA bloat.
        // Try to identify cases where the subsetting isn't actually necessary, and skip it.
        if (subsetRect) {
            if (safe_to_ignore_subset_rect(aaType, filter, *quad, *subsetRect)) {
                subsetRect = nullptr;
                fMetadata.fSubset = static_cast<uint16_t>(Subset::kNo);
            }
        }

        // Normalize src coordinates and the subset (if set)
        NormalizationParams params = proxy_normalization_params(proxyView.proxy(),
                                                               proxyView.origin());
        normalize_src_quad(params, &quad->fLocal);
        SkRect subset = normalize_and_inset_subset(filter, params, subsetRect);

        // Set bounds before clipping so we don't have to worry about unioning the bounds of
        // the two potential quads (GrQuad::bounds() is perspective-safe).
        bool hairline = GrQuadUtils::WillUseHairline(quad->fDevice, aaType, quad->fEdgeFlags);
        this->setBounds(quad->fDevice.bounds(), HasAABloat(aaType == GrAAType::kCoverage),
                        hairline ? IsHairline::kYes : IsHairline::kNo);
        // appendQuad may split against w=0, so it reports how many quads were stored.
        int quadCount = this->appendQuad(quad, color, subset);
        fViewCountPairs[0] = {proxyView.detachProxy(), quadCount};
    }
455
    // Batch constructor: builds one op from 'cnt' entries spanning 'proxyRunCnt'
    // consecutive proxy runs. Per-quad filter/mipmap/AA/subset requirements are
    // merged into net op-wide settings as the entries are consumed.
    TextureOpImpl(GrTextureSetEntry set[],
                  int cnt,
                  int proxyRunCnt,
                  const GrSamplerState::Filter filter,
                  const GrSamplerState::MipmapMode mm,
                  const Saturate saturate,
                  const GrAAType aaType,
                  const SkCanvas::SrcRectConstraint constraint,
                  const SkMatrix& viewMatrix,
                  sk_sp<GrColorSpaceXform> textureColorSpaceXform)
            : INHERITED(ClassID())
            , fQuads(cnt, true /* includes locals */)
            , fTextureColorSpaceXform(std::move(textureColorSpaceXform))
            , fDesc(nullptr)
            , fMetadata(set[0].fProxyView.swizzle(),
                        GrSamplerState::Filter::kNearest,
                        GrSamplerState::MipmapMode::kNone,
                        Subset::kNo,
                        saturate) {
        // Update counts to reflect the batch op
        fMetadata.fProxyCount = SkToUInt(proxyRunCnt);
        fMetadata.fTotalQuadCount = SkToUInt(cnt);

        SkRect bounds = SkRectPriv::MakeLargestInverted();

        GrAAType netAAType = GrAAType::kNone; // aa type maximally compatible with all dst rects
        Subset netSubset = Subset::kNo;
        GrSamplerState::Filter netFilter = GrSamplerState::Filter::kNearest;
        GrSamplerState::MipmapMode netMM = GrSamplerState::MipmapMode::kNone;
        bool hasSubpixel = false;

        const GrSurfaceProxy* curProxy = nullptr;

        // 'q' is the index in 'set' and fQuadBuffer; 'p' is the index in fViewCountPairs and only
        // increases when set[q]'s proxy changes.
        int p = 0;
        for (int q = 0; q < cnt; ++q) {
            SkASSERT(mm == GrSamplerState::MipmapMode::kNone ||
                     (set[0].fProxyView.proxy()->asTextureProxy()->mipmapped() ==
                      skgpu::Mipmapped::kYes));
            if (q == 0) {
                // We do not placement new the first ViewCountPair since that one is allocated and
                // initialized as part of the TextureOp creation.
                fViewCountPairs[0].fProxy = set[0].fProxyView.detachProxy();
                fViewCountPairs[0].fQuadCnt = 0;
                curProxy = fViewCountPairs[0].fProxy.get();
            } else if (set[q].fProxyView.proxy() != curProxy) {
                // We must placement new the ViewCountPairs here so that the sk_sps in the
                // GrSurfaceProxyView get initialized properly.
                new(&fViewCountPairs[++p])ViewCountPair({set[q].fProxyView.detachProxy(), 0});

                curProxy = fViewCountPairs[p].fProxy.get();
                SkASSERT(GrTextureProxy::ProxiesAreCompatibleAsDynamicState(
                        curProxy, fViewCountPairs[0].fProxy.get()));
                SkASSERT(fMetadata.fSwizzle == set[q].fProxyView.swizzle());
            } // else another quad referencing the same proxy

            SkMatrix ctm = viewMatrix;
            if (set[q].fPreViewMatrix) {
                ctm.preConcat(*set[q].fPreViewMatrix);
            }

            // Use dstRect/srcRect unless dstClip is provided, in which case derive new source
            // coordinates by mapping dstClipQuad by the dstRect to srcRect transform.
            DrawQuad quad;
            if (set[q].fDstClipQuad) {
                quad.fDevice = GrQuad::MakeFromSkQuad(set[q].fDstClipQuad, ctm);

                SkPoint srcPts[4];
                GrMapRectPoints(set[q].fDstRect, set[q].fSrcRect, set[q].fDstClipQuad, srcPts, 4);
                quad.fLocal = GrQuad::MakeFromSkQuad(srcPts, SkMatrix::I());
            } else {
                quad.fDevice = GrQuad::MakeFromRect(set[q].fDstRect, ctm);
                quad.fLocal = GrQuad(set[q].fSrcRect);
            }

            // This may be reduced per-quad from the requested aggregate filtering level, and used
            // to determine if the subset is needed for the entry as well.
            GrSamplerState::Filter filterForQuad = filter;
            if (netFilter != filter || netMM != mm) {
                // The only way netFilter != filter is if linear is requested and we haven't yet
                // found a quad that requires linear (so net is still nearest). Similar for mip
                // mapping.
                SkASSERT(filter == netFilter ||
                         (netFilter == GrSamplerState::Filter::kNearest && filter > netFilter));
                SkASSERT(mm == netMM ||
                         (netMM == GrSamplerState::MipmapMode::kNone && mm > netMM));
                auto [mustFilter, mustMM] = FilterAndMipmapHaveNoEffect(quad.fLocal, quad.fDevice);
                if (filter != GrSamplerState::Filter::kNearest) {
                    if (mustFilter) {
                        netFilter = filter; // upgrade batch to higher filter level
                    } else {
                        filterForQuad = GrSamplerState::Filter::kNearest; // downgrade entry
                    }
                }
                if (mustMM && mm != GrSamplerState::MipmapMode::kNone) {
                    netMM = mm;
                }
            }

            // Determine the AA type for the quad, then merge with net AA type
            GrAAType aaForQuad;
            GrQuadUtils::ResolveAAType(aaType, set[q].fAAFlags, quad.fDevice,
                                       &aaForQuad, &quad.fEdgeFlags);
            // Update overall bounds of the op as the union of all quads
            bounds.joinPossiblyEmptyRect(quad.fDevice.bounds());
            hasSubpixel |= GrQuadUtils::WillUseHairline(quad.fDevice, aaForQuad, quad.fEdgeFlags);

            // Resolve sets aaForQuad to aaType or None, there is never a change between aa methods
            SkASSERT(aaForQuad == GrAAType::kNone || aaForQuad == aaType);
            if (netAAType == GrAAType::kNone && aaForQuad != GrAAType::kNone) {
                netAAType = aaType;
            }

            // Calculate metadata for the entry
            const SkRect* subsetForQuad = nullptr;
            if (constraint == SkCanvas::kStrict_SrcRectConstraint) {
                // Check (briefly) if the subset rect is actually needed for this set entry.
                SkRect* subsetRect = &set[q].fSrcRect;
                if (!subsetRect->contains(curProxy->backingStoreBoundsRect())) {
                    if (!safe_to_ignore_subset_rect(aaForQuad, filterForQuad, quad, *subsetRect)) {
                        netSubset = Subset::kYes;
                        subsetForQuad = subsetRect;
                    }
                }
            }

            // Normalize the src quads and apply origin
            NormalizationParams proxyParams = proxy_normalization_params(
                    curProxy, set[q].fProxyView.origin());
            normalize_src_quad(proxyParams, &quad.fLocal);

            // This subset may represent a no-op, otherwise it will have the origin and dimensions
            // of the texture applied to it.
            SkRect subset = normalize_and_inset_subset(filter, proxyParams, subsetForQuad);

            // Always append a quad (or 2 if perspective clipped), it just may refer back to a prior
            // ViewCountPair (this frequently happens when Chrome draws 9-patches).
            fViewCountPairs[p].fQuadCnt += this->appendQuad(&quad, set[q].fColor, subset);
        }
        // The # of proxy switches should match what was provided (+1 because we incremented p
        // when a new proxy was encountered).
        SkASSERT((p + 1) == fMetadata.fProxyCount);
        SkASSERT(fQuads.count() == fMetadata.fTotalQuadCount);

        fMetadata.fAAType = static_cast<uint16_t>(netAAType);
        fMetadata.fFilter = static_cast<uint16_t>(netFilter);
        fMetadata.fSubset = static_cast<uint16_t>(netSubset);

        this->setBounds(bounds, HasAABloat(netAAType == GrAAType::kCoverage),
                        hasSubpixel ? IsHairline::kYes : IsHairline::kNo);
    }
608
    // Appends 'quad' (possibly split into two against w=0) plus its metadata to
    // fQuads. Returns how many quads were appended (1 or 2); the caller adds this
    // to the proxy's ViewCountPair count.
    int appendQuad(DrawQuad* quad, const SkPMColor4f& color, const SkRect& subset) {
        DrawQuad extra;
        // Always clip to W0 to stay consistent with GrQuad::bounds
        int quadCount = GrQuadUtils::ClipToW0(quad, &extra);
        if (quadCount == 0) {
            // We can't discard the op at this point, but disable AA flags so it won't go through
            // inset/outset processing
            quad->fEdgeFlags = GrQuadAAFlags::kNone;
            quadCount = 1;
        }
        fQuads.append(quad->fDevice, {color, subset, quad->fEdgeFlags}, &quad->fLocal);
        if (quadCount > 1) {
            // Clipping produced a second quad; record it and keep the total in sync.
            fQuads.append(extra.fDevice, {color, subset, extra.fEdgeFlags}, &extra.fLocal);
            fMetadata.fTotalQuadCount++;
        }
        return quadCount;
    }
626
programInfo()627 GrProgramInfo* programInfo() override {
628 // Although this Op implements its own onPrePrepareDraws it calls GrMeshDrawOps' version so
629 // this entry point will be called.
630 return (fDesc) ? fDesc->fProgramInfo : nullptr;
631 }
632
    // Builds the geometry processor and program info for this op. Requires that
    // fDesc has already been populated (by the prepare or pre-prepare path).
    void onCreateProgramInfo(const GrCaps* caps,
                             SkArenaAlloc* arena,
                             const GrSurfaceProxyView& writeView,
                             bool usesMSAASurface,
                             GrAppliedClip&& appliedClip,
                             const GrDstProxyView& dstProxyView,
                             GrXferBarrierFlags renderPassXferBarriers,
                             GrLoadOp colorLoadOp) override {
        SkASSERT(fDesc);

        GrGeometryProcessor* gp;

        {
            // All proxies in the op are compatible as dynamic state (asserted in the
            // batch ctor), so the first proxy's backend format stands in for all.
            const GrBackendFormat& backendFormat =
                    fViewCountPairs[0].fProxy->backendFormat();

            GrSamplerState samplerState = GrSamplerState(GrSamplerState::WrapMode::kClamp,
                                                         fMetadata.filter());

            gp = skgpu::ganesh::QuadPerEdgeAA::MakeTexturedProcessor(
                    arena,
                    fDesc->fVertexSpec,
                    *caps->shaderCaps(),
                    backendFormat,
                    samplerState,
                    fMetadata.fSwizzle,
                    std::move(fTextureColorSpaceXform),
                    fMetadata.saturate());

            SkASSERT(fDesc->fVertexSpec.vertexSize() == gp->vertexStride());
        }

        // An empty processor set is used here, matching finalize()'s analysis.
        fDesc->fProgramInfo = GrSimpleMeshDrawOpHelper::CreateProgramInfo(
                caps, arena, writeView, usesMSAASurface, std::move(appliedClip), dstProxyView, gp,
                GrProcessorSet::MakeEmptySet(), fDesc->fVertexSpec.primitiveType(),
                renderPassXferBarriers, colorLoadOp, GrPipeline::InputFlags::kNone);
    }
670
    // Record-time (DDL) preparation: characterizes the draw, tessellates all
    // vertex data into the recording context's arena, then defers to the base
    // class to create and register the program info.
    void onPrePrepareDraws(GrRecordingContext* context,
                           const GrSurfaceProxyView& writeView,
                           GrAppliedClip* clip,
                           const GrDstProxyView& dstProxyView,
                           GrXferBarrierFlags renderPassXferBarriers,
                           GrLoadOp colorLoadOp) override {
        TRACE_EVENT0("skia.gpu", TRACE_FUNC);

        SkDEBUGCODE(this->validate();)
        // Pre-prepare must run at most once, before any flush-time prepare.
        SkASSERT(!fDesc);

        SkArenaAlloc* arena = context->priv().recordTimeAllocator();

        fDesc = arena->make<Desc>();
        this->characterize(fDesc);
        fDesc->allocatePrePreparedVertices(arena);
        FillInVertices(*context->priv().caps(), this, fDesc, fDesc->fPrePreparedVertices);

        // This will call onCreateProgramInfo and register the created program with the DDL.
        this->INHERITED::onPrePrepareDraws(context, writeView, clip, dstProxyView,
                                           renderPassXferBarriers, colorLoadOp);
    }
693
    // Tessellates every quad in the op chain headed by 'texOp' into 'vertexData',
    // walking ViewCountPairs in order. 'caps' is currently unreferenced in this
    // body — presumably kept for signature stability; TODO confirm.
    static void FillInVertices(const GrCaps& caps,
                               TextureOpImpl* texOp,
                               Desc* desc,
                               char* vertexData) {
        SkASSERT(vertexData);

        // Debug-only accounting used to cross-check quad/vertex totals below.
        SkDEBUGCODE(int totQuadsSeen = 0;) SkDEBUGCODE(int totVerticesSeen = 0;)
        SkDEBUGCODE(const size_t vertexSize = desc->fVertexSpec.vertexSize();)
        SkDEBUGCODE(auto startMark{vertexData};)

        skgpu::ganesh::QuadPerEdgeAA::Tessellator tessellator(
                desc->fVertexSpec, vertexData);
        for (const auto& op : ChainRange<TextureOpImpl>(texOp)) {
            auto iter = op.fQuads.iterator();
            for (unsigned p = 0; p < op.fMetadata.fProxyCount; ++p) {
                const int quadCnt = op.fViewCountPairs[p].fQuadCnt;
                SkDEBUGCODE(int meshVertexCnt = quadCnt * desc->fVertexSpec.verticesPerQuad());

                // Emit vertices for each quad belonging to this proxy run.
                for (int i = 0; i < quadCnt && iter.next(); ++i) {
                    SkASSERT(iter.isLocalValid());
                    const ColorSubsetAndAA& info = iter.metadata();

                    tessellator.append(iter.deviceQuad(), iter.localQuad(), info.fColor,
                                       info.fSubsetRect, info.aaFlags());
                }

                // The tessellator's write mark must advance exactly one mesh's worth.
                SkASSERT((totVerticesSeen + meshVertexCnt) * vertexSize
                         == (size_t)(tessellator.vertexMark() - startMark));

                SkDEBUGCODE(totQuadsSeen += quadCnt;)
                SkDEBUGCODE(totVerticesSeen += meshVertexCnt);
                SkASSERT(totQuadsSeen * desc->fVertexSpec.verticesPerQuad() == totVerticesSeen);
            }

            // If quad counts per proxy were calculated correctly, the entire iterator
            // should have been consumed.
            SkASSERT(!iter.next());
        }

        SkASSERT(desc->totalSizeInBytes() == (size_t)(tessellator.vertexMark() - startMark));
        SkASSERT(totQuadsSeen == desc->fNumTotalQuads);
        SkASSERT(totVerticesSeen == desc->totalNumVertices());
    }
737
738 #ifdef SK_DEBUG
validate_op(GrTextureType textureType,GrAAType aaType,skgpu::Swizzle swizzle,const TextureOpImpl * op)739 static int validate_op(GrTextureType textureType,
740 GrAAType aaType,
741 skgpu::Swizzle swizzle,
742 const TextureOpImpl* op) {
743 SkASSERT(op->fMetadata.fSwizzle == swizzle);
744
745 int quadCount = 0;
746 for (unsigned p = 0; p < op->fMetadata.fProxyCount; ++p) {
747 auto* proxy = op->fViewCountPairs[p].fProxy->asTextureProxy();
748 quadCount += op->fViewCountPairs[p].fQuadCnt;
749 SkASSERT(proxy);
750 SkASSERT(proxy->textureType() == textureType);
751 }
752
753 SkASSERT(aaType == op->fMetadata.aaType());
754 return quadCount;
755 }
756
validate() const757 void validate() const override {
758 // NOTE: Since this is debug-only code, we use the virtual asTextureProxy()
759 auto textureType = fViewCountPairs[0].fProxy->asTextureProxy()->textureType();
760 GrAAType aaType = fMetadata.aaType();
761 skgpu::Swizzle swizzle = fMetadata.fSwizzle;
762
763 int quadCount = validate_op(textureType, aaType, swizzle, this);
764
765 for (const GrOp* tmp = this->prevInChain(); tmp; tmp = tmp->prevInChain()) {
766 quadCount += validate_op(textureType, aaType, swizzle,
767 static_cast<const TextureOpImpl*>(tmp));
768 }
769
770 for (const GrOp* tmp = this->nextInChain(); tmp; tmp = tmp->nextInChain()) {
771 quadCount += validate_op(textureType, aaType, swizzle,
772 static_cast<const TextureOpImpl*>(tmp));
773 }
774
775 SkASSERT(quadCount == this->numChainedQuads());
776 }
777
778 #endif
779
780 #if defined(GR_TEST_UTILS)
    // Test-only hook: number of quads held by this op alone (not the whole chain).
    int numQuads() const final { return this->totNumQuads(); }
782 #endif
783
    // Walks this op's entire chain and fills in 'desc' with the aggregate data
    // (vertex spec, proxy count, quad totals) needed to allocate and fill the
    // combined vertex buffer.
    void characterize(Desc* desc) const {
        SkDEBUGCODE(this->validate();)

        // Start from the cheapest settings; each op in the chain can only promote them.
        GrQuad::Type quadType = GrQuad::Type::kAxisAligned;
        ColorType colorType = ColorType::kNone;
        GrQuad::Type srcQuadType = GrQuad::Type::kAxisAligned;
        Subset subset = Subset::kNo;
        GrAAType overallAAType = fMetadata.aaType();

        desc->fNumProxies = 0;
        desc->fNumTotalQuads = 0;
        int maxQuadsPerMesh = 0;

        for (const auto& op : ChainRange<TextureOpImpl>(this)) {
            if (op.fQuads.deviceQuadType() > quadType) {
                quadType = op.fQuads.deviceQuadType();
            }
            if (op.fQuads.localQuadType() > srcQuadType) {
                srcQuadType = op.fQuads.localQuadType();
            }
            if (op.fMetadata.subset() == Subset::kYes) {
                subset = Subset::kYes;
            }
            colorType = std::max(colorType, op.fMetadata.colorType());
            desc->fNumProxies += op.fMetadata.fProxyCount;

            // Track the largest single mesh; it drives the index-buffer choice below.
            for (unsigned p = 0; p < op.fMetadata.fProxyCount; ++p) {
                maxQuadsPerMesh = std::max(op.fViewCountPairs[p].fQuadCnt, maxQuadsPerMesh);
            }
            desc->fNumTotalQuads += op.totNumQuads();

            // Any coverage-AA op in the chain promotes the whole draw to coverage AA.
            if (op.fMetadata.aaType() == GrAAType::kCoverage) {
                overallAAType = GrAAType::kCoverage;
            }
        }

        SkASSERT(desc->fNumTotalQuads == this->numChainedQuads());

        SkASSERT(!CombinedQuadCountWillOverflow(overallAAType, false, desc->fNumTotalQuads));

        auto indexBufferOption =
                skgpu::ganesh::QuadPerEdgeAA::CalcIndexBufferOption(overallAAType, maxQuadsPerMesh);

        desc->fVertexSpec = VertexSpec(quadType, colorType, srcQuadType, /* hasLocal */ true,
                                       subset, overallAAType, /* alpha as coverage */ true,
                                       indexBufferOption);

        SkASSERT(desc->fNumTotalQuads <=
                 skgpu::ganesh::QuadPerEdgeAA::QuadLimit(indexBufferOption));
    }
834
totNumQuads() const835 int totNumQuads() const {
836 #ifdef SK_DEBUG
837 int tmp = 0;
838 for (unsigned p = 0; p < fMetadata.fProxyCount; ++p) {
839 tmp += fViewCountPairs[p].fQuadCnt;
840 }
841 SkASSERT(tmp == fMetadata.fTotalQuadCount);
842 #endif
843
844 return fMetadata.fTotalQuadCount;
845 }
846
numChainedQuads() const847 int numChainedQuads() const {
848 int numChainedQuads = this->totNumQuads();
849
850 for (const GrOp* tmp = this->prevInChain(); tmp; tmp = tmp->prevInChain()) {
851 numChainedQuads += ((const TextureOpImpl*)tmp)->totNumQuads();
852 }
853
854 for (const GrOp* tmp = this->nextInChain(); tmp; tmp = tmp->nextInChain()) {
855 numChainedQuads += ((const TextureOpImpl*)tmp)->totNumQuads();
856 }
857
858 return numChainedQuads;
859 }
860
861 // onPrePrepareDraws may or may not have been called at this point
    void onPrepareDraws(GrMeshDrawTarget* target) override {
        TRACE_EVENT0("skia.gpu", TRACE_FUNC);

        SkDEBUGCODE(this->validate();)

        // fDesc is only non-null here if onPrePrepareDraws ran (DDL recording), in
        // which case the vertex data was already baked.
        SkASSERT(!fDesc || fDesc->fPrePreparedVertices);

        if (!fDesc) {
            // Non-DDL path: characterize the chain now using the flush-time arena.
            SkArenaAlloc* arena = target->allocator();
            fDesc = arena->make<Desc>();
            this->characterize(fDesc);
            SkASSERT(!fDesc->fPrePreparedVertices);
        }

        size_t vertexSize = fDesc->fVertexSpec.vertexSize();

        void* vdata = target->makeVertexSpace(vertexSize, fDesc->totalNumVertices(),
                                              &fDesc->fVertexBuffer, &fDesc->fBaseVertex);
        if (!vdata) {
            // fVertexBuffer stays null; onExecute checks it and skips the draw.
            SkDebugf("Could not allocate vertices\n");
            return;
        }

        if (fDesc->fVertexSpec.needsIndexBuffer()) {
            fDesc->fIndexBuffer = skgpu::ganesh::QuadPerEdgeAA::GetIndexBuffer(
                    target, fDesc->fVertexSpec.indexBufferOption());
            if (!fDesc->fIndexBuffer) {
                SkDebugf("Could not allocate indices\n");
                return;
            }
        }

        if (fDesc->fPrePreparedVertices) {
            // DDL path: vertices were tessellated at record time; just copy them in.
            memcpy(vdata, fDesc->fPrePreparedVertices, fDesc->totalSizeInBytes());
        } else {
            FillInVertices(target->caps(), this, fDesc, (char*) vdata);
        }
    }
900
    void onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) override {
        // Vertex allocation failed in onPrepareDraws; there is nothing to draw.
        if (!fDesc->fVertexBuffer) {
            return;
        }

        // Likewise bail if an index buffer was required but couldn't be obtained.
        if (fDesc->fVertexSpec.needsIndexBuffer() && !fDesc->fIndexBuffer) {
            return;
        }

        if (!fDesc->fProgramInfo) {
            // Non-DDL path: the program wasn't created at record time, so make it now.
            this->createProgramInfo(flushState);
            SkASSERT(fDesc->fProgramInfo);
        }

        flushState->bindPipelineAndScissorClip(*fDesc->fProgramInfo, chainBounds);
        flushState->bindBuffers(std::move(fDesc->fIndexBuffer), nullptr,
                                std::move(fDesc->fVertexBuffer));

        // Issue one draw per ViewCountPair across the whole chain, rebinding
        // textures before each draw; 'totQuadsSeen' tracks the base quad offset
        // into the shared vertex buffer.
        int totQuadsSeen = 0;
        SkDEBUGCODE(int numDraws = 0;)
        for (const auto& op : ChainRange<TextureOpImpl>(this)) {
            for (unsigned p = 0; p < op.fMetadata.fProxyCount; ++p) {
                const int quadCnt = op.fViewCountPairs[p].fQuadCnt;
                SkASSERT(numDraws < fDesc->fNumProxies);
                flushState->bindTextures(fDesc->fProgramInfo->geomProc(),
                                         *op.fViewCountPairs[p].fProxy,
                                         fDesc->fProgramInfo->pipeline());
                skgpu::ganesh::QuadPerEdgeAA::IssueDraw(flushState->caps(),
                                                        flushState->opsRenderPass(),
                                                        fDesc->fVertexSpec,
                                                        totQuadsSeen,
                                                        quadCnt,
                                                        fDesc->totalNumVertices(),
                                                        fDesc->fBaseVertex);
                totQuadsSeen += quadCnt;
                SkDEBUGCODE(++numDraws;)
            }
        }

        SkASSERT(totQuadsSeen == fDesc->fNumTotalQuads);
        SkASSERT(numDraws == fDesc->fNumProxies);
    }
943
propagateCoverageAAThroughoutChain()944 void propagateCoverageAAThroughoutChain() {
945 fMetadata.fAAType = static_cast<uint16_t>(GrAAType::kCoverage);
946
947 for (GrOp* tmp = this->prevInChain(); tmp; tmp = tmp->prevInChain()) {
948 auto tex = static_cast<TextureOpImpl*>(tmp);
949 SkASSERT(tex->fMetadata.aaType() == GrAAType::kCoverage ||
950 tex->fMetadata.aaType() == GrAAType::kNone);
951 tex->fMetadata.fAAType = static_cast<uint16_t>(GrAAType::kCoverage);
952 }
953
954 for (GrOp* tmp = this->nextInChain(); tmp; tmp = tmp->nextInChain()) {
955 auto tex = static_cast<TextureOpImpl*>(tmp);
956 SkASSERT(tex->fMetadata.aaType() == GrAAType::kCoverage ||
957 tex->fMetadata.aaType() == GrAAType::kNone);
958 tex->fMetadata.fAAType = static_cast<uint16_t>(GrAAType::kCoverage);
959 }
960 }
961
    // Attempts to fold 'that' into this op (kMerged), or failing that, to link the
    // two ops so they share one pipeline (kMayChain). All sampler/shader-affecting
    // state must match for a merge; mismatches fall through to kCannotCombine.
    CombineResult onCombineIfPossible(GrOp* t, SkArenaAlloc*, const GrCaps& caps) override {
        TRACE_EVENT0("skia.gpu", TRACE_FUNC);
        auto that = t->cast<TextureOpImpl>();

        SkDEBUGCODE(this->validate();)
        SkDEBUGCODE(that->validate();)

        if (fDesc || that->fDesc) {
            // This should never happen (since only DDL recorded ops should be prePrepared)
            // but, in any case, we should never combine ops that have been prePrepared
            return CombineResult::kCannotCombine;
        }

        if (fMetadata.subset() != that->fMetadata.subset()) {
            // It is technically possible to combine operations across subset modes, but performance
            // testing suggests it's better to make more draw calls where some take advantage of
            // the more optimal shader path without coordinate clamping.
            return CombineResult::kCannotCombine;
        }
        if (!GrColorSpaceXform::Equals(fTextureColorSpaceXform.get(),
                                       that->fTextureColorSpaceXform.get())) {
            return CombineResult::kCannotCombine;
        }

        // Differing AA types can still merge if one side can be upgraded to coverage AA.
        bool upgradeToCoverageAAOnMerge = false;
        if (fMetadata.aaType() != that->fMetadata.aaType()) {
            if (!CanUpgradeAAOnMerge(fMetadata.aaType(), that->fMetadata.aaType())) {
                return CombineResult::kCannotCombine;
            }
            upgradeToCoverageAAOnMerge = true;
        }

        if (CombinedQuadCountWillOverflow(fMetadata.aaType(), upgradeToCoverageAAOnMerge,
                                          this->numChainedQuads() + that->numChainedQuads())) {
            return CombineResult::kCannotCombine;
        }

        // Sampler and swizzle state must match exactly to share one geometry processor.
        if (fMetadata.saturate() != that->fMetadata.saturate()) {
            return CombineResult::kCannotCombine;
        }
        if (fMetadata.filter() != that->fMetadata.filter()) {
            return CombineResult::kCannotCombine;
        }
        if (fMetadata.mipmapMode() != that->fMetadata.mipmapMode()) {
            return CombineResult::kCannotCombine;
        }
        if (fMetadata.fSwizzle != that->fMetadata.fSwizzle) {
            return CombineResult::kCannotCombine;
        }
        const auto* thisProxy = fViewCountPairs[0].fProxy.get();
        const auto* thatProxy = that->fViewCountPairs[0].fProxy.get();
        if (fMetadata.fProxyCount > 1 || that->fMetadata.fProxyCount > 1 ||
            thisProxy != thatProxy) {
            // We can't merge across different proxies. Check if 'this' can be chained with 'that'.
            if (GrTextureProxy::ProxiesAreCompatibleAsDynamicState(thisProxy, thatProxy) &&
                caps.dynamicStateArrayGeometryProcessorTextureSupport() &&
                fMetadata.aaType() == that->fMetadata.aaType()) {
                // We only allow chaining when the aaTypes match bc otherwise the AA type
                // reported by the chain can be inconsistent. That is, since chaining doesn't
                // propagate revised AA information throughout the chain, the head of the chain
                // could have an AA setting of kNone while the chain as a whole could have a
                // setting of kCoverage. This inconsistency would then interfere with the validity
                // of the CombinedQuadCountWillOverflow calls.
                // This problem doesn't occur w/ merging bc we do propagate the AA information
                // (in propagateCoverageAAThroughoutChain) below.
                return CombineResult::kMayChain;
            }
            return CombineResult::kCannotCombine;
        }

        fMetadata.fSubset |= that->fMetadata.fSubset;
        fMetadata.fColorType = std::max(fMetadata.fColorType, that->fMetadata.fColorType);

        // Concatenate quad lists together
        fQuads.concat(that->fQuads);
        fViewCountPairs[0].fQuadCnt += that->fQuads.count();
        fMetadata.fTotalQuadCount += that->fQuads.count();

        if (upgradeToCoverageAAOnMerge) {
            // This merger may be the start of a concatenation of two chains. When one
            // of the chains mutates its AA the other must follow suit or else the above AA
            // check may prevent later ops from chaining together. A specific example of this is
            // when chain2 is prepended onto chain1:
            //   chain1 (that): opA (non-AA/mergeable) opB (non-AA/non-mergeable)
            //   chain2 (this): opC (cov-AA/non-mergeable) opD (cov-AA/mergeable)
            // W/o this propagation, after opD & opA merge, opB and opC would say they couldn't
            // chain - which would stop the concatenation process.
            this->propagateCoverageAAThroughoutChain();
            that->propagateCoverageAAThroughoutChain();
        }

        SkDEBUGCODE(this->validate();)

        return CombineResult::kMerged;
    }
1057
1058 #if defined(GR_TEST_UTILS)
    // Test-only: dumps each proxy, its sampler settings, and every quad's color,
    // subset rect, UVs, and device-space corners.
    SkString onDumpInfo() const override {
        SkString str = SkStringPrintf("# draws: %d\n", fQuads.count());
        auto iter = fQuads.iterator();
        for (unsigned p = 0; p < fMetadata.fProxyCount; ++p) {
            SkString proxyStr = fViewCountPairs[p].fProxy->dump();
            str.append(proxyStr);
            str.appendf(", Filter: %d, MM: %d\n",
                        static_cast<int>(fMetadata.fFilter),
                        static_cast<int>(fMetadata.fMipmapMode));
            for (int i = 0; i < fViewCountPairs[p].fQuadCnt && iter.next(); ++i) {
                const GrQuad* quad = iter.deviceQuad();
                // Fall back to a default quad when no local coords were stored.
                GrQuad uv = iter.isLocalValid() ? *(iter.localQuad()) : GrQuad();
                const ColorSubsetAndAA& info = iter.metadata();
                str.appendf(
                        "%d: Color: 0x%08x, Subset(%d): [L: %.2f, T: %.2f, R: %.2f, B: %.2f]\n"
                        "  UVs  [(%.2f, %.2f), (%.2f, %.2f), (%.2f, %.2f), (%.2f, %.2f)]\n"
                        "  Quad [(%.2f, %.2f), (%.2f, %.2f), (%.2f, %.2f), (%.2f, %.2f)]\n",
                        i, info.fColor.toBytes_RGBA(), fMetadata.fSubset, info.fSubsetRect.fLeft,
                        info.fSubsetRect.fTop, info.fSubsetRect.fRight, info.fSubsetRect.fBottom,
                        quad->point(0).fX, quad->point(0).fY, quad->point(1).fX, quad->point(1).fY,
                        quad->point(2).fX, quad->point(2).fY, quad->point(3).fX, quad->point(3).fY,
                        uv.point(0).fX, uv.point(0).fY, uv.point(1).fX, uv.point(1).fY,
                        uv.point(2).fX, uv.point(2).fY, uv.point(3).fX, uv.point(3).fY);
            }
        }
        return str;
    }
1086 #endif
1087
    // All quads in this op, packed together with their per-quad metadata.
    GrQuadBuffer<ColorSubsetAndAA> fQuads;
    // Color-space conversion applied when sampling the texture(s).
    sk_sp<GrColorSpaceXform> fTextureColorSpaceXform;
    // Most state of TextureOp is packed into these two fields to minimize the op's size.
    // Historically, increasing the size of TextureOp has caused surprising perf regressions, so
    // consider/measure changes with care.
    Desc* fDesc;
    Metadata fMetadata;

    // This field must go last. When allocating this op, we will allocate extra space to hold
    // additional ViewCountPairs immediately after the op's allocation so we can treat this
    // as an fProxyCnt-length array.
    ViewCountPair fViewCountPairs[1];

    using INHERITED = GrMeshDrawOp;
};
1103
1104 } // anonymous namespace
1105
1106 namespace skgpu::ganesh {
1107
1108 #if defined(GR_TEST_UTILS)
// Test-only: exposes the implementation class's op ID so tests can identify
// TextureOps among recorded ops.
uint32_t TextureOp::ClassID() {
    return TextureOpImpl::ClassID();
}
1112 #endif
1113
FilterAndMipmapHaveNoEffect(const GrQuad & srcQuad,const GrQuad & dstQuad)1114 std::tuple<bool /* filter */, bool /* mipmap */> FilterAndMipmapHaveNoEffect(
1115 const GrQuad& srcQuad, const GrQuad& dstQuad) {
1116 // If not axis-aligned in src or dst, then always say it has an effect
1117 if (srcQuad.quadType() != GrQuad::Type::kAxisAligned ||
1118 dstQuad.quadType() != GrQuad::Type::kAxisAligned) {
1119 return {true, true};
1120 }
1121
1122 SkRect srcRect;
1123 SkRect dstRect;
1124 if (srcQuad.asRect(&srcRect) && dstQuad.asRect(&dstRect)) {
1125 // Disable filtering when there is no scaling (width and height are the same), and the
1126 // top-left corners have the same fraction (so src and dst snap to the pixel grid
1127 // identically).
1128 SkASSERT(srcRect.isSorted());
1129 bool filter = srcRect.width() != dstRect.width() || srcRect.height() != dstRect.height() ||
1130 SkScalarFraction(srcRect.fLeft) != SkScalarFraction(dstRect.fLeft) ||
1131 SkScalarFraction(srcRect.fTop) != SkScalarFraction(dstRect.fTop);
1132 bool mm = srcRect.width() > dstRect.width() || srcRect.height() > dstRect.height();
1133 return {filter, mm};
1134 }
1135 // Extract edge lengths
1136 SkSize srcSize = axis_aligned_quad_size(srcQuad);
1137 SkSize dstSize = axis_aligned_quad_size(dstQuad);
1138 // Although the quads are axis-aligned, the local coordinate system is transformed such
1139 // that fractionally-aligned sample centers will not align with the device coordinate system
1140 // So disable filtering when edges are the same length and both srcQuad and dstQuad
1141 // 0th vertex is integer aligned.
1142 bool filter = srcSize != dstSize || !SkScalarIsInt(srcQuad.x(0)) ||
1143 !SkScalarIsInt(srcQuad.y(0)) || !SkScalarIsInt(dstQuad.x(0)) ||
1144 !SkScalarIsInt(dstQuad.y(0));
1145 bool mm = srcSize.fWidth > dstSize.fWidth || srcSize.fHeight > dstSize.fHeight;
1146 return {filter, mm};
1147 }
1148
Make(GrRecordingContext * context,GrSurfaceProxyView proxyView,SkAlphaType alphaType,sk_sp<GrColorSpaceXform> textureXform,GrSamplerState::Filter filter,GrSamplerState::MipmapMode mm,const SkPMColor4f & color,Saturate saturate,SkBlendMode blendMode,GrAAType aaType,DrawQuad * quad,const SkRect * subset)1149 GrOp::Owner TextureOp::Make(GrRecordingContext* context,
1150 GrSurfaceProxyView proxyView,
1151 SkAlphaType alphaType,
1152 sk_sp<GrColorSpaceXform> textureXform,
1153 GrSamplerState::Filter filter,
1154 GrSamplerState::MipmapMode mm,
1155 const SkPMColor4f& color,
1156 Saturate saturate,
1157 SkBlendMode blendMode,
1158 GrAAType aaType,
1159 DrawQuad* quad,
1160 const SkRect* subset) {
1161 // Apply optimizations that are valid whether or not using TextureOp or FillRectOp
1162 if (subset && subset->contains(proxyView.proxy()->backingStoreBoundsRect())) {
1163 // No need for a shader-based subset if hardware clamping achieves the same effect
1164 subset = nullptr;
1165 }
1166
1167 if (filter != GrSamplerState::Filter::kNearest || mm != GrSamplerState::MipmapMode::kNone) {
1168 auto [mustFilter, mustMM] = FilterAndMipmapHaveNoEffect(quad->fLocal, quad->fDevice);
1169 if (!mustFilter) {
1170 filter = GrSamplerState::Filter::kNearest;
1171 }
1172 if (!mustMM) {
1173 mm = GrSamplerState::MipmapMode::kNone;
1174 }
1175 }
1176
1177 if (blendMode == SkBlendMode::kSrcOver) {
1178 return TextureOpImpl::Make(context, std::move(proxyView), std::move(textureXform), filter,
1179 mm, color, saturate, aaType, std::move(quad), subset);
1180 } else {
1181 // Emulate complex blending using FillRectOp
1182 GrSamplerState samplerState(GrSamplerState::WrapMode::kClamp, filter, mm);
1183 GrPaint paint;
1184 paint.setColor4f(color);
1185 paint.setXPFactory(GrXPFactory::FromBlendMode(blendMode));
1186
1187 std::unique_ptr<GrFragmentProcessor> fp;
1188 const auto& caps = *context->priv().caps();
1189 if (subset) {
1190 SkRect localRect;
1191 if (quad->fLocal.asRect(&localRect)) {
1192 fp = GrTextureEffect::MakeSubset(std::move(proxyView), alphaType, SkMatrix::I(),
1193 samplerState, *subset, localRect, caps);
1194 } else {
1195 fp = GrTextureEffect::MakeSubset(std::move(proxyView), alphaType, SkMatrix::I(),
1196 samplerState, *subset, caps);
1197 }
1198 } else {
1199 fp = GrTextureEffect::Make(std::move(proxyView), alphaType, SkMatrix::I(), samplerState,
1200 caps);
1201 }
1202 fp = GrColorSpaceXformEffect::Make(std::move(fp), std::move(textureXform));
1203 fp = GrBlendFragmentProcessor::Make<SkBlendMode::kModulate>(std::move(fp), nullptr);
1204 if (saturate == Saturate::kYes) {
1205 fp = GrFragmentProcessor::ClampOutput(std::move(fp));
1206 }
1207 paint.setColorFragmentProcessor(std::move(fp));
1208 return ganesh::FillRectOp::Make(context, std::move(paint), aaType, quad);
1209 }
1210 }
1211
1212 // A helper class that assists in breaking up bulk API quad draws into manageable chunks.
class TextureOp::BatchSizeLimiter {
public:
    // Captures everything needed to emit one TextureOpImpl per "clump" of entries.
    // NOTE(review): fViewMatrix is held by reference; assumes the caller's matrix
    // outlives this helper (true for the stack usage in AddTextureSetOps) — confirm
    // before reusing elsewhere.
    BatchSizeLimiter(ganesh::SurfaceDrawContext* sdc,
                     const GrClip* clip,
                     GrRecordingContext* rContext,
                     int numEntries,
                     GrSamplerState::Filter filter,
                     GrSamplerState::MipmapMode mm,
                     Saturate saturate,
                     SkCanvas::SrcRectConstraint constraint,
                     const SkMatrix& viewMatrix,
                     sk_sp<GrColorSpaceXform> textureColorSpaceXform)
            : fSDC(sdc)
            , fClip(clip)
            , fContext(rContext)
            , fFilter(filter)
            , fMipmapMode(mm)
            , fSaturate(saturate)
            , fConstraint(constraint)
            , fViewMatrix(viewMatrix)
            , fTextureColorSpaceXform(textureColorSpaceXform)
            , fNumLeft(numEntries) {}

    // Emits one op covering the next 'clumpSize' entries of 'set' (starting at the
    // current clump offset) and advances the bookkeeping.
    void createOp(GrTextureSetEntry set[], int clumpSize, GrAAType aaType) {

        int clumpProxyCount = proxy_run_count(&set[fNumClumped], clumpSize);
        GrOp::Owner op = TextureOpImpl::Make(fContext,
                                             &set[fNumClumped],
                                             clumpSize,
                                             clumpProxyCount,
                                             fFilter,
                                             fMipmapMode,
                                             fSaturate,
                                             aaType,
                                             fConstraint,
                                             fViewMatrix,
                                             fTextureColorSpaceXform);
        fSDC->addDrawOp(fClip, std::move(op));

        fNumLeft -= clumpSize;
        fNumClumped += clumpSize;
    }

    // Entries not yet emitted.
    int numLeft() const { return fNumLeft; }
    // Index of the first entry of the next clump.
    int baseIndex() const { return fNumClumped; }

private:
    ganesh::SurfaceDrawContext* fSDC;
    const GrClip* fClip;
    GrRecordingContext* fContext;
    GrSamplerState::Filter fFilter;
    GrSamplerState::MipmapMode fMipmapMode;
    Saturate fSaturate;
    SkCanvas::SrcRectConstraint fConstraint;
    const SkMatrix& fViewMatrix;
    sk_sp<GrColorSpaceXform> fTextureColorSpaceXform;

    int fNumLeft;
    int fNumClumped = 0;  // also the offset for the start of the next clump
};
1273
1274 // Greedily clump quad draws together until the index buffer limit is exceeded.
// Records draw ops for a bulk set of textured quads, splitting the set into
// per-op "clumps" that respect the index-buffer quad limits and, where possible,
// downgrading coverage AA to none for runs of quads that don't need it.
void TextureOp::AddTextureSetOps(ganesh::SurfaceDrawContext* sdc,
                                 const GrClip* clip,
                                 GrRecordingContext* context,
                                 GrTextureSetEntry set[],
                                 int cnt,
                                 int proxyRunCnt,
                                 GrSamplerState::Filter filter,
                                 GrSamplerState::MipmapMode mm,
                                 Saturate saturate,
                                 SkBlendMode blendMode,
                                 GrAAType aaType,
                                 SkCanvas::SrcRectConstraint constraint,
                                 const SkMatrix& viewMatrix,
                                 sk_sp<GrColorSpaceXform> textureColorSpaceXform) {
    // Ensure that the index buffer limits are lower than the proxy and quad count limits of
    // the op's metadata so we don't need to worry about overflow.
    SkDEBUGCODE(TextureOpImpl::ValidateResourceLimits();)
    SkASSERT(proxy_run_count(set, cnt) == proxyRunCnt);

    // First check if we can support batches as a single op
    if (blendMode != SkBlendMode::kSrcOver ||
        !context->priv().caps()->dynamicStateArrayGeometryProcessorTextureSupport()) {
        // Append each entry as its own op; these may still be GrTextureOps if the blend mode is
        // src-over but the backend doesn't support dynamic state changes. Otherwise Make()
        // automatically creates the appropriate FillRectOp to emulate TextureOp.
        SkMatrix ctm;
        for (int i = 0; i < cnt; ++i) {
            ctm = viewMatrix;
            if (set[i].fPreViewMatrix) {
                ctm.preConcat(*set[i].fPreViewMatrix);
            }

            DrawQuad quad;
            quad.fEdgeFlags = set[i].fAAFlags;
            if (set[i].fDstClipQuad) {
                // An explicit dst clip quad; derive matching local coords for it.
                quad.fDevice = GrQuad::MakeFromSkQuad(set[i].fDstClipQuad, ctm);

                SkPoint srcPts[4];
                GrMapRectPoints(set[i].fDstRect, set[i].fSrcRect, set[i].fDstClipQuad, srcPts, 4);
                quad.fLocal = GrQuad::MakeFromSkQuad(srcPts, SkMatrix::I());
            } else {
                quad.fDevice = GrQuad::MakeFromRect(set[i].fDstRect, ctm);
                quad.fLocal = GrQuad(set[i].fSrcRect);
            }

            // Strict constraint means sampling must stay inside the src rect.
            const SkRect* subset = constraint == SkCanvas::kStrict_SrcRectConstraint
                    ? &set[i].fSrcRect : nullptr;

            auto op = Make(context, set[i].fProxyView, set[i].fSrcAlphaType, textureColorSpaceXform,
                           filter, mm, set[i].fColor, saturate, blendMode, aaType, &quad, subset);
            sdc->addDrawOp(clip, std::move(op));
        }
        return;
    }

    // Second check if we can always just make a single op and avoid the extra iteration
    // needed to clump things together.
    if (cnt <= std::min(GrResourceProvider::MaxNumNonAAQuads(),
                        GrResourceProvider::MaxNumAAQuads())) {
        auto op = TextureOpImpl::Make(context, set, cnt, proxyRunCnt, filter, mm, saturate, aaType,
                                      constraint, viewMatrix, std::move(textureColorSpaceXform));
        sdc->addDrawOp(clip, std::move(op));
        return;
    }

    BatchSizeLimiter state(sdc, clip, context, cnt, filter, mm, saturate, constraint, viewMatrix,
                           std::move(textureColorSpaceXform));

    // kNone and kMSAA never get altered
    if (aaType == GrAAType::kNone || aaType == GrAAType::kMSAA) {
        // Clump these into series of MaxNumNonAAQuads-sized GrTextureOps
        while (state.numLeft() > 0) {
            int clumpSize = std::min(state.numLeft(), GrResourceProvider::MaxNumNonAAQuads());

            state.createOp(set, clumpSize, aaType);
        }
    } else {
        // kCoverage can be downgraded to kNone. Note that the following is conservative. kCoverage
        // can also get downgraded to kNone if all the quads are on integer coordinates and
        // axis-aligned.
        SkASSERT(aaType == GrAAType::kCoverage);

        while (state.numLeft() > 0) {
            // AA required by any quad seen so far in the current (unfinished) clump.
            GrAAType runningAA = GrAAType::kNone;
            bool clumped = false;

            for (int i = 0; i < state.numLeft(); ++i) {
                int absIndex = state.baseIndex() + i;

                if (set[absIndex].fAAFlags != GrQuadAAFlags::kNone ||
                    runningAA == GrAAType::kCoverage) {

                    if (i >= GrResourceProvider::MaxNumAAQuads()) {
                        // Here we either need to boost the AA type to kCoverage, but doing so with
                        // all the accumulated quads would overflow, or we have a set of AA quads
                        // that has just gotten too large. In either case, calve off the existing
                        // quads as their own TextureOp.
                        state.createOp(
                                set,
                                runningAA == GrAAType::kNone ? i : GrResourceProvider::MaxNumAAQuads(),
                                runningAA);  // maybe downgrading AA here
                        clumped = true;
                        break;
                    }

                    runningAA = GrAAType::kCoverage;
                } else if (runningAA == GrAAType::kNone) {

                    if (i >= GrResourceProvider::MaxNumNonAAQuads()) {
                        // Here we've found a consistent batch of non-AA quads that has gotten too
                        // large. Calve it off as its own TextureOp.
                        state.createOp(set, GrResourceProvider::MaxNumNonAAQuads(),
                                       GrAAType::kNone);  // definitely downgrading AA here
                        clumped = true;
                        break;
                    }
                }
            }

            if (!clumped) {
                // We ran through the above loop w/o hitting a limit. Spit out this last clump of
                // quads and call it a day.
                state.createOp(set, state.numLeft(), runningAA);  // maybe downgrading AA here
            }
        }
    }
}
1402
1403 } // namespace skgpu::ganesh
1404
1405 #if defined(GR_TEST_UTILS)
// Fuzz-test factory: builds a TextureOp with randomized proxy dimensions,
// sampler state, geometry, AA configuration, and optional subset.
GR_DRAW_OP_TEST_DEFINE(TextureOpImpl) {
    SkISize dims;
    dims.fHeight = random->nextULessThan(90) + 10;
    dims.fWidth = random->nextULessThan(90) + 10;
    auto origin = random->nextBool() ? kTopLeft_GrSurfaceOrigin : kBottomLeft_GrSurfaceOrigin;
    skgpu::Mipmapped mipmapped =
            random->nextBool() ? skgpu::Mipmapped::kYes : skgpu::Mipmapped::kNo;
    SkBackingFit fit = SkBackingFit::kExact;
    if (mipmapped == skgpu::Mipmapped::kNo) {
        // Approx fit is only exercised for non-mipmapped proxies.
        fit = random->nextBool() ? SkBackingFit::kApprox : SkBackingFit::kExact;
    }
    const GrBackendFormat format =
            context->priv().caps()->getDefaultBackendFormat(GrColorType::kRGBA_8888,
                                                            GrRenderable::kNo);
    GrProxyProvider* proxyProvider = context->priv().proxyProvider();
    sk_sp<GrTextureProxy> proxy = proxyProvider->createProxy(format,
                                                             dims,
                                                             GrRenderable::kNo,
                                                             1,
                                                             mipmapped,
                                                             fit,
                                                             skgpu::Budgeted::kNo,
                                                             GrProtected::kNo,
                                                             /*label=*/"TextureOp",
                                                             GrInternalSurfaceFlags::kNone);

    // Random dst rect plus a src rect that may extend past the proxy bounds.
    SkRect rect = GrTest::TestRect(random);
    SkRect srcRect;
    srcRect.fLeft = random->nextRangeScalar(0.f, proxy->width() / 2.f);
    srcRect.fRight = random->nextRangeScalar(0.f, proxy->width()) + proxy->width() / 2.f;
    srcRect.fTop = random->nextRangeScalar(0.f, proxy->height() / 2.f);
    srcRect.fBottom = random->nextRangeScalar(0.f, proxy->height()) + proxy->height() / 2.f;
    SkMatrix viewMatrix = GrTest::TestMatrixPreservesRightAngles(random);
    SkPMColor4f color = SkPMColor4f::FromBytes_RGBA(SkColorToPremulGrColor(random->nextU()));
    GrSamplerState::Filter filter = (GrSamplerState::Filter)random->nextULessThan(
            static_cast<uint32_t>(GrSamplerState::Filter::kLast) + 1);
    GrSamplerState::MipmapMode mm = GrSamplerState::MipmapMode::kNone;
    if (mipmapped == skgpu::Mipmapped::kYes) {
        mm = (GrSamplerState::MipmapMode)random->nextULessThan(
                static_cast<uint32_t>(GrSamplerState::MipmapMode::kLast) + 1);
    }

    auto texXform = GrTest::TestColorXform(random);
    GrAAType aaType = GrAAType::kNone;
    if (random->nextBool()) {
        aaType = (numSamples > 1) ? GrAAType::kMSAA : GrAAType::kCoverage;
    }
    // Randomize each edge's AA flag independently.
    GrQuadAAFlags aaFlags = GrQuadAAFlags::kNone;
    aaFlags |= random->nextBool() ? GrQuadAAFlags::kLeft : GrQuadAAFlags::kNone;
    aaFlags |= random->nextBool() ? GrQuadAAFlags::kTop : GrQuadAAFlags::kNone;
    aaFlags |= random->nextBool() ? GrQuadAAFlags::kRight : GrQuadAAFlags::kNone;
    aaFlags |= random->nextBool() ? GrQuadAAFlags::kBottom : GrQuadAAFlags::kNone;
    bool useSubset = random->nextBool();
    auto saturate = random->nextBool() ? TextureOp::Saturate::kYes
                                       : TextureOp::Saturate::kNo;
    GrSurfaceProxyView proxyView(
            std::move(proxy), origin,
            context->priv().caps()->getReadSwizzle(format, GrColorType::kRGBA_8888));
    auto alphaType = static_cast<SkAlphaType>(
            random->nextRangeU(kUnknown_SkAlphaType + 1, kLastEnum_SkAlphaType));

    DrawQuad quad = {GrQuad::MakeFromRect(rect, viewMatrix), GrQuad(srcRect), aaFlags};
    return TextureOp::Make(context, std::move(proxyView), alphaType,
                           std::move(texXform), filter, mm, color, saturate,
                           SkBlendMode::kSrcOver, aaType, &quad,
                           useSubset ? &srcRect : nullptr);
}
1473
1474 #endif // defined(GR_TEST_UTILS)
1475