/*
 * Copyright 2017 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/ops/GrTextureOp.h"
#include <new>
#include "include/core/SkPoint.h"
#include "include/core/SkPoint3.h"
#include "include/gpu/GrTexture.h"
#include "include/private/GrRecordingContext.h"
#include "include/private/SkFloatingPoint.h"
#include "include/private/SkTo.h"
#include "src/core/SkMathPriv.h"
#include "src/core/SkMatrixPriv.h"
#include "src/core/SkRectPriv.h"
#include "src/gpu/GrAppliedClip.h"
#include "src/gpu/GrCaps.h"
#include "src/gpu/GrDrawOpTest.h"
#include "src/gpu/GrGeometryProcessor.h"
#include "src/gpu/GrGpu.h"
#include "src/gpu/GrMemoryPool.h"
#include "src/gpu/GrOpFlushState.h"
#include "src/gpu/GrRecordingContextPriv.h"
#include "src/gpu/GrResourceProvider.h"
#include "src/gpu/GrResourceProviderPriv.h"
#include "src/gpu/GrShaderCaps.h"
#include "src/gpu/GrTexturePriv.h"
#include "src/gpu/GrTextureProxy.h"
#include "src/gpu/SkGr.h"
#include "src/gpu/effects/GrTextureDomain.h"
#include "src/gpu/geometry/GrQuad.h"
#include "src/gpu/geometry/GrQuadBuffer.h"
#include "src/gpu/geometry/GrQuadUtils.h"
#include "src/gpu/glsl/GrGLSLVarying.h"
#include "src/gpu/ops/GrFillRectOp.h"
#include "src/gpu/ops/GrMeshDrawOp.h"
#include "src/gpu/ops/GrQuadPerEdgeAA.h"

namespace {

using Domain = GrQuadPerEdgeAA::Domain;
using VertexSpec = GrQuadPerEdgeAA::VertexSpec;
using ColorType = GrQuadPerEdgeAA::ColorType;
// Extracts lengths of vertical and horizontal edges of axis-aligned quad. "width" is the edge
// between v0 and v2 (or v1 and v3), "height" is the edge between v0 and v1 (or v2 and v3).
static SkSize axis_aligned_quad_size(const GrQuad& quad) {
    SkASSERT(quad.quadType() == GrQuad::Type::kAxisAligned);
    // Simplification of regular edge length equation, since it's axis aligned and can avoid sqrt
    float dw = sk_float_abs(quad.x(2) - quad.x(0)) + sk_float_abs(quad.y(2) - quad.y(0));
    float dh = sk_float_abs(quad.x(1) - quad.x(0)) + sk_float_abs(quad.y(1) - quad.y(0));
    return {dw, dh};
}
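// For illustration (values not exercised by the code above): the rect {0, 0, 10, 20} rotated
// 90 degrees about the origin is still GrQuad::Type::kAxisAligned, with v0=(0,0), v1=(-20,0),
// v2=(0,10), v3=(-20,10). Each edge then has one zero component, so |dx| + |dy| recovers the
// true edge lengths without a sqrt: dw = 0 + 10 = 10 and dh = 20 + 0 = 20.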

static bool filter_has_effect(const GrQuad& srcQuad, const GrQuad& dstQuad) {
    // If not axis-aligned in src or dst, then always say it has an effect
    if (srcQuad.quadType() != GrQuad::Type::kAxisAligned ||
        dstQuad.quadType() != GrQuad::Type::kAxisAligned) {
        return true;
    }

    SkRect srcRect;
    SkRect dstRect;
    if (srcQuad.asRect(&srcRect) && dstQuad.asRect(&dstRect)) {
        // Disable filtering when there is no scaling (width and height are the same), and the
        // top-left corners have the same fraction (so src and dst snap to the pixel grid
        // identically).
        SkASSERT(srcRect.isSorted());
        return srcRect.width() != dstRect.width() || srcRect.height() != dstRect.height() ||
               SkScalarFraction(srcRect.fLeft) != SkScalarFraction(dstRect.fLeft) ||
               SkScalarFraction(srcRect.fTop) != SkScalarFraction(dstRect.fTop);
    } else {
        // Although the quads are axis-aligned, the local coordinate system is transformed such
        // that fractionally-aligned sample centers will not align with the device coordinate
        // system. So only disable filtering when the edges are the same length and the 0th
        // vertex of both srcQuad and dstQuad is integer aligned.
        if (SkScalarIsInt(srcQuad.x(0)) && SkScalarIsInt(srcQuad.y(0)) &&
            SkScalarIsInt(dstQuad.x(0)) && SkScalarIsInt(dstQuad.y(0))) {
            // Extract edge lengths
            SkSize srcSize = axis_aligned_quad_size(srcQuad);
            SkSize dstSize = axis_aligned_quad_size(dstQuad);
            return srcSize.fWidth != dstSize.fWidth || srcSize.fHeight != dstSize.fHeight;
        } else {
            return true;
        }
    }
}
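// As a concrete example of the rect path above (illustrative values only): drawing src rect
// {1.5, 2.5, 11.5, 22.5} to dst rect {4.5, 6.5, 14.5, 26.5} is a pure translation (same 10x20
// size, matching 0.5 fractional offsets), so nearest sampling lands on the same texels as bilerp
// and this returns false. Any scaling, or a mismatch in fractional offsets, returns true.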

// if normalizing the domain then pass 1/width, 1/height, 1 for iw, ih, h. Otherwise pass
// 1, 1, and height.
static void compute_domain(Domain domain, GrSamplerState::Filter filter, GrSurfaceOrigin origin,
                           const SkRect& domainRect, float iw, float ih, float h, SkRect* out) {
    static constexpr SkRect kLargeRect = {-100000, -100000, 1000000, 1000000};
    if (domain == Domain::kNo) {
        // Either the quad has no domain constraint and is batched with a domain constrained op
        // (in which case we want a domain that doesn't restrict normalized tex coords), or the
        // entire op doesn't use the domain, in which case the returned value is ignored.
        *out = kLargeRect;
        return;
    }

    auto ltrb = Sk4f::Load(&domainRect);
    if (filter == GrSamplerState::Filter::kBilerp) {
        auto rblt = SkNx_shuffle<2, 3, 0, 1>(ltrb);
        auto whwh = (rblt - ltrb).abs();
        auto c = (rblt + ltrb) * 0.5f;
        static const Sk4f kOffsets = {0.5f, 0.5f, -0.5f, -0.5f};
        ltrb = (whwh < 1.f).thenElse(c, ltrb + kOffsets);
    }
    ltrb *= Sk4f(iw, ih, iw, ih);
    if (origin == kBottomLeft_GrSurfaceOrigin) {
        static const Sk4f kMul = {1.f, -1.f, 1.f, -1.f};
        const Sk4f kAdd = {0.f, h, 0.f, h};
        ltrb = SkNx_shuffle<0, 3, 2, 1>(kMul * ltrb + kAdd);
    }

    ltrb.store(out);
}
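// Worked example of the above (illustrative values only): a domain of {10, 20, 74, 52} on a
// 128x64 texture with bilerp and a top-left origin is first inset by half a texel to
// {10.5, 20.5, 73.5, 51.5} so filtering never reads outside the constraint, then scaled by
// (iw, ih) = (1/128, 1/64) into normalized coords. If a domain dimension is under one texel,
// that pair of edges collapses to its center instead. For a bottom-left origin the vertical
// edges are flipped: the new top is h - oldBottom and the new bottom is h - oldTop.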

// Normalizes logical src coords and corrects for origin
static void compute_src_quad(GrSurfaceOrigin origin, const GrQuad& srcQuad,
                             float iw, float ih, float h, GrQuad* out) {
    // The src quad should not have any perspective
    SkASSERT(!srcQuad.hasPerspective() && !out->hasPerspective());
    skvx::Vec<4, float> xs = srcQuad.x4f() * iw;
    skvx::Vec<4, float> ys = srcQuad.y4f() * ih;
    if (origin == kBottomLeft_GrSurfaceOrigin) {
        ys = h - ys;
    }
    xs.store(out->xs());
    ys.store(out->ys());
    out->setQuadType(srcQuad.quadType());
}
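// For instance (illustrative values only): a local quad covering texels {16, 8, 48, 40} of a
// 64x64 texture normalizes to {0.25, 0.125, 0.75, 0.625} when iw = ih = 1/64 and h = 1; with a
// bottom-left origin each y becomes 1 - y. For rectangle textures iw = ih = 1 and h is the
// texture height, so coords stay in texel space and only the vertical flip is applied.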

/**
 * Op that implements GrTextureOp::Make. It draws textured quads. Each quad can modulate the
 * texture by a color. The blend with the destination is always src-over. Whether and which edges
 * are antialiased is determined by the GrAAType and GrQuadAAFlags passed to Make.
 */
class TextureOp final : public GrMeshDrawOp {
public:
    static std::unique_ptr<GrDrawOp> Make(GrRecordingContext* context,
                                          sk_sp<GrTextureProxy> proxy,
                                          sk_sp<GrColorSpaceXform> textureXform,
                                          GrSamplerState::Filter filter,
                                          const SkPMColor4f& color,
                                          GrAAType aaType,
                                          GrQuadAAFlags aaFlags,
                                          const GrQuad& deviceQuad,
                                          const GrQuad& localQuad,
                                          const SkRect* domain) {
        GrOpMemoryPool* pool = context->priv().opMemoryPool();
        return pool->allocate<TextureOp>(
                std::move(proxy), std::move(textureXform), filter, color, aaType, aaFlags,
                deviceQuad, localQuad, domain);
    }

    static std::unique_ptr<GrDrawOp> Make(GrRecordingContext* context,
                                          const GrRenderTargetContext::TextureSetEntry set[],
                                          int cnt, GrSamplerState::Filter filter, GrAAType aaType,
                                          SkCanvas::SrcRectConstraint constraint,
                                          const SkMatrix& viewMatrix,
                                          sk_sp<GrColorSpaceXform> textureColorSpaceXform) {
        size_t size = sizeof(TextureOp) + sizeof(Proxy) * (cnt - 1);
        GrOpMemoryPool* pool = context->priv().opMemoryPool();
        void* mem = pool->allocate(size);
        return std::unique_ptr<GrDrawOp>(new (mem) TextureOp(
                set, cnt, filter, aaType, constraint, viewMatrix,
                std::move(textureColorSpaceXform)));
    }

    ~TextureOp() override {
        for (unsigned p = 0; p < fProxyCnt; ++p) {
            fProxies[p].fProxy->unref();
        }
    }

    const char* name() const override { return "TextureOp"; }

    void visitProxies(const VisitProxyFunc& func) const override {
        for (unsigned p = 0; p < fProxyCnt; ++p) {
            bool mipped = (GrSamplerState::Filter::kMipMap == this->filter());
            func(fProxies[p].fProxy, GrMipMapped(mipped));
        }
    }

#ifdef SK_DEBUG
    SkString dumpInfo() const override {
        SkString str;
        str.appendf("# draws: %d\n", fQuads.count());
        auto iter = fQuads.iterator();
        for (unsigned p = 0; p < fProxyCnt; ++p) {
            str.appendf("Proxy ID: %d, Filter: %d\n", fProxies[p].fProxy->uniqueID().asUInt(),
                        static_cast<int>(fFilter));
            int i = 0;
            while (i < fProxies[p].fQuadCnt && iter.next()) {
                const GrQuad& quad = iter.deviceQuad();
                const GrQuad& uv = iter.localQuad();
                const ColorDomainAndAA& info = iter.metadata();
                str.appendf(
                        "%d: Color: 0x%08x, Domain(%d): [L: %.2f, T: %.2f, R: %.2f, B: %.2f]\n"
                        "  UVs  [(%.2f, %.2f), (%.2f, %.2f), (%.2f, %.2f), (%.2f, %.2f)]\n"
                        "  Quad [(%.2f, %.2f), (%.2f, %.2f), (%.2f, %.2f), (%.2f, %.2f)]\n",
                        i, info.fColor.toBytes_RGBA(), info.fHasDomain, info.fDomainRect.fLeft,
                        info.fDomainRect.fTop, info.fDomainRect.fRight, info.fDomainRect.fBottom,
                        uv.point(0).fX, uv.point(0).fY, uv.point(1).fX, uv.point(1).fY,
                        uv.point(2).fX, uv.point(2).fY, uv.point(3).fX, uv.point(3).fY,
                        quad.point(0).fX, quad.point(0).fY, quad.point(1).fX, quad.point(1).fY,
                        quad.point(2).fX, quad.point(2).fY, quad.point(3).fX, quad.point(3).fY);

                i++;
            }
        }
        str += INHERITED::dumpInfo();
        return str;
    }
#endif

    GrProcessorSet::Analysis finalize(
            const GrCaps& caps, const GrAppliedClip*, bool hasMixedSampledCoverage,
            GrClampType clampType) override {
        fColorType = static_cast<unsigned>(ColorType::kNone);
        auto iter = fQuads.metadata();
        while (iter.next()) {
            auto colorType = GrQuadPerEdgeAA::MinColorType(iter->fColor, clampType, caps);
            fColorType = SkTMax(fColorType, static_cast<unsigned>(colorType));
        }
        return GrProcessorSet::EmptySetAnalysis();
    }

    FixedFunctionFlags fixedFunctionFlags() const override {
        return this->aaType() == GrAAType::kMSAA ? FixedFunctionFlags::kUsesHWAA
                                                 : FixedFunctionFlags::kNone;
    }

    DEFINE_OP_CLASS_ID

private:
    friend class ::GrOpMemoryPool;

    struct ColorDomainAndAA {
        ColorDomainAndAA(const SkPMColor4f& color, const SkRect* domainRect, GrQuadAAFlags aaFlags)
                : fColor(color)
                , fDomainRect(domainRect ? *domainRect : SkRect::MakeEmpty())
                , fHasDomain(static_cast<unsigned>(domainRect ? Domain::kYes : Domain::kNo))
                , fAAFlags(static_cast<unsigned>(aaFlags)) {
            SkASSERT(fAAFlags == static_cast<unsigned>(aaFlags));
        }

        SkPMColor4f fColor;
        SkRect fDomainRect;
        unsigned fHasDomain : 1;
        unsigned fAAFlags : 4;

        Domain domain() const { return Domain(fHasDomain); }
        GrQuadAAFlags aaFlags() const { return static_cast<GrQuadAAFlags>(fAAFlags); }
    };
    struct Proxy {
        GrTextureProxy* fProxy;
        int fQuadCnt;
    };

    // dstQuad should be the geometry transformed by the view matrix. If domainRect
    // is not null it will be used to apply the strict src rect constraint.
    TextureOp(sk_sp<GrTextureProxy> proxy, sk_sp<GrColorSpaceXform> textureColorSpaceXform,
              GrSamplerState::Filter filter, const SkPMColor4f& color,
              GrAAType aaType, GrQuadAAFlags aaFlags,
              const GrQuad& dstQuad, const GrQuad& srcQuad, const SkRect* domainRect)
            : INHERITED(ClassID())
            , fQuads(1, true /* includes locals */)
            , fTextureColorSpaceXform(std::move(textureColorSpaceXform))
            , fFilter(static_cast<unsigned>(filter)) {
        // Clean up disparities between the overall aa type and edge configuration and apply
        // optimizations based on the rect and matrix when appropriate
        GrQuadUtils::ResolveAAType(aaType, aaFlags, dstQuad, &aaType, &aaFlags);
        fAAType = static_cast<unsigned>(aaType);

        // We expect our caller to have already caught this optimization.
        SkASSERT(!domainRect || !domainRect->contains(proxy->getWorstCaseBoundsRect()));

        // We may have had a strict constraint with nearest filter solely due to possible AA bloat.
        // If we don't have (or determined we don't need) coverage AA then we can skip using a
        // domain.
        if (domainRect && this->filter() == GrSamplerState::Filter::kNearest &&
            aaType != GrAAType::kCoverage) {
            domainRect = nullptr;
        }

        fQuads.append(dstQuad, {color, domainRect, aaFlags}, &srcQuad);

        fProxyCnt = 1;
        fProxies[0] = {proxy.release(), 1};
        this->setBounds(dstQuad.bounds(), HasAABloat(aaType == GrAAType::kCoverage),
                        IsZeroArea::kNo);
        fDomain = static_cast<unsigned>(domainRect != nullptr);
    }
    TextureOp(const GrRenderTargetContext::TextureSetEntry set[], int cnt,
              GrSamplerState::Filter filter, GrAAType aaType,
              SkCanvas::SrcRectConstraint constraint, const SkMatrix& viewMatrix,
              sk_sp<GrColorSpaceXform> textureColorSpaceXform)
            : INHERITED(ClassID())
            , fQuads(cnt, true /* includes locals */)
            , fTextureColorSpaceXform(std::move(textureColorSpaceXform))
            , fFilter(static_cast<unsigned>(filter)) {
        fProxyCnt = SkToUInt(cnt);
        SkRect bounds = SkRectPriv::MakeLargestInverted();
        GrAAType overallAAType = GrAAType::kNone; // aa type maximally compatible with all dst rects
        bool mustFilter = false;
        bool allOpaque = true;
        Domain netDomain = Domain::kNo;
        for (unsigned p = 0; p < fProxyCnt; ++p) {
            fProxies[p].fProxy = SkRef(set[p].fProxy.get());
            fProxies[p].fQuadCnt = 1;
            SkASSERT(fProxies[p].fProxy->textureType() == fProxies[0].fProxy->textureType());
            SkASSERT(fProxies[p].fProxy->config() == fProxies[0].fProxy->config());

            SkMatrix ctm = viewMatrix;
            if (set[p].fPreViewMatrix) {
                ctm.preConcat(*set[p].fPreViewMatrix);
            }

            // Use dstRect/srcRect unless dstClip is provided, in which case derive new source
            // coordinates by mapping dstClipQuad by the dstRect to srcRect transform.
            GrQuad quad, srcQuad;
            if (set[p].fDstClipQuad) {
                quad = GrQuad::MakeFromSkQuad(set[p].fDstClipQuad, ctm);

                SkPoint srcPts[4];
                GrMapRectPoints(set[p].fDstRect, set[p].fSrcRect, set[p].fDstClipQuad, srcPts, 4);
                srcQuad = GrQuad::MakeFromSkQuad(srcPts, SkMatrix::I());
            } else {
                quad = GrQuad::MakeFromRect(set[p].fDstRect, ctm);
                srcQuad = GrQuad(set[p].fSrcRect);
            }

            if (!mustFilter && this->filter() != GrSamplerState::Filter::kNearest) {
                mustFilter = filter_has_effect(srcQuad, quad);
            }

            bounds.joinPossiblyEmptyRect(quad.bounds());
            GrQuadAAFlags aaFlags;
            // Don't update the overall aaType; it might be inappropriate for some of the quads.
            GrAAType aaForQuad;
            GrQuadUtils::ResolveAAType(aaType, set[p].fAAFlags, quad, &aaForQuad, &aaFlags);
            // Resolve sets aaForQuad to aaType or kNone; it never switches between AA methods.
            SkASSERT(aaForQuad == GrAAType::kNone || aaForQuad == aaType);
            if (overallAAType == GrAAType::kNone && aaForQuad != GrAAType::kNone) {
                overallAAType = aaType;
            }

            // Calculate metadata for the entry
            const SkRect* domainForQuad = nullptr;
            if (constraint == SkCanvas::kStrict_SrcRectConstraint) {
                // Check (briefly) if the strict constraint is needed for this set entry
                if (!set[p].fSrcRect.contains(fProxies[p].fProxy->getWorstCaseBoundsRect()) &&
                    (mustFilter || aaForQuad == GrAAType::kCoverage)) {
                    // Can't rely on hardware clamping and the draw will access outer texels
                    // for AA and/or bilerp
                    netDomain = Domain::kYes;
                    domainForQuad = &set[p].fSrcRect;
                }
            }
            float alpha = SkTPin(set[p].fAlpha, 0.f, 1.f);
            allOpaque &= (1.f == alpha);
            SkPMColor4f color{alpha, alpha, alpha, alpha};
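            // Illustration (not part of the original comments): an entry alpha of 0.5 becomes
            // the premultiplied constant {0.5, 0.5, 0.5, 0.5}; the texture is modulated by this
            // color, so the entry draws at 50% opacity under the op's src-over blend.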
            fQuads.append(quad, {color, domainForQuad, aaFlags}, &srcQuad);
        }
        fAAType = static_cast<unsigned>(overallAAType);
        if (!mustFilter) {
            fFilter = static_cast<unsigned>(GrSamplerState::Filter::kNearest);
        }
        this->setBounds(bounds, HasAABloat(this->aaType() == GrAAType::kCoverage), IsZeroArea::kNo);
        fDomain = static_cast<unsigned>(netDomain);
    }

    void tess(void* v, const VertexSpec& spec, const GrTextureProxy* proxy,
              GrQuadBuffer<ColorDomainAndAA>::Iter* iter, int cnt) const {
        TRACE_EVENT0("skia.gpu", TRACE_FUNC);
        auto origin = proxy->origin();
        const auto* texture = proxy->peekTexture();
        float iw, ih, h;
        if (proxy->textureType() == GrTextureType::kRectangle) {
            iw = ih = 1.f;
            h = texture->height();
        } else {
            iw = 1.f / texture->width();
            ih = 1.f / texture->height();
            h = 1.f;
        }

        int i = 0;
        // Explicit ctor ensures ws are 1s, which compute_src_quad requires
        GrQuad srcQuad(SkRect::MakeEmpty());
        SkRect domain;
        while (i < cnt && iter->next()) {
            SkASSERT(iter->isLocalValid());
            const ColorDomainAndAA& info = iter->metadata();
            // Must correct the texture coordinates and domain now that the real texture size
            // is known
            compute_src_quad(origin, iter->localQuad(), iw, ih, h, &srcQuad);
            compute_domain(info.domain(), this->filter(), origin, info.fDomainRect, iw, ih, h,
                           &domain);
            v = GrQuadPerEdgeAA::Tessellate(v, spec, iter->deviceQuad(), info.fColor, srcQuad,
                                            domain, info.aaFlags());
            i++;
        }
    }

    void onPrepareDraws(Target* target) override {
        TRACE_EVENT0("skia.gpu", TRACE_FUNC);
        GrQuad::Type quadType = GrQuad::Type::kAxisAligned;
        GrQuad::Type srcQuadType = GrQuad::Type::kAxisAligned;
        Domain domain = Domain::kNo;
        ColorType colorType = ColorType::kNone;
        int numProxies = 0;
        int numTotalQuads = 0;
        auto textureType = fProxies[0].fProxy->textureType();
        const GrSwizzle& swizzle = fProxies[0].fProxy->textureSwizzle();
        GrAAType aaType = this->aaType();
        for (const auto& op : ChainRange<TextureOp>(this)) {
            if (op.fQuads.deviceQuadType() > quadType) {
                quadType = op.fQuads.deviceQuadType();
            }
            if (op.fQuads.localQuadType() > srcQuadType) {
                srcQuadType = op.fQuads.localQuadType();
            }
            if (op.fDomain) {
                domain = Domain::kYes;
            }
            colorType = SkTMax(colorType, static_cast<ColorType>(op.fColorType));
            numProxies += op.fProxyCnt;
            for (unsigned p = 0; p < op.fProxyCnt; ++p) {
                numTotalQuads += op.fProxies[p].fQuadCnt;
                auto* proxy = op.fProxies[p].fProxy;
                if (!proxy->isInstantiated()) {
                    return;
                }
                SkASSERT(proxy->textureType() == textureType);
                SkASSERT(proxy->textureSwizzle() == swizzle);
            }
            if (op.aaType() == GrAAType::kCoverage) {
                SkASSERT(aaType == GrAAType::kCoverage || aaType == GrAAType::kNone);
                aaType = GrAAType::kCoverage;
            }
        }

        VertexSpec vertexSpec(quadType, colorType, srcQuadType, /* hasLocal */ true, domain, aaType,
                              /* alpha as coverage */ true);

        GrSamplerState samplerState = GrSamplerState(GrSamplerState::WrapMode::kClamp,
                                                     this->filter());
        GrGpu* gpu = target->resourceProvider()->priv().gpu();
        uint32_t extraSamplerKey = gpu->getExtraSamplerKeyForProgram(
                samplerState, fProxies[0].fProxy->backendFormat());

        sk_sp<GrGeometryProcessor> gp = GrQuadPerEdgeAA::MakeTexturedProcessor(
                vertexSpec, *target->caps().shaderCaps(), textureType, samplerState, swizzle,
                extraSamplerKey, std::move(fTextureColorSpaceXform));

        // We'll use a dynamic state array for the GP textures when there are multiple ops.
        // Otherwise, we use fixed dynamic state to specify the single op's proxy.
        GrPipeline::DynamicStateArrays* dynamicStateArrays = nullptr;
        GrPipeline::FixedDynamicState* fixedDynamicState;
        if (numProxies > 1) {
            dynamicStateArrays = target->allocDynamicStateArrays(numProxies, 1, false);
            fixedDynamicState = target->makeFixedDynamicState(0);
        } else {
            fixedDynamicState = target->makeFixedDynamicState(1);
            fixedDynamicState->fPrimitiveProcessorTextures[0] = fProxies[0].fProxy;
        }

        size_t vertexSize = gp->vertexStride();

        GrMesh* meshes = target->allocMeshes(numProxies);
        sk_sp<const GrBuffer> vbuffer;
        int vertexOffsetInBuffer = 0;
        int numQuadVerticesLeft = numTotalQuads * vertexSpec.verticesPerQuad();
        int numAllocatedVertices = 0;
        void* vdata = nullptr;

        int m = 0;
        for (const auto& op : ChainRange<TextureOp>(this)) {
            auto iter = op.fQuads.iterator();
            for (unsigned p = 0; p < op.fProxyCnt; ++p) {
                int quadCnt = op.fProxies[p].fQuadCnt;
                auto* proxy = op.fProxies[p].fProxy;
                int meshVertexCnt = quadCnt * vertexSpec.verticesPerQuad();
                if (numAllocatedVertices < meshVertexCnt) {
                    vdata = target->makeVertexSpaceAtLeast(
                            vertexSize, meshVertexCnt, numQuadVerticesLeft, &vbuffer,
                            &vertexOffsetInBuffer, &numAllocatedVertices);
                    SkASSERT(numAllocatedVertices <= numQuadVerticesLeft);
                    if (!vdata) {
                        SkDebugf("Could not allocate vertices\n");
                        return;
                    }
                }
                SkASSERT(numAllocatedVertices >= meshVertexCnt);

                op.tess(vdata, vertexSpec, proxy, &iter, quadCnt);

                if (!GrQuadPerEdgeAA::ConfigureMeshIndices(target, &(meshes[m]), vertexSpec,
                                                           quadCnt)) {
                    SkDebugf("Could not allocate indices\n");
                    return;
                }
                meshes[m].setVertexData(vbuffer, vertexOffsetInBuffer);
                if (dynamicStateArrays) {
                    dynamicStateArrays->fPrimitiveProcessorTextures[m] = proxy;
                }
                ++m;
                numAllocatedVertices -= meshVertexCnt;
                numQuadVerticesLeft -= meshVertexCnt;
                vertexOffsetInBuffer += meshVertexCnt;
                vdata = reinterpret_cast<char*>(vdata) + vertexSize * meshVertexCnt;
            }
            // If quad counts per proxy were calculated correctly, the entire iterator should have
            // been consumed.
            SkASSERT(!iter.next());
        }
        SkASSERT(!numQuadVerticesLeft);
        SkASSERT(!numAllocatedVertices);
        target->recordDraw(
                std::move(gp), meshes, numProxies, fixedDynamicState, dynamicStateArrays);
    }

    void onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) override {
        auto pipelineFlags = (GrAAType::kMSAA == this->aaType())
                ? GrPipeline::InputFlags::kHWAntialias
                : GrPipeline::InputFlags::kNone;
        flushState->executeDrawsAndUploadsForMeshDrawOp(
                this, chainBounds, GrProcessorSet::MakeEmptySet(), pipelineFlags);
    }

    CombineResult onCombineIfPossible(GrOp* t, const GrCaps& caps) override {
        TRACE_EVENT0("skia.gpu", TRACE_FUNC);
        const auto* that = t->cast<TextureOp>();
        if (fDomain != that->fDomain) {
            // It is technically possible to combine operations across domain modes, but
            // performance testing suggests it's better to make more draw calls where some take
            // advantage of the more optimal shader path without coordinate clamping.
            return CombineResult::kCannotCombine;
        }
        if (!GrColorSpaceXform::Equals(fTextureColorSpaceXform.get(),
                                       that->fTextureColorSpaceXform.get())) {
            return CombineResult::kCannotCombine;
        }
        bool upgradeToCoverageAAOnMerge = false;
        if (this->aaType() != that->aaType()) {
            if (!((this->aaType() == GrAAType::kCoverage && that->aaType() == GrAAType::kNone) ||
                  (that->aaType() == GrAAType::kCoverage && this->aaType() == GrAAType::kNone))) {
                return CombineResult::kCannotCombine;
            }
            upgradeToCoverageAAOnMerge = true;
        }
        if (fFilter != that->fFilter) {
            return CombineResult::kCannotCombine;
        }
        auto thisProxy = fProxies[0].fProxy;
        auto thatProxy = that->fProxies[0].fProxy;
        if (fProxyCnt > 1 || that->fProxyCnt > 1 ||
            thisProxy->uniqueID() != thatProxy->uniqueID()) {
            // We can't merge across different proxies. Check if 'this' can be chained with 'that'.
            if (GrTextureProxy::ProxiesAreCompatibleAsDynamicState(thisProxy, thatProxy) &&
                caps.dynamicStateArrayGeometryProcessorTextureSupport()) {
                return CombineResult::kMayChain;
            }
            return CombineResult::kCannotCombine;
        }

        fDomain |= that->fDomain;
        fColorType = SkTMax(fColorType, that->fColorType);
        if (upgradeToCoverageAAOnMerge) {
            fAAType = static_cast<unsigned>(GrAAType::kCoverage);
        }

        // Concatenate quad lists together
        fQuads.concat(that->fQuads);
        fProxies[0].fQuadCnt += that->fQuads.count();

        return CombineResult::kMerged;
    }

    GrAAType aaType() const { return static_cast<GrAAType>(fAAType); }
    GrSamplerState::Filter filter() const { return static_cast<GrSamplerState::Filter>(fFilter); }

    GrQuadBuffer<ColorDomainAndAA> fQuads;
    sk_sp<GrColorSpaceXform> fTextureColorSpaceXform;
    unsigned fFilter : 2;
    unsigned fAAType : 2;
    unsigned fDomain : 1;
    unsigned fColorType : 2;
    GR_STATIC_ASSERT(GrQuadPerEdgeAA::kColorTypeCount <= 4);
    unsigned fProxyCnt : 32 - 7;
    Proxy fProxies[1];

    static_assert(GrQuad::kTypeCount <= 4, "GrQuad::Type does not fit in 2 bits");

    typedef GrMeshDrawOp INHERITED;
};

}  // anonymous namespace

namespace GrTextureOp {

std::unique_ptr<GrDrawOp> Make(GrRecordingContext* context,
                               sk_sp<GrTextureProxy> proxy,
                               sk_sp<GrColorSpaceXform> textureXform,
                               GrSamplerState::Filter filter,
                               const SkPMColor4f& color,
                               SkBlendMode blendMode,
                               GrAAType aaType,
                               GrQuadAAFlags aaFlags,
                               const GrQuad& deviceQuad,
                               const GrQuad& localQuad,
                               const SkRect* domain) {
    // Apply optimizations that are valid whether the draw ends up in GrTextureOp or GrFillRectOp
    if (domain && domain->contains(proxy->getWorstCaseBoundsRect())) {
        // No need for a shader-based domain if hardware clamping achieves the same effect
        domain = nullptr;
    }

    if (filter != GrSamplerState::Filter::kNearest && !filter_has_effect(localQuad, deviceQuad)) {
        filter = GrSamplerState::Filter::kNearest;
    }

    if (blendMode == SkBlendMode::kSrcOver) {
        return TextureOp::Make(context, std::move(proxy), std::move(textureXform), filter, color,
                               aaType, aaFlags, deviceQuad, localQuad, domain);
    } else {
        // Emulate complex blending using GrFillRectOp
        GrPaint paint;
        paint.setColor4f(color);
        paint.setXPFactory(SkBlendMode_AsXPFactory(blendMode));

        std::unique_ptr<GrFragmentProcessor> fp;
        if (domain) {
            // Update domain to match what GrTextureOp computes during tessellation, using top-left
            // as the origin so that it doesn't depend on final texture size (which the FP handles
            // later, as well as accounting for the true origin).
            SkRect correctedDomain;
            compute_domain(Domain::kYes, filter, kTopLeft_GrSurfaceOrigin, *domain,
                           1.f, 1.f, proxy->height(), &correctedDomain);
            fp = GrTextureDomainEffect::Make(std::move(proxy), SkMatrix::I(), correctedDomain,
                                             GrTextureDomain::kClamp_Mode, filter);
        } else {
            fp = GrSimpleTextureEffect::Make(std::move(proxy), SkMatrix::I(), filter);
        }
        fp = GrColorSpaceXformEffect::Make(std::move(fp), std::move(textureXform));
        paint.addColorFragmentProcessor(std::move(fp));

        return GrFillRectOp::Make(context, std::move(paint), aaType, aaFlags,
                                  deviceQuad, localQuad);
    }
}

std::unique_ptr<GrDrawOp> MakeSet(GrRecordingContext* context,
                                  const GrRenderTargetContext::TextureSetEntry set[],
                                  int cnt,
                                  GrSamplerState::Filter filter,
                                  GrAAType aaType,
                                  SkCanvas::SrcRectConstraint constraint,
                                  const SkMatrix& viewMatrix,
                                  sk_sp<GrColorSpaceXform> textureColorSpaceXform) {
    return TextureOp::Make(context, set, cnt, filter, aaType, constraint, viewMatrix,
                           std::move(textureColorSpaceXform));
}

}  // namespace GrTextureOp

#if GR_TEST_UTILS
#include "include/private/GrRecordingContext.h"
#include "src/gpu/GrProxyProvider.h"
#include "src/gpu/GrRecordingContextPriv.h"

GR_DRAW_OP_TEST_DEFINE(TextureOp) {
    GrSurfaceDesc desc;
    desc.fConfig = kRGBA_8888_GrPixelConfig;
    desc.fHeight = random->nextULessThan(90) + 10;
    desc.fWidth = random->nextULessThan(90) + 10;
    auto origin = random->nextBool() ? kTopLeft_GrSurfaceOrigin : kBottomLeft_GrSurfaceOrigin;
    GrMipMapped mipMapped = random->nextBool() ? GrMipMapped::kYes : GrMipMapped::kNo;
    SkBackingFit fit = SkBackingFit::kExact;
    if (mipMapped == GrMipMapped::kNo) {
        fit = random->nextBool() ? SkBackingFit::kApprox : SkBackingFit::kExact;
    }
    const GrBackendFormat format =
            context->priv().caps()->getDefaultBackendFormat(GrColorType::kRGBA_8888,
                                                            GrRenderable::kNo);

    GrProxyProvider* proxyProvider = context->priv().proxyProvider();
    sk_sp<GrTextureProxy> proxy = proxyProvider->createProxy(
            format, desc, GrRenderable::kNo, 1, origin, mipMapped, fit, SkBudgeted::kNo,
            GrProtected::kNo, GrInternalSurfaceFlags::kNone);

    SkRect rect = GrTest::TestRect(random);
    SkRect srcRect;
    srcRect.fLeft = random->nextRangeScalar(0.f, proxy->width() / 2.f);
    srcRect.fRight = random->nextRangeScalar(0.f, proxy->width()) + proxy->width() / 2.f;
    srcRect.fTop = random->nextRangeScalar(0.f, proxy->height() / 2.f);
    srcRect.fBottom = random->nextRangeScalar(0.f, proxy->height()) + proxy->height() / 2.f;
    SkMatrix viewMatrix = GrTest::TestMatrixPreservesRightAngles(random);
    SkPMColor4f color = SkPMColor4f::FromBytes_RGBA(SkColorToPremulGrColor(random->nextU()));
    GrSamplerState::Filter filter = (GrSamplerState::Filter)random->nextULessThan(
            static_cast<uint32_t>(GrSamplerState::Filter::kMipMap) + 1);
    while (mipMapped == GrMipMapped::kNo && filter == GrSamplerState::Filter::kMipMap) {
        filter = (GrSamplerState::Filter)random->nextULessThan(
                static_cast<uint32_t>(GrSamplerState::Filter::kMipMap) + 1);
    }
    auto texXform = GrTest::TestColorXform(random);
    GrAAType aaType = GrAAType::kNone;
    if (random->nextBool()) {
        aaType = (numSamples > 1) ? GrAAType::kMSAA : GrAAType::kCoverage;
    }
    GrQuadAAFlags aaFlags = GrQuadAAFlags::kNone;
    aaFlags |= random->nextBool() ? GrQuadAAFlags::kLeft : GrQuadAAFlags::kNone;
    aaFlags |= random->nextBool() ? GrQuadAAFlags::kTop : GrQuadAAFlags::kNone;
    aaFlags |= random->nextBool() ? GrQuadAAFlags::kRight : GrQuadAAFlags::kNone;
    aaFlags |= random->nextBool() ? GrQuadAAFlags::kBottom : GrQuadAAFlags::kNone;
    bool useDomain = random->nextBool();
    return GrTextureOp::Make(context, std::move(proxy), std::move(texXform), filter, color,
                             SkBlendMode::kSrcOver, aaType, aaFlags,
                             GrQuad::MakeFromRect(rect, viewMatrix), GrQuad(srcRect),
                             useDomain ? &srcRect : nullptr);
}

#endif