/*
 * Copyright 2019 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "include/gpu/GrDirectContext.h"
#include "src/core/SkBlendModePriv.h"
#include "src/gpu/GrDirectContextPriv.h"
#include "src/gpu/GrOpsTypes.h"
#include "src/gpu/GrProxyProvider.h"
#include "src/gpu/GrResourceProvider.h"
#include "src/gpu/ops/FillRectOp.h"
#include "src/gpu/ops/TextureOp.h"
#include "src/gpu/v1/SurfaceDrawContext_v1.h"
#include "tests/Test.h"

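// Creates the 128x128 destination SurfaceDrawContext that each test draws into.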
static std::unique_ptr<skgpu::v1::SurfaceDrawContext> new_SDC(GrRecordingContext* rContext) {
    return skgpu::v1::SurfaceDrawContext::Make(
            rContext, GrColorType::kRGBA_8888, nullptr, SkBackingFit::kExact, {128, 128},
            SkSurfaceProps());
}

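// Creates a 128x128 renderable RGBA_8888 proxy. Each call returns a distinct proxy, which lets
// the tests control whether ops share a texture and therefore whether they can merge.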
static sk_sp<GrSurfaceProxy> create_proxy(GrRecordingContext* rContext) {
    static constexpr SkISize kDimensions = {128, 128};

    const GrBackendFormat format = rContext->priv().caps()->getDefaultBackendFormat(
            GrColorType::kRGBA_8888,
            GrRenderable::kYes);
    return rContext->priv().proxyProvider()->createProxy(
            format, kDimensions, GrRenderable::kYes, 1, GrMipmapped::kNo, SkBackingFit::kExact,
            SkBudgeted::kNo, GrProtected::kNo, GrInternalSurfaceFlags::kNone);
}

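// Maps a quad's index to the per-quad AA flags used for it in a given test pattern.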
typedef GrQuadAAFlags (*PerQuadAAFunc)(int i);

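// Common signature shared by the FillRectOp and TextureOp bulk-creation tests below.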
typedef void (*BulkRectTest)(skiatest::Reporter*,
                             GrDirectContext*,
                             PerQuadAAFunc,
                             GrAAType overallAA,
                             SkBlendMode,
                             bool addOneByOne,
                             bool allUniqueProxies,
                             int requestedTotNumQuads,
                             int expectedNumOps);

//-------------------------------------------------------------------------------------------------
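// Adds 'requestedTotNumQuads' rects via FillRectOp::AddFillRectOps and verifies that they are
// batched into 'expectedNumOps' FillRectOps that together contain all of the requested quads.
// The addOneByOne and allUniqueProxies configurations do not apply here and are skipped.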
static void fillrectop_creation_test(skiatest::Reporter* reporter, GrDirectContext* dContext,
                                     PerQuadAAFunc perQuadAA, GrAAType overallAA,
                                     SkBlendMode blendMode, bool addOneByOne,
                                     bool allUniqueProxies,
                                     int requestedTotNumQuads, int expectedNumOps) {

    if (addOneByOne || allUniqueProxies) {
        return;
    }

    std::unique_ptr<skgpu::v1::SurfaceDrawContext> sdc = new_SDC(dContext);

    auto quads = new GrQuadSetEntry[requestedTotNumQuads];

    for (int i = 0; i < requestedTotNumQuads; ++i) {
        quads[i].fRect = SkRect::MakeWH(100.5f, 100.5f); // prevent the int non-AA optimization
        quads[i].fColor = SK_PMColor4fWHITE;
        quads[i].fLocalMatrix = SkMatrix::I();
        quads[i].fAAFlags = perQuadAA(i);
    }

    GrPaint paint;
    paint.setXPFactory(SkBlendMode_AsXPFactory(blendMode));

    skgpu::v1::FillRectOp::AddFillRectOps(sdc.get(), nullptr, dContext, std::move(paint), overallAA,
                                          SkMatrix::I(), quads, requestedTotNumQuads);

    auto opsTask = sdc->testingOnly_PeekLastOpsTask();
    int actualNumOps = opsTask->numOpChains();

    int actualTotNumQuads = 0;

    for (int i = 0; i < actualNumOps; ++i) {
        const GrOp* tmp = opsTask->getChain(i);
        REPORTER_ASSERT(reporter, tmp->classID() == skgpu::v1::FillRectOp::ClassID());
        REPORTER_ASSERT(reporter, tmp->isChainTail());
        actualTotNumQuads += ((GrDrawOp*) tmp)->numQuads();
    }

    REPORTER_ASSERT(reporter, expectedNumOps == actualNumOps);
    REPORTER_ASSERT(reporter, requestedTotNumQuads == actualTotNumQuads);

    dContext->flushAndSubmit();

    delete[] quads;
}

//-------------------------------------------------------------------------------------------------
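// Adds 'requestedTotNumQuads' textured quads, either one at a time via TextureOp::Make or in bulk
// via TextureOp::AddTextureSetOps, and verifies the resulting op count and total quad count.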
static void textureop_creation_test(skiatest::Reporter* reporter, GrDirectContext* dContext,
                                    PerQuadAAFunc perQuadAA, GrAAType overallAA,
                                    SkBlendMode blendMode, bool addOneByOne,
                                    bool allUniqueProxies,
                                    int requestedTotNumQuads, int expectedNumOps) {

    std::unique_ptr<skgpu::v1::SurfaceDrawContext> sdc = new_SDC(dContext);

    GrSurfaceProxyView proxyViewA, proxyViewB;

    if (!allUniqueProxies) {
        sk_sp<GrSurfaceProxy> proxyA = create_proxy(dContext);
        sk_sp<GrSurfaceProxy> proxyB = create_proxy(dContext);
        proxyViewA = GrSurfaceProxyView(std::move(proxyA),
                                        kTopLeft_GrSurfaceOrigin,
                                        GrSwizzle::RGBA());
        proxyViewB = GrSurfaceProxyView(std::move(proxyB),
                                        kTopLeft_GrSurfaceOrigin,
                                        GrSwizzle::RGBA());
    }

    auto set = new GrTextureSetEntry[requestedTotNumQuads];

    for (int i = 0; i < requestedTotNumQuads; ++i) {
        if (!allUniqueProxies) {
            // Alternate between two proxies to prevent op merging if the batch API was forced to
            // submit one op at a time (to work, this does require that all fDstRects overlap).
            set[i].fProxyView = i % 2 == 0 ? proxyViewA : proxyViewB;
        } else {
            // Each op gets its own proxy to force chaining only
            sk_sp<GrSurfaceProxy> proxyA = create_proxy(dContext);
            set[i].fProxyView = GrSurfaceProxyView(std::move(proxyA),
                                                   kTopLeft_GrSurfaceOrigin,
                                                   GrSwizzle::RGBA());
        }

        set[i].fSrcAlphaType = kPremul_SkAlphaType;
        set[i].fSrcRect = SkRect::MakeWH(100.0f, 100.0f);
        set[i].fDstRect = SkRect::MakeWH(100.5f, 100.5f); // prevent the int non-AA optimization
        set[i].fDstClipQuad = nullptr;
        set[i].fPreViewMatrix = nullptr;
        set[i].fColor = {1.f, 1.f, 1.f, 1.f};
        set[i].fAAFlags = perQuadAA(i);
    }

    if (addOneByOne) {
        for (int i = 0; i < requestedTotNumQuads; ++i) {
            DrawQuad quad;

            quad.fDevice = GrQuad::MakeFromRect(set[i].fDstRect, SkMatrix::I());
            quad.fLocal = GrQuad(set[i].fSrcRect);
            quad.fEdgeFlags = set[i].fAAFlags;

            GrOp::Owner op = skgpu::v1::TextureOp::Make(dContext,
                                                        set[i].fProxyView,
                                                        set[i].fSrcAlphaType,
                                                        nullptr,
                                                        GrSamplerState::Filter::kNearest,
                                                        GrSamplerState::MipmapMode::kNone,
                                                        set[i].fColor,
                                                        skgpu::v1::TextureOp::Saturate::kYes,
                                                        blendMode,
                                                        overallAA,
                                                        &quad,
                                                        nullptr);
            sdc->addDrawOp(nullptr, std::move(op));
        }
    } else {
        skgpu::v1::TextureOp::AddTextureSetOps(sdc.get(),
                                               nullptr,
                                               dContext,
                                               set,
                                               requestedTotNumQuads,
                                               requestedTotNumQuads, // We alternate so proxyCnt == cnt
                                               GrSamplerState::Filter::kNearest,
                                               GrSamplerState::MipmapMode::kNone,
                                               skgpu::v1::TextureOp::Saturate::kYes,
                                               blendMode,
                                               overallAA,
                                               SkCanvas::kStrict_SrcRectConstraint,
                                               SkMatrix::I(),
                                               nullptr);
    }

    auto opsTask = sdc->testingOnly_PeekLastOpsTask();
    int actualNumOps = opsTask->numOpChains();

    int actualTotNumQuads = 0;

    if (blendMode != SkBlendMode::kSrcOver ||
        !dContext->priv().caps()->dynamicStateArrayGeometryProcessorTextureSupport()) {
        // In either of these two cases, TextureOp creates one op per quad instead. Since
        // each entry alternates proxies but overlaps geometrically, this will prevent the ops
        // from being merged back into fewer ops.
        expectedNumOps = requestedTotNumQuads;
    }
    uint32_t expectedOpID = blendMode == SkBlendMode::kSrcOver ? skgpu::v1::TextureOp::ClassID()
                                                               : skgpu::v1::FillRectOp::ClassID();
    for (int i = 0; i < actualNumOps; ++i) {
        const GrOp* tmp = opsTask->getChain(i);
        REPORTER_ASSERT(reporter, allUniqueProxies || tmp->isChainTail());
        while (tmp) {
            REPORTER_ASSERT(reporter, tmp->classID() == expectedOpID);
            actualTotNumQuads += ((GrDrawOp*) tmp)->numQuads();
            tmp = tmp->nextInChain();
        }
    }

    REPORTER_ASSERT(reporter, expectedNumOps == actualNumOps);
    REPORTER_ASSERT(reporter, requestedTotNumQuads == actualTotNumQuads);

    dContext->flushAndSubmit();

    delete[] set;
}

//-------------------------------------------------------------------------------------------------
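// Drives the supplied BulkRectTest through a series of per-quad AA patterns, blend modes, and
// proxy configurations with known expected op counts.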
static void run_test(GrDirectContext* dContext, skiatest::Reporter* reporter, BulkRectTest test) {

    // This is the simple case where there is no AA at all. We expect 2 non-AA clumps of quads.
    {
        auto noAA = [](int i) -> GrQuadAAFlags {
            return GrQuadAAFlags::kNone;
        };

        static const int kNumExpectedOps = 2;

        test(reporter, dContext, noAA, GrAAType::kNone, SkBlendMode::kSrcOver,
             false, false, 2*GrResourceProvider::MaxNumNonAAQuads(), kNumExpectedOps);
    }

    // This is the same as the above case except the overall AA is kCoverage. However, since
    // the per-quad AA is still none, all the quads should be downgraded to non-AA.
    {
        auto noAA = [](int i) -> GrQuadAAFlags {
            return GrQuadAAFlags::kNone;
        };

        static const int kNumExpectedOps = 2;

        test(reporter, dContext, noAA, GrAAType::kCoverage, SkBlendMode::kSrcOver,
             false, false, 2*GrResourceProvider::MaxNumNonAAQuads(), kNumExpectedOps);
    }

    // This case has an overall AA of kCoverage but the per-quad AA alternates.
    // We should end up with several AA-sized clumps.
    {
        auto alternateAA = [](int i) -> GrQuadAAFlags {
            return (i % 2) ? GrQuadAAFlags::kAll : GrQuadAAFlags::kNone;
        };

        int numExpectedOps = 2*GrResourceProvider::MaxNumNonAAQuads() /
                                                 GrResourceProvider::MaxNumAAQuads();

        test(reporter, dContext, alternateAA, GrAAType::kCoverage, SkBlendMode::kSrcOver,
             false, false, 2*GrResourceProvider::MaxNumNonAAQuads(), numExpectedOps);
    }

    // In this case we have a run of MaxNumAAQuads non-AA quads and then AA quads. This
    // exercises the case where we have a clump of quads that can't be upgraded to AA because of
    // its size. We expect one clump of non-AA quads followed by one clump of AA quads.
    {
        auto runOfNonAA = [](int i) -> GrQuadAAFlags {
            return (i < GrResourceProvider::MaxNumAAQuads()) ? GrQuadAAFlags::kNone
                                                             : GrQuadAAFlags::kAll;
        };

        static const int kNumExpectedOps = 2;

        test(reporter, dContext, runOfNonAA, GrAAType::kCoverage, SkBlendMode::kSrcOver,
             false, false, 2*GrResourceProvider::MaxNumAAQuads(), kNumExpectedOps);
    }

    // In this case we use a blend mode other than src-over, which hits the FillRectOp fallback
    // code path for TextureOp. We pass in the expected results as if batching were successful,
    // so that fillrectop_creation_test batches on all modes; textureop_creation_test is
    // responsible for revising its expectations.
    {
        auto fixedAA = [](int i) -> GrQuadAAFlags {
            return GrQuadAAFlags::kAll;
        };

        static const int kNumExpectedOps = 2;

        test(reporter, dContext, fixedAA, GrAAType::kCoverage, SkBlendMode::kSrcATop,
             false, false, 2*GrResourceProvider::MaxNumAAQuads(), kNumExpectedOps);
    }

    // This repros crbug.com/1108475, where we create 1024 non-AA texture ops with one
    // coverage-AA texture op in the middle. Because each op has its own texture, all the
    // texture ops get chained together so the quad count can exceed the AA maximum.
    {
        auto onlyOneAA = [](int i) -> GrQuadAAFlags {
            return i == 256 ? GrQuadAAFlags::kAll : GrQuadAAFlags::kNone;
        };

        static const int kNumExpectedOps = 3;

        test(reporter, dContext, onlyOneAA, GrAAType::kCoverage, SkBlendMode::kSrcOver,
             true, true, 1024, kNumExpectedOps);
    }

    // This repros a problem related to crbug.com/1108475. In this case, the bulk creation
    // method had no way to break up the set of texture ops at the AA quad limit.
    {
        auto onlyOneAA = [](int i) -> GrQuadAAFlags {
            return i == 256 ? GrQuadAAFlags::kAll : GrQuadAAFlags::kNone;
        };

        static const int kNumExpectedOps = 2;

        test(reporter, dContext, onlyOneAA, GrAAType::kCoverage, SkBlendMode::kSrcOver,
             false, true, 1024, kNumExpectedOps);
    }

}

DEF_GPUTEST_FOR_RENDERING_CONTEXTS(BulkFillRectTest, reporter, ctxInfo) {
    run_test(ctxInfo.directContext(), reporter, fillrectop_creation_test);
}

DEF_GPUTEST_FOR_RENDERING_CONTEXTS(BulkTextureRectTest, reporter, ctxInfo) {
    run_test(ctxInfo.directContext(), reporter, textureop_creation_test);
}