1 /*
2 * Copyright 2019 Google LLC
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "include/core/SkAlphaType.h"
9 #include "include/core/SkBlendMode.h"
10 #include "include/core/SkCanvas.h"
11 #include "include/core/SkColorSpace.h"
12 #include "include/core/SkMatrix.h"
13 #include "include/core/SkRect.h"
14 #include "include/core/SkRefCnt.h"
15 #include "include/core/SkSamplingOptions.h"
16 #include "include/core/SkSize.h"
17 #include "include/core/SkSurfaceProps.h"
18 #include "include/core/SkTypes.h"
19 #include "include/gpu/GpuTypes.h"
20 #include "include/gpu/GrBackendSurface.h"
21 #include "include/gpu/GrDirectContext.h"
22 #include "include/gpu/GrRecordingContext.h"
23 #include "include/gpu/GrTypes.h"
24 #include "include/private/SkColorData.h"
25 #include "include/private/gpu/ganesh/GrTypesPriv.h"
26 #include "src/core/SkBlendModePriv.h"
27 #include "src/gpu/SkBackingFit.h"
28 #include "src/gpu/Swizzle.h"
29 #include "src/gpu/ganesh/GrCaps.h"
30 #include "src/gpu/ganesh/GrColorSpaceXform.h"
31 #include "src/gpu/ganesh/GrDirectContextPriv.h"
32 #include "src/gpu/ganesh/GrOpsTypes.h"
33 #include "src/gpu/ganesh/GrPaint.h"
34 #include "src/gpu/ganesh/GrProxyProvider.h"
35 #include "src/gpu/ganesh/GrRecordingContextPriv.h"
36 #include "src/gpu/ganesh/GrResourceProvider.h"
37 #include "src/gpu/ganesh/GrSamplerState.h"
38 #include "src/gpu/ganesh/GrSurfaceProxy.h"
39 #include "src/gpu/ganesh/GrSurfaceProxyView.h"
40 #include "src/gpu/ganesh/SurfaceDrawContext.h"
41 #include "src/gpu/ganesh/geometry/GrQuad.h"
42 #include "src/gpu/ganesh/ops/FillRectOp.h"
43 #include "src/gpu/ganesh/ops/GrDrawOp.h"
44 #include "src/gpu/ganesh/ops/GrOp.h"
45 #include "src/gpu/ganesh/ops/OpsTask.h"
46 #include "src/gpu/ganesh/ops/TextureOp.h"
47 #include "tests/CtsEnforcement.h"
48 #include "tests/Test.h"
49
50 #include <cstdint>
51 #include <memory>
52 #include <utility>
53
54 struct GrContextOptions;
55
56 using namespace skgpu::ganesh;
57
new_SDC(GrRecordingContext * rContext)58 static std::unique_ptr<skgpu::v1::SurfaceDrawContext> new_SDC(GrRecordingContext* rContext) {
59 return skgpu::v1::SurfaceDrawContext::Make(
60 rContext, GrColorType::kRGBA_8888, nullptr, SkBackingFit::kExact, {128, 128},
61 SkSurfaceProps(), /*label=*/{});
62 }
63
create_proxy(GrRecordingContext * rContext)64 static sk_sp<GrSurfaceProxy> create_proxy(GrRecordingContext* rContext) {
65 static constexpr SkISize kDimensions = {128, 128};
66
67 const GrBackendFormat format = rContext->priv().caps()->getDefaultBackendFormat(
68 GrColorType::kRGBA_8888,
69 GrRenderable::kYes);
70 return rContext->priv().proxyProvider()->createProxy(format,
71 kDimensions,
72 GrRenderable::kYes,
73 1,
74 GrMipmapped::kNo,
75 SkBackingFit::kExact,
76 skgpu::Budgeted::kNo,
77 GrProtected::kNo,
78 /*label=*/"CreateSurfaceProxy",
79 GrInternalSurfaceFlags::kNone);
80 }
81
82 typedef GrQuadAAFlags (*PerQuadAAFunc)(int i);
83
84 typedef void (*BulkRectTest)(skiatest::Reporter*,
85 GrDirectContext*,
86 PerQuadAAFunc,
87 GrAAType overallAA,
88 SkBlendMode,
89 bool addOneByOne,
90 bool allUniqueProxies,
91 int requestedTotNumQuads,
92 int expectedNumOps);
93
94 //-------------------------------------------------------------------------------------------------
fillrectop_creation_test(skiatest::Reporter * reporter,GrDirectContext * dContext,PerQuadAAFunc perQuadAA,GrAAType overallAA,SkBlendMode blendMode,bool addOneByOne,bool allUniqueProxies,int requestedTotNumQuads,int expectedNumOps)95 static void fillrectop_creation_test(skiatest::Reporter* reporter, GrDirectContext* dContext,
96 PerQuadAAFunc perQuadAA, GrAAType overallAA,
97 SkBlendMode blendMode, bool addOneByOne,
98 bool allUniqueProxies,
99 int requestedTotNumQuads, int expectedNumOps) {
100
101 if (addOneByOne || allUniqueProxies) {
102 return;
103 }
104
105 std::unique_ptr<skgpu::v1::SurfaceDrawContext> sdc = new_SDC(dContext);
106
107 auto quads = new GrQuadSetEntry[requestedTotNumQuads];
108
109 for (int i = 0; i < requestedTotNumQuads; ++i) {
110 quads[i].fRect = SkRect::MakeWH(100.5f, 100.5f); // prevent the int non-AA optimization
111 quads[i].fColor = SK_PMColor4fWHITE;
112 quads[i].fLocalMatrix = SkMatrix::I();
113 quads[i].fAAFlags = perQuadAA(i);
114 }
115
116 GrPaint paint;
117 paint.setXPFactory(SkBlendMode_AsXPFactory(blendMode));
118
119 skgpu::v1::FillRectOp::AddFillRectOps(sdc.get(), nullptr, dContext, std::move(paint), overallAA,
120 SkMatrix::I(), quads, requestedTotNumQuads);
121
122 auto opsTask = sdc->testingOnly_PeekLastOpsTask();
123 int actualNumOps = opsTask->numOpChains();
124
125 int actualTotNumQuads = 0;
126
127 for (int i = 0; i < actualNumOps; ++i) {
128 const GrOp* tmp = opsTask->getChain(i);
129 REPORTER_ASSERT(reporter, tmp->classID() == skgpu::v1::FillRectOp::ClassID());
130 REPORTER_ASSERT(reporter, tmp->isChainTail());
131 actualTotNumQuads += ((GrDrawOp*) tmp)->numQuads();
132 }
133
134 REPORTER_ASSERT(reporter, expectedNumOps == actualNumOps);
135 REPORTER_ASSERT(reporter, requestedTotNumQuads == actualTotNumQuads);
136
137 dContext->flushAndSubmit();
138
139 delete[] quads;
140 }
141
142 //-------------------------------------------------------------------------------------------------
textureop_creation_test(skiatest::Reporter * reporter,GrDirectContext * dContext,PerQuadAAFunc perQuadAA,GrAAType overallAA,SkBlendMode blendMode,bool addOneByOne,bool allUniqueProxies,int requestedTotNumQuads,int expectedNumOps)143 static void textureop_creation_test(skiatest::Reporter* reporter, GrDirectContext* dContext,
144 PerQuadAAFunc perQuadAA, GrAAType overallAA,
145 SkBlendMode blendMode, bool addOneByOne,
146 bool allUniqueProxies,
147 int requestedTotNumQuads, int expectedNumOps) {
148
149 std::unique_ptr<skgpu::v1::SurfaceDrawContext> sdc = new_SDC(dContext);
150
151 GrSurfaceProxyView proxyViewA, proxyViewB;
152
153 if (!allUniqueProxies) {
154 sk_sp<GrSurfaceProxy> proxyA = create_proxy(dContext);
155 sk_sp<GrSurfaceProxy> proxyB = create_proxy(dContext);
156 proxyViewA = GrSurfaceProxyView(std::move(proxyA),
157 kTopLeft_GrSurfaceOrigin,
158 skgpu::Swizzle::RGBA());
159 proxyViewB = GrSurfaceProxyView(std::move(proxyB),
160 kTopLeft_GrSurfaceOrigin,
161 skgpu::Swizzle::RGBA());
162 }
163
164 auto set = new GrTextureSetEntry[requestedTotNumQuads];
165
166 for (int i = 0; i < requestedTotNumQuads; ++i) {
167 if (!allUniqueProxies) {
168 // Alternate between two proxies to prevent op merging if the batch API was forced to
169 // submit one op at a time (to work, this does require that all fDstRects overlap).
170 set[i].fProxyView = i % 2 == 0 ? proxyViewA : proxyViewB;
171 } else {
172 // Each op gets its own proxy to force chaining only
173 sk_sp<GrSurfaceProxy> proxyA = create_proxy(dContext);
174 set[i].fProxyView = GrSurfaceProxyView(std::move(proxyA),
175 kTopLeft_GrSurfaceOrigin,
176 skgpu::Swizzle::RGBA());
177 }
178
179 set[i].fSrcAlphaType = kPremul_SkAlphaType;
180 set[i].fSrcRect = SkRect::MakeWH(100.0f, 100.0f);
181 set[i].fDstRect = SkRect::MakeWH(100.5f, 100.5f); // prevent the int non-AA optimization
182 set[i].fDstClipQuad = nullptr;
183 set[i].fPreViewMatrix = nullptr;
184 set[i].fColor = {1.f, 1.f, 1.f, 1.f};
185 set[i].fAAFlags = perQuadAA(i);
186 }
187
188 if (addOneByOne) {
189 for (int i = 0; i < requestedTotNumQuads; ++i) {
190 DrawQuad quad;
191
192 quad.fDevice = GrQuad::MakeFromRect(set[i].fDstRect, SkMatrix::I());
193 quad.fLocal = GrQuad(set[i].fSrcRect);
194 quad.fEdgeFlags = set[i].fAAFlags;
195
196 GrOp::Owner op = TextureOp::Make(dContext,
197 set[i].fProxyView,
198 set[i].fSrcAlphaType,
199 nullptr,
200 GrSamplerState::Filter::kNearest,
201 GrSamplerState::MipmapMode::kNone,
202 set[i].fColor,
203 TextureOp::Saturate::kYes,
204 blendMode,
205 overallAA,
206 &quad,
207 nullptr);
208 sdc->addDrawOp(nullptr, std::move(op));
209 }
210 } else {
211 TextureOp::AddTextureSetOps(sdc.get(),
212 nullptr,
213 dContext,
214 set,
215 requestedTotNumQuads,
216 requestedTotNumQuads, // We alternate so proxyCnt == cnt
217 GrSamplerState::Filter::kNearest,
218 GrSamplerState::MipmapMode::kNone,
219 TextureOp::Saturate::kYes,
220 blendMode,
221 overallAA,
222 SkCanvas::kStrict_SrcRectConstraint,
223 SkMatrix::I(),
224 nullptr);
225 }
226
227 auto opsTask = sdc->testingOnly_PeekLastOpsTask();
228 int actualNumOps = opsTask->numOpChains();
229
230 int actualTotNumQuads = 0;
231
232 if (blendMode != SkBlendMode::kSrcOver ||
233 !dContext->priv().caps()->dynamicStateArrayGeometryProcessorTextureSupport()) {
234 // In either of these two cases, TextureOp creates one op per quad instead. Since
235 // each entry alternates proxies but overlaps geometrically, this will prevent the ops
236 // from being merged back into fewer ops.
237 expectedNumOps = requestedTotNumQuads;
238 }
239 uint32_t expectedOpID = blendMode == SkBlendMode::kSrcOver
240 ? TextureOp::ClassID()
241 : skgpu::v1::FillRectOp::ClassID();
242 for (int i = 0; i < actualNumOps; ++i) {
243 const GrOp* tmp = opsTask->getChain(i);
244 REPORTER_ASSERT(reporter, allUniqueProxies || tmp->isChainTail());
245 while (tmp) {
246 REPORTER_ASSERT(reporter, tmp->classID() == expectedOpID);
247 actualTotNumQuads += ((GrDrawOp*) tmp)->numQuads();
248 tmp = tmp->nextInChain();
249 }
250 }
251
252 REPORTER_ASSERT(reporter, expectedNumOps == actualNumOps);
253 REPORTER_ASSERT(reporter, requestedTotNumQuads == actualTotNumQuads);
254
255 dContext->flushAndSubmit();
256
257 delete[] set;
258 }
259
260 //-------------------------------------------------------------------------------------------------
run_test(GrDirectContext * dContext,skiatest::Reporter * reporter,BulkRectTest test)261 static void run_test(GrDirectContext* dContext, skiatest::Reporter* reporter, BulkRectTest test) {
262
263 // This is the simple case where there is no AA at all. We expect 2 non-AA clumps of quads.
264 {
265 auto noAA = [](int i) -> GrQuadAAFlags {
266 return GrQuadAAFlags::kNone;
267 };
268
269 static const int kNumExpectedOps = 2;
270
271 test(reporter, dContext, noAA, GrAAType::kNone, SkBlendMode::kSrcOver,
272 false, false, 2*GrResourceProvider::MaxNumNonAAQuads(), kNumExpectedOps);
273 }
274
275 // This is the same as the above case except the overall AA is kCoverage. However, since
276 // the per-quad AA is still none, all the quads should be downgraded to non-AA.
277 {
278 auto noAA = [](int i) -> GrQuadAAFlags {
279 return GrQuadAAFlags::kNone;
280 };
281
282 static const int kNumExpectedOps = 2;
283
284 test(reporter, dContext, noAA, GrAAType::kCoverage, SkBlendMode::kSrcOver,
285 false, false, 2*GrResourceProvider::MaxNumNonAAQuads(), kNumExpectedOps);
286 }
287
288 // This case has an overall AA of kCoverage but the per-quad AA alternates.
289 // We should end up with several aa-sized clumps
290 {
291 auto alternateAA = [](int i) -> GrQuadAAFlags {
292 return (i % 2) ? GrQuadAAFlags::kAll : GrQuadAAFlags::kNone;
293 };
294
295 int numExpectedOps = 2*GrResourceProvider::MaxNumNonAAQuads() /
296 GrResourceProvider::MaxNumAAQuads();
297
298 test(reporter, dContext, alternateAA, GrAAType::kCoverage, SkBlendMode::kSrcOver,
299 false, false, 2*GrResourceProvider::MaxNumNonAAQuads(), numExpectedOps);
300 }
301
302 // In this case we have a run of MaxNumAAQuads non-AA quads and then AA quads. This
303 // exercises the case where we have a clump of quads that can't be upgraded to AA bc of
304 // its size. We expect one clump of non-AA quads followed by one clump of AA quads.
305 {
306 auto runOfNonAA = [](int i) -> GrQuadAAFlags {
307 return (i < GrResourceProvider::MaxNumAAQuads()) ? GrQuadAAFlags::kNone
308 : GrQuadAAFlags::kAll;
309 };
310
311 static const int kNumExpectedOps = 2;
312
313 test(reporter, dContext, runOfNonAA, GrAAType::kCoverage, SkBlendMode::kSrcOver,
314 false, false, 2*GrResourceProvider::MaxNumAAQuads(), kNumExpectedOps);
315 }
316
317 // In this case we use a blend mode other than src-over, which hits the FillRectOp fallback
318 // code path for TextureOp. We pass in the expected results if batching was successful, to
319 // that bulk_fill_rect_create_test batches on all modes; bulk_texture_rect_create_test is
320 // responsible for revising its expectations.
321 {
322 auto fixedAA = [](int i) -> GrQuadAAFlags {
323 return GrQuadAAFlags::kAll;
324 };
325
326 static const int kNumExpectedOps = 2;
327
328 test(reporter, dContext, fixedAA, GrAAType::kCoverage, SkBlendMode::kSrcATop,
329 false, false, 2*GrResourceProvider::MaxNumAAQuads(), kNumExpectedOps);
330 }
331
332 // This repros crbug.com/1108475, where we create 1024 non-AA texture ops w/ one coverage-AA
333 // texture op in the middle. Because each op has its own texture, all the texture ops
334 // get chained together so the quad count can exceed the AA maximum.
335 {
336 auto onlyOneAA = [](int i) -> GrQuadAAFlags {
337 return i == 256 ? GrQuadAAFlags::kAll : GrQuadAAFlags::kNone;
338 };
339
340 static const int kNumExpectedOps = 3;
341
342 test(reporter, dContext, onlyOneAA, GrAAType::kCoverage, SkBlendMode::kSrcOver,
343 true, true, 1024, kNumExpectedOps);
344 }
345
346 // This repros a problem related to crbug.com/1108475. In this case, the bulk creation
347 // method had no way to break up the set of texture ops at the AA quad limit.
348 {
349 auto onlyOneAA = [](int i) -> GrQuadAAFlags {
350 return i == 256 ? GrQuadAAFlags::kAll : GrQuadAAFlags::kNone;
351 };
352
353 static const int kNumExpectedOps = 2;
354
355 test(reporter, dContext, onlyOneAA, GrAAType::kCoverage, SkBlendMode::kSrcOver,
356 false, true, 1024, kNumExpectedOps);
357 }
358
359 }
360
// Registers the FillRectOp bulk-creation test for all rendering GPU backends.
DEF_GANESH_TEST_FOR_RENDERING_CONTEXTS(BulkFillRectTest,
                                       reporter,
                                       ctxInfo,
                                       CtsEnforcement::kApiLevel_T) {
    run_test(ctxInfo.directContext(), reporter, fillrectop_creation_test);
}
367
// Registers the TextureOp bulk-creation test for all rendering GPU backends.
DEF_GANESH_TEST_FOR_RENDERING_CONTEXTS(BulkTextureRectTest,
                                       reporter,
                                       ctxInfo,
                                       CtsEnforcement::kApiLevel_T) {
    run_test(ctxInfo.directContext(), reporter, textureop_creation_test);
}
374