1 /*
2 * Copyright 2018 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "src/gpu/ganesh/ops/FillRRectOp.h"
9
10 #include "include/gpu/GrRecordingContext.h"
11 #include "src/base/SkVx.h"
12 #include "src/core/SkRRectPriv.h"
13 #include "src/gpu/BufferWriter.h"
14 #include "src/gpu/KeyBuilder.h"
15 #include "src/gpu/ganesh/GrCaps.h"
16 #include "src/gpu/ganesh/GrGeometryProcessor.h"
17 #include "src/gpu/ganesh/GrMemoryPool.h"
18 #include "src/gpu/ganesh/GrOpFlushState.h"
19 #include "src/gpu/ganesh/GrOpsRenderPass.h"
20 #include "src/gpu/ganesh/GrProgramInfo.h"
21 #include "src/gpu/ganesh/GrRecordingContextPriv.h"
22 #include "src/gpu/ganesh/GrResourceProvider.h"
23 #include "src/gpu/ganesh/geometry/GrShape.h"
24 #include "src/gpu/ganesh/glsl/GrGLSLFragmentShaderBuilder.h"
25 #include "src/gpu/ganesh/glsl/GrGLSLVarying.h"
26 #include "src/gpu/ganesh/glsl/GrGLSLVertexGeoBuilder.h"
27 #include "src/gpu/ganesh/ops/GrMeshDrawOp.h"
28 #include "src/gpu/ganesh/ops/GrSimpleMeshDrawOpHelper.h"
29
30 using namespace skia_private;
31
32 namespace skgpu::ganesh::FillRRectOp {
33
34 namespace {
35
36 // Note: Just checking m.rectStaysRect() is not sufficient
37 bool skews_are_relevant(const SkMatrix& m) {
38 SkASSERT(!m.hasPerspective());
39
40 if (m[SkMatrix::kMSkewX] == 0.0f && m[SkMatrix::kMSkewY] == 0.0f) {
41 return false;
42 }
43
44 static constexpr float kTol = SK_ScalarNearlyZero;
45 float absScaleX = SkScalarAbs(m[SkMatrix::kMScaleX]);
46 float absSkewX = SkScalarAbs(m[SkMatrix::kMSkewX]);
47 float absScaleY = SkScalarAbs(m[SkMatrix::kMScaleY]);
48 float absSkewY = SkScalarAbs(m[SkMatrix::kMSkewY]);
49
50 // The maximum absolute column sum norm of the upper left 2x2
51 float norm = std::max(absScaleX + absSkewY, absSkewX + absScaleY);
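// Comparing against this norm makes the tolerance relative to the matrix's overall scale:
// a skew of 1e-6 is irrelevant next to a scale of 1, but very relevant next to a scale of 1e-6.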
52
53 return absSkewX > kTol * norm || absSkewY > kTol * norm;
54 }
55
56 class FillRRectOpImpl final : public GrMeshDrawOp {
57 private:
58 using Helper = GrSimpleMeshDrawOpHelper;
59
60 public:
61 DEFINE_OP_CLASS_ID
62
63 struct LocalCoords {
64 enum class Type : bool { kRect, kMatrix };
65 LocalCoords(const SkRect& localRect)
66 : fType(Type::kRect)
67 , fRect(localRect) {}
68 LocalCoords(const SkMatrix& localMatrix)
69 : fType(Type::kMatrix)
70 , fMatrix(localMatrix) {}
71 Type fType;
72 union {
73 SkRect fRect;
74 SkMatrix fMatrix;
75 };
76 };
77
78 static GrOp::Owner Make(GrRecordingContext*,
79 SkArenaAlloc*,
80 GrPaint&&,
81 const SkMatrix& viewMatrix,
82 const SkRRect&,
83 const LocalCoords&,
84 GrAA);
85
86 const char* name() const override { return "FillRRectOp"; }
87
88 FixedFunctionFlags fixedFunctionFlags() const override { return fHelper.fixedFunctionFlags(); }
89
90 ClipResult clipToShape(skgpu::ganesh::SurfaceDrawContext*,
91 SkClipOp,
92 const SkMatrix& clipMatrix,
93 const GrShape&,
94 GrAA) override;
95
96 GrProcessorSet::Analysis finalize(const GrCaps&, const GrAppliedClip*, GrClampType) override;
97 CombineResult onCombineIfPossible(GrOp*, SkArenaAlloc*, const GrCaps&) override;
98
99 #if defined(GR_TEST_UTILS)
100 SkString onDumpInfo() const override;
101 #endif
102
103 void visitProxies(const GrVisitProxyFunc& func) const override {
104 if (fProgramInfo) {
105 fProgramInfo->visitFPProxies(func);
106 } else {
107 fHelper.visitProxies(func);
108 }
109 }
110
111 void onPrepareDraws(GrMeshDrawTarget*) override;
112
113 void onExecute(GrOpFlushState*, const SkRect& chainBounds) override;
114
115 private:
116 friend class ::GrSimpleMeshDrawOpHelper; // for access to ctor
117 friend class ::GrOp; // for access to ctor
118
119 enum class ProcessorFlags {
120 kNone = 0,
121 kUseHWDerivatives = 1 << 0,
122 kHasLocalCoords = 1 << 1,
123 kWideColor = 1 << 2,
124 kMSAAEnabled = 1 << 3,
125 kFakeNonAA = 1 << 4,
126 };
127 constexpr static int kNumProcessorFlags = 5;
128
129 GR_DECL_BITFIELD_CLASS_OPS_FRIENDS(ProcessorFlags);
130
131 class Processor;
132
133 FillRRectOpImpl(GrProcessorSet*,
134 const SkPMColor4f& paintColor,
135 SkArenaAlloc*,
136 const SkMatrix& viewMatrix,
137 const SkRRect&,
138 const LocalCoords&,
139 ProcessorFlags);
140
141 GrProgramInfo* programInfo() override { return fProgramInfo; }
142
143 // Create a GrProgramInfo object in the provided arena
144 void onCreateProgramInfo(const GrCaps*,
145 SkArenaAlloc*,
146 const GrSurfaceProxyView& writeView,
147 bool usesMSAASurface,
148 GrAppliedClip&&,
149 const GrDstProxyView&,
150 GrXferBarrierFlags renderPassXferBarriers,
151 GrLoadOp colorLoadOp) override;
152
153 Helper fHelper;
154 ProcessorFlags fProcessorFlags;
155
156 struct Instance {
157 Instance(const SkMatrix& viewMatrix,
158 const SkRRect& rrect,
159 const LocalCoords& localCoords,
160 const SkPMColor4f& color)
161 : fViewMatrix(viewMatrix), fRRect(rrect), fLocalCoords(localCoords), fColor(color) {
162 }
163 SkMatrix fViewMatrix;
164 SkRRect fRRect;
165 LocalCoords fLocalCoords;
166 SkPMColor4f fColor;
167 Instance* fNext = nullptr;
168 };
169
170 Instance* fHeadInstance;
171 Instance** fTailInstance;
172 int fInstanceCount = 1;
173
174 sk_sp<const GrBuffer> fInstanceBuffer;
175 sk_sp<const GrBuffer> fVertexBuffer;
176 sk_sp<const GrBuffer> fIndexBuffer;
177 int fBaseInstance = 0;
178
179 // If this op is prePrepared the created programInfo will be stored here for use in
180 // onExecute. In the prePrepared case it will have been stored in the record-time arena.
181 GrProgramInfo* fProgramInfo = nullptr;
182 };
183
184 GR_MAKE_BITFIELD_CLASS_OPS(FillRRectOpImpl::ProcessorFlags)
185
186 // Hardware derivatives are not always accurate enough for highly elliptical corners. This method
187 // checks to make sure the corners will still all look good if we use HW derivatives.
188 bool can_use_hw_derivatives_with_coverage(const GrShaderCaps&,
189 const SkMatrix&,
190 const SkRRect&);
191
192 GrOp::Owner FillRRectOpImpl::Make(GrRecordingContext* ctx,
193 SkArenaAlloc* arena,
194 GrPaint&& paint,
195 const SkMatrix& viewMatrix,
196 const SkRRect& rrect,
197 const LocalCoords& localCoords,
198 GrAA aa) {
199 const GrCaps* caps = ctx->priv().caps();
200
201 if (!caps->drawInstancedSupport()) {
202 return nullptr;
203 }
204
205 // We transform into a normalized -1..+1 space to draw the round rect. If the boundaries are too
206 // large, the math can overflow. The caller can fall back on path rendering if this is the case.
207 if (std::max(rrect.height(), rrect.width()) >= 1e6f) {
208 return nullptr;
209 }
210
211 ProcessorFlags flags = ProcessorFlags::kNone;
212 // TODO: Support perspective in a follow-on CL. This shouldn't be difficult, since we already
213 // use HW derivatives. The only trick will be adjusting the AA outset to account for
214 // perspective. (i.e., outset = 0.5 * z.)
215 if (viewMatrix.hasPerspective()) {
216 return nullptr;
217 }
218 if (can_use_hw_derivatives_with_coverage(*caps->shaderCaps(), viewMatrix, rrect)) {
219 // HW derivatives (more specifically, fwidth()) are consistently faster on all platforms in
220 // coverage mode. We use them as long as the approximation will be accurate enough.
221 flags |= ProcessorFlags::kUseHWDerivatives;
222 }
223 if (aa == GrAA::kNo) {
224 flags |= ProcessorFlags::kFakeNonAA;
225 }
226
227 return Helper::FactoryHelper<FillRRectOpImpl>(ctx, std::move(paint), arena, viewMatrix, rrect,
228 localCoords, flags);
229 }
230
231 FillRRectOpImpl::FillRRectOpImpl(GrProcessorSet* processorSet,
232 const SkPMColor4f& paintColor,
233 SkArenaAlloc* arena,
234 const SkMatrix& viewMatrix,
235 const SkRRect& rrect,
236 const LocalCoords& localCoords,
237 ProcessorFlags processorFlags)
238 : GrMeshDrawOp(ClassID())
239 , fHelper(processorSet,
240 (processorFlags & ProcessorFlags::kFakeNonAA)
241 ? GrAAType::kNone
242 : GrAAType::kCoverage) // Use analytic AA even if the RT is MSAA.
243 , fProcessorFlags(processorFlags & ~(ProcessorFlags::kHasLocalCoords |
244 ProcessorFlags::kWideColor |
245 ProcessorFlags::kMSAAEnabled))
246 , fHeadInstance(arena->make<Instance>(viewMatrix, rrect, localCoords, paintColor))
247 , fTailInstance(&fHeadInstance->fNext) {
248 // FillRRectOp::Make fails if there is perspective.
249 SkASSERT(!viewMatrix.hasPerspective());
250 this->setBounds(viewMatrix.mapRect(rrect.getBounds()),
251 GrOp::HasAABloat(!(processorFlags & ProcessorFlags::kFakeNonAA)),
252 GrOp::IsHairline::kNo);
253 }
254
255 GrDrawOp::ClipResult FillRRectOpImpl::clipToShape(skgpu::ganesh::SurfaceDrawContext* sdc,
256 SkClipOp clipOp,
257 const SkMatrix& clipMatrix,
258 const GrShape& shape,
259 GrAA aa) {
260 SkASSERT(fInstanceCount == 1); // This needs to be called before combining.
261 SkASSERT(fHeadInstance->fNext == nullptr);
262
263 if ((shape.isRect() || shape.isRRect()) &&
264 clipOp == SkClipOp::kIntersect &&
265 (aa == GrAA::kNo) == (fProcessorFlags & ProcessorFlags::kFakeNonAA)) {
266 // The clip shape is a round rect. Attempt to map it to a round rect in "viewMatrix" space.
267 SkRRect clipRRect;
268 if (clipMatrix == fHeadInstance->fViewMatrix) {
269 if (shape.isRect()) {
270 clipRRect.setRect(shape.rect());
271 } else {
272 clipRRect = shape.rrect();
273 }
274 } else {
275 // Find a matrix that maps from "clipMatrix" space to "viewMatrix" space.
276 SkASSERT(!fHeadInstance->fViewMatrix.hasPerspective());
277 if (clipMatrix.hasPerspective()) {
278 return ClipResult::kFail;
279 }
280 SkMatrix clipToView;
281 if (!fHeadInstance->fViewMatrix.invert(&clipToView)) {
282 return ClipResult::kClippedOut;
283 }
284 clipToView.preConcat(clipMatrix);
285 SkASSERT(!clipToView.hasPerspective());
286
287 if (skews_are_relevant(clipToView)) {
288 // A rect in "clipMatrix" space is not a rect in "viewMatrix" space.
289 return ClipResult::kFail;
290 }
291 clipToView.setSkewX(0);
292 clipToView.setSkewY(0);
293 SkASSERT(clipToView.rectStaysRect());
294
295 if (shape.isRect()) {
296 clipRRect.setRect(clipToView.mapRect(shape.rect()));
297 } else {
298 if (!shape.rrect().transform(clipToView, &clipRRect)) {
299 // Transforming the rrect failed. This shouldn't generally happen except in
300 // cases of fp32 overflow.
301 return ClipResult::kFail;
302 }
303 }
304 }
305
306 // Intersect our round rect with the clip shape.
307 SkRRect isectRRect;
308 if (fHeadInstance->fRRect.isRect() && clipRRect.isRect()) {
309 SkRect isectRect;
310 if (!isectRect.intersect(fHeadInstance->fRRect.rect(), clipRRect.rect())) {
311 return ClipResult::kClippedOut;
312 }
313 isectRRect.setRect(isectRect);
314 } else {
315 isectRRect = SkRRectPriv::ConservativeIntersect(fHeadInstance->fRRect, clipRRect);
316 if (isectRRect.isEmpty()) {
317 // The round rects did not intersect at all or the intersection was too complicated
318 // to compute quickly.
319 return ClipResult::kFail;
320 }
321 }
322
323 // Don't apply the clip geometrically if it becomes subpixel, since then the hairline
324 // rendering may outset beyond the original clip.
325 SkRect devISectBounds = fHeadInstance->fViewMatrix.mapRect(isectRRect.rect());
326 if (devISectBounds.width() < 1.f || devISectBounds.height() < 1.f) {
327 return ClipResult::kFail;
328 }
329
330 if (fHeadInstance->fLocalCoords.fType == LocalCoords::Type::kRect) {
331 // Update the local rect.
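// shuffle<2,3,0,1> swaps (L,T,R,B) -> (R,B,L,T), so (v - shuffle(v)) yields (-w, -h, +w, +h)
// for a rect v. The ratio of those spans converts a per-edge offset in the draw rect into the
// equivalent offset in the local rect, remapping each edge of the intersected rect into local
// space.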
332 auto rect = sk_bit_cast<skvx::float4>(fHeadInstance->fRRect.rect());
333 auto local = sk_bit_cast<skvx::float4>(fHeadInstance->fLocalCoords.fRect);
334 auto isect = sk_bit_cast<skvx::float4>(isectRRect.rect());
335 auto rectToLocalSize = (local - skvx::shuffle<2,3,0,1>(local)) /
336 (rect - skvx::shuffle<2,3,0,1>(rect));
337 auto localCoordsRect = (isect - rect) * rectToLocalSize + local;
338 fHeadInstance->fLocalCoords.fRect.setLTRB(localCoordsRect.x(),
339 localCoordsRect.y(),
340 localCoordsRect.z(),
341 localCoordsRect.w());
342 }
343
344 // Update the round rect.
345 fHeadInstance->fRRect = isectRRect;
346 return ClipResult::kClippedGeometrically;
347 }
348
349 return ClipResult::kFail;
350 }
351
352 GrProcessorSet::Analysis FillRRectOpImpl::finalize(const GrCaps& caps, const GrAppliedClip* clip,
353 GrClampType clampType) {
354 SkASSERT(fInstanceCount == 1);
355 SkASSERT(fHeadInstance->fNext == nullptr);
356
357 bool isWideColor;
358 auto analysis = fHelper.finalizeProcessors(caps, clip, clampType,
359 GrProcessorAnalysisCoverage::kSingleChannel,
360 &fHeadInstance->fColor, &isWideColor);
361 if (isWideColor) {
362 fProcessorFlags |= ProcessorFlags::kWideColor;
363 }
364 if (analysis.usesLocalCoords()) {
365 fProcessorFlags |= ProcessorFlags::kHasLocalCoords;
366 }
367 return analysis;
368 }
369
370 GrOp::CombineResult FillRRectOpImpl::onCombineIfPossible(GrOp* op,
371 SkArenaAlloc*,
372 const GrCaps& caps) {
373 auto that = op->cast<FillRRectOpImpl>();
374 if (!fHelper.isCompatible(that->fHelper, caps, this->bounds(), that->bounds()) ||
375 fProcessorFlags != that->fProcessorFlags) {
376 return CombineResult::kCannotCombine;
377 }
378
379 *fTailInstance = that->fHeadInstance;
380 fTailInstance = that->fTailInstance;
381 fInstanceCount += that->fInstanceCount;
382 return CombineResult::kMerged;
383 }
384
385 #if defined(GR_TEST_UTILS)
386 SkString FillRRectOpImpl::onDumpInfo() const {
387 SkString str = SkStringPrintf("# instances: %d\n", fInstanceCount);
388 str += fHelper.dumpInfo();
389 int i = 0;
390 for (Instance* tmp = fHeadInstance; tmp; tmp = tmp->fNext, ++i) {
391 str.appendf("%d: Color: [%.2f, %.2f, %.2f, %.2f] ",
392 i, tmp->fColor.fR, tmp->fColor.fG, tmp->fColor.fB, tmp->fColor.fA);
393 SkMatrix m = tmp->fViewMatrix;
394 str.appendf("ViewMatrix: [%.2f, %.2f, %.2f, %.2f, %.2f, %.2f, %.2f, %.2f, %.2f] ",
395 m[0], m[1], m[2], m[3], m[4], m[5], m[6], m[7], m[8]);
396 SkRect r = tmp->fRRect.rect();
397 str.appendf("Rect: [%f %f %f %f]\n", r.fLeft, r.fTop, r.fRight, r.fBottom);
398 }
399 return str;
400 }
401 #endif
402
403 class FillRRectOpImpl::Processor final : public GrGeometryProcessor {
404 public:
405 static GrGeometryProcessor* Make(SkArenaAlloc* arena, GrAAType aaType, ProcessorFlags flags) {
406 return arena->make([&](void* ptr) {
407 return new (ptr) Processor(aaType, flags);
408 });
409 }
410
411 const char* name() const override { return "FillRRectOp::Processor"; }
412
413 void addToKey(const GrShaderCaps& caps, KeyBuilder* b) const override {
414 b->addBits(kNumProcessorFlags, (uint32_t)fFlags, "flags");
415 }
416
417 std::unique_ptr<ProgramImpl> makeProgramImpl(const GrShaderCaps&) const override;
418
419 private:
420 class Impl;
421
422 Processor(GrAAType aaType, ProcessorFlags flags)
423 : GrGeometryProcessor(kGrFillRRectOp_Processor_ClassID)
424 , fFlags(flags) {
425 this->setVertexAttributesWithImplicitOffsets(kVertexAttribs, std::size(kVertexAttribs));
426
427 fInstanceAttribs.emplace_back("radii_x", kFloat4_GrVertexAttribType, SkSLType::kFloat4);
428 fInstanceAttribs.emplace_back("radii_y", kFloat4_GrVertexAttribType, SkSLType::kFloat4);
429 fInstanceAttribs.emplace_back("skew", kFloat4_GrVertexAttribType, SkSLType::kFloat4);
430 if (fFlags & ProcessorFlags::kHasLocalCoords) {
431 fInstanceAttribs.emplace_back("translate_and_localrotate",
432 kFloat4_GrVertexAttribType,
433 SkSLType::kFloat4);
434 fInstanceAttribs.emplace_back(
435 "localrect", kFloat4_GrVertexAttribType, SkSLType::kFloat4);
436 } else {
437 fInstanceAttribs.emplace_back("translate_and_localrotate",
438 kFloat2_GrVertexAttribType,
439 SkSLType::kFloat2);
440 }
441 fColorAttrib = &fInstanceAttribs.push_back(
442 MakeColorAttribute("color", (fFlags & ProcessorFlags::kWideColor)));
443 SkASSERT(fInstanceAttribs.size() <= kMaxInstanceAttribs);
444 this->setInstanceAttributesWithImplicitOffsets(fInstanceAttribs.begin(),
445 fInstanceAttribs.size());
446 }
447
448 inline static constexpr Attribute kVertexAttribs[] = {
449 {"radii_selector", kFloat4_GrVertexAttribType, SkSLType::kFloat4},
450 {"corner_and_radius_outsets", kFloat4_GrVertexAttribType, SkSLType::kFloat4},
451 // Coverage only.
452 {"aa_bloat_and_coverage", kFloat4_GrVertexAttribType, SkSLType::kFloat4}};
453
454 const ProcessorFlags fFlags;
455
456 constexpr static int kMaxInstanceAttribs = 6;
457 STArray<kMaxInstanceAttribs, Attribute> fInstanceAttribs;
458 const Attribute* fColorAttrib;
459 };
460
461 // Our coverage geometry consists of an inset octagon with solid coverage, surrounded by linear
462 // coverage ramps on the horizontal and vertical edges, and "arc coverage" pieces on the diagonal
463 // edges. The Vertex struct tells the shader where to place its vertex within a normalized
464 // ([l, t, r, b] = [-1, -1, +1, +1]) space, and how to calculate coverage. See onEmitCode.
465 struct CoverageVertex {
466 std::array<float, 4> fRadiiSelector;
467 std::array<float, 2> fCorner;
468 std::array<float, 2> fRadiusOutset;
469 std::array<float, 2> fAABloatDirection;
470 float fCoverage;
471 float fIsLinearCoverage;
472 };
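// Each CoverageVertex packs into the Processor's three float4 vertex attributes:
// fRadiiSelector -> radii_selector, {fCorner, fRadiusOutset} -> corner_and_radius_outsets, and
// {fAABloatDirection, fCoverage, fIsLinearCoverage} -> aa_bloat_and_coverage.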
473
474 // This is the offset (when multiplied by radii) from the corners of a bounding box to the vertices
475 // of its inscribed octagon. We draw the outside portion of arcs with quarter-octagons rather than
476 // rectangles.
477 static constexpr float kOctoOffset = 1/(1 + SK_ScalarRoot2Over2);
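// Equivalently, kOctoOffset == 2 - sqrt(2) ~= 0.586: for a unit quarter-circle inscribed in the
// corner of a [-1,+1] box, the diagonal octagon edge tangent to the arc at 45 degrees meets the
// box edge 2 - sqrt(2) away from the corner.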
478
479 static constexpr CoverageVertex kVertexData[] = {
480 // Left inset edge.
481 {{{0,0,0,1}}, {{-1,+1}}, {{0,-1}}, {{+1,0}}, 1, 1},
482 {{{1,0,0,0}}, {{-1,-1}}, {{0,+1}}, {{+1,0}}, 1, 1},
483
484 // Top inset edge.
485 {{{1,0,0,0}}, {{-1,-1}}, {{+1,0}}, {{0,+1}}, 1, 1},
486 {{{0,1,0,0}}, {{+1,-1}}, {{-1,0}}, {{0,+1}}, 1, 1},
487
488 // Right inset edge.
489 {{{0,1,0,0}}, {{+1,-1}}, {{0,+1}}, {{-1,0}}, 1, 1},
490 {{{0,0,1,0}}, {{+1,+1}}, {{0,-1}}, {{-1,0}}, 1, 1},
491
492 // Bottom inset edge.
493 {{{0,0,1,0}}, {{+1,+1}}, {{-1,0}}, {{0,-1}}, 1, 1},
494 {{{0,0,0,1}}, {{-1,+1}}, {{+1,0}}, {{0,-1}}, 1, 1},
495
496
497 // Left outset edge.
498 {{{0,0,0,1}}, {{-1,+1}}, {{0,-1}}, {{-1,0}}, 0, 1},
499 {{{1,0,0,0}}, {{-1,-1}}, {{0,+1}}, {{-1,0}}, 0, 1},
500
501 // Top outset edge.
502 {{{1,0,0,0}}, {{-1,-1}}, {{+1,0}}, {{0,-1}}, 0, 1},
503 {{{0,1,0,0}}, {{+1,-1}}, {{-1,0}}, {{0,-1}}, 0, 1},
504
505 // Right outset edge.
506 {{{0,1,0,0}}, {{+1,-1}}, {{0,+1}}, {{+1,0}}, 0, 1},
507 {{{0,0,1,0}}, {{+1,+1}}, {{0,-1}}, {{+1,0}}, 0, 1},
508
509 // Bottom outset edge.
510 {{{0,0,1,0}}, {{+1,+1}}, {{-1,0}}, {{0,+1}}, 0, 1},
511 {{{0,0,0,1}}, {{-1,+1}}, {{+1,0}}, {{0,+1}}, 0, 1},
512
513
514 // Top-left corner.
515 {{{1,0,0,0}}, {{-1,-1}}, {{ 0,+1}}, {{-1, 0}}, 0, 0},
516 {{{1,0,0,0}}, {{-1,-1}}, {{ 0,+1}}, {{+1, 0}}, 1, 0},
517 {{{1,0,0,0}}, {{-1,-1}}, {{+1, 0}}, {{ 0,+1}}, 1, 0},
518 {{{1,0,0,0}}, {{-1,-1}}, {{+1, 0}}, {{ 0,-1}}, 0, 0},
519 {{{1,0,0,0}}, {{-1,-1}}, {{+kOctoOffset,0}}, {{-1,-1}}, 0, 0},
520 {{{1,0,0,0}}, {{-1,-1}}, {{0,+kOctoOffset}}, {{-1,-1}}, 0, 0},
521
522 // Top-right corner.
523 {{{0,1,0,0}}, {{+1,-1}}, {{-1, 0}}, {{ 0,-1}}, 0, 0},
524 {{{0,1,0,0}}, {{+1,-1}}, {{-1, 0}}, {{ 0,+1}}, 1, 0},
525 {{{0,1,0,0}}, {{+1,-1}}, {{ 0,+1}}, {{-1, 0}}, 1, 0},
526 {{{0,1,0,0}}, {{+1,-1}}, {{ 0,+1}}, {{+1, 0}}, 0, 0},
527 {{{0,1,0,0}}, {{+1,-1}}, {{0,+kOctoOffset}}, {{+1,-1}}, 0, 0},
528 {{{0,1,0,0}}, {{+1,-1}}, {{-kOctoOffset,0}}, {{+1,-1}}, 0, 0},
529
530 // Bottom-right corner.
531 {{{0,0,1,0}}, {{+1,+1}}, {{ 0,-1}}, {{+1, 0}}, 0, 0},
532 {{{0,0,1,0}}, {{+1,+1}}, {{ 0,-1}}, {{-1, 0}}, 1, 0},
533 {{{0,0,1,0}}, {{+1,+1}}, {{-1, 0}}, {{ 0,-1}}, 1, 0},
534 {{{0,0,1,0}}, {{+1,+1}}, {{-1, 0}}, {{ 0,+1}}, 0, 0},
535 {{{0,0,1,0}}, {{+1,+1}}, {{-kOctoOffset,0}}, {{+1,+1}}, 0, 0},
536 {{{0,0,1,0}}, {{+1,+1}}, {{0,-kOctoOffset}}, {{+1,+1}}, 0, 0},
537
538 // Bottom-left corner.
539 {{{0,0,0,1}}, {{-1,+1}}, {{+1, 0}}, {{ 0,+1}}, 0, 0},
540 {{{0,0,0,1}}, {{-1,+1}}, {{+1, 0}}, {{ 0,-1}}, 1, 0},
541 {{{0,0,0,1}}, {{-1,+1}}, {{ 0,-1}}, {{+1, 0}}, 1, 0},
542 {{{0,0,0,1}}, {{-1,+1}}, {{ 0,-1}}, {{-1, 0}}, 0, 0},
543 {{{0,0,0,1}}, {{-1,+1}}, {{0,-kOctoOffset}}, {{-1,+1}}, 0, 0},
544 {{{0,0,0,1}}, {{-1,+1}}, {{+kOctoOffset,0}}, {{-1,+1}}, 0, 0}};
545
546 SKGPU_DECLARE_STATIC_UNIQUE_KEY(gVertexBufferKey);
547
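// These indices reference the 40 vertices above: 0-7 form the inset octagon, 8-15 the outset
// octagon, and 16-39 the four corner patches (6 vertices each).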
548 static constexpr uint16_t kIndexData[] = {
549 // Inset octagon (solid coverage).
550 0, 1, 7,
551 1, 2, 7,
552 7, 2, 6,
553 2, 3, 6,
554 6, 3, 5,
555 3, 4, 5,
556
557 // AA borders (linear coverage).
558 0, 1, 8, 1, 9, 8,
559 2, 3, 10, 3, 11, 10,
560 4, 5, 12, 5, 13, 12,
561 6, 7, 14, 7, 15, 14,
562
563 // Top-left arc.
564 16, 17, 21,
565 17, 21, 18,
566 21, 18, 20,
567 18, 20, 19,
568
569 // Top-right arc.
570 22, 23, 27,
571 23, 27, 24,
572 27, 24, 26,
573 24, 26, 25,
574
575 // Bottom-right arc.
576 28, 29, 33,
577 29, 33, 30,
578 33, 30, 32,
579 30, 32, 31,
580
581 // Bottom-left arc.
582 34, 35, 39,
583 35, 39, 36,
584 39, 36, 38,
585 36, 38, 37};
586
587 SKGPU_DECLARE_STATIC_UNIQUE_KEY(gIndexBufferKey);
588
589 void FillRRectOpImpl::onPrepareDraws(GrMeshDrawTarget* target) {
590 if (!fProgramInfo) {
591 this->createProgramInfo(target);
592 }
593
594 size_t instanceStride = fProgramInfo->geomProc().instanceStride();
595
596 if (VertexWriter instanceWriter = target->makeVertexWriter(instanceStride, fInstanceCount,
597 &fInstanceBuffer, &fBaseInstance)) {
598 SkDEBUGCODE(auto end = instanceWriter.mark(instanceStride * fInstanceCount));
599 for (Instance* i = fHeadInstance; i; i = i->fNext) {
600 auto [l, t, r, b] = i->fRRect.rect();
601
602 // Produce a matrix that draws the round rect from normalized [-1, -1, +1, +1] space.
603 SkMatrix m;
604 // Unmap the normalized rect [-1, -1, +1, +1] back to [l, t, r, b].
605 m.setScaleTranslate((r - l)/2, (b - t)/2, (l + r)/2, (t + b)/2);
606 // Map to device space.
607 m.postConcat(i->fViewMatrix);
608
609 // Convert the radii to [-1, -1, +1, +1] space and write their attribs.
610 skvx::float4 radiiX, radiiY;
611 skvx::strided_load2(&SkRRectPriv::GetRadiiArray(i->fRRect)->fX, radiiX, radiiY);
612 radiiX *= 2 / (r - l);
613 radiiY *= 2 / (b - t);
614
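// Write this instance in the attribute order declared by the Processor: radii_x, radii_y,
// skew (the 2x2 matrix), translate_and_localrotate, [localrect,] color.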
615 instanceWriter << radiiX << radiiY
616 << m.getScaleX() << m.getSkewX() << m.getSkewY() << m.getScaleY()
617 << m.getTranslateX() << m.getTranslateY();
618
619 if (fProcessorFlags & ProcessorFlags::kHasLocalCoords) {
620 if (i->fLocalCoords.fType == LocalCoords::Type::kRect) {
621 instanceWriter << 0.f << 0.f // localrotate
622 << i->fLocalCoords.fRect; // localrect
623 } else {
624 SkASSERT(i->fLocalCoords.fType == LocalCoords::Type::kMatrix);
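// Encode the affine local matrix as two mapped corners plus cross terms: localrect holds
// (l0.x, l0.y, l0.x + u.x, l0.y + v.y) and localrotate holds (v.x, u.y), where u and v are
// the images of the rect's edge vectors. The vertex shader reconstructs localcoord by
// lerping localrect across the quad and adding translate_and_localrotate.zw * T.yx.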
625 const SkRect& bounds = i->fRRect.rect();
626 const SkMatrix& localMatrix = i->fLocalCoords.fMatrix;
627 SkVector u = localMatrix.mapVector(bounds.right() - bounds.left(), 0);
628 SkVector v = localMatrix.mapVector(0, bounds.bottom() - bounds.top());
629 SkPoint l0 = localMatrix.mapPoint({bounds.left(), bounds.top()});
630 instanceWriter << v.x() << u.y() // localrotate
631 << l0 << (l0.x() + u.x()) << (l0.y() + v.y()); // localrect
632 }
633 }
634
635 instanceWriter << VertexColor(i->fColor, fProcessorFlags & ProcessorFlags::kWideColor);
636 }
637 SkASSERT(instanceWriter.mark() == end);
638 }
639
640 SKGPU_DEFINE_STATIC_UNIQUE_KEY(gIndexBufferKey);
641
642 fIndexBuffer = target->resourceProvider()->findOrMakeStaticBuffer(GrGpuBufferType::kIndex,
643 sizeof(kIndexData),
644 kIndexData, gIndexBufferKey);
645
646 SKGPU_DEFINE_STATIC_UNIQUE_KEY(gVertexBufferKey);
647
648 fVertexBuffer = target->resourceProvider()->findOrMakeStaticBuffer(GrGpuBufferType::kVertex,
649 sizeof(kVertexData),
650 kVertexData,
651 gVertexBufferKey);
652 }
653
654 class FillRRectOpImpl::Processor::Impl : public ProgramImpl {
655 public:
656 void setData(const GrGLSLProgramDataManager&,
657 const GrShaderCaps&,
658 const GrGeometryProcessor&) override {}
659
660 private:
661 void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override {
662 GrGLSLVertexBuilder* v = args.fVertBuilder;
663 GrGLSLFPFragmentBuilder* f = args.fFragBuilder;
664
665 const auto& proc = args.fGeomProc.cast<Processor>();
666 bool useHWDerivatives = (proc.fFlags & ProcessorFlags::kUseHWDerivatives);
667
668 SkASSERT(proc.vertexStride() == sizeof(CoverageVertex));
669
670 GrGLSLVaryingHandler* varyings = args.fVaryingHandler;
671 varyings->emitAttributes(proc);
672 f->codeAppendf("half4 %s;", args.fOutputColor);
673 varyings->addPassThroughAttribute(proc.fColorAttrib->asShaderVar(),
674 args.fOutputColor,
675 GrGLSLVaryingHandler::Interpolation::kCanBeFlat);
676
677 // Emit the vertex shader.
678 // When MSAA is enabled, we need to make sure every sample gets lit up on pixels that have
679 // fractional coverage. We do this by making the ramp wider.
680 v->codeAppendf("float aa_bloat_multiplier = %i;",
681 (proc.fFlags & ProcessorFlags::kMSAAEnabled)
682 ? 2 // Outset an entire pixel (2 radii).
683 : (!(proc.fFlags & ProcessorFlags::kFakeNonAA))
684 ? 1 // Outset one half pixel (1 radius).
685 : 0); // No AA bloat.
686
687 // Unpack vertex attribs.
688 v->codeAppend("float2 corner = corner_and_radius_outsets.xy;");
689 v->codeAppend("float2 radius_outset = corner_and_radius_outsets.zw;");
690 v->codeAppend("float2 aa_bloat_direction = aa_bloat_and_coverage.xy;");
691 v->codeAppend("float is_linear_coverage = aa_bloat_and_coverage.w;");
692
693 // Find the amount to bloat each edge for AA (in source space).
694 v->codeAppend("float2 pixellength = inversesqrt("
695 "float2(dot(skew.xz, skew.xz), dot(skew.yw, skew.yw)));");
696 v->codeAppend("float4 normalized_axis_dirs = skew * pixellength.xyxy;");
697 v->codeAppend("float2 axiswidths = (abs(normalized_axis_dirs.xy) + "
698 "abs(normalized_axis_dirs.zw));");
699 v->codeAppend("float2 aa_bloatradius = axiswidths * pixellength * .5;");
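// pixellength is (roughly) the size of one device pixel measured in the normalized [-1..+1]
// space along each axis, so aa_bloatradius is half a pixel in that space -- the distance each
// edge must move to build a one-pixel coverage ramp.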
700
701 // Identify our radii.
702 v->codeAppend("float4 radii_and_neighbors = radii_selector"
703 "* float4x4(radii_x, radii_y, radii_x.yxwz, radii_y.wzyx);");
704 v->codeAppend("float2 radii = radii_and_neighbors.xy;");
705 v->codeAppend("float2 neighbor_radii = radii_and_neighbors.zw;");
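// radii_selector is a one-hot vector for this corner, so the product above selects this
// corner's x/y radii into .xy and the adjacent corners' radii into .zw (used by the spacing
// clamp below).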
706
707 v->codeAppend("float coverage_multiplier = 1;");
708 v->codeAppend("if (any(greaterThan(aa_bloatradius, float2(1)))) {");
709 // The rrect is more narrow than a half-pixel AA coverage ramp. We can't
710 // draw as-is or else opposite AA borders will overlap. Instead, fudge the
711 // size up to the width of a coverage ramp, and then reduce total coverage
712 // to make the rect appear more thin.
713 v->codeAppend( "corner = max(abs(corner), aa_bloatradius) * sign(corner);");
714 v->codeAppend( "coverage_multiplier = 1 / (max(aa_bloatradius.x, 1) * "
715 "max(aa_bloatradius.y, 1));");
716 // Set radii to zero to ensure we take the "linear coverage" codepath.
717 // (The "coverage" variable only has effect in the linear codepath.)
718 v->codeAppend( "radii = float2(0);");
719 v->codeAppend("}");
720
721 // Unpack coverage.
722 v->codeAppend("float coverage = aa_bloat_and_coverage.z;");
723 if (proc.fFlags & ProcessorFlags::kMSAAEnabled) {
724 // MSAA has a wider ramp that goes from -.5 to 1.5 instead of 0 to 1.
725 v->codeAppendf("coverage = (coverage - .5) * aa_bloat_multiplier + .5;");
726 }
727
728 v->codeAppend("if (any(lessThan(radii, aa_bloatradius * 1.5))) {");
729 // The radii are very small. Demote this arc to a sharp 90 degree corner.
730 v->codeAppend( "radii = float2(0);");
731 // Convert to a standard picture frame for an AA rect instead of the round
732 // rect geometry.
733 v->codeAppend( "aa_bloat_direction = sign(corner);");
734 v->codeAppend( "if (coverage > .5) {"); // Are we an inset edge?
735 v->codeAppend( "aa_bloat_direction = -aa_bloat_direction;");
736 v->codeAppend( "}");
737 v->codeAppend( "is_linear_coverage = 1;");
738 v->codeAppend("} else {");
739 // Don't let radii get smaller than a coverage ramp plus an extra half
740 // pixel for MSAA. Always use the same amount so we don't pop when
741 // switching between MSAA and coverage.
742 v->codeAppend( "radii = clamp(radii, pixellength * 1.5, 2 - pixellength * 1.5);");
743 v->codeAppend( "neighbor_radii = clamp(neighbor_radii, pixellength * 1.5, "
744 "2 - pixellength * 1.5);");
745 // Don't let neighboring radii get closer together than 1/16 pixel.
746 v->codeAppend( "float2 spacing = 2 - radii - neighbor_radii;");
747 v->codeAppend( "float2 extra_pad = max(pixellength * .0625 - spacing, float2(0));");
748 v->codeAppend( "radii -= extra_pad * .5;");
749 v->codeAppend("}");
750
751 // Find our vertex position, adjusted for radii and bloated for AA. Our rect is drawn in
752 // normalized [-1,-1,+1,+1] space.
753 v->codeAppend("float2 aa_outset = "
754 "aa_bloat_direction * aa_bloatradius * aa_bloat_multiplier;");
755 v->codeAppend("float2 vertexpos = corner + radius_outset * radii + aa_outset;");
756
757 v->codeAppend("if (coverage > .5) {"); // Are we an inset edge?
758 // Don't allow the aa insets to overlap. i.e., Don't let them inset past
759 // the center (x=y=0). Since we don't allow the rect to become thinner
760 // than 1px, this should only happen when using MSAA, where we inset by an
761 // entire pixel instead of half.
762 v->codeAppend( "if (aa_bloat_direction.x != 0 && vertexpos.x * corner.x < 0) {");
763 v->codeAppend( "float backset = abs(vertexpos.x);");
764 v->codeAppend( "vertexpos.x = 0;");
765 v->codeAppend( "vertexpos.y += "
766 "backset * sign(corner.y) * pixellength.y/pixellength.x;");
767 v->codeAppend( "coverage = (coverage - .5) * abs(corner.x) / "
768 "(abs(corner.x) + backset) + .5;");
769 v->codeAppend( "}");
770 v->codeAppend( "if (aa_bloat_direction.y != 0 && vertexpos.y * corner.y < 0) {");
771 v->codeAppend( "float backset = abs(vertexpos.y);");
772 v->codeAppend( "vertexpos.y = 0;");
773 v->codeAppend( "vertexpos.x += "
774 "backset * sign(corner.x) * pixellength.x/pixellength.y;");
775 v->codeAppend( "coverage = (coverage - .5) * abs(corner.y) / "
776 "(abs(corner.y) + backset) + .5;");
777 v->codeAppend( "}");
778 v->codeAppend("}");
779
780 // Transform to device space.
781 v->codeAppend("float2x2 skewmatrix = float2x2(skew.xy, skew.zw);");
782 v->codeAppend("float2 devcoord = vertexpos * skewmatrix + translate_and_localrotate.xy;");
783 gpArgs->fPositionVar.set(SkSLType::kFloat2, "devcoord");
784
785 // Output local coordinates.
786 if (proc.fFlags & ProcessorFlags::kHasLocalCoords) {
787 // Do math in a way that preserves exact local coord boundaries when there is no local
788 // rotate and vertexpos is on an exact shape boundary.
789 v->codeAppend("float2 T = vertexpos * .5 + .5;");
790 v->codeAppend("float2 localcoord = localrect.xy * (1 - T) + "
791 "localrect.zw * T + "
792 "translate_and_localrotate.zw * T.yx;");
793 gpArgs->fLocalCoordVar.set(SkSLType::kFloat2, "localcoord");
794 }
795
796 // Setup interpolants for coverage.
797 GrGLSLVarying arcCoord(useHWDerivatives ? SkSLType::kFloat2 : SkSLType::kFloat4);
798 varyings->addVarying("arccoord", &arcCoord);
799 v->codeAppend("if (0 != is_linear_coverage) {");
800 // We are a non-corner piece: Set x=0 to indicate built-in coverage, and
801 // interpolate linear coverage across y.
802 v->codeAppendf( "%s.xy = float2(0, coverage * coverage_multiplier);",
803 arcCoord.vsOut());
804 v->codeAppend("} else {");
805 // Find the normalized arc coordinates for our corner ellipse.
806 // (i.e., the coordinate system where x^2 + y^2 == 1).
807 v->codeAppend( "float2 arccoord = 1 - abs(radius_outset) + aa_outset/radii * corner;");
808 // We are a corner piece: Interpolate the arc coordinates for coverage.
809 // Emit x+1 to ensure no pixel in the arc has a x value of 0 (since x=0
810 // instructs the fragment shader to use linear coverage).
811 v->codeAppendf( "%s.xy = float2(arccoord.x+1, arccoord.y);", arcCoord.vsOut());
812 if (!useHWDerivatives) {
813 // The gradient is order-1: Interpolate it across arccoord.zw.
814 v->codeAppendf("float2x2 derivatives = inverse(skewmatrix);");
815 v->codeAppendf("%s.zw = derivatives * (arccoord/radii * 2);", arcCoord.vsOut());
816 }
817 v->codeAppend("}");
818
819 // Emit the fragment shader.
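// In the arc branch, (x_plus_1 - 1, y) are coordinates in which the corner ellipse is the
// unit circle, so fn = x^2 + y^2 - 1 is its implicit function. Dividing fn by the width of its
// per-pixel gradient approximates signed distance, which becomes a one-pixel coverage ramp
// centered on the arc.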
820 f->codeAppendf("float x_plus_1=%s.x, y=%s.y;", arcCoord.fsIn(), arcCoord.fsIn());
821 f->codeAppendf("half coverage;");
822 f->codeAppendf("if (0 == x_plus_1) {");
823 f->codeAppendf( "coverage = half(y);"); // We are a non-arc pixel (linear coverage).
824 f->codeAppendf("} else {");
825 f->codeAppendf( "float fn = x_plus_1 * (x_plus_1 - 2);"); // fn = (x+1)*(x-1) = x^2-1
826 f->codeAppendf( "fn = fma(y,y, fn);"); // fn = x^2 + y^2 - 1
827 if (useHWDerivatives) {
828 f->codeAppendf("float fnwidth = fwidth(fn);");
829 } else {
830 // The gradient is interpolated across arccoord.zw.
831 f->codeAppendf("float gx=%s.z, gy=%s.w;", arcCoord.fsIn(), arcCoord.fsIn());
832 f->codeAppendf("float fnwidth = abs(gx) + abs(gy);");
833 }
834 f->codeAppendf( "coverage = .5 - half(fn/fnwidth);");
835 if (proc.fFlags & ProcessorFlags::kMSAAEnabled) {
836 // MSAA uses ramps larger than 1px, so we need to clamp in both branches.
837 f->codeAppendf("}");
838 }
839 f->codeAppendf("coverage = clamp(coverage, 0, 1);");
840 if (!(proc.fFlags & ProcessorFlags::kMSAAEnabled)) {
841 // When not using MSAA, we only need to clamp in the "arc" branch.
842 f->codeAppendf("}");
843 }
844 if (proc.fFlags & ProcessorFlags::kFakeNonAA) {
845 f->codeAppendf("coverage = (coverage >= .5) ? 1 : 0;");
846 }
847 f->codeAppendf("half4 %s = half4(coverage);", args.fOutputCoverage);
848 }
849 };
850
851 std::unique_ptr<GrGeometryProcessor::ProgramImpl> FillRRectOpImpl::Processor::makeProgramImpl(
852 const GrShaderCaps&) const {
853 return std::make_unique<Impl>();
854 }
855
856 void FillRRectOpImpl::onCreateProgramInfo(const GrCaps* caps,
857 SkArenaAlloc* arena,
858 const GrSurfaceProxyView& writeView,
859 bool usesMSAASurface,
860 GrAppliedClip&& appliedClip,
861 const GrDstProxyView& dstProxyView,
862 GrXferBarrierFlags renderPassXferBarriers,
863 GrLoadOp colorLoadOp) {
864 if (usesMSAASurface) {
865 fProcessorFlags |= ProcessorFlags::kMSAAEnabled;
866 }
867 GrGeometryProcessor* gp = Processor::Make(arena, fHelper.aaType(), fProcessorFlags);
868 fProgramInfo = fHelper.createProgramInfo(caps, arena, writeView, usesMSAASurface,
869 std::move(appliedClip), dstProxyView, gp,
870 GrPrimitiveType::kTriangles, renderPassXferBarriers,
871 colorLoadOp);
872 }
873
874 void FillRRectOpImpl::onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) {
875 if (!fInstanceBuffer || !fIndexBuffer || !fVertexBuffer) {
876 return; // Setup failed.
877 }
878
879 flushState->bindPipelineAndScissorClip(*fProgramInfo, this->bounds());
880 flushState->bindTextures(fProgramInfo->geomProc(), nullptr, fProgramInfo->pipeline());
881 flushState->bindBuffers(std::move(fIndexBuffer), std::move(fInstanceBuffer),
882 std::move(fVertexBuffer));
883 flushState->drawIndexedInstanced(std::size(kIndexData), 0, fInstanceCount, fBaseInstance, 0);
884 }
885
886 // Will the given corner look good if we use HW derivatives?
887 bool can_use_hw_derivatives_with_coverage(const skvx::float2& devScale,
888 const skvx::float2& cornerRadii) {
889 skvx::float2 devRadii = devScale * cornerRadii;
890 if (devRadii[1] < devRadii[0]) {
891 devRadii = skvx::shuffle<1,0>(devRadii);
892 }
893 float minDevRadius = std::max(devRadii[0], 1.f); // Shader clamps radius at a minimum of 1.
894 // Is the gradient smooth enough for this corner to look ok if we use hardware derivatives?
895 // This threshold was arrived at subjectively on an NVIDIA chip.
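// i.e., require maxRadius < 5 * minRadius^2 (with minRadius clamped to at least one device
// pixel); beyond that the ellipse is presumably eccentric enough that fwidth() misestimates the
// gradient.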
896 return minDevRadius * minDevRadius * 5 > devRadii[1];
897 }
898
899 bool can_use_hw_derivatives_with_coverage(const skvx::float2& devScale,
900 const SkVector& cornerRadii) {
901 return can_use_hw_derivatives_with_coverage(devScale, skvx::float2::Load(&cornerRadii));
902 }
903
904 // Will the given round rect look good if we use HW derivatives?
905 bool can_use_hw_derivatives_with_coverage(const GrShaderCaps& shaderCaps,
906 const SkMatrix& viewMatrix,
907 const SkRRect& rrect) {
908 if (!shaderCaps.fShaderDerivativeSupport) {
909 return false;
910 }
911
912 auto x = skvx::float2(viewMatrix.getScaleX(), viewMatrix.getSkewX());
913 auto y = skvx::float2(viewMatrix.getSkewY(), viewMatrix.getScaleY());
914 skvx::float2 devScale = sqrt(x*x + y*y);
915 switch (rrect.getType()) {
916 case SkRRect::kEmpty_Type:
917 case SkRRect::kRect_Type:
918 return true;
919
920 case SkRRect::kOval_Type:
921 case SkRRect::kSimple_Type:
922 return can_use_hw_derivatives_with_coverage(devScale, rrect.getSimpleRadii());
923
924 case SkRRect::kNinePatch_Type: {
925 skvx::float2 r0 = skvx::float2::Load(SkRRectPriv::GetRadiiArray(rrect));
926 skvx::float2 r1 = skvx::float2::Load(SkRRectPriv::GetRadiiArray(rrect) + 2);
927 skvx::float2 minRadii = min(r0, r1);
928 skvx::float2 maxRadii = max(r0, r1);
929 return can_use_hw_derivatives_with_coverage(devScale,
930 skvx::float2(minRadii[0], maxRadii[1])) &&
931 can_use_hw_derivatives_with_coverage(devScale,
932 skvx::float2(maxRadii[0], minRadii[1]));
933 }
934
935 case SkRRect::kComplex_Type: {
936 for (int i = 0; i < 4; ++i) {
937 auto corner = static_cast<SkRRect::Corner>(i);
938 if (!can_use_hw_derivatives_with_coverage(devScale, rrect.radii(corner))) {
939 return false;
940 }
941 }
942 return true;
943 }
944 }
945 SK_ABORT("Invalid round rect type.");
946 }
947
948 } // anonymous namespace
949
950 GrOp::Owner Make(GrRecordingContext* ctx,
951 SkArenaAlloc* arena,
952 GrPaint&& paint,
953 const SkMatrix& viewMatrix,
954 const SkRRect& rrect,
955 const SkRect& localRect,
956 GrAA aa) {
957 return FillRRectOpImpl::Make(ctx, arena, std::move(paint), viewMatrix, rrect, localRect, aa);
958 }
959
960 GrOp::Owner Make(GrRecordingContext* ctx,
961 SkArenaAlloc* arena,
962 GrPaint&& paint,
963 const SkMatrix& viewMatrix,
964 const SkRRect& rrect,
965 const SkMatrix& localMatrix,
966 GrAA aa) {
967 return FillRRectOpImpl::Make(ctx, arena, std::move(paint), viewMatrix, rrect, localMatrix, aa);
968 }
969
970 } // namespace skgpu::ganesh::FillRRectOp
971
972 #if defined(GR_TEST_UTILS)
973
974 #include "src/gpu/ganesh/GrDrawOpTest.h"
975
976 GR_DRAW_OP_TEST_DEFINE(FillRRectOp) {
977 SkArenaAlloc arena(64 * sizeof(float));
978 SkMatrix viewMatrix = GrTest::TestMatrix(random);
979 GrAA aa = GrAA(random->nextBool());
980
981 SkRect rect = GrTest::TestRect(random);
982 float w = rect.width();
983 float h = rect.height();
984
985 SkRRect rrect;
986 // TODO: test out other rrect configurations
987 rrect.setNinePatch(rect, w / 3.0f, h / 4.0f, w / 5.0f, h / 6.0f);
988
989 return skgpu::ganesh::FillRRectOp::Make(
990 context, &arena, std::move(paint), viewMatrix, rrect, rrect.rect(), aa);
991 }
992
993 #endif
994