/*
 * Copyright 2012 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/ganesh/ops/SoftwarePathRenderer.h"

#include "include/gpu/GrDirectContext.h"
#include "include/private/base/SkSemaphore.h"
#include "src/core/SkTaskGroup.h"
#include "src/core/SkTraceEvent.h"
#include "src/gpu/ganesh/GrAuditTrail.h"
#include "src/gpu/ganesh/GrCaps.h"
#include "src/gpu/ganesh/GrClip.h"
#include "src/gpu/ganesh/GrDeferredProxyUploader.h"
#include "src/gpu/ganesh/GrDirectContextPriv.h"
#include "src/gpu/ganesh/GrGpuResourcePriv.h"
#include "src/gpu/ganesh/GrOpFlushState.h"
#include "src/gpu/ganesh/GrProxyProvider.h"
#include "src/gpu/ganesh/GrRecordingContextPriv.h"
#include "src/gpu/ganesh/GrSWMaskHelper.h"
#include "src/gpu/ganesh/GrUtil.h"
#include "src/gpu/ganesh/SkGr.h"
#include "src/gpu/ganesh/SurfaceDrawContext.h"
#include "src/gpu/ganesh/effects/GrTextureEffect.h"
#include "src/gpu/ganesh/geometry/GrStyledShape.h"
#include "src/gpu/ganesh/ops/GrDrawOp.h"

namespace {

/**
 * Payload class for use with GrTDeferredProxyUploader. The software path renderer only draws
 * a single path into the mask texture. This stores all of the information needed by the worker
 * thread's call to drawShape (see below, in onDrawPath).
 */
class SoftwarePathData {
public:
    SoftwarePathData(const SkIRect& maskBounds, const SkMatrix& viewMatrix,
                     const GrStyledShape& shape, GrAA aa)
            : fMaskBounds(maskBounds)
            , fViewMatrix(viewMatrix)
            , fShape(shape)
            , fAA(aa) {}

    const SkIRect& getMaskBounds() const { return fMaskBounds; }
    const SkMatrix* getViewMatrix() const { return &fViewMatrix; }
    const GrStyledShape& getShape() const { return fShape; }
    GrAA getAA() const { return fAA; }

private:
    SkIRect fMaskBounds;
    SkMatrix fViewMatrix;
    GrStyledShape fShape;
    GrAA fAA;
};

bool get_unclipped_shape_dev_bounds(const GrStyledShape& shape, const SkMatrix& matrix,
                                    SkIRect* devBounds) {
    SkRect shapeBounds = shape.styledBounds();
    if (shapeBounds.isEmpty()) {
        return false;
    }
    SkRect shapeDevBounds;
    matrix.mapRect(&shapeDevBounds, shapeBounds);
    // Even though these are "unclipped" bounds we still clip to the int32_t range.
    // This is the largest int32_t that is representable exactly as a float. The next 63 larger
    // ints would round down to this value when cast to a float, but who really cares.
    // INT32_MIN is exactly representable.
    static constexpr int32_t kMaxInt = 2147483520;
    if (!shapeDevBounds.intersect(SkRect::MakeLTRB(INT32_MIN, INT32_MIN, kMaxInt, kMaxInt))) {
        return false;
    }
    // Make sure that the resulting SkIRect can have representable width and height
    if (SkScalarRoundToInt(shapeDevBounds.width()) > kMaxInt ||
        SkScalarRoundToInt(shapeDevBounds.height()) > kMaxInt) {
        return false;
    }
    shapeDevBounds.roundOut(devBounds);
    return true;
}

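// Creates a deferred, non-renderable A8 texture proxy of the requested size to hold the mask,
// paired with the swizzle needed to read that format back as alpha.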
GrSurfaceProxyView make_deferred_mask_texture_view(GrRecordingContext* rContext,
                                                   SkBackingFit fit,
                                                   SkISize dimensions) {
    GrProxyProvider* proxyProvider = rContext->priv().proxyProvider();
    const GrCaps* caps = rContext->priv().caps();

    const GrBackendFormat format = caps->getDefaultBackendFormat(GrColorType::kAlpha_8,
                                                                  GrRenderable::kNo);

    skgpu::Swizzle swizzle = caps->getReadSwizzle(format, GrColorType::kAlpha_8);

    auto proxy = proxyProvider->createProxy(format,
                                            dimensions,
                                            GrRenderable::kNo,
                                            1,
                                            GrMipmapped::kNo,
                                            fit,
                                            skgpu::Budgeted::kYes,
                                            GrProtected::kNo,
                                            /*label=*/"MakeDeferredMaskTextureView");
    return {std::move(proxy), kTopLeft_GrSurfaceOrigin, swizzle};
}


}  // anonymous namespace

namespace skgpu::v1 {

////////////////////////////////////////////////////////////////////////////////
PathRenderer::CanDrawPath SoftwarePathRenderer::onCanDrawPath(const CanDrawPathArgs& args) const {
    // Pass on any style that applies. The caller will apply the style if a suitable renderer is
    // not found and try again with the new GrStyledShape.
    if (!args.fShape->style().applies() && SkToBool(fProxyProvider) &&
        (args.fAAType == GrAAType::kCoverage || args.fAAType == GrAAType::kNone)) {
        // This is the fallback renderer for when a path is too complicated for the GPU ones.
        return CanDrawPath::kAsBackup;
    }
    return CanDrawPath::kNo;
}

////////////////////////////////////////////////////////////////////////////////

// Gets the shape bounds, the clip bounds, and the intersection (if any). Returns false if there
// is no intersection.
bool SoftwarePathRenderer::GetShapeAndClipBounds(SurfaceDrawContext* sdc,
                                                 const GrClip* clip,
                                                 const GrStyledShape& shape,
                                                 const SkMatrix& matrix,
                                                 SkIRect* unclippedDevShapeBounds,
                                                 SkIRect* clippedDevShapeBounds,
                                                 SkIRect* devClipBounds) {
    // compute bounds as intersection of rt size, clip, and path
    *devClipBounds = clip ? clip->getConservativeBounds()
                          : SkIRect::MakeWH(sdc->width(), sdc->height());

    if (!get_unclipped_shape_dev_bounds(shape, matrix, unclippedDevShapeBounds)) {
        *unclippedDevShapeBounds = SkIRect::MakeEmpty();
        *clippedDevShapeBounds = SkIRect::MakeEmpty();
        return false;
    }
    if (!clippedDevShapeBounds->intersect(*devClipBounds, *unclippedDevShapeBounds)) {
        *clippedDevShapeBounds = SkIRect::MakeEmpty();
        return false;
    }
    return true;
}

////////////////////////////////////////////////////////////////////////////////

void SoftwarePathRenderer::DrawNonAARect(SurfaceDrawContext* sdc,
                                         GrPaint&& paint,
                                         const GrUserStencilSettings& userStencilSettings,
                                         const GrClip* clip,
                                         const SkMatrix& viewMatrix,
                                         const SkRect& rect,
                                         const SkMatrix& localMatrix) {
    sdc->stencilRect(clip, &userStencilSettings, std::move(paint), GrAA::kNo,
                     viewMatrix, rect, &localMatrix);
}

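// For an inverse fill, everything inside the clip but outside the path's device bounds is fully
// covered. Draw that region as up to four non-AA rects (above, left of, right of, and below the
// path bounds), each using the inverted view matrix to recover local coordinates for the paint.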
void SoftwarePathRenderer::DrawAroundInvPath(SurfaceDrawContext* sdc,
                                             GrPaint&& paint,
                                             const GrUserStencilSettings& userStencilSettings,
                                             const GrClip* clip,
                                             const SkMatrix& viewMatrix,
                                             const SkIRect& devClipBounds,
                                             const SkIRect& devPathBounds) {
    SkMatrix invert;
    if (!viewMatrix.invert(&invert)) {
        return;
    }

    SkRect rect;
    if (devClipBounds.fTop < devPathBounds.fTop) {
        rect.setLTRB(SkIntToScalar(devClipBounds.fLeft), SkIntToScalar(devClipBounds.fTop),
                     SkIntToScalar(devClipBounds.fRight), SkIntToScalar(devPathBounds.fTop));
        DrawNonAARect(sdc, GrPaint::Clone(paint), userStencilSettings, clip,
                      SkMatrix::I(), rect, invert);
    }
    if (devClipBounds.fLeft < devPathBounds.fLeft) {
        rect.setLTRB(SkIntToScalar(devClipBounds.fLeft), SkIntToScalar(devPathBounds.fTop),
                     SkIntToScalar(devPathBounds.fLeft), SkIntToScalar(devPathBounds.fBottom));
        DrawNonAARect(sdc, GrPaint::Clone(paint), userStencilSettings, clip,
                      SkMatrix::I(), rect, invert);
    }
    if (devClipBounds.fRight > devPathBounds.fRight) {
        rect.setLTRB(SkIntToScalar(devPathBounds.fRight), SkIntToScalar(devPathBounds.fTop),
                     SkIntToScalar(devClipBounds.fRight), SkIntToScalar(devPathBounds.fBottom));
        DrawNonAARect(sdc, GrPaint::Clone(paint), userStencilSettings, clip,
                      SkMatrix::I(), rect, invert);
    }
    if (devClipBounds.fBottom > devPathBounds.fBottom) {
        rect.setLTRB(SkIntToScalar(devClipBounds.fLeft), SkIntToScalar(devPathBounds.fBottom),
                     SkIntToScalar(devClipBounds.fRight), SkIntToScalar(devClipBounds.fBottom));
        DrawNonAARect(sdc, std::move(paint), userStencilSettings, clip,
                      SkMatrix::I(), rect, invert);
    }
}

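// Draws deviceSpaceRectToDraw into sdc, using the provided texture view as a coverage mask.
// textureOriginInDeviceSpace is the device-space position of the mask's upper-left texel.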
void SoftwarePathRenderer::DrawToTargetWithShapeMask(
        GrSurfaceProxyView view,
        SurfaceDrawContext* sdc,
        GrPaint&& paint,
        const GrUserStencilSettings& userStencilSettings,
        const GrClip* clip,
        const SkMatrix& viewMatrix,
        const SkIPoint& textureOriginInDeviceSpace,
        const SkIRect& deviceSpaceRectToDraw) {
    SkMatrix invert;
    if (!viewMatrix.invert(&invert)) {
        return;
    }

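    // The mask stores coverage in its single (alpha) channel; replicate that channel across all
    // four so the sampled value reads as coverage regardless of the underlying channel layout.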
    view.concatSwizzle(skgpu::Swizzle("aaaa"));

    SkRect dstRect = SkRect::Make(deviceSpaceRectToDraw);

    // We use device coords to compute the texture coordinates. We take the device coords and apply
    // a translation so that the top-left of the device bounds maps to 0,0, and then a scaling
    // matrix to normalized coords.
    SkMatrix maskMatrix = SkMatrix::Translate(SkIntToScalar(-textureOriginInDeviceSpace.fX),
                                              SkIntToScalar(-textureOriginInDeviceSpace.fY));
    maskMatrix.preConcat(viewMatrix);

    paint.setCoverageFragmentProcessor(GrTextureEffect::Make(
            std::move(view), kPremul_SkAlphaType, maskMatrix, GrSamplerState::Filter::kNearest));
    DrawNonAARect(sdc, std::move(paint), userStencilSettings, clip, SkMatrix::I(),
                  dstRect, invert);
}

////////////////////////////////////////////////////////////////////////////////
// return true on success; false on failure
bool SoftwarePathRenderer::onDrawPath(const DrawPathArgs& args) {
    GR_AUDIT_TRAIL_AUTO_FRAME(args.fContext->priv().auditTrail(),
                              "SoftwarePathRenderer::onDrawPath");

    if (!fProxyProvider) {
        return false;
    }

    SkASSERT(!args.fShape->style().applies());
    // We really need to know if the shape will be inverse filled or not.
    // If the path is hairline, ignore inverse fill.
    bool inverseFilled = args.fShape->inverseFilled() &&
                         !GrIsStrokeHairlineOrEquivalent(args.fShape->style(),
                                                         *args.fViewMatrix, nullptr);

    SkIRect unclippedDevShapeBounds, clippedDevShapeBounds, devClipBounds;
    // To prevent overloading the cache with entries during animations we limit the cache of masks
    // to cases where the matrix preserves axis alignment.
    bool useCache = fAllowCaching && !inverseFilled && args.fViewMatrix->preservesAxisAlignment() &&
                    args.fShape->hasUnstyledKey() && (GrAAType::kCoverage == args.fAAType);

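    // If the shape's bounds don't intersect the clip there is nothing to rasterize. An inverse
    // fill still has to cover the clip area surrounding the (possibly empty) path bounds.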
    if (!GetShapeAndClipBounds(args.fSurfaceDrawContext,
                               args.fClip, *args.fShape,
                               *args.fViewMatrix, &unclippedDevShapeBounds,
                               &clippedDevShapeBounds,
                               &devClipBounds)) {
        if (inverseFilled) {
            DrawAroundInvPath(args.fSurfaceDrawContext, std::move(args.fPaint),
                              *args.fUserStencilSettings, args.fClip, *args.fViewMatrix,
                              devClipBounds, unclippedDevShapeBounds);
        }
        return true;
    }

    const SkIRect* boundsForMask = &clippedDevShapeBounds;
    if (useCache) {
        // Use the cache only if >50% of the path is visible.
        int unclippedWidth = unclippedDevShapeBounds.width();
        int unclippedHeight = unclippedDevShapeBounds.height();
        int64_t unclippedArea = sk_64_mul(unclippedWidth, unclippedHeight);
        int64_t clippedArea = sk_64_mul(clippedDevShapeBounds.width(),
                                        clippedDevShapeBounds.height());
        int maxTextureSize = args.fSurfaceDrawContext->caps()->maxTextureSize();
        if (unclippedArea > 2 * clippedArea || unclippedWidth > maxTextureSize ||
            unclippedHeight > maxTextureSize) {
            useCache = false;
        } else {
            boundsForMask = &unclippedDevShapeBounds;
        }
    }

    skgpu::UniqueKey maskKey;
    if (useCache) {
        // We require the upper left 2x2 of the matrix to match exactly for a cache hit.
        SkScalar sx = args.fViewMatrix->get(SkMatrix::kMScaleX);
        SkScalar sy = args.fViewMatrix->get(SkMatrix::kMScaleY);
        SkScalar kx = args.fViewMatrix->get(SkMatrix::kMSkewX);
        SkScalar ky = args.fViewMatrix->get(SkMatrix::kMSkewY);
        static const skgpu::UniqueKey::Domain kDomain = skgpu::UniqueKey::GenerateDomain();
        skgpu::UniqueKey::Builder builder(&maskKey, kDomain, 7 + args.fShape->unstyledKeySize(),
                                          "SW Path Mask");
        builder[0] = boundsForMask->width();
        builder[1] = boundsForMask->height();

#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
        // Fractional translate does not affect caching on Android. This is done for better cache
        // hit ratio and speed, but it is matching HWUI behavior, which doesn't consider the matrix
        // at all when caching paths.
        SkFixed fracX = 0;
        SkFixed fracY = 0;
#else
        SkScalar tx = args.fViewMatrix->get(SkMatrix::kMTransX);
        SkScalar ty = args.fViewMatrix->get(SkMatrix::kMTransY);
        // Allow 8 bits each in x and y of subpixel positioning.
        SkFixed fracX = SkScalarToFixed(SkScalarFraction(tx)) & 0x0000FF00;
        SkFixed fracY = SkScalarToFixed(SkScalarFraction(ty)) & 0x0000FF00;
#endif
        builder[2] = SkFloat2Bits(sx);
        builder[3] = SkFloat2Bits(sy);
        builder[4] = SkFloat2Bits(kx);
        builder[5] = SkFloat2Bits(ky);
        // Distinguish between hairline and filled paths. For hairlines, we also need to include
        // the cap. (SW grows hairlines by 0.5 pixel with round and square caps). Note that
        // stroke-and-fill of hairlines is turned into pure fill by SkStrokeRec, so this covers
        // all cases we might see.
        uint32_t styleBits = args.fShape->style().isSimpleHairline() ?
                             ((args.fShape->style().strokeRec().getCap() << 1) | 1) : 0;
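        // Pack the subpixel fractions into the low 16 bits (x in bits 8..15, y in bits 0..7) with
        // the hairline/cap style bits above them.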
        builder[6] = fracX | (fracY >> 8) | (styleBits << 16);
        args.fShape->writeUnstyledKey(&builder[7]);
    }

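    // First see if a mask with this key already exists; reusing it skips the CPU rasterization.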
    GrSurfaceProxyView view;
    if (useCache) {
        sk_sp<GrTextureProxy> proxy = fProxyProvider->findOrCreateProxyByUniqueKey(maskKey);
        if (proxy) {
            skgpu::Swizzle swizzle = args.fSurfaceDrawContext->caps()->getReadSwizzle(
                    proxy->backendFormat(), GrColorType::kAlpha_8);
            view = {std::move(proxy), kTopLeft_GrSurfaceOrigin, swizzle};
            args.fContext->priv().stats()->incNumPathMasksCacheHits();
        }
    }
    if (!view) {
        SkBackingFit fit = useCache ? SkBackingFit::kExact : SkBackingFit::kApprox;
        GrAA aa = GrAA(GrAAType::kCoverage == args.fAAType);

        SkTaskGroup* taskGroup = nullptr;
        if (auto direct = args.fContext->asDirectContext()) {
            taskGroup = direct->priv().getTaskGroup();
        }

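        // With a task group available, rasterize the mask on a worker thread. The deferred
        // uploader holds the CPU pixels and hands them to the proxy when it is instantiated.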
        if (taskGroup) {
            view = make_deferred_mask_texture_view(args.fContext, fit, boundsForMask->size());
            if (!view) {
                return false;
            }

            auto uploader = std::make_unique<GrTDeferredProxyUploader<SoftwarePathData>>(
                    *boundsForMask, *args.fViewMatrix, *args.fShape, aa);
            GrTDeferredProxyUploader<SoftwarePathData>* uploaderRaw = uploader.get();

            auto drawAndUploadMask = [uploaderRaw] {
                TRACE_EVENT0("skia.gpu", "Threaded SW Mask Render");
                GrSWMaskHelper helper(uploaderRaw->getPixels());
                if (helper.init(uploaderRaw->data().getMaskBounds())) {
                    helper.drawShape(uploaderRaw->data().getShape(),
                                     *uploaderRaw->data().getViewMatrix(),
                                     SkRegion::kReplace_Op, uploaderRaw->data().getAA(), 0xFF);
                } else {
                    SkDEBUGFAIL("Unable to allocate SW mask.");
                }
                uploaderRaw->signalAndFreeData();
            };
            taskGroup->add(std::move(drawAndUploadMask));
            view.asTextureProxy()->texPriv().setDeferredUploader(std::move(uploader));
        } else {
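            // No task group: rasterize the mask synchronously on this thread and wrap the result
            // in a texture view.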
            GrSWMaskHelper helper;
            if (!helper.init(*boundsForMask)) {
                return false;
            }
            helper.drawShape(*args.fShape, *args.fViewMatrix, SkRegion::kReplace_Op, aa, 0xFF);
            view = helper.toTextureView(args.fContext, fit);
        }

        if (!view) {
            return false;
        }
        if (useCache) {
            SkASSERT(view.origin() == kTopLeft_GrSurfaceOrigin);

            // We will add an invalidator to the path so that if the path goes away we will
            // delete or recycle the mask texture.
            auto listener = GrMakeUniqueKeyInvalidationListener(&maskKey,
                                                                args.fContext->priv().contextID());
            fProxyProvider->assignUniqueKeyToProxy(maskKey, view.asTextureProxy());
            args.fShape->addGenIDChangeListener(std::move(listener));
        }

        args.fContext->priv().stats()->incNumPathMasksGenerated();
    }
    SkASSERT(view);
    if (inverseFilled) {
        DrawAroundInvPath(args.fSurfaceDrawContext, GrPaint::Clone(args.fPaint),
                          *args.fUserStencilSettings, args.fClip, *args.fViewMatrix, devClipBounds,
                          unclippedDevShapeBounds);
    }
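    // Finally, draw a rect over the mask's device-space bounds; DrawToTargetWithShapeMask samples
    // the mask texture as the paint's coverage.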
    DrawToTargetWithShapeMask(std::move(view), args.fSurfaceDrawContext, std::move(args.fPaint),
                              *args.fUserStencilSettings, args.fClip, *args.fViewMatrix,
                              SkIPoint{boundsForMask->fLeft, boundsForMask->fTop}, *boundsForMask);

    return true;
}

}  // namespace skgpu::v1