1 /*
2 * Copyright 2012 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "src/gpu/ganesh/ops/SoftwarePathRenderer.h"
9
10 #include "include/gpu/GrDirectContext.h"
11 #include "include/private/base/SkFixed.h"
12 #include "include/private/base/SkSemaphore.h"
13 #include "src/base/SkFloatBits.h"
14 #include "src/core/SkTaskGroup.h"
15 #include "src/core/SkTraceEvent.h"
16 #include "src/gpu/ganesh/GrAuditTrail.h"
17 #include "src/gpu/ganesh/GrCaps.h"
18 #include "src/gpu/ganesh/GrClip.h"
19 #include "src/gpu/ganesh/GrDeferredProxyUploader.h"
20 #include "src/gpu/ganesh/GrDirectContextPriv.h"
21 #include "src/gpu/ganesh/GrGpuResourcePriv.h"
22 #include "src/gpu/ganesh/GrOpFlushState.h"
23 #include "src/gpu/ganesh/GrProxyProvider.h"
24 #include "src/gpu/ganesh/GrRecordingContextPriv.h"
25 #include "src/gpu/ganesh/GrSWMaskHelper.h"
26 #include "src/gpu/ganesh/GrUtil.h"
27 #include "src/gpu/ganesh/SkGr.h"
28 #include "src/gpu/ganesh/SurfaceDrawContext.h"
29 #include "src/gpu/ganesh/effects/GrTextureEffect.h"
30 #include "src/gpu/ganesh/geometry/GrStyledShape.h"
31 #include "src/gpu/ganesh/ops/GrDrawOp.h"
32
33 namespace {
34
35 /**
36 * Payload class for use with GrTDeferredProxyUploader. The software path renderer only draws
37 * a single path into the mask texture. This stores all of the information needed by the worker
38 * thread's call to drawShape (see below, in onDrawPath).
39 */
/**
 * Payload class for use with GrTDeferredProxyUploader. The software path renderer only draws
 * a single path into the mask texture. This stores all of the information needed by the worker
 * thread's call to drawShape (see below, in onDrawPath).
 */
class SoftwarePathData {
public:
    SoftwarePathData(const SkIRect& maskBounds, const SkMatrix& viewMatrix,
                     const GrStyledShape& shape, GrAA aa)
            : fMaskBounds(maskBounds)
            , fViewMatrix(viewMatrix)
            , fShape(shape)
            , fAA(aa) {}

    // Device-space bounds of the mask to be rasterized.
    const SkIRect& getMaskBounds() const { return fMaskBounds; }
    // Returned by pointer to match the signature GrSWMaskHelper::drawShape consumes.
    const SkMatrix* getViewMatrix() const { return &fViewMatrix; }
    const GrStyledShape& getShape() const { return fShape; }
    GrAA getAA() const { return fAA; }

private:
    SkIRect fMaskBounds;        // device-space bounds of the mask
    SkMatrix fViewMatrix;       // copied so the worker thread owns its own matrix
    GrStyledShape fShape;       // copied so the worker thread owns its own shape
    GrAA fAA;                   // whether the mask is rasterized with antialiasing
};
60
get_unclipped_shape_dev_bounds(const GrStyledShape & shape,const SkMatrix & matrix,SkIRect * devBounds)61 bool get_unclipped_shape_dev_bounds(const GrStyledShape& shape, const SkMatrix& matrix,
62 SkIRect* devBounds) {
63 SkRect shapeBounds = shape.styledBounds();
64 if (shapeBounds.isEmpty()) {
65 return false;
66 }
67 SkRect shapeDevBounds;
68 matrix.mapRect(&shapeDevBounds, shapeBounds);
69 // Even though these are "unclipped" bounds we still clip to the int32_t range.
70 // This is the largest int32_t that is representable exactly as a float. The next 63 larger ints
71 // would round down to this value when cast to a float, but who really cares.
72 // INT32_MIN is exactly representable.
73 static constexpr int32_t kMaxInt = 2147483520;
74 if (!shapeDevBounds.intersect(SkRect::MakeLTRB(INT32_MIN, INT32_MIN, kMaxInt, kMaxInt))) {
75 return false;
76 }
77 // Make sure that the resulting SkIRect can have representable width and height
78 if (SkScalarRoundToInt(shapeDevBounds.width()) > kMaxInt ||
79 SkScalarRoundToInt(shapeDevBounds.height()) > kMaxInt) {
80 return false;
81 }
82 shapeDevBounds.roundOut(devBounds);
83 return true;
84 }
85
make_deferred_mask_texture_view(GrRecordingContext * rContext,SkBackingFit fit,SkISize dimensions)86 GrSurfaceProxyView make_deferred_mask_texture_view(GrRecordingContext* rContext,
87 SkBackingFit fit,
88 SkISize dimensions) {
89 GrProxyProvider* proxyProvider = rContext->priv().proxyProvider();
90 const GrCaps* caps = rContext->priv().caps();
91
92 const GrBackendFormat format = caps->getDefaultBackendFormat(GrColorType::kAlpha_8,
93 GrRenderable::kNo);
94
95 skgpu::Swizzle swizzle = caps->getReadSwizzle(format, GrColorType::kAlpha_8);
96
97 auto proxy = proxyProvider->createProxy(format,
98 dimensions,
99 GrRenderable::kNo,
100 1,
101 skgpu::Mipmapped::kNo,
102 fit,
103 skgpu::Budgeted::kYes,
104 GrProtected::kNo,
105 /*label=*/"MakeDeferredMaskTextureView");
106 return {std::move(proxy), kTopLeft_GrSurfaceOrigin, swizzle};
107 }
108
109
110 } // anonymous namespace
111
112 namespace skgpu::ganesh {
113
114 ////////////////////////////////////////////////////////////////////////////////
onCanDrawPath(const CanDrawPathArgs & args) const115 PathRenderer::CanDrawPath SoftwarePathRenderer::onCanDrawPath(const CanDrawPathArgs& args) const {
116 // Pass on any style that applies. The caller will apply the style if a suitable renderer is
117 // not found and try again with the new GrStyledShape.
118 if (!args.fShape->style().applies() && SkToBool(fProxyProvider) &&
119 (args.fAAType == GrAAType::kCoverage || args.fAAType == GrAAType::kNone)) {
120 // This is the fallback renderer for when a path is too complicated for the GPU ones.
121 return CanDrawPath::kAsBackup;
122 }
123 return CanDrawPath::kNo;
124 }
125
126 ////////////////////////////////////////////////////////////////////////////////
127
128 // Gets the shape bounds, the clip bounds, and the intersection (if any). Returns false if there
129 // is no intersection.
GetShapeAndClipBounds(SurfaceDrawContext * sdc,const GrClip * clip,const GrStyledShape & shape,const SkMatrix & matrix,SkIRect * unclippedDevShapeBounds,SkIRect * clippedDevShapeBounds,SkIRect * devClipBounds)130 bool SoftwarePathRenderer::GetShapeAndClipBounds(SurfaceDrawContext* sdc,
131 const GrClip* clip,
132 const GrStyledShape& shape,
133 const SkMatrix& matrix,
134 SkIRect* unclippedDevShapeBounds,
135 SkIRect* clippedDevShapeBounds,
136 SkIRect* devClipBounds) {
137 // compute bounds as intersection of rt size, clip, and path
138 *devClipBounds = clip ? clip->getConservativeBounds()
139 : SkIRect::MakeWH(sdc->width(), sdc->height());
140
141 if (!get_unclipped_shape_dev_bounds(shape, matrix, unclippedDevShapeBounds)) {
142 *unclippedDevShapeBounds = SkIRect::MakeEmpty();
143 *clippedDevShapeBounds = SkIRect::MakeEmpty();
144 return false;
145 }
146 if (!clippedDevShapeBounds->intersect(*devClipBounds, *unclippedDevShapeBounds)) {
147 *clippedDevShapeBounds = SkIRect::MakeEmpty();
148 return false;
149 }
150 return true;
151 }
152
153 ////////////////////////////////////////////////////////////////////////////////
154
// Thin wrapper over SurfaceDrawContext::stencilRect: draws 'rect' (in the space of
// 'viewMatrix') without AA, applying 'userStencilSettings'. 'localMatrix' maps the rect back
// into the paint's local coordinate space.
void SoftwarePathRenderer::DrawNonAARect(SurfaceDrawContext* sdc,
                                         GrPaint&& paint,
                                         const GrUserStencilSettings& userStencilSettings,
                                         const GrClip* clip,
                                         const SkMatrix& viewMatrix,
                                         const SkRect& rect,
                                         const SkMatrix& localMatrix) {
    sdc->stencilRect(clip, &userStencilSettings, std::move(paint), GrAA::kNo,
                     viewMatrix, rect, &localMatrix);
}
165
DrawAroundInvPath(SurfaceDrawContext * sdc,GrPaint && paint,const GrUserStencilSettings & userStencilSettings,const GrClip * clip,const SkMatrix & viewMatrix,const SkIRect & devClipBounds,const SkIRect & devPathBounds)166 void SoftwarePathRenderer::DrawAroundInvPath(SurfaceDrawContext* sdc,
167 GrPaint&& paint,
168 const GrUserStencilSettings& userStencilSettings,
169 const GrClip* clip,
170 const SkMatrix& viewMatrix,
171 const SkIRect& devClipBounds,
172 const SkIRect& devPathBounds) {
173 SkMatrix invert;
174 if (!viewMatrix.invert(&invert)) {
175 return;
176 }
177
178 SkRect rect;
179 if (devClipBounds.fTop < devPathBounds.fTop) {
180 rect.setLTRB(SkIntToScalar(devClipBounds.fLeft), SkIntToScalar(devClipBounds.fTop),
181 SkIntToScalar(devClipBounds.fRight), SkIntToScalar(devPathBounds.fTop));
182 DrawNonAARect(sdc, GrPaint::Clone(paint), userStencilSettings, clip,
183 SkMatrix::I(), rect, invert);
184 }
185 if (devClipBounds.fLeft < devPathBounds.fLeft) {
186 rect.setLTRB(SkIntToScalar(devClipBounds.fLeft), SkIntToScalar(devPathBounds.fTop),
187 SkIntToScalar(devPathBounds.fLeft), SkIntToScalar(devPathBounds.fBottom));
188 DrawNonAARect(sdc, GrPaint::Clone(paint), userStencilSettings, clip,
189 SkMatrix::I(), rect, invert);
190 }
191 if (devClipBounds.fRight > devPathBounds.fRight) {
192 rect.setLTRB(SkIntToScalar(devPathBounds.fRight), SkIntToScalar(devPathBounds.fTop),
193 SkIntToScalar(devClipBounds.fRight), SkIntToScalar(devPathBounds.fBottom));
194 DrawNonAARect(sdc, GrPaint::Clone(paint), userStencilSettings, clip,
195 SkMatrix::I(), rect, invert);
196 }
197 if (devClipBounds.fBottom > devPathBounds.fBottom) {
198 rect.setLTRB(SkIntToScalar(devClipBounds.fLeft), SkIntToScalar(devPathBounds.fBottom),
199 SkIntToScalar(devClipBounds.fRight), SkIntToScalar(devClipBounds.fBottom));
200 DrawNonAARect(sdc, std::move(paint), userStencilSettings, clip,
201 SkMatrix::I(), rect, invert);
202 }
203 }
204
DrawToTargetWithShapeMask(GrSurfaceProxyView view,SurfaceDrawContext * sdc,GrPaint && paint,const GrUserStencilSettings & userStencilSettings,const GrClip * clip,const SkMatrix & viewMatrix,const SkIPoint & textureOriginInDeviceSpace,const SkIRect & deviceSpaceRectToDraw)205 void SoftwarePathRenderer::DrawToTargetWithShapeMask(
206 GrSurfaceProxyView view,
207 SurfaceDrawContext* sdc,
208 GrPaint&& paint,
209 const GrUserStencilSettings& userStencilSettings,
210 const GrClip* clip,
211 const SkMatrix& viewMatrix,
212 const SkIPoint& textureOriginInDeviceSpace,
213 const SkIRect& deviceSpaceRectToDraw) {
214 SkMatrix invert;
215 if (!viewMatrix.invert(&invert)) {
216 return;
217 }
218
219 view.concatSwizzle(skgpu::Swizzle("aaaa"));
220
221 SkRect dstRect = SkRect::Make(deviceSpaceRectToDraw);
222
223 // We use device coords to compute the texture coordinates. We take the device coords and apply
224 // a translation so that the top-left of the device bounds maps to 0,0, and then a scaling
225 // matrix to normalized coords.
226 SkMatrix maskMatrix = SkMatrix::Translate(SkIntToScalar(-textureOriginInDeviceSpace.fX),
227 SkIntToScalar(-textureOriginInDeviceSpace.fY));
228 maskMatrix.preConcat(viewMatrix);
229
230 paint.setCoverageFragmentProcessor(GrTextureEffect::Make(
231 std::move(view), kPremul_SkAlphaType, maskMatrix, GrSamplerState::Filter::kNearest));
232 DrawNonAARect(sdc, std::move(paint), userStencilSettings, clip, SkMatrix::I(),
233 dstRect, invert);
234 }
235
236 ////////////////////////////////////////////////////////////////////////////////
237 // return true on success; false on failure
// Software-rasterizes the shape into an alpha-8 coverage mask (cached when possible, and on a
// worker thread when a task group is available), then draws the mask as a textured rect.
bool SoftwarePathRenderer::onDrawPath(const DrawPathArgs& args) {
    GR_AUDIT_TRAIL_AUTO_FRAME(args.fContext->priv().auditTrail(),
                              "SoftwarePathRenderer::onDrawPath");

    // A proxy provider is required to create/cache the mask texture.
    if (!fProxyProvider) {
        return false;
    }

    SkASSERT(!args.fShape->style().applies());
    // We really need to know if the shape will be inverse filled or not
    // If the path is hairline, ignore inverse fill.
    bool inverseFilled = args.fShape->inverseFilled() &&
                        !GrIsStrokeHairlineOrEquivalent(args.fShape->style(),
                                                        *args.fViewMatrix, nullptr);

    SkIRect unclippedDevShapeBounds, clippedDevShapeBounds, devClipBounds;
    // To prevent overloading the cache with entries during animations we limit the cache of masks
    // to cases where the matrix preserves axis alignment.
    bool useCache = fAllowCaching && !inverseFilled && args.fViewMatrix->preservesAxisAlignment() &&
                    args.fShape->hasUnstyledKey() && (GrAAType::kCoverage == args.fAAType);

    if (!GetShapeAndClipBounds(args.fSurfaceDrawContext,
                               args.fClip, *args.fShape,
                               *args.fViewMatrix, &unclippedDevShapeBounds,
                               &clippedDevShapeBounds,
                               &devClipBounds)) {
        // The shape doesn't intersect the clip. For an inverse fill we still must cover the
        // region around the (out-of-clip) shape bounds.
        if (inverseFilled) {
            DrawAroundInvPath(args.fSurfaceDrawContext, std::move(args.fPaint),
                              *args.fUserStencilSettings, args.fClip, *args.fViewMatrix,
                              devClipBounds, unclippedDevShapeBounds);
        }
        return true;
    }

    const SkIRect* boundsForMask = &clippedDevShapeBounds;
    if (useCache) {
        // Use the cache only if >50% of the path is visible.
        int unclippedWidth = unclippedDevShapeBounds.width();
        int unclippedHeight = unclippedDevShapeBounds.height();
        int64_t unclippedArea = sk_64_mul(unclippedWidth, unclippedHeight);
        int64_t clippedArea = sk_64_mul(clippedDevShapeBounds.width(),
                                        clippedDevShapeBounds.height());
        int maxTextureSize = args.fSurfaceDrawContext->caps()->maxTextureSize();
        if (unclippedArea > 2 * clippedArea || unclippedWidth > maxTextureSize ||
            unclippedHeight > maxTextureSize) {
            useCache = false;
        } else {
            // When caching, the mask covers the full unclipped shape bounds.
            boundsForMask = &unclippedDevShapeBounds;
        }
    }

    skgpu::UniqueKey maskKey;
    if (useCache) {
        // We require the upper left 2x2 of the matrix to match exactly for a cache hit.
        SkScalar sx = args.fViewMatrix->get(SkMatrix::kMScaleX);
        SkScalar sy = args.fViewMatrix->get(SkMatrix::kMScaleY);
        SkScalar kx = args.fViewMatrix->get(SkMatrix::kMSkewX);
        SkScalar ky = args.fViewMatrix->get(SkMatrix::kMSkewY);
        static const skgpu::UniqueKey::Domain kDomain = skgpu::UniqueKey::GenerateDomain();
        // Key layout: [0]=width, [1]=height, [2..5]=2x2 matrix bits, [6]=subpixel+style bits,
        // [7..]=the shape's unstyled key.
        skgpu::UniqueKey::Builder builder(&maskKey, kDomain, 7 + args.fShape->unstyledKeySize(),
                                          "SW Path Mask");
        builder[0] = boundsForMask->width();
        builder[1] = boundsForMask->height();

#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
        // Fractional translate does not affect caching on Android. This is done for better cache
        // hit ratio and speed, but it is matching HWUI behavior, which doesn't consider the matrix
        // at all when caching paths.
        SkFixed fracX = 0;
        SkFixed fracY = 0;
#else
        SkScalar tx = args.fViewMatrix->get(SkMatrix::kMTransX);
        SkScalar ty = args.fViewMatrix->get(SkMatrix::kMTransY);
        // Allow 8 bits each in x and y of subpixel positioning.
        SkFixed fracX = SkScalarToFixed(SkScalarFraction(tx)) & 0x0000FF00;
        SkFixed fracY = SkScalarToFixed(SkScalarFraction(ty)) & 0x0000FF00;
#endif
        builder[2] = SkFloat2Bits(sx);
        builder[3] = SkFloat2Bits(sy);
        builder[4] = SkFloat2Bits(kx);
        builder[5] = SkFloat2Bits(ky);
        // Distinguish between hairline and filled paths. For hairlines, we also need to include
        // the cap. (SW grows hairlines by 0.5 pixel with round and square caps). Note that
        // stroke-and-fill of hairlines is turned into pure fill by SkStrokeRec, so this covers
        // all cases we might see.
        uint32_t styleBits = args.fShape->style().isSimpleHairline() ?
                             ((args.fShape->style().strokeRec().getCap() << 1) | 1) : 0;
        builder[6] = fracX | (fracY >> 8) | (styleBits << 16);
        args.fShape->writeUnstyledKey(&builder[7]);
    }

    GrSurfaceProxyView view;
    if (useCache) {
        // First look for an already-rasterized mask under this key.
        sk_sp<GrTextureProxy> proxy = fProxyProvider->findOrCreateProxyByUniqueKey(maskKey);
        if (proxy) {
            skgpu::Swizzle swizzle = args.fSurfaceDrawContext->caps()->getReadSwizzle(
                    proxy->backendFormat(), GrColorType::kAlpha_8);
            view = {std::move(proxy), kTopLeft_GrSurfaceOrigin, swizzle};
            args.fContext->priv().stats()->incNumPathMasksCacheHits();
        }
    }
    if (!view) {
        // Cached masks are exact-fit (the key records their dimensions); otherwise approx is fine.
        SkBackingFit fit = useCache ? SkBackingFit::kExact : SkBackingFit::kApprox;
        GrAA aa = GrAA(GrAAType::kCoverage == args.fAAType);

        SkTaskGroup* taskGroup = nullptr;
        if (auto direct = args.fContext->asDirectContext()) {
            taskGroup = direct->priv().getTaskGroup();
        }

        if (taskGroup) {
            // Rasterize asynchronously: create the proxy now and attach a deferred uploader
            // that the worker thread fills in, then signals.
            view = make_deferred_mask_texture_view(args.fContext, fit, boundsForMask->size());
            if (!view) {
                return false;
            }

            auto uploader = std::make_unique<GrTDeferredProxyUploader<SoftwarePathData>>(
                    *boundsForMask, *args.fViewMatrix, *args.fShape, aa);
            GrTDeferredProxyUploader<SoftwarePathData>* uploaderRaw = uploader.get();

            // Runs on a worker thread; 0xFF draws full coverage into the A8 mask.
            auto drawAndUploadMask = [uploaderRaw] {
                TRACE_EVENT0("skia.gpu", "Threaded SW Mask Render");
                GrSWMaskHelper helper(uploaderRaw->getPixels());
                if (helper.init(uploaderRaw->data().getMaskBounds())) {
                    helper.drawShape(uploaderRaw->data().getShape(),
                                     *uploaderRaw->data().getViewMatrix(),
                                     uploaderRaw->data().getAA(), 0xFF);
                } else {
                    SkDEBUGFAIL("Unable to allocate SW mask.");
                }
                uploaderRaw->signalAndFreeData();
            };
            taskGroup->add(std::move(drawAndUploadMask));
            view.asTextureProxy()->texPriv().setDeferredUploader(std::move(uploader));
        } else {
            // No task group: rasterize the mask synchronously on this thread.
            GrSWMaskHelper helper;
            if (!helper.init(*boundsForMask)) {
                return false;
            }
            helper.drawShape(*args.fShape, *args.fViewMatrix, aa, 0xFF);
            view = helper.toTextureView(args.fContext, fit);
        }

        if (!view) {
            return false;
        }
        if (useCache) {
            SkASSERT(view.origin() == kTopLeft_GrSurfaceOrigin);

            // We will add an invalidator to the path so that if the path goes away we will
            // delete or recycle the mask texture.
            auto listener = GrMakeUniqueKeyInvalidationListener(&maskKey,
                                                                args.fContext->priv().contextID());
            fProxyProvider->assignUniqueKeyToProxy(maskKey, view.asTextureProxy());
            args.fShape->addGenIDChangeListener(std::move(listener));
        }

        args.fContext->priv().stats()->incNumPathMasksGenerated();
    }
    SkASSERT(view);
    if (inverseFilled) {
        // The mask only covers the shape bounds; fill the remainder of the clip separately.
        DrawAroundInvPath(args.fSurfaceDrawContext, GrPaint::Clone(args.fPaint),
                          *args.fUserStencilSettings, args.fClip, *args.fViewMatrix, devClipBounds,
                          unclippedDevShapeBounds);
    }
    DrawToTargetWithShapeMask(std::move(view), args.fSurfaceDrawContext, std::move(args.fPaint),
                              *args.fUserStencilSettings, args.fClip, *args.fViewMatrix,
                              SkIPoint{boundsForMask->fLeft, boundsForMask->fTop}, *boundsForMask);

    return true;
}
409
410 } // namespace skgpu::ganesh
411