/*
 * Copyright 2012 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "include/private/SkSemaphore.h"
#include "src/core/SkMakeUnique.h"
#include "src/core/SkTaskGroup.h"
#include "src/core/SkTraceEvent.h"
#include "src/gpu/GrAuditTrail.h"
#include "src/gpu/GrCaps.h"
#include "src/gpu/GrClip.h"
#include "src/gpu/GrContextPriv.h"
#include "src/gpu/GrDeferredProxyUploader.h"
#include "src/gpu/GrGpuResourcePriv.h"
#include "src/gpu/GrOpFlushState.h"
#include "src/gpu/GrOpList.h"
#include "src/gpu/GrProxyProvider.h"
#include "src/gpu/GrRecordingContextPriv.h"
#include "src/gpu/GrRenderTargetContextPriv.h"
#include "src/gpu/GrSWMaskHelper.h"
#include "src/gpu/GrSoftwarePathRenderer.h"
#include "src/gpu/GrSurfaceContextPriv.h"
#include "src/gpu/geometry/GrShape.h"
#include "src/gpu/ops/GrDrawOp.h"

////////////////////////////////////////////////////////////////////////////////
GrPathRenderer::CanDrawPath
GrSoftwarePathRenderer::onCanDrawPath(const CanDrawPathArgs& args) const {
    // Pass on any style that applies. The caller will apply the style if a suitable renderer is
    // not found and try again with the new GrShape.
    if (!args.fShape->style().applies() && SkToBool(fProxyProvider) &&
        (args.fAAType == GrAAType::kCoverage || args.fAAType == GrAAType::kNone)) {
        // This is the fallback renderer for when a path is too complicated for the GPU ones.
        return CanDrawPath::kAsBackup;
    }
    return CanDrawPath::kNo;
}

////////////////////////////////////////////////////////////////////////////////
static bool get_unclipped_shape_dev_bounds(const GrShape& shape, const SkMatrix& matrix,
                                           SkIRect* devBounds) {
    SkRect shapeBounds = shape.styledBounds();
    if (shapeBounds.isEmpty()) {
        return false;
    }
    SkRect shapeDevBounds;
    matrix.mapRect(&shapeDevBounds, shapeBounds);
    // Even though these are "unclipped" bounds we still clip to the int32_t range.
    // This is the largest int32_t that is representable exactly as a float. The next 63 larger ints
    // would round down to this value when cast to a float, but who really cares.
    // INT32_MIN is exactly representable.
    static constexpr int32_t kMaxInt = 2147483520;
    if (!shapeDevBounds.intersect(SkRect::MakeLTRB(INT32_MIN, INT32_MIN, kMaxInt, kMaxInt))) {
        return false;
    }
    // Make sure that the resulting SkIRect can have representable width and height
    if (SkScalarRoundToInt(shapeDevBounds.width()) > kMaxInt ||
        SkScalarRoundToInt(shapeDevBounds.height()) > kMaxInt) {
        return false;
    }
    shapeDevBounds.roundOut(devBounds);
    return true;
}

// Gets the shape bounds, the clip bounds, and the intersection (if any). Returns false if there
// is no intersection.
bool GrSoftwarePathRenderer::GetShapeAndClipBounds(GrRenderTargetContext* renderTargetContext,
                                                   const GrClip& clip,
                                                   const GrShape& shape,
                                                   const SkMatrix& matrix,
                                                   SkIRect* unclippedDevShapeBounds,
                                                   SkIRect* clippedDevShapeBounds,
                                                   SkIRect* devClipBounds) {
    // compute bounds as intersection of rt size, clip, and path
    clip.getConservativeBounds(renderTargetContext->width(),
                               renderTargetContext->height(),
                               devClipBounds);

    if (!get_unclipped_shape_dev_bounds(shape, matrix, unclippedDevShapeBounds)) {
        *unclippedDevShapeBounds = SkIRect::EmptyIRect();
        *clippedDevShapeBounds = SkIRect::EmptyIRect();
        return false;
    }
    if (!clippedDevShapeBounds->intersect(*devClipBounds, *unclippedDevShapeBounds)) {
        *clippedDevShapeBounds = SkIRect::EmptyIRect();
        return false;
    }
    return true;
}

////////////////////////////////////////////////////////////////////////////////

void GrSoftwarePathRenderer::DrawNonAARect(GrRenderTargetContext* renderTargetContext,
                                           GrPaint&& paint,
                                           const GrUserStencilSettings& userStencilSettings,
                                           const GrClip& clip,
                                           const SkMatrix& viewMatrix,
                                           const SkRect& rect,
                                           const SkMatrix& localMatrix) {
    renderTargetContext->priv().stencilRect(clip, &userStencilSettings, std::move(paint), GrAA::kNo,
                                            viewMatrix, rect, &localMatrix);
}

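// For inverse fills, draw the regions of the clip bounds that lie outside the path's device
// bounds (up to four rects). The inverted view matrix is passed as the local matrix so that
// paint effects are still evaluated in the path's original (local) space.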
void GrSoftwarePathRenderer::DrawAroundInvPath(GrRenderTargetContext* renderTargetContext,
                                               GrPaint&& paint,
                                               const GrUserStencilSettings& userStencilSettings,
                                               const GrClip& clip,
                                               const SkMatrix& viewMatrix,
                                               const SkIRect& devClipBounds,
                                               const SkIRect& devPathBounds) {
    SkMatrix invert;
    if (!viewMatrix.invert(&invert)) {
        return;
    }

    SkRect rect;
    if (devClipBounds.fTop < devPathBounds.fTop) {
        rect.iset(devClipBounds.fLeft, devClipBounds.fTop,
                  devClipBounds.fRight, devPathBounds.fTop);
        DrawNonAARect(renderTargetContext, GrPaint::Clone(paint), userStencilSettings, clip,
                      SkMatrix::I(), rect, invert);
    }
    if (devClipBounds.fLeft < devPathBounds.fLeft) {
        rect.iset(devClipBounds.fLeft, devPathBounds.fTop,
                  devPathBounds.fLeft, devPathBounds.fBottom);
        DrawNonAARect(renderTargetContext, GrPaint::Clone(paint), userStencilSettings, clip,
                      SkMatrix::I(), rect, invert);
    }
    if (devClipBounds.fRight > devPathBounds.fRight) {
        rect.iset(devPathBounds.fRight, devPathBounds.fTop,
                  devClipBounds.fRight, devPathBounds.fBottom);
        DrawNonAARect(renderTargetContext, GrPaint::Clone(paint), userStencilSettings, clip,
                      SkMatrix::I(), rect, invert);
    }
    if (devClipBounds.fBottom > devPathBounds.fBottom) {
        rect.iset(devClipBounds.fLeft, devPathBounds.fBottom,
                  devClipBounds.fRight, devClipBounds.fBottom);
        DrawNonAARect(renderTargetContext, std::move(paint), userStencilSettings, clip,
                      SkMatrix::I(), rect, invert);
    }
}

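// Draws 'deviceSpaceRectToDraw' using 'proxy' as a coverage mask; 'textureOriginInDeviceSpace'
// is the device-space position that corresponds to the mask texture's (0, 0) texel.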
void GrSoftwarePathRenderer::DrawToTargetWithShapeMask(
        sk_sp<GrTextureProxy> proxy,
        GrRenderTargetContext* renderTargetContext,
        GrPaint&& paint,
        const GrUserStencilSettings& userStencilSettings,
        const GrClip& clip,
        const SkMatrix& viewMatrix,
        const SkIPoint& textureOriginInDeviceSpace,
        const SkIRect& deviceSpaceRectToDraw) {
    SkMatrix invert;
    if (!viewMatrix.invert(&invert)) {
        return;
    }

    SkRect dstRect = SkRect::Make(deviceSpaceRectToDraw);

    // We use device coords to compute the texture coordinates. We take the device coords and apply
    // a translation so that the top-left of the device bounds maps to 0,0, and then a scaling
    // matrix to normalized coords.
    SkMatrix maskMatrix = SkMatrix::MakeTrans(SkIntToScalar(-textureOriginInDeviceSpace.fX),
                                              SkIntToScalar(-textureOriginInDeviceSpace.fY));
    maskMatrix.preConcat(viewMatrix);
    paint.addCoverageFragmentProcessor(GrSimpleTextureEffect::Make(
            std::move(proxy), maskMatrix, GrSamplerState::Filter::kNearest));
    DrawNonAARect(renderTargetContext, std::move(paint), userStencilSettings, clip, SkMatrix::I(),
                  dstRect, invert);
}

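// Creates an uninstantiated A8 texture proxy to hold the software-rendered mask; its contents
// are supplied later by a deferred uploader once a worker thread has rasterized the path.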
static sk_sp<GrTextureProxy> make_deferred_mask_texture_proxy(GrRecordingContext* context,
                                                              SkBackingFit fit,
                                                              int width, int height) {
    GrProxyProvider* proxyProvider = context->priv().proxyProvider();
    const GrCaps* caps = context->priv().caps();

    GrSurfaceDesc desc;
    desc.fWidth = width;
    desc.fHeight = height;
    desc.fConfig = kAlpha_8_GrPixelConfig;

    const GrBackendFormat format = caps->getDefaultBackendFormat(GrColorType::kAlpha_8,
                                                                 GrRenderable::kNo);

    return proxyProvider->createProxy(format, desc, GrRenderable::kNo, 1, kTopLeft_GrSurfaceOrigin,
                                      fit, SkBudgeted::kYes, GrProtected::kNo);
}

namespace {

/**
 * Payload class for use with GrTDeferredProxyUploader. The software path renderer only draws
 * a single path into the mask texture. This stores all of the information needed by the worker
 * thread's call to drawShape (see below, in onDrawPath).
 */
class SoftwarePathData {
public:
    SoftwarePathData(const SkIRect& maskBounds, const SkMatrix& viewMatrix, const GrShape& shape,
                     GrAA aa)
            : fMaskBounds(maskBounds)
            , fViewMatrix(viewMatrix)
            , fShape(shape)
            , fAA(aa) {}

    const SkIRect& getMaskBounds() const { return fMaskBounds; }
    const SkMatrix* getViewMatrix() const { return &fViewMatrix; }
    const GrShape& getShape() const { return fShape; }
    GrAA getAA() const { return fAA; }

private:
    SkIRect fMaskBounds;
    SkMatrix fViewMatrix;
    GrShape fShape;
    GrAA fAA;
};

// When the SkPathRef genID changes, invalidate a corresponding GrResource described by key.
class PathInvalidator : public SkPathRef::GenIDChangeListener {
public:
    PathInvalidator(const GrUniqueKey& key, uint32_t contextUniqueID)
            : fMsg(key, contextUniqueID) {}

private:
    GrUniqueKeyInvalidatedMessage fMsg;

    void onChange() override {
        SkMessageBus<GrUniqueKeyInvalidatedMessage>::Post(fMsg);
    }
};

}  // namespace

////////////////////////////////////////////////////////////////////////////////
// return true on success; false on failure
bool GrSoftwarePathRenderer::onDrawPath(const DrawPathArgs& args) {
    GR_AUDIT_TRAIL_AUTO_FRAME(args.fRenderTargetContext->auditTrail(),
                              "GrSoftwarePathRenderer::onDrawPath");
    if (!fProxyProvider) {
        return false;
    }

    SkASSERT(!args.fShape->style().applies());
    // We really need to know if the shape will be inverse filled or not.
    // If the path is hairline, ignore inverse fill.
    bool inverseFilled = args.fShape->inverseFilled() &&
                         !IsStrokeHairlineOrEquivalent(args.fShape->style(),
                                                       *args.fViewMatrix, nullptr);

    SkIRect unclippedDevShapeBounds, clippedDevShapeBounds, devClipBounds;
    // To prevent overloading the cache with entries during animations we limit the cache of masks
    // to cases where the matrix preserves axis alignment.
    bool useCache = fAllowCaching && !inverseFilled && args.fViewMatrix->preservesAxisAlignment() &&
                    args.fShape->hasUnstyledKey() && (GrAAType::kCoverage == args.fAAType);

    if (!GetShapeAndClipBounds(args.fRenderTargetContext,
                               *args.fClip, *args.fShape,
                               *args.fViewMatrix, &unclippedDevShapeBounds,
                               &clippedDevShapeBounds,
                               &devClipBounds)) {
        if (inverseFilled) {
            DrawAroundInvPath(args.fRenderTargetContext, std::move(args.fPaint),
                              *args.fUserStencilSettings, *args.fClip, *args.fViewMatrix,
                              devClipBounds, unclippedDevShapeBounds);
        }
        return true;
    }

    const SkIRect* boundsForMask = &clippedDevShapeBounds;
    if (useCache) {
        // Use the cache only if >50% of the path is visible.
        int unclippedWidth = unclippedDevShapeBounds.width();
        int unclippedHeight = unclippedDevShapeBounds.height();
        int64_t unclippedArea = sk_64_mul(unclippedWidth, unclippedHeight);
        int64_t clippedArea = sk_64_mul(clippedDevShapeBounds.width(),
                                        clippedDevShapeBounds.height());
        int maxTextureSize = args.fRenderTargetContext->caps()->maxTextureSize();
        if (unclippedArea > 2 * clippedArea || unclippedWidth > maxTextureSize ||
            unclippedHeight > maxTextureSize) {
            useCache = false;
        } else {
            boundsForMask = &unclippedDevShapeBounds;
        }
    }

    GrUniqueKey maskKey;
    if (useCache) {
        // We require the upper left 2x2 of the matrix to match exactly for a cache hit.
        SkScalar sx = args.fViewMatrix->get(SkMatrix::kMScaleX);
        SkScalar sy = args.fViewMatrix->get(SkMatrix::kMScaleY);
        SkScalar kx = args.fViewMatrix->get(SkMatrix::kMSkewX);
        SkScalar ky = args.fViewMatrix->get(SkMatrix::kMSkewY);
        static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();
        GrUniqueKey::Builder builder(&maskKey, kDomain, 5 + args.fShape->unstyledKeySize(),
                                     "SW Path Mask");
#if defined(SK_BUILD_FOR_ANDROID_FRAMEWORK) || defined(OHOS_STANDARD_SYSTEM)
        // Fractional translate does not affect caching on Android. This is done for better cache
        // hit ratio and speed, but it is matching HWUI behavior, which doesn't consider the matrix
        // at all when caching paths.
        SkFixed fracX = 0;
        SkFixed fracY = 0;
#else
        SkScalar tx = args.fViewMatrix->get(SkMatrix::kMTransX);
        SkScalar ty = args.fViewMatrix->get(SkMatrix::kMTransY);
        // Allow 8 bits each in x and y of subpixel positioning.
        SkFixed fracX = SkScalarToFixed(SkScalarFraction(tx)) & 0x0000FF00;
        SkFixed fracY = SkScalarToFixed(SkScalarFraction(ty)) & 0x0000FF00;
#endif
        builder[0] = SkFloat2Bits(sx);
        builder[1] = SkFloat2Bits(sy);
        builder[2] = SkFloat2Bits(kx);
        builder[3] = SkFloat2Bits(ky);
        // Distinguish between hairline and filled paths. For hairlines, we also need to include
        // the cap. (SW grows hairlines by 0.5 pixel with round and square caps). Note that
        // stroke-and-fill of hairlines is turned into pure fill by SkStrokeRec, so this covers
        // all cases we might see.
        uint32_t styleBits = args.fShape->style().isSimpleHairline() ?
                             ((args.fShape->style().strokeRec().getCap() << 1) | 1) : 0;
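        // Pack the final key word as: bits 16+ = styleBits, bits 8-15 = x subpixel fraction,
        // bits 0-7 = y subpixel fraction.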
        builder[4] = fracX | (fracY >> 8) | (styleBits << 16);
        args.fShape->writeUnstyledKey(&builder[5]);
    }

    sk_sp<GrTextureProxy> proxy;
    if (useCache) {
        proxy = fProxyProvider->findOrCreateProxyByUniqueKey(maskKey, GrColorType::kAlpha_8,
                                                             kTopLeft_GrSurfaceOrigin);
    }
    if (!proxy) {
        SkBackingFit fit = useCache ? SkBackingFit::kExact : SkBackingFit::kApprox;
        GrAA aa = GrAA(GrAAType::kCoverage == args.fAAType);

        SkTaskGroup* taskGroup = nullptr;
        if (auto direct = args.fContext->priv().asDirectContext()) {
            taskGroup = direct->priv().getTaskGroup();
        }

        if (taskGroup) {
            proxy = make_deferred_mask_texture_proxy(args.fContext, fit,
                                                     boundsForMask->width(),
                                                     boundsForMask->height());
            if (!proxy) {
                return false;
            }

            auto uploader = skstd::make_unique<GrTDeferredProxyUploader<SoftwarePathData>>(
                    *boundsForMask, *args.fViewMatrix, *args.fShape, aa);
            GrTDeferredProxyUploader<SoftwarePathData>* uploaderRaw = uploader.get();

            auto drawAndUploadMask = [uploaderRaw] {
                TRACE_EVENT0("skia.gpu", "Threaded SW Mask Render");
                GrSWMaskHelper helper(uploaderRaw->getPixels());
                if (helper.init(uploaderRaw->data().getMaskBounds())) {
                    helper.drawShape(uploaderRaw->data().getShape(),
                                     *uploaderRaw->data().getViewMatrix(),
                                     SkRegion::kReplace_Op, uploaderRaw->data().getAA(), 0xFF);
                } else {
                    SkDEBUGFAIL("Unable to allocate SW mask.");
                }
                uploaderRaw->signalAndFreeData();
            };
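            // The lambda rasterizes the mask into the uploader's CPU-side pixel buffer on a
            // worker thread and then signals completion; the deferred uploader attached to the
            // proxy copies those pixels into the texture when the proxy is instantiated.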
            taskGroup->add(std::move(drawAndUploadMask));
            proxy->texPriv().setDeferredUploader(std::move(uploader));
        } else {
            GrSWMaskHelper helper;
            if (!helper.init(*boundsForMask)) {
                return false;
            }
            helper.drawShape(*args.fShape, *args.fViewMatrix, SkRegion::kReplace_Op, aa, 0xFF);
            proxy = helper.toTextureProxy(args.fContext, fit);
        }

        if (!proxy) {
            return false;
        }
        if (useCache) {
            SkASSERT(proxy->origin() == kTopLeft_GrSurfaceOrigin);
            fProxyProvider->assignUniqueKeyToProxy(maskKey, proxy.get());
            args.fShape->addGenIDChangeListener(
                    sk_make_sp<PathInvalidator>(maskKey, args.fContext->priv().contextID()));
        }
    }
    if (inverseFilled) {
        DrawAroundInvPath(args.fRenderTargetContext, GrPaint::Clone(args.fPaint),
                          *args.fUserStencilSettings, *args.fClip, *args.fViewMatrix, devClipBounds,
                          unclippedDevShapeBounds);
    }
    DrawToTargetWithShapeMask(
            std::move(proxy), args.fRenderTargetContext, std::move(args.fPaint),
            *args.fUserStencilSettings, *args.fClip, *args.fViewMatrix,
            SkIPoint{boundsForMask->fLeft, boundsForMask->fTop}, *boundsForMask);

    return true;
}