1 /*
2 * Copyright 2006 The Android Open Source Project
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "include/core/SkBitmap.h"
9 #include "include/core/SkMaskFilter.h"
10 #include "include/core/SkPathBuilder.h"
11 #include "include/core/SkRRect.h"
12 #include "include/core/SkStrokeRec.h"
13 #include "include/core/SkVertices.h"
14 #include "src/base/SkMathPriv.h"
15 #include "src/core/SkBlurMask.h"
16 #include "src/core/SkGpuBlurUtils.h"
17 #include "src/core/SkMaskFilterBase.h"
18 #include "src/core/SkMatrixProvider.h"
19 #include "src/core/SkRRectPriv.h"
20 #include "src/core/SkReadBuffer.h"
21 #include "src/core/SkStringUtils.h"
22 #include "src/core/SkWriteBuffer.h"
23
24 #if defined(SK_GANESH)
25 #include "include/gpu/GrRecordingContext.h"
26 #include "src/core/SkRuntimeEffectPriv.h"
27 #include "src/gpu/SkBackingFit.h"
28 #include "src/gpu/ganesh/GrCaps.h"
29 #include "src/gpu/ganesh/GrFragmentProcessor.h"
30 #include "src/gpu/ganesh/GrRecordingContextPriv.h"
31 #include "src/gpu/ganesh/GrResourceProvider.h"
32 #include "src/gpu/ganesh/GrShaderCaps.h"
33 #include "src/gpu/ganesh/GrStyle.h"
34 #include "src/gpu/ganesh/GrTextureProxy.h"
35 #include "src/gpu/ganesh/GrThreadSafeCache.h"
36 #include "src/gpu/ganesh/SkGr.h"
37 #include "src/gpu/ganesh/SurfaceDrawContext.h"
38 #include "src/gpu/ganesh/effects/GrBlendFragmentProcessor.h"
39 #include "src/gpu/ganesh/effects/GrMatrixEffect.h"
40 #include "src/gpu/ganesh/effects/GrSkSLFP.h"
41 #include "src/gpu/ganesh/effects/GrTextureEffect.h"
42 #include "src/gpu/ganesh/geometry/GrStyledShape.h"
43 #include "src/gpu/ganesh/glsl/GrGLSLFragmentShaderBuilder.h"
44 #include "src/gpu/ganesh/glsl/GrGLSLProgramDataManager.h"
45 #include "src/gpu/ganesh/glsl/GrGLSLUniformHandler.h"
46 #endif // defined(SK_GANESH)
47
48 using namespace skia_private;
49
50 class SkBlurMaskFilterImpl : public SkMaskFilterBase {
51 public:
52 SkBlurMaskFilterImpl(SkScalar sigma, SkBlurStyle, bool respectCTM);
53
54 // overrides from SkMaskFilter
55 SkMask::Format getFormat() const override;
56 bool filterMask(SkMask* dst, const SkMask& src, const SkMatrix&,
57 SkIPoint* margin) const override;
58
59 #if defined(SK_GANESH)
60 bool canFilterMaskGPU(const GrStyledShape& shape,
61 const SkIRect& devSpaceShapeBounds,
62 const SkIRect& clipBounds,
63 const SkMatrix& ctm,
64 SkIRect* maskRect) const override;
65 bool directFilterMaskGPU(GrRecordingContext*,
66 skgpu::v1::SurfaceDrawContext*,
67 GrPaint&&,
68 const GrClip*,
69 const SkMatrix& viewMatrix,
70 const GrStyledShape&) const override;
71 GrSurfaceProxyView filterMaskGPU(GrRecordingContext*,
72 GrSurfaceProxyView srcView,
73 GrColorType srcColorType,
74 SkAlphaType srcAlphaType,
75 const SkMatrix& ctm,
76 const SkIRect& maskRect) const override;
77 #endif
78
79 void computeFastBounds(const SkRect&, SkRect*) const override;
80 bool asABlur(BlurRec*) const override;
81
82
83 protected:
84 FilterReturn filterRectsToNine(const SkRect[], int count, const SkMatrix&,
85 const SkIRect& clipBounds,
86 NinePatch*) const override;
87
88 FilterReturn filterRRectToNine(const SkRRect&, const SkMatrix&,
89 const SkIRect& clipBounds,
90 NinePatch*) const override;
91
92 bool filterRectMask(SkMask* dstM, const SkRect& r, const SkMatrix& matrix,
93 SkIPoint* margin, SkMask::CreateMode createMode) const;
94 bool filterRRectMask(SkMask* dstM, const SkRRect& r, const SkMatrix& matrix,
95 SkIPoint* margin, SkMask::CreateMode createMode) const;
96
97 bool ignoreXform() const { return !fRespectCTM; }
98
99 private:
100 SK_FLATTENABLE_HOOKS(SkBlurMaskFilterImpl)
101 // To avoid unseemly allocation requests (esp. for finite platforms like
102 // handset) we limit the radius to something manageable (as opposed to
103 // a request like 10,000).
104 static const SkScalar kMAX_BLUR_SIGMA;
105
106 SkScalar fSigma;
107 SkBlurStyle fBlurStyle;
108 bool fRespectCTM;
109
110 SkBlurMaskFilterImpl(SkReadBuffer&);
111 void flatten(SkWriteBuffer&) const override;
112
113 SkScalar computeXformedSigma(const SkMatrix& ctm) const {
114 SkScalar xformedSigma = this->ignoreXform() ? fSigma : ctm.mapRadius(fSigma);
115 return std::min(xformedSigma, kMAX_BLUR_SIGMA);
116 }
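// As a rough illustration (numbers chosen arbitrarily): with fSigma = 4 and a CTM that
// scales uniformly by 2, mapRadius() yields a device-space sigma of 8; a CTM scaling by
// 100 would yield 400, which the std::min above clamps to kMAX_BLUR_SIGMA (128).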
117
118 friend class SkBlurMaskFilter;
119
120 using INHERITED = SkMaskFilter;
121 friend void sk_register_blur_maskfilter_createproc();
122 };
123
124 const SkScalar SkBlurMaskFilterImpl::kMAX_BLUR_SIGMA = SkIntToScalar(128);
125
126 ///////////////////////////////////////////////////////////////////////////////
127
128 SkBlurMaskFilterImpl::SkBlurMaskFilterImpl(SkScalar sigma, SkBlurStyle style, bool respectCTM)
129 : fSigma(sigma)
130 , fBlurStyle(style)
131 , fRespectCTM(respectCTM) {
132 SkASSERT(fSigma > 0);
133 SkASSERT((unsigned)style <= kLastEnum_SkBlurStyle);
134 }
135
136 SkMask::Format SkBlurMaskFilterImpl::getFormat() const {
137 return SkMask::kA8_Format;
138 }
139
140 bool SkBlurMaskFilterImpl::asABlur(BlurRec* rec) const {
141 if (this->ignoreXform()) {
142 return false;
143 }
144
145 if (rec) {
146 rec->fSigma = fSigma;
147 rec->fStyle = fBlurStyle;
148 }
149 return true;
150 }
151
152 bool SkBlurMaskFilterImpl::filterMask(SkMask* dst, const SkMask& src,
153 const SkMatrix& matrix,
154 SkIPoint* margin) const {
155 SkScalar sigma = this->computeXformedSigma(matrix);
156 return SkBlurMask::BoxBlur(dst, src, sigma, fBlurStyle, margin);
157 }
158
159 bool SkBlurMaskFilterImpl::filterRectMask(SkMask* dst, const SkRect& r,
160 const SkMatrix& matrix,
161 SkIPoint* margin, SkMask::CreateMode createMode) const {
162 SkScalar sigma = computeXformedSigma(matrix);
163
164 return SkBlurMask::BlurRect(sigma, dst, r, fBlurStyle, margin, createMode);
165 }
166
167 bool SkBlurMaskFilterImpl::filterRRectMask(SkMask* dst, const SkRRect& r,
168 const SkMatrix& matrix,
169 SkIPoint* margin, SkMask::CreateMode createMode) const {
170 SkScalar sigma = computeXformedSigma(matrix);
171
172 return SkBlurMask::BlurRRect(sigma, dst, r, fBlurStyle, margin, createMode);
173 }
174
175 #include "include/core/SkCanvas.h"
176
177 static bool prepare_to_draw_into_mask(const SkRect& bounds, SkMask* mask) {
178 SkASSERT(mask != nullptr);
179
180 mask->fBounds = bounds.roundOut();
181 mask->fRowBytes = SkAlign4(mask->fBounds.width());
182 mask->fFormat = SkMask::kA8_Format;
183 const size_t size = mask->computeImageSize();
184 mask->fImage = SkMask::AllocImage(size, SkMask::kZeroInit_Alloc);
185 if (nullptr == mask->fImage) {
186 return false;
187 }
188 return true;
189 }
190
191 static bool draw_rrect_into_mask(const SkRRect rrect, SkMask* mask) {
192 if (!prepare_to_draw_into_mask(rrect.rect(), mask)) {
193 return false;
194 }
195
196 // FIXME: This code duplicates code in draw_rects_into_mask, below. Is there a
197 // clean way to share more code?
198 SkBitmap bitmap;
199 bitmap.installMaskPixels(*mask);
200
201 SkCanvas canvas(bitmap);
202 canvas.translate(-SkIntToScalar(mask->fBounds.left()),
203 -SkIntToScalar(mask->fBounds.top()));
204
205 SkPaint paint;
206 paint.setAntiAlias(true);
207 canvas.drawRRect(rrect, paint);
208 return true;
209 }
210
211 static bool draw_rects_into_mask(const SkRect rects[], int count, SkMask* mask) {
212 if (!prepare_to_draw_into_mask(rects[0], mask)) {
213 return false;
214 }
215
216 SkBitmap bitmap;
217 bitmap.installPixels(SkImageInfo::Make(mask->fBounds.width(),
218 mask->fBounds.height(),
219 kAlpha_8_SkColorType,
220 kPremul_SkAlphaType),
221 mask->fImage, mask->fRowBytes);
222
223 SkCanvas canvas(bitmap);
224 canvas.translate(-SkIntToScalar(mask->fBounds.left()),
225 -SkIntToScalar(mask->fBounds.top()));
226
227 SkPaint paint;
228 paint.setAntiAlias(true);
229
230 if (1 == count) {
231 canvas.drawRect(rects[0], paint);
232 } else {
233 // todo: do I need a fast way to do this?
234 SkPath path = SkPathBuilder().addRect(rects[0])
235 .addRect(rects[1])
236 .setFillType(SkPathFillType::kEvenOdd)
237 .detach();
238 canvas.drawPath(path, paint);
239 }
240 return true;
241 }
242
243 static bool rect_exceeds(const SkRect& r, SkScalar v) {
244 return r.fLeft < -v || r.fTop < -v || r.fRight > v || r.fBottom > v ||
245 r.width() > v || r.height() > v;
246 }
247
248 #include "src/core/SkMaskCache.h"
249
250 static SkCachedData* copy_mask_to_cacheddata(SkMask* mask) {
251 const size_t size = mask->computeTotalImageSize();
252 SkCachedData* data = SkResourceCache::NewCachedData(size);
253 if (data) {
254 memcpy(data->writable_data(), mask->fImage, size);
255 SkMask::FreeImage(mask->fImage);
256 mask->fImage = (uint8_t*)data->data();
257 }
258 return data;
259 }
260
261 static SkCachedData* find_cached_rrect(SkMask* mask, SkScalar sigma, SkBlurStyle style,
262 const SkRRect& rrect) {
263 return SkMaskCache::FindAndRef(sigma, style, rrect, mask);
264 }
265
266 static SkCachedData* add_cached_rrect(SkMask* mask, SkScalar sigma, SkBlurStyle style,
267 const SkRRect& rrect) {
268 SkCachedData* cache = copy_mask_to_cacheddata(mask);
269 if (cache) {
270 SkMaskCache::Add(sigma, style, rrect, *mask, cache);
271 }
272 return cache;
273 }
274
275 static SkCachedData* find_cached_rects(SkMask* mask, SkScalar sigma, SkBlurStyle style,
276 const SkRect rects[], int count) {
277 return SkMaskCache::FindAndRef(sigma, style, rects, count, mask);
278 }
279
280 static SkCachedData* add_cached_rects(SkMask* mask, SkScalar sigma, SkBlurStyle style,
281 const SkRect rects[], int count) {
282 SkCachedData* cache = copy_mask_to_cacheddata(mask);
283 if (cache) {
284 SkMaskCache::Add(sigma, style, rects, count, *mask, cache);
285 }
286 return cache;
287 }
288
289 static const bool c_analyticBlurRRect{true};
290
291 SkMaskFilterBase::FilterReturn
292 SkBlurMaskFilterImpl::filterRRectToNine(const SkRRect& rrect, const SkMatrix& matrix,
293 const SkIRect& clipBounds,
294 NinePatch* patch) const {
295 SkASSERT(patch != nullptr);
296 switch (rrect.getType()) {
297 case SkRRect::kEmpty_Type:
298 // Nothing to draw.
299 return kFalse_FilterReturn;
300
301 case SkRRect::kRect_Type:
302 // We should have caught this earlier.
303 SkASSERT(false);
304 [[fallthrough]];
305 case SkRRect::kOval_Type:
306 // The nine patch special case does not handle ovals, and we
307 // already have code for rectangles.
308 return kUnimplemented_FilterReturn;
309
310 // These three can take advantage of this fast path.
311 case SkRRect::kSimple_Type:
312 case SkRRect::kNinePatch_Type:
313 case SkRRect::kComplex_Type:
314 break;
315 }
316
317 // TODO: report correct metrics for innerstyle, where we do not grow the
318 // total bounds, but we do need to inset by the size of our blur radius
319 if (kInner_SkBlurStyle == fBlurStyle) {
320 return kUnimplemented_FilterReturn;
321 }
322
323 // TODO: take clipBounds into account to limit our coordinates up front
324 // for now, just skip too-large src rects (to take the old code path).
325 if (rect_exceeds(rrect.rect(), SkIntToScalar(32767))) {
326 return kUnimplemented_FilterReturn;
327 }
328
329 SkIPoint margin;
330 SkMask srcM, dstM;
331 srcM.fBounds = rrect.rect().roundOut();
332 srcM.fFormat = SkMask::kA8_Format;
333 srcM.fRowBytes = 0;
334
335 bool filterResult = false;
336 if (c_analyticBlurRRect) {
337 // special case for fast round rect blur
338 // don't actually do the blur the first time, just compute the correct size
339 filterResult = this->filterRRectMask(&dstM, rrect, matrix, &margin,
340 SkMask::kJustComputeBounds_CreateMode);
341 }
342
343 if (!filterResult) {
344 filterResult = this->filterMask(&dstM, srcM, matrix, &margin);
345 }
346
347 if (!filterResult) {
348 return kFalse_FilterReturn;
349 }
350
351 // Now figure out the appropriate width and height of the smaller round rectangle
352 // to stretch. It will take into account the larger radius per side as well as double
353 // the margin, to account for inner and outer blur.
354 const SkVector& UL = rrect.radii(SkRRect::kUpperLeft_Corner);
355 const SkVector& UR = rrect.radii(SkRRect::kUpperRight_Corner);
356 const SkVector& LR = rrect.radii(SkRRect::kLowerRight_Corner);
357 const SkVector& LL = rrect.radii(SkRRect::kLowerLeft_Corner);
358
359 const SkScalar leftUnstretched = std::max(UL.fX, LL.fX) + SkIntToScalar(2 * margin.fX);
360 const SkScalar rightUnstretched = std::max(UR.fX, LR.fX) + SkIntToScalar(2 * margin.fX);
361
362 // Extra space in the middle to ensure an unchanging piece for stretching. Use 3 to cover
363 // any fractional space on either side plus 1 for the part to stretch.
364 const SkScalar stretchSize = SkIntToScalar(3);
365
366 const SkScalar totalSmallWidth = leftUnstretched + rightUnstretched + stretchSize;
367 if (totalSmallWidth >= rrect.rect().width()) {
368 // There is no valid piece to stretch.
369 return kUnimplemented_FilterReturn;
370 }
371
372 const SkScalar topUnstretched = std::max(UL.fY, UR.fY) + SkIntToScalar(2 * margin.fY);
373 const SkScalar bottomUnstretched = std::max(LL.fY, LR.fY) + SkIntToScalar(2 * margin.fY);
374
375 const SkScalar totalSmallHeight = topUnstretched + bottomUnstretched + stretchSize;
376 if (totalSmallHeight >= rrect.rect().height()) {
377 // There is no valid piece to stretch.
378 return kUnimplemented_FilterReturn;
379 }
380
381 SkRect smallR = SkRect::MakeWH(totalSmallWidth, totalSmallHeight);
382
383 SkRRect smallRR;
384 SkVector radii[4];
385 radii[SkRRect::kUpperLeft_Corner] = UL;
386 radii[SkRRect::kUpperRight_Corner] = UR;
387 radii[SkRRect::kLowerRight_Corner] = LR;
388 radii[SkRRect::kLowerLeft_Corner] = LL;
389 smallRR.setRectRadii(smallR, radii);
390
391 const SkScalar sigma = this->computeXformedSigma(matrix);
392 SkCachedData* cache = find_cached_rrect(&patch->fMask, sigma, fBlurStyle, smallRR);
393 if (!cache) {
394 bool analyticBlurWorked = false;
395 if (c_analyticBlurRRect) {
396 analyticBlurWorked =
397 this->filterRRectMask(&patch->fMask, smallRR, matrix, &margin,
398 SkMask::kComputeBoundsAndRenderImage_CreateMode);
399 }
400
401 if (!analyticBlurWorked) {
402 if (!draw_rrect_into_mask(smallRR, &srcM)) {
403 return kFalse_FilterReturn;
404 }
405
406 SkAutoMaskFreeImage amf(srcM.fImage);
407
408 if (!this->filterMask(&patch->fMask, srcM, matrix, &margin)) {
409 return kFalse_FilterReturn;
410 }
411 }
412 cache = add_cached_rrect(&patch->fMask, sigma, fBlurStyle, smallRR);
413 }
414
415 patch->fMask.fBounds.offsetTo(0, 0);
416 patch->fOuterRect = dstM.fBounds;
417 patch->fCenter.fX = SkScalarCeilToInt(leftUnstretched) + 1;
418 patch->fCenter.fY = SkScalarCeilToInt(topUnstretched) + 1;
419 SkASSERT(nullptr == patch->fCache);
420 patch->fCache = cache; // transfer ownership to patch
421 return kTrue_FilterReturn;
422 }
423
424 // Use the faster analytic blur approach for ninepatch rects
425 static const bool c_analyticBlurNinepatch{true};
426
427 SkMaskFilterBase::FilterReturn
428 SkBlurMaskFilterImpl::filterRectsToNine(const SkRect rects[], int count,
429 const SkMatrix& matrix,
430 const SkIRect& clipBounds,
431 NinePatch* patch) const {
432 if (count < 1 || count > 2) {
433 return kUnimplemented_FilterReturn;
434 }
435
436 // TODO: report correct metrics for innerstyle, where we do not grow the
437 // total bounds, but we do need to inset by the size of our blur radius
438 if (kInner_SkBlurStyle == fBlurStyle || kOuter_SkBlurStyle == fBlurStyle) {
439 return kUnimplemented_FilterReturn;
440 }
441
442 // TODO: take clipBounds into account to limit our coordinates up front
443 // for now, just skip too-large src rects (to take the old code path).
444 if (rect_exceeds(rects[0], SkIntToScalar(32767))) {
445 return kUnimplemented_FilterReturn;
446 }
447
448 SkIPoint margin;
449 SkMask srcM, dstM;
450 srcM.fBounds = rects[0].roundOut();
451 srcM.fFormat = SkMask::kA8_Format;
452 srcM.fRowBytes = 0;
453
454 bool filterResult = false;
455 if (count == 1 && c_analyticBlurNinepatch) {
456 // special case for fast rect blur
457 // don't actually do the blur the first time, just compute the correct size
458 filterResult = this->filterRectMask(&dstM, rects[0], matrix, &margin,
459 SkMask::kJustComputeBounds_CreateMode);
460 } else {
461 filterResult = this->filterMask(&dstM, srcM, matrix, &margin);
462 }
463
464 if (!filterResult) {
465 return kFalse_FilterReturn;
466 }
467
468 /*
469 * smallR is the smallest version of 'rect' that will still guarantee that
470 * we get the same blur results on all edges, plus 1 center row/col that is
471 * representative of the extendible/stretchable edges of the ninepatch.
472 * Since our actual edge may be fractional we inset 1 more to be sure we
473 * don't miss any interior blur.
474 * x is an added pixel of blur, and { and } are the (fractional) edge
475 * pixels from the original rect.
476 *
477 * x x { x x .... x x } x x
478 *
479 * Thus, in this case, we inset by a total of 5 (on each side) beginning
480 * with our outer-rect (dstM.fBounds)
481 */
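// Illustrative numbers (assumed, just to make the inset concrete): with a margin of 3 on
// each side, dstM is 6 wider than srcM, so smallW below starts at 8 and becomes 9 after
// the later +1; a 100-wide integral source rect is then inset by dx = 91, leaving a
// 9-wide ninepatch source to blur and stretch.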
482 SkRect smallR[2];
483 SkIPoint center;
484
485 // +2 is from +1 for each edge (to account for possible fractional edges)
486 int smallW = dstM.fBounds.width() - srcM.fBounds.width() + 2;
487 int smallH = dstM.fBounds.height() - srcM.fBounds.height() + 2;
488 SkIRect innerIR;
489
490 if (1 == count) {
491 innerIR = srcM.fBounds;
492 center.set(smallW, smallH);
493 } else {
494 SkASSERT(2 == count);
495 rects[1].roundIn(&innerIR);
496 center.set(smallW + (innerIR.left() - srcM.fBounds.left()),
497 smallH + (innerIR.top() - srcM.fBounds.top()));
498 }
499
500 // +1 so we get a clean, stretchable, center row/col
501 smallW += 1;
502 smallH += 1;
503
504 // we want the inset amounts to be integral, so we don't change any
505 // fractional phase on the fRight or fBottom of our smallR.
506 const SkScalar dx = SkIntToScalar(innerIR.width() - smallW);
507 const SkScalar dy = SkIntToScalar(innerIR.height() - smallH);
508 if (dx < 0 || dy < 0) {
509 // we're too small, relative to our blur, to break into nine-patch,
510 // so we ask to have our normal filterMask() be called.
511 return kUnimplemented_FilterReturn;
512 }
513
514 smallR[0].setLTRB(rects[0].left(), rects[0].top(),
515 rects[0].right() - dx, rects[0].bottom() - dy);
516 if (smallR[0].width() < 2 || smallR[0].height() < 2) {
517 return kUnimplemented_FilterReturn;
518 }
519 if (2 == count) {
520 smallR[1].setLTRB(rects[1].left(), rects[1].top(),
521 rects[1].right() - dx, rects[1].bottom() - dy);
522 SkASSERT(!smallR[1].isEmpty());
523 }
524
525 const SkScalar sigma = this->computeXformedSigma(matrix);
526 SkCachedData* cache = find_cached_rects(&patch->fMask, sigma, fBlurStyle, smallR, count);
527 if (!cache) {
528 if (count > 1 || !c_analyticBlurNinepatch) {
529 if (!draw_rects_into_mask(smallR, count, &srcM)) {
530 return kFalse_FilterReturn;
531 }
532
533 SkAutoMaskFreeImage amf(srcM.fImage);
534
535 if (!this->filterMask(&patch->fMask, srcM, matrix, &margin)) {
536 return kFalse_FilterReturn;
537 }
538 } else {
539 if (!this->filterRectMask(&patch->fMask, smallR[0], matrix, &margin,
540 SkMask::kComputeBoundsAndRenderImage_CreateMode)) {
541 return kFalse_FilterReturn;
542 }
543 }
544 cache = add_cached_rects(&patch->fMask, sigma, fBlurStyle, smallR, count);
545 }
546 patch->fMask.fBounds.offsetTo(0, 0);
547 patch->fOuterRect = dstM.fBounds;
548 patch->fCenter = center;
549 SkASSERT(nullptr == patch->fCache);
550 patch->fCache = cache; // transfer ownership to patch
551 return kTrue_FilterReturn;
552 }
553
554 void SkBlurMaskFilterImpl::computeFastBounds(const SkRect& src,
555 SkRect* dst) const {
556 // TODO: if we're doing kInner blur, should we return a different outset?
557 // i.e. pad == 0 ?
558
559 SkScalar pad = 3.0f * fSigma;
560
561 dst->setLTRB(src.fLeft - pad, src.fTop - pad,
562 src.fRight + pad, src.fBottom + pad);
563 }
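// For example (illustrative only): with fSigma = 5 the pad is 15, so a 100x100 src rect
// comes back from computeFastBounds() as a 130x130 rect centered on the original.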
564
565 sk_sp<SkFlattenable> SkBlurMaskFilterImpl::CreateProc(SkReadBuffer& buffer) {
566 const SkScalar sigma = buffer.readScalar();
567 SkBlurStyle style = buffer.read32LE(kLastEnum_SkBlurStyle);
568
569 uint32_t flags = buffer.read32LE(0x3); // historically we only recorded 2 bits
570 bool respectCTM = !(flags & 1); // historically we stored ignoreCTM in low bit
571
572 return SkMaskFilter::MakeBlur((SkBlurStyle)style, sigma, respectCTM);
573 }
574
575 void SkBlurMaskFilterImpl::flatten(SkWriteBuffer& buffer) const {
576 buffer.writeScalar(fSigma);
577 buffer.writeUInt(fBlurStyle);
578 buffer.writeUInt(!fRespectCTM); // historically we recorded ignoreCTM
579 }
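// Sketch of the resulting stream, as implied by the writes above:
//   [scalar fSigma][uint32 fBlurStyle][uint32 ignoreCTM flag]
// which is what CreateProc() reads back, re-deriving respectCTM from the flag's low bit.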
580
581
582 #if defined(SK_GANESH)
583
584 ///////////////////////////////////////////////////////////////////////////////
585 // Circle Blur
586 ///////////////////////////////////////////////////////////////////////////////
587
588 // Computes an unnormalized half kernel (right side). Returns the summation of all the half
589 // kernel values.
590 static float make_unnormalized_half_kernel(float* halfKernel, int halfKernelSize, float sigma) {
591 const float invSigma = 1.f / sigma;
592 const float b = -0.5f * invSigma * invSigma;
593 float tot = 0.0f;
594 // Compute half kernel values at half pixel steps out from the center.
595 float t = 0.5f;
596 for (int i = 0; i < halfKernelSize; ++i) {
597 float value = expf(t * t * b);
598 tot += value;
599 halfKernel[i] = value;
600 t += 1.f;
601 }
602 return tot;
603 }
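// Rough worked example (sigma = 2, values approximate): each entry is
//   halfKernel[i] = exp(-(i + 0.5)^2 / (2 * sigma^2)),
// i.e. ~0.969, 0.755, 0.458, 0.216, ... and 'tot' is their unnormalized sum.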
604
605 // Create a Gaussian half-kernel (right side) and a summed area table given a sigma and number
606 // of discrete steps. The half kernel is normalized to sum to 0.5.
607 static void make_half_kernel_and_summed_table(float* halfKernel,
608 float* summedHalfKernel,
609 int halfKernelSize,
610 float sigma) {
611 // The half kernel should sum to 0.5 not 1.0.
612 const float tot = 2.f * make_unnormalized_half_kernel(halfKernel, halfKernelSize, sigma);
613 float sum = 0.f;
614 for (int i = 0; i < halfKernelSize; ++i) {
615 halfKernel[i] /= tot;
616 sum += halfKernel[i];
617 summedHalfKernel[i] = sum;
618 }
619 }
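// Quick sanity check (illustrative): dividing by twice the raw sum makes the half kernel
// sum to 0.5, so summedHalfKernel[] climbs monotonically toward 0.5 at its last entry
// (modulo float rounding).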
620
621 // Applies the 1D half kernel vertically at points along the x axis to a circle centered at the
622 // origin with radius circleR.
623 void apply_kernel_in_y(float* results,
624 int numSteps,
625 float firstX,
626 float circleR,
627 int halfKernelSize,
628 const float* summedHalfKernelTable) {
629 float x = firstX;
630 for (int i = 0; i < numSteps; ++i, x += 1.f) {
631 if (x < -circleR || x > circleR) {
632 results[i] = 0;
633 continue;
634 }
635 float y = sqrtf(circleR * circleR - x * x);
636 // In the column at x we exit the circle at +y and -y.
637 // The summed table entry j actually reflects an offset of j + 0.5.
638 y -= 0.5f;
639 int yInt = SkScalarFloorToInt(y);
640 SkASSERT(yInt >= -1);
641 if (y < 0) {
642 results[i] = (y + 0.5f) * summedHalfKernelTable[0];
643 } else if (yInt >= halfKernelSize - 1) {
644 results[i] = 0.5f;
645 } else {
646 float yFrac = y - yInt;
647 results[i] = (1.f - yFrac) * summedHalfKernelTable[yInt] +
648 yFrac * summedHalfKernelTable[yInt + 1];
649 }
650 }
651 }
652
653 // Apply a Gaussian at point (evalX, 0) to a circle centered at the origin with radius circleR.
654 // This relies on having a half kernel computed for the Gaussian and a table of applications of
655 // the half kernel in y to columns at (evalX - halfKernel, evalX - halfKernel + 1, ..., evalX +
656 // halfKernel) passed in as yKernelEvaluations.
657 static uint8_t eval_at(float evalX,
658 float circleR,
659 const float* halfKernel,
660 int halfKernelSize,
661 const float* yKernelEvaluations) {
662 float acc = 0;
663
664 float x = evalX - halfKernelSize;
665 for (int i = 0; i < halfKernelSize; ++i, x += 1.f) {
666 if (x < -circleR || x > circleR) {
667 continue;
668 }
669 float verticalEval = yKernelEvaluations[i];
670 acc += verticalEval * halfKernel[halfKernelSize - i - 1];
671 }
672 for (int i = 0; i < halfKernelSize; ++i, x += 1.f) {
673 if (x < -circleR || x > circleR) {
674 continue;
675 }
676 float verticalEval = yKernelEvaluations[i + halfKernelSize];
677 acc += verticalEval * halfKernel[i];
678 }
679 // Since we applied a half kernel in y we multiply acc by 2 (the circle is symmetric about
680 // the x axis).
681 return SkUnitScalarClampToByte(2.f * acc);
682 }
683
684 // This function creates a profile of a blurred circle. It does this by computing a kernel for
685 // half the Gaussian and a matching summed area table. The summed area table is used to compute
686 // an array of vertical applications of the half kernel to the circle along the x axis. The
687 // table of y evaluations has 2 * k + n entries where k is the size of the half kernel and n is
688 // the size of the profile being computed. Then for each of the n profile entries we walk out k
689 // steps in each horizontal direction multiplying the corresponding y evaluation by the half
690 // kernel entry and sum these values to compute the profile entry.
691 static void create_circle_profile(uint8_t* weights,
692 float sigma,
693 float circleR,
694 int profileTextureWidth) {
695 const int numSteps = profileTextureWidth;
696
697 // The full kernel is 6 sigmas wide.
698 int halfKernelSize = SkScalarCeilToInt(6.0f * sigma);
699 // round up to next multiple of 2 and then divide by 2
700 halfKernelSize = ((halfKernelSize + 1) & ~1) >> 1;
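// e.g. (illustrative) sigma = 1.4: ceil(8.4) = 9, rounded up to 10 and halved to 5.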
701
702 // Number of x steps at which to apply kernel in y to cover all the profile samples in x.
703 int numYSteps = numSteps + 2 * halfKernelSize;
704
705 AutoTArray<float> bulkAlloc(halfKernelSize + halfKernelSize + numYSteps);
706 float* halfKernel = bulkAlloc.get();
707 float* summedKernel = bulkAlloc.get() + halfKernelSize;
708 float* yEvals = bulkAlloc.get() + 2 * halfKernelSize;
709 make_half_kernel_and_summed_table(halfKernel, summedKernel, halfKernelSize, sigma);
710
711 float firstX = -halfKernelSize + 0.5f;
712 apply_kernel_in_y(yEvals, numYSteps, firstX, circleR, halfKernelSize, summedKernel);
713
714 for (int i = 0; i < numSteps - 1; ++i) {
715 float evalX = i + 0.5f;
716 weights[i] = eval_at(evalX, circleR, halfKernel, halfKernelSize, yEvals + i);
717 }
718 // Ensure the tail of the Gaussian goes to zero.
719 weights[numSteps - 1] = 0;
720 }
721
722 static void create_half_plane_profile(uint8_t* profile, int profileWidth) {
723 SkASSERT(!(profileWidth & 0x1));
724 // The full kernel is 6 sigmas wide.
725 float sigma = profileWidth / 6.f;
726 int halfKernelSize = profileWidth / 2;
727
728 AutoTArray<float> halfKernel(halfKernelSize);
729
730 // The half kernel should sum to 0.5.
731 const float tot = 2.f * make_unnormalized_half_kernel(halfKernel.get(), halfKernelSize, sigma);
732 float sum = 0.f;
733 // Populate the profile from the right edge to the middle.
734 for (int i = 0; i < halfKernelSize; ++i) {
735 halfKernel[halfKernelSize - i - 1] /= tot;
736 sum += halfKernel[halfKernelSize - i - 1];
737 profile[profileWidth - i - 1] = SkUnitScalarClampToByte(sum);
738 }
739 // Populate the profile from the middle to the left edge (by flipping the half kernel and
740 // continuing the summation).
741 for (int i = 0; i < halfKernelSize; ++i) {
742 sum += halfKernel[i];
743 profile[halfKernelSize - i - 1] = SkUnitScalarClampToByte(sum);
744 }
745 // Ensure tail goes to 0.
746 profile[profileWidth - 1] = 0;
747 }
748
749 static std::unique_ptr<GrFragmentProcessor> create_profile_effect(GrRecordingContext* rContext,
750 const SkRect& circle,
751 float sigma,
752 float* solidRadius,
753 float* textureRadius) {
754 float circleR = circle.width() / 2.0f;
755 if (!sk_float_isfinite(circleR) || circleR < SK_ScalarNearlyZero) {
756 return nullptr;
757 }
758
759 auto threadSafeCache = rContext->priv().threadSafeCache();
760
761 // Profile textures are cached by the ratio of sigma to circle radius and by the size of the
762 // profile texture (binned by powers of 2).
763 SkScalar sigmaToCircleRRatio = sigma / circleR;
764 // When sigma is really small this becomes equivalent to convolving a Gaussian with a
765 // half-plane. Similarly, in the extreme high-ratio cases the circle becomes a point WRT the
766 // Gaussian and the profile texture is just a Gaussian evaluation. However, we haven't yet
767 // implemented this latter optimization.
768 sigmaToCircleRRatio = std::min(sigmaToCircleRRatio, 8.f);
769 SkFixed sigmaToCircleRRatioFixed;
770 static const SkScalar kHalfPlaneThreshold = 0.1f;
771 bool useHalfPlaneApprox = false;
772 if (sigmaToCircleRRatio <= kHalfPlaneThreshold) {
773 useHalfPlaneApprox = true;
774 sigmaToCircleRRatioFixed = 0;
775 *solidRadius = circleR - 3 * sigma;
776 *textureRadius = 6 * sigma;
777 } else {
778 // Convert to fixed point for the key.
779 sigmaToCircleRRatioFixed = SkScalarToFixed(sigmaToCircleRRatio);
780 // We shave off some bits to reduce the number of unique entries. We could probably
781 // shave off more than we do.
782 sigmaToCircleRRatioFixed &= ~0xff;
783 sigmaToCircleRRatio = SkFixedToScalar(sigmaToCircleRRatioFixed);
784 sigma = circleR * sigmaToCircleRRatio;
785 *solidRadius = 0;
786 *textureRadius = circleR + 3 * sigma;
787 }
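// Illustrative numbers (assumed): sigma = 2 with circleR = 30 gives a ratio of ~0.067 <= 0.1,
// so the half-plane path sets solidRadius = 24 and textureRadius = 12; with circleR = 10 the
// ratio is 0.2, so solidRadius = 0 and textureRadius = circleR + 3 * sigma' (~15.9), where
// sigma' is re-derived from the quantized ratio above.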
788
789 static constexpr int kProfileTextureWidth = 512;
790 // This would be kProfileTextureWidth/textureRadius if it weren't for the fact that we do
791 // the calculation of the profile coord in a coord space that has already been scaled by
792 // 1 / textureRadius. This is done to avoid overflow in length().
793 SkMatrix texM = SkMatrix::Scale(kProfileTextureWidth, 1.f);
794
795 static const skgpu::UniqueKey::Domain kDomain = skgpu::UniqueKey::GenerateDomain();
796 skgpu::UniqueKey key;
797 skgpu::UniqueKey::Builder builder(&key, kDomain, 1, "1-D Circular Blur");
798 builder[0] = sigmaToCircleRRatioFixed;
799 builder.finish();
800
801 GrSurfaceProxyView profileView = threadSafeCache->find(key);
802 if (profileView) {
803 SkASSERT(profileView.asTextureProxy());
804 SkASSERT(profileView.origin() == kTopLeft_GrSurfaceOrigin);
805 return GrTextureEffect::Make(std::move(profileView), kPremul_SkAlphaType, texM);
806 }
807
808 SkBitmap bm;
809 if (!bm.tryAllocPixels(SkImageInfo::MakeA8(kProfileTextureWidth, 1))) {
810 return nullptr;
811 }
812
813 if (useHalfPlaneApprox) {
814 create_half_plane_profile(bm.getAddr8(0, 0), kProfileTextureWidth);
815 } else {
816 // Rescale params to the size of the texture we're creating.
817 SkScalar scale = kProfileTextureWidth / *textureRadius;
818 create_circle_profile(
819 bm.getAddr8(0, 0), sigma * scale, circleR * scale, kProfileTextureWidth);
820 }
821 bm.setImmutable();
822
823 profileView = std::get<0>(GrMakeUncachedBitmapProxyView(rContext, bm));
824 if (!profileView) {
825 return nullptr;
826 }
827
828 profileView = threadSafeCache->add(key, profileView);
829 return GrTextureEffect::Make(std::move(profileView), kPremul_SkAlphaType, texM);
830 }
831
832 static std::unique_ptr<GrFragmentProcessor> make_circle_blur(GrRecordingContext* context,
833 const SkRect& circle,
834 float sigma) {
835 if (SkGpuBlurUtils::IsEffectivelyZeroSigma(sigma)) {
836 return nullptr;
837 }
838
839 float solidRadius;
840 float textureRadius;
841 std::unique_ptr<GrFragmentProcessor> profile =
842 create_profile_effect(context, circle, sigma, &solidRadius, &textureRadius);
843 if (!profile) {
844 return nullptr;
845 }
846
847 static const SkRuntimeEffect* effect = SkMakeRuntimeEffect(SkRuntimeEffect::MakeForShader,
848 "uniform shader blurProfile;"
849 "uniform half4 circleData;"
850
851 "half4 main(float2 xy) {"
852 // We just want to compute "(length(vec) - circleData.z + 0.5) * circleData.w" but need
853 // to rearrange to avoid passing large values to length() that would overflow.
854 "half2 vec = half2((sk_FragCoord.xy - circleData.xy) * circleData.w);"
855 "half dist = length(vec) + (0.5 - circleData.z) * circleData.w;"
856 "return blurProfile.eval(half2(dist, 0.5)).aaaa;"
857 "}"
858 );
859
860 SkV4 circleData = {circle.centerX(), circle.centerY(), solidRadius, 1.f / textureRadius};
861 auto circleBlurFP = GrSkSLFP::Make(effect, "CircleBlur", /*inputFP=*/nullptr,
862 GrSkSLFP::OptFlags::kCompatibleWithCoverageAsAlpha,
863 "blurProfile", GrSkSLFP::IgnoreOptFlags(std::move(profile)),
864 "circleData", circleData);
865 // Modulate blur with the input color.
866 return GrBlendFragmentProcessor::Make<SkBlendMode::kModulate>(std::move(circleBlurFP),
867 /*dst=*/nullptr);
868 }
869
870 ///////////////////////////////////////////////////////////////////////////////
871 // Rect Blur
872 ///////////////////////////////////////////////////////////////////////////////
873
874 static std::unique_ptr<GrFragmentProcessor> make_rect_integral_fp(GrRecordingContext* rContext,
875 float sixSigma) {
876 SkASSERT(!SkGpuBlurUtils::IsEffectivelyZeroSigma(sixSigma / 6.f));
877 auto threadSafeCache = rContext->priv().threadSafeCache();
878
879 int width = SkGpuBlurUtils::CreateIntegralTable(sixSigma, nullptr);
880
881 static const skgpu::UniqueKey::Domain kDomain = skgpu::UniqueKey::GenerateDomain();
882 skgpu::UniqueKey key;
883 skgpu::UniqueKey::Builder builder(&key, kDomain, 1, "Rect Blur Mask");
884 builder[0] = width;
885 builder.finish();
886
887 SkMatrix m = SkMatrix::Scale(width / sixSigma, 1.f);
888
889 GrSurfaceProxyView view = threadSafeCache->find(key);
890
891 if (view) {
892 SkASSERT(view.origin() == kTopLeft_GrSurfaceOrigin);
893 return GrTextureEffect::Make(
894 std::move(view), kPremul_SkAlphaType, m, GrSamplerState::Filter::kLinear);
895 }
896
897 SkBitmap bitmap;
898 if (!SkGpuBlurUtils::CreateIntegralTable(sixSigma, &bitmap)) {
899 return {};
900 }
901
902 view = std::get<0>(GrMakeUncachedBitmapProxyView(rContext, bitmap));
903 if (!view) {
904 return {};
905 }
906
907 view = threadSafeCache->add(key, view);
908
909 SkASSERT(view.origin() == kTopLeft_GrSurfaceOrigin);
910 return GrTextureEffect::Make(
911 std::move(view), kPremul_SkAlphaType, m, GrSamplerState::Filter::kLinear);
912 }
913
914 static std::unique_ptr<GrFragmentProcessor> make_rect_blur(GrRecordingContext* context,
915 const GrShaderCaps& caps,
916 const SkRect& srcRect,
917 const SkMatrix& viewMatrix,
918 float transformedSigma) {
919 SkASSERT(viewMatrix.preservesRightAngles());
920 SkASSERT(srcRect.isSorted());
921
922 if (SkGpuBlurUtils::IsEffectivelyZeroSigma(transformedSigma)) {
923 // No need to blur the rect
924 return nullptr;
925 }
926
927 SkMatrix invM;
928 SkRect rect;
929 if (viewMatrix.rectStaysRect()) {
930 invM = SkMatrix::I();
931 // We can do everything in device space when the src rect projects to a rect in device space
932 SkAssertResult(viewMatrix.mapRect(&rect, srcRect));
933 } else {
934 // The view matrix may scale, perhaps anisotropically. But we want to apply our device space
935 // "transformedSigma" to the delta of frag coord from the rect edges. Factor out the scaling
936 // to define a space that is purely rotation/translation from device space (and scale from
937 // src space). We'll meet in the middle: pre-scale the src rect to be in this space and then
938 // apply the inverse of the rotation/translation portion to the frag coord.
939 SkMatrix m;
940 SkSize scale;
941 if (!viewMatrix.decomposeScale(&scale, &m)) {
942 return nullptr;
943 }
944 if (!m.invert(&invM)) {
945 return nullptr;
946 }
947 rect = {srcRect.left() * scale.width(),
948 srcRect.top() * scale.height(),
949 srcRect.right() * scale.width(),
950 srcRect.bottom() * scale.height()};
951 }
952
953 if (!caps.fFloatIs32Bits) {
954 // We promote the math that gets us into the Gaussian space to full float when the rect
955 // coords are large. If we don't have full float then fail. We could probably clip the rect
956 // to an outset device bounds instead.
957 if (SkScalarAbs(rect.fLeft) > 16000.f || SkScalarAbs(rect.fTop) > 16000.f ||
958 SkScalarAbs(rect.fRight) > 16000.f || SkScalarAbs(rect.fBottom) > 16000.f) {
959 return nullptr;
960 }
961 }
962
963 const float sixSigma = 6 * transformedSigma;
964 std::unique_ptr<GrFragmentProcessor> integral = make_rect_integral_fp(context, sixSigma);
965 if (!integral) {
966 return nullptr;
967 }
968
969 // In the fast variant we think of the midpoint of the integral texture as aligning with the
970 // closest rect edge both in x and y. To simplify texture coord calculation we inset the rect so
971 // that the edge of the inset rect corresponds to t = 0 in the texture. It actually simplifies
972 // things a bit in the !isFast case, too.
973 float threeSigma = sixSigma / 2;
974 SkRect insetRect = {rect.left() + threeSigma,
975 rect.top() + threeSigma,
976 rect.right() - threeSigma,
977 rect.bottom() - threeSigma};
978
979 // In our fast variant we find the nearest horizontal and vertical edges, do a lookup in the
980 // integral texture for each, and multiply the results. When the rect is less than 6 sigma
981 // wide, things aren't so simple and we have to consider both the left and right edges of the
982 // rectangle (and similarly in y).
983 bool isFast = insetRect.isSorted();
984
985 static const SkRuntimeEffect* effect = SkMakeRuntimeEffect(SkRuntimeEffect::MakeForShader,
986 // Effect that is a LUT for integral of normal distribution. The value at x:[0,6*sigma] is
987 // the integral from -inf to (3*sigma - x). I.e. x is mapped from [0, 6*sigma] to
988 // [3*sigma to -3*sigma]. The flip saves a reversal in the shader.
989 "uniform shader integral;"
990
991 "uniform float4 rect;"
992 "uniform int isFast;" // specialized
993
994 "half4 main(float2 pos) {"
995 "half xCoverage, yCoverage;"
996 "if (bool(isFast)) {"
997 // Get the smaller of the signed distance from the frag coord to the left and right
998 // edges and similar for y.
999 // The integral texture goes "backwards" (from 3*sigma to -3*sigma), So, the below
1000 // computations align the left edge of the integral texture with the inset rect's
1001 // edge extending outward 6 * sigma from the inset rect.
1002 "half2 xy = max(half2(rect.LT - pos), half2(pos - rect.RB));"
1003 "xCoverage = integral.eval(half2(xy.x, 0.5)).a;"
1004 "yCoverage = integral.eval(half2(xy.y, 0.5)).a;"
1005 "} else {"
1006 // We consider just the x direction here. In practice we compute x and y
1007 // separately and multiply them together.
1008 // We define our coord system so that the point at which we're evaluating the kernel
1009 // defined by the normal distribution (K) is at 0. In this coord system let L be the
1010 // left edge and R be the right edge of the rectangle.
1011 // We can calculate the coverage C by integrating K over the half-infinite ranges
1012 // outside the L to R range and subtracting from 1:
1013 // C = 1 - <integral of K from -inf to L> - <integral of K from R to inf>
1014 // K is symmetric about x=0 so:
1015 // C = 1 - <integral of K from -inf to L> - <integral of K from -inf to -R>
1016
1017 // The integral texture goes "backwards" (from 3*sigma to -3*sigma) which is
1018 // factored in to the below calculations.
1019 // Also, our rect uniform was pre-inset by 3 sigma from the actual rect being
1020 // blurred, also factored in.
1021 "half4 rect = half4(half2(rect.LT - pos), half2(pos - rect.RB));"
1022 "xCoverage = 1 - integral.eval(half2(rect.L, 0.5)).a"
1023 "- integral.eval(half2(rect.R, 0.5)).a;"
1024 "yCoverage = 1 - integral.eval(half2(rect.T, 0.5)).a"
1025 "- integral.eval(half2(rect.B, 0.5)).a;"
1026 "}"
1027 "return half4(xCoverage * yCoverage);"
1028 "}"
1029 );
1030
1031 std::unique_ptr<GrFragmentProcessor> fp =
1032 GrSkSLFP::Make(effect, "RectBlur", /*inputFP=*/nullptr,
1033 GrSkSLFP::OptFlags::kCompatibleWithCoverageAsAlpha,
1034 "integral", GrSkSLFP::IgnoreOptFlags(std::move(integral)),
1035 "rect", insetRect,
1036 "isFast", GrSkSLFP::Specialize<int>(isFast));
1037 // Modulate blur with the input color.
1038 fp = GrBlendFragmentProcessor::Make<SkBlendMode::kModulate>(std::move(fp),
1039 /*dst=*/nullptr);
1040 if (!invM.isIdentity()) {
1041 fp = GrMatrixEffect::Make(invM, std::move(fp));
1042 }
1043 return GrFragmentProcessor::DeviceSpace(std::move(fp));
1044 }
1045
1046 ///////////////////////////////////////////////////////////////////////////////
1047 // RRect Blur
1048 ///////////////////////////////////////////////////////////////////////////////
1049
1050 static constexpr auto kBlurredRRectMaskOrigin = kTopLeft_GrSurfaceOrigin;
1051
1052 static void make_blurred_rrect_key(skgpu::UniqueKey* key,
1053 const SkRRect& rrectToDraw,
1054 float xformedSigma) {
1055 SkASSERT(!SkGpuBlurUtils::IsEffectivelyZeroSigma(xformedSigma));
1056 static const skgpu::UniqueKey::Domain kDomain = skgpu::UniqueKey::GenerateDomain();
1057
1058 skgpu::UniqueKey::Builder builder(key, kDomain, 9, "RoundRect Blur Mask");
1059 builder[0] = SkScalarCeilToInt(xformedSigma - 1 / 6.0f);
1060
1061 int index = 1;
1062 // TODO: this is overkill for _simple_ circular rrects
1063 for (auto c : {SkRRect::kUpperLeft_Corner,
1064 SkRRect::kUpperRight_Corner,
1065 SkRRect::kLowerRight_Corner,
1066 SkRRect::kLowerLeft_Corner}) {
1067 SkASSERT(SkScalarIsInt(rrectToDraw.radii(c).fX) && SkScalarIsInt(rrectToDraw.radii(c).fY));
1068 builder[index++] = SkScalarCeilToInt(rrectToDraw.radii(c).fX);
1069 builder[index++] = SkScalarCeilToInt(rrectToDraw.radii(c).fY);
1070 }
1071 builder.finish();
1072 }
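// e.g. (illustrative): xformedSigma = 4.2 stores SkScalarCeilToInt(4.2 - 1/6.f) = 5 in
// builder[0], followed by the four (integer) corner radii pairs in builder[1..8].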
1073
1074 static bool fillin_view_on_gpu(GrDirectContext* dContext,
1075 const GrSurfaceProxyView& lazyView,
1076 sk_sp<GrThreadSafeCache::Trampoline> trampoline,
1077 const SkRRect& rrectToDraw,
1078 const SkISize& dimensions,
1079 float xformedSigma) {
1080 #if defined(SK_GANESH)
1081 SkASSERT(!SkGpuBlurUtils::IsEffectivelyZeroSigma(xformedSigma));
1082
1083 // We cache blur masks. Use default surface props here so we can use the same cached mask
1084 // regardless of the final dst surface.
1085 SkSurfaceProps defaultSurfaceProps;
1086
1087 std::unique_ptr<skgpu::v1::SurfaceDrawContext> sdc =
1088 skgpu::v1::SurfaceDrawContext::MakeWithFallback(dContext,
1089 GrColorType::kAlpha_8,
1090 nullptr,
1091 SkBackingFit::kExact,
1092 dimensions,
1093 defaultSurfaceProps,
1094 1,
1095 GrMipmapped::kNo,
1096 GrProtected::kNo,
1097 kBlurredRRectMaskOrigin);
1098 if (!sdc) {
1099 return false;
1100 }
1101
1102 GrPaint paint;
1103
1104 sdc->clear(SK_PMColor4fTRANSPARENT);
1105 sdc->drawRRect(nullptr,
1106 std::move(paint),
1107 GrAA::kYes,
1108 SkMatrix::I(),
1109 rrectToDraw,
1110 GrStyle::SimpleFill());
1111
1112 GrSurfaceProxyView srcView = sdc->readSurfaceView();
1113 SkASSERT(srcView.asTextureProxy());
1114 auto rtc2 = SkGpuBlurUtils::GaussianBlur(dContext,
1115 std::move(srcView),
1116 sdc->colorInfo().colorType(),
1117 sdc->colorInfo().alphaType(),
1118 nullptr,
1119 SkIRect::MakeSize(dimensions),
1120 SkIRect::MakeSize(dimensions),
1121 xformedSigma,
1122 xformedSigma,
1123 SkTileMode::kClamp,
1124 SkBackingFit::kExact);
1125 if (!rtc2 || !rtc2->readSurfaceView()) {
1126 return false;
1127 }
1128
1129 auto view = rtc2->readSurfaceView();
1130 SkASSERT(view.swizzle() == lazyView.swizzle());
1131 SkASSERT(view.origin() == lazyView.origin());
1132 trampoline->fProxy = view.asTextureProxyRef();
1133
1134 return true;
1135 #else
1136 return false;
1137 #endif
1138 }
1139
1140 // Evaluate the vertical blur at the specified 'y' value given the location of the top of the
1141 // rrect.
1142 static uint8_t eval_V(float top, int y, const uint8_t* integral, int integralSize, float sixSigma) {
1143 if (top < 0) {
1144 return 0; // an empty column
1145 }
1146
1147 float fT = (top - y - 0.5f) * (integralSize / sixSigma);
1148 if (fT < 0) {
1149 return 255;
1150 } else if (fT >= integralSize - 1) {
1151 return 0;
1152 }
1153
1154 int lower = (int)fT;
1155 float frac = fT - lower;
1156
1157 SkASSERT(lower + 1 < integralSize);
1158
1159 return integral[lower] * (1.0f - frac) + integral[lower + 1] * frac;
1160 }
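// Worked example (illustrative): with sixSigma = 12 and a 64-entry integral table,
// top = 20 and y = 14 give fT = (20 - 14 - 0.5) * (64 / 12) ~= 29.3, so the result lerps
// between integral[29] and integral[30] with frac ~= 0.33.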
1161
1162 // Apply a gaussian 'kernel' horizontally at the specified 'x', 'y' location.
1163 static uint8_t eval_H(int x,
1164 int y,
1165 const std::vector<float>& topVec,
1166 const float* kernel,
1167 int kernelSize,
1168 const uint8_t* integral,
1169 int integralSize,
1170 float sixSigma) {
1171 SkASSERT(0 <= x && x < (int)topVec.size());
1172 SkASSERT(kernelSize % 2);
1173
1174 float accum = 0.0f;
1175
1176 int xSampleLoc = x - (kernelSize / 2);
1177 for (int i = 0; i < kernelSize; ++i, ++xSampleLoc) {
1178 if (xSampleLoc < 0 || xSampleLoc >= (int)topVec.size()) {
1179 continue;
1180 }
1181
1182 accum += kernel[i] * eval_V(topVec[xSampleLoc], y, integral, integralSize, sixSigma);
1183 }
1184
1185 return accum + 0.5f;
1186 }
1187
1188 // Create a cpu-side blurred-rrect mask that is close to the version the gpu would've produced.
1189 // The match needs to be close because the cpu- and gpu-generated versions must be interchangeable.
1190 static GrSurfaceProxyView create_mask_on_cpu(GrRecordingContext* rContext,
1191 const SkRRect& rrectToDraw,
1192 const SkISize& dimensions,
1193 float xformedSigma) {
1194 SkASSERT(!SkGpuBlurUtils::IsEffectivelyZeroSigma(xformedSigma));
1195 int radius = SkGpuBlurUtils::SigmaRadius(xformedSigma);
1196 int kernelSize = 2 * radius + 1;
1197
1198 SkASSERT(kernelSize % 2);
1199 SkASSERT(dimensions.width() % 2);
1200 SkASSERT(dimensions.height() % 2);
1201
1202 SkVector radii = rrectToDraw.getSimpleRadii();
1203 SkASSERT(SkScalarNearlyEqual(radii.fX, radii.fY));
1204
1205 const int halfWidthPlus1 = (dimensions.width() / 2) + 1;
1206 const int halfHeightPlus1 = (dimensions.height() / 2) + 1;
1207
1208 std::unique_ptr<float[]> kernel(new float[kernelSize]);
1209
1210 SkGpuBlurUtils::Compute1DGaussianKernel(kernel.get(), xformedSigma, radius);
1211
1212 SkBitmap integral;
1213 if (!SkGpuBlurUtils::CreateIntegralTable(6 * xformedSigma, &integral)) {
1214 return {};
1215 }
1216
1217 SkBitmap result;
1218 if (!result.tryAllocPixels(SkImageInfo::MakeA8(dimensions.width(), dimensions.height()))) {
1219 return {};
1220 }
1221
1222 std::vector<float> topVec;
1223 topVec.reserve(dimensions.width());
1224 for (int x = 0; x < dimensions.width(); ++x) {
1225 if (x < rrectToDraw.rect().fLeft || x > rrectToDraw.rect().fRight) {
1226 topVec.push_back(-1);
1227 } else {
1228 if (x + 0.5f < rrectToDraw.rect().fLeft + radii.fX) { // in the circular section
1229 float xDist = rrectToDraw.rect().fLeft + radii.fX - x - 0.5f;
1230 float h = sqrtf(radii.fX * radii.fX - xDist * xDist);
1231 SkASSERT(0 <= h && h < radii.fY);
1232 topVec.push_back(rrectToDraw.rect().fTop + radii.fX - h + 3 * xformedSigma);
1233 } else {
1234 topVec.push_back(rrectToDraw.rect().fTop + 3 * xformedSigma);
1235 }
1236 }
1237 }
1238
1239 for (int y = 0; y < halfHeightPlus1; ++y) {
1240 uint8_t* scanline = result.getAddr8(0, y);
1241
1242 for (int x = 0; x < halfWidthPlus1; ++x) {
1243 scanline[x] = eval_H(x,
1244 y,
1245 topVec,
1246 kernel.get(),
1247 kernelSize,
1248 integral.getAddr8(0, 0),
1249 integral.width(),
1250 6 * xformedSigma);
1251 scanline[dimensions.width() - x - 1] = scanline[x];
1252 }
1253
1254 memcpy(result.getAddr8(0, dimensions.height() - y - 1), scanline, result.rowBytes());
1255 }
1256
1257 result.setImmutable();
1258
1259 auto view = std::get<0>(GrMakeUncachedBitmapProxyView(rContext, result));
1260 if (!view) {
1261 return {};
1262 }
1263
1264 SkASSERT(view.origin() == kBlurredRRectMaskOrigin);
1265 return view;
1266 }
1267
1268 static std::unique_ptr<GrFragmentProcessor> find_or_create_rrect_blur_mask_fp(
1269 GrRecordingContext* rContext,
1270 const SkRRect& rrectToDraw,
1271 const SkISize& dimensions,
1272 float xformedSigma) {
1273 SkASSERT(!SkGpuBlurUtils::IsEffectivelyZeroSigma(xformedSigma));
1274 skgpu::UniqueKey key;
1275 make_blurred_rrect_key(&key, rrectToDraw, xformedSigma);
1276
1277 auto threadSafeCache = rContext->priv().threadSafeCache();
1278
1279 // It seems like we could omit this matrix and modify the shader code to not normalize
1280 // the coords used to sample the texture effect. However, the "proxyDims" value in the
1281 // shader is not always the actual proxy dimensions. This is because 'dimensions' here
1282 // was computed using integer corner radii as determined in
1283 // SkComputeBlurredRRectParams whereas the shader code uses the float radius to compute
1284 // 'proxyDims'. Why it draws correctly with these unequal values is a mystery for the ages.
1285 auto m = SkMatrix::Scale(dimensions.width(), dimensions.height());
1286
1287 GrSurfaceProxyView view;
1288
1289 if (GrDirectContext* dContext = rContext->asDirectContext()) {
1290 // The gpu thread gets priority over the recording threads. If the gpu thread is first,
1291 // it crams a lazy proxy into the cache and then fills it in later.
1292 auto [lazyView, trampoline] = GrThreadSafeCache::CreateLazyView(dContext,
1293 GrColorType::kAlpha_8,
1294 dimensions,
1295 kBlurredRRectMaskOrigin,
1296 SkBackingFit::kExact);
1297 if (!lazyView) {
1298 return nullptr;
1299 }
1300
1301 view = threadSafeCache->findOrAdd(key, lazyView);
1302 if (view != lazyView) {
1303 SkASSERT(view.asTextureProxy());
1304 SkASSERT(view.origin() == kBlurredRRectMaskOrigin);
1305 return GrTextureEffect::Make(std::move(view), kPremul_SkAlphaType, m);
1306 }
1307
1308 if (!fillin_view_on_gpu(dContext,
1309 lazyView,
1310 std::move(trampoline),
1311 rrectToDraw,
1312 dimensions,
1313 xformedSigma)) {
1314 // In this case something has gone disastrously wrong so set up to drop the draw
1315 // that needed this resource and reduce future pollution of the cache.
1316 threadSafeCache->remove(key);
1317 return nullptr;
1318 }
1319 } else {
1320 view = threadSafeCache->find(key);
1321 if (view) {
1322 SkASSERT(view.asTextureProxy());
1323 SkASSERT(view.origin() == kBlurredRRectMaskOrigin);
1324 return GrTextureEffect::Make(std::move(view), kPremul_SkAlphaType, m);
1325 }
1326
1327 view = create_mask_on_cpu(rContext, rrectToDraw, dimensions, xformedSigma);
1328 if (!view) {
1329 return nullptr;
1330 }
1331
1332 view = threadSafeCache->add(key, view);
1333 }
1334
1335 SkASSERT(view.asTextureProxy());
1336 SkASSERT(view.origin() == kBlurredRRectMaskOrigin);
1337 return GrTextureEffect::Make(std::move(view), kPremul_SkAlphaType, m);
1338 }
1339
1340 static std::unique_ptr<GrFragmentProcessor> make_rrect_blur(GrRecordingContext* context,
1341 float sigma,
1342 float xformedSigma,
1343 const SkRRect& srcRRect,
1344 const SkRRect& devRRect) {
1345 // Should've been caught up-stream
1346 #ifdef SK_DEBUG
1347 SkASSERTF(!SkRRectPriv::IsCircle(devRRect),
1348 "Unexpected circle. %d\n\t%s\n\t%s",
1349 SkRRectPriv::IsCircle(srcRRect),
1350 srcRRect.dumpToString(true).c_str(),
1351 devRRect.dumpToString(true).c_str());
1352 SkASSERTF(!devRRect.isRect(),
1353 "Unexpected rect. %d\n\t%s\n\t%s",
1354 srcRRect.isRect(),
1355 srcRRect.dumpToString(true).c_str(),
1356 devRRect.dumpToString(true).c_str());
1357 #endif
1358
1359 // TODO: loosen this up
1360 if (!SkRRectPriv::IsSimpleCircular(devRRect)) {
1361 return nullptr;
1362 }
1363
1364 if (SkGpuBlurUtils::IsEffectivelyZeroSigma(xformedSigma)) {
1365 return nullptr;
1366 }
1367
1368 // Make sure we can successfully ninepatch this rrect -- the blur sigma has to be sufficiently
1369 // small relative to both the size of the corner radius and the width (and height) of the rrect.
1370 SkRRect rrectToDraw;
1371 SkISize dimensions;
1372 SkScalar ignored[SkGpuBlurUtils::kBlurRRectMaxDivisions];
1373
1374 bool ninePatchable = SkGpuBlurUtils::ComputeBlurredRRectParams(srcRRect,
1375 devRRect,
1376 sigma,
1377 xformedSigma,
1378 &rrectToDraw,
1379 &dimensions,
1380 ignored,
1381 ignored,
1382 ignored,
1383 ignored);
1384 if (!ninePatchable) {
1385 return nullptr;
1386 }
1387
1388 std::unique_ptr<GrFragmentProcessor> maskFP =
1389 find_or_create_rrect_blur_mask_fp(context, rrectToDraw, dimensions, xformedSigma);
1390 if (!maskFP) {
1391 return nullptr;
1392 }
1393
1394 static const SkRuntimeEffect* effect = SkMakeRuntimeEffect(SkRuntimeEffect::MakeForShader,
1395 "uniform shader ninePatchFP;"
1396
1397 "uniform half cornerRadius;"
1398 "uniform float4 proxyRect;"
1399 "uniform half blurRadius;"
1400
1401 "half4 main(float2 xy) {"
1402 // Warp the fragment position to the appropriate part of the 9-patch blur texture by
1403 // snipping out the middle section of the proxy rect.
1404 "float2 translatedFragPosFloat = sk_FragCoord.xy - proxyRect.LT;"
1405 "float2 proxyCenter = (proxyRect.RB - proxyRect.LT) * 0.5;"
1406 "half edgeSize = 2.0 * blurRadius + cornerRadius + 0.5;"
1407
1408 // Position the fragment so that (0, 0) marks the center of the proxy rectangle.
1409 // Negative coordinates are on the left/top side and positive numbers are on the
1410 // right/bottom.
1411 "translatedFragPosFloat -= proxyCenter;"
1412
1413 // Temporarily strip off the fragment's sign. x/y are now strictly increasing as we
1414 // move away from the center.
1415 "half2 fragDirection = half2(sign(translatedFragPosFloat));"
1416 "translatedFragPosFloat = abs(translatedFragPosFloat);"
1417
1418 // Our goal is to snip out the "middle section" of the proxy rect (everything but the
1419 // edge). We've repositioned our fragment position so that (0, 0) is the centerpoint
1420 // and x/y are always positive, so we can subtract here and interpret negative results
1421 // as being within the middle section.
1422 "half2 translatedFragPosHalf = half2(translatedFragPosFloat - (proxyCenter - edgeSize));"
1423
1424 // Remove the middle section by clamping to zero.
1425 "translatedFragPosHalf = max(translatedFragPosHalf, 0);"
1426
1427 // Reapply the fragment's sign, so that negative coordinates once again mean left/top
1428 // side and positive means bottom/right side.
1429 "translatedFragPosHalf *= fragDirection;"
1430
1431 // Offset the fragment so that (0, 0) marks the upper-left again, instead of the center
1432 // point.
1433 "translatedFragPosHalf += half2(edgeSize);"
1434
1435 "half2 proxyDims = half2(2.0 * edgeSize);"
1436 "half2 texCoord = translatedFragPosHalf / proxyDims;"
1437
1438 "return ninePatchFP.eval(texCoord).aaaa;"
1439 "}"
1440 );
1441
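    // The uniforms below size the draw geometry: blurRadius approximates the blur's ~3-sigma
    // support (e.g., for xformedSigma = 4, 3 * ceil(4 - 1/6) = 12 device pixels), and
    // proxyRect is the device-space rrect bounds grown by that amount.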
1442 float cornerRadius = SkRRectPriv::GetSimpleRadii(devRRect).fX;
1443 float blurRadius = 3.f * SkScalarCeilToScalar(xformedSigma - 1 / 6.0f);
1444 SkRect proxyRect = devRRect.getBounds().makeOutset(blurRadius, blurRadius);
1445
1446 auto rrectBlurFP = GrSkSLFP::Make(effect, "RRectBlur", /*inputFP=*/nullptr,
1447 GrSkSLFP::OptFlags::kCompatibleWithCoverageAsAlpha,
1448 "ninePatchFP", GrSkSLFP::IgnoreOptFlags(std::move(maskFP)),
1449 "cornerRadius", cornerRadius,
1450 "proxyRect", proxyRect,
1451 "blurRadius", blurRadius);
1452 // Modulate blur with the input color.
1453 return GrBlendFragmentProcessor::Make<SkBlendMode::kModulate>(std::move(rrectBlurFP),
1454 /*dst=*/nullptr);
1455 }
1456
1457 ///////////////////////////////////////////////////////////////////////////////
1458
1459 bool SkBlurMaskFilterImpl::directFilterMaskGPU(GrRecordingContext* context,
1460 skgpu::v1::SurfaceDrawContext* sdc,
1461 GrPaint&& paint,
1462 const GrClip* clip,
1463 const SkMatrix& viewMatrix,
1464 const GrStyledShape& shape) const {
1465 SkASSERT(sdc);
1466
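    // This is the direct (no offscreen mask) path: the blur is expressed analytically as a
    // coverage fragment processor for rects and circles, or as a cached nine-patch for round
    // rects. Returning false sends anything else to the mask-rendering fallback.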
1467 if (fBlurStyle != kNormal_SkBlurStyle) {
1468 return false;
1469 }
1470
1471 // TODO: we could handle blurred stroked circles
1472 if (!shape.style().isSimpleFill()) {
1473 return false;
1474 }
1475
1476 SkScalar xformedSigma = this->computeXformedSigma(viewMatrix);
1477 if (SkGpuBlurUtils::IsEffectivelyZeroSigma(xformedSigma)) {
1478 sdc->drawShape(clip, std::move(paint), GrAA::kYes, viewMatrix, GrStyledShape(shape));
1479 return true;
1480 }
1481
1482 SkRRect srcRRect;
1483 bool inverted;
1484 if (!shape.asRRect(&srcRRect, nullptr, nullptr, &inverted) || inverted) {
1485 return false;
1486 }
1487
1488 std::unique_ptr<GrFragmentProcessor> fp;
1489
1490 SkRRect devRRect;
1491 bool devRRectIsValid = srcRRect.transform(viewMatrix, &devRRect);
1492
1493 bool devRRectIsCircle = devRRectIsValid && SkRRectPriv::IsCircle(devRRect);
1494
1495 bool canBeRect = srcRRect.isRect() && viewMatrix.preservesRightAngles();
1496 bool canBeCircle = (SkRRectPriv::IsCircle(srcRRect) && viewMatrix.isSimilarity()) ||
1497 devRRectIsCircle;
1498
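    // A rect stays a rect under any right-angle-preserving matrix, and a circle stays a circle
    // under a similarity transform (or if it already landed as a circle in device space), so
    // both cases can use an analytic device-space blur.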
1499 if (canBeRect || canBeCircle) {
1500 if (canBeRect) {
1501 fp = make_rect_blur(context, *context->priv().caps()->shaderCaps(),
1502 srcRRect.rect(), viewMatrix, xformedSigma);
1503 } else {
1504 SkRect devBounds;
1505 if (devRRectIsCircle) {
1506 devBounds = devRRect.getBounds();
1507 } else {
1508 SkPoint center = {srcRRect.getBounds().centerX(), srcRRect.getBounds().centerY()};
1509                 viewMatrix.mapPoints(&center, 1);
1510 SkScalar radius = viewMatrix.mapVector(0, srcRRect.width()/2.f).length();
1511 devBounds = {center.x() - radius,
1512 center.y() - radius,
1513 center.x() + radius,
1514 center.y() + radius};
1515 }
1516 fp = make_circle_blur(context, devBounds, xformedSigma);
1517 }
1518
1519 if (!fp) {
1520 return false;
1521 }
1522
1523 SkRect srcProxyRect = srcRRect.rect();
1524 // Determine how much to outset the src rect to ensure we hit pixels within three sigma.
1525 SkScalar outsetX = 3.0f*xformedSigma;
1526 SkScalar outsetY = 3.0f*xformedSigma;
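        // xformedSigma is a device-space quantity, but srcProxyRect is in source space, so
        // divide the outsets by the CTM's scale to convert them back.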
1527 if (viewMatrix.isScaleTranslate()) {
1528 outsetX /= SkScalarAbs(viewMatrix.getScaleX());
1529 outsetY /= SkScalarAbs(viewMatrix.getScaleY());
1530 } else {
1531 SkSize scale;
1532 if (!viewMatrix.decomposeScale(&scale, nullptr)) {
1533 return false;
1534 }
1535 outsetX /= scale.width();
1536 outsetY /= scale.height();
1537 }
1538 srcProxyRect.outset(outsetX, outsetY);
1539
1540 paint.setCoverageFragmentProcessor(std::move(fp));
1541 sdc->drawRect(clip, std::move(paint), GrAA::kNo, viewMatrix, srcProxyRect);
1542 return true;
1543 }
1544 if (!viewMatrix.isScaleTranslate()) {
1545 return false;
1546 }
1547 if (!devRRectIsValid || !SkRRectPriv::AllCornersCircular(devRRect)) {
1548 return false;
1549 }
1550
1551 fp = make_rrect_blur(context, fSigma, xformedSigma, srcRRect, devRRect);
1552 if (!fp) {
1553 return false;
1554 }
1555
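    // When the CTM is respected, drawing a source-space rect (outset by 3 * fSigma) through
    // viewMatrix covers the blur. When the CTM is ignored, the sigma is already a device-space
    // quantity, so fill the device-space bounds directly, using the inverse view matrix as the
    // local matrix so the paint still sees source-space coordinates.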
1556 if (!this->ignoreXform()) {
1557 SkRect srcProxyRect = srcRRect.rect();
1558 srcProxyRect.outset(3.0f*fSigma, 3.0f*fSigma);
1559 paint.setCoverageFragmentProcessor(std::move(fp));
1560 sdc->drawRect(clip, std::move(paint), GrAA::kNo, viewMatrix, srcProxyRect);
1561 } else {
1562 SkMatrix inverse;
1563 if (!viewMatrix.invert(&inverse)) {
1564 return false;
1565 }
1566
1567 SkIRect proxyBounds;
1568         float extra = 3.f * SkScalarCeilToScalar(xformedSigma - 1 / 6.0f);
1569 devRRect.rect().makeOutset(extra, extra).roundOut(&proxyBounds);
1570
1571 paint.setCoverageFragmentProcessor(std::move(fp));
1572 sdc->fillPixelsWithLocalMatrix(clip, std::move(paint), proxyBounds, inverse);
1573 }
1574
1575 return true;
1576 }
1577
canFilterMaskGPU(const GrStyledShape & shape,const SkIRect & devSpaceShapeBounds,const SkIRect & clipBounds,const SkMatrix & ctm,SkIRect * maskRect) const1578 bool SkBlurMaskFilterImpl::canFilterMaskGPU(const GrStyledShape& shape,
1579 const SkIRect& devSpaceShapeBounds,
1580 const SkIRect& clipBounds,
1581 const SkMatrix& ctm,
1582 SkIRect* maskRect) const {
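    // Reports whether the GPU mask-filter path should handle this shape and, when 'maskRect'
    // is provided, the device-space bounds the blurred mask will cover.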
1583 SkScalar xformedSigma = this->computeXformedSigma(ctm);
1584 if (SkGpuBlurUtils::IsEffectivelyZeroSigma(xformedSigma)) {
1585 *maskRect = devSpaceShapeBounds;
1586 return maskRect->intersect(clipBounds);
1587 }
1588
1589 if (maskRect) {
1590 float sigma3 = 3 * SkScalarToFloat(xformedSigma);
1591
1592 // Outset srcRect and clipRect by 3 * sigma, to compute affected blur area.
1593 SkIRect clipRect = clipBounds.makeOutset(sigma3, sigma3);
1594 SkIRect srcRect = devSpaceShapeBounds.makeOutset(sigma3, sigma3);
1595
1596 if (!srcRect.intersect(clipRect)) {
1597 srcRect.setEmpty();
1598 }
1599 *maskRect = srcRect;
1600 }
1601
1602     // We prefer to blur small shapes with small blur sigmas on the CPU.
1603 static const SkScalar kMIN_GPU_BLUR_SIZE = SkIntToScalar(64);
1604 static const SkScalar kMIN_GPU_BLUR_SIGMA = SkIntToScalar(32);
1605
1606 if (devSpaceShapeBounds.width() <= kMIN_GPU_BLUR_SIZE &&
1607 devSpaceShapeBounds.height() <= kMIN_GPU_BLUR_SIZE &&
1608 xformedSigma <= kMIN_GPU_BLUR_SIGMA) {
1609 return false;
1610 }
1611
1612 return true;
1613 }
1614
1615 GrSurfaceProxyView SkBlurMaskFilterImpl::filterMaskGPU(GrRecordingContext* context,
1616 GrSurfaceProxyView srcView,
1617 GrColorType srcColorType,
1618 SkAlphaType srcAlphaType,
1619 const SkMatrix& ctm,
1620 const SkIRect& maskRect) const {
1621 // 'maskRect' isn't snapped to the UL corner but the mask in 'src' is.
1622 const SkIRect clipRect = SkIRect::MakeWH(maskRect.width(), maskRect.height());
1623
1624 SkScalar xformedSigma = this->computeXformedSigma(ctm);
1625
1626 // If we're doing a normal blur, we can clobber the pathTexture in the
1627 // gaussianBlur. Otherwise, we need to save it for later compositing.
1628 bool isNormalBlur = (kNormal_SkBlurStyle == fBlurStyle);
1629 auto srcBounds = SkIRect::MakeSize(srcView.proxy()->dimensions());
1630 auto surfaceDrawContext = SkGpuBlurUtils::GaussianBlur(context,
1631 srcView,
1632 srcColorType,
1633 srcAlphaType,
1634 nullptr,
1635 clipRect,
1636 srcBounds,
1637 xformedSigma,
1638 xformedSigma,
1639 SkTileMode::kClamp);
1640 if (!surfaceDrawContext || !surfaceDrawContext->asTextureProxy()) {
1641 return {};
1642 }
1643
1644 if (!isNormalBlur) {
1645 GrPaint paint;
1646 // Blend pathTexture over blurTexture.
1647 paint.setCoverageFragmentProcessor(GrTextureEffect::Make(std::move(srcView), srcAlphaType));
1648 if (kInner_SkBlurStyle == fBlurStyle) {
1649 // inner: dst = dst * src
1650 paint.setCoverageSetOpXPFactory(SkRegion::kIntersect_Op);
1651 } else if (kSolid_SkBlurStyle == fBlurStyle) {
1652 // solid: dst = src + dst - src * dst
1653 // = src + (1 - src) * dst
1654 paint.setCoverageSetOpXPFactory(SkRegion::kUnion_Op);
1655 } else if (kOuter_SkBlurStyle == fBlurStyle) {
1656 // outer: dst = dst * (1 - src)
1657 // = 0 * src + (1 - src) * dst
1658 paint.setCoverageSetOpXPFactory(SkRegion::kDifference_Op);
1659 } else {
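            // Defensive fallback; a normal blur skips this compositing pass entirely.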
1660 paint.setCoverageSetOpXPFactory(SkRegion::kReplace_Op);
1661 }
1662
1663 surfaceDrawContext->fillPixelsWithLocalMatrix(nullptr, std::move(paint), clipRect,
1664 SkMatrix::I());
1665 }
1666
1667 return surfaceDrawContext->readSurfaceView();
1668 }
1669
1670 #endif // defined(SK_GANESH)
1671
1672 void sk_register_blur_maskfilter_createproc() { SK_REGISTER_FLATTENABLE(SkBlurMaskFilterImpl); }
1673
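// Illustrative usage (not from this file): attach a normal-style blur to a paint, converting a
// desired blur radius to a sigma first.
//
//     paint.setMaskFilter(SkMaskFilter::MakeBlur(kNormal_SkBlurStyle,
//                                                SkBlurMask::ConvertRadiusToSigma(4.0f)));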
1674 sk_sp<SkMaskFilter> SkMaskFilter::MakeBlur(SkBlurStyle style, SkScalar sigma, bool respectCTM) {
1675 if (SkScalarIsFinite(sigma) && sigma > 0) {
1676 return sk_sp<SkMaskFilter>(new SkBlurMaskFilterImpl(sigma, style, respectCTM));
1677 }
1678 return nullptr;
1679 }
1680