/*
 * Copyright 2014 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "gl/GrGLPathRendering.h"
#include "gl/GrGLUtil.h"
#include "gl/GrGLGpu.h"

#include "GrGLPath.h"
#include "GrGLPathRange.h"
#include "GrGLPathRendering.h"

#include "SkStream.h"
#include "SkTypeface.h"

#define GL_CALL(X) GR_GL_CALL(this->gpu()->glInterface(), X)
#define GL_CALL_RET(RET, X) GR_GL_CALL_RET(this->gpu()->glInterface(), RET, X)

// Number of paths to allocate per glGenPaths call. The call can be overly slow on the command
// buffer GL implementation: it returns a value, so the caller has to wait for it to complete.
static const GrGLsizei kPathIDPreallocationAmount = 65536;

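// Maps GrPathRange::PathIndexType to the corresponding GL index type enum. The table order must
// match the enum values, which the static asserts below verify.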
static const GrGLenum gIndexType2GLType[] = {
    GR_GL_UNSIGNED_BYTE,
    GR_GL_UNSIGNED_SHORT,
    GR_GL_UNSIGNED_INT
};

GR_STATIC_ASSERT(0 == GrPathRange::kU8_PathIndexType);
GR_STATIC_ASSERT(1 == GrPathRange::kU16_PathIndexType);
GR_STATIC_ASSERT(2 == GrPathRange::kU32_PathIndexType);
GR_STATIC_ASSERT(GrPathRange::kU32_PathIndexType == GrPathRange::kLast_PathIndexType);

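// Maps GrPathRendering::PathTransformType to the corresponding GL path transform type enum; the
// order is again checked by the static asserts that follow.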
static const GrGLenum gXformType2GLType[] = {
    GR_GL_NONE,
    GR_GL_TRANSLATE_X,
    GR_GL_TRANSLATE_Y,
    GR_GL_TRANSLATE_2D,
    GR_GL_TRANSPOSE_AFFINE_2D
};

GR_STATIC_ASSERT(0 == GrPathRendering::kNone_PathTransformType);
GR_STATIC_ASSERT(1 == GrPathRendering::kTranslateX_PathTransformType);
GR_STATIC_ASSERT(2 == GrPathRendering::kTranslateY_PathTransformType);
GR_STATIC_ASSERT(3 == GrPathRendering::kTranslate_PathTransformType);
GR_STATIC_ASSERT(4 == GrPathRendering::kAffine_PathTransformType);
GR_STATIC_ASSERT(GrPathRendering::kAffine_PathTransformType == GrPathRendering::kLast_PathTransformType);

#ifdef SK_DEBUG
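// Debug-only table: the number of float components each transform type consumes per path, used to
// validate the transform values passed to onDrawPaths.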
static const GrGLenum gXformType2ComponentCount[] = {
    0,
    1,
    1,
    2,
    6
};

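// Debug-only check that none of the given floats are NaN.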
static void verify_floats(const float* floats, int count) {
    for (int i = 0; i < count; ++i) {
        SkASSERT(!SkScalarIsNaN(SkFloatToScalar(floats[i])));
    }
}
#endif

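// Maps the stencil pass op to the NV_path_rendering fill mode used when stenciling a path:
// wrapping increment becomes GL_COUNT_UP and invert becomes GL_INVERT. Any other op is unexpected
// here.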
static GrGLenum gr_stencil_op_to_gl_path_rendering_fill_mode(GrStencilOp op) {
    switch (op) {
        default:
            SkFAIL("Unexpected path fill.");
            /* fallthrough */
        case GrStencilOp::kIncWrap:
            return GR_GL_COUNT_UP;
        case GrStencilOp::kInvert:
            return GR_GL_INVERT;
    }
}

GrGLPathRendering::GrGLPathRendering(GrGLGpu* gpu)
    : GrPathRendering(gpu)
    , fPreallocatedPathCount(0) {
    const GrGLInterface* glInterface = gpu->glInterface();
    fCaps.bindFragmentInputSupport =
            nullptr != glInterface->fFunctions.fBindFragmentInputLocation;
}

GrGLPathRendering::~GrGLPathRendering() {
    if (fPreallocatedPathCount > 0) {
        this->deletePaths(fFirstPreallocatedPathID, fPreallocatedPathCount);
    }
}

void GrGLPathRendering::disconnect(GrGpu::DisconnectType type) {
    if (GrGpu::DisconnectType::kCleanup == type) {
        this->deletePaths(fFirstPreallocatedPathID, fPreallocatedPathCount);
    }
    fPreallocatedPathCount = 0;
}

void GrGLPathRendering::resetContext() {
    fHWProjectionMatrixState.invalidate();
    // We don't use the model view matrix.
    GL_CALL(MatrixLoadIdentity(GR_GL_PATH_MODELVIEW));

    fHWPathStencilSettings.invalidate();
}

sk_sp<GrPath> GrGLPathRendering::createPath(const SkPath& inPath, const GrStyle& style) {
    return sk_make_sp<GrGLPath>(this->gpu(), inPath, style);
}

sk_sp<GrPathRange> GrGLPathRendering::createPathRange(GrPathRange::PathGenerator* pathGenerator,
                                                      const GrStyle& style) {
    return sk_make_sp<GrGLPathRange>(this->gpu(), pathGenerator, style);
}

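// Stencil-only pass: color writes are disabled and the path's fill and/or stroke are rasterized
// straight into the stencil buffer of the given render target.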
void GrGLPathRendering::onStencilPath(const StencilPathArgs& args, const GrPath* path) {
    GrGLGpu* gpu = this->gpu();
    SkASSERT(gpu->caps()->shaderCaps()->pathRenderingSupport());
    gpu->flushColorWrite(false);

    GrGLRenderTarget* rt = static_cast<GrGLRenderTarget*>(args.fRenderTarget);
    SkISize size = SkISize::Make(rt->width(), rt->height());
    this->setProjectionMatrix(*args.fViewMatrix, size, rt->origin());
    gpu->flushScissor(*args.fScissor, rt->getViewport(), rt->origin());
    gpu->flushHWAAState(rt, args.fUseHWAA, true);
    gpu->flushRenderTarget(rt, nullptr);

    const GrGLPath* glPath = static_cast<const GrGLPath*>(path);

    this->flushPathStencilSettings(*args.fStencil);
    SkASSERT(!fHWPathStencilSettings.isTwoSided());

    GrGLenum fillMode =
        gr_stencil_op_to_gl_path_rendering_fill_mode(fHWPathStencilSettings.front().fPassOp);
    GrGLint writeMask = fHWPathStencilSettings.front().fWriteMask;

    if (glPath->shouldFill()) {
        GL_CALL(StencilFillPath(glPath->pathID(), fillMode, writeMask));
    }
    if (glPath->shouldStroke()) {
        GL_CALL(StencilStrokePath(glPath->pathID(), 0xffff, writeMask));
    }
}

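// Draws a single path with the "stencil, then cover" calls. A path that both fills and strokes
// stencils the fill first and then stencils-and-covers the stroke; otherwise a single
// StencilThenCoverFillPath call suffices.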
void GrGLPathRendering::onDrawPath(const GrPipeline& pipeline,
                                   const GrPrimitiveProcessor& primProc,
                                   const GrStencilSettings& stencilPassSettings,
                                   const GrPath* path) {
    if (!this->gpu()->flushGLState(pipeline, primProc, false)) {
        return;
    }
    const GrGLPath* glPath = static_cast<const GrGLPath*>(path);

    this->flushPathStencilSettings(stencilPassSettings);
    SkASSERT(!fHWPathStencilSettings.isTwoSided());

    GrGLenum fillMode =
        gr_stencil_op_to_gl_path_rendering_fill_mode(fHWPathStencilSettings.front().fPassOp);
    GrGLint writeMask = fHWPathStencilSettings.front().fWriteMask;

    if (glPath->shouldStroke()) {
        if (glPath->shouldFill()) {
            GL_CALL(StencilFillPath(glPath->pathID(), fillMode, writeMask));
        }
        GL_CALL(StencilThenCoverStrokePath(glPath->pathID(), 0xffff, writeMask,
                                           GR_GL_BOUNDING_BOX));
    } else {
        GL_CALL(StencilThenCoverFillPath(glPath->pathID(), fillMode, writeMask,
                                         GR_GL_BOUNDING_BOX));
    }
}

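// Instanced variant of onDrawPath: draws 'count' paths from a GrGLPathRange, selecting each path
// by index and positioning it with the per-path transform values.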
void GrGLPathRendering::onDrawPaths(const GrPipeline& pipeline,
                                    const GrPrimitiveProcessor& primProc,
                                    const GrStencilSettings& stencilPassSettings,
                                    const GrPathRange* pathRange, const void* indices,
                                    PathIndexType indexType, const float transformValues[],
                                    PathTransformType transformType, int count) {
    SkDEBUGCODE(verify_floats(transformValues, gXformType2ComponentCount[transformType] * count));

    if (!this->gpu()->flushGLState(pipeline, primProc, false)) {
        return;
    }
    this->flushPathStencilSettings(stencilPassSettings);
    SkASSERT(!fHWPathStencilSettings.isTwoSided());

    const GrGLPathRange* glPathRange = static_cast<const GrGLPathRange*>(pathRange);

    GrGLenum fillMode =
        gr_stencil_op_to_gl_path_rendering_fill_mode(fHWPathStencilSettings.front().fPassOp);
    GrGLint writeMask = fHWPathStencilSettings.front().fWriteMask;

    if (glPathRange->shouldStroke()) {
        if (glPathRange->shouldFill()) {
            GL_CALL(StencilFillPathInstanced(
                    count, gIndexType2GLType[indexType], indices, glPathRange->basePathID(),
                    fillMode, writeMask, gXformType2GLType[transformType],
                    transformValues));
        }
        GL_CALL(StencilThenCoverStrokePathInstanced(
                count, gIndexType2GLType[indexType], indices, glPathRange->basePathID(),
                0xffff, writeMask, GR_GL_BOUNDING_BOX_OF_BOUNDING_BOXES,
                gXformType2GLType[transformType], transformValues));
    } else {
        GL_CALL(StencilThenCoverFillPathInstanced(
                count, gIndexType2GLType[indexType], indices, glPathRange->basePathID(),
                fillMode, writeMask, GR_GL_BOUNDING_BOX_OF_BOUNDING_BOXES,
                gXformType2GLType[transformType], transformValues));
    }
}

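// Packs up to 'components' rows of the 3x3 matrix (x row, then y, then w) into a flat coefficient
// array and hands it to ProgramPathFragmentInputGen for fragment input generation.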
void GrGLPathRendering::setProgramPathFragmentInputTransform(GrGLuint program, GrGLint location,
                                                             GrGLenum genMode, GrGLint components,
                                                             const SkMatrix& matrix) {
    float coefficients[3 * 3];
    SkASSERT(components >= 1 && components <= 3);

    coefficients[0] = SkScalarToFloat(matrix[SkMatrix::kMScaleX]);
    coefficients[1] = SkScalarToFloat(matrix[SkMatrix::kMSkewX]);
    coefficients[2] = SkScalarToFloat(matrix[SkMatrix::kMTransX]);

    if (components >= 2) {
        coefficients[3] = SkScalarToFloat(matrix[SkMatrix::kMSkewY]);
        coefficients[4] = SkScalarToFloat(matrix[SkMatrix::kMScaleY]);
        coefficients[5] = SkScalarToFloat(matrix[SkMatrix::kMTransY]);
    }

    if (components >= 3) {
        coefficients[6] = SkScalarToFloat(matrix[SkMatrix::kMPersp0]);
        coefficients[7] = SkScalarToFloat(matrix[SkMatrix::kMPersp1]);
        coefficients[8] = SkScalarToFloat(matrix[SkMatrix::kMPersp2]);
    }
    SkDEBUGCODE(verify_floats(coefficients, components * 3));

    GL_CALL(ProgramPathFragmentInputGen(program, location, genMode, components, coefficients));
}

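// Loads the view matrix, adjusted for render target size and origin, into the GL path projection
// matrix. Redundant uploads are skipped by comparing against the cached matrix state.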
void GrGLPathRendering::setProjectionMatrix(const SkMatrix& matrix,
                                            const SkISize& renderTargetSize,
                                            GrSurfaceOrigin renderTargetOrigin) {

    SkASSERT(this->gpu()->glCaps().shaderCaps()->pathRenderingSupport());

    if (renderTargetOrigin == fHWProjectionMatrixState.fRenderTargetOrigin &&
        renderTargetSize == fHWProjectionMatrixState.fRenderTargetSize &&
        matrix.cheapEqualTo(fHWProjectionMatrixState.fViewMatrix)) {
        return;
    }

    fHWProjectionMatrixState.fViewMatrix = matrix;
    fHWProjectionMatrixState.fRenderTargetSize = renderTargetSize;
    fHWProjectionMatrixState.fRenderTargetOrigin = renderTargetOrigin;

    float glMatrix[4 * 4];
    fHWProjectionMatrixState.getRTAdjustedGLMatrix<4>(glMatrix);
    SkDEBUGCODE(verify_floats(glMatrix, SK_ARRAY_COUNT(glMatrix)));
    GL_CALL(MatrixLoadf(GR_GL_PATH_PROJECTION, glMatrix));
}

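// Returns the first ID of a contiguous block of 'range' path IDs. IDs are served from a
// preallocated block when possible so that the relatively expensive GenPaths call is amortized
// across many requests.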
GrGLuint GrGLPathRendering::genPaths(GrGLsizei range) {
    SkASSERT(range > 0);
    GrGLuint firstID;
    if (fPreallocatedPathCount >= range) {
        firstID = fFirstPreallocatedPathID;
        fPreallocatedPathCount -= range;
        fFirstPreallocatedPathID += range;
        return firstID;
    }
    // Allocate 'range' plus however much is needed to top the preallocation back up to
    // kPathIDPreallocationAmount. If the allocation succeeds, either join the new block with the
    // existing preallocated range, or delete the existing range and keep the new (possibly
    // partial) preallocation.
    GrGLsizei allocAmount = range + (kPathIDPreallocationAmount - fPreallocatedPathCount);
    if (allocAmount >= range) {
        GL_CALL_RET(firstID, GenPaths(allocAmount));

        if (firstID != 0) {
            if (fPreallocatedPathCount > 0 &&
                firstID == fFirstPreallocatedPathID + fPreallocatedPathCount) {
                firstID = fFirstPreallocatedPathID;
                fPreallocatedPathCount += allocAmount - range;
                fFirstPreallocatedPathID += range;
                return firstID;
            }

            if (allocAmount > range) {
                if (fPreallocatedPathCount > 0) {
                    this->deletePaths(fFirstPreallocatedPathID, fPreallocatedPathCount);
                }
                fFirstPreallocatedPathID = firstID + range;
                fPreallocatedPathCount = allocAmount - range;
            }
            // Special case: if allocAmount == range, we have full preallocated range.
            return firstID;
        }
    }
    // Failed to allocate with preallocation. Remove existing preallocation and try to allocate
    // just the range.
    if (fPreallocatedPathCount > 0) {
        this->deletePaths(fFirstPreallocatedPathID, fPreallocatedPathCount);
        fPreallocatedPathCount = 0;
    }

    GL_CALL_RET(firstID, GenPaths(range));
    if (firstID == 0) {
        SkDebugf("Warning: Failed to allocate path\n");
    }
    return firstID;
}

void GrGLPathRendering::deletePaths(GrGLuint path, GrGLsizei range) {
    GL_CALL(DeletePaths(path, range));
}

void GrGLPathRendering::flushPathStencilSettings(const GrStencilSettings& stencilSettings) {
    if (fHWPathStencilSettings != stencilSettings) {
        SkASSERT(stencilSettings.isValid());
        // Only the func, ref, and mask are set here. The op and write mask are params to the call
        // that draws the path to the SB (glStencilFillPath).
        uint16_t ref = stencilSettings.front().fRef;
        GrStencilTest test = stencilSettings.front().fTest;
        uint16_t testMask = stencilSettings.front().fTestMask;

        if (!fHWPathStencilSettings.isValid() ||
            ref != fHWPathStencilSettings.front().fRef ||
            test != fHWPathStencilSettings.front().fTest ||
            testMask != fHWPathStencilSettings.front().fTestMask) {
            GL_CALL(PathStencilFunc(GrToGLStencilFunc(test), ref, testMask));
        }
        fHWPathStencilSettings = stencilSettings;
    }
}

inline GrGLGpu* GrGLPathRendering::gpu() {
    return static_cast<GrGLGpu*>(fGpu);
}