/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrVkPipelineState.h"

#include "GrPipeline.h"
#include "GrTexturePriv.h"
#include "GrVkCommandBuffer.h"
#include "GrVkDescriptorPool.h"
#include "GrVkDescriptorSet.h"
#include "GrVkGpu.h"
#include "GrVkImageView.h"
#include "GrVkMemory.h"
#include "GrVkPipeline.h"
#include "GrVkRenderTarget.h"
#include "GrVkSampler.h"
#include "GrVkTexture.h"
#include "GrVkUniformBuffer.h"
#include "glsl/GrGLSLFragmentProcessor.h"
#include "glsl/GrGLSLGeometryProcessor.h"
#include "glsl/GrGLSLXferProcessor.h"
#include "SkMipMap.h"
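// GrVkPipelineState bundles a compiled VkPipeline with the generated processor
// implementations, the uniform data manager, and the descriptor sets needed to issue a
// draw. The constructor only records the pieces built by the pipeline state builder; the
// uniform buffers it creates here are filled later in setData().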
GrVkPipelineState::GrVkPipelineState(GrVkGpu* gpu,
                                     const GrVkPipelineState::Desc& desc,
                                     GrVkPipeline* pipeline,
                                     VkPipelineLayout layout,
                                     const GrVkDescriptorSetManager::Handle& samplerDSHandle,
                                     const BuiltinUniformHandles& builtinUniformHandles,
                                     const UniformInfoArray& uniforms,
                                     uint32_t vertexUniformSize,
                                     uint32_t fragmentUniformSize,
                                     uint32_t numSamplers,
                                     GrGLSLPrimitiveProcessor* geometryProcessor,
                                     GrGLSLXferProcessor* xferProcessor,
                                     const GrGLSLFragProcs& fragmentProcessors)
    : fPipeline(pipeline)
    , fPipelineLayout(layout)
    , fUniformDescriptorSet(nullptr)
    , fSamplerDescriptorSet(nullptr)
    , fSamplerDSHandle(samplerDSHandle)
    , fStartDS(SK_MaxS32)
    , fDSCount(0)
    , fBuiltinUniformHandles(builtinUniformHandles)
    , fGeometryProcessor(geometryProcessor)
    , fXferProcessor(xferProcessor)
    , fFragmentProcessors(fragmentProcessors)
    , fDesc(desc)
    , fDataManager(uniforms, vertexUniformSize, fragmentUniformSize) {
    fSamplers.setReserve(numSamplers);
    fTextureViews.setReserve(numSamplers);
    fTextures.setReserve(numSamplers);

    fDescriptorSets[0] = VK_NULL_HANDLE;
    fDescriptorSets[1] = VK_NULL_HANDLE;

    // Currently we are always binding a descriptor set for uniform buffers.
    if (vertexUniformSize || fragmentUniformSize) {
        fDSCount++;
        fStartDS = GrVkUniformHandler::kUniformBufferDescSet;
    }
    if (numSamplers) {
        fDSCount++;
        fStartDS = SkTMin(fStartDS, (int)GrVkUniformHandler::kSamplerDescSet);
    }

    fVertexUniformBuffer.reset(GrVkUniformBuffer::Create(gpu, vertexUniformSize));
    fFragmentUniformBuffer.reset(GrVkUniformBuffer::Create(gpu, fragmentUniformSize));

    fNumSamplers = numSamplers;
}
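// The destructor only verifies that freeGPUResources() or abandonGPUResources() has
// already run; it is responsible solely for deleting the GLSL fragment processor
// objects, which are plain heap allocations rather than Vulkan resources.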
GrVkPipelineState::~GrVkPipelineState() {
    // Must have freed all GPU resources before this is destroyed
    SkASSERT(!fPipeline);
    SkASSERT(!fPipelineLayout);
    SkASSERT(!fSamplers.count());
    SkASSERT(!fTextureViews.count());
    SkASSERT(!fTextures.count());
    for (int i = 0; i < fFragmentProcessors.count(); ++i) {
        delete fFragmentProcessors[i];
    }
}
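// Drops the per-draw refs taken in writeSamplers() on samplers, image views, and texture
// resources, leaving the arrays empty for the next setData() call.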
void GrVkPipelineState::freeTempResources(const GrVkGpu* gpu) {
    for (int i = 0; i < fSamplers.count(); ++i) {
        fSamplers[i]->unref(gpu);
    }
    fSamplers.rewind();

    for (int i = 0; i < fTextureViews.count(); ++i) {
        fTextureViews[i]->unref(gpu);
    }
    fTextureViews.rewind();

    for (int i = 0; i < fTextures.count(); ++i) {
        fTextures[i]->unref(gpu);
    }
    fTextures.rewind();
}
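// Releases every Vulkan-backed object while the device is still valid: the pipeline,
// the pipeline layout, both uniform buffers, and any descriptor sets, then clears the
// per-draw texture refs via freeTempResources().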
void GrVkPipelineState::freeGPUResources(const GrVkGpu* gpu) {
    if (fPipeline) {
        fPipeline->unref(gpu);
        fPipeline = nullptr;
    }

    if (fPipelineLayout) {
        GR_VK_CALL(gpu->vkInterface(), DestroyPipelineLayout(gpu->device(),
                                                             fPipelineLayout,
                                                             nullptr));
        fPipelineLayout = VK_NULL_HANDLE;
    }

    if (fVertexUniformBuffer) {
        fVertexUniformBuffer->release(gpu);
    }

    if (fFragmentUniformBuffer) {
        fFragmentUniformBuffer->release(gpu);
    }

    if (fUniformDescriptorSet) {
        fUniformDescriptorSet->recycle(const_cast<GrVkGpu*>(gpu));
        fUniformDescriptorSet = nullptr;
    }

    if (fSamplerDescriptorSet) {
        fSamplerDescriptorSet->recycle(const_cast<GrVkGpu*>(gpu));
        fSamplerDescriptorSet = nullptr;
    }

    this->freeTempResources(gpu);
}
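// Called when the context is being abandoned: drop all refs without making any Vulkan
// calls, since the device (and everything allocated from it) is assumed to be gone.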
void GrVkPipelineState::abandonGPUResources() {
    fPipeline->unrefAndAbandon();
    fPipeline = nullptr;

    fPipelineLayout = VK_NULL_HANDLE;

    fVertexUniformBuffer->abandon();
    fFragmentUniformBuffer->abandon();

    for (int i = 0; i < fSamplers.count(); ++i) {
        fSamplers[i]->unrefAndAbandon();
    }
    fSamplers.rewind();

    for (int i = 0; i < fTextureViews.count(); ++i) {
        fTextureViews[i]->unrefAndAbandon();
    }
    fTextureViews.rewind();

    for (int i = 0; i < fTextures.count(); ++i) {
        fTextures[i]->unrefAndAbandon();
    }
    fTextures.rewind();

    if (fUniformDescriptorSet) {
        fUniformDescriptorSet->unrefAndAbandon();
        fUniformDescriptorSet = nullptr;
    }

    if (fSamplerDescriptorSet) {
        fSamplerDescriptorSet->unrefAndAbandon();
        fSamplerDescriptorSet = nullptr;
    }
}
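// Collects pointers to a processor's texture samplers into the flat array that
// writeSamplers() later walks when filling the sampler descriptor set.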
static void append_texture_bindings(const GrProcessor& processor,
                                    SkTArray<const GrProcessor::TextureSampler*>* textureBindings) {
    // We don't support image storages in VK.
    SkASSERT(!processor.numImageStorages());
    if (int numTextureSamplers = processor.numTextureSamplers()) {
        const GrProcessor::TextureSampler** bindings =
                textureBindings->push_back_n(numTextureSamplers);
        int i = 0;
        do {
            bindings[i] = &processor.textureSampler(i);
        } while (++i < numTextureSamplers);
    }
}
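// Per-draw update. Pushes uniform values from the primitive, fragment, and xfer
// processors into the data manager, then (re)acquires descriptor sets: the sampler set
// is rewritten every draw, while the uniform set is only replaced when the uniform
// buffers were actually re-uploaded or no set has been acquired yet.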
void GrVkPipelineState::setData(GrVkGpu* gpu,
                                const GrPrimitiveProcessor& primProc,
                                const GrPipeline& pipeline) {
    // This is here to protect against someone calling setData multiple times in a row without
    // freeing the tempData between calls.
    this->freeTempResources(gpu);

    this->setRenderTargetState(pipeline);

    SkSTArray<8, const GrProcessor::TextureSampler*> textureBindings;

    fGeometryProcessor->setData(fDataManager, primProc,
                                GrFragmentProcessor::CoordTransformIter(pipeline));
    append_texture_bindings(primProc, &textureBindings);

    GrFragmentProcessor::Iter iter(pipeline);
    GrGLSLFragmentProcessor::Iter glslIter(fFragmentProcessors.begin(),
                                           fFragmentProcessors.count());
    const GrFragmentProcessor* fp = iter.next();
    GrGLSLFragmentProcessor* glslFP = glslIter.next();
    while (fp && glslFP) {
        glslFP->setData(fDataManager, *fp);
        append_texture_bindings(*fp, &textureBindings);
        fp = iter.next();
        glslFP = glslIter.next();
    }
    SkASSERT(!fp && !glslFP);

    fXferProcessor->setData(fDataManager, pipeline.getXferProcessor());
    append_texture_bindings(pipeline.getXferProcessor(), &textureBindings);

    // Get new descriptor sets
    if (fNumSamplers) {
        if (fSamplerDescriptorSet) {
            fSamplerDescriptorSet->recycle(gpu);
        }
        fSamplerDescriptorSet = gpu->resourceProvider().getSamplerDescriptorSet(fSamplerDSHandle);
        int samplerDSIdx = GrVkUniformHandler::kSamplerDescSet;
        fDescriptorSets[samplerDSIdx] = fSamplerDescriptorSet->descriptorSet();
        this->writeSamplers(gpu, textureBindings, pipeline.getAllowSRGBInputs());
    }

    if (fVertexUniformBuffer.get() || fFragmentUniformBuffer.get()) {
        if (fDataManager.uploadUniformBuffers(gpu,
                                              fVertexUniformBuffer.get(),
                                              fFragmentUniformBuffer.get()) ||
            !fUniformDescriptorSet) {
            if (fUniformDescriptorSet) {
                fUniformDescriptorSet->recycle(gpu);
            }
            fUniformDescriptorSet = gpu->resourceProvider().getUniformDescriptorSet();
            int uniformDSIdx = GrVkUniformHandler::kUniformBufferDescSet;
            fDescriptorSets[uniformDSIdx] = fUniformDescriptorSet->descriptorSet();
            this->writeUniformBuffers(gpu);
        }
    }
}
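// Points the uniform descriptor set at the current vertex and fragment uniform buffers.
// descriptorWrites[0] always refers to the vertex binding and descriptorWrites[1] to the
// fragment binding; firstUniformWrite/uniformBindingUpdateCount select the contiguous
// range of writes that is actually submitted to vkUpdateDescriptorSets.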
void GrVkPipelineState::writeUniformBuffers(const GrVkGpu* gpu) {
    VkWriteDescriptorSet descriptorWrites[2];
    memset(descriptorWrites, 0, 2 * sizeof(VkWriteDescriptorSet));

    uint32_t firstUniformWrite = 0;
    uint32_t uniformBindingUpdateCount = 0;

    VkDescriptorBufferInfo vertBufferInfo;
    // Vertex Uniform Buffer
    if (fVertexUniformBuffer.get()) {
        ++uniformBindingUpdateCount;
        memset(&vertBufferInfo, 0, sizeof(VkDescriptorBufferInfo));
        vertBufferInfo.buffer = fVertexUniformBuffer->buffer();
        vertBufferInfo.offset = fVertexUniformBuffer->offset();
        vertBufferInfo.range = fVertexUniformBuffer->size();

        descriptorWrites[0].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
        descriptorWrites[0].pNext = nullptr;
        descriptorWrites[0].dstSet = fDescriptorSets[GrVkUniformHandler::kUniformBufferDescSet];
        descriptorWrites[0].dstBinding = GrVkUniformHandler::kVertexBinding;
        descriptorWrites[0].dstArrayElement = 0;
        descriptorWrites[0].descriptorCount = 1;
        descriptorWrites[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
        descriptorWrites[0].pImageInfo = nullptr;
        descriptorWrites[0].pBufferInfo = &vertBufferInfo;
        descriptorWrites[0].pTexelBufferView = nullptr;
    }

    VkDescriptorBufferInfo fragBufferInfo;
    // Fragment Uniform Buffer
    if (fFragmentUniformBuffer.get()) {
        if (0 == uniformBindingUpdateCount) {
            firstUniformWrite = 1;
        }
        ++uniformBindingUpdateCount;
        memset(&fragBufferInfo, 0, sizeof(VkDescriptorBufferInfo));
        fragBufferInfo.buffer = fFragmentUniformBuffer->buffer();
        fragBufferInfo.offset = fFragmentUniformBuffer->offset();
        fragBufferInfo.range = fFragmentUniformBuffer->size();

        descriptorWrites[1].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
        descriptorWrites[1].pNext = nullptr;
        descriptorWrites[1].dstSet = fDescriptorSets[GrVkUniformHandler::kUniformBufferDescSet];
        descriptorWrites[1].dstBinding = GrVkUniformHandler::kFragBinding;
        descriptorWrites[1].dstArrayElement = 0;
        descriptorWrites[1].descriptorCount = 1;
        descriptorWrites[1].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
        descriptorWrites[1].pImageInfo = nullptr;
        descriptorWrites[1].pBufferInfo = &fragBufferInfo;
        descriptorWrites[1].pTexelBufferView = nullptr;
    }

    if (uniformBindingUpdateCount) {
        GR_VK_CALL(gpu->vkInterface(), UpdateDescriptorSets(gpu->device(),
                                                            uniformBindingUpdateCount,
                                                            &descriptorWrites[firstUniformWrite],
                                                            0, nullptr));
    }
}
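// For each bound texture: find or create a compatible sampler, take refs on the texture
// resource and its image view so they outlive the draw, and write a combined
// image/sampler descriptor into the sampler descriptor set at binding i.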
void GrVkPipelineState::writeSamplers(
        GrVkGpu* gpu,
        const SkTArray<const GrProcessor::TextureSampler*>& textureBindings,
        bool allowSRGBInputs) {
    SkASSERT(fNumSamplers == textureBindings.count());

    for (int i = 0; i < textureBindings.count(); ++i) {
        const GrSamplerParams& params = textureBindings[i]->params();

        GrVkTexture* texture = static_cast<GrVkTexture*>(textureBindings[i]->texture());

        fSamplers.push(gpu->resourceProvider().findOrCreateCompatibleSampler(
                params, texture->texturePriv().maxMipMapLevel()));

        const GrVkResource* textureResource = texture->resource();
        textureResource->ref();
        fTextures.push(textureResource);

        const GrVkImageView* textureView = texture->textureView(allowSRGBInputs);
        textureView->ref();
        fTextureViews.push(textureView);

        VkDescriptorImageInfo imageInfo;
        memset(&imageInfo, 0, sizeof(VkDescriptorImageInfo));
        imageInfo.sampler = fSamplers[i]->sampler();
        imageInfo.imageView = textureView->imageView();
        imageInfo.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;

        VkWriteDescriptorSet writeInfo;
        memset(&writeInfo, 0, sizeof(VkWriteDescriptorSet));
        writeInfo.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
        writeInfo.pNext = nullptr;
        writeInfo.dstSet = fDescriptorSets[GrVkUniformHandler::kSamplerDescSet];
        writeInfo.dstBinding = i;
        writeInfo.dstArrayElement = 0;
        writeInfo.descriptorCount = 1;
        writeInfo.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
        writeInfo.pImageInfo = &imageInfo;
        writeInfo.pBufferInfo = nullptr;
        writeInfo.pTexelBufferView = nullptr;

        GR_VK_CALL(gpu->vkInterface(), UpdateDescriptorSets(gpu->device(),
                                                            1,
                                                            &writeInfo,
                                                            0,
                                                            nullptr));
    }
}
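// Refreshes the built-in render-target uniforms: the RT height (used to y-flip
// gl_FragCoord) and the four-component RT adjustment vector, each only when the bound
// render target's size or origin has changed since the last draw.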
void GrVkPipelineState::setRenderTargetState(const GrPipeline& pipeline) {
    // Load the RT height uniform if it is needed to y-flip gl_FragCoord.
    if (fBuiltinUniformHandles.fRTHeightUni.isValid() &&
        fRenderTargetState.fRenderTargetSize.fHeight != pipeline.getRenderTarget()->height()) {
        fDataManager.set1f(fBuiltinUniformHandles.fRTHeightUni,
                           SkIntToScalar(pipeline.getRenderTarget()->height()));
    }

    // set RT adjustment
    const GrRenderTarget* rt = pipeline.getRenderTarget();
    SkISize size;
    size.set(rt->width(), rt->height());
    SkASSERT(fBuiltinUniformHandles.fRTAdjustmentUni.isValid());
    if (fRenderTargetState.fRenderTargetOrigin != rt->origin() ||
        fRenderTargetState.fRenderTargetSize != size) {
        fRenderTargetState.fRenderTargetSize = size;
        fRenderTargetState.fRenderTargetOrigin = rt->origin();

        float rtAdjustmentVec[4];
        fRenderTargetState.getRTAdjustmentVec(rtAdjustmentVec);
        fDataManager.set4fv(fBuiltinUniformHandles.fRTAdjustmentUni, 1, rtAdjustmentVec);
    }
}
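// Binds the pipeline and, if this state uses any uniform or sampler descriptor sets,
// binds the contiguous range of sets starting at fStartDS.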
void GrVkPipelineState::bind(const GrVkGpu* gpu, GrVkCommandBuffer* commandBuffer) {
    commandBuffer->bindPipeline(gpu, fPipeline);

    if (fDSCount) {
        commandBuffer->bindDescriptorSets(gpu, this, fPipelineLayout, fStartDS, fDSCount,
                                          &fDescriptorSets[fStartDS], 0, nullptr);
    }
}
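// Registers every resource referenced by the descriptor sets with the command buffer so
// each stays alive (or is recycled) only after the command buffer finishes on the GPU.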
void GrVkPipelineState::addUniformResources(GrVkCommandBuffer& commandBuffer) {
    if (fUniformDescriptorSet) {
        commandBuffer.addRecycledResource(fUniformDescriptorSet);
    }
    if (fSamplerDescriptorSet) {
        commandBuffer.addRecycledResource(fSamplerDescriptorSet);
    }

    if (fVertexUniformBuffer.get()) {
        commandBuffer.addRecycledResource(fVertexUniformBuffer->resource());
    }
    if (fFragmentUniformBuffer.get()) {
        commandBuffer.addRecycledResource(fFragmentUniformBuffer->resource());
    }

    for (int i = 0; i < fSamplers.count(); ++i) {
        commandBuffer.addResource(fSamplers[i]);
    }

    for (int i = 0; i < fTextureViews.count(); ++i) {
        commandBuffer.addResource(fTextureViews[i]);
    }

    for (int i = 0; i < fTextures.count(); ++i) {
        commandBuffer.addResource(fTextures[i]);
    }
}

////////////////////////////////////////////////////////////////////////////////
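// Replaces the current descriptor pool with a larger one, growing the descriptor count
// by roughly 1.5x per replacement and capping it at kMaxDescLimit.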
void GrVkPipelineState::DescriptorPoolManager::getNewPool(GrVkGpu* gpu) {
    if (fPool) {
        fPool->unref(gpu);
        uint32_t newPoolSize = fMaxDescriptors + ((fMaxDescriptors + 1) >> 1);
        if (newPoolSize < kMaxDescLimit) {
            fMaxDescriptors = newPoolSize;
        } else {
            fMaxDescriptors = kMaxDescLimit;
        }
    }
    if (fMaxDescriptors) {
        fPool = gpu->resourceProvider().findOrCreateCompatibleDescriptorPool(fDescType,
                                                                             fMaxDescriptors);
    }
    SkASSERT(fPool || !fMaxDescriptors);
}
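// Allocates a single descriptor set from the current pool, first switching to a new pool
// if this allocation would exceed the pool's descriptor capacity.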
void GrVkPipelineState::DescriptorPoolManager::getNewDescriptorSet(GrVkGpu* gpu,
                                                                   VkDescriptorSet* ds) {
    if (!fMaxDescriptors) {
        return;
    }
    fCurrentDescriptorCount += fDescCountPerSet;
    if (fCurrentDescriptorCount > fMaxDescriptors) {
        this->getNewPool(gpu);
        fCurrentDescriptorCount = fDescCountPerSet;
    }

    VkDescriptorSetAllocateInfo dsAllocateInfo;
    memset(&dsAllocateInfo, 0, sizeof(VkDescriptorSetAllocateInfo));
    dsAllocateInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
    dsAllocateInfo.pNext = nullptr;
    dsAllocateInfo.descriptorPool = fPool->descPool();
    dsAllocateInfo.descriptorSetCount = 1;
    dsAllocateInfo.pSetLayouts = &fDescLayout;
    GR_VK_CALL_ERRCHECK(gpu->vkInterface(), AllocateDescriptorSets(gpu->device(),
                                                                   &dsAllocateInfo,
                                                                   ds));
}
void GrVkPipelineState::DescriptorPoolManager::freeGPUResources(const GrVkGpu* gpu) {
    if (fDescLayout) {
        GR_VK_CALL(gpu->vkInterface(), DestroyDescriptorSetLayout(gpu->device(), fDescLayout,
                                                                  nullptr));
        fDescLayout = VK_NULL_HANDLE;
    }

    if (fPool) {
        fPool->unref(gpu);
        fPool = nullptr;
    }
}

void GrVkPipelineState::DescriptorPoolManager::abandonGPUResources() {
    fDescLayout = VK_NULL_HANDLE;
    if (fPool) {
        fPool->unrefAndAbandon();
        fPool = nullptr;
    }
}
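// Packs the pipeline's blend state into a 32-bit key: bit 0 is the color-write flag,
// followed by 5 bits each for the src and dst coefficients and then the blend equation.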
uint32_t get_blend_info_key(const GrPipeline& pipeline) {
    GrXferProcessor::BlendInfo blendInfo;
    pipeline.getXferProcessor().getBlendInfo(&blendInfo);

    static const uint32_t kBlendWriteShift = 1;
    static const uint32_t kBlendCoeffShift = 5;
    GR_STATIC_ASSERT(kLast_GrBlendCoeff < (1 << kBlendCoeffShift));
    GR_STATIC_ASSERT(kFirstAdvancedGrBlendEquation - 1 < 4);

    uint32_t key = blendInfo.fWriteColor;
    key |= (blendInfo.fSrcBlend << kBlendWriteShift);
    key |= (blendInfo.fDstBlend << (kBlendWriteShift + kBlendCoeffShift));
    key |= (blendInfo.fEquation << (kBlendWriteShift + 2 * kBlendCoeffShift));

    return key;
}
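// Builds the cache key for a pipeline state: the base program key plus the Vulkan
// render pass key, stencil settings, draw face, blend info, and primitive type.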
bool GrVkPipelineState::Desc::Build(Desc* desc,
                                    const GrPrimitiveProcessor& primProc,
                                    const GrPipeline& pipeline,
                                    const GrStencilSettings& stencil,
                                    GrPrimitiveType primitiveType,
                                    const GrShaderCaps& caps) {
    if (!INHERITED::Build(desc, primProc, primitiveType == kPoints_GrPrimitiveType, pipeline,
                          caps)) {
        return false;
    }

    GrProcessorKeyBuilder b(&desc->key());
    GrVkRenderTarget* vkRT = (GrVkRenderTarget*)pipeline.getRenderTarget();
    vkRT->simpleRenderPass()->genKey(&b);

    stencil.genKey(&b);

    SkASSERT(sizeof(GrDrawFace) <= sizeof(uint32_t));
    b.add32((int32_t)pipeline.getDrawFace());

    b.add32(get_blend_info_key(pipeline));

    b.add32(primitiveType);

    return true;
}