• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
/*
 * Copyright 2018 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
7
#include "src/gpu/mtl/GrMtlGpuCommandBuffer.h"

#include "src/gpu/GrColor.h"
#include "src/gpu/GrFixedClip.h"
#include "src/gpu/GrRenderTargetPriv.h"
#include "src/gpu/GrTexturePriv.h"
#include "src/gpu/mtl/GrMtlCommandBuffer.h"
#include "src/gpu/mtl/GrMtlPipelineState.h"
#include "src/gpu/mtl/GrMtlPipelineStateBuilder.h"
#include "src/gpu/mtl/GrMtlRenderTarget.h"
#include "src/gpu/mtl/GrMtlTexture.h"

#if !__has_feature(objc_arc)
#error This file must be compiled with Arc. Use -fobjc-arc flag
#endif
23
// Sets up the command buffer for one render pass on 'rt', recording the requested
// load/store behavior and flushing any loadOp clears immediately.
GrMtlGpuRTCommandBuffer::GrMtlGpuRTCommandBuffer(
        GrMtlGpu* gpu, GrRenderTarget* rt, GrSurfaceOrigin origin, const SkRect& bounds,
        const GrGpuRTCommandBuffer::LoadAndStoreInfo& colorInfo,
        const GrGpuRTCommandBuffer::StencilLoadAndStoreInfo& stencilInfo)
        : INHERITED(rt, origin)
        , fGpu(gpu)
#ifdef SK_DEBUG
        // fRTBounds only backs the scissor-containment assert in onClear().
        , fRTBounds(bounds)
#endif
        {
    // Builds fRenderPassDesc and encodes any initial clears.
    this->setupRenderPass(colorInfo, stencilInfo);
}
36
GrMtlGpuRTCommandBuffer::~GrMtlGpuRTCommandBuffer() {
    // Any encoder opened by onDraw() must have been closed out before destruction.
    SkASSERT(fActiveRenderCmdEncoder == nil);
}
40
// Creates a render command encoder against the current fRenderPassDesc purely so its
// loadActions (e.g. a pending clear) get executed even when no draw follows. The
// encoder is owned by the command buffer; we only look at the handle for the assert.
void GrMtlGpuRTCommandBuffer::precreateCmdEncoder() {
    // For clears, we may not have an associated draw. So we prepare a cmdEncoder that
    // will be submitted whether there's a draw or not.
    SkASSERT(nil == fActiveRenderCmdEncoder);

    SkDEBUGCODE(id<MTLRenderCommandEncoder> cmdEncoder =)
            fGpu->commandBuffer()->getRenderCommandEncoder(fRenderPassDesc, nullptr, this);
    SkASSERT(nil != cmdEncoder);
}
50
// Hands the accumulated work for this render target off to the gpu, along with the
// integer bounds of everything drawn.
void GrMtlGpuRTCommandBuffer::submit() {
    if (!fRenderTarget) {
        return;
    }
    SkIRect drawnBounds;
    fBounds.roundOut(&drawnBounds);
    fGpu->submitIndirectCommandBuffer(fRenderTarget, fOrigin, &drawnBounds);
}
59
// Copies 'srcRect' of 'src' into this render target at 'dstPoint'.
void GrMtlGpuRTCommandBuffer::copy(GrSurface* src, const SkIRect& srcRect,
                                   const SkIPoint& dstPoint) {
    // copySurface needs its own command encoder, so no render encoder may be live here.
    SkASSERT(nil == fActiveRenderCmdEncoder);
    fGpu->copySurface(fRenderTarget, src, srcRect, dstPoint);
}
67
// Reads back 'srcRect' of the render target into 'transferBuffer' at 'offset'.
void GrMtlGpuRTCommandBuffer::transferFrom(const SkIRect& srcRect, GrColorType surfaceColorType,
                                           GrColorType bufferColorType, GrGpuBuffer* transferBuffer,
                                           size_t offset) {
    // transferPixelsFrom needs its own command encoder, so no render encoder may be live.
    SkASSERT(nil == fActiveRenderCmdEncoder);
    fGpu->transferPixelsFrom(fRenderTarget, srcRect.fLeft, srcRect.fTop,
                             srcRect.width(), srcRect.height(),
                             surfaceColorType, bufferColorType,
                             transferBuffer, offset);
}
78
// Finds or builds a pipeline state compatible with the draw, uploads its uniform/texture
// data, and records the vertex stride for later indexed binds. Returns nullptr on failure.
GrMtlPipelineState* GrMtlGpuRTCommandBuffer::prepareDrawState(
        const GrPrimitiveProcessor& primProc,
        const GrPipeline& pipeline,
        const GrPipeline::FixedDynamicState* fixedDynamicState,
        GrPrimitiveType primType) {
    // TODO: resolve textures and regenerate mipmaps as needed

    const GrTextureProxy* const* proxies =
            fixedDynamicState ? fixedDynamicState->fPrimitiveProcessorTextures : nullptr;
    // Proxies must be supplied exactly when the primitive processor samples textures.
    SkASSERT(SkToBool(proxies) == SkToBool(primProc.numTextureSamplers()));

    GrMtlPipelineState* state = fGpu->resourceProvider().findOrCreateCompatiblePipelineState(
            fRenderTarget, fOrigin, pipeline, primProc, proxies, primType);
    if (!state) {
        return nullptr;
    }
    state->setData(fRenderTarget, fOrigin, primProc, pipeline, proxies);
    fCurrentVertexStride = primProc.vertexStride();

    return state;
}
106
// Records all 'meshes' into a single render command encoder: prepares every sampled
// texture, binds pipeline state (rebuilt if the primitive type changes mid-batch),
// applies scissor state, and issues each mesh's draw.
void GrMtlGpuRTCommandBuffer::onDraw(const GrPrimitiveProcessor& primProc,
                                     const GrPipeline& pipeline,
                                     const GrPipeline::FixedDynamicState* fixedDynamicState,
                                     const GrPipeline::DynamicStateArrays* dynamicStateArrays,
                                     const GrMesh meshes[],
                                     int meshCount,
                                     const SkRect& bounds) {
    if (!meshCount) {
        return;
    }

    // Readies a texture for sampling: resolves it first if it is also a render target,
    // then regenerates mip levels when a mipmapped filter will be used and they're dirty.
    auto prepareSampledImage = [&](GrTexture* texture, GrSamplerState::Filter filter) {
        GrMtlTexture* mtlTexture = static_cast<GrMtlTexture*>(texture);
        // We may need to resolve the texture first if it is also a render target
        GrMtlRenderTarget* texRT = static_cast<GrMtlRenderTarget*>(mtlTexture->asRenderTarget());
        if (texRT) {
            fGpu->resolveRenderTargetNoFlush(texRT);
        }

        // Check if we need to regenerate any mip maps
        if (GrSamplerState::Filter::kMipMap == filter &&
            (texture->width() != 1 || texture->height() != 1)) {
            SkASSERT(texture->texturePriv().mipMapped() == GrMipMapped::kYes);
            if (texture->texturePriv().mipMapsAreDirty()) {
                fGpu->regenerateMipMapLevels(texture);
            }
        }
    };

    // Prepare the primitive processor's textures, supplied either per-mesh (dynamic
    // state arrays, flat-indexed by mesh * samplerCount) or once for all meshes.
    if (dynamicStateArrays && dynamicStateArrays->fPrimitiveProcessorTextures) {
        for (int m = 0, i = 0; m < meshCount; ++m) {
            for (int s = 0; s < primProc.numTextureSamplers(); ++s, ++i) {
                auto texture = dynamicStateArrays->fPrimitiveProcessorTextures[i]->peekTexture();
                prepareSampledImage(texture, primProc.textureSampler(s).samplerState().filter());
            }
        }
    } else {
        for (int i = 0; i < primProc.numTextureSamplers(); ++i) {
            auto texture = fixedDynamicState->fPrimitiveProcessorTextures[i]->peekTexture();
            prepareSampledImage(texture, primProc.textureSampler(i).samplerState().filter());
        }
    }
    // Also prepare every texture sampled by the pipeline's fragment processors.
    GrFragmentProcessor::Iter iter(pipeline);
    while (const GrFragmentProcessor* fp = iter.next()) {
        for (int i = 0; i < fp->numTextureSamplers(); ++i) {
            const GrFragmentProcessor::TextureSampler& sampler = fp->textureSampler(i);
            prepareSampledImage(sampler.peekTexture(), sampler.samplerState().filter());
        }
    }

    GrPrimitiveType primitiveType = meshes[0].primitiveType();
    GrMtlPipelineState* pipelineState = this->prepareDrawState(primProc, pipeline,
                                                               fixedDynamicState, primitiveType);
    if (!pipelineState) {
        return;
    }

    // Open the encoder that all meshes below record into; it stays active for the loop.
    SkASSERT(nil == fActiveRenderCmdEncoder);
    fActiveRenderCmdEncoder = fGpu->commandBuffer()->getRenderCommandEncoder(
            fRenderPassDesc, pipelineState, this);
    SkASSERT(fActiveRenderCmdEncoder);

    [fActiveRenderCmdEncoder setRenderPipelineState:pipelineState->mtlPipelineState()];
    pipelineState->setDrawState(fActiveRenderCmdEncoder, pipeline.outputSwizzle(),
                                pipeline.getXferProcessor());

    // Scissor: disabled -> bind a full-RT rect once; fixed -> bind it once here;
    // per-mesh rects are applied inside the loop below.
    bool dynamicScissor =
            pipeline.isScissorEnabled() && dynamicStateArrays && dynamicStateArrays->fScissorRects;
    if (!pipeline.isScissorEnabled()) {
        GrMtlPipelineState::SetDynamicScissorRectState(fActiveRenderCmdEncoder,
                                                       fRenderTarget, fOrigin,
                                                       SkIRect::MakeWH(fRenderTarget->width(),
                                                                       fRenderTarget->height()));
    } else if (!dynamicScissor) {
        SkASSERT(fixedDynamicState);
        GrMtlPipelineState::SetDynamicScissorRectState(fActiveRenderCmdEncoder,
                                                       fRenderTarget, fOrigin,
                                                       fixedDynamicState->fScissorRect);
    }

    for (int i = 0; i < meshCount; ++i) {
        const GrMesh& mesh = meshes[i];
        SkASSERT(nil != fActiveRenderCmdEncoder);
        // A different primitive type needs a different pipeline state; rebuild and rebind.
        if (mesh.primitiveType() != primitiveType) {
            SkDEBUGCODE(pipelineState = nullptr);
            primitiveType = mesh.primitiveType();
            pipelineState = this->prepareDrawState(primProc, pipeline, fixedDynamicState,
                                                   primitiveType);
            if (!pipelineState) {
                return;
            }

            [fActiveRenderCmdEncoder setRenderPipelineState:pipelineState->mtlPipelineState()];
            pipelineState->setDrawState(fActiveRenderCmdEncoder, pipeline.outputSwizzle(),
                                        pipeline.getXferProcessor());
        }

        if (dynamicScissor) {
            GrMtlPipelineState::SetDynamicScissorRectState(fActiveRenderCmdEncoder, fRenderTarget,
                                                           fOrigin,
                                                           dynamicStateArrays->fScissorRects[i]);
        }

        // Calls back into one of the sendMeshToGpu variants below.
        mesh.sendToGpu(this);
    }

    fActiveRenderCmdEncoder = nil;
    fBounds.join(bounds);
}
216
// Clears the color attachment to 'color' by toggling the loadAction to Clear around a
// standalone encoder, then restoring Load so later encoders don't re-clear.
void GrMtlGpuRTCommandBuffer::onClear(const GrFixedClip& clip, const SkPMColor4f& color) {
    // if we end up here from absClear, the clear bounds may be bigger than the RT proxy bounds -
    // but in that case, scissor should be enabled, so this check should still succeed
    SkASSERT(!clip.scissorEnabled() || clip.scissorRect().contains(fRTBounds));
    fRenderPassDesc.colorAttachments[0].clearColor = MTLClearColorMake(color.fR, color.fG, color.fB,
                                                                       color.fA);
    fRenderPassDesc.colorAttachments[0].loadAction = MTLLoadActionClear;
    // Encode the clear now, even if no draw follows.
    this->precreateCmdEncoder();
    fRenderPassDesc.colorAttachments[0].loadAction = MTLLoadActionLoad;
}
227
// Clears the stencil-clip bit by clearing the whole stencil buffer, using the same
// load-action toggle technique as onClear().
void GrMtlGpuRTCommandBuffer::onClearStencilClip(const GrFixedClip& clip, bool insideStencilMask) {
    SkASSERT(!clip.hasWindowRectangles());

    GrStencilAttachment* stencil = fRenderTarget->renderTargetPriv().getStencilAttachment();
    // Only called internally when a stencil buffer is known to be attached.
    SkASSERT(stencil);
    int bitCount = stencil->bits();

    // The contract with the callers does not guarantee that we preserve all bits in the
    // stencil during this clear. Thus we will clear the entire stencil to the desired
    // value: the top bit when clearing inside the mask, zero otherwise.
    fRenderPassDesc.stencilAttachment.clearStencil =
            insideStencilMask ? (1 << (bitCount - 1)) : 0;

    fRenderPassDesc.stencilAttachment.loadAction = MTLLoadActionClear;
    this->precreateCmdEncoder();
    fRenderPassDesc.stencilAttachment.loadAction = MTLLoadActionLoad;
}
249
// Applies baseline render state (winding, viewport) to a freshly created encoder and
// invalidates our cached vertex-buffer bindings.
void GrMtlGpuRTCommandBuffer::initRenderState(id<MTLRenderCommandEncoder> encoder) {
    [encoder pushDebugGroup:@"initRenderState"];
    [encoder setFrontFacingWinding:MTLWindingCounterClockwise];
    // The default viewport already matches the drawable backing this encoder, but we
    // set it explicitly just in case.
    MTLViewport viewport;
    viewport.originX = 0.0;
    viewport.originY = 0.0;
    viewport.width = (double) fRenderTarget->width();
    viewport.height = (double) fRenderTarget->height();
    viewport.znear = 0.0;
    viewport.zfar = 1.0;
    [encoder setViewport:viewport];
    this->resetBufferBindings();
    [encoder popDebugGroup];
}
262
// Builds fRenderPassDesc (color/stencil attachments, load/store actions, clear values)
// from the given load/store info, then immediately flushes any loadOp clears via a
// standalone command encoder.
void GrMtlGpuRTCommandBuffer::setupRenderPass(
        const GrGpuRTCommandBuffer::LoadAndStoreInfo& colorInfo,
        const GrGpuRTCommandBuffer::StencilLoadAndStoreInfo& stencilInfo) {
    // Table indexed directly by GrLoadOp; the static asserts pin the enum values.
    const static MTLLoadAction mtlLoadAction[] {
        MTLLoadActionLoad,
        MTLLoadActionClear,
        MTLLoadActionDontCare
    };
    GR_STATIC_ASSERT((int)GrLoadOp::kLoad == 0);
    GR_STATIC_ASSERT((int)GrLoadOp::kClear == 1);
    GR_STATIC_ASSERT((int)GrLoadOp::kDiscard == 2);
    SkASSERT(colorInfo.fLoadOp <= GrLoadOp::kDiscard);
    SkASSERT(stencilInfo.fLoadOp <= GrLoadOp::kDiscard);

    // Table indexed directly by GrStoreOp.
    const static MTLStoreAction mtlStoreAction[] {
        MTLStoreActionStore,
        MTLStoreActionDontCare
    };
    GR_STATIC_ASSERT((int)GrStoreOp::kStore == 0);
    GR_STATIC_ASSERT((int)GrStoreOp::kDiscard == 1);
    SkASSERT(colorInfo.fStoreOp <= GrStoreOp::kDiscard);
    SkASSERT(stencilInfo.fStoreOp <= GrStoreOp::kDiscard);

    auto renderPassDesc = [MTLRenderPassDescriptor renderPassDescriptor];
    renderPassDesc.colorAttachments[0].texture =
            static_cast<GrMtlRenderTarget*>(fRenderTarget)->mtlColorTexture();
    renderPassDesc.colorAttachments[0].slice = 0;
    renderPassDesc.colorAttachments[0].level = 0;
    const SkPMColor4f& clearColor = colorInfo.fClearColor;
    renderPassDesc.colorAttachments[0].clearColor =
            MTLClearColorMake(clearColor[0], clearColor[1], clearColor[2], clearColor[3]);
    renderPassDesc.colorAttachments[0].loadAction =
            mtlLoadAction[static_cast<int>(colorInfo.fLoadOp)];
    renderPassDesc.colorAttachments[0].storeAction =
            mtlStoreAction[static_cast<int>(colorInfo.fStoreOp)];

    // The stencil attachment is optional; only wire up its texture if one exists.
    const GrMtlStencilAttachment* stencil = static_cast<GrMtlStencilAttachment*>(
            fRenderTarget->renderTargetPriv().getStencilAttachment());
    if (stencil) {
        renderPassDesc.stencilAttachment.texture = stencil->stencilView();
    }
    renderPassDesc.stencilAttachment.clearStencil = 0;
    renderPassDesc.stencilAttachment.loadAction =
            mtlLoadAction[static_cast<int>(stencilInfo.fLoadOp)];
    renderPassDesc.stencilAttachment.storeAction =
            mtlStoreAction[static_cast<int>(stencilInfo.fStoreOp)];

    fRenderPassDesc = renderPassDesc;

    // Manage initial clears
    if (colorInfo.fLoadOp == GrLoadOp::kClear || stencilInfo.fLoadOp == GrLoadOp::kClear)  {
        // A clear touches the whole target, so the tracked bounds cover the full RT.
        fBounds = SkRect::MakeWH(fRenderTarget->width(),
                                 fRenderTarget->height());
        this->precreateCmdEncoder();
        // Once the clear has been encoded, revert to Load so any later encoders
        // created against this descriptor don't clear again.
        if (colorInfo.fLoadOp == GrLoadOp::kClear) {
            fRenderPassDesc.colorAttachments[0].loadAction = MTLLoadActionLoad;
        }
        if (stencilInfo.fLoadOp == GrLoadOp::kClear) {
            fRenderPassDesc.stencilAttachment.loadAction = MTLLoadActionLoad;
        }
    } else {
        fBounds.setEmpty();
    }
}
327
// Maps a GrPrimitiveType onto the corresponding Metal primitive type. Only the five
// types below are valid here (asserted); adjacency primitives are rejected by callers.
static MTLPrimitiveType gr_to_mtl_primitive(GrPrimitiveType primitiveType) {
    switch (primitiveType) {
        case GrPrimitiveType::kTriangles:     return MTLPrimitiveTypeTriangle;
        case GrPrimitiveType::kTriangleStrip: return MTLPrimitiveTypeTriangleStrip;
        case GrPrimitiveType::kPoints:        return MTLPrimitiveTypePoint;
        case GrPrimitiveType::kLines:         return MTLPrimitiveTypeLine;
        case GrPrimitiveType::kLineStrip:     return MTLPrimitiveTypeLineStrip;
        default:
            SkASSERT(primitiveType <= GrPrimitiveType::kLineStrip);
            return MTLPrimitiveTypeTriangle;
    }
}
345
// Binds the vertex (at 'vertexOffset') and optional instance buffers to the active
// encoder, at the buffer indices following the uniform bindings.
void GrMtlGpuRTCommandBuffer::bindGeometry(const GrBuffer* vertexBuffer,
                                           size_t vertexOffset,
                                           const GrBuffer* instanceBuffer) {
    size_t nextIndex = GrMtlUniformHandler::kLastUniformBinding + 1;
    if (vertexBuffer) {
        SkASSERT(!vertexBuffer->isCpuBuffer());
        SkASSERT(!static_cast<const GrGpuBuffer*>(vertexBuffer)->isMapped());

        auto* mtlVertex = static_cast<const GrMtlBuffer*>(vertexBuffer);
        this->setVertexBuffer(fActiveRenderCmdEncoder, mtlVertex, vertexOffset, nextIndex++);
    }
    if (instanceBuffer) {
        SkASSERT(!instanceBuffer->isCpuBuffer());
        SkASSERT(!static_cast<const GrGpuBuffer*>(instanceBuffer)->isMapped());

        auto* mtlInstance = static_cast<const GrMtlBuffer*>(instanceBuffer);
        this->setVertexBuffer(fActiveRenderCmdEncoder, mtlInstance, 0, nextIndex++);
    }
}
365
// Records a non-indexed, non-instanced draw of 'vertexCount' vertices starting at
// 'baseVertex'.
void GrMtlGpuRTCommandBuffer::sendMeshToGpu(GrPrimitiveType primitiveType,
                                            const GrBuffer* vertexBuffer,
                                            int vertexCount,
                                            int baseVertex) {
    this->bindGeometry(vertexBuffer, 0, nullptr);

    SkASSERT(primitiveType != GrPrimitiveType::kLinesAdjacency); // Geometry shaders not supported.
    [fActiveRenderCmdEncoder drawPrimitives:gr_to_mtl_primitive(primitiveType)
                                vertexStart:baseVertex
                                vertexCount:vertexCount];
    // Count this draw, keeping stats consistent with the indexed draw paths below.
    fGpu->stats()->incNumDraws();
}
377
// Records an indexed, non-instanced draw. Indices are always 16-bit here; the
// min/max index hints are unused by the Metal backend.
void GrMtlGpuRTCommandBuffer::sendIndexedMeshToGpu(GrPrimitiveType primitiveType,
                                                   const GrBuffer* indexBuffer,
                                                   int indexCount,
                                                   int baseIndex,
                                                   uint16_t /*minIndexValue*/,
                                                   uint16_t /*maxIndexValue*/,
                                                   const GrBuffer* vertexBuffer,
                                                   int baseVertex,
                                                   GrPrimitiveRestart restart) {
    // Metal applies baseVertex via the vertex buffer offset rather than a draw parameter.
    this->bindGeometry(vertexBuffer, fCurrentVertexStride*baseVertex, nullptr);

    SkASSERT(primitiveType != GrPrimitiveType::kLinesAdjacency); // Geometry shaders not supported.
    id<MTLBuffer> mtlIndexBuffer = nil;
    if (indexBuffer) {
        SkASSERT(!indexBuffer->isCpuBuffer());
        SkASSERT(!static_cast<const GrGpuBuffer*>(indexBuffer)->isMapped());

        mtlIndexBuffer = static_cast<const GrMtlBuffer*>(indexBuffer)->mtlBuffer();
        SkASSERT(mtlIndexBuffer);
    }

    SkASSERT(restart == GrPrimitiveRestart::kNo);
    // NOTE(review): indexBuffer is dereferenced unconditionally here even though the
    // nil-check above implies it could be null — confirm callers always pass one.
    size_t indexOffset = static_cast<const GrMtlBuffer*>(indexBuffer)->offset() +
                         sizeof(uint16_t) * baseIndex;
    [fActiveRenderCmdEncoder drawIndexedPrimitives:gr_to_mtl_primitive(primitiveType)
                                        indexCount:indexCount
                                         indexType:MTLIndexTypeUInt16
                                       indexBuffer:mtlIndexBuffer
                                 indexBufferOffset:indexOffset];
    fGpu->stats()->incNumDraws();
}
409
// Records a non-indexed, instanced draw: 'instanceCount' instances of 'vertexCount'
// vertices each.
void GrMtlGpuRTCommandBuffer::sendInstancedMeshToGpu(GrPrimitiveType primitiveType,
                                                     const GrBuffer* vertexBuffer,
                                                     int vertexCount,
                                                     int baseVertex,
                                                     const GrBuffer* instanceBuffer,
                                                     int instanceCount,
                                                     int baseInstance) {
    this->bindGeometry(vertexBuffer, 0, instanceBuffer);

    SkASSERT(primitiveType != GrPrimitiveType::kLinesAdjacency); // Geometry shaders not supported.
    [fActiveRenderCmdEncoder drawPrimitives:gr_to_mtl_primitive(primitiveType)
                                vertexStart:baseVertex
                                vertexCount:vertexCount
                              instanceCount:instanceCount
                               baseInstance:baseInstance];
    // Count this draw, keeping stats consistent with the indexed draw paths.
    fGpu->stats()->incNumDraws();
}
426
// Records an indexed, instanced draw. Indices are always 16-bit; unlike the
// non-instanced path, Metal takes baseVertex as a draw parameter here.
void GrMtlGpuRTCommandBuffer::sendIndexedInstancedMeshToGpu(GrPrimitiveType primitiveType,
                                                            const GrBuffer* indexBuffer,
                                                            int indexCount,
                                                            int baseIndex,
                                                            const GrBuffer* vertexBuffer,
                                                            int baseVertex,
                                                            const GrBuffer* instanceBuffer,
                                                            int instanceCount,
                                                            int baseInstance,
                                                            GrPrimitiveRestart restart) {
    this->bindGeometry(vertexBuffer, 0, instanceBuffer);

    SkASSERT(primitiveType != GrPrimitiveType::kLinesAdjacency); // Geometry shaders not supported.
    id<MTLBuffer> mtlIndexBuffer = nil;
    if (indexBuffer) {
        SkASSERT(!indexBuffer->isCpuBuffer());
        SkASSERT(!static_cast<const GrGpuBuffer*>(indexBuffer)->isMapped());

        mtlIndexBuffer = static_cast<const GrMtlBuffer*>(indexBuffer)->mtlBuffer();
        SkASSERT(mtlIndexBuffer);
    }

    SkASSERT(restart == GrPrimitiveRestart::kNo);
    // NOTE(review): indexBuffer is dereferenced unconditionally here even though the
    // nil-check above implies it could be null — confirm callers always pass one.
    size_t indexOffset = static_cast<const GrMtlBuffer*>(indexBuffer)->offset() +
                         sizeof(uint16_t) * baseIndex;
    [fActiveRenderCmdEncoder drawIndexedPrimitives:gr_to_mtl_primitive(primitiveType)
                                        indexCount:indexCount
                                         indexType:MTLIndexTypeUInt16
                                       indexBuffer:mtlIndexBuffer
                                 indexBufferOffset:indexOffset
                                     instanceCount:instanceCount
                                        baseVertex:baseVertex
                                      baseInstance:baseInstance];
    fGpu->stats()->incNumDraws();
}
462
// Binds 'buffer' at vertex-buffer slot 'index', skipping redundant work by consulting
// the cached bindings.
void GrMtlGpuRTCommandBuffer::setVertexBuffer(id<MTLRenderCommandEncoder> encoder,
                                              const GrMtlBuffer* buffer,
                                              size_t vertexOffset,
                                              size_t index) {
    SkASSERT(index < 4);
    id<MTLBuffer> mtlVertexBuffer = buffer->mtlBuffer();
    SkASSERT(mtlVertexBuffer);
    size_t offset = buffer->offset() + vertexOffset;
    auto& binding = fBufferBindings[index];
    if (binding.fBuffer != mtlVertexBuffer) {
        // Different buffer at this slot: full rebind.
        [encoder setVertexBuffer: mtlVertexBuffer
                          offset: offset
                         atIndex: index];
        binding.fBuffer = mtlVertexBuffer;
        binding.fOffset = offset;
    } else if (binding.fOffset != offset) {
        // Same buffer, new offset: Apple recommends setVertexBufferOffset: over a rebind.
        [encoder setVertexBufferOffset: offset
                               atIndex: index];
        binding.fOffset = offset;
    }
}
485
// Forgets all cached buffer bindings so the next setVertexBuffer call re-binds
// from scratch (used when a new encoder is started).
void GrMtlGpuRTCommandBuffer::resetBufferBindings() {
    for (auto& binding : fBufferBindings) {
        binding.fBuffer = nil;
    }
}
491