/*
 * Copyright 2021 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/graphite/mtl/MtlCommandBuffer.h"

#include "include/gpu/graphite/BackendSemaphore.h"
#include "include/gpu/graphite/mtl/MtlGraphiteTypes.h"
#include "src/gpu/graphite/ContextUtils.h"
#include "src/gpu/graphite/Log.h"
#include "src/gpu/graphite/RenderPassDesc.h"
#include "src/gpu/graphite/TextureProxy.h"
#include "src/gpu/graphite/UniformManager.h"
#include "src/gpu/graphite/compute/DispatchGroup.h"
#include "src/gpu/graphite/mtl/MtlBlitCommandEncoder.h"
#include "src/gpu/graphite/mtl/MtlBuffer.h"
#include "src/gpu/graphite/mtl/MtlCaps.h"
#include "src/gpu/graphite/mtl/MtlComputeCommandEncoder.h"
#include "src/gpu/graphite/mtl/MtlComputePipeline.h"
#include "src/gpu/graphite/mtl/MtlGraphicsPipeline.h"
#include "src/gpu/graphite/mtl/MtlRenderCommandEncoder.h"
#include "src/gpu/graphite/mtl/MtlSampler.h"
#include "src/gpu/graphite/mtl/MtlSharedContext.h"
#include "src/gpu/graphite/mtl/MtlTexture.h"
#include "src/gpu/mtl/MtlUtilsPriv.h"

namespace skgpu::graphite {

std::unique_ptr<MtlCommandBuffer> MtlCommandBuffer::Make(id<MTLCommandQueue> queue,
                                                         const MtlSharedContext* sharedContext,
                                                         MtlResourceProvider* resourceProvider) {
    auto commandBuffer = std::unique_ptr<MtlCommandBuffer>(
            new MtlCommandBuffer(queue, sharedContext, resourceProvider));
    if (!commandBuffer) {
        return nullptr;
    }
    if (!commandBuffer->createNewMTLCommandBuffer()) {
        return nullptr;
    }
    return commandBuffer;
}

MtlCommandBuffer::MtlCommandBuffer(id<MTLCommandQueue> queue,
                                   const MtlSharedContext* sharedContext,
                                   MtlResourceProvider* resourceProvider)
        : CommandBuffer(Protected::kNo)  // Metal doesn't support protected memory
        , fQueue(queue)
        , fSharedContext(sharedContext)
        , fResourceProvider(resourceProvider) {}

MtlCommandBuffer::~MtlCommandBuffer() {
    SkASSERT(!fActiveRenderCommandEncoder);
    SkASSERT(!fActiveComputeCommandEncoder);
    SkASSERT(!fActiveBlitCommandEncoder);
}

bool MtlCommandBuffer::setNewCommandBufferResources() {
    return this->createNewMTLCommandBuffer();
}

bool MtlCommandBuffer::createNewMTLCommandBuffer() {
    SkASSERT(fCommandBuffer == nil);

    // Inserting a pool here so the autorelease occurs when we return and the
    // only remaining ref is the retain below.
    @autoreleasepool {
        if (@available(macOS 11.0, iOS 14.0, tvOS 14.0, *)) {
            sk_cfp<MTLCommandBufferDescriptor*> desc([[MTLCommandBufferDescriptor alloc] init]);
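            // Unretained references skip Metal's per-use retain/release of every
            // resource the command buffer touches; Graphite keeps those resources
            // alive itself (see addResourceRefs()/trackResource() usage below).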
            (*desc).retainedReferences = NO;
#ifdef SK_ENABLE_MTL_DEBUG_INFO
            (*desc).errorOptions = MTLCommandBufferErrorOptionEncoderExecutionStatus;
#endif
            // We add a retain here because the command buffer is set to autorelease (not alloc or copy)
            fCommandBuffer.reset([[fQueue commandBufferWithDescriptor:desc.get()] retain]);
        } else {
            // We add a retain here because the command buffer is set to autorelease (not alloc or copy)
            fCommandBuffer.reset([[fQueue commandBufferWithUnretainedReferences] retain]);
        }
    }
    return fCommandBuffer != nil;
}

bool MtlCommandBuffer::commit() {
    SkASSERT(!fActiveRenderCommandEncoder);
    SkASSERT(!fActiveComputeCommandEncoder);
    this->endBlitCommandEncoder();
    [(*fCommandBuffer) commit];

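    // commit() returns without waiting for the GPU, so this check only reports
    // errors detectable at commit time; asynchronous execution failures surface
    // later through the command buffer's status and error properties.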
    if ((*fCommandBuffer).status == MTLCommandBufferStatusError) {
        NSString* description = (*fCommandBuffer).error.localizedDescription;
        const char* errorString = [description UTF8String];
        SKGPU_LOG_E("Failure submitting command buffer: %s", errorString);
    }

    return ((*fCommandBuffer).status != MTLCommandBufferStatusError);
}

void MtlCommandBuffer::onResetCommandBuffer() {
    fCommandBuffer.reset();
    fActiveRenderCommandEncoder.reset();
    fActiveComputeCommandEncoder.reset();
    fActiveBlitCommandEncoder.reset();
    fCurrentIndexBuffer = nil;
    fCurrentIndexBufferOffset = 0;
}

void MtlCommandBuffer::addWaitSemaphores(size_t numWaitSemaphores,
                                         const BackendSemaphore* waitSemaphores) {
    if (!waitSemaphores) {
        SkASSERT(numWaitSemaphores == 0);
        return;
    }

    // Can only insert events with no active encoder
    SkASSERT(!fActiveRenderCommandEncoder);
    SkASSERT(!fActiveComputeCommandEncoder);
    this->endBlitCommandEncoder();
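    // MTLEvent requires macOS 10.14/iOS 12; on older OSes these waits are silently dropped.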
    if (@available(macOS 10.14, iOS 12.0, tvOS 12.0, *)) {
        for (size_t i = 0; i < numWaitSemaphores; ++i) {
            auto semaphore = waitSemaphores[i];
            if (semaphore.isValid() && semaphore.backend() == BackendApi::kMetal) {
                id<MTLEvent> mtlEvent =
                        (__bridge id<MTLEvent>)BackendSemaphores::GetMtlEvent(semaphore);
                [(*fCommandBuffer) encodeWaitForEvent:mtlEvent
                                                value:BackendSemaphores::GetMtlValue(semaphore)];
            }
        }
    }
}

void MtlCommandBuffer::addSignalSemaphores(size_t numSignalSemaphores,
                                           const BackendSemaphore* signalSemaphores) {
    if (!signalSemaphores) {
        SkASSERT(numSignalSemaphores == 0);
        return;
    }

    // Can only insert events with no active encoder
    SkASSERT(!fActiveRenderCommandEncoder);
    SkASSERT(!fActiveComputeCommandEncoder);
    this->endBlitCommandEncoder();

    if (@available(macOS 10.14, iOS 12.0, tvOS 12.0, *)) {
        for (size_t i = 0; i < numSignalSemaphores; ++i) {
            auto semaphore = signalSemaphores[i];
            if (semaphore.isValid() && semaphore.backend() == BackendApi::kMetal) {
                id<MTLEvent> mtlEvent =
                        (__bridge id<MTLEvent>)BackendSemaphores::GetMtlEvent(semaphore);
                [(*fCommandBuffer) encodeSignalEvent:mtlEvent
                                               value:BackendSemaphores::GetMtlValue(semaphore)];
            }
        }
    }
}

bool MtlCommandBuffer::onAddRenderPass(const RenderPassDesc& renderPassDesc,
                                       SkIRect renderPassBounds,
                                       const Texture* colorTexture,
                                       const Texture* resolveTexture,
                                       const Texture* depthStencilTexture,
                                       SkIRect viewport,
                                       const DrawPassList& drawPasses) {
    if (!this->beginRenderPass(renderPassDesc, colorTexture, resolveTexture, depthStencilTexture)) {
        return false;
    }

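    // Graphite always renders with the full [0, 1] depth range.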
    this->setViewport(viewport.x(), viewport.y(), viewport.width(), viewport.height(), 0, 1);
    this->updateIntrinsicUniforms(viewport);

    for (const auto& drawPass : drawPasses) {
        this->addDrawPass(drawPass.get());
    }

    this->endRenderPass();
    return true;
}

bool MtlCommandBuffer::onAddComputePass(DispatchGroupSpan groups) {
    this->beginComputePass();
    for (const auto& group : groups) {
        group->addResourceRefs(this);
        for (const auto& dispatch : group->dispatches()) {
            this->bindComputePipeline(group->getPipeline(dispatch.fPipelineIndex));
            for (const ResourceBinding& binding : dispatch.fBindings) {
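                // Each binding holds one alternative of a std::variant: a buffer,
                // a texture, or a sampler.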
                if (const BindBufferInfo* buffer = std::get_if<BindBufferInfo>(&binding.fResource)) {
                    this->bindBuffer(buffer->fBuffer, buffer->fOffset, binding.fIndex);
                } else if (const TextureIndex* texIdx =
                                   std::get_if<TextureIndex>(&binding.fResource)) {
                    SkASSERT(texIdx);
                    this->bindTexture(group->getTexture(texIdx->fValue), binding.fIndex);
                } else {
                    const SamplerIndex* samplerIdx = std::get_if<SamplerIndex>(&binding.fResource);
                    SkASSERT(samplerIdx);
                    this->bindSampler(group->getSampler(samplerIdx->fValue), binding.fIndex);
                }
            }
            SkASSERT(fActiveComputeCommandEncoder);
            for (const ComputeStep::WorkgroupBufferDesc& wgBuf : dispatch.fWorkgroupBuffers) {
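                // Metal requires threadgroup memory lengths to be a multiple of 16 bytes.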
                fActiveComputeCommandEncoder->setThreadgroupMemoryLength(
                        SkAlignTo(wgBuf.size, 16),
                        wgBuf.index);
            }
            if (const WorkgroupSize* globalSize =
                        std::get_if<WorkgroupSize>(&dispatch.fGlobalSizeOrIndirect)) {
                this->dispatchThreadgroups(*globalSize, dispatch.fLocalSize);
            } else {
                SkASSERT(std::holds_alternative<BindBufferInfo>(dispatch.fGlobalSizeOrIndirect));
                const BindBufferInfo& indirect =
                        *std::get_if<BindBufferInfo>(&dispatch.fGlobalSizeOrIndirect);
                this->dispatchThreadgroupsIndirect(
                        dispatch.fLocalSize, indirect.fBuffer, indirect.fOffset);
            }
        }
    }
    this->endComputePass();
    return true;
}

bool MtlCommandBuffer::beginRenderPass(const RenderPassDesc& renderPassDesc,
                                       const Texture* colorTexture,
                                       const Texture* resolveTexture,
                                       const Texture* depthStencilTexture) {
    SkASSERT(!fActiveRenderCommandEncoder);
    SkASSERT(!fActiveComputeCommandEncoder);
    this->endBlitCommandEncoder();

    const static MTLLoadAction mtlLoadAction[] {
        MTLLoadActionLoad,
        MTLLoadActionClear,
        MTLLoadActionDontCare
    };
    static_assert((int)LoadOp::kLoad == 0);
    static_assert((int)LoadOp::kClear == 1);
    static_assert((int)LoadOp::kDiscard == 2);
    static_assert(std::size(mtlLoadAction) == kLoadOpCount);

    const static MTLStoreAction mtlStoreAction[] {
        MTLStoreActionStore,
        MTLStoreActionDontCare
    };
    static_assert((int)StoreOp::kStore == 0);
    static_assert((int)StoreOp::kDiscard == 1);
    static_assert(std::size(mtlStoreAction) == kStoreOpCount);

    sk_cfp<MTLRenderPassDescriptor*> descriptor([[MTLRenderPassDescriptor alloc] init]);
    // Set up color attachment.
    auto& colorInfo = renderPassDesc.fColorAttachment;
    bool loadMSAAFromResolve = false;
    if (colorTexture) {
        // TODO: check Texture matches RenderPassDesc
        auto colorAttachment = (*descriptor).colorAttachments[0];
        colorAttachment.texture = ((const MtlTexture*)colorTexture)->mtlTexture();
        const std::array<float, 4>& clearColor = renderPassDesc.fClearColor;
        colorAttachment.clearColor =
                MTLClearColorMake(clearColor[0], clearColor[1], clearColor[2], clearColor[3]);
        colorAttachment.loadAction = mtlLoadAction[static_cast<int>(colorInfo.fLoadOp)];
        colorAttachment.storeAction = mtlStoreAction[static_cast<int>(colorInfo.fStoreOp)];
        // Set up resolve attachment
        if (resolveTexture) {
            SkASSERT(renderPassDesc.fColorResolveAttachment.fStoreOp == StoreOp::kStore);
            // TODO: check Texture matches RenderPassDesc
            colorAttachment.resolveTexture = ((const MtlTexture*)resolveTexture)->mtlTexture();
            // Inclusion of a resolve texture implies the client wants to finish the
            // renderpass with a resolve.
            if (@available(macOS 10.12, iOS 10.0, tvOS 10.0, *)) {
                SkASSERT(colorAttachment.storeAction == MTLStoreActionDontCare);
                colorAttachment.storeAction = MTLStoreActionMultisampleResolve;
            } else {
                // We expect at least Metal 2
                // TODO: Add error output
                SkASSERT(false);
            }
            // But it also means we have to load the resolve texture into the MSAA color attachment
            loadMSAAFromResolve = renderPassDesc.fColorResolveAttachment.fLoadOp == LoadOp::kLoad;
            // TODO: If the color resolve texture is read-only we can use a private (vs. memoryless)
            // msaa attachment that's coupled to the framebuffer and the StoreAndMultisampleResolve
            // action instead of loading as a draw.
        }
    }

    // Set up stencil/depth attachment
    auto& depthStencilInfo = renderPassDesc.fDepthStencilAttachment;
    if (depthStencilTexture) {
        // TODO: check Texture matches RenderPassDesc
        id<MTLTexture> mtlTexture = ((const MtlTexture*)depthStencilTexture)->mtlTexture();
        if (MtlFormatIsDepth(mtlTexture.pixelFormat)) {
            auto depthAttachment = (*descriptor).depthAttachment;
            depthAttachment.texture = mtlTexture;
            depthAttachment.clearDepth = renderPassDesc.fClearDepth;
            depthAttachment.loadAction =
                     mtlLoadAction[static_cast<int>(depthStencilInfo.fLoadOp)];
            depthAttachment.storeAction =
                     mtlStoreAction[static_cast<int>(depthStencilInfo.fStoreOp)];
        }
        if (MtlFormatIsStencil(mtlTexture.pixelFormat)) {
            auto stencilAttachment = (*descriptor).stencilAttachment;
            stencilAttachment.texture = mtlTexture;
            stencilAttachment.clearStencil = renderPassDesc.fClearStencil;
            stencilAttachment.loadAction =
                     mtlLoadAction[static_cast<int>(depthStencilInfo.fLoadOp)];
            stencilAttachment.storeAction =
                     mtlStoreAction[static_cast<int>(depthStencilInfo.fStoreOp)];
        }
    } else {
        SkASSERT(!depthStencilInfo.fTextureInfo.isValid());
    }

    fActiveRenderCommandEncoder = MtlRenderCommandEncoder::Make(fSharedContext,
                                                                fCommandBuffer.get(),
                                                                descriptor.get());
    this->trackResource(fActiveRenderCommandEncoder);

    if (loadMSAAFromResolve) {
        // Manually load the contents of the resolve texture into the MSAA attachment as a draw,
        // so the actual load op for the MSAA attachment had better have been discard.
        SkASSERT(colorInfo.fLoadOp == LoadOp::kDiscard);
        auto loadPipeline = fResourceProvider->findOrCreateLoadMSAAPipeline(renderPassDesc);
        if (!loadPipeline) {
            SKGPU_LOG_E("Unable to create pipeline to load resolve texture into MSAA attachment");
            return false;
        }
        this->bindGraphicsPipeline(loadPipeline.get());
        // The load msaa pipeline takes no uniforms, no vertex/instance attributes and only uses
        // one texture that does not require a sampler.
        fActiveRenderCommandEncoder->setFragmentTexture(
                ((const MtlTexture*) resolveTexture)->mtlTexture(), 0);
        this->draw(PrimitiveType::kTriangleStrip, 0, 4);
    }

    return true;
}

void MtlCommandBuffer::endRenderPass() {
    SkASSERT(fActiveRenderCommandEncoder);
    fActiveRenderCommandEncoder->endEncoding();
    fActiveRenderCommandEncoder.reset();
    fDrawIsOffscreen = false;
}

void MtlCommandBuffer::addDrawPass(const DrawPass* drawPass) {
    const SkIRect replayedBounds = drawPass->bounds().makeOffset(fReplayTranslation.x(),
                                                                 fReplayTranslation.y());
    if (!SkIRect::Intersects(replayedBounds, fRenderPassBounds)) {
        // The entire DrawPass is offscreen given the replay translation so skip adding any
        // commands. When the DrawPass is partially offscreen individual draw commands will be
        // culled while preserving state changing commands.
        return;
    }

    drawPass->addResourceRefs(this);

    for (auto [type, cmdPtr] : drawPass->commands()) {
        // Skip draw commands if they'd be offscreen.
        if (fDrawIsOffscreen) {
            switch (type) {
                case DrawPassCommands::Type::kDraw:
                case DrawPassCommands::Type::kDrawIndexed:
                case DrawPassCommands::Type::kDrawInstanced:
                case DrawPassCommands::Type::kDrawIndexedInstanced:
                    continue;
                default:
                    break;
            }
        }

        switch (type) {
            case DrawPassCommands::Type::kBindGraphicsPipeline: {
                auto bgp = static_cast<DrawPassCommands::BindGraphicsPipeline*>(cmdPtr);
                this->bindGraphicsPipeline(drawPass->getPipeline(bgp->fPipelineIndex));
                break;
            }
            case DrawPassCommands::Type::kSetBlendConstants: {
                auto sbc = static_cast<DrawPassCommands::SetBlendConstants*>(cmdPtr);
                this->setBlendConstants(sbc->fBlendConstants);
                break;
            }
            case DrawPassCommands::Type::kBindUniformBuffer: {
                auto bub = static_cast<DrawPassCommands::BindUniformBuffer*>(cmdPtr);
                this->bindUniformBuffer(bub->fInfo, bub->fSlot);
                break;
            }
            case DrawPassCommands::Type::kBindDrawBuffers: {
                auto bdb = static_cast<DrawPassCommands::BindDrawBuffers*>(cmdPtr);
                this->bindDrawBuffers(
                        bdb->fVertices, bdb->fInstances, bdb->fIndices, bdb->fIndirect);
                break;
            }
            case DrawPassCommands::Type::kBindTexturesAndSamplers: {
                auto bts = static_cast<DrawPassCommands::BindTexturesAndSamplers*>(cmdPtr);
                for (int j = 0; j < bts->fNumTexSamplers; ++j) {
                    this->bindTextureAndSampler(drawPass->getTexture(bts->fTextureIndices[j]),
                                                drawPass->getSampler(bts->fSamplerIndices[j]),
                                                j);
                }
                break;
            }
            case DrawPassCommands::Type::kSetScissor: {
                auto ss = static_cast<DrawPassCommands::SetScissor*>(cmdPtr);
                this->setScissor(ss->fScissor);
                break;
            }
            case DrawPassCommands::Type::kDraw: {
                auto draw = static_cast<DrawPassCommands::Draw*>(cmdPtr);
                this->draw(draw->fType, draw->fBaseVertex, draw->fVertexCount);
                break;
            }
            case DrawPassCommands::Type::kDrawIndexed: {
                auto draw = static_cast<DrawPassCommands::DrawIndexed*>(cmdPtr);
                this->drawIndexed(draw->fType,
                                  draw->fBaseIndex,
                                  draw->fIndexCount,
                                  draw->fBaseVertex);
                break;
            }
            case DrawPassCommands::Type::kDrawInstanced: {
                auto draw = static_cast<DrawPassCommands::DrawInstanced*>(cmdPtr);
                this->drawInstanced(draw->fType,
                                    draw->fBaseVertex,
                                    draw->fVertexCount,
                                    draw->fBaseInstance,
                                    draw->fInstanceCount);
                break;
            }
            case DrawPassCommands::Type::kDrawIndexedInstanced: {
                auto draw = static_cast<DrawPassCommands::DrawIndexedInstanced*>(cmdPtr);
                this->drawIndexedInstanced(draw->fType,
                                           draw->fBaseIndex,
                                           draw->fIndexCount,
                                           draw->fBaseVertex,
                                           draw->fBaseInstance,
                                           draw->fInstanceCount);
                break;
            }
            case DrawPassCommands::Type::kDrawIndirect: {
                auto draw = static_cast<DrawPassCommands::DrawIndirect*>(cmdPtr);
                this->drawIndirect(draw->fType);
                break;
            }
            case DrawPassCommands::Type::kDrawIndexedIndirect: {
                auto draw = static_cast<DrawPassCommands::DrawIndexedIndirect*>(cmdPtr);
                this->drawIndexedIndirect(draw->fType);
                break;
            }
            case DrawPassCommands::Type::kAddBarrier: {
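                // Metal's automatic hazard tracking generally makes explicit
                // barriers unnecessary, so this backend does not implement them.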
                SKGPU_LOG_E("MtlCommandBuffer does not support the addition of barriers.");
                break;
            }
        }
    }
}

MtlBlitCommandEncoder* MtlCommandBuffer::getBlitCommandEncoder() {
    if (fActiveBlitCommandEncoder) {
        return fActiveBlitCommandEncoder.get();
    }

    fActiveBlitCommandEncoder = MtlBlitCommandEncoder::Make(fSharedContext, fCommandBuffer.get());

    if (!fActiveBlitCommandEncoder) {
        return nullptr;
    }

    // We add the ref on the command buffer for the BlitCommandEncoder now so that we don't need
    // to add a ref for every copy we do.
    this->trackResource(fActiveBlitCommandEncoder);
    return fActiveBlitCommandEncoder.get();
}

void MtlCommandBuffer::endBlitCommandEncoder() {
    if (fActiveBlitCommandEncoder) {
        fActiveBlitCommandEncoder->endEncoding();
        fActiveBlitCommandEncoder.reset();
    }
}

void MtlCommandBuffer::bindGraphicsPipeline(const GraphicsPipeline* graphicsPipeline) {
    SkASSERT(fActiveRenderCommandEncoder);

    auto mtlPipeline = static_cast<const MtlGraphicsPipeline*>(graphicsPipeline);
    auto pipelineState = mtlPipeline->mtlPipelineState();
    fActiveRenderCommandEncoder->setRenderPipelineState(pipelineState);
    auto depthStencilState = mtlPipeline->mtlDepthStencilState();
    fActiveRenderCommandEncoder->setDepthStencilState(depthStencilState);
    uint32_t stencilRefValue = mtlPipeline->stencilReferenceValue();
    fActiveRenderCommandEncoder->setStencilReferenceValue(stencilRefValue);

    if (graphicsPipeline->dstReadStrategy() == DstReadStrategy::kTextureCopy) {
        // The last texture binding is reserved for the dstCopy texture, which is not included in
        // the list on each BindTexturesAndSamplers command. We can set it once now and any
        // subsequent BindTexturesAndSamplers commands in a DrawPass will set the other N-1.
        SkASSERT(fDstCopy.first && fDstCopy.second);
        const int textureIndex = graphicsPipeline->numFragTexturesAndSamplers() - 1;
        this->bindTextureAndSampler(fDstCopy.first, fDstCopy.second, textureIndex);
    }
}

void MtlCommandBuffer::bindUniformBuffer(const BindBufferInfo& info, UniformSlot slot) {
    SkASSERT(fActiveRenderCommandEncoder);

    id<MTLBuffer> mtlBuffer = info.fBuffer ?
            static_cast<const MtlBuffer*>(info.fBuffer)->mtlBuffer() : nullptr;

    unsigned int bufferIndex;
    switch (slot) {
        case UniformSlot::kRenderStep:
            bufferIndex = MtlGraphicsPipeline::kRenderStepUniformBufferIndex;
            break;
        case UniformSlot::kPaint:
            bufferIndex = MtlGraphicsPipeline::kPaintUniformBufferIndex;
            break;
        case UniformSlot::kGradient:
            bufferIndex = MtlGraphicsPipeline::kGradientBufferIndex;
            break;
    }

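    // Metal keeps a separate buffer argument table per stage, so bind the uniforms
    // to both the vertex and fragment stages.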
    fActiveRenderCommandEncoder->setVertexBuffer(mtlBuffer, info.fOffset, bufferIndex);
    fActiveRenderCommandEncoder->setFragmentBuffer(mtlBuffer, info.fOffset, bufferIndex);
}

void MtlCommandBuffer::bindDrawBuffers(const BindBufferInfo& vertices,
                                       const BindBufferInfo& instances,
                                       const BindBufferInfo& indices,
                                       const BindBufferInfo& indirect) {
    this->bindVertexBuffers(vertices.fBuffer,
                            vertices.fOffset,
                            instances.fBuffer,
                            instances.fOffset);
    this->bindIndexBuffer(indices.fBuffer, indices.fOffset);
    this->bindIndirectBuffer(indirect.fBuffer, indirect.fOffset);
}

void MtlCommandBuffer::bindVertexBuffers(const Buffer* vertexBuffer,
                                         size_t vertexOffset,
                                         const Buffer* instanceBuffer,
                                         size_t instanceOffset) {
    SkASSERT(fActiveRenderCommandEncoder);

    if (vertexBuffer) {
        id<MTLBuffer> mtlBuffer = static_cast<const MtlBuffer*>(vertexBuffer)->mtlBuffer();
        // Metal requires buffer offsets to be aligned to the data type, which is at most 4 bytes
        // since we use [[attribute]] to automatically unpack float components into SIMD arrays.
        SkASSERT((vertexOffset & 0b11) == 0);
        fActiveRenderCommandEncoder->setVertexBuffer(mtlBuffer, vertexOffset,
                                                     MtlGraphicsPipeline::kVertexBufferIndex);
    }
    if (instanceBuffer) {
        id<MTLBuffer> mtlBuffer = static_cast<const MtlBuffer*>(instanceBuffer)->mtlBuffer();
        SkASSERT((instanceOffset & 0b11) == 0);
        fActiveRenderCommandEncoder->setVertexBuffer(mtlBuffer, instanceOffset,
                                                     MtlGraphicsPipeline::kInstanceBufferIndex);
    }
}

void MtlCommandBuffer::bindIndexBuffer(const Buffer* indexBuffer, size_t offset) {
    if (indexBuffer) {
        fCurrentIndexBuffer = static_cast<const MtlBuffer*>(indexBuffer)->mtlBuffer();
        fCurrentIndexBufferOffset = offset;
    } else {
        fCurrentIndexBuffer = nil;
        fCurrentIndexBufferOffset = 0;
    }
}

void MtlCommandBuffer::bindIndirectBuffer(const Buffer* indirectBuffer, size_t offset) {
    if (indirectBuffer) {
        fCurrentIndirectBuffer = static_cast<const MtlBuffer*>(indirectBuffer)->mtlBuffer();
        fCurrentIndirectBufferOffset = offset;
    } else {
        fCurrentIndirectBuffer = nil;
        fCurrentIndirectBufferOffset = 0;
    }
}

void MtlCommandBuffer::bindTextureAndSampler(const Texture* texture,
                                             const Sampler* sampler,
                                             unsigned int bindIndex) {
    SkASSERT(texture && sampler);
    SkASSERT(fActiveRenderCommandEncoder);

    id<MTLTexture> mtlTexture = ((const MtlTexture*)texture)->mtlTexture();
    id<MTLSamplerState> mtlSamplerState = ((const MtlSampler*)sampler)->mtlSamplerState();
    fActiveRenderCommandEncoder->setFragmentTexture(mtlTexture, bindIndex);
    fActiveRenderCommandEncoder->setFragmentSamplerState(mtlSamplerState, bindIndex);
}

void MtlCommandBuffer::setScissor(const Scissor& scissor) {
    SkASSERT(fActiveRenderCommandEncoder);

    SkIRect rect = scissor.getRect(fReplayTranslation, fRenderPassBounds);
    fDrawIsOffscreen = rect.isEmpty();
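    // An empty scissor after clamping means subsequent draw commands can be culled
    // entirely (see addDrawPass()).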

    fActiveRenderCommandEncoder->setScissorRect({
            static_cast<unsigned int>(rect.x()),
            static_cast<unsigned int>(rect.y()),
            static_cast<unsigned int>(rect.width()),
            static_cast<unsigned int>(rect.height()),
    });
}

void MtlCommandBuffer::setViewport(float x, float y, float width, float height,
                                   float minDepth, float maxDepth) {
    SkASSERT(fActiveRenderCommandEncoder);
    MTLViewport viewport = {x,
                            y,
                            width,
                            height,
                            minDepth,
                            maxDepth};
    fActiveRenderCommandEncoder->setViewport(viewport);
}

void MtlCommandBuffer::updateIntrinsicUniforms(SkIRect viewport) {
    UniformManager intrinsicValues{Layout::kMetal};
    CollectIntrinsicUniforms(fSharedContext->caps(), viewport, fDstReadBounds, &intrinsicValues);
    SkSpan<const char> bytes = intrinsicValues.finish();
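    // setVertexBytes/setFragmentBytes copy the data directly into the command stream,
    // avoiding a transient MTLBuffer; Metal recommends this for small (< 4KB) payloads.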
    fActiveRenderCommandEncoder->setVertexBytes(
            bytes.data(), bytes.size_bytes(), MtlGraphicsPipeline::kIntrinsicUniformBufferIndex);
    fActiveRenderCommandEncoder->setFragmentBytes(
            bytes.data(), bytes.size_bytes(), MtlGraphicsPipeline::kIntrinsicUniformBufferIndex);
}

void MtlCommandBuffer::setBlendConstants(float* blendConstants) {
    SkASSERT(fActiveRenderCommandEncoder);

    fActiveRenderCommandEncoder->setBlendColor(blendConstants);
}

static MTLPrimitiveType graphite_to_mtl_primitive(PrimitiveType primitiveType) {
    const static MTLPrimitiveType mtlPrimitiveType[] {
        MTLPrimitiveTypeTriangle,
        MTLPrimitiveTypeTriangleStrip,
        MTLPrimitiveTypePoint,
    };
    static_assert((int)PrimitiveType::kTriangles == 0);
    static_assert((int)PrimitiveType::kTriangleStrip == 1);
    static_assert((int)PrimitiveType::kPoints == 2);

    SkASSERT(primitiveType <= PrimitiveType::kPoints);
    return mtlPrimitiveType[static_cast<int>(primitiveType)];
}

void MtlCommandBuffer::draw(PrimitiveType type,
                            unsigned int baseVertex,
                            unsigned int vertexCount) {
    SkASSERT(fActiveRenderCommandEncoder);

    auto mtlPrimitiveType = graphite_to_mtl_primitive(type);

    fActiveRenderCommandEncoder->drawPrimitives(mtlPrimitiveType, baseVertex, vertexCount);
}

void MtlCommandBuffer::drawIndexed(PrimitiveType type, unsigned int baseIndex,
                                   unsigned int indexCount, unsigned int baseVertex) {
    SkASSERT(fActiveRenderCommandEncoder);

    if (@available(macOS 10.11, iOS 9.0, tvOS 9.0, *)) {
        auto mtlPrimitiveType = graphite_to_mtl_primitive(type);
        size_t indexOffset = fCurrentIndexBufferOffset + sizeof(uint16_t) * baseIndex;
        // Use the "instanced" variant with a count of 1 so that we can pass in a base vertex
        // instead of rebinding a vertex buffer offset.
        fActiveRenderCommandEncoder->drawIndexedPrimitives(mtlPrimitiveType, indexCount,
                                                           MTLIndexTypeUInt16, fCurrentIndexBuffer,
                                                           indexOffset, 1, baseVertex, 0);
    } else {
        SKGPU_LOG_E("Skipping unsupported draw call.");
    }
}

void MtlCommandBuffer::drawInstanced(PrimitiveType type, unsigned int baseVertex,
                                     unsigned int vertexCount, unsigned int baseInstance,
                                     unsigned int instanceCount) {
    SkASSERT(fActiveRenderCommandEncoder);

    auto mtlPrimitiveType = graphite_to_mtl_primitive(type);

    // Note the argument order: baseVertex precedes vertexCount, and instanceCount
    // precedes baseInstance.
    fActiveRenderCommandEncoder->drawPrimitives(mtlPrimitiveType, baseVertex, vertexCount,
                                                instanceCount, baseInstance);
}

void MtlCommandBuffer::drawIndexedInstanced(PrimitiveType type,
                                            unsigned int baseIndex,
                                            unsigned int indexCount,
                                            unsigned int baseVertex,
                                            unsigned int baseInstance,
                                            unsigned int instanceCount) {
    SkASSERT(fActiveRenderCommandEncoder);

    if (@available(macOS 10.11, iOS 9.0, tvOS 9.0, *)) {
        auto mtlPrimitiveType = graphite_to_mtl_primitive(type);
        size_t indexOffset = fCurrentIndexBufferOffset + sizeof(uint16_t) * baseIndex;
        fActiveRenderCommandEncoder->drawIndexedPrimitives(mtlPrimitiveType, indexCount,
                                                           MTLIndexTypeUInt16, fCurrentIndexBuffer,
                                                           indexOffset, instanceCount,
                                                           baseVertex, baseInstance);
    } else {
        SKGPU_LOG_E("Skipping unsupported draw call.");
    }
}

void MtlCommandBuffer::drawIndirect(PrimitiveType type) {
    SkASSERT(fActiveRenderCommandEncoder);
    SkASSERT(fCurrentIndirectBuffer);

    if (@available(macOS 10.11, iOS 9.0, tvOS 9.0, *)) {
        auto mtlPrimitiveType = graphite_to_mtl_primitive(type);
        fActiveRenderCommandEncoder->drawPrimitives(
                mtlPrimitiveType, fCurrentIndirectBuffer, fCurrentIndirectBufferOffset);
    } else {
        SKGPU_LOG_E("Skipping unsupported draw call.");
    }
}

void MtlCommandBuffer::drawIndexedIndirect(PrimitiveType type) {
    SkASSERT(fActiveRenderCommandEncoder);
    SkASSERT(fCurrentIndirectBuffer);

    if (@available(macOS 10.11, iOS 9.0, tvOS 9.0, *)) {
        auto mtlPrimitiveType = graphite_to_mtl_primitive(type);
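        // Note: the indirect indexed path uses 32-bit indices, unlike drawIndexed()
        // above, which uses 16-bit indices.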
        fActiveRenderCommandEncoder->drawIndexedPrimitives(mtlPrimitiveType,
                                                           MTLIndexTypeUInt32,
                                                           fCurrentIndexBuffer,
                                                           fCurrentIndexBufferOffset,
                                                           fCurrentIndirectBuffer,
                                                           fCurrentIndirectBufferOffset);
    } else {
        SKGPU_LOG_E("Skipping unsupported draw call.");
    }
}

void MtlCommandBuffer::beginComputePass() {
    SkASSERT(!fActiveRenderCommandEncoder);
    SkASSERT(!fActiveComputeCommandEncoder);
    this->endBlitCommandEncoder();
    fActiveComputeCommandEncoder = MtlComputeCommandEncoder::Make(fSharedContext,
                                                                  fCommandBuffer.get());
}

void MtlCommandBuffer::bindComputePipeline(const ComputePipeline* computePipeline) {
    SkASSERT(fActiveComputeCommandEncoder);

    auto mtlPipeline = static_cast<const MtlComputePipeline*>(computePipeline);
    fActiveComputeCommandEncoder->setComputePipelineState(mtlPipeline->mtlPipelineState());
}

void MtlCommandBuffer::bindBuffer(const Buffer* buffer, unsigned int offset, unsigned int index) {
    SkASSERT(fActiveComputeCommandEncoder);

    id<MTLBuffer> mtlBuffer = buffer ? static_cast<const MtlBuffer*>(buffer)->mtlBuffer() : nil;
    fActiveComputeCommandEncoder->setBuffer(mtlBuffer, offset, index);
}

void MtlCommandBuffer::bindTexture(const Texture* texture, unsigned int index) {
    SkASSERT(fActiveComputeCommandEncoder);

    id<MTLTexture> mtlTexture =
            texture ? static_cast<const MtlTexture*>(texture)->mtlTexture() : nil;
    fActiveComputeCommandEncoder->setTexture(mtlTexture, index);
}

void MtlCommandBuffer::bindSampler(const Sampler* sampler, unsigned int index) {
    SkASSERT(fActiveComputeCommandEncoder);

    id<MTLSamplerState> mtlSamplerState =
            sampler ? static_cast<const MtlSampler*>(sampler)->mtlSamplerState() : nil;
    fActiveComputeCommandEncoder->setSamplerState(mtlSamplerState, index);
}

void MtlCommandBuffer::dispatchThreadgroups(const WorkgroupSize& globalSize,
                                            const WorkgroupSize& localSize) {
    SkASSERT(fActiveComputeCommandEncoder);
    fActiveComputeCommandEncoder->dispatchThreadgroups(globalSize, localSize);
}

void MtlCommandBuffer::dispatchThreadgroupsIndirect(const WorkgroupSize& localSize,
                                                    const Buffer* indirectBuffer,
                                                    size_t indirectBufferOffset) {
    SkASSERT(fActiveComputeCommandEncoder);

    id<MTLBuffer> mtlIndirectBuffer = static_cast<const MtlBuffer*>(indirectBuffer)->mtlBuffer();
    fActiveComputeCommandEncoder->dispatchThreadgroupsWithIndirectBuffer(
            mtlIndirectBuffer, indirectBufferOffset, localSize);
}

void MtlCommandBuffer::endComputePass() {
    SkASSERT(fActiveComputeCommandEncoder);
    fActiveComputeCommandEncoder->endEncoding();
    fActiveComputeCommandEncoder.reset();
}

static bool check_max_blit_width(int widthInPixels) {
    if (widthInPixels > 32767) {
        SkASSERT(false); // surfaces should not be this wide anyway
        return false;
    }
    return true;
}

bool MtlCommandBuffer::onCopyBufferToBuffer(const Buffer* srcBuffer,
                                            size_t srcOffset,
                                            const Buffer* dstBuffer,
                                            size_t dstOffset,
                                            size_t size) {
    SkASSERT(!fActiveRenderCommandEncoder);
    SkASSERT(!fActiveComputeCommandEncoder);

    id<MTLBuffer> mtlSrcBuffer = static_cast<const MtlBuffer*>(srcBuffer)->mtlBuffer();
    id<MTLBuffer> mtlDstBuffer = static_cast<const MtlBuffer*>(dstBuffer)->mtlBuffer();

    MtlBlitCommandEncoder* blitCmdEncoder = this->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return false;
    }

#ifdef SK_ENABLE_MTL_DEBUG_INFO
    blitCmdEncoder->pushDebugGroup(@"copyBufferToBuffer");
#endif
    blitCmdEncoder->copyBufferToBuffer(mtlSrcBuffer, srcOffset, mtlDstBuffer, dstOffset, size);
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    blitCmdEncoder->popDebugGroup();
#endif
    return true;
}

bool MtlCommandBuffer::onCopyTextureToBuffer(const Texture* texture,
                                             SkIRect srcRect,
                                             const Buffer* buffer,
                                             size_t bufferOffset,
                                             size_t bufferRowBytes) {
    SkASSERT(!fActiveRenderCommandEncoder);
    SkASSERT(!fActiveComputeCommandEncoder);

    if (!check_max_blit_width(srcRect.width())) {
        return false;
    }

    id<MTLTexture> mtlTexture = static_cast<const MtlTexture*>(texture)->mtlTexture();
    id<MTLBuffer> mtlBuffer = static_cast<const MtlBuffer*>(buffer)->mtlBuffer();

    MtlBlitCommandEncoder* blitCmdEncoder = this->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return false;
    }

#ifdef SK_ENABLE_MTL_DEBUG_INFO
    blitCmdEncoder->pushDebugGroup(@"copyTextureToBuffer");
#endif
    blitCmdEncoder->copyFromTexture(mtlTexture, srcRect, mtlBuffer, bufferOffset, bufferRowBytes);
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    blitCmdEncoder->popDebugGroup();
#endif
    return true;
}

bool MtlCommandBuffer::onCopyBufferToTexture(const Buffer* buffer,
                                             const Texture* texture,
                                             const BufferTextureCopyData* copyData,
                                             int count) {
    SkASSERT(!fActiveRenderCommandEncoder);
    SkASSERT(!fActiveComputeCommandEncoder);

    id<MTLBuffer> mtlBuffer = static_cast<const MtlBuffer*>(buffer)->mtlBuffer();
    id<MTLTexture> mtlTexture = static_cast<const MtlTexture*>(texture)->mtlTexture();

    MtlBlitCommandEncoder* blitCmdEncoder = this->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return false;
    }

#ifdef SK_ENABLE_MTL_DEBUG_INFO
    blitCmdEncoder->pushDebugGroup(@"copyBufferToTexture");
#endif
    for (int i = 0; i < count; ++i) {
        if (!check_max_blit_width(copyData[i].fRect.width())) {
            return false;
        }

        blitCmdEncoder->copyFromBuffer(mtlBuffer,
                                       copyData[i].fBufferOffset,
                                       copyData[i].fBufferRowBytes,
                                       mtlTexture,
                                       copyData[i].fRect,
                                       copyData[i].fMipLevel);
    }

#ifdef SK_ENABLE_MTL_DEBUG_INFO
    blitCmdEncoder->popDebugGroup();
#endif
    return true;
}

bool MtlCommandBuffer::onCopyTextureToTexture(const Texture* src,
                                              SkIRect srcRect,
                                              const Texture* dst,
                                              SkIPoint dstPoint,
                                              int mipLevel) {
    SkASSERT(!fActiveRenderCommandEncoder);
    SkASSERT(!fActiveComputeCommandEncoder);

    id<MTLTexture> srcMtlTexture = static_cast<const MtlTexture*>(src)->mtlTexture();
    id<MTLTexture> dstMtlTexture = static_cast<const MtlTexture*>(dst)->mtlTexture();

    MtlBlitCommandEncoder* blitCmdEncoder = this->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return false;
    }

#ifdef SK_ENABLE_MTL_DEBUG_INFO
    blitCmdEncoder->pushDebugGroup(@"copyTextureToTexture");
#endif

    blitCmdEncoder->copyTextureToTexture(srcMtlTexture, srcRect, dstMtlTexture, dstPoint, mipLevel);

#ifdef SK_ENABLE_MTL_DEBUG_INFO
    blitCmdEncoder->popDebugGroup();
#endif
    return true;
}

bool MtlCommandBuffer::onSynchronizeBufferToCpu(const Buffer* buffer, bool* outDidResultInWork) {
#ifdef SK_BUILD_FOR_MAC
    SkASSERT(!fActiveRenderCommandEncoder);
    SkASSERT(!fActiveComputeCommandEncoder);

    id<MTLBuffer> mtlBuffer = static_cast<const MtlBuffer*>(buffer)->mtlBuffer();
    if ([mtlBuffer storageMode] != MTLStorageModeManaged) {
        *outDidResultInWork = false;
        return true;
    }

    MtlBlitCommandEncoder* blitCmdEncoder = this->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return false;
    }

#ifdef SK_ENABLE_MTL_DEBUG_INFO
    blitCmdEncoder->pushDebugGroup(@"synchronizeToCpu");
#endif
    blitCmdEncoder->synchronizeResource(mtlBuffer);
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    blitCmdEncoder->popDebugGroup();
#endif

    *outDidResultInWork = true;
    return true;
#else   // SK_BUILD_FOR_MAC
    // Explicit synchronization is never necessary on builds that are not macOS since we never use
    // discrete GPUs with managed mode buffers outside of macOS.
    *outDidResultInWork = false;
    return true;
#endif  // SK_BUILD_FOR_MAC
}

bool MtlCommandBuffer::onClearBuffer(const Buffer* buffer, size_t offset, size_t size) {
    SkASSERT(!fActiveRenderCommandEncoder);
    SkASSERT(!fActiveComputeCommandEncoder);

    MtlBlitCommandEncoder* blitCmdEncoder = this->getBlitCommandEncoder();
    if (!blitCmdEncoder) {
        return false;
    }

    id<MTLBuffer> mtlBuffer = static_cast<const MtlBuffer*>(buffer)->mtlBuffer();
    blitCmdEncoder->fillBuffer(mtlBuffer, offset, size, 0);

    return true;
}

} // namespace skgpu::graphite