/*
 * Copyright 2018 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/mtl/GrMtlOpsRenderPass.h"

#include "src/gpu/GrBackendUtils.h"
#include "src/gpu/GrColor.h"
#include "src/gpu/GrRenderTarget.h"
#include "src/gpu/mtl/GrMtlCommandBuffer.h"
#include "src/gpu/mtl/GrMtlPipelineState.h"
#include "src/gpu/mtl/GrMtlPipelineStateBuilder.h"
#include "src/gpu/mtl/GrMtlRenderTarget.h"
#include "src/gpu/mtl/GrMtlTexture.h"

#if !__has_feature(objc_arc)
#error This file must be compiled with Arc. Use -fobjc-arc flag
#endif

GR_NORETAIN_BEGIN
// Creates a render pass for the given target. The heavy lifting (building the
// MTLRenderPassDescriptor and handling any initial load-op clears) happens in
// setupRenderPass().
GrMtlOpsRenderPass::GrMtlOpsRenderPass(GrMtlGpu* gpu, GrRenderTarget* rt, GrSurfaceOrigin origin,
                                       const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
                                       const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo)
        : INHERITED(rt, origin)
        , fGpu(gpu) {
    this->setupRenderPass(colorInfo, stencilInfo);
}

// Nothing to release explicitly: this file is compiled with ARC, so the Metal
// objects held by this class are released automatically.
GrMtlOpsRenderPass::~GrMtlOpsRenderPass() {
}

// Forces creation (and submission) of a render command encoder for the current
// fRenderPassDesc, even when no draw will follow. Used so that clear load actions
// take effect on their own.
void GrMtlOpsRenderPass::precreateCmdEncoder() {
    // For clears, we may not have an associated draw. So we prepare a cmdEncoder that
    // will be submitted whether there's a draw or not.
    // The encoder handle is only needed for the debug assertion below; in release
    // builds the call is made purely for its side effect on the command buffer.
    SkDEBUGCODE(id<MTLRenderCommandEncoder> cmdEncoder =)
            fGpu->commandBuffer()->getRenderCommandEncoder(fRenderPassDesc, nullptr, this);
    SkASSERT(nil != cmdEncoder);
}

// Hands the accumulated work for this render pass off to the GPU and drops our
// reference to the active encoder. A pass with no render target has nothing to submit.
void GrMtlOpsRenderPass::submit() {
    if (fRenderTarget) {
        // Convert the accumulated draw bounds to integer pixels for the resolve.
        SkIRect boundsRect;
        fBounds.roundOut(&boundsRect);
        fGpu->submitIndirectCommandBuffer(fRenderTarget, fOrigin, &boundsRect);
        fActiveRenderCmdEncoder = nil;
    }
}

// Maps a GrPrimitiveType onto the corresponding Metal primitive type.
static MTLPrimitiveType gr_to_mtl_primitive(GrPrimitiveType primitiveType) {
    SkASSERT(primitiveType <= GrPrimitiveType::kLineStrip);
    switch (primitiveType) {
        case GrPrimitiveType::kTriangles:
            return MTLPrimitiveTypeTriangle;
        case GrPrimitiveType::kTriangleStrip:
            return MTLPrimitiveTypeTriangleStrip;
        case GrPrimitiveType::kPoints:
            return MTLPrimitiveTypePoint;
        case GrPrimitiveType::kLines:
            return MTLPrimitiveTypeLine;
        case GrPrimitiveType::kLineStrip:
        default:
            return MTLPrimitiveTypeLineStrip;
    }
}

// Finds or builds the Metal pipeline state for 'programInfo', binds it on the active
// render command encoder (creating the encoder lazily if needed), and records
// per-pipeline dynamic state. Returns false if a pipeline could not be created.
bool GrMtlOpsRenderPass::onBindPipeline(const GrProgramInfo& programInfo,
                                        const SkRect& drawBounds) {
    const GrMtlCaps& caps = fGpu->mtlCaps();
    GrProgramDesc programDesc = caps.makeDesc(fRenderTarget, programInfo,
                                              GrCaps::ProgramDescOverrideFlags::kNone);
    if (!programDesc.isValid()) {
        return false;
    }

    // The resource provider caches pipeline states keyed by the program descriptor.
    fActivePipelineState = fGpu->resourceProvider().findOrCreateCompatiblePipelineState(
            programDesc, programInfo);
    if (!fActivePipelineState) {
        return false;
    }

    fActivePipelineState->setData(fRenderTarget, programInfo);
    // Remembered so onDrawIndexed() can translate baseVertex into a buffer offset.
    fCurrentVertexStride = programInfo.geomProc().vertexStride();

    // The encoder is created lazily (see setupRenderPass()); make sure one exists
    // before we start setting state on it.
    if (!fActiveRenderCmdEncoder) {
        fActiveRenderCmdEncoder =
                fGpu->commandBuffer()->getRenderCommandEncoder(fRenderPassDesc, nullptr, this);
    }

    [fActiveRenderCmdEncoder setRenderPipelineState:fActivePipelineState->mtlPipelineState()];
    fActivePipelineState->setDrawState(fActiveRenderCmdEncoder,
                                       programInfo.pipeline().writeSwizzle(),
                                       programInfo.pipeline().getXferProcessor());
    if (this->gpu()->caps()->wireframeMode() || programInfo.pipeline().isWireframe()) {
        [fActiveRenderCmdEncoder setTriangleFillMode:MTLTriangleFillModeLines];
    } else {
        [fActiveRenderCmdEncoder setTriangleFillMode:MTLTriangleFillModeFill];
    }

    if (!programInfo.pipeline().isScissorTestEnabled()) {
        // "Disable" scissor by setting it to the full pipeline bounds.
        GrMtlPipelineState::SetDynamicScissorRectState(fActiveRenderCmdEncoder,
                                                       fRenderTarget, fOrigin,
                                                       SkIRect::MakeWH(fRenderTarget->width(),
                                                                       fRenderTarget->height()));
    }

    fActivePrimitiveType = gr_to_mtl_primitive(programInfo.primitiveType());
    // Grow the pass bounds so submit() flushes a region covering this draw.
    fBounds.join(drawBounds);
    return true;
}

// Applies a scissor rect for the currently bound pipeline. Requires that
// onBindPipeline() has already succeeded (pipeline state and encoder exist).
void GrMtlOpsRenderPass::onSetScissorRect(const SkIRect& scissor) {
    SkASSERT(fActivePipelineState);
    SkASSERT(fActiveRenderCmdEncoder);
    GrMtlPipelineState::SetDynamicScissorRectState(fActiveRenderCmdEncoder, fRenderTarget,
                                                   fOrigin, scissor);
}

// Records the textures/samplers for the bound pipeline and binds them on the
// active encoder. Always succeeds (returns true).
bool GrMtlOpsRenderPass::onBindTextures(const GrGeometryProcessor& geomProc,
                                        const GrSurfaceProxy* const geomProcTextures[],
                                        const GrPipeline& pipeline) {
    SkASSERT(fActivePipelineState);
    SkASSERT(fActiveRenderCmdEncoder);
    fActivePipelineState->setTextures(geomProc, pipeline, geomProcTextures);
    fActivePipelineState->bindTextures(fActiveRenderCmdEncoder);
    return true;
}

// Clears the full color attachment to 'color'. Implemented by temporarily switching
// the attachment's load action to Clear, creating a throwaway encoder so the clear
// executes, then restoring Load so subsequent encoders don't re-clear.
void GrMtlOpsRenderPass::onClear(const GrScissorState& scissor, std::array<float, 4> color) {
    // Partial clears are not supported
    SkASSERT(!scissor.enabled());

    // Ideally we should never end up here since all clears should either be done as draws or
    // load ops in metal. However, if a client inserts a wait op we need to handle it.
    auto colorAttachment = fRenderPassDesc.colorAttachments[0];
    colorAttachment.clearColor = MTLClearColorMake(color[0], color[1], color[2], color[3]);
    colorAttachment.loadAction = MTLLoadActionClear;
    this->precreateCmdEncoder();
    // Restore Load so the next encoder picks up the cleared contents rather than
    // clearing again.
    colorAttachment.loadAction = MTLLoadActionLoad;
    fActiveRenderCmdEncoder =
            fGpu->commandBuffer()->getRenderCommandEncoder(fRenderPassDesc, nullptr, this);
}

// Clears the stencil clip. Uses the same temporary-load-action trick as onClear():
// flip to Clear, run a throwaway encoder, then restore Load.
void GrMtlOpsRenderPass::onClearStencilClip(const GrScissorState& scissor, bool insideStencilMask) {
    // Partial clears are not supported
    SkASSERT(!scissor.enabled());

    GrAttachment* sb = fRenderTarget->getStencilAttachment();
    // this should only be called internally when we know we have a
    // stencil buffer.
    SkASSERT(sb);
    int stencilBitCount = GrBackendFormatStencilBits(sb->backendFormat());

    // The contract with the callers does not guarantee that we preserve all bits in the stencil
    // during this clear. Thus we will clear the entire stencil to the desired value.
    auto stencilAttachment = fRenderPassDesc.stencilAttachment;
    if (insideStencilMask) {
        // Set only the most-significant stencil bit (presumably the clip bit —
        // matches the stencil-clip convention used by other backends).
        stencilAttachment.clearStencil = (1 << (stencilBitCount - 1));
    } else {
        stencilAttachment.clearStencil = 0;
    }

    stencilAttachment.loadAction = MTLLoadActionClear;
    this->precreateCmdEncoder();
    // Restore Load so subsequent encoders keep the just-cleared stencil values.
    stencilAttachment.loadAction = MTLLoadActionLoad;
    fActiveRenderCmdEncoder =
            fGpu->commandBuffer()->getRenderCommandEncoder(fRenderPassDesc, nullptr, this);
}

// Performs a deferred texture upload in the middle of the render pass.
void GrMtlOpsRenderPass::inlineUpload(GrOpFlushState* state, GrDeferredTextureUploadFn& upload) {
    // TODO: this could be more efficient
    state->doUpload(upload);
    // doUpload() creates a blitCommandEncoder, so we need to recreate a renderCommandEncoder
    fActiveRenderCmdEncoder =
            fGpu->commandBuffer()->getRenderCommandEncoder(fRenderPassDesc, nullptr, this);
}

// One-time setup for a freshly created render command encoder: winding order,
// viewport, and a clean slate for our cached vertex-buffer bindings.
void GrMtlOpsRenderPass::initRenderState(id<MTLRenderCommandEncoder> encoder) {
    [encoder pushDebugGroup:@"initRenderState"];
    [encoder setFrontFacingWinding:MTLWindingCounterClockwise];
    // Strictly speaking the default viewport already matches the drawable that backs
    // this encoder, but set it explicitly to be safe.
    MTLViewport viewport;
    viewport.originX = 0.0;
    viewport.originY = 0.0;
    viewport.width = (double) fRenderTarget->width();
    viewport.height = (double) fRenderTarget->height();
    viewport.znear = 0.0;
    viewport.zfar = 1.0;
    [encoder setViewport:viewport];
    this->resetBufferBindings();
    [encoder popDebugGroup];
}

// Builds fRenderPassDesc from the color/stencil load-and-store infos, performs any
// requested initial clears via a throwaway encoder, and leaves the real encoder
// uncreated until the first draw.
void GrMtlOpsRenderPass::setupRenderPass(
        const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
        const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo) {
    // Table indexed by GrLoadOp; the static_asserts below pin the enum ordering.
    const static MTLLoadAction mtlLoadAction[] {
        MTLLoadActionLoad,
        MTLLoadActionClear,
        MTLLoadActionDontCare
    };
    static_assert((int)GrLoadOp::kLoad == 0);
    static_assert((int)GrLoadOp::kClear == 1);
    static_assert((int)GrLoadOp::kDiscard == 2);
    SkASSERT(colorInfo.fLoadOp <= GrLoadOp::kDiscard);
    SkASSERT(stencilInfo.fLoadOp <= GrLoadOp::kDiscard);

    // Table indexed by GrStoreOp, same pattern as above.
    const static MTLStoreAction mtlStoreAction[] {
        MTLStoreActionStore,
        MTLStoreActionDontCare
    };
    static_assert((int)GrStoreOp::kStore == 0);
    static_assert((int)GrStoreOp::kDiscard == 1);
    SkASSERT(colorInfo.fStoreOp <= GrStoreOp::kDiscard);
    SkASSERT(stencilInfo.fStoreOp <= GrStoreOp::kDiscard);

    fRenderPassDesc = [MTLRenderPassDescriptor new];
    auto colorAttachment = fRenderPassDesc.colorAttachments[0];
    colorAttachment.texture =
            static_cast<GrMtlRenderTarget*>(fRenderTarget)->mtlColorTexture();
    const std::array<float, 4>& clearColor = colorInfo.fClearColor;
    colorAttachment.clearColor =
            MTLClearColorMake(clearColor[0], clearColor[1], clearColor[2], clearColor[3]);
    colorAttachment.loadAction = mtlLoadAction[static_cast<int>(colorInfo.fLoadOp)];
    colorAttachment.storeAction = mtlStoreAction[static_cast<int>(colorInfo.fStoreOp)];

    auto* stencil = static_cast<GrMtlAttachment*>(fRenderTarget->getStencilAttachment());
    auto mtlStencil = fRenderPassDesc.stencilAttachment;
    if (stencil) {
        mtlStencil.texture = stencil->view();
    }
    mtlStencil.clearStencil = 0;
    mtlStencil.loadAction = mtlLoadAction[static_cast<int>(stencilInfo.fLoadOp)];
    mtlStencil.storeAction = mtlStoreAction[static_cast<int>(stencilInfo.fStoreOp)];

    // Manage initial clears
    if (colorInfo.fLoadOp == GrLoadOp::kClear || stencilInfo.fLoadOp == GrLoadOp::kClear)  {
        // A clear touches the whole target, so the pass bounds start out full-size.
        fBounds = SkRect::MakeWH(fRenderTarget->width(),
                                 fRenderTarget->height());
        // Run a throwaway encoder so the clear load actions execute now, then switch
        // any Clear actions to Load so later encoders don't re-clear.
        this->precreateCmdEncoder();
        if (colorInfo.fLoadOp == GrLoadOp::kClear) {
            colorAttachment.loadAction = MTLLoadActionLoad;
        }
        if (stencilInfo.fLoadOp == GrLoadOp::kClear) {
            mtlStencil.loadAction = MTLLoadActionLoad;
        }
    } else {
        fBounds.setEmpty();
    }

    // For now, we lazily create the renderCommandEncoder because we may have no draws,
    // and an empty renderCommandEncoder can still produce output. This can cause issues
    // when we've cleared a texture upon creation -- we'll subsequently discard the contents.
    // This can be removed when that ordering is fixed.
    fActiveRenderCmdEncoder = nil;
}

// Records the buffers for subsequent draws. The vertex buffer (input slot 0, when
// present) is bound lazily at draw time so per-draw offsets can be applied; the
// instance buffer is bound immediately at the next slot; the index buffer is just
// retained for the indexed-draw calls. All buffers are pinned on the command buffer
// so they outlive GPU execution.
void GrMtlOpsRenderPass::onBindBuffers(sk_sp<const GrBuffer> indexBuffer,
                                       sk_sp<const GrBuffer> instanceBuffer,
                                       sk_sp<const GrBuffer> vertexBuffer,
                                       GrPrimitiveRestart primRestart) {
    SkASSERT(GrPrimitiveRestart::kNo == primRestart);
    int inputBufferIndex = 0;
    if (vertexBuffer) {
        SkASSERT(!vertexBuffer->isCpuBuffer());
        SkASSERT(!static_cast<const GrGpuBuffer*>(vertexBuffer.get())->isMapped());
        fActiveVertexBuffer = std::move(vertexBuffer);
        fGpu->commandBuffer()->addGrBuffer(fActiveVertexBuffer);
        // Reserve slot 0 for the vertex buffer; the instance buffer shifts up by one.
        ++inputBufferIndex;
    }
    if (instanceBuffer) {
        SkASSERT(!instanceBuffer->isCpuBuffer());
        SkASSERT(!static_cast<const GrGpuBuffer*>(instanceBuffer.get())->isMapped());
        this->setVertexBuffer(fActiveRenderCmdEncoder, instanceBuffer.get(), 0, inputBufferIndex++);
        fActiveInstanceBuffer = std::move(instanceBuffer);
        fGpu->commandBuffer()->addGrBuffer(fActiveInstanceBuffer);
    }
    if (indexBuffer) {
        SkASSERT(!indexBuffer->isCpuBuffer());
        SkASSERT(!static_cast<const GrGpuBuffer*>(indexBuffer.get())->isMapped());
        fActiveIndexBuffer = std::move(indexBuffer);
        fGpu->commandBuffer()->addGrBuffer(fActiveIndexBuffer);
    }
}

// Issues a single non-indexed, non-instanced draw with the currently bound pipeline
// and vertex buffer.
void GrMtlOpsRenderPass::onDraw(int vertexCount, int baseVertex) {
    SkASSERT(fActivePipelineState);
    SkASSERT(nil != fActiveRenderCmdEncoder);

    id<MTLRenderCommandEncoder> encoder = fActiveRenderCmdEncoder;
    // Bind the vertex buffer at input slot 0 with no additional offset.
    this->setVertexBuffer(encoder, fActiveVertexBuffer.get(), 0, 0);
    [encoder drawPrimitives:fActivePrimitiveType
                vertexStart:baseVertex
                vertexCount:vertexCount];
    fGpu->stats()->incNumDraws();
}

// Issues a single indexed draw. The non-instanced drawIndexedPrimitives: variant has
// no baseVertex parameter, so baseVertex is folded into the vertex-buffer binding
// offset instead. (minIndexValue/maxIndexValue are unused by this backend.)
void GrMtlOpsRenderPass::onDrawIndexed(int indexCount, int baseIndex, uint16_t minIndexValue,
                                       uint16_t maxIndexValue, int baseVertex) {
    SkASSERT(fActivePipelineState);
    SkASSERT(nil != fActiveRenderCmdEncoder);
    SkASSERT(fActiveIndexBuffer);
    this->setVertexBuffer(fActiveRenderCmdEncoder, fActiveVertexBuffer.get(),
                          fCurrentVertexStride * baseVertex, 0);

    auto mtlIndexBuffer = static_cast<const GrMtlBuffer*>(fActiveIndexBuffer.get());
    // Indices are 16-bit; skip the buffer's sub-allocation offset plus baseIndex entries.
    size_t indexOffset = mtlIndexBuffer->offset() + sizeof(uint16_t) * baseIndex;
    [fActiveRenderCmdEncoder drawIndexedPrimitives:fActivePrimitiveType
                                        indexCount:indexCount
                                         indexType:MTLIndexTypeUInt16
                                       indexBuffer:mtlIndexBuffer->mtlBuffer()
                                 indexBufferOffset:indexOffset];
    fGpu->stats()->incNumDraws();
}

// Issues a single instanced (non-indexed) draw. The baseInstance-capable
// drawPrimitives: variant requires macOS 10.11 / iOS 9.0.
void GrMtlOpsRenderPass::onDrawInstanced(int instanceCount, int baseInstance, int vertexCount,
                                         int baseVertex) {
    SkASSERT(fActivePipelineState);
    SkASSERT(nil != fActiveRenderCmdEncoder);
    this->setVertexBuffer(fActiveRenderCmdEncoder, fActiveVertexBuffer.get(), 0, 0);

    if (@available(macOS 10.11, iOS 9.0, *)) {
        [fActiveRenderCmdEncoder drawPrimitives:fActivePrimitiveType
                                    vertexStart:baseVertex
                                    vertexCount:vertexCount
                                  instanceCount:instanceCount
                                   baseInstance:baseInstance];
    } else {
        // Skia's minimum supported OS versions make this branch unreachable.
        SkASSERT(false);
    }
    fGpu->stats()->incNumDraws();
}

// Issues a single indexed, instanced draw. Unlike onDrawIndexed(), this variant of
// drawIndexedPrimitives: accepts baseVertex directly, so no vertex-offset trick is
// needed. Requires macOS 10.11 / iOS 9.0.
void GrMtlOpsRenderPass::onDrawIndexedInstanced(
        int indexCount, int baseIndex, int instanceCount, int baseInstance, int baseVertex) {
    SkASSERT(fActivePipelineState);
    SkASSERT(nil != fActiveRenderCmdEncoder);
    SkASSERT(fActiveIndexBuffer);
    this->setVertexBuffer(fActiveRenderCmdEncoder, fActiveVertexBuffer.get(), 0, 0);

    auto mtlIndexBuffer = static_cast<const GrMtlBuffer*>(fActiveIndexBuffer.get());
    // Indices are 16-bit; skip the buffer's sub-allocation offset plus baseIndex entries.
    size_t indexOffset = mtlIndexBuffer->offset() + sizeof(uint16_t) * baseIndex;
    if (@available(macOS 10.11, iOS 9.0, *)) {
        [fActiveRenderCmdEncoder drawIndexedPrimitives:fActivePrimitiveType
                                            indexCount:indexCount
                                             indexType:MTLIndexTypeUInt16
                                           indexBuffer:mtlIndexBuffer->mtlBuffer()
                                     indexBufferOffset:indexOffset
                                         instanceCount:instanceCount
                                            baseVertex:baseVertex
                                          baseInstance:baseInstance];
    } else {
        // Skia's minimum supported OS versions make this branch unreachable.
        SkASSERT(false);
    }
    fGpu->stats()->incNumDraws();
}

// Issues 'drawCount' non-indexed draws whose arguments live in 'drawIndirectBuffer'.
// Metal has no multi-draw-indirect, so we loop and issue one draw per
// GrDrawIndirectCommand, advancing 'bufferOffset' by the command stride each time.
void GrMtlOpsRenderPass::onDrawIndirect(const GrBuffer* drawIndirectBuffer,
                                        size_t bufferOffset,
                                        int drawCount) {
    SkASSERT(fGpu->caps()->nativeDrawIndirectSupport());
    SkASSERT(fActivePipelineState);
    SkASSERT(nil != fActiveRenderCmdEncoder);
    this->setVertexBuffer(fActiveRenderCmdEncoder, fActiveVertexBuffer.get(), 0, 0);

    auto mtlIndirectBuffer = static_cast<const GrMtlBuffer*>(drawIndirectBuffer);
    const size_t stride = sizeof(GrDrawIndirectCommand);
    // The availability check is loop-invariant, so hoist it out of the per-draw loop.
    if (@available(macOS 10.11, iOS 9.0, *)) {
        for (int i = 0; i < drawCount; ++i) {
            [fActiveRenderCmdEncoder drawPrimitives:fActivePrimitiveType
                                     indirectBuffer:mtlIndirectBuffer->mtlBuffer()
                               indirectBufferOffset:bufferOffset];
            bufferOffset += stride;
            fGpu->stats()->incNumDraws();
        }
    } else {
        // Skia's minimum supported OS versions make this branch unreachable.
        SkASSERT(false);
    }
}

// Issues 'drawCount' indexed draws whose arguments live in 'drawIndirectBuffer'.
// As with onDrawIndirect(), Metal has no multi-draw-indirect, so each
// GrDrawIndexedIndirectCommand is issued as a separate draw.
void GrMtlOpsRenderPass::onDrawIndexedIndirect(const GrBuffer* drawIndirectBuffer,
                                               size_t bufferOffset,
                                               int drawCount) {
    SkASSERT(fGpu->caps()->nativeDrawIndirectSupport());
    SkASSERT(fActivePipelineState);
    SkASSERT(nil != fActiveRenderCmdEncoder);
    SkASSERT(fActiveIndexBuffer);
    this->setVertexBuffer(fActiveRenderCmdEncoder, fActiveVertexBuffer.get(), 0, 0);

    auto mtlIndexBuffer = static_cast<const GrMtlBuffer*>(fActiveIndexBuffer.get());
    auto mtlIndirectBuffer = static_cast<const GrMtlBuffer*>(drawIndirectBuffer);
    // Per-draw index offsets come from the indirect commands; we only account for the
    // index buffer's sub-allocation offset here.
    size_t indexOffset = mtlIndexBuffer->offset();

    const size_t stride = sizeof(GrDrawIndexedIndirectCommand);
    // The availability check is loop-invariant, so hoist it out of the per-draw loop.
    if (@available(macOS 10.11, iOS 9.0, *)) {
        for (int i = 0; i < drawCount; ++i) {
            [fActiveRenderCmdEncoder drawIndexedPrimitives:fActivePrimitiveType
                                                 indexType:MTLIndexTypeUInt16
                                               indexBuffer:mtlIndexBuffer->mtlBuffer()
                                         indexBufferOffset:indexOffset
                                            indirectBuffer:mtlIndirectBuffer->mtlBuffer()
                                      indirectBufferOffset:bufferOffset];
            bufferOffset += stride;
            fGpu->stats()->incNumDraws();
        }
    } else {
        // Skia's minimum supported OS versions make this branch unreachable.
        SkASSERT(false);
    }
}

// Binds 'buffer' at logical input slot 'inputBufferIndex' with the given byte offset,
// caching the (buffer, offset) pair per slot so redundant binds are skipped and
// offset-only changes use the cheaper setVertexBufferOffset: path.
void GrMtlOpsRenderPass::setVertexBuffer(id<MTLRenderCommandEncoder> encoder,
                                         const GrBuffer* buffer,
                                         size_t vertexOffset,
                                         size_t inputBufferIndex) {
    // Vertex/instance buffers are bound after the uniform buffer bindings.
    constexpr static int kFirstBufferBindingIdx = GrMtlUniformHandler::kLastUniformBinding + 1;
    int index = inputBufferIndex + kFirstBufferBindingIdx;
    SkASSERT(index < 4);
    auto mtlBuffer = static_cast<const GrMtlBuffer*>(buffer);
    id<MTLBuffer> mtlVertexBuffer = mtlBuffer->mtlBuffer();
    SkASSERT(mtlVertexBuffer);
    // Apple recommends using setVertexBufferOffset: when changing the offset
    // for a currently bound vertex buffer, rather than setVertexBuffer:
    size_t offset = mtlBuffer->offset() + vertexOffset;
    if (fBufferBindings[index].fBuffer != mtlVertexBuffer) {
        // Different buffer: do a full bind and remember both buffer and offset.
        [encoder setVertexBuffer: mtlVertexBuffer
                          offset: offset
                         atIndex: index];
        fBufferBindings[index].fBuffer = mtlVertexBuffer;
        fBufferBindings[index].fOffset = offset;
    } else if (fBufferBindings[index].fOffset != offset) {
        // Same buffer, new offset: take the cheaper offset-only path.
        if (@available(macOS 10.11, iOS 8.3, *)) {
            [encoder setVertexBufferOffset: offset
                                   atIndex: index];
        } else {
            // We only support iOS 9.0+, so we should never hit this
            SK_ABORT("Missing interface. Skia only supports Metal on iOS 9.0 and higher");
        }
        fBufferBindings[index].fOffset = offset;
    }
}

// Forgets every cached vertex-buffer binding so the next setVertexBuffer() call
// performs a full rebind on the (new) encoder.
void GrMtlOpsRenderPass::resetBufferBindings() {
    for (auto& binding : fBufferBindings) {
        binding.fBuffer = nil;
    }
}

GR_NORETAIN_END