/*
 * Copyright 2018 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/mtl/GrMtlOpsRenderPass.h"

#include "src/gpu/GrColor.h"
#include "src/gpu/GrFixedClip.h"
#include "src/gpu/GrRenderTargetPriv.h"
#include "src/gpu/mtl/GrMtlCommandBuffer.h"
#include "src/gpu/mtl/GrMtlPipelineState.h"
#include "src/gpu/mtl/GrMtlPipelineStateBuilder.h"
#include "src/gpu/mtl/GrMtlRenderTarget.h"
#include "src/gpu/mtl/GrMtlTexture.h"

#if !__has_feature(objc_arc)
#error This file must be compiled with ARC. Use the -fobjc-arc flag.
#endif

GrMtlOpsRenderPass::GrMtlOpsRenderPass(GrMtlGpu* gpu, GrRenderTarget* rt, GrSurfaceOrigin origin,
                                       const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
                                       const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo)
        : INHERITED(rt, origin)
        , fGpu(gpu) {
    this->setupRenderPass(colorInfo, stencilInfo);
}

GrMtlOpsRenderPass::~GrMtlOpsRenderPass() {
}

void GrMtlOpsRenderPass::precreateCmdEncoder() {
    // For clears, we may not have an associated draw. So we prepare a cmdEncoder that
    // will be submitted whether there's a draw or not.
    SkDEBUGCODE(id<MTLRenderCommandEncoder> cmdEncoder =)
            fGpu->commandBuffer()->getRenderCommandEncoder(fRenderPassDesc, nullptr, this);
    SkASSERT(nil != cmdEncoder);
}

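// Submits the recorded work: the bounds accumulated from draws and clears are rounded out to
// integer coordinates and passed to the GPU along with the render target, and the active
// render command encoder is dropped since the pass is complete.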
void GrMtlOpsRenderPass::submit() {
    if (!fRenderTarget) {
        return;
    }
    SkIRect iBounds;
    fBounds.roundOut(&iBounds);
    fGpu->submitIndirectCommandBuffer(fRenderTarget, fOrigin, &iBounds);
    fActiveRenderCmdEncoder = nil;
}

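// Finds or creates a pipeline state compatible with programInfo and binds it on the render
// command encoder, along with the fill mode, the scissor state (when it is not dynamic), and
// any non-dynamic primitive-processor textures. Returns false if a compatible pipeline state
// could not be created.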
bool GrMtlOpsRenderPass::onBindPipeline(const GrProgramInfo& programInfo,
                                        const SkRect& drawBounds) {
    fActivePipelineState = fGpu->resourceProvider().findOrCreateCompatiblePipelineState(
            fRenderTarget, programInfo);
    if (!fActivePipelineState) {
        return false;
    }

    fActivePipelineState->setData(fRenderTarget, programInfo);
    fCurrentVertexStride = programInfo.primProc().vertexStride();

    if (!fActiveRenderCmdEncoder) {
        fActiveRenderCmdEncoder =
                fGpu->commandBuffer()->getRenderCommandEncoder(fRenderPassDesc, nullptr, this);
    }

    [fActiveRenderCmdEncoder setRenderPipelineState:fActivePipelineState->mtlPipelineState()];
    fActivePipelineState->setDrawState(fActiveRenderCmdEncoder,
                                       programInfo.pipeline().outputSwizzle(),
                                       programInfo.pipeline().getXferProcessor());
    if (this->gpu()->caps()->wireframeMode() || programInfo.pipeline().isWireframe()) {
        [fActiveRenderCmdEncoder setTriangleFillMode:MTLTriangleFillModeLines];
    } else {
        [fActiveRenderCmdEncoder setTriangleFillMode:MTLTriangleFillModeFill];
    }

    if (!programInfo.pipeline().isScissorEnabled()) {
        GrMtlPipelineState::SetDynamicScissorRectState(fActiveRenderCmdEncoder,
                                                       fRenderTarget, fOrigin,
                                                       SkIRect::MakeWH(fRenderTarget->width(),
                                                                       fRenderTarget->height()));
    } else if (!programInfo.hasDynamicScissors()) {
        SkASSERT(programInfo.hasFixedScissor());

        GrMtlPipelineState::SetDynamicScissorRectState(fActiveRenderCmdEncoder,
                                                       fRenderTarget, fOrigin,
                                                       programInfo.fixedScissor());
    }

    if (!programInfo.hasDynamicPrimProcTextures()) {
        fActivePipelineState->bindTextures(fActiveRenderCmdEncoder);
    }

    fBounds.join(drawBounds);
    return true;
}

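// Issues each mesh's draw, applying any per-mesh dynamic scissor rect and dynamic
// primitive-processor textures before handing the mesh to the GPU.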
void GrMtlOpsRenderPass::onDrawMeshes(const GrProgramInfo& programInfo, const GrMesh meshes[],
                                      int meshCount) {
    SkASSERT(fActivePipelineState);

    for (int i = 0; i < meshCount; ++i) {
        const GrMesh& mesh = meshes[i];
        SkASSERT(nil != fActiveRenderCmdEncoder);

        if (programInfo.hasDynamicScissors()) {
            GrMtlPipelineState::SetDynamicScissorRectState(fActiveRenderCmdEncoder, fRenderTarget,
                                                           fOrigin,
                                                           programInfo.dynamicScissor(i));
        }
        if (programInfo.hasDynamicPrimProcTextures()) {
            auto meshProxies = programInfo.dynamicPrimProcTextures(i);
            fActivePipelineState->setTextures(programInfo, meshProxies);
            fActivePipelineState->bindTextures(fActiveRenderCmdEncoder);
        }

        mesh.sendToGpu(programInfo.primitiveType(), this);
    }
}

void GrMtlOpsRenderPass::onClear(const GrFixedClip& clip, const SkPMColor4f& color) {
    // We should never end up here since all clears should either be done as draws or load ops in
    // Metal. If we hit this assert then we missed a chance to set a load op at the
    // GrRenderTargetContext level.
    SkASSERT(false);
}

void GrMtlOpsRenderPass::onClearStencilClip(const GrFixedClip& clip, bool insideStencilMask) {
    SkASSERT(!clip.hasWindowRectangles());

    GrStencilAttachment* sb = fRenderTarget->renderTargetPriv().getStencilAttachment();
    // This should only be called internally when we know we have a stencil buffer.
    SkASSERT(sb);
    int stencilBitCount = sb->bits();

    // The contract with the callers does not guarantee that we preserve all bits in the stencil
    // during this clear. Thus we will clear the entire stencil to the desired value.
    if (insideStencilMask) {
        fRenderPassDesc.stencilAttachment.clearStencil = (1 << (stencilBitCount - 1));
    } else {
        fRenderPassDesc.stencilAttachment.clearStencil = 0;
    }

    fRenderPassDesc.stencilAttachment.loadAction = MTLLoadActionClear;
    this->precreateCmdEncoder();
    fRenderPassDesc.stencilAttachment.loadAction = MTLLoadActionLoad;
    fActiveRenderCmdEncoder =
            fGpu->commandBuffer()->getRenderCommandEncoder(fRenderPassDesc, nullptr, this);
}

void GrMtlOpsRenderPass::inlineUpload(GrOpFlushState* state, GrDeferredTextureUploadFn& upload) {
    // TODO: this could be more efficient
    state->doUpload(upload);
    // doUpload() creates a blitCommandEncoder, so we need to recreate a renderCommandEncoder
    fActiveRenderCmdEncoder =
            fGpu->commandBuffer()->getRenderCommandEncoder(fRenderPassDesc, nullptr, this);
}

void GrMtlOpsRenderPass::initRenderState(id<MTLRenderCommandEncoder> encoder) {
    [encoder pushDebugGroup:@"initRenderState"];
    [encoder setFrontFacingWinding:MTLWindingCounterClockwise];
    // Strictly speaking we shouldn't have to set this, as the default viewport is the size of
    // the drawable used to generate the renderCommandEncoder -- but just in case.
    MTLViewport viewport = { 0.0, 0.0,
                             (double) fRenderTarget->width(), (double) fRenderTarget->height(),
                             0.0, 1.0 };
    [encoder setViewport:viewport];
    this->resetBufferBindings();
    [encoder popDebugGroup];
}

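// Translates the color and stencil load/store ops into an MTLRenderPassDescriptor. When either
// attachment needs a clear, an encoder is pre-created with the clear load action and the
// descriptor is then switched back to MTLLoadActionLoad so later encoders don't re-clear.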
void GrMtlOpsRenderPass::setupRenderPass(
        const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
        const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo) {
    const static MTLLoadAction mtlLoadAction[] {
        MTLLoadActionLoad,
        MTLLoadActionClear,
        MTLLoadActionDontCare
    };
    static_assert((int)GrLoadOp::kLoad == 0);
    static_assert((int)GrLoadOp::kClear == 1);
    static_assert((int)GrLoadOp::kDiscard == 2);
    SkASSERT(colorInfo.fLoadOp <= GrLoadOp::kDiscard);
    SkASSERT(stencilInfo.fLoadOp <= GrLoadOp::kDiscard);

    const static MTLStoreAction mtlStoreAction[] {
        MTLStoreActionStore,
        MTLStoreActionDontCare
    };
    static_assert((int)GrStoreOp::kStore == 0);
    static_assert((int)GrStoreOp::kDiscard == 1);
    SkASSERT(colorInfo.fStoreOp <= GrStoreOp::kDiscard);
    SkASSERT(stencilInfo.fStoreOp <= GrStoreOp::kDiscard);

    auto renderPassDesc = [MTLRenderPassDescriptor renderPassDescriptor];
    renderPassDesc.colorAttachments[0].texture =
            static_cast<GrMtlRenderTarget*>(fRenderTarget)->mtlColorTexture();
    renderPassDesc.colorAttachments[0].slice = 0;
    renderPassDesc.colorAttachments[0].level = 0;
    const SkPMColor4f& clearColor = colorInfo.fClearColor;
    renderPassDesc.colorAttachments[0].clearColor =
            MTLClearColorMake(clearColor[0], clearColor[1], clearColor[2], clearColor[3]);
    renderPassDesc.colorAttachments[0].loadAction =
            mtlLoadAction[static_cast<int>(colorInfo.fLoadOp)];
    renderPassDesc.colorAttachments[0].storeAction =
            mtlStoreAction[static_cast<int>(colorInfo.fStoreOp)];

    const GrMtlStencilAttachment* stencil = static_cast<GrMtlStencilAttachment*>(
            fRenderTarget->renderTargetPriv().getStencilAttachment());
    if (stencil) {
        renderPassDesc.stencilAttachment.texture = stencil->stencilView();
    }
    renderPassDesc.stencilAttachment.clearStencil = 0;
    renderPassDesc.stencilAttachment.loadAction =
            mtlLoadAction[static_cast<int>(stencilInfo.fLoadOp)];
    renderPassDesc.stencilAttachment.storeAction =
            mtlStoreAction[static_cast<int>(stencilInfo.fStoreOp)];

    fRenderPassDesc = renderPassDesc;

    // Manage initial clears
    if (colorInfo.fLoadOp == GrLoadOp::kClear || stencilInfo.fLoadOp == GrLoadOp::kClear) {
        fBounds = SkRect::MakeWH(fRenderTarget->width(),
                                 fRenderTarget->height());
        this->precreateCmdEncoder();
        if (colorInfo.fLoadOp == GrLoadOp::kClear) {
            fRenderPassDesc.colorAttachments[0].loadAction = MTLLoadActionLoad;
        }
        if (stencilInfo.fLoadOp == GrLoadOp::kClear) {
            fRenderPassDesc.stencilAttachment.loadAction = MTLLoadActionLoad;
        }
    } else {
        fBounds.setEmpty();
    }

    // For now, we lazily create the renderCommandEncoder because we may have no draws,
    // and an empty renderCommandEncoder can still produce output. This can cause issues
    // when we've cleared a texture upon creation -- we'll subsequently discard the contents.
    // This can be removed when that ordering is fixed.
    fActiveRenderCmdEncoder = nil;
}

static MTLPrimitiveType gr_to_mtl_primitive(GrPrimitiveType primitiveType) {
    const static MTLPrimitiveType mtlPrimitiveType[] {
        MTLPrimitiveTypeTriangle,
        MTLPrimitiveTypeTriangleStrip,
        MTLPrimitiveTypePoint,
        MTLPrimitiveTypeLine,
        MTLPrimitiveTypeLineStrip
    };
    static_assert((int)GrPrimitiveType::kTriangles == 0);
    static_assert((int)GrPrimitiveType::kTriangleStrip == 1);
    static_assert((int)GrPrimitiveType::kPoints == 2);
    static_assert((int)GrPrimitiveType::kLines == 3);
    static_assert((int)GrPrimitiveType::kLineStrip == 4);

    SkASSERT(primitiveType <= GrPrimitiveType::kLineStrip);
    return mtlPrimitiveType[static_cast<int>(primitiveType)];
}

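// Binds the vertex buffer and, if present, the instance buffer. Vertex data occupies the buffer
// indices immediately after GrMtlUniformHandler::kLastUniformBinding, with the instance buffer
// following the vertex buffer.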
void GrMtlOpsRenderPass::bindGeometry(const GrBuffer* vertexBuffer,
                                      size_t vertexOffset,
                                      const GrBuffer* instanceBuffer) {
    size_t bufferIndex = GrMtlUniformHandler::kLastUniformBinding + 1;
    if (vertexBuffer) {
        SkASSERT(!vertexBuffer->isCpuBuffer());
        SkASSERT(!static_cast<const GrGpuBuffer*>(vertexBuffer)->isMapped());

        const GrMtlBuffer* grMtlBuffer = static_cast<const GrMtlBuffer*>(vertexBuffer);
        this->setVertexBuffer(fActiveRenderCmdEncoder, grMtlBuffer, vertexOffset, bufferIndex++);
    }
    if (instanceBuffer) {
        SkASSERT(!instanceBuffer->isCpuBuffer());
        SkASSERT(!static_cast<const GrGpuBuffer*>(instanceBuffer)->isMapped());

        const GrMtlBuffer* grMtlBuffer = static_cast<const GrMtlBuffer*>(instanceBuffer);
        this->setVertexBuffer(fActiveRenderCmdEncoder, grMtlBuffer, 0, bufferIndex++);
    }
}

void GrMtlOpsRenderPass::sendArrayMeshToGpu(GrPrimitiveType primitiveType, const GrMesh& mesh,
                                            int vertexCount, int baseVertex) {
    this->bindGeometry(mesh.vertexBuffer(), 0, nullptr);

    [fActiveRenderCmdEncoder drawPrimitives:gr_to_mtl_primitive(primitiveType)
                                vertexStart:baseVertex
                                vertexCount:vertexCount];
}

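// Indexed draws always use 16-bit indices; the index buffer offset combines the GrMtlBuffer's
// own offset into its backing MTLBuffer with the baseIndex for this mesh.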
void GrMtlOpsRenderPass::sendIndexedMeshToGpu(GrPrimitiveType primitiveType, const GrMesh& mesh,
                                              int indexCount, int baseIndex,
                                              uint16_t /*minIndexValue*/,
                                              uint16_t /*maxIndexValue*/, int baseVertex) {
    this->bindGeometry(mesh.vertexBuffer(), fCurrentVertexStride*baseVertex, nullptr);

    id<MTLBuffer> mtlIndexBuffer = nil;
    if (mesh.indexBuffer()) {
        SkASSERT(!mesh.indexBuffer()->isCpuBuffer());
        SkASSERT(!static_cast<const GrGpuBuffer*>(mesh.indexBuffer())->isMapped());

        mtlIndexBuffer = static_cast<const GrMtlBuffer*>(mesh.indexBuffer())->mtlBuffer();
        SkASSERT(mtlIndexBuffer);
    }

    SkASSERT(mesh.primitiveRestart() == GrPrimitiveRestart::kNo);
    size_t indexOffset = static_cast<const GrMtlBuffer*>(mesh.indexBuffer())->offset() +
                         sizeof(uint16_t) * baseIndex;
    [fActiveRenderCmdEncoder drawIndexedPrimitives:gr_to_mtl_primitive(primitiveType)
                                        indexCount:indexCount
                                         indexType:MTLIndexTypeUInt16
                                       indexBuffer:mtlIndexBuffer
                                 indexBufferOffset:indexOffset];
    fGpu->stats()->incNumDraws();
}

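// The drawPrimitives variant that takes a baseInstance is only available on macOS 10.11+ /
// iOS 9.0+, hence the availability guard; the else branch asserts since this path is not
// expected to be reached on older OS versions.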
void GrMtlOpsRenderPass::sendInstancedMeshToGpu(GrPrimitiveType primitiveType, const GrMesh& mesh,
                                                int vertexCount, int baseVertex, int instanceCount,
                                                int baseInstance) {
    this->bindGeometry(mesh.vertexBuffer(), 0, mesh.instanceBuffer());

    if (@available(macOS 10.11, iOS 9.0, *)) {
        [fActiveRenderCmdEncoder drawPrimitives:gr_to_mtl_primitive(primitiveType)
                                    vertexStart:baseVertex
                                    vertexCount:vertexCount
                                  instanceCount:instanceCount
                                   baseInstance:baseInstance];
    } else {
        SkASSERT(false);
    }
}

void GrMtlOpsRenderPass::sendIndexedInstancedMeshToGpu(GrPrimitiveType primitiveType,
                                                       const GrMesh& mesh, int indexCount,
                                                       int baseIndex, int baseVertex,
                                                       int instanceCount, int baseInstance) {
    this->bindGeometry(mesh.vertexBuffer(), 0, mesh.instanceBuffer());

    id<MTLBuffer> mtlIndexBuffer = nil;
    if (mesh.indexBuffer()) {
        SkASSERT(!mesh.indexBuffer()->isCpuBuffer());
        SkASSERT(!static_cast<const GrGpuBuffer*>(mesh.indexBuffer())->isMapped());

        mtlIndexBuffer = static_cast<const GrMtlBuffer*>(mesh.indexBuffer())->mtlBuffer();
        SkASSERT(mtlIndexBuffer);
    }

    SkASSERT(mesh.primitiveRestart() == GrPrimitiveRestart::kNo);
    size_t indexOffset = static_cast<const GrMtlBuffer*>(mesh.indexBuffer())->offset() +
                         sizeof(uint16_t) * baseIndex;

    if (@available(macOS 10.11, iOS 9.0, *)) {
        [fActiveRenderCmdEncoder drawIndexedPrimitives:gr_to_mtl_primitive(primitiveType)
                                            indexCount:indexCount
                                             indexType:MTLIndexTypeUInt16
                                           indexBuffer:mtlIndexBuffer
                                     indexBufferOffset:indexOffset
                                         instanceCount:instanceCount
                                            baseVertex:baseVertex
                                          baseInstance:baseInstance];
    } else {
        SkASSERT(false);
    }
    fGpu->stats()->incNumDraws();
}

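// Tracks the buffer and offset bound at each vertex-buffer index so that redundant
// setVertexBuffer: calls are skipped, and an offset-only change can use the cheaper
// setVertexBufferOffset: where the OS supports it.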
void GrMtlOpsRenderPass::setVertexBuffer(id<MTLRenderCommandEncoder> encoder,
                                         const GrMtlBuffer* buffer,
                                         size_t vertexOffset,
                                         size_t index) {
    SkASSERT(index < 4);
    id<MTLBuffer> mtlVertexBuffer = buffer->mtlBuffer();
    SkASSERT(mtlVertexBuffer);
    // Apple recommends using setVertexBufferOffset: when changing the offset
    // for a currently bound vertex buffer, rather than setVertexBuffer:
    size_t offset = buffer->offset() + vertexOffset;
    if (fBufferBindings[index].fBuffer != mtlVertexBuffer) {
        [encoder setVertexBuffer: mtlVertexBuffer
                          offset: offset
                         atIndex: index];
        fBufferBindings[index].fBuffer = mtlVertexBuffer;
        fBufferBindings[index].fOffset = offset;
    } else if (fBufferBindings[index].fOffset != offset) {
        if (@available(macOS 10.11, iOS 8.3, *)) {
            [encoder setVertexBufferOffset: offset
                                   atIndex: index];
        } else {
            [encoder setVertexBuffer: mtlVertexBuffer
                              offset: offset
                             atIndex: index];
        }
        fBufferBindings[index].fOffset = offset;
    }
}

void GrMtlOpsRenderPass::resetBufferBindings() {
    for (size_t i = 0; i < kNumBindings; ++i) {
        fBufferBindings[i].fBuffer = nil;
    }
}