/*
 * Copyright 2018 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrMtlPipelineState_DEFINED
#define GrMtlPipelineState_DEFINED

#include "include/private/GrTypesPriv.h"
#include "src/gpu/GrStencilSettings.h"
#include "src/gpu/glsl/GrGLSLProgramBuilder.h"
#include "src/gpu/mtl/GrMtlBuffer.h"
#include "src/gpu/mtl/GrMtlPipelineStateDataManager.h"

#import <Metal/Metal.h>

class GrMtlGpu;
class GrMtlPipelineStateDataManager;
class GrMtlSampler;
class GrMtlTexture;
class GrPipeline;

/**
 * Wraps an MTLRenderPipelineState object and also holds the additional information about the
 * pipeline that Ganesh needs.
 */
class GrMtlPipelineState {
public:
    using UniformInfoArray = GrMtlPipelineStateDataManager::UniformInfoArray;
    using UniformHandle = GrGLSLProgramDataManager::UniformHandle;

    GrMtlPipelineState(
            GrMtlGpu* gpu,
            id<MTLRenderPipelineState> pipelineState,
            MTLPixelFormat pixelFormat,
            const GrGLSLBuiltinUniformHandles& builtinUniformHandles,
            const UniformInfoArray& uniforms,
            uint32_t uniformBufferSize,
            uint32_t numSamplers,
            std::unique_ptr<GrGLSLPrimitiveProcessor> geometryProcessor,
            std::unique_ptr<GrGLSLXferProcessor> xferProcessor,
            std::unique_ptr<std::unique_ptr<GrGLSLFragmentProcessor>[]> fragmentProcessors,
            int fFragmentProcessorCnt);

    id<MTLRenderPipelineState> mtlPipelineState() { return fPipelineState; }

    void setData(const GrRenderTarget*, GrSurfaceOrigin,
                 const GrPrimitiveProcessor& primProc, const GrPipeline& pipeline,
                 const GrTextureProxy* const primProcTextures[]);

    void setDrawState(id<MTLRenderCommandEncoder>, const GrSwizzle& outputSwizzle,
                      const GrXferProcessor&);

    static void SetDynamicScissorRectState(id<MTLRenderCommandEncoder> renderCmdEncoder,
                                           const GrRenderTarget* renderTarget,
                                           GrSurfaceOrigin rtOrigin,
                                           SkIRect scissorRect);

    bool doesntSampleAttachment(const MTLRenderPassAttachmentDescriptor*) const;

private:
    /**
     * We use the RT's size and origin to adjust from Skia device space to Metal normalized device
     * space and to make device space positions have the correct origin for processors that require
     * them.
     */
    struct RenderTargetState {
        SkISize         fRenderTargetSize;
        GrSurfaceOrigin fRenderTargetOrigin;

        RenderTargetState() { this->invalidate(); }
        void invalidate() {
            fRenderTargetSize.fWidth = -1;
            fRenderTargetSize.fHeight = -1;
            fRenderTargetOrigin = (GrSurfaceOrigin)-1;
        }

        /**
         * Gets a float4 that adjusts the position from Skia device coords to Metal's normalized
         * device coords. Assuming the transformed position, pos, is a homogeneous float3, the vec,
         * v, is applied as such:
         * pos.x = dot(v.xy, pos.xz)
         * pos.y = dot(v.zw, pos.yz)
         */
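        // For example, a kTopLeft_GrSurfaceOrigin render target of size (w, h) gets
        // v = (2/w, -1, 2/h, -1), so a device-space position pos = (x, y, 1) becomes
        //   pos.x = (2/w)*x + (-1)*1 = 2*x/w - 1   // [0, w] -> [-1, 1]
        //   pos.y = (2/h)*y + (-1)*1 = 2*y/h - 1   // [0, h] -> [-1, 1]
        // while kBottomLeft_GrSurfaceOrigin negates the y terms, flipping the y axis.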
        void getRTAdjustmentVec(float* destVec) {
            destVec[0] = 2.f / fRenderTargetSize.fWidth;
            destVec[1] = -1.f;
            if (kBottomLeft_GrSurfaceOrigin == fRenderTargetOrigin) {
                destVec[2] = -2.f / fRenderTargetSize.fHeight;
                destVec[3] = 1.f;
            } else {
                destVec[2] = 2.f / fRenderTargetSize.fHeight;
                destVec[3] = -1.f;
            }
        }
    };

    void setRenderTargetState(const GrRenderTarget*, GrSurfaceOrigin);

    void bind(id<MTLRenderCommandEncoder>);

    void setBlendConstants(id<MTLRenderCommandEncoder>, const GrSwizzle&, const GrXferProcessor&);

    void setDepthStencilState(id<MTLRenderCommandEncoder> renderCmdEncoder);

    struct SamplerBindings {
        GrMtlSampler*  fSampler;
        id<MTLTexture> fTexture;

        SamplerBindings(const GrSamplerState& state, GrTexture* texture, GrMtlGpu*);
    };

    GrMtlGpu* fGpu;
    id<MTLRenderPipelineState> fPipelineState;
    MTLPixelFormat             fPixelFormat;

    RenderTargetState fRenderTargetState;
    GrGLSLBuiltinUniformHandles fBuiltinUniformHandles;

    GrStencilSettings fStencil;

    int fNumSamplers;
    SkTArray<SamplerBindings> fSamplerBindings;

    std::unique_ptr<GrGLSLPrimitiveProcessor> fGeometryProcessor;
    std::unique_ptr<GrGLSLXferProcessor> fXferProcessor;
    std::unique_ptr<std::unique_ptr<GrGLSLFragmentProcessor>[]> fFragmentProcessors;
    int fFragmentProcessorCnt;

    GrMtlPipelineStateDataManager fDataManager;
};
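
// A minimal usage sketch, assuming a caller that already holds this pipeline state, the draw's
// render target, origin, processors, and textures, plus an open MTLRenderCommandEncoder; all
// variable names below are illustrative and not part of this header:
//
//     pipelineState->setData(renderTarget, origin, primProc, pipeline, primProcTextures);
//     pipelineState->setDrawState(renderCmdEncoder, outputSwizzle, xferProcessor);
//     GrMtlPipelineState::SetDynamicScissorRectState(renderCmdEncoder, renderTarget, origin,
//                                                    scissorRect);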

#endif