/*
 * Copyright 2018 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrMtlPipelineState_DEFINED
#define GrMtlPipelineState_DEFINED

#include "include/private/GrTypesPriv.h"
#include "src/gpu/GrStencilSettings.h"
#include "src/gpu/glsl/GrGLSLProgramBuilder.h"
#include "src/gpu/mtl/GrMtlBuffer.h"
#include "src/gpu/mtl/GrMtlPipelineStateDataManager.h"

#import <Metal/Metal.h>

class GrMtlGpu;
class GrMtlPipelineStateDataManager;
class GrMtlSampler;
class GrMtlTexture;
class GrPipeline;

/**
 * Wraps a MTLRenderPipelineState object and also contains more info about the pipeline as needed
 * by Ganesh.
 */
class GrMtlPipelineState {
public:
    using UniformInfoArray = GrMtlPipelineStateDataManager::UniformInfoArray;
    using UniformHandle = GrGLSLProgramDataManager::UniformHandle;

    GrMtlPipelineState(
            GrMtlGpu* gpu,
            id<MTLRenderPipelineState> pipelineState,
            MTLPixelFormat pixelFormat,
            const GrGLSLBuiltinUniformHandles& builtinUniformHandles,
            const UniformInfoArray& uniforms,
            uint32_t uniformBufferSize,
            uint32_t numSamplers,
            std::unique_ptr<GrGLSLPrimitiveProcessor> geometryProcessor,
            std::unique_ptr<GrGLSLXferProcessor> xferProcessor,
            std::unique_ptr<std::unique_ptr<GrGLSLFragmentProcessor>[]> fragmentProcessors,
            int fFragmentProcessorCnt);

    id<MTLRenderPipelineState> mtlPipelineState() { return fPipelineState; }

    void setData(const GrRenderTarget*, const GrProgramInfo&);

    void setTextures(const GrProgramInfo& programInfo,
                     const GrSurfaceProxy* const primProcTextures[]);
    void bindTextures(id<MTLRenderCommandEncoder> renderCmdEncoder);

    void setDrawState(id<MTLRenderCommandEncoder>, const GrSwizzle& outputSwizzle,
                      const GrXferProcessor&);

    static void SetDynamicScissorRectState(id<MTLRenderCommandEncoder> renderCmdEncoder,
                                           const GrRenderTarget* renderTarget,
                                           GrSurfaceOrigin rtOrigin,
                                           SkIRect scissorRect);

    bool doesntSampleAttachment(const MTLRenderPassAttachmentDescriptor*) const;

private:
    /**
    * We use the RT's size and origin to adjust from Skia device space to Metal normalized device
    * space and to make device space positions have the correct origin for processors that require
    * them.
    */
    struct RenderTargetState {
        SkISize         fRenderTargetSize;
        GrSurfaceOrigin fRenderTargetOrigin;

        RenderTargetState() { this->invalidate(); }
        void invalidate() {
            fRenderTargetSize.fWidth = -1;
            fRenderTargetSize.fHeight = -1;
            fRenderTargetOrigin = (GrSurfaceOrigin)-1;
        }

        /**
        * Gets a float4 that adjusts the position from Skia device coords to Metal's normalized
        * device coords. Assuming the transformed position, pos, is a homogeneous float3, the vec,
        * v, is applied as such:
        * pos.x = dot(v.xy, pos.xz)
        * pos.y = dot(v.zw, pos.yz)
        */
        void getRTAdjustmentVec(float* destVec) {
            destVec[0] = 2.f / fRenderTargetSize.fWidth;
            destVec[1] = -1.f;
            if (kBottomLeft_GrSurfaceOrigin == fRenderTargetOrigin) {
                destVec[2] = -2.f / fRenderTargetSize.fHeight;
                destVec[3] = 1.f;
            } else {
                destVec[2] = 2.f / fRenderTargetSize.fHeight;
                destVec[3] = -1.f;
            }
        }
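        // Worked example (illustrative numbers, not from the original header): for a
        // 100x50 render target with a top-left origin, getRTAdjustmentVec() writes
        // {0.02f, -1.f, 0.04f, -1.f}. Applying the dot products documented above, a
        // device-space position of (0, 0) maps to (-1, -1) and (100, 50) maps to (1, 1)
        // in normalized device coordinates.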
    };

    void setRenderTargetState(const GrRenderTarget*, GrSurfaceOrigin);

    void bindUniforms(id<MTLRenderCommandEncoder>);

    void setBlendConstants(id<MTLRenderCommandEncoder>, const GrSwizzle&, const GrXferProcessor&);

    void setDepthStencilState(id<MTLRenderCommandEncoder> renderCmdEncoder);

    struct SamplerBindings {
        GrMtlSampler*  fSampler;
        id<MTLTexture> fTexture;

        SamplerBindings(GrSamplerState state, GrTexture* texture, GrMtlGpu*);
    };

    GrMtlGpu* fGpu;
    id<MTLRenderPipelineState> fPipelineState;
    MTLPixelFormat             fPixelFormat;

    RenderTargetState fRenderTargetState;
    GrGLSLBuiltinUniformHandles fBuiltinUniformHandles;

    GrStencilSettings fStencil;

    int fNumSamplers;
    SkTArray<SamplerBindings> fSamplerBindings;

    std::unique_ptr<GrGLSLPrimitiveProcessor> fGeometryProcessor;
    std::unique_ptr<GrGLSLXferProcessor> fXferProcessor;
    std::unique_ptr<std::unique_ptr<GrGLSLFragmentProcessor>[]> fFragmentProcessors;
    int fFragmentProcessorCnt;

    GrMtlPipelineStateDataManager fDataManager;
};
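
/*
 * A rough usage sketch (an assumption about the calling code, not taken from this
 * header): a GrMtlGpu-side draw might use the public interface roughly like
 *
 *   pipelineState->setData(renderTarget, programInfo);
 *   pipelineState->setTextures(programInfo, primProcTextures);
 *   [renderCmdEncoder setRenderPipelineState:pipelineState->mtlPipelineState()];
 *   pipelineState->setDrawState(renderCmdEncoder, outputSwizzle, xferProcessor);
 *   pipelineState->bindTextures(renderCmdEncoder);
 *
 * where renderCmdEncoder is an id<MTLRenderCommandEncoder> and the remaining
 * arguments come from the current program and pipeline state.
 */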

#endif