// Copyright 2018 The SwiftShader Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef VK_PIPELINE_HPP_
#define VK_PIPELINE_HPP_

#include "Device/Context.hpp"
#include "Vulkan/VkPipelineCache.hpp"
#include <memory>

namespace sw {

class ComputeProgram;
class SpirvShader;

}  // namespace sw

namespace vk {

class ShaderModule;

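// Base class for GraphicsPipeline and ComputePipeline. It holds the pipeline
// layout and robust-buffer-access setting, and provides the conversion between
// the implementation object and the VkPipeline handle.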
class Pipeline
{
public:
	Pipeline(PipelineLayout *layout, Device *device, bool robustBufferAccess);
	virtual ~Pipeline() = default;

	operator VkPipeline()
	{
		return vk::TtoVkT<Pipeline, VkPipeline>(this);
	}

	static inline Pipeline *Cast(VkPipeline object)
	{
		return vk::VkTtoT<Pipeline, VkPipeline>(object);
	}

	void destroy(const VkAllocationCallbacks *pAllocator);

	virtual void destroyPipeline(const VkAllocationCallbacks *pAllocator) = 0;
#ifndef NDEBUG
	virtual VkPipelineBindPoint bindPoint() const = 0;
#endif

	PipelineLayout *getLayout() const
	{
		return layout;
	}

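	// Fixed-size backing store for push constant data, sized to the maximum
	// push constant range supported (vk::MAX_PUSH_CONSTANT_SIZE bytes).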
	struct PushConstantStorage
	{
		unsigned char data[vk::MAX_PUSH_CONSTANT_SIZE];
	};

protected:
	PipelineLayout *layout = nullptr;
	Device *const device;

	const bool robustBufferAccess = true;
};

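// Pipeline bound at VK_PIPELINE_BIND_POINT_GRAPHICS: holds the baked GraphicsState
// together with the compiled vertex and fragment shaders used for draw calls.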
class GraphicsPipeline : public Pipeline, public ObjectBase<GraphicsPipeline, VkPipeline>
{
public:
	GraphicsPipeline(const VkGraphicsPipelineCreateInfo *pCreateInfo,
	                 void *mem,
	                 Device *device);
	virtual ~GraphicsPipeline() = default;

	void destroyPipeline(const VkAllocationCallbacks *pAllocator) override;

#ifndef NDEBUG
	VkPipelineBindPoint bindPoint() const override
	{
		return VK_PIPELINE_BIND_POINT_GRAPHICS;
	}
#endif

	static size_t ComputeRequiredAllocationSize(const VkGraphicsPipelineCreateInfo *pCreateInfo);
	static VkGraphicsPipelineLibraryFlagsEXT GetGraphicsPipelineSubset(const VkGraphicsPipelineCreateInfo *pCreateInfo);

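	// Compiles the SPIR-V shaders for the stages covered by pCreateInfo,
	// consulting pipelineCache when one is provided.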
	VkResult compileShaders(const VkAllocationCallbacks *pAllocator, const VkGraphicsPipelineCreateInfo *pCreateInfo, PipelineCache *pipelineCache);

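	// Returns the static pipeline state merged with the given dynamic state.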
	GraphicsState getCombinedState(const DynamicState &ds) const { return state.combineStates(ds); }
	const GraphicsState &getState() const { return state; }

	void getIndexBuffers(const vk::DynamicState &dynamicState, uint32_t count, uint32_t first, bool indexed, std::vector<std::pair<uint32_t, void *>> *indexBuffers) const;
	bool hasDynamicVertexStride() const { return state.getVertexInputInterfaceState().hasDynamicVertexStride(); }

	IndexBuffer &getIndexBuffer() { return indexBuffer; }
	const IndexBuffer &getIndexBuffer() const { return indexBuffer; }
	Attachments &getAttachments() { return attachments; }
	const Attachments &getAttachments() const { return attachments; }
	Inputs &getInputs() { return inputs; }
	const Inputs &getInputs() const { return inputs; }

	bool preRasterizationContainsImageWrite() const;
	bool fragmentContainsImageWrite() const;

	const std::shared_ptr<sw::SpirvShader> getShader(const VkShaderStageFlagBits &stage) const;

private:
	void setShader(const VkShaderStageFlagBits &stage, const std::shared_ptr<sw::SpirvShader> spirvShader);
	std::shared_ptr<sw::SpirvShader> vertexShader;
	std::shared_ptr<sw::SpirvShader> fragmentShader;

	const GraphicsState state;

	IndexBuffer indexBuffer;
	Attachments attachments;
	Inputs inputs;
};

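// Pipeline bound at VK_PIPELINE_BIND_POINT_COMPUTE: holds the compute shader and
// the sw::ComputeProgram compiled from it, which run() executes on dispatch.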
class ComputePipeline : public Pipeline, public ObjectBase<ComputePipeline, VkPipeline>
{
public:
	ComputePipeline(const VkComputePipelineCreateInfo *pCreateInfo, void *mem, Device *device);
	virtual ~ComputePipeline() = default;

	void destroyPipeline(const VkAllocationCallbacks *pAllocator) override;

#ifndef NDEBUG
	VkPipelineBindPoint bindPoint() const override
	{
		return VK_PIPELINE_BIND_POINT_COMPUTE;
	}
#endif

	static size_t ComputeRequiredAllocationSize(const VkComputePipelineCreateInfo *pCreateInfo);

	VkResult compileShaders(const VkAllocationCallbacks *pAllocator, const VkComputePipelineCreateInfo *pCreateInfo, PipelineCache *pipelineCache);

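	// Executes the compiled compute program over the given range of workgroups,
	// with the provided descriptor set bindings, dynamic offsets, and push constants.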
	void run(uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ,
	         uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ,
	         const vk::DescriptorSet::Array &descriptorSetObjects,
	         const vk::DescriptorSet::Bindings &descriptorSets,
	         const vk::DescriptorSet::DynamicOffsets &descriptorDynamicOffsets,
	         const vk::Pipeline::PushConstantStorage &pushConstants);

protected:
	std::shared_ptr<sw::SpirvShader> shader;
	std::shared_ptr<sw::ComputeProgram> program;
};

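// Free-standing Cast() overload, forwarding to Pipeline::Cast().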
static inline Pipeline *Cast(VkPipeline object)
{
	return Pipeline::Cast(object);
}

}  // namespace vk

#endif  // VK_PIPELINE_HPP_