// Copyright 2018 The SwiftShader Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef VK_PIPELINE_HPP_
#define VK_PIPELINE_HPP_

#include "Device/Context.hpp"
#include "Vulkan/VkPipelineCache.hpp"
#include <memory>

namespace sw {

class ComputeProgram;
class SpirvShader;

} // namespace sw

namespace vk {

namespace dbg {
class Context;
} // namespace dbg

class ShaderModule;

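// Base class for all pipeline objects. Holds the pipeline layout, records the
// device the pipeline was created on, and provides the mapping between
// Pipeline objects and VkPipeline handles.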
class Pipeline
{
public:
	Pipeline(PipelineLayout *layout, Device *device);
	virtual ~Pipeline() = default;

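	// A VkPipeline handle wraps the pointer to its Pipeline object (via
	// vk::TtoVkT / vk::VkTtoT), so the conversions below are casts rather
	// than table lookups. Illustrative usage:
	//   VkPipeline handle = *pipeline;           // Pipeline* -> VkPipeline
	//   Pipeline *p = Pipeline::Cast(handle);    // VkPipeline -> Pipeline*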
	operator VkPipeline()
	{
		return vk::TtoVkT<Pipeline, VkPipeline>(this);
	}

	static inline Pipeline *Cast(VkPipeline object)
	{
		return vk::VkTtoT<Pipeline, VkPipeline>(object);
	}

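	// destroy() is the external entry point for destruction; subclasses
	// release their stage-specific resources in destroyPipeline().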
	void destroy(const VkAllocationCallbacks *pAllocator);

	virtual void destroyPipeline(const VkAllocationCallbacks *pAllocator) = 0;
#ifndef NDEBUG
	virtual VkPipelineBindPoint bindPoint() const = 0;
#endif

	PipelineLayout *getLayout() const
	{
		return layout;
	}

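	// Fixed-size storage for push constant data. vk::MAX_PUSH_CONSTANT_SIZE
	// bounds the total push constant range this implementation supports.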
	struct PushConstantStorage
	{
		unsigned char data[vk::MAX_PUSH_CONSTANT_SIZE];
	};

protected:
	PipelineLayout *layout = nullptr;
	Device *const device;

	const bool robustBufferAccess = true;
};

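// A pipeline for VK_PIPELINE_BIND_POINT_GRAPHICS: holds the baked graphics
// state plus the compiled vertex and fragment shaders.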
class GraphicsPipeline : public Pipeline, public ObjectBase<GraphicsPipeline, VkPipeline>
{
public:
	GraphicsPipeline(const VkGraphicsPipelineCreateInfo *pCreateInfo,
	                 void *mem,
	                 Device *device);
	virtual ~GraphicsPipeline() = default;

	void destroyPipeline(const VkAllocationCallbacks *pAllocator) override;

#ifndef NDEBUG
	VkPipelineBindPoint bindPoint() const override
	{
		return VK_PIPELINE_BIND_POINT_GRAPHICS;
	}
#endif

	static size_t ComputeRequiredAllocationSize(const VkGraphicsPipelineCreateInfo *pCreateInfo);

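	// Compiles a SpirvShader for each stage present in pCreateInfo. The
	// pipelineCache parameter suggests previously compiled shaders are
	// reused when available; see the definition for the exact behavior.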
	void compileShaders(const VkAllocationCallbacks *pAllocator, const VkGraphicsPipelineCreateInfo *pCreateInfo, PipelineCache *pipelineCache);

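	// Combines the pipeline's static state with the dynamic state currently
	// set on the command buffer.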
	const GraphicsState getState(const DynamicState &ds) const { return state.combineStates(ds); }

	void getIndexBuffers(uint32_t count, uint32_t first, bool indexed, std::vector<std::pair<uint32_t, void *>> *indexBuffers) const;

	IndexBuffer &getIndexBuffer() { return indexBuffer; }
	const IndexBuffer &getIndexBuffer() const { return indexBuffer; }
	Attachments &getAttachments() { return attachments; }
	const Attachments &getAttachments() const { return attachments; }
	Inputs &getInputs() { return inputs; }
	const Inputs &getInputs() const { return inputs; }

	bool containsImageWrite() const;

	const std::shared_ptr<sw::SpirvShader> getShader(const VkShaderStageFlagBits &stage) const;

private:
	void setShader(const VkShaderStageFlagBits &stage, const std::shared_ptr<sw::SpirvShader> spirvShader);
	std::shared_ptr<sw::SpirvShader> vertexShader;
	std::shared_ptr<sw::SpirvShader> fragmentShader;

	const GraphicsState state;

	IndexBuffer indexBuffer;
	Attachments attachments;
	Inputs inputs;
};

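// A pipeline for VK_PIPELINE_BIND_POINT_COMPUTE: holds the compiled compute
// shader and the executable program built from it.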
class ComputePipeline : public Pipeline, public ObjectBase<ComputePipeline, VkPipeline>
{
public:
	ComputePipeline(const VkComputePipelineCreateInfo *pCreateInfo, void *mem, Device *device);
	virtual ~ComputePipeline() = default;

	void destroyPipeline(const VkAllocationCallbacks *pAllocator) override;

#ifndef NDEBUG
	VkPipelineBindPoint bindPoint() const override
	{
		return VK_PIPELINE_BIND_POINT_COMPUTE;
	}
#endif

	static size_t ComputeRequiredAllocationSize(const VkComputePipelineCreateInfo *pCreateInfo);

	void compileShaders(const VkAllocationCallbacks *pAllocator, const VkComputePipelineCreateInfo *pCreateInfo, PipelineCache *pipelineCache);

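	// Dispatches the compute program. The parameters mirror
	// vkCmdDispatchBase(): baseGroup* give the first workgroup ID in each
	// dimension, groupCount* the number of workgroups to dispatch.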
	void run(uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ,
	         uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ,
	         vk::DescriptorSet::Array const &descriptorSetObjects,
	         vk::DescriptorSet::Bindings const &descriptorSets,
	         vk::DescriptorSet::DynamicOffsets const &descriptorDynamicOffsets,
	         vk::Pipeline::PushConstantStorage const &pushConstants);

protected:
	std::shared_ptr<sw::SpirvShader> shader;
	std::shared_ptr<sw::ComputeProgram> program;
};

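// Free-function overload so generic code can write vk::Cast(object) for
// pipelines as it does for other handle types.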
static inline Pipeline *Cast(VkPipeline object)
{
	return Pipeline::Cast(object);
}

} // namespace vk

#endif // VK_PIPELINE_HPP_