// Copyright 2019 The SwiftShader Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "VkPipelineCache.hpp"
#include <cstring>

namespace vk {
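
// Deep-copies the given VkSpecializationInfo, including its map entries and
// data blob, so the cache key stays valid after the caller's structure is
// destroyed. A null pointer produces an empty SpecializationInfo.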
PipelineCache::SpirvShaderKey::SpecializationInfo::SpecializationInfo(const VkSpecializationInfo *specializationInfo)
{
	if(specializationInfo)
	{
		auto ptr = reinterpret_cast<VkSpecializationInfo *>(
		    allocate(sizeof(VkSpecializationInfo), REQUIRED_MEMORY_ALIGNMENT, DEVICE_MEMORY));

		info = std::shared_ptr<VkSpecializationInfo>(ptr, Deleter());

		info->mapEntryCount = specializationInfo->mapEntryCount;
		if(specializationInfo->mapEntryCount > 0)
		{
			size_t entriesSize = specializationInfo->mapEntryCount * sizeof(VkSpecializationMapEntry);
			VkSpecializationMapEntry *mapEntries = reinterpret_cast<VkSpecializationMapEntry *>(
			    allocate(entriesSize, REQUIRED_MEMORY_ALIGNMENT, DEVICE_MEMORY));
			memcpy(mapEntries, specializationInfo->pMapEntries, entriesSize);
			info->pMapEntries = mapEntries;
		}
		else
		{
			// Keep pMapEntries well-defined when there are no entries,
			// mirroring the pData handling below, so the Deleter never
			// passes an indeterminate pointer to deallocate().
			info->pMapEntries = nullptr;
		}

		info->dataSize = specializationInfo->dataSize;
		if(specializationInfo->dataSize > 0)
		{
			void *data = allocate(specializationInfo->dataSize, REQUIRED_MEMORY_ALIGNMENT, DEVICE_MEMORY);
			memcpy(data, specializationInfo->pData, specializationInfo->dataSize);
			info->pData = data;
		}
		else
		{
			info->pData = nullptr;
		}
	}
}
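
// shared_ptr deleter: releases the copied map entries, the data blob, and
// the VkSpecializationInfo struct itself.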
void PipelineCache::SpirvShaderKey::SpecializationInfo::Deleter::operator()(VkSpecializationInfo *info) const
{
	if(info)
	{
		deallocate(const_cast<VkSpecializationMapEntry *>(info->pMapEntries), DEVICE_MEMORY);
		deallocate(const_cast<void *>(info->pData), DEVICE_MEMORY);
		deallocate(info, DEVICE_MEMORY);
	}
}
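
// Strict weak ordering used when SpecializationInfo participates in a map
// key: compares presence first, then sizes, then raw bytes.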
bool PipelineCache::SpirvShaderKey::SpecializationInfo::operator<(const SpecializationInfo &specializationInfo) const
{
	// If only one of the two keys has specialization info, the one
	// without it compares less.
	if((info.get() == nullptr) != (specializationInfo.info.get() == nullptr))
	{
		return info.get() == nullptr;
	}

	if(!info)
	{
		ASSERT(!specializationInfo.info);
		return false;
	}

	if(info->mapEntryCount != specializationInfo.info->mapEntryCount)
	{
		return info->mapEntryCount < specializationInfo.info->mapEntryCount;
	}

	if(info->dataSize != specializationInfo.info->dataSize)
	{
		return info->dataSize < specializationInfo.info->dataSize;
	}

	if(info->mapEntryCount > 0)
	{
		int cmp = memcmp(info->pMapEntries, specializationInfo.info->pMapEntries, info->mapEntryCount * sizeof(VkSpecializationMapEntry));
		if(cmp != 0)
		{
			return cmp < 0;
		}
	}

	if(info->dataSize > 0)
	{
		int cmp = memcmp(info->pData, specializationInfo.info->pData, info->dataSize);
		if(cmp != 0)
		{
			return cmp < 0;
		}
	}

	return false;
}
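
// A SpirvShaderKey identifies a compiled shader by everything that affects
// compilation: stage, entry point, SPIR-V instructions, render pass,
// subpass index, and specialization constants.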
PipelineCache::SpirvShaderKey::SpirvShaderKey(const VkShaderStageFlagBits pipelineStage,
                                              const std::string &entryPointName,
                                              const std::vector<uint32_t> &insns,
                                              const vk::RenderPass *renderPass,
                                              const uint32_t subpassIndex,
                                              const VkSpecializationInfo *specializationInfo)
    : pipelineStage(pipelineStage)
    , entryPointName(entryPointName)
    , insns(insns)
    , renderPass(renderPass)
    , subpassIndex(subpassIndex)
    , specializationInfo(specializationInfo)
{
}
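
// Strict weak ordering over shader keys. Cheap scalar comparisons come
// first; the entry point name, instruction stream, and specialization info
// are only compared when everything else matches.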
bool PipelineCache::SpirvShaderKey::operator<(const SpirvShaderKey &other) const
{
	if(pipelineStage != other.pipelineStage)
	{
		return pipelineStage < other.pipelineStage;
	}

	if(renderPass != other.renderPass)
	{
		return renderPass < other.renderPass;
	}

	if(subpassIndex != other.subpassIndex)
	{
		return subpassIndex < other.subpassIndex;
	}

	if(insns.size() != other.insns.size())
	{
		return insns.size() < other.insns.size();
	}

	if(entryPointName.size() != other.entryPointName.size())
	{
		return entryPointName.size() < other.entryPointName.size();
	}

	int cmp = memcmp(entryPointName.c_str(), other.entryPointName.c_str(), entryPointName.size());
	if(cmp != 0)
	{
		return cmp < 0;
	}

	cmp = memcmp(insns.data(), other.insns.data(), insns.size() * sizeof(uint32_t));
	if(cmp != 0)
	{
		return cmp < 0;
	}

	return (specializationInfo < other.specializationInfo);
}
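
// Writes the Vulkan pipeline cache header (length, version, vendor and
// device IDs, implementation UUID) into the preallocated storage, followed
// by any initial data supplied by the application.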
PipelineCache::PipelineCache(const VkPipelineCacheCreateInfo *pCreateInfo, void *mem)
    : dataSize(ComputeRequiredAllocationSize(pCreateInfo))
    , data(reinterpret_cast<uint8_t *>(mem))
{
	CacheHeader *header = reinterpret_cast<CacheHeader *>(mem);
	header->headerLength = sizeof(CacheHeader);
	header->headerVersion = VK_PIPELINE_CACHE_HEADER_VERSION_ONE;
	header->vendorID = VENDOR_ID;
	header->deviceID = DEVICE_ID;
	memcpy(header->pipelineCacheUUID, SWIFTSHADER_UUID, VK_UUID_SIZE);

	if(pCreateInfo->pInitialData && (pCreateInfo->initialDataSize > 0))
	{
		memcpy(data + sizeof(CacheHeader), pCreateInfo->pInitialData, pCreateInfo->initialDataSize);
	}
}
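
// Releases all cached shaders and compute programs; each shared_ptr drops
// its reference here.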
PipelineCache::~PipelineCache()
{
	spirvShaders.clear();
	computePrograms.clear();
}
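
// Frees the serialized cache storage through the application-provided
// allocator callbacks.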
void PipelineCache::destroy(const VkAllocationCallbacks *pAllocator)
{
	vk::deallocate(data, pAllocator);
}
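
// The backing allocation must hold the fixed-size cache header plus the
// application's initial data.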
size_t PipelineCache::ComputeRequiredAllocationSize(const VkPipelineCacheCreateInfo *pCreateInfo)
{
	return pCreateInfo->initialDataSize + sizeof(CacheHeader);
}
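
// Backs vkGetPipelineCacheData(): a null pData makes this a size query;
// otherwise the caller must pass the exact data size, or the call reports
// VK_INCOMPLETE.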
VkResult PipelineCache::getData(size_t *pDataSize, void *pData)
{
	if(!pData)
	{
		*pDataSize = dataSize;
		return VK_SUCCESS;
	}

	if(*pDataSize != dataSize)
	{
		*pDataSize = 0;
		return VK_INCOMPLETE;
	}

	if(*pDataSize > 0)
	{
		memcpy(pData, data, *pDataSize);
	}

	return VK_SUCCESS;
}
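
// Backs vkMergePipelineCaches(): folds each source cache's shader and
// compute program maps into this cache. std::map::insert keeps the existing
// entry when a key is already present.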
VkResult PipelineCache::merge(uint32_t srcCacheCount, const VkPipelineCache *pSrcCaches)
{
	for(uint32_t i = 0; i < srcCacheCount; i++)
	{
		PipelineCache *srcCache = Cast(pSrcCaches[i]);

		{
			std::unique_lock<std::mutex> lock(spirvShadersMutex);
			spirvShaders.insert(srcCache->spirvShaders.begin(), srcCache->spirvShaders.end());
		}

		{
			std::unique_lock<std::mutex> lock(computeProgramsMutex);
			computePrograms.insert(srcCache->computePrograms.begin(), srcCache->computePrograms.end());
		}
	}

	return VK_SUCCESS;
}
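
// Looks up a cached SPIR-V shader, returning nullptr on a miss. This
// accessor does not lock; callers are expected to synchronize through
// spirvShadersMutex.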
const std::shared_ptr<sw::SpirvShader> *PipelineCache::operator[](const PipelineCache::SpirvShaderKey &key) const
{
	auto it = spirvShaders.find(key);
	return (it != spirvShaders.end()) ? &(it->second) : nullptr;
}
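
// Caches a compiled shader under its key, replacing any existing entry.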
void PipelineCache::insert(const PipelineCache::SpirvShaderKey &key, const std::shared_ptr<sw::SpirvShader> &shader)
{
	spirvShaders[key] = shader;
}
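
// Looks up a cached compute program, returning nullptr on a miss. As above,
// synchronization (computeProgramsMutex) is the caller's responsibility.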
const std::shared_ptr<sw::ComputeProgram> *PipelineCache::operator[](const PipelineCache::ComputeProgramKey &key) const
{
	auto it = computePrograms.find(key);
	return (it != computePrograms.end()) ? &(it->second) : nullptr;
}
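
// Caches a compiled compute program under its key, replacing any existing
// entry.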
void PipelineCache::insert(const PipelineCache::ComputeProgramKey &key, const std::shared_ptr<sw::ComputeProgram> &computeProgram)
{
	computePrograms[key] = computeProgram;
}

}  // namespace vk