// Copyright 2019 The SwiftShader Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "SpirvShader.hpp"
#include "SpirvShaderDebug.hpp"

#include "ShaderCore.hpp"
#include "Reactor/Assert.hpp"
#include "Vulkan/VkPipelineLayout.hpp"

#include <spirv/unified1/spirv.hpp>

namespace sw {

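// EmitLoad implements OpLoad and OpAtomicLoad. Pointers to UniformConstant
// storage reference opaque resources (samplers, images) and are simply
// propagated to the result. For all other storage classes, each scalar
// element is loaded through the SIMD pointer into the result intermediate,
// honoring the active lane mask and, for atomic loads, the memory order.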
SpirvShader::EmitResult SpirvShader::EmitLoad(InsnIterator insn, EmitState *state) const
{
	bool atomic = (insn.opcode() == spv::OpAtomicLoad);
	Object::ID resultId = insn.word(2);
	Object::ID pointerId = insn.word(3);
	auto &result = getObject(resultId);
	auto &resultTy = getType(result);
	auto &pointer = getObject(pointerId);
	auto &pointerTy = getType(pointer);
	std::memory_order memoryOrder = std::memory_order_relaxed;

	ASSERT(getType(pointer).element == result.typeId());
	ASSERT(Type::ID(insn.word(1)) == result.typeId());
	ASSERT(!atomic || getType(getType(pointer).element).opcode() == spv::OpTypeInt);  // Vulkan 1.1: "Atomic instructions must declare a scalar 32-bit integer type, for the value pointed to by Pointer."

	if(pointerTy.storageClass == spv::StorageClassUniformConstant)
	{
		// Just propagate the pointer.
		auto &ptr = state->getPointer(pointerId);
		state->createPointer(resultId, ptr);
		return EmitResult::Continue;
	}

	if(atomic)
	{
		Object::ID semanticsId = insn.word(5);
		auto memorySemantics = static_cast<spv::MemorySemanticsMask>(getObject(semanticsId).constantValue[0]);
		memoryOrder = MemoryOrder(memorySemantics);
	}

	auto ptr = GetPointerToData(pointerId, 0, state);
	bool interleavedByLane = IsStorageInterleavedByLane(pointerTy.storageClass);
	auto &dst = state->createIntermediate(resultId, resultTy.componentCount);
	auto robustness = getOutOfBoundsBehavior(pointerId, state);

	VisitMemoryObject(pointerId, [&](const MemoryElement &el) {
		auto p = ptr + el.offset;
		if(interleavedByLane) { p = InterleaveByLane(p); }  // TODO: Interleave once, then add offset?
		dst.move(el.index, p.Load<SIMD::Float>(robustness, state->activeLaneMask(), atomic, memoryOrder));
	});

	SPIRV_SHADER_DBG("Load(atomic: {0}, order: {1}, ptr: {2}, val: {3}, mask: {4})", atomic, int(memoryOrder), ptr, dst, state->activeLaneMask());

	return EmitResult::Continue;
}

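// EmitStore implements OpStore and OpAtomicStore. The two instructions have
// different operand layouts: for OpAtomicStore the value is word 4 and the
// memory semantics come from word 3. The write itself is performed by Store().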
SpirvShader::EmitResult SpirvShader::EmitStore(InsnIterator insn, EmitState *state) const
{
	bool atomic = (insn.opcode() == spv::OpAtomicStore);
	Object::ID pointerId = insn.word(1);
	Object::ID objectId = insn.word(atomic ? 4 : 2);
	std::memory_order memoryOrder = std::memory_order_relaxed;

	if(atomic)
	{
		Object::ID semanticsId = insn.word(3);
		auto memorySemantics = static_cast<spv::MemorySemanticsMask>(getObject(semanticsId).constantValue[0]);
		memoryOrder = MemoryOrder(memorySemantics);
	}

	const auto &value = Operand(this, state, objectId);

	Store(pointerId, value, atomic, memoryOrder, state);

	return EmitResult::Continue;
}

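// Store writes each scalar component of 'value' through the pointer identified
// by pointerId. For storage classes whose writes are externally visible (see
// StoresInHelperInvocation), the lane mask is further restricted by the
// stores-and-atomics mask so that helper invocations do not perform them.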
void SpirvShader::Store(Object::ID pointerId, const Operand &value, bool atomic, std::memory_order memoryOrder, EmitState *state) const
{
	auto &pointer = getObject(pointerId);
	auto &pointerTy = getType(pointer);
	auto &elementTy = getType(pointerTy.element);

	ASSERT(!atomic || elementTy.opcode() == spv::OpTypeInt);  // Vulkan 1.1: "Atomic instructions must declare a scalar 32-bit integer type, for the value pointed to by Pointer."

	auto ptr = GetPointerToData(pointerId, 0, state);
	bool interleavedByLane = IsStorageInterleavedByLane(pointerTy.storageClass);
	auto robustness = getOutOfBoundsBehavior(pointerId, state);

	SIMD::Int mask = state->activeLaneMask();
	if(!StoresInHelperInvocation(pointerTy.storageClass))
	{
		mask = mask & state->storesAndAtomicsMask();
	}

	SPIRV_SHADER_DBG("Store(atomic: {0}, order: {1}, ptr: {2}, val: {3}, mask: {4})", atomic, int(memoryOrder), ptr, value, mask);

	VisitMemoryObject(pointerId, [&](const MemoryElement &el) {
		auto p = ptr + el.offset;
		if(interleavedByLane) { p = InterleaveByLane(p); }
		p.Store(value.Float(el.index), robustness, mask, atomic, memoryOrder);
	});
}

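// EmitVariable implements OpVariable. It creates a SIMD::Pointer for the
// variable according to its storage class: per-invocation memory for
// Output/Private/Function, shared workgroup memory, inputs copied from the
// shader interface, descriptor-backed pointers for UniformConstant/Uniform/
// StorageBuffer, and the push constant block. A constant initializer, if
// present, is stored through the newly created pointer.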
SpirvShader::EmitResult SpirvShader::EmitVariable(InsnIterator insn, EmitState *state) const
{
	auto routine = state->routine;
	Object::ID resultId = insn.word(2);
	auto &object = getObject(resultId);
	auto &objectTy = getType(object);

	switch(objectTy.storageClass)
	{
	case spv::StorageClassOutput:
	case spv::StorageClassPrivate:
	case spv::StorageClassFunction:
		{
			ASSERT(objectTy.opcode() == spv::OpTypePointer);
			auto base = &routine->getVariable(resultId)[0];
			auto elementTy = getType(objectTy.element);
			auto size = elementTy.componentCount * static_cast<uint32_t>(sizeof(float)) * SIMD::Width;
			state->createPointer(resultId, SIMD::Pointer(base, size));
		}
		break;
	case spv::StorageClassWorkgroup:
		{
			ASSERT(objectTy.opcode() == spv::OpTypePointer);
			auto base = &routine->workgroupMemory[0];
			auto size = workgroupMemory.size();
			state->createPointer(resultId, SIMD::Pointer(base, size, workgroupMemory.offsetOf(resultId)));
		}
		break;
	case spv::StorageClassInput:
		{
			if(object.kind == Object::Kind::InterfaceVariable)
			{
				auto &dst = routine->getVariable(resultId);
				int offset = 0;
				VisitInterface(resultId,
				               [&](Decorations const &d, AttribType type) {
					               auto scalarSlot = d.Location << 2 | d.Component;
					               dst[offset++] = routine->inputs[scalarSlot];
				               });
			}
			ASSERT(objectTy.opcode() == spv::OpTypePointer);
			auto base = &routine->getVariable(resultId)[0];
			auto elementTy = getType(objectTy.element);
			auto size = elementTy.componentCount * static_cast<uint32_t>(sizeof(float)) * SIMD::Width;
			state->createPointer(resultId, SIMD::Pointer(base, size));
		}
		break;
	case spv::StorageClassUniformConstant:
		{
			const auto &d = descriptorDecorations.at(resultId);
			ASSERT(d.DescriptorSet >= 0);
			ASSERT(d.Binding >= 0);

			uint32_t bindingOffset = routine->pipelineLayout->getBindingOffset(d.DescriptorSet, d.Binding);
			Pointer<Byte> set = routine->descriptorSets[d.DescriptorSet];  // DescriptorSet*
			Pointer<Byte> binding = Pointer<Byte>(set + bindingOffset);    // vk::SampledImageDescriptor*
			auto size = 0;                                                 // Not required as this pointer is not directly used by SIMD::Read or SIMD::Write.
			state->createPointer(resultId, SIMD::Pointer(binding, size));
		}
		break;
	case spv::StorageClassUniform:
	case spv::StorageClassStorageBuffer:
		{
			const auto &d = descriptorDecorations.at(resultId);
			ASSERT(d.DescriptorSet >= 0);
			auto size = 0;  // Not required as this pointer is not directly used by SIMD::Read or SIMD::Write.
			// Note: the module may contain descriptor set references that are not suitable for this implementation -- using a set index higher than the number
			// of descriptor set binding points we support. As long as the selected entrypoint doesn't actually touch the out of range binding points, this
			// is valid. In this case make the value nullptr to make it easier to diagnose an attempt to dereference it.
			if(static_cast<uint32_t>(d.DescriptorSet) < vk::MAX_BOUND_DESCRIPTOR_SETS)
			{
				state->createPointer(resultId, SIMD::Pointer(routine->descriptorSets[d.DescriptorSet], size));
			}
			else
			{
				state->createPointer(resultId, SIMD::Pointer(nullptr, 0));
			}
		}
		break;
	case spv::StorageClassPushConstant:
		{
			state->createPointer(resultId, SIMD::Pointer(routine->pushConstants, vk::MAX_PUSH_CONSTANT_SIZE));
		}
		break;
	default:
		UNREACHABLE("Storage class %d", objectTy.storageClass);
		break;
	}

	if(insn.wordCount() > 4)
	{
		Object::ID initializerId = insn.word(4);
		if(getObject(initializerId).kind != Object::Kind::Constant)
		{
			UNIMPLEMENTED("b/148241854: Non-constant initializers not yet implemented");  // FIXME(b/148241854)
		}

		switch(objectTy.storageClass)
		{
		case spv::StorageClassOutput:
		case spv::StorageClassPrivate:
		case spv::StorageClassFunction:
		case spv::StorageClassWorkgroup:
			{
				bool interleavedByLane = IsStorageInterleavedByLane(objectTy.storageClass);
				auto ptr = GetPointerToData(resultId, 0, state);
				Operand initialValue(this, state, initializerId);
				VisitMemoryObject(resultId, [&](const MemoryElement &el) {
					auto p = ptr + el.offset;
					if(interleavedByLane) { p = InterleaveByLane(p); }
					auto robustness = OutOfBoundsBehavior::UndefinedBehavior;  // Local variables are always within bounds.
					p.Store(initialValue.Float(el.index), robustness, state->activeLaneMask());
				});
				if(objectTy.storageClass == spv::StorageClassWorkgroup)
				{
					// Initialization of workgroup memory is done by each subgroup and requires waiting on a barrier.
					// TODO(b/221242292): Initialize just once per workgroup and eliminate the barrier.
					Yield(YieldResult::ControlBarrier);
				}
			}
			break;
		default:
			ASSERT_MSG(initializerId == 0, "Vulkan does not permit variables of storage class %d to have initializers", int(objectTy.storageClass));
		}
	}

	return EmitResult::Continue;
}

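// EmitCopyMemory implements OpCopyMemory. Source and destination may use
// different layouts, so elements are matched by their flat visitation index:
// source offsets are gathered first, then each destination element is loaded
// from the corresponding source offset and stored at its own offset.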
SpirvShader::EmitResult SpirvShader::EmitCopyMemory(InsnIterator insn, EmitState *state) const
{
	Object::ID dstPtrId = insn.word(1);
	Object::ID srcPtrId = insn.word(2);
	auto &dstPtrTy = getObjectType(dstPtrId);
	auto &srcPtrTy = getObjectType(srcPtrId);
	ASSERT(dstPtrTy.element == srcPtrTy.element);

	bool dstInterleavedByLane = IsStorageInterleavedByLane(dstPtrTy.storageClass);
	bool srcInterleavedByLane = IsStorageInterleavedByLane(srcPtrTy.storageClass);
	auto dstPtr = GetPointerToData(dstPtrId, 0, state);
	auto srcPtr = GetPointerToData(srcPtrId, 0, state);

	std::unordered_map<uint32_t, uint32_t> srcOffsets;

	VisitMemoryObject(srcPtrId, [&](const MemoryElement &el) { srcOffsets[el.index] = el.offset; });

	VisitMemoryObject(dstPtrId, [&](const MemoryElement &el) {
		auto it = srcOffsets.find(el.index);
		ASSERT(it != srcOffsets.end());
		auto srcOffset = it->second;
		auto dstOffset = el.offset;

		auto dst = dstPtr + dstOffset;
		auto src = srcPtr + srcOffset;
		if(dstInterleavedByLane) { dst = InterleaveByLane(dst); }
		if(srcInterleavedByLane) { src = InterleaveByLane(src); }

		// TODO(b/131224163): Optimize based on src/dst storage classes.
		auto robustness = OutOfBoundsBehavior::RobustBufferAccess;

		auto value = src.Load<SIMD::Float>(robustness, state->activeLaneMask());
		dst.Store(value, robustness, state->activeLaneMask());
	});
	return EmitResult::Continue;
}

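// EmitMemoryBarrier implements OpMemoryBarrier by emitting a fence for the
// instruction's memory semantics operand.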
SpirvShader::EmitResult SpirvShader::EmitMemoryBarrier(InsnIterator insn, EmitState *state) const
{
	auto semantics = spv::MemorySemanticsMask(GetConstScalarInt(insn.word(2)));
	// TODO(b/176819536): We probably want to consider the memory scope here.
	// For now, just always emit the full fence.
	Fence(semantics);
	return EmitResult::Continue;
}

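// VisitMemoryObjectInner recursively walks the type identified by 'id',
// applying Offset, ArrayStride, MatrixStride and RowMajor decorations, and
// invokes the visitor with the flat element index and byte offset of every
// scalar element it contains.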
void SpirvShader::VisitMemoryObjectInner(sw::SpirvShader::Type::ID id, sw::SpirvShader::Decorations d, uint32_t &index, uint32_t offset, const MemoryVisitor &f) const
{
	ApplyDecorationsForId(&d, id);
	auto const &type = getType(id);

	if(d.HasOffset)
	{
		offset += d.Offset;
		d.HasOffset = false;
	}

	switch(type.opcode())
	{
	case spv::OpTypePointer:
		VisitMemoryObjectInner(type.definition.word(3), d, index, offset, f);
		break;
	case spv::OpTypeInt:
	case spv::OpTypeFloat:
	case spv::OpTypeRuntimeArray:
		f(MemoryElement{ index++, offset, type });
		break;
	case spv::OpTypeVector:
		{
			auto elemStride = (d.InsideMatrix && d.HasRowMajor && d.RowMajor) ? d.MatrixStride : static_cast<int32_t>(sizeof(float));
			for(auto i = 0u; i < type.definition.word(3); i++)
			{
				VisitMemoryObjectInner(type.definition.word(2), d, index, offset + elemStride * i, f);
			}
		}
		break;
	case spv::OpTypeMatrix:
		{
			auto columnStride = (d.HasRowMajor && d.RowMajor) ? static_cast<int32_t>(sizeof(float)) : d.MatrixStride;
			d.InsideMatrix = true;
			for(auto i = 0u; i < type.definition.word(3); i++)
			{
				ASSERT(d.HasMatrixStride);
				VisitMemoryObjectInner(type.definition.word(2), d, index, offset + columnStride * i, f);
			}
		}
		break;
	case spv::OpTypeStruct:
		for(auto i = 0u; i < type.definition.wordCount() - 2; i++)
		{
			ApplyDecorationsForIdMember(&d, id, i);
			VisitMemoryObjectInner(type.definition.word(i + 2), d, index, offset, f);
		}
		break;
	case spv::OpTypeArray:
		{
			auto arraySize = GetConstScalarInt(type.definition.word(3));
			for(auto i = 0u; i < arraySize; i++)
			{
				ASSERT(d.HasArrayStride);
				VisitMemoryObjectInner(type.definition.word(2), d, index, offset + i * d.ArrayStride, f);
			}
		}
		break;
	default:
		UNREACHABLE("%s", OpcodeName(type.opcode()));
	}
}

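// VisitMemoryObject invokes the visitor for every scalar element of the object
// that 'id' points to. Storage classes with explicit layout use the decorated
// offsets and strides; all other objects are tightly packed, 4 bytes per
// component.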
void SpirvShader::VisitMemoryObject(Object::ID id, const MemoryVisitor &f) const
{
	auto typeId = getObject(id).typeId();
	auto const &type = getType(typeId);

	if(IsExplicitLayout(type.storageClass))
	{
		Decorations d = GetDecorationsForId(id);
		uint32_t index = 0;
		VisitMemoryObjectInner(typeId, d, index, 0, f);
	}
	else
	{
		// Objects without explicit layout are tightly packed.
		auto &elType = getType(type.element);
		for(auto index = 0u; index < elType.componentCount; index++)
		{
			auto offset = static_cast<uint32_t>(index * sizeof(float));
			f({ index, offset, elType });
		}
	}
}

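// GetPointerToData resolves an object ID to a SIMD::Pointer to its backing
// memory. Plain pointers and interface variables are looked up directly;
// descriptor-backed objects are resolved through the pipeline layout, applying
// the binding offset, the array index, any dynamic offset, and a size limit
// for robust buffer access.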
SIMD::Pointer SpirvShader::GetPointerToData(Object::ID id, Int arrayIndex, EmitState const *state) const
{
	auto routine = state->routine;
	auto &object = getObject(id);
	switch(object.kind)
	{
	case Object::Kind::Pointer:
	case Object::Kind::InterfaceVariable:
		return state->getPointer(id);

	case Object::Kind::DescriptorSet:
		{
			const auto &d = descriptorDecorations.at(id);
			ASSERT(d.DescriptorSet >= 0 && static_cast<uint32_t>(d.DescriptorSet) < vk::MAX_BOUND_DESCRIPTOR_SETS);
			ASSERT(d.Binding >= 0);
			ASSERT(routine->pipelineLayout->getDescriptorCount(d.DescriptorSet, d.Binding) != 0);  // "If descriptorCount is zero this binding entry is reserved and the resource must not be accessed from any stage via this binding within any pipeline using the set layout."

			uint32_t bindingOffset = routine->pipelineLayout->getBindingOffset(d.DescriptorSet, d.Binding);
			uint32_t descriptorSize = routine->pipelineLayout->getDescriptorSize(d.DescriptorSet, d.Binding);
			Int descriptorOffset = bindingOffset + descriptorSize * arrayIndex;

			auto set = state->getPointer(id);
			Assert(set.base != Pointer<Byte>(nullptr));
			Pointer<Byte> descriptor = set.base + descriptorOffset;  // BufferDescriptor* or inline uniform block

			auto descriptorType = routine->pipelineLayout->getDescriptorType(d.DescriptorSet, d.Binding);
			if(descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT)
			{
				// Note: there is no bounds checking for inline uniform blocks.
				// MAX_INLINE_UNIFORM_BLOCK_SIZE represents the maximum size of
				// an inline uniform block, but this value should remain unused.
				return SIMD::Pointer(descriptor, vk::MAX_INLINE_UNIFORM_BLOCK_SIZE);
			}
			else
			{
				Pointer<Byte> data = *Pointer<Pointer<Byte>>(descriptor + OFFSET(vk::BufferDescriptor, ptr));  // void*
				Int size = *Pointer<Int>(descriptor + OFFSET(vk::BufferDescriptor, sizeInBytes));

				if(routine->pipelineLayout->isDescriptorDynamic(d.DescriptorSet, d.Binding))
				{
					Int dynamicOffsetIndex =
					    routine->pipelineLayout->getDynamicOffsetIndex(d.DescriptorSet, d.Binding) +
					    arrayIndex;
					Int offset = routine->descriptorDynamicOffsets[dynamicOffsetIndex];
					Int robustnessSize = *Pointer<Int>(descriptor + OFFSET(vk::BufferDescriptor, robustnessSize));

					return SIMD::Pointer(data + offset, Min(size, robustnessSize - offset));
				}
				else
				{
					return SIMD::Pointer(data, size);
				}
			}
		}

	default:
		UNREACHABLE("Invalid pointer kind %d", int(object.kind));
		return SIMD::Pointer(Pointer<Byte>(), 0);
	}
}

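// Fence emits a memory fence for the given semantics, or nothing when no
// ordering is requested.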
void SpirvShader::Fence(spv::MemorySemanticsMask semantics) const
{
	if(semantics != spv::MemorySemanticsMaskNone)
	{
		rr::Fence(MemoryOrder(semantics));
	}
}

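// MemoryOrder translates SPIR-V memory semantics bits into the corresponding
// std::memory_order used by the Reactor backend.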
std::memory_order SpirvShader::MemoryOrder(spv::MemorySemanticsMask memorySemantics)
{
	uint32_t control = static_cast<uint32_t>(memorySemantics) & static_cast<uint32_t>(
	                                                                spv::MemorySemanticsAcquireMask |
	                                                                spv::MemorySemanticsReleaseMask |
	                                                                spv::MemorySemanticsAcquireReleaseMask |
	                                                                spv::MemorySemanticsSequentiallyConsistentMask);
	switch(control)
	{
	case spv::MemorySemanticsMaskNone: return std::memory_order_relaxed;
	case spv::MemorySemanticsAcquireMask: return std::memory_order_acquire;
	case spv::MemorySemanticsReleaseMask: return std::memory_order_release;
	case spv::MemorySemanticsAcquireReleaseMask: return std::memory_order_acq_rel;
	case spv::MemorySemanticsSequentiallyConsistentMask: return std::memory_order_acq_rel;  // Vulkan 1.1: "SequentiallyConsistent is treated as AcquireRelease"
	default:
		// "it is invalid for more than one of these four bits to be set:
		//  Acquire, Release, AcquireRelease, or SequentiallyConsistent."
		UNREACHABLE("MemorySemanticsMask: %x", int(control));
		return std::memory_order_acq_rel;
	}
}

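// StoresInHelperInvocation returns whether stores to this storage class are
// still performed by helper invocations. Buffer and image writes are
// externally visible and therefore suppressed; writes to invocation-private
// memory are not.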
bool SpirvShader::StoresInHelperInvocation(spv::StorageClass storageClass)
{
	switch(storageClass)
	{
	case spv::StorageClassUniform:
	case spv::StorageClassStorageBuffer:
	case spv::StorageClassImage:
		return false;
	default:
		return true;
	}
}

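// IsExplicitLayout returns whether objects in this storage class carry
// explicit Offset/ArrayStride/MatrixStride decorations describing their
// memory layout.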
bool SpirvShader::IsExplicitLayout(spv::StorageClass storageClass)
{
	switch(storageClass)
	{
	case spv::StorageClassUniform:
	case spv::StorageClassStorageBuffer:
	case spv::StorageClassPushConstant:
		return true;
	default:
		return false;
	}
}

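// InterleaveByLane rewrites a pointer to tightly packed per-invocation data so
// that the copies belonging to the SIMD lanes are interleaved: component i of
// lane n ends up at byte offset i * sizeof(float) * SIMD::Width + n * sizeof(float).
// For example, with SIMD::Width == 4, component 0 of all four lanes occupies
// bytes 0..15, component 1 occupies bytes 16..31, and so on.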
sw::SIMD::Pointer SpirvShader::InterleaveByLane(sw::SIMD::Pointer p)
{
	p *= sw::SIMD::Width;
	p.staticOffsets[0] += 0 * sizeof(float);
	p.staticOffsets[1] += 1 * sizeof(float);
	p.staticOffsets[2] += 2 * sizeof(float);
	p.staticOffsets[3] += 3 * sizeof(float);
	return p;
}

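// IsStorageInterleavedByLane returns whether data in this storage class is
// laid out per-lane (as produced by InterleaveByLane). Buffers, push constants,
// workgroup memory and images are shared between lanes and use a linear layout.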
bool SpirvShader::IsStorageInterleavedByLane(spv::StorageClass storageClass)
{
	switch(storageClass)
	{
	case spv::StorageClassUniform:
	case spv::StorageClassStorageBuffer:
	case spv::StorageClassPushConstant:
	case spv::StorageClassWorkgroup:
	case spv::StorageClassImage:
		return false;
	default:
		return true;
	}
}

}  // namespace sw