1 /*------------------------------------------------------------------------
2  * Vulkan Conformance Tests
3  * ------------------------
4  *
5  * Copyright (c) 2018 The Khronos Group Inc.
6  *
7  * Licensed under the Apache License, Version 2.0 (the "License");
8  * you may not use this file except in compliance with the License.
9  * You may obtain a copy of the License at
10  *
11  *      http://www.apache.org/licenses/LICENSE-2.0
12  *
13  * Unless required by applicable law or agreed to in writing, software
14  * distributed under the License is distributed on an "AS IS" BASIS,
15  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16  * See the License for the specific language governing permissions and
17  * limitations under the License.
18  *
19  *//*!
20  * \file
21  * \brief Robust buffer access tests for storage buffers and
22  *        storage texel buffers with variable pointers.
23  *
24  * \note These tests check whether accessing memory through a variable
25  *       pointer that points outside of the accessible buffer memory is robust.
26  *       To do this the tests generate SPIR-V code that creates
27  *       variable pointers. Those pointers either point into
28  *       memory allocated for a buffer but "not accessible" - meaning the
29  *       DescriptorBufferInfo range is smaller than the memory accessed in the
30  *       shader - or entirely outside of the allocated memory (e.g. the buffer
31  *       is 256 bytes big but we try to access at an offset of 1 KiB from the
32  *       buffer start). The set of valid behaviours when the robust buffer
33  *       access feature is enabled is described in chapter 32, section 1 of
34  *       the Vulkan specification.
35  *
36  *//*--------------------------------------------------------------------*/
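/* Illustrative sketch (hypothetical values, not code used by the tests below) of the
 * two out-of-bounds situations described in the note above:
 *
 *     // Case 1: descriptor range smaller than the region the shader touches
 *     VkDescriptorBufferInfo info = { buffer, 0u, 16u };   // shader still copies 64 bytes
 *
 *     // Case 2: the shader indexes roughly 1 KiB past the start of a 256-byte buffer
 *
 * With robustBufferAccess enabled such accesses must not crash: out-of-bounds loads
 * return zero or values from within the accessible memory, and out-of-bounds stores
 * are either discarded or land within it.
 */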
37 
38 #include "vktRobustBufferAccessWithVariablePointersTests.hpp"
39 #include "vktRobustnessUtil.hpp"
40 #include "vktTestCaseUtil.hpp"
41 #include "vkBuilderUtil.hpp"
42 #include "vkImageUtil.hpp"
43 #include "vkPrograms.hpp"
44 #include "vkQueryUtil.hpp"
45 #include "vkRef.hpp"
46 #include "vkRefUtil.hpp"
47 #include "vkTypeUtil.hpp"
48 #include "tcuTestLog.hpp"
49 #include "vkDefs.hpp"
50 #include "deRandom.hpp"
51 
52 #include <limits>
53 #include <sstream>
54 
55 namespace vkt
56 {
57 namespace robustness
58 {
59 
60 using namespace vk;
61 
62 // keep local things local
63 namespace
64 {
65 
66 // Creates a custom device with robust buffer access and variable pointer features.
67 Move<VkDevice> createRobustBufferAccessVariablePointersDevice (Context& context)
68 {
69 	auto pointerFeatures = context.getVariablePointersFeatures();
70 
71 	VkPhysicalDeviceFeatures2 features2 = initVulkanStructure();
72 	features2.features = context.getDeviceFeatures();
73 	features2.features.robustBufferAccess = VK_TRUE;
74 	features2.pNext = &pointerFeatures;
75 
76 	return createRobustBufferAccessDevice(context, &features2);
77 }
78 
79 // A supplementary structure that holds information about buffer size and access ranges
80 struct AccessRangesData
81 {
82 	VkDeviceSize	allocSize;
83 	VkDeviceSize	accessRange;
84 	VkDeviceSize	maxAccessRange;
85 };
86 
87 // Pointer to a function used to fill a buffer with data - it is passed as a parameter to the buffer creation utility function
88 typedef void(*FillBufferProcPtr)(void*, vk::VkDeviceSize, const void* const);
89 
90 // A utility function for creating a buffer
91 // This function not only allocates memory for the buffer but also fills the buffer with data (an illustrative call is sketched right after the function body)
92 void createTestBuffer (const vk::DeviceInterface&				deviceInterface,
93 					   const VkDevice&							device,
94 					   VkDeviceSize								accessRange,
95 					   VkBufferUsageFlags						usage,
96 					   SimpleAllocator&							allocator,
97 					   Move<VkBuffer>&							buffer,
98 					   de::MovePtr<Allocation>&					bufferAlloc,
99 					   AccessRangesData&						data,
100 					   FillBufferProcPtr						fillBufferProc,
101 					   const void* const						blob)
102 {
103 	const VkBufferCreateInfo	bufferParams	=
104 	{
105 		VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,		// VkStructureType		sType;
106 		DE_NULL,									// const void*			pNext;
107 		0u,											// VkBufferCreateFlags	flags;
108 		accessRange,								// VkDeviceSize			size;
109 		usage,										// VkBufferUsageFlags	usage;
110 		VK_SHARING_MODE_EXCLUSIVE,					// VkSharingMode		sharingMode;
111 		VK_QUEUE_FAMILY_IGNORED,					// deUint32				queueFamilyIndexCount;
112 		DE_NULL										// const deUint32*		pQueueFamilyIndices;
113 	};
114 
115 	buffer = createBuffer(deviceInterface, device, &bufferParams);
116 
117 	VkMemoryRequirements bufferMemoryReqs		= getBufferMemoryRequirements(deviceInterface, device, *buffer);
118 	bufferAlloc = allocator.allocate(bufferMemoryReqs, MemoryRequirement::HostVisible);
119 
120 	data.allocSize = bufferMemoryReqs.size;
121 	data.accessRange = accessRange;
122 	data.maxAccessRange = deMinu64(data.allocSize, deMinu64(bufferParams.size, accessRange));
123 
124 	VK_CHECK(deviceInterface.bindBufferMemory(device, *buffer, bufferAlloc->getMemory(), bufferAlloc->getOffset()));
125 	fillBufferProc(bufferAlloc->getHostPtr(), bufferMemoryReqs.size, blob);
126 	flushMappedMemoryRange(deviceInterface, device, bufferAlloc->getMemory(), bufferAlloc->getOffset(), VK_WHOLE_SIZE);
127 }
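// Illustrative call of the helper above (hypothetical local names, shown only to
// clarify the parameter roles; it is not part of the original code):
//
//   Move<VkBuffer>          inBuffer;
//   de::MovePtr<Allocation> inBufferAlloc;
//   AccessRangesData        inAccess;
//   const VkFormat          format = VK_FORMAT_R32_SFLOAT;
//   createTestBuffer(vk, device, 64u, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, allocator,
//                    inBuffer, inBufferAlloc, inAccess, populateBufferWithValues, &format);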
128 
129 // An adapter function matching FillBufferProcPtr interface. Fills a buffer with "randomly" generated test data matching desired format.
130 void populateBufferWithValues (void*				buffer,
131 							   VkDeviceSize			size,
132 							   const void* const	blob)
133 {
134 	populateBufferWithTestValues(buffer, size, *static_cast<const vk::VkFormat*>(blob));
135 }
136 
137 // An adapter function matching FillBufferProcPtr interface. Fills a buffer with 0xBABABABABABA... pattern. Used to fill up output buffers.
138 // Since this pattern cannot show up in generated test data it should not show up in the valid output.
139 void populateBufferWithDummy (void*					buffer,
140 							  VkDeviceSize			size,
141 							  const void* const		blob)
142 {
143 	DE_UNREF(blob);
144 	deMemset(buffer, 0xBA, static_cast<size_t>(size));
145 }
146 
147 // An adapter function matching FillBufferProcPtr interface. Fills a buffer with a copy of memory contents pointed to by blob.
148 void populateBufferWithCopy (void*					buffer,
149 							 VkDeviceSize			size,
150 							 const void* const		blob)
151 {
152 	deMemcpy(buffer, blob, static_cast<size_t>(size));
153 }
154 
155 // Composite types used in the tests
156 // These composites can be made of unsigned ints, signed ints or floats (except for matrices, which work with floats only).
157 enum ShaderType
158 {
159 	SHADER_TYPE_MATRIX_COPY					= 0,
160 	SHADER_TYPE_VECTOR_COPY,
161 	SHADER_TYPE_SCALAR_COPY,
162 
163 	SHADER_TYPE_COUNT
164 };
165 
166 // We are testing reads or writes
167 // When testing reads, only the read uses variable pointers (the write uses a regular pointer), and vice versa for writes.
168 enum BufferAccessType
169 {
170 	BUFFER_ACCESS_TYPE_READ_FROM_STORAGE	= 0,
171 	BUFFER_ACCESS_TYPE_WRITE_TO_STORAGE,
172 };
173 
174 // Test case for checking robust buffer access with variable pointers
175 class RobustAccessWithPointersTest : public vkt::TestCase
176 {
177 public:
178 	static const deUint32		s_testArraySize;
179 	static const deUint32		s_numberOfBytesAccessed;
180 
181 								RobustAccessWithPointersTest	(tcu::TestContext&		testContext,
182 																 const std::string&		name,
183 																 const std::string&		description,
184 																 VkShaderStageFlags		shaderStage,
185 																 ShaderType				shaderType,
186 																 VkFormat				bufferFormat);
187 
188 	virtual						~RobustAccessWithPointersTest	(void)
189 	{
190 	}
191 
192 	void						checkSupport (Context &context) const override;
193 
194 protected:
195 	const VkShaderStageFlags	m_shaderStage;
196 	const ShaderType			m_shaderType;
197 	const VkFormat				m_bufferFormat;
198 };
199 
200 const deUint32 RobustAccessWithPointersTest::s_testArraySize = 1024u;
201 const deUint32 RobustAccessWithPointersTest::s_numberOfBytesAccessed = static_cast<deUint32>(16ull * sizeof(float));
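// For reference: s_numberOfBytesAccessed is 16 * sizeof(float) = 64 bytes, i.e. one
// 4x4 float matrix, four 4-component vectors or sixteen scalars - matching the three
// ShaderType copy variants and the 4x4 copy loops generated in MakeShader() below.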
202 
203 RobustAccessWithPointersTest::RobustAccessWithPointersTest(tcu::TestContext&		testContext,
204 	const std::string&		name,
205 	const std::string&		description,
206 	VkShaderStageFlags		shaderStage,
207 	ShaderType				shaderType,
208 	VkFormat				bufferFormat)
209 	: vkt::TestCase(testContext, name, description)
210 	, m_shaderStage(shaderStage)
211 	, m_shaderType(shaderType)
212 	, m_bufferFormat(bufferFormat)
213 {
214 	DE_ASSERT(m_shaderStage == VK_SHADER_STAGE_VERTEX_BIT || m_shaderStage == VK_SHADER_STAGE_FRAGMENT_BIT || m_shaderStage == VK_SHADER_STAGE_COMPUTE_BIT);
215 }
216 
217 void RobustAccessWithPointersTest::checkSupport (Context &context) const
218 {
219 	const auto& pointerFeatures = context.getVariablePointersFeatures();
220 	if (!pointerFeatures.variablePointersStorageBuffer)
221 		TCU_THROW(NotSupportedError, "VariablePointersStorageBuffer SPIR-V capability not supported");
222 
223 	if (context.isDeviceFunctionalitySupported("VK_KHR_portability_subset") && !context.getDeviceFeatures().robustBufferAccess)
224 		TCU_THROW(NotSupportedError, "VK_KHR_portability_subset: robustBufferAccess not supported by this implementation");
225 }
226 
227 // A subclass for testing reading with variable pointers
228 class RobustReadTest : public RobustAccessWithPointersTest
229 {
230 public:
231 								RobustReadTest					(tcu::TestContext&		testContext,
232 																 const std::string&		name,
233 																 const std::string&		description,
234 																 VkShaderStageFlags		shaderStage,
235 																 ShaderType				shaderType,
236 																 VkFormat				bufferFormat,
237 																 VkDeviceSize			readAccessRange,
238 																 bool					accessOutOfBackingMemory);
239 
240 	virtual						~RobustReadTest					(void)
241 	{}
242 	virtual TestInstance*		createInstance					(Context&				context) const;
243 private:
244 	virtual void				initPrograms					(SourceCollections&		programCollection) const;
245 	const VkDeviceSize			m_readAccessRange;
246 	const bool					m_accessOutOfBackingMemory;
247 };
248 
249 // A subclass for testing writing with variable pointers
250 class RobustWriteTest : public RobustAccessWithPointersTest
251 {
252 public:
253 								RobustWriteTest				(tcu::TestContext&		testContext,
254 															 const std::string&		name,
255 															 const std::string&		description,
256 															 VkShaderStageFlags		shaderStage,
257 															 ShaderType				shaderType,
258 															 VkFormat				bufferFormat,
259 															 VkDeviceSize			writeAccessRange,
260 															 bool					accessOutOfBackingMemory);
261 
262 	virtual						~RobustWriteTest			(void) {}
263 	virtual TestInstance*		createInstance				(Context& context) const;
264 private:
265 	virtual void				initPrograms				(SourceCollections&		programCollection) const;
266 	const VkDeviceSize			m_writeAccessRange;
267 	const bool					m_accessOutOfBackingMemory;
268 };
269 
270 // If some prerequisites are not fulfilled, this lightweight dummy test instance is created instead of AccessInstance. Should be a bit faster that way.
271 class NotSupportedInstance : public vkt::TestInstance
272 {
273 public:
274 								NotSupportedInstance		(Context&			context,
275 															 const std::string&	message)
276 		: TestInstance(context)
277 		, m_notSupportedMessage(message)
278 	{}
279 
280 	virtual						~NotSupportedInstance		(void)
281 	{
282 	}
283 
284 	virtual tcu::TestStatus		iterate						(void)
285 	{
286 		TCU_THROW(NotSupportedError, m_notSupportedMessage.c_str());
287 	}
288 
289 private:
290 	std::string					m_notSupportedMessage;
291 };
292 
293 // A superclass for instances testing reading and writing
294 // holds all necessary object members
295 class AccessInstance : public vkt::TestInstance
296 {
297 public:
298 								AccessInstance				(Context&			context,
299 															 Move<VkDevice>		device,
300 															 ShaderType			shaderType,
301 															 VkShaderStageFlags	shaderStage,
302 															 VkFormat			bufferFormat,
303 															 BufferAccessType	bufferAccessType,
304 															 VkDeviceSize		inBufferAccessRange,
305 															 VkDeviceSize		outBufferAccessRange,
306 															 bool				accessOutOfBackingMemory);
307 
308 	virtual						~AccessInstance				(void) {}
309 
310 	virtual tcu::TestStatus		iterate						(void);
311 
312 	virtual bool				verifyResult				(bool splitAccess = false);
313 
314 private:
315 	bool						isExpectedValueFromInBuffer	(VkDeviceSize		offsetInBytes,
316 															 const void*		valuePtr,
317 															 VkDeviceSize		valueSize);
318 	bool						isOutBufferValueUnchanged	(VkDeviceSize		offsetInBytes,
319 															 VkDeviceSize		valueSize);
320 
321 protected:
322 	Move<VkDevice>				m_device;
323 	de::MovePtr<TestEnvironment>m_testEnvironment;
324 
325 	const ShaderType			m_shaderType;
326 	const VkShaderStageFlags	m_shaderStage;
327 
328 	const VkFormat				m_bufferFormat;
329 	const BufferAccessType		m_bufferAccessType;
330 
331 	AccessRangesData			m_inBufferAccess;
332 	Move<VkBuffer>				m_inBuffer;
333 	de::MovePtr<Allocation>		m_inBufferAlloc;
334 
335 	AccessRangesData			m_outBufferAccess;
336 	Move<VkBuffer>				m_outBuffer;
337 	de::MovePtr<Allocation>		m_outBufferAlloc;
338 
339 	Move<VkBuffer>				m_indicesBuffer;
340 	de::MovePtr<Allocation>		m_indicesBufferAlloc;
341 
342 	Move<VkDescriptorPool>		m_descriptorPool;
343 	Move<VkDescriptorSetLayout>	m_descriptorSetLayout;
344 	Move<VkDescriptorSet>		m_descriptorSet;
345 
346 	Move<VkFence>				m_fence;
347 	VkQueue						m_queue;
348 
349 	// Used when m_shaderStage == VK_SHADER_STAGE_VERTEX_BIT
350 	Move<VkBuffer>				m_vertexBuffer;
351 	de::MovePtr<Allocation>		m_vertexBufferAlloc;
352 
353 	const bool					m_accessOutOfBackingMemory;
354 };
355 
356 // A subclass for read tests
357 class ReadInstance: public AccessInstance
358 {
359 public:
360 								ReadInstance			(Context&				context,
361 														 Move<VkDevice>			device,
362 														 ShaderType				shaderType,
363 														 VkShaderStageFlags		shaderStage,
364 														 VkFormat				bufferFormat,
365 														 VkDeviceSize			inBufferAccessRange,
366 														 bool					accessOutOfBackingMemory);
367 
368 	virtual						~ReadInstance			(void) {}
369 };
370 
371 // A subclass for write tests
372 class WriteInstance: public AccessInstance
373 {
374 public:
375 								WriteInstance			(Context&				context,
376 														 Move<VkDevice>			device,
377 														 ShaderType				shaderType,
378 														 VkShaderStageFlags		shaderStage,
379 														 VkFormat				bufferFormat,
380 														 VkDeviceSize			writeBufferAccessRange,
381 														 bool					accessOutOfBackingMemory);
382 
383 	virtual						~WriteInstance			(void) {}
384 };
385 
386 // Automatically incremented counter.
387 // Each read of the value bumps the counter up.
388 class Autocounter
389 {
390 public:
391 								Autocounter()
392 		:value(0u)
393 	{}
394 	deUint32					incrementAndGetValue()
395 	{
396 		return ++value;
397 	}
398 private:
399 	deUint32					value;
400 };
401 
402 // A class representing a SPIRV variable.
403 // Each instance internally holds a unique identifier.
404 // When such a variable is used in the shader composition routine it is mapped onto an in-SPIRV-code variable name.
405 class Variable
406 {
407 	friend bool					operator < (const Variable& a, const Variable& b);
408 public:
409 								Variable(Autocounter& autoincrement)
410 		: value(autoincrement.incrementAndGetValue())
411 	{}
412 private:
413 	deUint32					value;
414 };
415 
416 bool operator < (const Variable& a, const Variable& b)
417 {
418 	return a.value < b.value;
419 }
420 
421 // A class representing a SPIRV operation.
422 // Since these are not copyable they don't need an internal id; the memory address is used instead.
423 class Operation
424 {
425 	friend bool					operator==(const Operation& a, const Operation& b);
426 public:
427 								Operation(const char* text)
428 		: value(text)
429 	{
430 	}
431 	const std::string&			getValue() const
432 	{
433 		return value;
434 	}
435 
436 private:
437 								Operation(const Operation& other);
438 	const std::string			value;
439 };
440 
441 bool operator == (const Operation& a, const Operation& b)
442 {
443 	return &a == &b; // a fast & simple address comparison - making copies was disabled
444 }
445 
446 // A namespace containing all SPIRV operations used in these tests (the OP macro expansion is illustrated right after this namespace).
447 namespace op {
448 #define OP(name) const Operation name("Op"#name)
449 	OP(Capability);
450 	OP(Extension);
451 	OP(ExtInstImport);
452 	OP(EntryPoint);
453 	OP(MemoryModel);
454 	OP(ExecutionMode);
455 
456 	OP(Decorate);
457 	OP(MemberDecorate);
458 	OP(Name);
459 	OP(MemberName);
460 
461 	OP(TypeVoid);
462 	OP(TypeBool);
463 	OP(TypeInt);
464 	OP(TypeFloat);
465 	OP(TypeVector);
466 	OP(TypeMatrix);
467 	OP(TypeArray);
468 	OP(TypeStruct);
469 	OP(TypeFunction);
470 	OP(TypePointer);
471 	OP(TypeImage);
472 	OP(TypeSampledImage);
473 
474 	OP(Constant);
475 	OP(ConstantComposite);
476 	OP(Variable);
477 
478 	OP(Function);
479 	OP(FunctionEnd);
480 	OP(Label);
481 	OP(Return);
482 
483 	OP(LogicalEqual);
484 	OP(IEqual);
485 	OP(Select);
486 
487 	OP(AccessChain);
488 	OP(Load);
489 	OP(Store);
490 #undef OP
491 }
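// For reference, the OP macro above expands to a plain constant definition, e.g.
//
//   OP(Capability);   // becomes: const Operation Capability ("OpCapability");
//
// so op::Capability, op::Load, etc. simply carry the textual SPIR-V opcode names.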
492 
493 // A class that makes it easy to compose SPIRV code.
494 // This class automatically keeps the correct order of most operations,
495 // i.e. capabilities go to the top, types before the constants that use them, etc.
496 class ShaderStream
497 {
498 public:
499 								ShaderStream ()
500 	{}
501 	// composes shader string out of shader substreams.
502 	std::string					str () const
503 	{
504 		std::stringstream stream;
505 		stream << capabilities.str()
506 			<< "; ----------------- PREAMBLE -----------------\n"
507 			<< preamble.str()
508 			<< "; ----------------- DEBUG --------------------\n"
509 			<< names.str()
510 			<< "; ----------------- DECORATIONS --------------\n"
511 			<< decorations.str()
512 			<< "; ----------------- TYPES --------------------\n"
513 			<< basictypes.str()
514 			<< "; ----------------- CONSTANTS ----------------\n"
515 			<< constants.str()
516 			<< "; ----------------- ADVANCED TYPES -----------\n"
517 			<< compositetypes.str()
518 			<< ((compositeconstants.str().length() > 0) ? "; ----------------- CONSTANTS ----------------\n" : "")
519 			<< compositeconstants.str()
520 			<< "; ----------------- VARIABLES & FUNCTIONS ----\n"
521 			<< shaderstream.str();
522 		return stream.str();
523 	}
524 	// Functions below are used to push Operations, Variables and other strings, numbers and characters to the shader.
525 	// Each function uses selectStream and map subroutines.
526 	// selectStream is used to choose a proper substream of shader.
527 	// E.g. if an operation is OpConstant it should be put into constants definitions stream - so selectStream will return that stream.
528 	// map, on the other hand, is used to replace Variables and Operations with their in-SPIRV-code representations.
529 	// For types like ints or floats, map simply uses the << operator to produce their string representation.
530 	// For Operations the proper operation string is returned.
531 	// For Variables there is a special mapping between the in-C++ variable and the in-SPIRV-code variable name.
532 	// The following sequence of functions could be squashed to just two using variadic templates once we move to C++11 or higher.
533 	// Each method returns *this to allow chaining calls to these methods. (An illustrative usage sketch follows this class definition.)
534 	template <typename T>
535 	ShaderStream&				operator () (const T& a)
536 	{
537 		selectStream(a, 0) << map(a) << '\n';
538 		return *this;
539 	}
540 	template <typename T1, typename T2>
541 	ShaderStream&				operator () (const T1& a, const T2& b)
542 	{
543 		selectStream(a, 0) << map(a) << '\t' << map(b) << '\n';
544 		return *this;
545 	}
546 	template <typename T1, typename T2, typename T3>
547 	ShaderStream&				operator () (const T1& a, const T2& b, const T3& c)
548 	{
549 		selectStream(a, c) << map(a) << '\t' << map(b) << '\t' << map(c) << '\n';
550 		return *this;
551 	}
552 	template <typename T1, typename T2, typename T3, typename T4>
553 	ShaderStream&				operator () (const T1& a, const T2& b, const T3& c, const T4& d)
554 	{
555 		selectStream(a, c) << map(a) << '\t' << map(b) << '\t' << map(c) << '\t' << map(d) << '\n';
556 		return *this;
557 	}
558 	template <typename T1, typename T2, typename T3, typename T4, typename T5>
559 	ShaderStream&				operator () (const T1& a, const T2& b, const T3& c, const T4& d, const T5& e)
560 	{
561 		selectStream(a, c) << map(a) << '\t' << map(b) << '\t' << map(c) << '\t' << map(d) << '\t' << map(e) << '\n';
562 		return *this;
563 	}
564 	template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6>
565 	ShaderStream&				operator () (const T1& a, const T2& b, const T3& c, const T4& d, const T5& e, const T6& f)
566 	{
567 		selectStream(a, c) << map(a) << '\t' << map(b) << '\t' << map(c) << '\t' << map(d) << '\t' << map(e) << '\t' << map(f) << '\n';
568 		return *this;
569 	}
570 	template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7>
571 	ShaderStream&				operator () (const T1& a, const T2& b, const T3& c, const T4& d, const T5& e, const T6& f, const T7& g)
572 	{
573 		selectStream(a, c) << map(a) << '\t' << map(b) << '\t' << map(c) << '\t' << map(d) << '\t' << map(e) << '\t' << map(f) << '\t' << map(g) << '\n';
574 		return *this;
575 	}
576 	template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8>
577 	ShaderStream&				operator () (const T1& a, const T2& b, const T3& c, const T4& d, const T5& e, const T6& f, const T7& g, const T8& h)
578 	{
579 		selectStream(a, c) << map(a) << '\t' << map(b) << '\t' << map(c) << '\t' << map(d) << '\t' << map(e) << '\t' << map(f) << '\t' << map(g) << '\t' << map(h) << '\n';
580 		return *this;
581 	}
582 	template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9>
583 	ShaderStream&				operator () (const T1& a, const T2& b, const T3& c, const T4& d, const T5& e, const T6& f, const T7& g, const T8& h, const T9& i)
584 	{
585 		selectStream(a, c) << map(a) << '\t' << map(b) << '\t' << map(c) << '\t' << map(d) << '\t' << map(e) << '\t' << map(f) << '\t' << map(g) << '\t' << map(h) << '\t' << map(i) << '\n';
586 		return *this;
587 	}
588 	template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10>
589 	ShaderStream&				operator () (const T1& a, const T2& b, const T3& c, const T4& d, const T5& e, const T6& f, const T7& g, const T8& h, const T9& i, const T10& k)
590 	{
591 		selectStream(a, c) << map(a) << '\t' << map(b) << '\t' << map(c) << '\t' << map(d) << '\t' << map(e) << '\t' << map(f) << '\t' << map(g) << '\t' << map(h) << '\t' << map(i) << '\t' << map(k) << '\n';
592 		return *this;
593 	}
594 
595 	// returns true if two variables have the same in-SPIRV-code name
596 	bool						areSame (const Variable a, const Variable b)
597 	{
598 		VariableIt varA = vars.find(a);
599 		VariableIt varB = vars.find(b);
600 		return varA != vars.end() && varB != vars.end() && varA->second == varB->second;
601 	}
602 
603 	// makes variable 'a' use the same in-SPIRV-code name as variable 'b'
604 	void						makeSame (const Variable a, const Variable b)
605 	{
606 		VariableIt varB = vars.find(b);
607 		if (varB != vars.end())
608 		{
609 			std::pair<VariableIt, bool> inserted = vars.insert(std::make_pair(a, varB->second));
610 			if (!inserted.second)
611 				inserted.first->second = varB->second;
612 		}
613 	}
614 private:
615 	// generic version of map (tries to push whatever came to stringstream to get its string representation)
616 	template <typename T>
617 	std::string					map (const T& a)
618 	{
619 		std::stringstream temp;
620 		temp << a;
621 		return temp.str();
622 	}
623 
624 	// looks for a mapping of a C++ Variable object onto an in-SPIRV-code name.
625 	// if no such mapping has been generated yet, a new one is created based on an incremented local counter.
626 	std::string					map (const Variable& a)
627 	{
628 		VariableIt var = vars.find(a);
629 		if (var != vars.end())
630 			return var->second;
631 		std::stringstream temp;
632 		temp << '%';
633 		temp.width(4);
634 		temp.fill('0');
635 		temp << std::hex << varCounter.incrementAndGetValue();
636 		vars.insert(std::make_pair(a, temp.str()));
637 		return temp.str();
638 	}
639 
640 	// a simple specialisation for Operation
641 	std::string					map (const Operation& a)
642 	{
643 		return a.getValue();
644 	}
645 
646 	// a specialisation for char* - faster than going through the stringstream << operator
647 	std::string					map (const char*& a)
648 	{
649 		return std::string(a);
650 	}
651 
652 	// a specialisation for char - faster than going through the stringstream << operator
653 	std::string					map (const char& a)
654 	{
655 		return std::string(1, a);
656 	}
657 
658 	// a generic version of selectStream - used when neither the 1st nor the 3rd SPIRV line token is an Operation.
659 	// In general this should never happen.
660 	// All SPIRV lines are constructed in one of two forms:
661 	// Variable = Operation operands...
662 	// or
663 	// Operation operands...
664 	// So operation is either 1st or 3rd token.
665 	template <typename T0, typename T1>
666 	std::stringstream&			selectStream (const T0& op0, const T1& op1)
667 	{
668 		DE_UNREF(op0);
669 		DE_UNREF(op1);
670 		return shaderstream;
671 	}
672 
673 	// Specialisation for Operation being 1st parameter
674 	// Certain operations cause the SPIRV code line to be pushed to a different substream.
675 	template <typename T1>
676 	std::stringstream&			selectStream (const Operation& op, const T1& op1)
677 	{
678 		DE_UNREF(op1);
679 		if (op == op::Decorate || op == op::MemberDecorate)
680 			return decorations;
681 		if (op == op::Name || op == op::MemberName)
682 			return names;
683 		if (op == op::Capability || op == op::Extension)
684 			return capabilities;
685 		if (op == op::MemoryModel || op == op::ExecutionMode || op == op::EntryPoint)
686 			return preamble;
687 		return shaderstream;
688 	}
689 
690 	// Specialisation for Operation being 3rd parameter
691 	// Certain operations cause the SPIRV code line to be pushed to a different substream.
692 	// If we wanted to extend this way of generating SPIRV we could use this method as a SPIRV line validation point,
693 	// e.g. here, instead of having a partial specialisation, I could specialise for T0 being Variable since this has to match Variable = Operation operands...
694 	template <typename T0>
695 	std::stringstream&			selectStream (const T0& op0, const Operation& op)
696 	{
697 		DE_UNREF(op0);
698 		if (op == op::ExtInstImport)
699 			return preamble;
700 		if (op == op::TypeVoid || op == op::TypeBool || op == op::TypeInt || op == op::TypeFloat || op == op::TypeVector || op == op::TypeMatrix)
701 			return basictypes;
702 		if (op == op::TypeArray || op == op::TypeStruct || op == op::TypeFunction || op == op::TypePointer || op == op::TypeImage || op == op::TypeSampledImage)
703 			return compositetypes;
704 		if (op == op::Constant)
705 			return constants;
706 		if (op == op::ConstantComposite)
707 			return compositeconstants;
708 		return shaderstream;
709 	}
710 
711 	typedef std::map<Variable, std::string>	VariablesPack;
712 	typedef VariablesPack::iterator			VariableIt;
713 
714 	// local mappings between c++ Variable objects and in-SPIRV-code names
715 	VariablesPack				vars;
716 
717 	// shader substreams
718 	std::stringstream			capabilities;
719 	std::stringstream			preamble;
720 	std::stringstream			names;
721 	std::stringstream			decorations;
722 	std::stringstream			basictypes;
723 	std::stringstream			constants;
724 	std::stringstream			compositetypes;
725 	std::stringstream			compositeconstants;
726 	std::stringstream			shaderstream;
727 
728 	// local incremented counter
729 	Autocounter					varCounter;
730 };
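// Illustrative sketch of how ShaderStream is used (hypothetical variables; the real
// shader generation happens in MakeShader() below). Each call routes its line to the
// proper substream and maps a Variable to a generated %xxxx name on first use:
//
//   Autocounter  counter;
//   Variable     f32 (counter);
//   Variable     one (counter);
//   ShaderStream src;
//   src(op::Capability, "Shader")          // -> capabilities substream
//      (f32, '=', op::TypeFloat, 32)       // -> basic types substream ("%0001 = OpTypeFloat 32")
//      (one, '=', op::Constant, f32, 1);   // -> constants substream   ("%0002 = OpConstant %0001 1")
//   const std::string spirvText = src.str();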
731 
732 // A supplementary class to group frequently used Variables together
733 class Variables
734 {
735 public:
736 								Variables (Autocounter &autoincrement)
737 		: version(autoincrement)
738 		, mainFunc(autoincrement)
739 		, mainFuncLabel(autoincrement)
740 		, voidFuncVoid(autoincrement)
741 		, copy_type(autoincrement)
742 		, copy_type_vec(autoincrement)
743 		, buffer_type_vec(autoincrement)
744 		, copy_type_ptr(autoincrement)
745 		, buffer_type(autoincrement)
746 		, voidId(autoincrement)
747 		, v4f32(autoincrement)
748 		, v4s32(autoincrement)
749 		, v4u32(autoincrement)
750 		, v4s64(autoincrement)
751 		, v4u64(autoincrement)
752 		, s32(autoincrement)
753 		, f32(autoincrement)
754 		, u32(autoincrement)
755 		, s64(autoincrement)
756 		, u64(autoincrement)
757 		, boolean(autoincrement)
758 		, array_content_type(autoincrement)
759 		, s32_type_ptr(autoincrement)
760 		, dataSelectorStructPtrType(autoincrement)
761 		, dataSelectorStructPtr(autoincrement)
762 		, dataArrayType(autoincrement)
763 		, dataInput(autoincrement)
764 		, dataInputPtrType(autoincrement)
765 		, dataInputType(autoincrement)
766 		, dataInputSampledType(autoincrement)
767 		, dataOutput(autoincrement)
768 		, dataOutputPtrType(autoincrement)
769 		, dataOutputType(autoincrement)
770 		, dataSelectorStructType(autoincrement)
771 		, input(autoincrement)
772 		, inputPtr(autoincrement)
773 		, output(autoincrement)
774 		, outputPtr(autoincrement)
775 	{
776 		for (deUint32 i = 0; i < 32; ++i)
777 			constants.push_back(Variable(autoincrement));
778 	}
779 	const Variable				version;
780 	const Variable				mainFunc;
781 	const Variable				mainFuncLabel;
782 	const Variable				voidFuncVoid;
783 	std::vector<Variable>		constants;
784 	const Variable				copy_type;
785 	const Variable				copy_type_vec;
786 	const Variable				buffer_type_vec;
787 	const Variable				copy_type_ptr;
788 	const Variable				buffer_type;
789 	const Variable				voidId;
790 	const Variable				v4f32;
791 	const Variable				v4s32;
792 	const Variable				v4u32;
793 	const Variable				v4s64;
794 	const Variable				v4u64;
795 	const Variable				s32;
796 	const Variable				f32;
797 	const Variable				u32;
798 	const Variable				s64;
799 	const Variable				u64;
800 	const Variable				boolean;
801 	const Variable				array_content_type;
802 	const Variable				s32_type_ptr;
803 	const Variable				dataSelectorStructPtrType;
804 	const Variable				dataSelectorStructPtr;
805 	const Variable				dataArrayType;
806 	const Variable				dataInput;
807 	const Variable				dataInputPtrType;
808 	const Variable				dataInputType;
809 	const Variable				dataInputSampledType;
810 	const Variable				dataOutput;
811 	const Variable				dataOutputPtrType;
812 	const Variable				dataOutputType;
813 	const Variable				dataSelectorStructType;
814 	const Variable				input;
815 	const Variable				inputPtr;
816 	const Variable				output;
817 	const Variable				outputPtr;
818 };
819 
820 // A routine generating SPIRV code for all test cases in this group
821 std::string MakeShader(VkShaderStageFlags shaderStage, ShaderType shaderType, VkFormat bufferFormat, bool reads, bool dummy)
822 {
823 	const bool					isR64				= (bufferFormat == VK_FORMAT_R64_UINT || bufferFormat == VK_FORMAT_R64_SINT);
824 	// faster to write
825 	const char					is					= '=';
826 
827 	// Variables require such a counter to generate their unique ids. Since there is a possibility that in the future this code will
828 	// run in parallel, this counter is made local to this function body to be safe.
829 	Autocounter					localcounter;
830 
831 	// Frequently used Variables (gathered into this single object for readability)
832 	Variables					var					(localcounter);
833 
834 	// A SPIRV code builder
835 	ShaderStream				shaderSource;
836 
837 	// A basic preamble of SPIRV shader. Turns on required capabilities and extensions.
838 	shaderSource
839 	(op::Capability, "Shader")
840 	(op::Capability, "VariablePointersStorageBuffer");
841 
842 	if (isR64)
843 	{
844 		shaderSource
845 		(op::Capability, "Int64");
846 	}
847 
848 	shaderSource
849 	(op::Extension, "\"SPV_KHR_storage_buffer_storage_class\"")
850 	(op::Extension, "\"SPV_KHR_variable_pointers\"")
851 	(var.version, is, op::ExtInstImport, "\"GLSL.std.450\"")
852 	(op::MemoryModel, "Logical", "GLSL450");
853 
854 	// Use correct entry point definition depending on shader stage
855 	if (shaderStage == VK_SHADER_STAGE_COMPUTE_BIT)
856 	{
857 		shaderSource
858 		(op::EntryPoint, "GLCompute", var.mainFunc, "\"main\"")
859 		(op::ExecutionMode, var.mainFunc, "LocalSize", 1, 1, 1);
860 	}
861 	else if (shaderStage == VK_SHADER_STAGE_VERTEX_BIT)
862 	{
863 		shaderSource
864 		(op::EntryPoint, "Vertex", var.mainFunc, "\"main\"", var.input, var.output)
865 		(op::Decorate, var.output, "BuiltIn", "Position")
866 		(op::Decorate, var.input, "Location", 0);
867 	}
868 	else if (shaderStage == VK_SHADER_STAGE_FRAGMENT_BIT)
869 	{
870 		shaderSource
871 		(op::EntryPoint, "Fragment", var.mainFunc, "\"main\"", var.output)
872 		(op::ExecutionMode, var.mainFunc, "OriginUpperLeft")
873 		(op::Decorate, var.output, "Location", 0);
874 	}
875 
876 	// If we are testing the vertex shader or the fragment shader we need to provide the other one for the pipeline too.
877 	// The one not being tested is the 'dummy' - a minimal/simplest possible pass-through shader.
878 	// If we are testing the compute shader we don't need a dummy shader at all.
879 	if (dummy)
880 	{
881 		if (shaderStage == VK_SHADER_STAGE_FRAGMENT_BIT)
882 		{
883 			shaderSource
884 			(var.voidId, is, op::TypeVoid)
885 			(var.voidFuncVoid, is, op::TypeFunction, var.voidId)
886 			(var.f32, is, op::TypeFloat, 32)
887 			(var.v4f32, is, op::TypeVector, var.f32, 4)
888 			(var.outputPtr, is, op::TypePointer, "Output", var.v4f32)
889 			(var.output, is, op::Variable, var.outputPtr, "Output")
890 			(var.constants[6], is, op::Constant, var.f32, 1)
891 			(var.constants[7], is, op::ConstantComposite, var.v4f32, var.constants[6], var.constants[6], var.constants[6], var.constants[6])
892 			(var.mainFunc, is, op::Function, var.voidId, "None", var.voidFuncVoid)
893 			(var.mainFuncLabel, is, op::Label);
894 		}
895 		else if (shaderStage == VK_SHADER_STAGE_VERTEX_BIT)
896 		{
897 			shaderSource
898 			(var.voidId, is, op::TypeVoid)
899 			(var.voidFuncVoid, is, op::TypeFunction , var.voidId)
900 			(var.f32, is, op::TypeFloat, 32)
901 			(var.v4f32, is, op::TypeVector , var.f32, 4)
902 			(var.outputPtr, is, op::TypePointer, "Output" , var.v4f32)
903 			(var.output, is, op::Variable , var.outputPtr, "Output")
904 			(var.inputPtr, is, op::TypePointer, "Input" , var.v4f32)
905 			(var.input, is, op::Variable , var.inputPtr, "Input")
906 			(var.mainFunc, is, op::Function , var.voidId, "None", var.voidFuncVoid)
907 			(var.mainFuncLabel, is, op::Label);
908 		}
909 	}
910 	else // this is a start of actual shader that tests variable pointers
911 	{
912 		shaderSource
913 		(op::Decorate, var.dataInput, "DescriptorSet", 0)
914 		(op::Decorate, var.dataInput, "Binding", 0)
915 
916 		(op::Decorate, var.dataOutput, "DescriptorSet", 0)
917 		(op::Decorate, var.dataOutput, "Binding", 1);
918 
919 		// for scalar and vector types we use a 1024-element array of 4-element arrays of 4-component vectors,
920 		// so the stride of the inner array is the size of a 4-component vector
921 		if (shaderType == SHADER_TYPE_SCALAR_COPY || shaderType == SHADER_TYPE_VECTOR_COPY)
922 		{
923 			if (isR64)
924 			{
925 				shaderSource
926 				(op::Decorate, var.array_content_type, "ArrayStride", 32);
927 			}
928 			else
929 			{
930 				shaderSource
931 				(op::Decorate, var.array_content_type, "ArrayStride", 16);
932 			}
933 		}
934 
935 		if (isR64)
936 		{
937 			shaderSource
938 			(op::Decorate, var.dataArrayType, "ArrayStride", 128);
939 		}
940 		else
941 		{
942 			// for matrices we use an array of 4x4-component matrices;
943 			// the stride of the outer array is then 64 in every case (byte offsets are summarised right after this if/else)
944 			shaderSource
945 			(op::Decorate, var.dataArrayType, "ArrayStride", 64);
946 		}
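		// For reference (derived from the ArrayStride decorations above): with 32-bit
		// formats each 4-component vector j of outer element i starts at byte offset
		// i * 64 + j * 16; with 64-bit formats it is i * 128 + j * 32 (matrices are
		// only used with VK_FORMAT_R32_SFLOAT).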
947 
948 		// an output block
949 		shaderSource
950 		(op::MemberDecorate, var.dataOutputType, 0, "Offset", 0)
951 		(op::Decorate, var.dataOutputType, "Block")
952 
953 		// an input block. Marked readonly.
954 		(op::MemberDecorate, var.dataInputType, 0, "NonWritable")
955 		(op::MemberDecorate, var.dataInputType, 0, "Offset", 0)
956 		(op::Decorate, var.dataInputType, "Block")
957 
958 		// a special structure matching the data in one of our buffers.
959 		// member at 0 is the index of the read position
960 		// member at 1 is the index of the write position
961 		// member at 2 is always zero. It is used to perform OpSelect. A value coming from the buffer is used to avoid incidental optimisations that could prune OpSelect if the value were compile-time known.
962 		(op::MemberDecorate, var.dataSelectorStructType, 0, "Offset", 0)
963 		(op::MemberDecorate, var.dataSelectorStructType, 1, "Offset", 4)
964 		(op::MemberDecorate, var.dataSelectorStructType, 2, "Offset", 8)
965 		(op::Decorate, var.dataSelectorStructType, "Block")
966 
967 		// binding to matching buffer
968 		(op::Decorate, var.dataSelectorStructPtr, "DescriptorSet", 0)
969 		(op::Decorate, var.dataSelectorStructPtr, "Binding", 2)
970 
971 		// making composite types used in shader
972 		(var.voidId, is, op::TypeVoid)
973 		(var.voidFuncVoid, is, op::TypeFunction, var.voidId)
974 
975 		(var.boolean, is, op::TypeBool)
976 
977 		(var.f32, is, op::TypeFloat, 32)
978 		(var.s32, is, op::TypeInt, 32, 1)
979 		(var.u32, is, op::TypeInt, 32, 0);
980 
981 		if (isR64)
982 		{
983 			shaderSource
984 			(var.s64, is, op::TypeInt, 64, 1)
985 			(var.u64, is, op::TypeInt, 64, 0);
986 		}
987 
988 		shaderSource
989 		(var.v4f32, is, op::TypeVector, var.f32, 4)
990 		(var.v4s32, is, op::TypeVector, var.s32, 4)
991 		(var.v4u32, is, op::TypeVector, var.u32, 4);
992 
993 		if (isR64)
994 		{
995 			shaderSource
996 			(var.v4s64, is, op::TypeVector, var.s64, 4)
997 			(var.v4u64, is, op::TypeVector, var.u64, 4);
998 		}
999 
1000 		// since the shader tests scalars, vectors and matrices of ints, uints and floats, alternative names are generated for some of the types so they can be used without an "if" everywhere.
1001 		// The Variable mappings will make sure the proper variable name is used.
1002 		// below is the first part of aliasing types, based on int, uint and float
1003 		switch (bufferFormat)
1004 		{
1005 		case vk::VK_FORMAT_R32_SINT:
1006 			shaderSource.makeSame(var.buffer_type, var.s32);
1007 			shaderSource.makeSame(var.buffer_type_vec, var.v4s32);
1008 			break;
1009 		case vk::VK_FORMAT_R32_UINT:
1010 			shaderSource.makeSame(var.buffer_type, var.u32);
1011 			shaderSource.makeSame(var.buffer_type_vec, var.v4u32);
1012 			break;
1013 		case vk::VK_FORMAT_R32_SFLOAT:
1014 			shaderSource.makeSame(var.buffer_type, var.f32);
1015 			shaderSource.makeSame(var.buffer_type_vec, var.v4f32);
1016 			break;
1017 		case vk::VK_FORMAT_R64_SINT:
1018 			shaderSource.makeSame(var.buffer_type, var.s64);
1019 			shaderSource.makeSame(var.buffer_type_vec, var.v4s64);
1020 			break;
1021 		case vk::VK_FORMAT_R64_UINT:
1022 			shaderSource.makeSame(var.buffer_type, var.u64);
1023 			shaderSource.makeSame(var.buffer_type_vec, var.v4u64);
1024 			break;
1025 		default:
1026 			// to prevent the compiler from complaining that not all cases are handled (but we should not get here).
1027 			deAssertFail("This point should be not reachable with correct program flow.", __FILE__, __LINE__);
1028 			break;
1029 		}
1030 
1031 		// below is a second part that aliases based on scalar, vector, matrix
1032 		switch (shaderType)
1033 		{
1034 		case SHADER_TYPE_SCALAR_COPY:
1035 			shaderSource.makeSame(var.copy_type, var.buffer_type);
1036 			break;
1037 		case SHADER_TYPE_VECTOR_COPY:
1038 			shaderSource.makeSame(var.copy_type, var.buffer_type_vec);
1039 			break;
1040 		case SHADER_TYPE_MATRIX_COPY:
1041 			if (bufferFormat != VK_FORMAT_R32_SFLOAT)
1042 				TCU_THROW(NotSupportedError, "Matrices can be used only with floating point types.");
1043 			shaderSource
1044 			(var.copy_type, is, op::TypeMatrix, var.buffer_type_vec, 4);
1045 			break;
1046 		default:
1047 			// to prevent the compiler from complaining that not all cases are handled (but we should not get here).
1048 			deAssertFail("This point should be not reachable with correct program flow.", __FILE__, __LINE__);
1049 			break;
1050 		}
1051 
1052 		// We will need some constants, so let's add them to the shader source
1053 		shaderSource
1054 		(var.constants[0], is, op::Constant, var.s32, 0)
1055 		(var.constants[1], is, op::Constant, var.s32, 1)
1056 		(var.constants[2], is, op::Constant, var.s32, 2)
1057 		(var.constants[3], is, op::Constant, var.s32, 3)
1058 		(var.constants[4], is, op::Constant, var.u32, 4)
1059 		(var.constants[5], is, op::Constant, var.u32, 1024);
1060 
1061 		// for fragment shaders we additionally need a constant vector (the output "colour"), so let's make it
1062 		if (shaderStage == VK_SHADER_STAGE_FRAGMENT_BIT)
1063 		{
1064 			shaderSource
1065 			(var.constants[6], is, op::Constant, var.f32, 1)
1066 			(var.constants[7], is, op::ConstantComposite, var.v4f32, var.constants[6], var.constants[6], var.constants[6], var.constants[6]);
1067 		}
1068 
1069 		// additional alias for the type of content of this 1024-element outer array.
1070 		if (shaderType == SHADER_TYPE_SCALAR_COPY || shaderType == SHADER_TYPE_VECTOR_COPY)
1071 		{
1072 			shaderSource
1073 			(var.array_content_type, is, op::TypeArray, var.buffer_type_vec, var.constants[4]);
1074 		}
1075 		else
1076 		{
1077 			shaderSource.makeSame(var.array_content_type, var.copy_type);
1078 		}
1079 
1080 		// Let's create pointer types to the input data type, output data type and a struct.
1081 		// These must be distinct types due to different type decorations.
1082 		// Let's also make the actual pointers to the data.
1083 		shaderSource
1084 		(var.dataArrayType, is, op::TypeArray, var.array_content_type, var.constants[5])
1085 		(var.dataInputType, is, op::TypeStruct, var.dataArrayType)
1086 		(var.dataOutputType, is, op::TypeStruct, var.dataArrayType)
1087 		(var.dataInputPtrType, is, op::TypePointer, "StorageBuffer", var.dataInputType)
1088 		(var.dataOutputPtrType, is, op::TypePointer, "StorageBuffer", var.dataOutputType)
1089 		(var.dataInput, is, op::Variable, var.dataInputPtrType, "StorageBuffer")
1090 		(var.dataOutput, is, op::Variable, var.dataOutputPtrType, "StorageBuffer")
1091 		(var.dataSelectorStructType, is, op::TypeStruct, var.s32, var.s32, var.s32)
1092 		(var.dataSelectorStructPtrType, is, op::TypePointer, "Uniform", var.dataSelectorStructType)
1093 		(var.dataSelectorStructPtr, is, op::Variable, var.dataSelectorStructPtrType, "Uniform");
1094 
1095 		// we also need additional pointers to fulfil the stage requirements on shader inputs and outputs
1096 		if (shaderStage == VK_SHADER_STAGE_VERTEX_BIT)
1097 		{
1098 			shaderSource
1099 			(var.inputPtr, is, op::TypePointer, "Input", var.v4f32)
1100 			(var.input, is, op::Variable, var.inputPtr, "Input")
1101 			(var.outputPtr, is, op::TypePointer, "Output", var.v4f32)
1102 			(var.output, is, op::Variable, var.outputPtr, "Output");
1103 		}
1104 		else if (shaderStage == VK_SHADER_STAGE_FRAGMENT_BIT)
1105 		{
1106 			shaderSource
1107 			(var.outputPtr, is, op::TypePointer, "Output", var.v4f32)
1108 			(var.output, is, op::Variable, var.outputPtr, "Output");
1109 		}
1110 
1111 		shaderSource
1112 		(var.copy_type_ptr, is, op::TypePointer, "StorageBuffer", var.copy_type)
1113 		(var.s32_type_ptr, is, op::TypePointer, "Uniform", var.s32);
1114 
1115 		// Make a shader main function
1116 		shaderSource
1117 		(var.mainFunc, is, op::Function, var.voidId, "None", var.voidFuncVoid)
1118 		(var.mainFuncLabel, is, op::Label);
1119 
1120 		Variable copyFromPtr(localcounter), copyToPtr(localcounter), zeroPtr(localcounter);
1121 		Variable copyFrom(localcounter), copyTo(localcounter), zero(localcounter);
1122 
1123 		// Let's load data from our auxiliary buffer: the reading index, the writing index and zero.
1124 		shaderSource
1125 		(copyToPtr, is, op::AccessChain, var.s32_type_ptr, var.dataSelectorStructPtr, var.constants[1])
1126 		(copyTo, is, op::Load, var.s32, copyToPtr)
1127 		(copyFromPtr, is, op::AccessChain, var.s32_type_ptr, var.dataSelectorStructPtr, var.constants[0])
1128 		(copyFrom, is, op::Load, var.s32, copyFromPtr)
1129 		(zeroPtr, is, op::AccessChain, var.s32_type_ptr, var.dataSelectorStructPtr, var.constants[2])
1130 		(zero, is, op::Load, var.s32, zeroPtr);
1131 
1132 		// let's start copying data using variable pointers
1133 		switch (shaderType)
1134 		{
1135 		case SHADER_TYPE_SCALAR_COPY:
1136 			for (int i = 0; i < 4; ++i)
1137 			{
1138 				for (int j = 0; j < 4; ++j)
1139 				{
1140 					Variable actualLoadChain(localcounter), actualStoreChain(localcounter), loadResult(localcounter);
1141 					Variable selection(localcounter);
1142 					Variable lcA(localcounter), lcB(localcounter), scA(localcounter), scB(localcounter);
1143 
1144 					shaderSource
1145 					(selection, is, op::IEqual, var.boolean, zero, var.constants[0]);
1146 
1147 					if (reads)
1148 					{
1149 						// if we check reads we use variable pointers only for reading part
1150 						shaderSource
1151 						(lcA, is, op::AccessChain, var.copy_type_ptr, var.dataInput, var.constants[0], copyFrom, var.constants[i], var.constants[j])
1152 						(lcB, is, op::AccessChain, var.copy_type_ptr, var.dataInput, var.constants[0], copyFrom, var.constants[i], var.constants[j])
1153 						// actualLoadChain will be a variable pointer as it was created through OpSelect
1154 						(actualLoadChain, is, op::Select, var.copy_type_ptr, selection, lcA, lcB)
1155 						// actualStoreChain will be a regular pointer
1156 						(actualStoreChain, is, op::AccessChain, var.copy_type_ptr, var.dataOutput, var.constants[0], copyTo, var.constants[i], var.constants[j]);
1157 					}
1158 					else
1159 					{
1160 						// if we check writes we use variable pointers only for the writing part
1161 						shaderSource
1162 						// actualLoadChain will be a regular pointer
1163 						(actualLoadChain, is, op::AccessChain, var.copy_type_ptr, var.dataInput, var.constants[0], copyFrom, var.constants[i], var.constants[j])
1164 						(scA, is, op::AccessChain, var.copy_type_ptr, var.dataOutput, var.constants[0], copyTo, var.constants[i], var.constants[j])
1165 						(scB, is, op::AccessChain, var.copy_type_ptr, var.dataOutput, var.constants[0], copyTo, var.constants[i], var.constants[j])
1166 						// actualStoreChain will be a variable pointer as it was created through OpSelect
1167 						(actualStoreChain, is, op::Select, var.copy_type_ptr, selection, scA, scB);
1168 					}
1169 					// do actual copying
1170 					shaderSource
1171 					(loadResult, is, op::Load, var.copy_type, actualLoadChain)
1172 					(op::Store, actualStoreChain, loadResult);
1173 				}
1174 			}
1175 			break;
1176 		// cases below have the same logic as the one above - we are just copying bigger chunks of data with every load/store pair
1177 		case SHADER_TYPE_VECTOR_COPY:
1178 			for (int i = 0; i < 4; ++i)
1179 			{
1180 				Variable actualLoadChain(localcounter), actualStoreChain(localcounter), loadResult(localcounter);
1181 				Variable selection(localcounter);
1182 				Variable lcA(localcounter), lcB(localcounter), scA(localcounter), scB(localcounter);
1183 
1184 				shaderSource
1185 				(selection, is, op::IEqual, var.boolean, zero, var.constants[0]);
1186 
1187 				if (reads)
1188 				{
1189 					shaderSource
1190 					(lcA, is, op::AccessChain, var.copy_type_ptr, var.dataInput, var.constants[0], copyFrom, var.constants[i])
1191 					(lcB, is, op::AccessChain, var.copy_type_ptr, var.dataInput, var.constants[0], copyFrom, var.constants[i])
1192 					(actualLoadChain, is, op::Select, var.copy_type_ptr, selection, lcA, lcB)
1193 					(actualStoreChain, is, op::AccessChain, var.copy_type_ptr, var.dataOutput, var.constants[0], copyTo, var.constants[i]);
1194 				}
1195 				else
1196 				{
1197 					shaderSource
1198 					(actualLoadChain, is, op::AccessChain, var.copy_type_ptr, var.dataInput, var.constants[0], copyFrom, var.constants[i])
1199 					(scA, is, op::AccessChain, var.copy_type_ptr, var.dataOutput, var.constants[0], copyTo, var.constants[i])
1200 					(scB, is, op::AccessChain, var.copy_type_ptr, var.dataOutput, var.constants[0], copyTo, var.constants[i])
1201 					(actualStoreChain, is, op::Select, var.copy_type_ptr, selection, scA, scB);
1202 				}
1203 
1204 				shaderSource
1205 				(loadResult, is, op::Load, var.copy_type, actualLoadChain)
1206 				(op::Store, actualStoreChain, loadResult);
1207 			}
1208 			break;
1209 		case SHADER_TYPE_MATRIX_COPY:
1210 			{
1211 				Variable actualLoadChain(localcounter), actualStoreChain(localcounter), loadResult(localcounter);
1212 				Variable selection(localcounter);
1213 				Variable lcA(localcounter), lcB(localcounter), scA(localcounter), scB(localcounter);
1214 
1215 				shaderSource
1216 				(selection, is, op::IEqual, var.boolean, zero, var.constants[0]);
1217 
1218 				if (reads)
1219 				{
1220 					shaderSource
1221 					(lcA, is, op::AccessChain, var.copy_type_ptr, var.dataInput, var.constants[0], copyFrom)
1222 					(lcB, is, op::AccessChain, var.copy_type_ptr, var.dataInput, var.constants[0], copyFrom)
1223 					(actualLoadChain, is, op::Select, var.copy_type_ptr, selection, lcA, lcB)
1224 					(actualStoreChain, is, op::AccessChain, var.copy_type_ptr, var.dataOutput, var.constants[0], copyTo);
1225 				}
1226 				else
1227 				{
1228 					shaderSource
1229 					(actualLoadChain, is, op::AccessChain, var.copy_type_ptr, var.dataInput, var.constants[0], copyFrom)
1230 					(scA, is, op::AccessChain, var.copy_type_ptr, var.dataOutput, var.constants[0], copyTo)
1231 					(scB, is, op::AccessChain, var.copy_type_ptr, var.dataOutput, var.constants[0], copyTo)
1232 					(actualStoreChain, is, op::Select, var.copy_type_ptr, selection, scA, scB);
1233 				}
1234 
1235 				shaderSource
1236 				(loadResult, is, op::Load, var.copy_type, actualLoadChain)
1237 				(op::Store, actualStoreChain, loadResult);
1238 			}
1239 			break;
1240 		default:
1241 			// to prevent the compiler from complaining that not all cases are handled (but we should not get here).
1242 			deAssertFail("This point should be not reachable with correct program flow.", __FILE__, __LINE__);
1243 			break;
1244 		}
1245 	}
1246 
1247 	// This is common for test shaders and dummy ones.
1248 	// We need to fill the stage output from the shader properly:
1249 	// output vertex positions in the vertex shader
1250 	if (shaderStage == VK_SHADER_STAGE_VERTEX_BIT)
1251 	{
1252 		Variable inputValue(localcounter), outputLocation(localcounter);
1253 		shaderSource
1254 		(inputValue, is, op::Load, var.v4f32, var.input)
1255 		(outputLocation, is, op::AccessChain, var.outputPtr, var.output)
1256 		(op::Store, outputLocation, inputValue);
1257 	}
1258 	// output colour in fragment shader
1259 	else if (shaderStage == VK_SHADER_STAGE_FRAGMENT_BIT)
1260 	{
1261 		shaderSource
1262 		(op::Store, var.output, var.constants[7]);
1263 	}
1264 
1265 	// We are done. Let's close the main function body.
1266 	shaderSource
1267 	(op::Return)
1268 	(op::FunctionEnd);
1269 
1270 	return shaderSource.str();
1271 }
1272 
1273 RobustReadTest::RobustReadTest (tcu::TestContext&		testContext,
1274 								const std::string&		name,
1275 								const std::string&		description,
1276 								VkShaderStageFlags		shaderStage,
1277 								ShaderType				shaderType,
1278 								VkFormat				bufferFormat,
1279 								VkDeviceSize			readAccessRange,
1280 								bool					accessOutOfBackingMemory)
1281 	: RobustAccessWithPointersTest	(testContext, name, description, shaderStage, shaderType, bufferFormat)
1282 	, m_readAccessRange				(readAccessRange)
1283 	, m_accessOutOfBackingMemory	(accessOutOfBackingMemory)
1284 {
1285 }
1286 
1287 TestInstance* RobustReadTest::createInstance (Context& context) const
1288 {
1289 	auto device = createRobustBufferAccessVariablePointersDevice(context);
1290 	return new ReadInstance(context, device, m_shaderType, m_shaderStage, m_bufferFormat, m_readAccessRange, m_accessOutOfBackingMemory);
1291 }
1292 
1293 void RobustReadTest::initPrograms(SourceCollections&	programCollection) const
1294 {
1295 	if (m_shaderStage == VK_SHADER_STAGE_COMPUTE_BIT)
1296 	{
1297 		programCollection.spirvAsmSources.add("compute") << MakeShader(VK_SHADER_STAGE_COMPUTE_BIT, m_shaderType, m_bufferFormat, true, false);
1298 	}
1299 	else
1300 	{
1301 		programCollection.spirvAsmSources.add("vertex") << MakeShader(VK_SHADER_STAGE_VERTEX_BIT, m_shaderType, m_bufferFormat, true, m_shaderStage != VK_SHADER_STAGE_VERTEX_BIT);
1302 		programCollection.spirvAsmSources.add("fragment") << MakeShader(VK_SHADER_STAGE_FRAGMENT_BIT, m_shaderType, m_bufferFormat, true, m_shaderStage != VK_SHADER_STAGE_FRAGMENT_BIT);
1303 	}
1304 }
1305 
1306 RobustWriteTest::RobustWriteTest (tcu::TestContext&		testContext,
1307 								  const std::string&	name,
1308 								  const std::string&	description,
1309 								  VkShaderStageFlags	shaderStage,
1310 								  ShaderType			shaderType,
1311 								  VkFormat				bufferFormat,
1312 								  VkDeviceSize			writeAccessRange,
1313 								  bool					accessOutOfBackingMemory)
1314 
1315 	: RobustAccessWithPointersTest	(testContext, name, description, shaderStage, shaderType, bufferFormat)
1316 	, m_writeAccessRange			(writeAccessRange)
1317 	, m_accessOutOfBackingMemory	(accessOutOfBackingMemory)
1318 {
1319 }
1320 
1321 TestInstance* RobustWriteTest::createInstance (Context& context) const
1322 {
1323 	auto device = createRobustBufferAccessVariablePointersDevice(context);
1324 	return new WriteInstance(context, device, m_shaderType, m_shaderStage, m_bufferFormat, m_writeAccessRange, m_accessOutOfBackingMemory);
1325 }
1326 
1327 void RobustWriteTest::initPrograms(SourceCollections&	programCollection) const
1328 {
1329 	if (m_shaderStage == VK_SHADER_STAGE_COMPUTE_BIT)
1330 	{
1331 		programCollection.spirvAsmSources.add("compute") << MakeShader(VK_SHADER_STAGE_COMPUTE_BIT, m_shaderType, m_bufferFormat, false, false);
1332 	}
1333 	else
1334 	{
1335 		programCollection.spirvAsmSources.add("vertex") << MakeShader(VK_SHADER_STAGE_VERTEX_BIT, m_shaderType, m_bufferFormat, false, m_shaderStage != VK_SHADER_STAGE_VERTEX_BIT);
1336 		programCollection.spirvAsmSources.add("fragment") << MakeShader(VK_SHADER_STAGE_FRAGMENT_BIT, m_shaderType, m_bufferFormat, false, m_shaderStage != VK_SHADER_STAGE_FRAGMENT_BIT);
1337 	}
1338 }
1339 
1340 AccessInstance::AccessInstance (Context&			context,
1341 								Move<VkDevice>		device,
1342 								ShaderType			shaderType,
1343 								VkShaderStageFlags	shaderStage,
1344 								VkFormat			bufferFormat,
1345 								BufferAccessType	bufferAccessType,
1346 								VkDeviceSize		inBufferAccessRange,
1347 								VkDeviceSize		outBufferAccessRange,
1348 								bool				accessOutOfBackingMemory)
1349 	: vkt::TestInstance				(context)
1350 	, m_device						(device)
1351 	, m_shaderType					(shaderType)
1352 	, m_shaderStage					(shaderStage)
1353 	, m_bufferFormat				(bufferFormat)
1354 	, m_bufferAccessType			(bufferAccessType)
1355 	, m_accessOutOfBackingMemory	(accessOutOfBackingMemory)
1356 {
1357 	tcu::TestLog&									log						= context.getTestContext().getLog();
1358 	const DeviceInterface&							vk						= context.getDeviceInterface();
1359 	const deUint32									queueFamilyIndex		= context.getUniversalQueueFamilyIndex();
1360 	SimpleAllocator									memAlloc				(vk, *m_device, getPhysicalDeviceMemoryProperties(m_context.getInstanceInterface(), m_context.getPhysicalDevice()));
1361 
1362 	DE_ASSERT(RobustAccessWithPointersTest::s_numberOfBytesAccessed % sizeof(deUint32) == 0);
1363 	DE_ASSERT(inBufferAccessRange <= RobustAccessWithPointersTest::s_numberOfBytesAccessed);
1364 	DE_ASSERT(outBufferAccessRange <= RobustAccessWithPointersTest::s_numberOfBytesAccessed);
1365 
1366 	if (m_bufferFormat == VK_FORMAT_R64_UINT || m_bufferFormat == VK_FORMAT_R64_SINT)
1367 	{
1368 		context.requireDeviceFunctionality("VK_EXT_shader_image_atomic_int64");
1369 	}
1370 
1371 	// Check storage support
1372 	if (shaderStage == VK_SHADER_STAGE_VERTEX_BIT)
1373 	{
1374 		if (!context.getDeviceFeatures().vertexPipelineStoresAndAtomics)
1375 		{
1376 			TCU_THROW(NotSupportedError, "Stores not supported in vertex stage");
1377 		}
1378 	}
1379 	else if (shaderStage == VK_SHADER_STAGE_FRAGMENT_BIT)
1380 	{
1381 		if (!context.getDeviceFeatures().fragmentStoresAndAtomics)
1382 		{
1383 			TCU_THROW(NotSupportedError, "Stores not supported in fragment stage");
1384 		}
1385 	}
1386 
1387 	createTestBuffer(vk, *m_device, inBufferAccessRange, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, memAlloc, m_inBuffer, m_inBufferAlloc, m_inBufferAccess, &populateBufferWithValues, &m_bufferFormat);
1388 	createTestBuffer(vk, *m_device, outBufferAccessRange, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, memAlloc, m_outBuffer, m_outBufferAlloc, m_outBufferAccess, &populateBufferWithDummy, DE_NULL);
1389 
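	// Indices are delivered to the shader through a small uniform buffer. The first index selects the input
	// buffer element, the second the output buffer element. When testing access out of the backing memory,
	// the relevant index points at the last element of the test array so that the variable-pointer access
	// chain lands past the end of the allocation; otherwise index 0 is used.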
1390 	deInt32 indices[] = {
1391 		(m_accessOutOfBackingMemory && (m_bufferAccessType == BUFFER_ACCESS_TYPE_READ_FROM_STORAGE)) ? static_cast<deInt32>(RobustAccessWithPointersTest::s_testArraySize) - 1 : 0,
1392 		(m_accessOutOfBackingMemory && (m_bufferAccessType == BUFFER_ACCESS_TYPE_WRITE_TO_STORAGE)) ? static_cast<deInt32>(RobustAccessWithPointersTest::s_testArraySize) - 1 : 0,
1393 		0
1394 	};
1395 	AccessRangesData indicesAccess;
1396 	createTestBuffer(vk, *m_device, 3 * sizeof(deInt32), VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, memAlloc, m_indicesBuffer, m_indicesBufferAlloc, indicesAccess, &populateBufferWithCopy, &indices);
1397 
1398 	log << tcu::TestLog::Message << "input  buffer - alloc size: " << m_inBufferAccess.allocSize << tcu::TestLog::EndMessage;
1399 	log << tcu::TestLog::Message << "input  buffer - max access range: " << m_inBufferAccess.maxAccessRange << tcu::TestLog::EndMessage;
1400 	log << tcu::TestLog::Message << "output buffer - alloc size: " << m_outBufferAccess.allocSize << tcu::TestLog::EndMessage;
1401 	log << tcu::TestLog::Message << "output buffer - max access range: " << m_outBufferAccess.maxAccessRange << tcu::TestLog::EndMessage;
1402 	log << tcu::TestLog::Message << "indices - input offset: " << indices[0] << tcu::TestLog::EndMessage;
1403 	log << tcu::TestLog::Message << "indices - output offset: " << indices[1] << tcu::TestLog::EndMessage;
1404 	log << tcu::TestLog::Message << "indices - additional: " << indices[2] << tcu::TestLog::EndMessage;
1405 
1406 	// Create descriptor data
1407 	{
1408 		DescriptorPoolBuilder						descriptorPoolBuilder;
1409 		descriptorPoolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1u);
1410 		descriptorPoolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1u);
1411 		descriptorPoolBuilder.addType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1u);
1412 		m_descriptorPool = descriptorPoolBuilder.build(vk, *m_device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
1413 
1414 		DescriptorSetLayoutBuilder					setLayoutBuilder;
1415 		setLayoutBuilder.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_ALL);
1416 		setLayoutBuilder.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_ALL);
1417 		setLayoutBuilder.addSingleBinding(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, VK_SHADER_STAGE_ALL);
1418 		m_descriptorSetLayout = setLayoutBuilder.build(vk, *m_device);
1419 
1420 		const VkDescriptorSetAllocateInfo			descriptorSetAllocateInfo =
1421 		{
1422 			VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,		// VkStructureType	sType;
1423 			DE_NULL,								// const void*					pNext;
1424 			*m_descriptorPool,						// VkDescriptorPool				descriptorPool;
1425 			1u,										// deUint32						setLayoutCount;
1426 			&m_descriptorSetLayout.get()			// const VkDescriptorSetLayout*	pSetLayouts;
1427 		};
1428 
1429 		m_descriptorSet = allocateDescriptorSet(vk, *m_device, &descriptorSetAllocateInfo);
1430 
1431 		const VkDescriptorBufferInfo				inBufferDescriptorInfo			= makeDescriptorBufferInfo(*m_inBuffer, 0ull, m_inBufferAccess.accessRange);
1432 		const VkDescriptorBufferInfo				outBufferDescriptorInfo			= makeDescriptorBufferInfo(*m_outBuffer, 0ull, m_outBufferAccess.accessRange);
1433 		const VkDescriptorBufferInfo				indicesBufferDescriptorInfo		= makeDescriptorBufferInfo(*m_indicesBuffer, 0ull, 12ull);
1434 
1435 		DescriptorSetUpdateBuilder					setUpdateBuilder;
1436 		setUpdateBuilder.writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &inBufferDescriptorInfo);
1437 		setUpdateBuilder.writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &outBufferDescriptorInfo);
1438 		setUpdateBuilder.writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(2), VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, &indicesBufferDescriptorInfo);
1439 		setUpdateBuilder.update(vk, *m_device);
1440 	}
1441 
1442 	// Create fence
1443 	{
1444 		const VkFenceCreateInfo fenceParams =
1445 		{
1446 			VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,	// VkStructureType			sType;
1447 			DE_NULL,								// const void*				pNext;
1448 			0u										// VkFenceCreateFlags		flags;
1449 		};
1450 
1451 		m_fence = createFence(vk, *m_device, &fenceParams);
1452 	}
1453 
1454 	// Get queue
1455 	vk.getDeviceQueue(*m_device, queueFamilyIndex, 0, &m_queue);
1456 
1457 	if (m_shaderStage == VK_SHADER_STAGE_COMPUTE_BIT)
1458 	{
1459 		m_testEnvironment = de::MovePtr<TestEnvironment>(new ComputeEnvironment(m_context, *m_device, *m_descriptorSetLayout, *m_descriptorSet));
1460 	}
1461 	else
1462 	{
1463 		using tcu::Vec4;
1464 
1465 		const VkVertexInputBindingDescription		vertexInputBindingDescription =
1466 		{
1467 			0u,										// deUint32					binding;
1468 			sizeof(tcu::Vec4),						// deUint32					strideInBytes;
1469 			VK_VERTEX_INPUT_RATE_VERTEX				// VkVertexInputStepRate	inputRate;
1470 		};
1471 
1472 		const VkVertexInputAttributeDescription		vertexInputAttributeDescription =
1473 		{
1474 			0u,										// deUint32	location;
1475 			0u,										// deUint32	binding;
1476 			VK_FORMAT_R32G32B32A32_SFLOAT,			// VkFormat	format;
1477 			0u										// deUint32	offset;
1478 		};
1479 
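		// A single triangle covering part of the viewport is enough here: the graphics variants only need to
		// produce some vertices and fragments so that the tested shader stage actually executes.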
1480 		AccessRangesData							vertexAccess;
1481 		const Vec4									vertices[] =
1482 		{
1483 			Vec4(-1.0f, -1.0f, 0.0f, 1.0f),
1484 			Vec4(-1.0f,  1.0f, 0.0f, 1.0f),
1485 			Vec4( 1.0f, -1.0f, 0.0f, 1.0f),
1486 		};
1487 		const VkDeviceSize							vertexBufferSize = static_cast<VkDeviceSize>(sizeof(vertices));
1488 		createTestBuffer(vk, *m_device, vertexBufferSize, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, memAlloc, m_vertexBuffer, m_vertexBufferAlloc, vertexAccess, &populateBufferWithCopy, &vertices);
1489 
1490 		const GraphicsEnvironment::DrawConfig		drawWithOneVertexBuffer =
1491 		{
1492 			std::vector<VkBuffer>(1, *m_vertexBuffer), // std::vector<VkBuffer>	vertexBuffers;
1493 			DE_LENGTH_OF_ARRAY(vertices),			// deUint32					vertexCount;
1494 			1,										// deUint32					instanceCount;
1495 			DE_NULL,								// VkBuffer					indexBuffer;
1496 			0u,										// deUint32					indexCount;
1497 		};
1498 
1499 		m_testEnvironment = de::MovePtr<TestEnvironment>(new GraphicsEnvironment(m_context,
1500 																				 *m_device,
1501 																				 *m_descriptorSetLayout,
1502 																				 *m_descriptorSet,
1503 																				 GraphicsEnvironment::VertexBindings(1, vertexInputBindingDescription),
1504 																				 GraphicsEnvironment::VertexAttributes(1, vertexInputAttributeDescription),
1505 																				 drawWithOneVertexBuffer));
1506 	}
1507 }
1508 
1509 // Verifies whether the input buffer holds the value written by populateBufferWithValues at the given offset.
1510 bool AccessInstance::isExpectedValueFromInBuffer (VkDeviceSize	offsetInBytes,
1511 												  const void*	valuePtr,
1512 												  VkDeviceSize	valueSize)
1513 {
1514 	DE_ASSERT(offsetInBytes % 4 == 0);
1515 	DE_ASSERT(offsetInBytes < m_inBufferAccess.allocSize);
1516 	DE_ASSERT(valueSize == 4ull || valueSize == 8ull);
1517 
1518 	const deUint32 valueIndex = deUint32(offsetInBytes / 4) + 2;
1519 
1520 	if (isUintFormat(m_bufferFormat))
1521 	{
1522 		const deUint32 expectedValues[2] = { valueIndex, valueIndex + 1u };
1523 		return !deMemCmp(valuePtr, &expectedValues, (size_t)valueSize);
1524 	}
1525 	else if (isIntFormat(m_bufferFormat))
1526 	{
1527 		const deInt32 value				= -deInt32(valueIndex);
1528 		const deInt32 expectedValues[2]	= { value, value - 1 };
1529 		return !deMemCmp(valuePtr, &expectedValues, (size_t)valueSize);
1530 	}
1531 	else if (isFloatFormat(m_bufferFormat))
1532 	{
1533 		DE_ASSERT(valueSize == 4ull);
1534 		const float value = float(valueIndex);
1535 		return !deMemCmp(valuePtr, &value, (size_t)valueSize);
1536 	}
1537 	else
1538 	{
1539 		DE_ASSERT(false);
1540 		return false;
1541 	}
1542 }
1543 
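// Checks that the given range of the output buffer still holds the default 0xBA fill pattern,
// i.e. that the shader did not modify it.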
1544 bool AccessInstance::isOutBufferValueUnchanged (VkDeviceSize offsetInBytes, VkDeviceSize valueSize)
1545 {
1546 	DE_ASSERT(valueSize <= 8);
1547 	const deUint8 *const	outValuePtr		= (deUint8*)m_outBufferAlloc->getHostPtr() + offsetInBytes;
1548 	const deUint64			defaultValue	= 0xBABABABABABABABAull;
1549 
1550 	return !deMemCmp(outValuePtr, &defaultValue, (size_t)valueSize);
1551 }
1552 
1553 tcu::TestStatus AccessInstance::iterate (void)
1554 {
1555 	const DeviceInterface&		vk			= m_context.getDeviceInterface();
1556 	const vk::VkCommandBuffer	cmdBuffer	= m_testEnvironment->getCommandBuffer();
1557 
1558 	// Submit command buffer
1559 	{
1560 		const VkSubmitInfo	submitInfo	=
1561 		{
1562 			VK_STRUCTURE_TYPE_SUBMIT_INFO,	// VkStructureType				sType;
1563 			DE_NULL,						// const void*					pNext;
1564 			0u,								// deUint32						waitSemaphoreCount;
1565 			DE_NULL,						// const VkSemaphore*			pWaitSemaphores;
1566 			DE_NULL,						// const VkPipelineStageFlags*	pWaitDstStageMask;
1567 			1u,								// deUint32						commandBufferCount;
1568 			&cmdBuffer,						// const VkCommandBuffer*		pCommandBuffers;
1569 			0u,								// deUint32						signalSemaphoreCount;
1570 			DE_NULL							// const VkSemaphore*			pSignalSemaphores;
1571 		};
1572 
1573 		VK_CHECK(vk.resetFences(*m_device, 1, &m_fence.get()));
1574 		VK_CHECK(vk.queueSubmit(m_queue, 1, &submitInfo, *m_fence));
1575 		VK_CHECK(vk.waitForFences(*m_device, 1, &m_fence.get(), true, ~(0ull) /* infinity */));
1576 	}
1577 
1578 	// Prepare result buffer for read
1579 	{
1580 		const VkMappedMemoryRange	outBufferRange	=
1581 		{
1582 			VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,	//  VkStructureType	sType;
1583 			DE_NULL,								//  const void*		pNext;
1584 			m_outBufferAlloc->getMemory(),			//  VkDeviceMemory	mem;
1585 			0ull,									//  VkDeviceSize	offset;
1586 			m_outBufferAccess.allocSize,			//  VkDeviceSize	size;
1587 		};
1588 
1589 		VK_CHECK(vk.invalidateMappedMemoryRanges(*m_device, 1u, &outBufferRange));
1590 	}
1591 
1592 	if (verifyResult())
1593 		return tcu::TestStatus::pass("All values OK");
1594 	else
1595 		return tcu::TestStatus::fail("Invalid value(s) found");
1596 }
1597 
1598 bool AccessInstance::verifyResult (bool splitAccess)
1599 {
1600 	std::ostringstream	logMsg;
1601 	tcu::TestLog&		log					= m_context.getTestContext().getLog();
1602 	const bool			isReadAccess		= (m_bufferAccessType == BUFFER_ACCESS_TYPE_READ_FROM_STORAGE);
1603 	const void*			inDataPtr			= m_inBufferAlloc->getHostPtr();
1604 	const void*			outDataPtr			= m_outBufferAlloc->getHostPtr();
1605 	bool				allOk				= true;
1606 	deUint32			valueNdx			= 0;
1607 	const VkDeviceSize	maxAccessRange		= isReadAccess ? m_inBufferAccess.maxAccessRange : m_outBufferAccess.maxAccessRange;
1608 	const bool			isR64				= (m_bufferFormat == VK_FORMAT_R64_UINT || m_bufferFormat == VK_FORMAT_R64_SINT);
1609 	const deUint32		unsplitElementSize	= (isR64 ? 8u : 4u);
1610 	const deUint32		elementSize			= ((isR64 && !splitAccess) ? 8u : 4u);
1611 
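	// Walk the output buffer element by element. Bytes past the region the shader was asked to access must be
	// untouched (or, for writes, may hold a value from within the input buffer). In-bounds elements must hold
	// the expected values, while elements touched by out-of-bounds accesses are validated against the robust
	// buffer access rules checked below: zero, a value from within the buffer, the [0, 0, 0, x] vec4 pattern
	// for reads, or a discarded write.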
1612 	for (VkDeviceSize offsetInBytes = 0; offsetInBytes < m_outBufferAccess.allocSize; offsetInBytes += elementSize)
1613 	{
1614 		const deUint8*		outValuePtr		= static_cast<const deUint8*>(outDataPtr) + offsetInBytes;
1615 		const size_t		outValueSize	= static_cast<size_t>(deMinu64(elementSize, (m_outBufferAccess.allocSize - offsetInBytes)));
1616 
1617 		if (offsetInBytes >= RobustAccessWithPointersTest::s_numberOfBytesAccessed)
1618 		{
1619 			// The shader will only write 16 values into the result buffer. The rest of the values
1620 			// should remain unchanged or may be modified if we are writing out of bounds.
1621 			if (!isOutBufferValueUnchanged(offsetInBytes, outValueSize)
1622 				&& (isReadAccess || !isValueWithinBufferOrZero(inDataPtr, m_inBufferAccess.allocSize, outValuePtr, 4)))
1623 			{
1624 				logMsg << "\nValue " << valueNdx++ << " has been modified with an unknown value: " << *(static_cast<const deUint32*>(static_cast<const void*>(outValuePtr)));
1625 				allOk = false;
1626 			}
1627 		}
1628 		else
1629 		{
1630 			const deInt32	distanceToOutOfBounds	= static_cast<deInt32>(maxAccessRange) - static_cast<deInt32>(offsetInBytes);
1631 			bool			isOutOfBoundsAccess		= false;
1632 
1633 			logMsg << "\n" << valueNdx++ << ": ";
1634 
1635 			logValue(logMsg, outValuePtr, m_bufferFormat, outValueSize);
1636 
1637 			if (m_accessOutOfBackingMemory)
1638 				isOutOfBoundsAccess = true;
1639 
1640 			// Check if the shader operation accessed an operand located less than 16 bytes away
1641 			// from the out of bounds address. Less than 32 bytes away for 64 bit accesses.
1642 			if (!isOutOfBoundsAccess && distanceToOutOfBounds < (isR64 ? 32 : 16))
1643 			{
1644 				deUint32 operandSize = 0;
1645 
1646 				switch (m_shaderType)
1647 				{
1648 					case SHADER_TYPE_SCALAR_COPY:
1649 						operandSize		= unsplitElementSize; // Size of scalar
1650 						break;
1651 
1652 					case SHADER_TYPE_VECTOR_COPY:
1653 						operandSize		= unsplitElementSize * 4; // Size of vec4
1654 						break;
1655 
1656 					case SHADER_TYPE_MATRIX_COPY:
1657 						operandSize		= unsplitElementSize * 16; // Size of mat4
1658 						break;
1659 
1660 					default:
1661 						DE_ASSERT(false);
1662 				}
1663 
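				// The whole operand (scalar, vec4 or mat4) that contains this element is considered out of
				// bounds if it extends past the maximum access range.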
1664 				isOutOfBoundsAccess = (((offsetInBytes / operandSize) + 1) * operandSize > maxAccessRange);
1665 			}
1666 
1667 			if (isOutOfBoundsAccess)
1668 			{
1669 				logMsg << " (out of bounds " << (isReadAccess ? "read": "write") << ")";
1670 
1671 				const bool	isValuePartiallyOutOfBounds = ((distanceToOutOfBounds > 0) && ((deUint32)distanceToOutOfBounds < elementSize));
1672 				bool		isValidValue				= false;
1673 
1674 				if (isValuePartiallyOutOfBounds && !m_accessOutOfBackingMemory)
1675 				{
1676 					// The value is partially out of bounds
1677 
1678 					bool	isOutOfBoundsPartOk  = true;
1679 					bool	isWithinBoundsPartOk = true;
1680 
1681 					deUint32 inBoundPartSize = distanceToOutOfBounds;
1682 
1683 					// For cases where a partial element is out of bounds, the part that falls within the buffer's allocated memory may still be buffer content per the spec.
1684 					// We need to check that part as a whole.
1685 					if (offsetInBytes + elementSize > m_inBufferAccess.allocSize)
1686 					{
1687 						inBoundPartSize = static_cast<deInt32>(m_inBufferAccess.allocSize) - static_cast<deInt32>(offsetInBytes);
1688 					}
1689 
1690 					if (isReadAccess)
1691 					{
1692 						isWithinBoundsPartOk	= isValueWithinBufferOrZero(inDataPtr, m_inBufferAccess.allocSize, outValuePtr, inBoundPartSize);
1693 						isOutOfBoundsPartOk		= isValueWithinBufferOrZero(inDataPtr, m_inBufferAccess.allocSize, (deUint8*)outValuePtr + inBoundPartSize, outValueSize - inBoundPartSize);
1694 					}
1695 					else
1696 					{
1697 						isWithinBoundsPartOk	= isValueWithinBufferOrZero(inDataPtr, m_inBufferAccess.allocSize, outValuePtr, inBoundPartSize)
1698 												  || isOutBufferValueUnchanged(offsetInBytes, inBoundPartSize);
1699 
1700 						isOutOfBoundsPartOk		= isValueWithinBufferOrZero(inDataPtr, m_inBufferAccess.allocSize, (deUint8*)outValuePtr + inBoundPartSize, outValueSize - inBoundPartSize)
1701 												  || isOutBufferValueUnchanged(offsetInBytes + inBoundPartSize, outValueSize - inBoundPartSize);
1702 					}
1703 
1704 					logMsg << ", first " << distanceToOutOfBounds << " byte(s) " << (isWithinBoundsPartOk ? "OK": "wrong");
1705 					logMsg << ", last " << outValueSize - distanceToOutOfBounds << " byte(s) " << (isOutOfBoundsPartOk ? "OK": "wrong");
1706 
1707 					isValidValue	= isWithinBoundsPartOk && isOutOfBoundsPartOk;
1708 				}
1709 				else
1710 				{
1711 					if (isReadAccess)
1712 					{
1713 						isValidValue	= isValueWithinBufferOrZero(inDataPtr, m_inBufferAccess.allocSize, outValuePtr, outValueSize);
1714 					}
1715 					else
1716 					{
1717 						isValidValue	= isOutBufferValueUnchanged(offsetInBytes, outValueSize);
1718 
1719 						if (!isValidValue)
1720 						{
1721 							// Out of bounds writes may modify values within the memory ranges bound to the buffer
1722 							isValidValue	= isValueWithinBufferOrZero(inDataPtr, m_inBufferAccess.allocSize, outValuePtr, outValueSize);
1723 
1724 							if (isValidValue)
1725 								logMsg << ", OK, written within the memory range bound to the buffer";
1726 						}
1727 					}
1728 				}
1729 
1730 				if (!isValidValue && !splitAccess)
1731 				{
1732 					// Check if we are satisfying the [0, 0, 0, x] pattern, where x may be either 0 or 1,
1733 					// or the maximum representable positive integer value (if the format is integer-based).
1734 
1735 					const bool	canMatchVec4Pattern	= (isReadAccess
1736 													&& !isValuePartiallyOutOfBounds
1737 													&& (m_shaderType == SHADER_TYPE_VECTOR_COPY)
1738 													&& (offsetInBytes / elementSize + 1) % 4 == 0);
1739 					bool		matchesVec4Pattern	= false;
1740 
1741 					if (canMatchVec4Pattern)
1742 					{
1743 						matchesVec4Pattern = verifyOutOfBoundsVec4(outValuePtr - 3u * elementSize, m_bufferFormat);
1744 					}
1745 
1746 					if (!canMatchVec4Pattern || !matchesVec4Pattern)
1747 					{
1748 						logMsg << ". Failed: ";
1749 
1750 						if (isReadAccess)
1751 						{
1752 							logMsg << "expected value within the buffer range or 0";
1753 
1754 							if (canMatchVec4Pattern)
1755 								logMsg << ", or the [0, 0, 0, x] pattern";
1756 						}
1757 						else
1758 						{
1759 							logMsg << "written out of the range";
1760 						}
1761 
1762 						allOk = false;
1763 					}
1764 				}
1765 			}
1766 			else // We are within bounds
1767 			{
1768 				if (isReadAccess)
1769 				{
1770 					if (!isExpectedValueFromInBuffer(offsetInBytes, outValuePtr, elementSize))
1771 					{
1772 						logMsg << ", Failed: unexpected value";
1773 						allOk = false;
1774 					}
1775 				}
1776 				else
1777 				{
1778 					// Out of bounds writes may change values within the bounds.
1779 					if (!isValueWithinBufferOrZero(inDataPtr, m_inBufferAccess.accessRange, outValuePtr, elementSize))
1780 					{
1781 						logMsg << ", Failed: unexpected value";
1782 						allOk = false;
1783 					}
1784 				}
1785 			}
1786 		}
1787 	}
1788 
1789 	log << tcu::TestLog::Message << logMsg.str() << tcu::TestLog::EndMessage;
1790 
1791 	if (!allOk && unsplitElementSize > 4u && !splitAccess)
1792 	{
1793 		// "Non-atomic accesses to storage buffers that are a multiple of 32 bits may be decomposed into 32-bit accesses that are individually bounds-checked."
1794 		return verifyResult(true/*splitAccess*/);
1795 	}
1796 
1797 	return allOk;
1798 }
1799 
1800 // ReadInstance
1801 
1802 ReadInstance::ReadInstance (Context&				context,
1803 							Move<VkDevice>			device,
1804 							ShaderType				shaderType,
1805 							VkShaderStageFlags		shaderStage,
1806 							VkFormat				bufferFormat,
1807 							//bool					readFromStorage,
1808 							VkDeviceSize			inBufferAccessRange,
1809 							bool					accessOutOfBackingMemory)
1810 
1811 	: AccessInstance	(context, device, shaderType, shaderStage, bufferFormat,
1812 						 BUFFER_ACCESS_TYPE_READ_FROM_STORAGE,
1813 						 inBufferAccessRange, RobustAccessWithPointersTest::s_numberOfBytesAccessed,
1814 						 accessOutOfBackingMemory)
1815 {
1816 }
1817 
1818 // WriteInstance
1819 
1820 WriteInstance::WriteInstance (Context&				context,
1821 							  Move<VkDevice>		device,
1822 							  ShaderType			shaderType,
1823 							  VkShaderStageFlags	shaderStage,
1824 							  VkFormat				bufferFormat,
1825 							  VkDeviceSize			writeBufferAccessRange,
1826 							  bool					accessOutOfBackingMemory)
1827 
1828 	: AccessInstance	(context, device, shaderType, shaderStage, bufferFormat,
1829 						 BUFFER_ACCESS_TYPE_WRITE_TO_STORAGE,
1830 						 RobustAccessWithPointersTest::s_numberOfBytesAccessed, writeBufferAccessRange,
1831 						 accessOutOfBackingMemory)
1832 {
1833 }
1834 
1835 } // unnamed namespace
1836 
1837 tcu::TestCaseGroup* createBufferAccessWithVariablePointersTests(tcu::TestContext& testCtx)
1838 {
1839 	// Let's make a group for the tests
1840 	de::MovePtr<tcu::TestCaseGroup> bufferAccessWithVariablePointersTests	(new tcu::TestCaseGroup(testCtx, "through_pointers", ""));
1841 
1842 	// Let's add subgroups to better organise the tests
1843 	de::MovePtr<tcu::TestCaseGroup> computeWithVariablePointersTests		(new tcu::TestCaseGroup(testCtx, "compute", ""));
1844 	de::MovePtr<tcu::TestCaseGroup> computeReads							(new tcu::TestCaseGroup(testCtx, "reads", ""));
1845 	de::MovePtr<tcu::TestCaseGroup> computeWrites							(new tcu::TestCaseGroup(testCtx, "writes", ""));
1846 
1847 	de::MovePtr<tcu::TestCaseGroup> graphicsWithVariablePointersTests		(new tcu::TestCaseGroup(testCtx, "graphics", ""));
1848 	de::MovePtr<tcu::TestCaseGroup> graphicsReads							(new tcu::TestCaseGroup(testCtx, "reads", ""));
1849 	de::MovePtr<tcu::TestCaseGroup> graphicsReadsVertex						(new tcu::TestCaseGroup(testCtx, "vertex", ""));
1850 	de::MovePtr<tcu::TestCaseGroup> graphicsReadsFragment					(new tcu::TestCaseGroup(testCtx, "fragment", ""));
1851 	de::MovePtr<tcu::TestCaseGroup> graphicsWrites							(new tcu::TestCaseGroup(testCtx, "writes", ""));
1852 	de::MovePtr<tcu::TestCaseGroup> graphicsWritesVertex					(new tcu::TestCaseGroup(testCtx, "vertex", ""));
1853 	de::MovePtr<tcu::TestCaseGroup> graphicsWritesFragment					(new tcu::TestCaseGroup(testCtx, "fragment", ""));
1854 
1855 	// A struct for describing formats
1856 	struct Formats
1857 	{
1858 		const VkFormat		value;
1859 		const char * const	name;
1860 	};
1861 
1862 	const Formats			bufferFormats[]			=
1863 	{
1864 		{ VK_FORMAT_R32_SINT,		"s32" },
1865 		{ VK_FORMAT_R32_UINT,		"u32" },
1866 		{ VK_FORMAT_R32_SFLOAT,		"f32" },
1867 		{ VK_FORMAT_R64_SINT,		"s64" },
1868 		{ VK_FORMAT_R64_UINT,		"u64" },
1869 	};
1870 	const deUint8			bufferFormatsCount		= static_cast<deUint8>(DE_LENGTH_OF_ARRAY(bufferFormats));
1871 
1872 	// Amounts of data to copy
1873 	const VkDeviceSize		rangeSizes[]			=
1874 	{
1875 		1ull, 3ull, 4ull, 16ull, 32ull
1876 	};
1877 	const deUint8			rangeSizesCount			= static_cast<deUint8>(DE_LENGTH_OF_ARRAY(rangeSizes));
1878 
1879 	// Gather the above data into one array
1880 	const struct ShaderTypes
1881 	{
1882 		const ShaderType			value;
1883 		const char * const			name;
1884 		const Formats* const		formats;
1885 		const deUint8				formatsCount;
1886 		const VkDeviceSize* const	sizes;
1887 		const deUint8				sizesCount;
1888 	}						types[]					=
1889 	{
1890 		{ SHADER_TYPE_VECTOR_COPY,	"vec4",		bufferFormats,			bufferFormatsCount,			rangeSizes,			rangeSizesCount },
1891 		{ SHADER_TYPE_SCALAR_COPY,	"scalar",	bufferFormats,			bufferFormatsCount,			rangeSizes,			rangeSizesCount }
1892 	};
1893 
1894 	// Specify which subgroups the various tests should be put into
1895 	const struct ShaderStages
1896 	{
1897 		VkShaderStageFlags					stage;
1898 		de::MovePtr<tcu::TestCaseGroup>&	reads;
1899 		de::MovePtr<tcu::TestCaseGroup>&	writes;
1900 	}						stages[]				=
1901 	{
1902 		{ VK_SHADER_STAGE_VERTEX_BIT,		graphicsReadsVertex,	graphicsWritesVertex },
1903 		{ VK_SHADER_STAGE_FRAGMENT_BIT,		graphicsReadsFragment,	graphicsWritesFragment },
1904 		{ VK_SHADER_STAGE_COMPUTE_BIT,		computeReads,			computeWrites }
1905 	};
1906 
1907 	// Finally, specify whether the memory used should be in the "inaccessible" portion of the buffer or entirely outside of it
1908 	const char* const		backingMemory[]			= { "in_memory", "out_of_memory" };
1909 
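	// Generate read tests. Names follow the pattern <range>B_<in_memory|out_of_memory>_with_<vec4|scalar>_<format>,
	// e.g. 16B_out_of_memory_with_vec4_u32.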
1910 	for (deInt32 stageId = 0; stageId < DE_LENGTH_OF_ARRAY(stages); ++stageId)
1911 		for (int i = 0; i < DE_LENGTH_OF_ARRAY(types); ++i)
1912 			for (int j = 0; j < types[i].formatsCount; ++j)
1913 				for (int k = 0; k < types[i].sizesCount; ++k)
1914 					for (int s = 0; s < DE_LENGTH_OF_ARRAY(backingMemory); ++s)
1915 					{
1916 						std::ostringstream	name;
1917 						name << types[i].sizes[k] << "B_" << backingMemory[s] << "_with_" << types[i].name << '_' << types[i].formats[j].name;
1918 						stages[stageId].reads->addChild(new RobustReadTest(testCtx, name.str().c_str(), "", stages[stageId].stage, types[i].value, types[i].formats[j].value, types[i].sizes[k], s != 0));
1919 					}
1920 
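	// Generate write tests using the same naming scheme.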
1921 	for (deInt32 stageId = 0; stageId < DE_LENGTH_OF_ARRAY(stages); ++stageId)
1922 		for (int i=0; i<DE_LENGTH_OF_ARRAY(types); ++i)
1923 			for (int j=0; j<types[i].formatsCount; ++j)
1924 				for (int k = 0; k<types[i].sizesCount; ++k)
1925 					for (int s = 0; s < DE_LENGTH_OF_ARRAY(backingMemory); ++s)
1926 					{
1927 						std::ostringstream	name;
1928 						name << types[i].sizes[k] << "B_" << backingMemory[s] << "_with_" << types[i].name << '_' << types[i].formats[j].name;
1929 						stages[stageId].writes->addChild(new RobustWriteTest(testCtx, name.str().c_str(), "", stages[stageId].stage, types[i].value, types[i].formats[j].value, types[i].sizes[k], s != 0));
1930 					}
1931 
1932 	graphicsReads->addChild(graphicsReadsVertex.release());
1933 	graphicsReads->addChild(graphicsReadsFragment.release());
1934 
1935 	graphicsWrites->addChild(graphicsWritesVertex.release());
1936 	graphicsWrites->addChild(graphicsWritesFragment.release());
1937 
1938 	graphicsWithVariablePointersTests->addChild(graphicsReads.release());
1939 	graphicsWithVariablePointersTests->addChild(graphicsWrites.release());
1940 
1941 	computeWithVariablePointersTests->addChild(computeReads.release());
1942 	computeWithVariablePointersTests->addChild(computeWrites.release());
1943 
1944 	bufferAccessWithVariablePointersTests->addChild(graphicsWithVariablePointersTests.release());
1945 	bufferAccessWithVariablePointersTests->addChild(computeWithVariablePointersTests.release());
1946 
1947 	return bufferAccessWithVariablePointersTests.release();
1948 }
1949 
1950 } // robustness
1951 } // vkt
1952