1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
4 *
5 * Copyright (c) 2018 The Khronos Group Inc.
6 *
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
10 *
11 * http://www.apache.org/licenses/LICENSE-2.0
12 *
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
18 *
19 *//*!
20 * \file
21 * \brief Robust buffer access tests for storage buffers and
22 * storage texel buffers with variable pointers.
23 *
24 * \note These tests are checking if accessing a memory through a variable
25 * pointer that points outside of accessible buffer memory is robust.
26 * To do this the tests are creating proper SPIRV code that creates
 * variable pointers. Those pointers either point into
 * memory allocated for a buffer but "not accessible" - meaning the
 * DescriptorBufferInfo has a smaller size than the memory we access in
 * the shader - or entirely outside of allocated memory (i.e. the buffer is
 * 256 bytes big but we are trying to access at an offset of 1k from
32 * buffer start). There is a set of valid behaviours defined when
33 * robust buffer access extension is enabled described in chapter 32
34 * section 1 of Vulkan spec.
35 *
36 *//*--------------------------------------------------------------------*/
37
38 #include "vktRobustBufferAccessWithVariablePointersTests.hpp"
39 #include "vktRobustnessUtil.hpp"
40 #include "vktTestCaseUtil.hpp"
41 #include "vkBuilderUtil.hpp"
42 #include "vkImageUtil.hpp"
43 #include "vkPrograms.hpp"
44 #include "vkQueryUtil.hpp"
45 #include "vkDeviceUtil.hpp"
46 #include "vkRef.hpp"
47 #include "vkRefUtil.hpp"
48 #include "vkTypeUtil.hpp"
49 #include "tcuTestLog.hpp"
50 #include "vkDefs.hpp"
51 #include "deRandom.hpp"
52
53 #include <limits>
54 #include <sstream>
55
56 namespace vkt
57 {
58 namespace robustness
59 {
60
61 using namespace vk;
62
63 // keep local things local
64 namespace
65 {
66
67 // Creates a custom device with robust buffer access and variable pointer features.
createRobustBufferAccessVariablePointersDevice(Context & context)68 Move<VkDevice> createRobustBufferAccessVariablePointersDevice (Context& context)
69 {
70 auto pointerFeatures = context.getVariablePointersFeatures();
71
72 VkPhysicalDeviceFeatures2 features2 = initVulkanStructure();
73 features2.features = context.getDeviceFeatures();
74 features2.features.robustBufferAccess = VK_TRUE;
75 features2.pNext = &pointerFeatures;
76
77 return createRobustBufferAccessDevice(context, &features2);
78 }
79
// A supplementary structure that holds the various sizes associated with one test buffer.
struct AccessRangesData
{
	VkDeviceSize allocSize;      // size of the backing memory allocation (from VkMemoryRequirements)
	VkDeviceSize accessRange;    // range the test intends to access
	VkDeviceSize maxAccessRange; // min(allocSize, buffer size, accessRange) - largest legally accessible range
};
87
// Pointer to function that can be used to fill a buffer with some data - it is passed as a parameter to the buffer creation utility function.
// Arguments: destination host pointer, number of bytes to fill, opaque blob pointer interpreted by the concrete filler.
typedef void(*FillBufferProcPtr)(void*, vk::VkDeviceSize, const void* const);
90
// A utility function for creating a buffer.
// This function not only allocates memory for the buffer but also fills the buffer up with data:
// it creates the buffer, allocates host-visible memory for it, records the effective access
// ranges into 'data', binds the memory, fills it via 'fillBufferProc' and flushes the mapping.
void createTestBuffer (Context& context,
					   const vk::DeviceInterface& deviceInterface,
					   const VkDevice& device,
					   VkDeviceSize accessRange,
					   VkBufferUsageFlags usage,
					   SimpleAllocator& allocator,
					   Move<VkBuffer>& buffer,
					   de::MovePtr<Allocation>& bufferAlloc,
					   AccessRangesData& data,
					   FillBufferProcPtr fillBufferProc,
					   const void* const blob)
{
	const VkBufferCreateInfo bufferParams =
	{
		VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,	// VkStructureType		sType;
		DE_NULL,								// const void*			pNext;
		0u,										// VkBufferCreateFlags	flags;
		accessRange,							// VkDeviceSize			size;
		usage,									// VkBufferUsageFlags	usage;
		VK_SHARING_MODE_EXCLUSIVE,				// VkSharingMode		sharingMode;
		// NOTE(review): a queue-family-index sentinel is used where a count is expected;
		// with VK_SHARING_MODE_EXCLUSIVE both fields are ignored, so this is harmless.
		VK_QUEUE_FAMILY_IGNORED,				// deUint32				queueFamilyIndexCount;
		DE_NULL									// const deUint32*		pQueueFamilyIndices;
	};

	buffer = createBuffer(deviceInterface, device, &bufferParams);

	VkMemoryRequirements bufferMemoryReqs = getBufferMemoryRequirements(deviceInterface, device, *buffer);
	bufferAlloc = allocator.allocate(bufferMemoryReqs, MemoryRequirement::HostVisible);

	// Record all the sizes the verification code will need later.
	data.allocSize = bufferMemoryReqs.size;
	data.accessRange = accessRange;
	data.maxAccessRange = deMinu64(data.allocSize, deMinu64(bufferParams.size, accessRange));

	VK_CHECK(deviceInterface.bindBufferMemory(device, *buffer, bufferAlloc->getMemory(), bufferAlloc->getOffset()));
#ifdef CTS_USES_VULKANSC
	// In Vulkan SC the buffer contents only matter in the subprocess that actually executes the test.
	if(context.getTestContext().getCommandLine().isSubProcess())
		fillBufferProc(bufferAlloc->getHostPtr(), bufferMemoryReqs.size, blob);
#else
	fillBufferProc(bufferAlloc->getHostPtr(), bufferMemoryReqs.size, blob);
	DE_UNREF(context);
#endif // CTS_USES_VULKANSC
	flushMappedMemoryRange(deviceInterface, device, bufferAlloc->getMemory(), bufferAlloc->getOffset(), VK_WHOLE_SIZE);
}
136
// An adapter function matching the FillBufferProcPtr interface. Fills a buffer with "randomly" generated test data matching the desired format.
// 'blob' is interpreted as a pointer to the VkFormat that drives the generated values.
void populateBufferWithValues (void* buffer,
							   VkDeviceSize size,
							   const void* const blob)
{
	populateBufferWithTestValues(buffer, size, *static_cast<const vk::VkFormat*>(blob));
}
144
145 // An adapter function matching FillBufferProcPtr interface. Fills a buffer with 0xBABABABABABA... pattern. Used to fill up output buffers.
146 // Since this pattern cannot show up in generated test data it should not show up in the valid output.
populateBufferWithFiller(void * buffer,VkDeviceSize size,const void * const blob)147 void populateBufferWithFiller (void* buffer,
148 VkDeviceSize size,
149 const void* const blob)
150 {
151 DE_UNREF(blob);
152 deMemset(buffer, 0xBA, static_cast<size_t>(size));
153 }
154
// An adapter function matching the FillBufferProcPtr interface. Fills a buffer with a copy of memory contents pointed to by blob.
// The caller must guarantee that 'blob' points to at least 'size' readable bytes.
void populateBufferWithCopy (void* buffer,
							 VkDeviceSize size,
							 const void* const blob)
{
	deMemcpy(buffer, blob, static_cast<size_t>(size));
}
162
// The composite types used in the tests.
// Those composites can be made of unsigned ints, signed ints or floats (except for matrices that work with floats only).
enum ShaderType
{
	SHADER_TYPE_MATRIX_COPY = 0,	// shader copies a matrix through a variable pointer
	SHADER_TYPE_VECTOR_COPY,		// shader copies a vector
	SHADER_TYPE_SCALAR_COPY,		// shader copies a scalar

	SHADER_TYPE_COUNT
};
173
// We are testing reads or writes.
// In case of testing reads - writes are always in-bounds (presumably: only the
// read side goes out of range; confirm against the instance setup below).
enum BufferAccessType
{
	BUFFER_ACCESS_TYPE_READ_FROM_STORAGE = 0,
	BUFFER_ACCESS_TYPE_WRITE_TO_STORAGE,
};
181
// Test case for checking robust buffer access with variable pointers.
// Common base for the read and write test variants below; stores the shader
// stage, the composite type being copied and the buffer format under test.
class RobustAccessWithPointersTest : public vkt::TestCase
{
public:
	static const deUint32 s_testArraySize;			// number of elements in the test arrays
	static const deUint32 s_numberOfBytesAccessed;	// bytes touched by a single test access

	RobustAccessWithPointersTest (tcu::TestContext& testContext,
								  const std::string& name,
								  const std::string& description,
								  VkShaderStageFlags shaderStage,
								  ShaderType shaderType,
								  VkFormat bufferFormat);

	virtual ~RobustAccessWithPointersTest (void)
	{
	}

	void checkSupport (Context &context) const override;

protected:
	const VkShaderStageFlags m_shaderStage;
	const ShaderType m_shaderType;
	const VkFormat m_bufferFormat;
};
207
// 1024 elements per test array; each access touches 16 floats (64 bytes).
const deUint32 RobustAccessWithPointersTest::s_testArraySize = 1024u;
const deUint32 RobustAccessWithPointersTest::s_numberOfBytesAccessed = static_cast<deUint32>(16ull * sizeof(float));
210
// Constructor - only vertex, fragment and compute stages are supported by this group.
RobustAccessWithPointersTest::RobustAccessWithPointersTest(tcu::TestContext& testContext,
														   const std::string& name,
														   const std::string& description,
														   VkShaderStageFlags shaderStage,
														   ShaderType shaderType,
														   VkFormat bufferFormat)
	: vkt::TestCase(testContext, name, description)
	, m_shaderStage(shaderStage)
	, m_shaderType(shaderType)
	, m_bufferFormat(bufferFormat)
{
	DE_ASSERT(m_shaderStage == VK_SHADER_STAGE_VERTEX_BIT || m_shaderStage == VK_SHADER_STAGE_FRAGMENT_BIT || m_shaderStage == VK_SHADER_STAGE_COMPUTE_BIT);
}
224
// Rejects the test early when variable pointers (storage-buffer flavour) or,
// on portability-subset implementations, robustBufferAccess are unavailable.
void RobustAccessWithPointersTest::checkSupport (Context &context) const
{
	const auto& pointerFeatures = context.getVariablePointersFeatures();
	if (!pointerFeatures.variablePointersStorageBuffer)
		TCU_THROW(NotSupportedError, "VariablePointersStorageBuffer SPIR-V capability not supported");

	if (context.isDeviceFunctionalitySupported("VK_KHR_portability_subset") && !context.getDeviceFeatures().robustBufferAccess)
		TCU_THROW(NotSupportedError, "VK_KHR_portability_subset: robustBufferAccess not supported by this implementation");
}
234
// A subclass for testing reading with variable pointers.
// 'readAccessRange' limits the readable part of the input buffer;
// 'accessOutOfBackingMemory' makes the test read past the allocation itself.
class RobustReadTest : public RobustAccessWithPointersTest
{
public:
	RobustReadTest (tcu::TestContext& testContext,
					const std::string& name,
					const std::string& description,
					VkShaderStageFlags shaderStage,
					ShaderType shaderType,
					VkFormat bufferFormat,
					VkDeviceSize readAccessRange,
					bool accessOutOfBackingMemory);

	virtual ~RobustReadTest (void)
	{}
	virtual TestInstance* createInstance (Context& context) const;
private:
	virtual void initPrograms (SourceCollections& programCollection) const;
	const VkDeviceSize m_readAccessRange;
	const bool m_accessOutOfBackingMemory;
};
256
// A subclass for testing writing with variable pointers.
// 'writeAccessRange' limits the writable part of the output buffer;
// 'accessOutOfBackingMemory' makes the test write past the allocation itself.
class RobustWriteTest : public RobustAccessWithPointersTest
{
public:
	RobustWriteTest (tcu::TestContext& testContext,
					 const std::string& name,
					 const std::string& description,
					 VkShaderStageFlags shaderStage,
					 ShaderType shaderType,
					 VkFormat bufferFormat,
					 VkDeviceSize writeAccessRange,
					 bool accessOutOfBackingMemory);

	virtual ~RobustWriteTest (void) {}
	virtual TestInstance* createInstance (Context& context) const;
private:
	virtual void initPrograms (SourceCollections& programCollection) const;
	const VkDeviceSize m_writeAccessRange;
	const bool m_accessOutOfBackingMemory;
};
277
// In case I detect that some prerequisites are not fulfilled I am creating this lightweight empty test instance instead of AccessInstance. Should be a bit faster that way.
// Its iterate() immediately throws NotSupportedError with the stored message.
class NotSupportedInstance : public vkt::TestInstance
{
public:
	NotSupportedInstance (Context& context,
						  const std::string& message)
		: TestInstance(context)
		, m_notSupportedMessage(message)
	{}

	virtual ~NotSupportedInstance (void)
	{
	}

	virtual tcu::TestStatus iterate (void)
	{
		TCU_THROW(NotSupportedError, m_notSupportedMessage.c_str());
	}

private:
	std::string m_notSupportedMessage;	// reason reported when the test runs
};
300
// A superclass for instances testing reading and writing.
// Holds all necessary object members: the custom device, in/out/index buffers
// with their allocations and access-range bookkeeping, descriptor objects and
// synchronization primitives shared by the read and write instances.
class AccessInstance : public vkt::TestInstance
{
public:
	AccessInstance (Context& context,
					Move<VkDevice> device,
#ifndef CTS_USES_VULKANSC
					de::MovePtr<vk::DeviceDriver> deviceDriver,
#else
					de::MovePtr<vk::DeviceDriverSC, vk::DeinitDeviceDeleter> deviceDriver,
#endif // CTS_USES_VULKANSC
					ShaderType shaderType,
					VkShaderStageFlags shaderStage,
					VkFormat bufferFormat,
					BufferAccessType bufferAccessType,
					VkDeviceSize inBufferAccessRange,
					VkDeviceSize outBufferAccessRange,
					bool accessOutOfBackingMemory);

	virtual ~AccessInstance (void);

	virtual tcu::TestStatus iterate (void);

	virtual bool verifyResult (bool splitAccess = false);

private:
	// Checks whether the value at the given output offset could legally have come from the input buffer.
	bool isExpectedValueFromInBuffer (VkDeviceSize offsetInBytes,
									  const void* valuePtr,
									  VkDeviceSize valueSize);
	// Checks whether the output buffer still holds the original 0xBA filler at the given offset.
	bool isOutBufferValueUnchanged (VkDeviceSize offsetInBytes,
									VkDeviceSize valueSize);

protected:
	Move<VkDevice> m_device;	// custom device with robustBufferAccess + variable pointers enabled
#ifndef CTS_USES_VULKANSC
	de::MovePtr<vk::DeviceDriver> m_deviceDriver;
#else
	de::MovePtr<vk::DeviceDriverSC, vk::DeinitDeviceDeleter> m_deviceDriver;
#endif // CTS_USES_VULKANSC
	de::MovePtr<TestEnvironment>m_testEnvironment;

	const ShaderType m_shaderType;
	const VkShaderStageFlags m_shaderStage;

	const VkFormat m_bufferFormat;
	const BufferAccessType m_bufferAccessType;

	// Input buffer: test data read by the shader.
	AccessRangesData m_inBufferAccess;
	Move<VkBuffer> m_inBuffer;
	de::MovePtr<Allocation> m_inBufferAlloc;

	// Output buffer: pre-filled with 0xBA, written by the shader.
	AccessRangesData m_outBufferAccess;
	Move<VkBuffer> m_outBuffer;
	de::MovePtr<Allocation> m_outBufferAlloc;

	// Buffer of indices used by the shader to select accessed elements.
	Move<VkBuffer> m_indicesBuffer;
	de::MovePtr<Allocation> m_indicesBufferAlloc;

	Move<VkDescriptorPool> m_descriptorPool;
	Move<VkDescriptorSetLayout> m_descriptorSetLayout;
	Move<VkDescriptorSet> m_descriptorSet;

	Move<VkFence> m_fence;
	VkQueue m_queue;

	// Used when m_shaderStage == VK_SHADER_STAGE_VERTEX_BIT
	Move<VkBuffer> m_vertexBuffer;
	de::MovePtr<Allocation> m_vertexBufferAlloc;

	const bool m_accessOutOfBackingMemory;
};
373
// A subclass for read tests - configures AccessInstance for reading;
// only the input buffer's access range varies.
class ReadInstance: public AccessInstance
{
public:
	ReadInstance (Context& context,
				  Move<VkDevice> device,
#ifndef CTS_USES_VULKANSC
				  de::MovePtr<vk::DeviceDriver> deviceDriver,
#else
				  de::MovePtr<vk::DeviceDriverSC, vk::DeinitDeviceDeleter> deviceDriver,
#endif // CTS_USES_VULKANSC
				  ShaderType shaderType,
				  VkShaderStageFlags shaderStage,
				  VkFormat bufferFormat,
				  VkDeviceSize inBufferAccessRange,
				  bool accessOutOfBackingMemory);

	virtual ~ReadInstance (void) {}
};
393
// A subclass for write tests - configures AccessInstance for writing;
// only the output buffer's access range varies.
class WriteInstance: public AccessInstance
{
public:
	WriteInstance (Context& context,
				   Move<VkDevice> device,
#ifndef CTS_USES_VULKANSC
				   de::MovePtr<vk::DeviceDriver> deviceDriver,
#else
				   de::MovePtr<vk::DeviceDriverSC, vk::DeinitDeviceDeleter> deviceDriver,
#endif // CTS_USES_VULKANSC
				   ShaderType shaderType,
				   VkShaderStageFlags shaderStage,
				   VkFormat bufferFormat,
				   VkDeviceSize writeBufferAccessRange,
				   bool accessOutOfBackingMemory);

	virtual ~WriteInstance (void) {}
};
413
414 // Automatically incremented counter.
415 // Each read of value bumps counter up.
416 class Autocounter
417 {
418 public:
Autocounter()419 Autocounter()
420 :value(0u)
421 {}
incrementAndGetValue()422 deUint32 incrementAndGetValue()
423 {
424 return ++value;
425 }
426 private:
427 deUint32 value;
428 };
429
// A class representing a SPIRV variable.
// This class internally has a unique identificator (taken from an Autocounter).
// When such variable is used in the shader composition routine it is mapped onto an in-SPIRV-code variable name.
class Variable
{
	friend bool operator < (const Variable& a, const Variable& b);	// ordering needed for std::map keys
public:
	Variable(Autocounter& autoincrement)
		: value(autoincrement.incrementAndGetValue())
	{}
private:
	deUint32 value;	// unique id assigned at construction
};
443
// Strict weak ordering by internal id - lets Variable serve as a std::map key.
bool operator < (const Variable& a, const Variable& b)
{
	return a.value < b.value;
}
448
// A class representing a SPIRV operation.
// Since those are not copyable they don't need an internal id. Memory address is used instead.
class Operation
{
	friend bool operator==(const Operation& a, const Operation& b);
public:
	Operation(const char* text)
		: value(text)
	{
	}
	// Returns the full operation mnemonic, e.g. "OpCapability".
	const std::string& getValue() const
	{
		return value;
	}

private:
	Operation(const Operation& other);	// copying disabled so identity == address
	const std::string value;
};
468
// Identity comparison for Operation objects.
bool operator == (const Operation& a, const Operation& b)
{
	return &a == &b; // a fast & simple address comparison - making copies was disabled
}
473
// A namespace containing all SPIRV operations used in those tests.
// The OP macro declares a const Operation whose mnemonic is "Op" + name.
namespace op {
#define OP(name) const Operation name("Op"#name)
	// Module preamble operations.
	OP(Capability);
	OP(Extension);
	OP(ExtInstImport);
	OP(EntryPoint);
	OP(MemoryModel);
	OP(ExecutionMode);

	// Decoration and debug-name operations.
	OP(Decorate);
	OP(MemberDecorate);
	OP(Name);
	OP(MemberName);

	// Type declarations.
	OP(TypeVoid);
	OP(TypeBool);
	OP(TypeInt);
	OP(TypeFloat);
	OP(TypeVector);
	OP(TypeMatrix);
	OP(TypeArray);
	OP(TypeStruct);
	OP(TypeFunction);
	OP(TypePointer);
	OP(TypeImage);
	OP(TypeSampledImage);

	// Constants and variables.
	OP(Constant);
	OP(ConstantComposite);
	OP(Variable);

	// Function structure.
	OP(Function);
	OP(FunctionEnd);
	OP(Label);
	OP(Return);

	// Logic and selection.
	OP(LogicalEqual);
	OP(IEqual);
	OP(Select);

	// Memory access.
	OP(AccessChain);
	OP(Load);
	OP(Store);
#undef OP
}
520
// A class that allows to easily compose SPIRV code.
// This class automatically keeps the correct order of most of the operations,
// i.e. capabilities go to the top, types before constants, and so on, by
// routing each emitted line into one of several substreams that str() then
// concatenates in the order SPIR-V requires.
class ShaderStream
{
public:
	ShaderStream ()
	{}
	// composes shader string out of shader substreams.
	std::string str () const
	{
		std::stringstream stream;
		stream << capabilities.str()
			<< "; ----------------- PREAMBLE -----------------\n"
			<< preamble.str()
			<< "; ----------------- DEBUG --------------------\n"
			<< names.str()
			<< "; ----------------- DECORATIONS --------------\n"
			<< decorations.str()
			<< "; ----------------- TYPES --------------------\n"
			<< basictypes.str()
			<< "; ----------------- CONSTANTS ----------------\n"
			<< constants.str()
			<< "; ----------------- ADVANCED TYPES -----------\n"
			<< compositetypes.str()
			<< ((compositeconstants.str().length() > 0) ? "; ----------------- CONSTANTS ----------------\n" : "")
			<< compositeconstants.str()
			<< "; ----------------- VARIABLES & FUNCTIONS ----\n"
			<< shaderstream.str();
		return stream.str();
	}
	// Functions below are used to push Operations, Variables and other strings, numbers and characters to the shader.
	// Each function uses selectStream and map subroutines.
	// selectStream is used to choose a proper substream of shader.
	// E.g. if an operation is OpConstant it should be put into constants definitions stream - so selectStream will return that stream.
	// map on the other hand is used to replace Variables and Operations to their in-SPIRV-code representations.
	// for types like ints or floats map simply calls << operator to produce its string representation
	// for Operations a proper operation string is returned
	// for Variables there is a special mapping between in-C++ variable and in-SPIRV-code variable name.
	// following sequence of functions could be squashed to just two using variadic templates once we move to C++11 or higher
	// each method returns *this to allow chaining calls to these methods.
	template <typename T>
	ShaderStream& operator () (const T& a)
	{
		selectStream(a, 0) << map(a) << '\n';
		return *this;
	}
	template <typename T1, typename T2>
	ShaderStream& operator () (const T1& a, const T2& b)
	{
		selectStream(a, 0) << map(a) << '\t' << map(b) << '\n';
		return *this;
	}
	// From three arguments on, the third token ('c') may be the Operation in a
	// "Result = Operation operands..." line, so it participates in stream selection.
	template <typename T1, typename T2, typename T3>
	ShaderStream& operator () (const T1& a, const T2& b, const T3& c)
	{
		selectStream(a, c) << map(a) << '\t' << map(b) << '\t' << map(c) << '\n';
		return *this;
	}
	template <typename T1, typename T2, typename T3, typename T4>
	ShaderStream& operator () (const T1& a, const T2& b, const T3& c, const T4& d)
	{
		selectStream(a, c) << map(a) << '\t' << map(b) << '\t' << map(c) << '\t' << map(d) << '\n';
		return *this;
	}
	template <typename T1, typename T2, typename T3, typename T4, typename T5>
	ShaderStream& operator () (const T1& a, const T2& b, const T3& c, const T4& d, const T5& e)
	{
		selectStream(a, c) << map(a) << '\t' << map(b) << '\t' << map(c) << '\t' << map(d) << '\t' << map(e) << '\n';
		return *this;
	}
	template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6>
	ShaderStream& operator () (const T1& a, const T2& b, const T3& c, const T4& d, const T5& e, const T6& f)
	{
		selectStream(a, c) << map(a) << '\t' << map(b) << '\t' << map(c) << '\t' << map(d) << '\t' << map(e) << '\t' << map(f) << '\n';
		return *this;
	}
	template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7>
	ShaderStream& operator () (const T1& a, const T2& b, const T3& c, const T4& d, const T5& e, const T6& f, const T7& g)
	{
		selectStream(a, c) << map(a) << '\t' << map(b) << '\t' << map(c) << '\t' << map(d) << '\t' << map(e) << '\t' << map(f) << '\t' << map(g) << '\n';
		return *this;
	}
	template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8>
	ShaderStream& operator () (const T1& a, const T2& b, const T3& c, const T4& d, const T5& e, const T6& f, const T7& g, const T8& h)
	{
		selectStream(a, c) << map(a) << '\t' << map(b) << '\t' << map(c) << '\t' << map(d) << '\t' << map(e) << '\t' << map(f) << '\t' << map(g) << '\t' << map(h) << '\n';
		return *this;
	}
	template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9>
	ShaderStream& operator () (const T1& a, const T2& b, const T3& c, const T4& d, const T5& e, const T6& f, const T7& g, const T8& h, const T9& i)
	{
		selectStream(a, c) << map(a) << '\t' << map(b) << '\t' << map(c) << '\t' << map(d) << '\t' << map(e) << '\t' << map(f) << '\t' << map(g) << '\t' << map(h) << '\t' << map(i) << '\n';
		return *this;
	}
	template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10>
	ShaderStream& operator () (const T1& a, const T2& b, const T3& c, const T4& d, const T5& e, const T6& f, const T7& g, const T8& h, const T9& i, const T10& k)
	{
		selectStream(a, c) << map(a) << '\t' << map(b) << '\t' << map(c) << '\t' << map(d) << '\t' << map(e) << '\t' << map(f) << '\t' << map(g) << '\t' << map(h) << '\t' << map(i) << '\t' << map(k) << '\n';
		return *this;
	}

	// returns true if two variables have the same in-SPIRV-code names
	bool areSame (const Variable a, const Variable b)
	{
		VariableIt varA = vars.find(a);
		VariableIt varB = vars.find(b);
		return varA != vars.end() && varB != vars.end() && varA->second == varB->second;
	}

	// makes variable 'a' in-SPIRV-code name to be the same as variable 'b' in-SPIRV-code name
	void makeSame (const Variable a, const Variable b)
	{
		VariableIt varB = vars.find(b);
		if (varB != vars.end())
		{
			std::pair<VariableIt, bool> inserted = vars.insert(std::make_pair(a, varB->second));
			if (!inserted.second)
				inserted.first->second = varB->second;	// 'a' already mapped - overwrite with 'b's name
		}
	}
private:
	// generic version of map (tries to push whatever came to stringstream to get its string representation)
	template <typename T>
	std::string map (const T& a)
	{
		std::stringstream temp;
		temp << a;
		return temp.str();
	}

	// looks for mapping of c++ Variable object onto in-SPIRV-code name.
	// if there was not yet such mapping generated a new mapping is created based on incremented local counter.
	std::string map (const Variable& a)
	{
		VariableIt var = vars.find(a);
		if (var != vars.end())
			return var->second;
		// New variable: synthesize a name of the form %00XX (hex counter, width 4).
		std::stringstream temp;
		temp << '%';
		temp.width(4);
		temp.fill('0');
		temp << std::hex << varCounter.incrementAndGetValue();
		vars.insert(std::make_pair(a, temp.str()));
		return temp.str();
	}

	// a simple specification for Operation
	std::string map (const Operation& a)
	{
		return a.getValue();
	}

	// a specification for char* - faster than going through stringstream << operator
	std::string map (const char*& a)
	{
		return std::string(a);
	}

	// a specification for char - faster than going through stringstream << operator
	std::string map (const char& a)
	{
		return std::string(1, a);
	}

	// a generic version of selectStream - used when neither 1st nor 3rd SPIRV line token is Operation.
	// In general should never happen.
	// All SPIRV lines are constructed in one of two forms:
	// Variable = Operation operands...
	// or
	// Operation operands...
	// So operation is either 1st or 3rd token.
	template <typename T0, typename T1>
	std::stringstream& selectStream (const T0& op0, const T1& op1)
	{
		DE_UNREF(op0);
		DE_UNREF(op1);
		return shaderstream;
	}

	// Specialisation for Operation being 1st parameter
	// Certain operations make the SPIRV code line to be pushed to different substreams.
	template <typename T1>
	std::stringstream& selectStream (const Operation& op, const T1& op1)
	{
		DE_UNREF(op1);
		if (op == op::Decorate || op == op::MemberDecorate)
			return decorations;
		if (op == op::Name || op == op::MemberName)
			return names;
		if (op == op::Capability || op == op::Extension)
			return capabilities;
		if (op == op::MemoryModel || op == op::ExecutionMode || op == op::EntryPoint)
			return preamble;
		return shaderstream;
	}

	// Specialisation for Operation being 3rd parameter
	// Certain operations make the SPIRV code line to be pushed to different substreams.
	// If we would like to use this way of generating SPIRV we could use this method as SPIRV line validation point
	// e.g. here instead of having partial specialisation I could specialise for T0 being Variable since this has to match Variable = Operation operands...
	template <typename T0>
	std::stringstream& selectStream (const T0& op0, const Operation& op)
	{
		DE_UNREF(op0);
		if (op == op::ExtInstImport)
			return preamble;
		if (op == op::TypeVoid || op == op::TypeBool || op == op::TypeInt || op == op::TypeFloat || op == op::TypeVector || op == op::TypeMatrix)
			return basictypes;
		if (op == op::TypeArray || op == op::TypeStruct || op == op::TypeFunction || op == op::TypePointer || op == op::TypeImage || op == op::TypeSampledImage)
			return compositetypes;
		if (op == op::Constant)
			return constants;
		if (op == op::ConstantComposite)
			return compositeconstants;
		return shaderstream;
	}

	typedef std::map<Variable, std::string> VariablesPack;
	typedef VariablesPack::iterator VariableIt;

	// local mappings between c++ Variable objects and in-SPIRV-code names
	VariablesPack vars;

	// shader substreams
	std::stringstream capabilities;
	std::stringstream preamble;
	std::stringstream names;
	std::stringstream decorations;
	std::stringstream basictypes;
	std::stringstream constants;
	std::stringstream compositetypes;
	std::stringstream compositeconstants;
	std::stringstream shaderstream;

	// local incremented counter
	Autocounter varCounter;
};
759
// A supplementary class to group frequently used Variables together.
// Every member gets a unique id from the shared Autocounter at construction;
// the 'constants' vector additionally reserves 32 ids for literal constants.
class Variables
{
public:
	Variables (Autocounter &autoincrement)
		: version(autoincrement)
		, mainFunc(autoincrement)
		, mainFuncLabel(autoincrement)
		, voidFuncVoid(autoincrement)
		, copy_type(autoincrement)
		, copy_type_vec(autoincrement)
		, buffer_type_vec(autoincrement)
		, copy_type_ptr(autoincrement)
		, buffer_type(autoincrement)
		, voidId(autoincrement)
		, v4f32(autoincrement)
		, v4s32(autoincrement)
		, v4u32(autoincrement)
		, v4s64(autoincrement)
		, v4u64(autoincrement)
		, s32(autoincrement)
		, f32(autoincrement)
		, u32(autoincrement)
		, s64(autoincrement)
		, u64(autoincrement)
		, boolean(autoincrement)
		, array_content_type(autoincrement)
		, s32_type_ptr(autoincrement)
		, dataSelectorStructPtrType(autoincrement)
		, dataSelectorStructPtr(autoincrement)
		, dataArrayType(autoincrement)
		, dataInput(autoincrement)
		, dataInputPtrType(autoincrement)
		, dataInputType(autoincrement)
		, dataInputSampledType(autoincrement)
		, dataOutput(autoincrement)
		, dataOutputPtrType(autoincrement)
		, dataOutputType(autoincrement)
		, dataSelectorStructType(autoincrement)
		, input(autoincrement)
		, inputPtr(autoincrement)
		, output(autoincrement)
		, outputPtr(autoincrement)
	{
		// Pre-create 32 ids usable for numeric constants in the generated shader.
		for (deUint32 i = 0; i < 32; ++i)
			constants.push_back(Variable(autoincrement));
	}
	const Variable version;
	const Variable mainFunc;
	const Variable mainFuncLabel;
	const Variable voidFuncVoid;
	std::vector<Variable> constants;
	const Variable copy_type;
	const Variable copy_type_vec;
	const Variable buffer_type_vec;
	const Variable copy_type_ptr;
	const Variable buffer_type;
	const Variable voidId;
	// Basic vector types (4-component float / signed / unsigned, 32- and 64-bit).
	const Variable v4f32;
	const Variable v4s32;
	const Variable v4u32;
	const Variable v4s64;
	const Variable v4u64;
	// Basic scalar types.
	const Variable s32;
	const Variable f32;
	const Variable u32;
	const Variable s64;
	const Variable u64;
	const Variable boolean;
	const Variable array_content_type;
	const Variable s32_type_ptr;
	const Variable dataSelectorStructPtrType;
	const Variable dataSelectorStructPtr;
	const Variable dataArrayType;
	const Variable dataInput;
	const Variable dataInputPtrType;
	const Variable dataInputType;
	const Variable dataInputSampledType;
	const Variable dataOutput;
	const Variable dataOutputPtrType;
	const Variable dataOutputType;
	const Variable dataSelectorStructType;
	const Variable input;
	const Variable inputPtr;
	const Variable output;
	const Variable outputPtr;
};
847
// A routine generating SPIR-V code for all test cases in this group
// Generates a complete SPIR-V assembly shader for a single test case.
//
// shaderStage  - the pipeline stage this shader is generated for (compute, vertex or fragment).
// shaderType   - unit of each copy operation: scalar, 4-component vector or 4x4 matrix.
// bufferFormat - element format of the storage buffers (R32 int/uint/float or R64 int/uint).
// reads        - true: the variable pointer is used on the load (read) side;
//                false: the variable pointer is used on the store (write) side.
// unused       - true: emit a minimal pass-through shader instead of the test shader. A graphics
//                pipeline needs both vertex and fragment shaders even though only one is tested.
std::string MakeShader(VkShaderStageFlags shaderStage, ShaderType shaderType, VkFormat bufferFormat, bool reads, bool unused)
{
	// R64 formats require 64-bit integer types and the Int64 capability emitted below.
	const bool isR64 = (bufferFormat == VK_FORMAT_R64_UINT || bufferFormat == VK_FORMAT_R64_SINT);
	// Shorthand for the '=' token of SPIR-V assembly (faster to write).
	const char is = '=';

	// Variables require such a counter to generate their unique ids. Since there is a possibility that in the future this code will
	// run in parallel, this counter is made local to this function body to be safe.
	Autocounter localcounter;

	// Frequently used Variables (gathered into this single object for readability).
	Variables var (localcounter);

	// A SPIR-V code builder.
	ShaderStream shaderSource;

	// A basic preamble of the SPIR-V shader. Turns on required capabilities and extensions.
	shaderSource
	(op::Capability, "Shader")
	(op::Capability, "VariablePointersStorageBuffer");

	if (isR64)
	{
		shaderSource
		(op::Capability, "Int64");
	}

	shaderSource
	(op::Extension, "\"SPV_KHR_storage_buffer_storage_class\"")
	(op::Extension, "\"SPV_KHR_variable_pointers\"")
	(var.version, is, op::ExtInstImport, "\"GLSL.std.450\"")
	(op::MemoryModel, "Logical", "GLSL450");

	// Use the correct entry point definition depending on shader stage.
	if (shaderStage == VK_SHADER_STAGE_COMPUTE_BIT)
	{
		shaderSource
		(op::EntryPoint, "GLCompute", var.mainFunc, "\"main\"")
		(op::ExecutionMode, var.mainFunc, "LocalSize", 1, 1, 1);
	}
	else if (shaderStage == VK_SHADER_STAGE_VERTEX_BIT)
	{
		shaderSource
		(op::EntryPoint, "Vertex", var.mainFunc, "\"main\"", var.input, var.output)
		(op::Decorate, var.output, "BuiltIn", "Position")
		(op::Decorate, var.input, "Location", 0);
	}
	else if (shaderStage == VK_SHADER_STAGE_FRAGMENT_BIT)
	{
		shaderSource
		(op::EntryPoint, "Fragment", var.mainFunc, "\"main\"", var.output)
		(op::ExecutionMode, var.mainFunc, "OriginUpperLeft")
		(op::Decorate, var.output, "Location", 0);
	}

	// If we are testing the vertex shader or the fragment shader we need to provide the other one for the pipeline too.
	// So the not-tested one is 'unused'. It is then a minimal/simplest possible pass-through shader.
	// If we are testing a compute shader we don't need an unused shader at all.
	if (unused)
	{
		if (shaderStage == VK_SHADER_STAGE_FRAGMENT_BIT)
		{
			// Pass-through fragment shader: declares only the output and a constant (1,1,1,1) colour.
			shaderSource
			(var.voidId, is, op::TypeVoid)
			(var.voidFuncVoid, is, op::TypeFunction, var.voidId)
			(var.f32, is, op::TypeFloat, 32)
			(var.v4f32, is, op::TypeVector, var.f32, 4)
			(var.outputPtr, is, op::TypePointer, "Output", var.v4f32)
			(var.output, is, op::Variable, var.outputPtr, "Output")
			(var.constants[6], is, op::Constant, var.f32, 1)
			(var.constants[7], is, op::ConstantComposite, var.v4f32, var.constants[6], var.constants[6], var.constants[6], var.constants[6])
			(var.mainFunc, is, op::Function, var.voidId, "None", var.voidFuncVoid)
			(var.mainFuncLabel, is, op::Label);
		}
		else if (shaderStage == VK_SHADER_STAGE_VERTEX_BIT)
		{
			// Pass-through vertex shader: declares a v4f32 input and output; the copy itself is
			// emitted in the common epilogue at the end of this function.
			shaderSource
			(var.voidId, is, op::TypeVoid)
			(var.voidFuncVoid, is, op::TypeFunction , var.voidId)
			(var.f32, is, op::TypeFloat, 32)
			(var.v4f32, is, op::TypeVector , var.f32, 4)
			(var.outputPtr, is, op::TypePointer, "Output" , var.v4f32)
			(var.output, is, op::Variable , var.outputPtr, "Output")
			(var.inputPtr, is, op::TypePointer, "Input" , var.v4f32)
			(var.input, is, op::Variable , var.inputPtr, "Input")
			(var.mainFunc, is, op::Function , var.voidId, "None", var.voidFuncVoid)
			(var.mainFuncLabel, is, op::Label);
		}
	}
	else // this is the start of the actual shader that tests variable pointers
	{
		// Input buffer at set 0 / binding 0, output buffer at set 0 / binding 1.
		shaderSource
		(op::Decorate, var.dataInput, "DescriptorSet", 0)
		(op::Decorate, var.dataInput, "Binding", 0)

		(op::Decorate, var.dataOutput, "DescriptorSet", 0)
		(op::Decorate, var.dataOutput, "Binding", 1);

		// For scalar types and vector types we use a 1024-element array of 4-element arrays of 4-component vectors,
		// so the stride of the internal array is the size of a 4-component vector.
		if (shaderType == SHADER_TYPE_SCALAR_COPY || shaderType == SHADER_TYPE_VECTOR_COPY)
		{
			if (isR64)
			{
				// 4 components x 8 bytes each.
				shaderSource
				(op::Decorate, var.array_content_type, "ArrayStride", 32);
			}
			else
			{
				// 4 components x 4 bytes each.
				shaderSource
				(op::Decorate, var.array_content_type, "ArrayStride", 16);
			}
		}

		if (isR64)
		{
			shaderSource
			(op::Decorate, var.dataArrayType, "ArrayStride", 128);
		}
		else
		{
			// For matrices we use an array of 4x4-component matrices.
			// The stride of the outer array is then 64 in every case.
			shaderSource
			(op::Decorate, var.dataArrayType, "ArrayStride", 64);
		}

		// An output block.
		shaderSource
		(op::MemberDecorate, var.dataOutputType, 0, "Offset", 0)
		(op::Decorate, var.dataOutputType, "Block")

		// An input block. Marked readonly.
		(op::MemberDecorate, var.dataInputType, 0, "NonWritable")
		(op::MemberDecorate, var.dataInputType, 0, "Offset", 0)
		(op::Decorate, var.dataInputType, "Block")

		// A special structure matching the data in one of our buffers:
		// member at 0 is an index to the read position
		// member at 1 is an index to the write position
		// member at 2 is always zero. It is used to perform OpSelect. A value coming from a buffer is used to avoid incidental optimisations that could prune OpSelect if the value was compile-time known.
		(op::MemberDecorate, var.dataSelectorStructType, 0, "Offset", 0)
		(op::MemberDecorate, var.dataSelectorStructType, 1, "Offset", 4)
		(op::MemberDecorate, var.dataSelectorStructType, 2, "Offset", 8)
		(op::Decorate, var.dataSelectorStructType, "Block")

		// Binding to the matching buffer (set 0 / binding 2).
		(op::Decorate, var.dataSelectorStructPtr, "DescriptorSet", 0)
		(op::Decorate, var.dataSelectorStructPtr, "Binding", 2)

		// Making composite types used in the shader.
		(var.voidId, is, op::TypeVoid)
		(var.voidFuncVoid, is, op::TypeFunction, var.voidId)

		(var.boolean, is, op::TypeBool)

		(var.f32, is, op::TypeFloat, 32)
		(var.s32, is, op::TypeInt, 32, 1)
		(var.u32, is, op::TypeInt, 32, 0);

		if (isR64)
		{
			shaderSource
			(var.s64, is, op::TypeInt, 64, 1)
			(var.u64, is, op::TypeInt, 64, 0);
		}

		shaderSource
		(var.v4f32, is, op::TypeVector, var.f32, 4)
		(var.v4s32, is, op::TypeVector, var.s32, 4)
		(var.v4u32, is, op::TypeVector, var.u32, 4);

		if (isR64)
		{
			shaderSource
			(var.v4s64, is, op::TypeVector, var.s64, 4)
			(var.v4u64, is, op::TypeVector, var.u64, 4);
		}

		// Since the shader tests scalars, vectors and matrices of ints, uints and floats, alternative names are generated for some
		// of the types so those can be used without needing an "if" everywhere.
		// The Variable mappings will make sure the proper variable name is used.
		// Below is the first part of aliasing types based on int, uint, float.
		switch (bufferFormat)
		{
		case vk::VK_FORMAT_R32_SINT:
			shaderSource.makeSame(var.buffer_type, var.s32);
			shaderSource.makeSame(var.buffer_type_vec, var.v4s32);
			break;
		case vk::VK_FORMAT_R32_UINT:
			shaderSource.makeSame(var.buffer_type, var.u32);
			shaderSource.makeSame(var.buffer_type_vec, var.v4u32);
			break;
		case vk::VK_FORMAT_R32_SFLOAT:
			shaderSource.makeSame(var.buffer_type, var.f32);
			shaderSource.makeSame(var.buffer_type_vec, var.v4f32);
			break;
		case vk::VK_FORMAT_R64_SINT:
			shaderSource.makeSame(var.buffer_type, var.s64);
			shaderSource.makeSame(var.buffer_type_vec, var.v4s64);
			break;
		case vk::VK_FORMAT_R64_UINT:
			shaderSource.makeSame(var.buffer_type, var.u64);
			shaderSource.makeSame(var.buffer_type_vec, var.v4u64);
			break;
		default:
			// to prevent the compiler from complaining that not all cases are handled (but we should not get here).
			deAssertFail("This point should be not reachable with correct program flow.", __FILE__, __LINE__);
			break;
		}

		// Below is the second part that aliases based on scalar, vector, matrix.
		switch (shaderType)
		{
		case SHADER_TYPE_SCALAR_COPY:
			shaderSource.makeSame(var.copy_type, var.buffer_type);
			break;
		case SHADER_TYPE_VECTOR_COPY:
			shaderSource.makeSame(var.copy_type, var.buffer_type_vec);
			break;
		case SHADER_TYPE_MATRIX_COPY:
			if (bufferFormat != VK_FORMAT_R32_SFLOAT)
				TCU_THROW(NotSupportedError, "Matrices can be used only with floating point types.");
			shaderSource
			(var.copy_type, is, op::TypeMatrix, var.buffer_type_vec, 4);
			break;
		default:
			// to prevent the compiler from complaining that not all cases are handled (but we should not get here).
			deAssertFail("This point should be not reachable with correct program flow.", __FILE__, __LINE__);
			break;
		}

		// Constants needed by the shader: s32 0..3 for indexing, u32 4 and 1024 for array sizes.
		shaderSource
		(var.constants[0], is, op::Constant, var.s32, 0)
		(var.constants[1], is, op::Constant, var.s32, 1)
		(var.constants[2], is, op::Constant, var.s32, 2)
		(var.constants[3], is, op::Constant, var.s32, 3)
		(var.constants[4], is, op::Constant, var.u32, 4)
		(var.constants[5], is, op::Constant, var.u32, 1024);

		// For fragment shaders we additionally need a constant vector (output "colour"), so let's make it.
		if (shaderStage == VK_SHADER_STAGE_FRAGMENT_BIT)
		{
			shaderSource
			(var.constants[6], is, op::Constant, var.f32, 1)
			(var.constants[7], is, op::ConstantComposite, var.v4f32, var.constants[6], var.constants[6], var.constants[6], var.constants[6]);
		}

		// An additional alias for the type of the content of this 1024-element outer array.
		if (shaderType == SHADER_TYPE_SCALAR_COPY || shaderType == SHADER_TYPE_VECTOR_COPY)
		{
			shaderSource
			(var.array_content_type, is, op::TypeArray, var.buffer_type_vec, var.constants[4]);
		}
		else
		{
			shaderSource.makeSame(var.array_content_type, var.copy_type);
		}

		// Let's create pointer types to the input data type, output data type and a struct.
		// These must be distinct types due to different type decorations.
		// Let's also make actual pointers to the data.
		shaderSource
		(var.dataArrayType, is, op::TypeArray, var.array_content_type, var.constants[5])
		(var.dataInputType, is, op::TypeStruct, var.dataArrayType)
		(var.dataOutputType, is, op::TypeStruct, var.dataArrayType)
		(var.dataInputPtrType, is, op::TypePointer, "StorageBuffer", var.dataInputType)
		(var.dataOutputPtrType, is, op::TypePointer, "StorageBuffer", var.dataOutputType)
		(var.dataInput, is, op::Variable, var.dataInputPtrType, "StorageBuffer")
		(var.dataOutput, is, op::Variable, var.dataOutputPtrType, "StorageBuffer")
		(var.dataSelectorStructType, is, op::TypeStruct, var.s32, var.s32, var.s32)
		(var.dataSelectorStructPtrType, is, op::TypePointer, "Uniform", var.dataSelectorStructType)
		(var.dataSelectorStructPtr, is, op::Variable, var.dataSelectorStructPtrType, "Uniform");

		// We also need additional pointers to fulfil stage requirements on shader inputs and outputs.
		if (shaderStage == VK_SHADER_STAGE_VERTEX_BIT)
		{
			shaderSource
			(var.inputPtr, is, op::TypePointer, "Input", var.v4f32)
			(var.input, is, op::Variable, var.inputPtr, "Input")
			(var.outputPtr, is, op::TypePointer, "Output", var.v4f32)
			(var.output, is, op::Variable, var.outputPtr, "Output");
		}
		else if (shaderStage == VK_SHADER_STAGE_FRAGMENT_BIT)
		{
			shaderSource
			(var.outputPtr, is, op::TypePointer, "Output", var.v4f32)
			(var.output, is, op::Variable, var.outputPtr, "Output");
		}

		shaderSource
		(var.copy_type_ptr, is, op::TypePointer, "StorageBuffer", var.copy_type)
		(var.s32_type_ptr, is, op::TypePointer, "Uniform", var.s32);

		// Make the shader main function.
		shaderSource
		(var.mainFunc, is, op::Function, var.voidId, "None", var.voidFuncVoid)
		(var.mainFuncLabel, is, op::Label);

		Variable copyFromPtr(localcounter), copyToPtr(localcounter), zeroPtr(localcounter);
		Variable copyFrom(localcounter), copyTo(localcounter), zero(localcounter);

		// Let's load data from our auxiliary buffer with the reading index, writing index and zero.
		shaderSource
		(copyToPtr, is, op::AccessChain, var.s32_type_ptr, var.dataSelectorStructPtr, var.constants[1])
		(copyTo, is, op::Load, var.s32, copyToPtr)
		(copyFromPtr, is, op::AccessChain, var.s32_type_ptr, var.dataSelectorStructPtr, var.constants[0])
		(copyFrom, is, op::Load, var.s32, copyFromPtr)
		(zeroPtr, is, op::AccessChain, var.s32_type_ptr, var.dataSelectorStructPtr, var.constants[2])
		(zero, is, op::Load, var.s32, zeroPtr);

		// Let's start copying data using variable pointers.
		switch (shaderType)
		{
		case SHADER_TYPE_SCALAR_COPY:
			// 16 loads/stores: one per component of the 4x4 block of 4-component vectors.
			for (int i = 0; i < 4; ++i)
			{
				for (int j = 0; j < 4; ++j)
				{
					Variable actualLoadChain(localcounter), actualStoreChain(localcounter), loadResult(localcounter);
					Variable selection(localcounter);
					Variable lcA(localcounter), lcB(localcounter), scA(localcounter), scB(localcounter);

					// 'zero' comes from the buffer so the OpSelect below cannot be constant-folded away.
					shaderSource
					(selection, is, op::IEqual, var.boolean, zero, var.constants[0]);

					if (reads)
					{
						// If we check reads we use variable pointers only for the reading part.
						shaderSource
						(lcA, is, op::AccessChain, var.copy_type_ptr, var.dataInput, var.constants[0], copyFrom, var.constants[i], var.constants[j])
						(lcB, is, op::AccessChain, var.copy_type_ptr, var.dataInput, var.constants[0], copyFrom, var.constants[i], var.constants[j])
						// actualLoadChain will be a variable pointer as it was created through OpSelect
						(actualLoadChain, is, op::Select, var.copy_type_ptr, selection, lcA, lcB)
						// actualStoreChain will be a regular pointer
						(actualStoreChain, is, op::AccessChain, var.copy_type_ptr, var.dataOutput, var.constants[0], copyTo, var.constants[i], var.constants[j]);
					}
					else
					{
						// If we check writes we use variable pointers for the writing part only.
						shaderSource
						// actualLoadChain will be a regular pointer
						(actualLoadChain, is, op::AccessChain, var.copy_type_ptr, var.dataInput, var.constants[0], copyFrom, var.constants[i], var.constants[j])
						(scA, is, op::AccessChain, var.copy_type_ptr, var.dataOutput, var.constants[0], copyTo, var.constants[i], var.constants[j])
						(scB, is, op::AccessChain, var.copy_type_ptr, var.dataOutput, var.constants[0], copyTo, var.constants[i], var.constants[j])
						// actualStoreChain will be a variable pointer as it was created through OpSelect
						(actualStoreChain, is, op::Select, var.copy_type_ptr, selection, scA, scB);
					}
					// Do the actual copying.
					shaderSource
					(loadResult, is, op::Load, var.copy_type, actualLoadChain)
					(op::Store, actualStoreChain, loadResult);
				}
			}
			break;
		// The cases below have the same logic as the one above - we are just copying bigger chunks of data with every load/store pair.
		case SHADER_TYPE_VECTOR_COPY:
			for (int i = 0; i < 4; ++i)
			{
				Variable actualLoadChain(localcounter), actualStoreChain(localcounter), loadResult(localcounter);
				Variable selection(localcounter);
				Variable lcA(localcounter), lcB(localcounter), scA(localcounter), scB(localcounter);

				shaderSource
				(selection, is, op::IEqual, var.boolean, zero, var.constants[0]);

				if (reads)
				{
					shaderSource
					(lcA, is, op::AccessChain, var.copy_type_ptr, var.dataInput, var.constants[0], copyFrom, var.constants[i])
					(lcB, is, op::AccessChain, var.copy_type_ptr, var.dataInput, var.constants[0], copyFrom, var.constants[i])
					(actualLoadChain, is, op::Select, var.copy_type_ptr, selection, lcA, lcB)
					(actualStoreChain, is, op::AccessChain, var.copy_type_ptr, var.dataOutput, var.constants[0], copyTo, var.constants[i]);
				}
				else
				{
					shaderSource
					(actualLoadChain, is, op::AccessChain, var.copy_type_ptr, var.dataInput, var.constants[0], copyFrom, var.constants[i])
					(scA, is, op::AccessChain, var.copy_type_ptr, var.dataOutput, var.constants[0], copyTo, var.constants[i])
					(scB, is, op::AccessChain, var.copy_type_ptr, var.dataOutput, var.constants[0], copyTo, var.constants[i])
					(actualStoreChain, is, op::Select, var.copy_type_ptr, selection, scA, scB);
				}

				shaderSource
				(loadResult, is, op::Load, var.copy_type, actualLoadChain)
				(op::Store, actualStoreChain, loadResult);
			}
			break;
		case SHADER_TYPE_MATRIX_COPY:
			{
				Variable actualLoadChain(localcounter), actualStoreChain(localcounter), loadResult(localcounter);
				Variable selection(localcounter);
				Variable lcA(localcounter), lcB(localcounter), scA(localcounter), scB(localcounter);

				shaderSource
				(selection, is, op::IEqual, var.boolean, zero, var.constants[0]);

				if (reads)
				{
					shaderSource
					(lcA, is, op::AccessChain, var.copy_type_ptr, var.dataInput, var.constants[0], copyFrom)
					(lcB, is, op::AccessChain, var.copy_type_ptr, var.dataInput, var.constants[0], copyFrom)
					(actualLoadChain, is, op::Select, var.copy_type_ptr, selection, lcA, lcB)
					(actualStoreChain, is, op::AccessChain, var.copy_type_ptr, var.dataOutput, var.constants[0], copyTo);
				}
				else
				{
					shaderSource
					(actualLoadChain, is, op::AccessChain, var.copy_type_ptr, var.dataInput, var.constants[0], copyFrom)
					(scA, is, op::AccessChain, var.copy_type_ptr, var.dataOutput, var.constants[0], copyTo)
					(scB, is, op::AccessChain, var.copy_type_ptr, var.dataOutput, var.constants[0], copyTo)
					(actualStoreChain, is, op::Select, var.copy_type_ptr, selection, scA, scB);
				}

				shaderSource
				(loadResult, is, op::Load, var.copy_type, actualLoadChain)
				(op::Store, actualStoreChain, loadResult);
			}
			break;
		default:
			// to prevent the compiler from complaining that not all cases are handled (but we should not get here).
			deAssertFail("This point should be not reachable with correct program flow.", __FILE__, __LINE__);
			break;
		}
	}

	// This is common for test shaders and unused ones.
	// We need to fill the stage output from the shader properly:
	// output vertex positions in the vertex shader...
	if (shaderStage == VK_SHADER_STAGE_VERTEX_BIT)
	{
		Variable inputValue(localcounter), outputLocation(localcounter);
		shaderSource
		(inputValue, is, op::Load, var.v4f32, var.input)
		(outputLocation, is, op::AccessChain, var.outputPtr, var.output)
		(op::Store, outputLocation, inputValue);
	}
	// ...and the output colour in the fragment shader.
	else if (shaderStage == VK_SHADER_STAGE_FRAGMENT_BIT)
	{
		shaderSource
		(op::Store, var.output, var.constants[7]);
	}

	// We are done. Let's close the main function body.
	shaderSource
	(op::Return)
	(op::FunctionEnd);

	return shaderSource.str();
}
1300
// Test case for robust reads through variable pointers.
//
// readAccessRange          - size in bytes of the input-buffer range bound via the descriptor;
//                            out-of-range reads exercise robust-access behaviour.
// accessOutOfBackingMemory - when true, the shader's read index is pushed to the last array
//                            element so the access lands outside the buffer's backing memory
//                            (see index setup in AccessInstance) -- not just past the bound range.
RobustReadTest::RobustReadTest (tcu::TestContext&	testContext,
				const std::string&	name,
				const std::string&	description,
				VkShaderStageFlags	shaderStage,
				ShaderType		shaderType,
				VkFormat		bufferFormat,
				VkDeviceSize		readAccessRange,
				bool			accessOutOfBackingMemory)
	: RobustAccessWithPointersTest	(testContext, name, description, shaderStage, shaderType, bufferFormat)
	, m_readAccessRange		(readAccessRange)
	, m_accessOutOfBackingMemory	(accessOutOfBackingMemory)
{
}
1314
// Creates the runtime instance for a read test. A dedicated device is created so the
// robust-buffer-access + variable-pointers feature set can be enabled independently of
// the default test device.
TestInstance* RobustReadTest::createInstance (Context& context) const
{
	auto device = createRobustBufferAccessVariablePointersDevice(context);
#ifndef CTS_USES_VULKANSC
	// Regular Vulkan build: plain device driver for the custom device.
	de::MovePtr<vk::DeviceDriver> deviceDriver = de::MovePtr<DeviceDriver>(new DeviceDriver(context.getPlatformInterface(), context.getInstance(), *device));
#else
	// Vulkan SC build: SC device driver wrapped with a deleter that deinitializes the device through the resource interface.
	de::MovePtr<vk::DeviceDriverSC, vk::DeinitDeviceDeleter> deviceDriver = de::MovePtr<DeviceDriverSC, DeinitDeviceDeleter>(new DeviceDriverSC(context.getPlatformInterface(), context.getInstance(), *device, context.getTestContext().getCommandLine(), context.getResourceInterface(), context.getDeviceVulkanSC10Properties(), context.getDeviceProperties()), vk::DeinitDeviceDeleter(context.getResourceInterface().get(), *device));
#endif // CTS_USES_VULKANSC

	return new ReadInstance(context, device, deviceDriver, m_shaderType, m_shaderStage, m_bufferFormat, m_readAccessRange, m_accessOutOfBackingMemory);
}
1326
initPrograms(SourceCollections & programCollection) const1327 void RobustReadTest::initPrograms(SourceCollections& programCollection) const
1328 {
1329 if (m_shaderStage == VK_SHADER_STAGE_COMPUTE_BIT)
1330 {
1331 programCollection.spirvAsmSources.add("compute") << MakeShader(VK_SHADER_STAGE_COMPUTE_BIT, m_shaderType, m_bufferFormat, true, false);
1332 }
1333 else
1334 {
1335 programCollection.spirvAsmSources.add("vertex") << MakeShader(VK_SHADER_STAGE_VERTEX_BIT, m_shaderType, m_bufferFormat, true, m_shaderStage != VK_SHADER_STAGE_VERTEX_BIT);
1336 programCollection.spirvAsmSources.add("fragment") << MakeShader(VK_SHADER_STAGE_FRAGMENT_BIT, m_shaderType, m_bufferFormat, true, m_shaderStage != VK_SHADER_STAGE_FRAGMENT_BIT);
1337 }
1338 }
1339
// Test case for robust writes through variable pointers.
//
// writeAccessRange         - size in bytes of the output-buffer range bound via the descriptor;
//                            out-of-range writes exercise robust-access behaviour.
// accessOutOfBackingMemory - when true, the shader's write index is pushed to the last array
//                            element so the access lands outside the buffer's backing memory
//                            (see index setup in AccessInstance) -- not just past the bound range.
RobustWriteTest::RobustWriteTest (tcu::TestContext&	testContext,
				const std::string&	name,
				const std::string&	description,
				VkShaderStageFlags	shaderStage,
				ShaderType		shaderType,
				VkFormat		bufferFormat,
				VkDeviceSize		writeAccessRange,
				bool			accessOutOfBackingMemory)

	: RobustAccessWithPointersTest	(testContext, name, description, shaderStage, shaderType, bufferFormat)
	, m_writeAccessRange		(writeAccessRange)
	, m_accessOutOfBackingMemory	(accessOutOfBackingMemory)
{
}
1354
// Creates the runtime instance for a write test. A dedicated device is created so the
// robust-buffer-access + variable-pointers feature set can be enabled independently of
// the default test device.
TestInstance* RobustWriteTest::createInstance (Context& context) const
{
	auto device = createRobustBufferAccessVariablePointersDevice(context);
#ifndef CTS_USES_VULKANSC
	// Regular Vulkan build: plain device driver for the custom device.
	de::MovePtr<vk::DeviceDriver> deviceDriver = de::MovePtr<DeviceDriver>(new DeviceDriver(context.getPlatformInterface(), context.getInstance(), *device));
#else
	// Vulkan SC build: SC device driver wrapped with a deleter that deinitializes the device through the resource interface.
	de::MovePtr<vk::DeviceDriverSC, vk::DeinitDeviceDeleter> deviceDriver = de::MovePtr<DeviceDriverSC, DeinitDeviceDeleter>(new DeviceDriverSC(context.getPlatformInterface(), context.getInstance(), *device, context.getTestContext().getCommandLine(), context.getResourceInterface(), context.getDeviceVulkanSC10Properties(), context.getDeviceProperties()), vk::DeinitDeviceDeleter(context.getResourceInterface().get(), *device));
#endif // CTS_USES_VULKANSC

	return new WriteInstance(context, device, deviceDriver, m_shaderType, m_shaderStage, m_bufferFormat, m_writeAccessRange, m_accessOutOfBackingMemory);
}
1366
initPrograms(SourceCollections & programCollection) const1367 void RobustWriteTest::initPrograms(SourceCollections& programCollection) const
1368 {
1369 if (m_shaderStage == VK_SHADER_STAGE_COMPUTE_BIT)
1370 {
1371 programCollection.spirvAsmSources.add("compute") << MakeShader(VK_SHADER_STAGE_COMPUTE_BIT, m_shaderType, m_bufferFormat, false, false);
1372 }
1373 else
1374 {
1375 programCollection.spirvAsmSources.add("vertex") << MakeShader(VK_SHADER_STAGE_VERTEX_BIT, m_shaderType, m_bufferFormat, false, m_shaderStage != VK_SHADER_STAGE_VERTEX_BIT);
1376 programCollection.spirvAsmSources.add("fragment") << MakeShader(VK_SHADER_STAGE_FRAGMENT_BIT, m_shaderType, m_bufferFormat, false, m_shaderStage != VK_SHADER_STAGE_FRAGMENT_BIT);
1377 }
1378 }
1379
AccessInstance(Context & context,Move<VkDevice> device,de::MovePtr<vk::DeviceDriver> deviceDriver,ShaderType shaderType,VkShaderStageFlags shaderStage,VkFormat bufferFormat,BufferAccessType bufferAccessType,VkDeviceSize inBufferAccessRange,VkDeviceSize outBufferAccessRange,bool accessOutOfBackingMemory)1380 AccessInstance::AccessInstance (Context& context,
1381 Move<VkDevice> device,
1382 #ifndef CTS_USES_VULKANSC
1383 de::MovePtr<vk::DeviceDriver> deviceDriver,
1384 #else
1385 de::MovePtr<vk::DeviceDriverSC, vk::DeinitDeviceDeleter> deviceDriver,
1386 #endif // CTS_USES_VULKANSC
1387
1388 ShaderType shaderType,
1389 VkShaderStageFlags shaderStage,
1390 VkFormat bufferFormat,
1391 BufferAccessType bufferAccessType,
1392 VkDeviceSize inBufferAccessRange,
1393 VkDeviceSize outBufferAccessRange,
1394 bool accessOutOfBackingMemory)
1395 : vkt::TestInstance (context)
1396 , m_device (device)
1397 , m_deviceDriver (deviceDriver)
1398 , m_shaderType (shaderType)
1399 , m_shaderStage (shaderStage)
1400 , m_bufferFormat (bufferFormat)
1401 , m_bufferAccessType (bufferAccessType)
1402 , m_accessOutOfBackingMemory (accessOutOfBackingMemory)
1403 {
1404 tcu::TestLog& log = context.getTestContext().getLog();
1405 const DeviceInterface& vk = *m_deviceDriver;
1406 const auto& vki = context.getInstanceInterface();
1407 const auto instance = context.getInstance();
1408 const deUint32 queueFamilyIndex = context.getUniversalQueueFamilyIndex();
1409 const VkPhysicalDevice physicalDevice = chooseDevice(vki, instance, context.getTestContext().getCommandLine());
1410 SimpleAllocator memAlloc (vk, *m_device, getPhysicalDeviceMemoryProperties(vki, physicalDevice));
1411
1412 DE_ASSERT(RobustAccessWithPointersTest::s_numberOfBytesAccessed % sizeof(deUint32) == 0);
1413 DE_ASSERT(inBufferAccessRange <= RobustAccessWithPointersTest::s_numberOfBytesAccessed);
1414 DE_ASSERT(outBufferAccessRange <= RobustAccessWithPointersTest::s_numberOfBytesAccessed);
1415
1416 if (m_bufferFormat == VK_FORMAT_R64_UINT || m_bufferFormat == VK_FORMAT_R64_SINT)
1417 {
1418 context.requireDeviceFunctionality("VK_EXT_shader_image_atomic_int64");
1419 }
1420
1421 // Check storage support
1422 if (shaderStage == VK_SHADER_STAGE_VERTEX_BIT)
1423 {
1424 if (!context.getDeviceFeatures().vertexPipelineStoresAndAtomics)
1425 {
1426 TCU_THROW(NotSupportedError, "Stores not supported in vertex stage");
1427 }
1428 }
1429 else if (shaderStage == VK_SHADER_STAGE_FRAGMENT_BIT)
1430 {
1431 if (!context.getDeviceFeatures().fragmentStoresAndAtomics)
1432 {
1433 TCU_THROW(NotSupportedError, "Stores not supported in fragment stage");
1434 }
1435 }
1436
1437 createTestBuffer(context, vk, *m_device, inBufferAccessRange, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, memAlloc, m_inBuffer, m_inBufferAlloc, m_inBufferAccess, &populateBufferWithValues, &m_bufferFormat);
1438 createTestBuffer(context, vk, *m_device, outBufferAccessRange, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, memAlloc, m_outBuffer, m_outBufferAlloc, m_outBufferAccess, &populateBufferWithFiller, DE_NULL);
1439
1440 deInt32 indices[] = {
1441 (m_accessOutOfBackingMemory && (m_bufferAccessType == BUFFER_ACCESS_TYPE_READ_FROM_STORAGE)) ? static_cast<deInt32>(RobustAccessWithPointersTest::s_testArraySize) - 1 : 0,
1442 (m_accessOutOfBackingMemory && (m_bufferAccessType == BUFFER_ACCESS_TYPE_WRITE_TO_STORAGE)) ? static_cast<deInt32>(RobustAccessWithPointersTest::s_testArraySize) - 1 : 0,
1443 0
1444 };
1445 AccessRangesData indicesAccess;
1446 createTestBuffer(context, vk, *m_device, 3 * sizeof(deInt32), VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, memAlloc, m_indicesBuffer, m_indicesBufferAlloc, indicesAccess, &populateBufferWithCopy, &indices);
1447
1448 log << tcu::TestLog::Message << "input buffer - alloc size: " << m_inBufferAccess.allocSize << tcu::TestLog::EndMessage;
1449 log << tcu::TestLog::Message << "input buffer - max access range: " << m_inBufferAccess.maxAccessRange << tcu::TestLog::EndMessage;
1450 log << tcu::TestLog::Message << "output buffer - alloc size: " << m_outBufferAccess.allocSize << tcu::TestLog::EndMessage;
1451 log << tcu::TestLog::Message << "output buffer - max access range: " << m_outBufferAccess.maxAccessRange << tcu::TestLog::EndMessage;
1452 log << tcu::TestLog::Message << "indices - input offset: " << indices[0] << tcu::TestLog::EndMessage;
1453 log << tcu::TestLog::Message << "indices - output offset: " << indices[1] << tcu::TestLog::EndMessage;
1454 log << tcu::TestLog::Message << "indices - additional: " << indices[2] << tcu::TestLog::EndMessage;
1455
1456 // Create descriptor data
1457 {
1458 DescriptorPoolBuilder descriptorPoolBuilder;
1459 descriptorPoolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1u);
1460 descriptorPoolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1u);
1461 descriptorPoolBuilder.addType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1u);
1462 m_descriptorPool = descriptorPoolBuilder.build(vk, *m_device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
1463
1464 DescriptorSetLayoutBuilder setLayoutBuilder;
1465 setLayoutBuilder.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_ALL);
1466 setLayoutBuilder.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_ALL);
1467 setLayoutBuilder.addSingleBinding(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, VK_SHADER_STAGE_ALL);
1468 m_descriptorSetLayout = setLayoutBuilder.build(vk, *m_device);
1469
1470 const VkDescriptorSetAllocateInfo descriptorSetAllocateInfo =
1471 {
1472 VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO, // VkStructureType sType;
1473 DE_NULL, // const void* pNext;
1474 *m_descriptorPool, // VkDescriptorPool descriptorPool;
1475 1u, // deUint32 setLayoutCount;
1476 &m_descriptorSetLayout.get() // const VkDescriptorSetLayout* pSetLayouts;
1477 };
1478
1479 m_descriptorSet = allocateDescriptorSet(vk, *m_device, &descriptorSetAllocateInfo);
1480
1481 const VkDescriptorBufferInfo inBufferDescriptorInfo = makeDescriptorBufferInfo(*m_inBuffer, 0ull, m_inBufferAccess.accessRange);
1482 const VkDescriptorBufferInfo outBufferDescriptorInfo = makeDescriptorBufferInfo(*m_outBuffer, 0ull, m_outBufferAccess.accessRange);
1483 const VkDescriptorBufferInfo indicesBufferDescriptorInfo = makeDescriptorBufferInfo(*m_indicesBuffer, 0ull, 12ull);
1484
1485 DescriptorSetUpdateBuilder setUpdateBuilder;
1486 setUpdateBuilder.writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &inBufferDescriptorInfo);
1487 setUpdateBuilder.writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &outBufferDescriptorInfo);
1488 setUpdateBuilder.writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(2), VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, &indicesBufferDescriptorInfo);
1489 setUpdateBuilder.update(vk, *m_device);
1490 }
1491
1492 // Create fence
1493 {
1494 const VkFenceCreateInfo fenceParams =
1495 {
1496 VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, // VkStructureType sType;
1497 DE_NULL, // const void* pNext;
1498 0u // VkFenceCreateFlags flags;
1499 };
1500
1501 m_fence = createFence(vk, *m_device, &fenceParams);
1502 }
1503
1504 // Get queue
1505 vk.getDeviceQueue(*m_device, queueFamilyIndex, 0, &m_queue);
1506
1507 if (m_shaderStage == VK_SHADER_STAGE_COMPUTE_BIT)
1508 {
1509 m_testEnvironment = de::MovePtr<TestEnvironment>(new ComputeEnvironment(m_context, *m_deviceDriver, *m_device, *m_descriptorSetLayout, *m_descriptorSet));
1510 }
1511 else
1512 {
1513 using tcu::Vec4;
1514
1515 const VkVertexInputBindingDescription vertexInputBindingDescription =
1516 {
1517 0u, // deUint32 binding;
1518 sizeof(tcu::Vec4), // deUint32 strideInBytes;
1519 VK_VERTEX_INPUT_RATE_VERTEX // VkVertexInputStepRate inputRate;
1520 };
1521
1522 const VkVertexInputAttributeDescription vertexInputAttributeDescription =
1523 {
1524 0u, // deUint32 location;
1525 0u, // deUint32 binding;
1526 VK_FORMAT_R32G32B32A32_SFLOAT, // VkFormat format;
1527 0u // deUint32 offset;
1528 };
1529
1530 AccessRangesData vertexAccess;
1531 const Vec4 vertices[] =
1532 {
1533 Vec4(-1.0f, -1.0f, 0.0f, 1.0f),
1534 Vec4(-1.0f, 1.0f, 0.0f, 1.0f),
1535 Vec4( 1.0f, -1.0f, 0.0f, 1.0f),
1536 };
1537 const VkDeviceSize vertexBufferSize = static_cast<VkDeviceSize>(sizeof(vertices));
1538 createTestBuffer(context, vk, *m_device, vertexBufferSize, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, memAlloc, m_vertexBuffer, m_vertexBufferAlloc, vertexAccess, &populateBufferWithCopy, &vertices);
1539
1540 const GraphicsEnvironment::DrawConfig drawWithOneVertexBuffer =
1541 {
1542 std::vector<VkBuffer>(1, *m_vertexBuffer), // std::vector<VkBuffer> vertexBuffers;
1543 DE_LENGTH_OF_ARRAY(vertices), // deUint32 vertexCount;
1544 1, // deUint32 instanceCount;
1545 DE_NULL, // VkBuffer indexBuffer;
1546 0u, // deUint32 indexCount;
1547 };
1548
1549 m_testEnvironment = de::MovePtr<TestEnvironment>(new GraphicsEnvironment(m_context,
1550 *m_deviceDriver,
1551 *m_device,
1552 *m_descriptorSetLayout,
1553 *m_descriptorSet,
1554 GraphicsEnvironment::VertexBindings(1, vertexInputBindingDescription),
1555 GraphicsEnvironment::VertexAttributes(1, vertexInputAttributeDescription),
1556 drawWithOneVertexBuffer));
1557 }
1558 }
1559
// Empty destructor: all Vulkan objects and allocations are owned by
// Move<>/MovePtr<> members and are released automatically.
AccessInstance::~AccessInstance()
{
}
1563
1564 // Verifies if the buffer has the value initialized by BufferAccessInstance::populateReadBuffer at a given offset.
isExpectedValueFromInBuffer(VkDeviceSize offsetInBytes,const void * valuePtr,VkDeviceSize valueSize)1565 bool AccessInstance::isExpectedValueFromInBuffer (VkDeviceSize offsetInBytes,
1566 const void* valuePtr,
1567 VkDeviceSize valueSize)
1568 {
1569 DE_ASSERT(offsetInBytes % 4 == 0);
1570 DE_ASSERT(offsetInBytes < m_inBufferAccess.allocSize);
1571 DE_ASSERT(valueSize == 4ull || valueSize == 8ull);
1572
1573 const deUint32 valueIndex = deUint32(offsetInBytes / 4) + 2;
1574
1575 if (isUintFormat(m_bufferFormat))
1576 {
1577 const deUint32 expectedValues[2] = { valueIndex, valueIndex + 1u };
1578 return !deMemCmp(valuePtr, &expectedValues, (size_t)valueSize);
1579 }
1580 else if (isIntFormat(m_bufferFormat))
1581 {
1582 const deInt32 value = -deInt32(valueIndex);
1583 const deInt32 expectedValues[2] = { value, value - 1 };
1584 return !deMemCmp(valuePtr, &expectedValues, (size_t)valueSize);
1585 }
1586 else if (isFloatFormat(m_bufferFormat))
1587 {
1588 DE_ASSERT(valueSize == 4ull);
1589 const float value = float(valueIndex);
1590 return !deMemCmp(valuePtr, &value, (size_t)valueSize);
1591 }
1592 else
1593 {
1594 DE_ASSERT(false);
1595 return false;
1596 }
1597 }
1598
isOutBufferValueUnchanged(VkDeviceSize offsetInBytes,VkDeviceSize valueSize)1599 bool AccessInstance::isOutBufferValueUnchanged (VkDeviceSize offsetInBytes, VkDeviceSize valueSize)
1600 {
1601 DE_ASSERT(valueSize <= 8);
1602 const deUint8 *const outValuePtr = (deUint8*)m_outBufferAlloc->getHostPtr() + offsetInBytes;
1603 const deUint64 defaultValue = 0xBABABABABABABABAull;
1604
1605 return !deMemCmp(outValuePtr, &defaultValue, (size_t)valueSize);
1606 }
1607
// Submits the prerecorded command buffer once, waits for completion and
// verifies the output buffer contents on the host.
tcu::TestStatus AccessInstance::iterate (void)
{
	const DeviceInterface&		vk			= *m_deviceDriver;
	const vk::VkCommandBuffer	cmdBuffer	= m_testEnvironment->getCommandBuffer();

	// Submit command buffer
	{
		const VkSubmitInfo	submitInfo	=
		{
			VK_STRUCTURE_TYPE_SUBMIT_INFO,	// VkStructureType				sType;
			DE_NULL,						// const void*					pNext;
			0u,								// deUint32						waitSemaphoreCount;
			DE_NULL,						// const VkSemaphore*			pWaitSemaphores;
			DE_NULL,						// const VkPipelineStageFlags*	pWaitDstStageMask;
			1u,								// deUint32						commandBufferCount;
			&cmdBuffer,						// const VkCommandBuffer*		pCommandBuffers;
			0u,								// deUint32						signalSemaphoreCount;
			DE_NULL							// const VkSemaphore*			pSignalSemaphores;
		};

		VK_CHECK(vk.resetFences(*m_device, 1, &m_fence.get()));
		VK_CHECK(vk.queueSubmit(m_queue, 1, &submitInfo, *m_fence));
		// Wait forever: robust out-of-bounds accesses must still terminate.
		VK_CHECK(vk.waitForFences(*m_device, 1, &m_fence.get(), true, ~(0ull) /* infinity */));
	}

	// Prepare result buffer for read: invalidate the host mapping so CPU reads
	// observe the device writes (the allocation may be non-coherent).
	{
		const VkMappedMemoryRange	outBufferRange	=
		{
			VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,	//  VkStructureType	sType;
			DE_NULL,								//  const void*		pNext;
			m_outBufferAlloc->getMemory(),			//  VkDeviceMemory	mem;
			0ull,									//  VkDeviceSize	offset;
			m_outBufferAccess.allocSize,			//  VkDeviceSize	size;
		};

		VK_CHECK(vk.invalidateMappedMemoryRanges(*m_device, 1u, &outBufferRange));
	}

	if (verifyResult())
		return tcu::TestStatus::pass("All values OK");
	else
		return tcu::TestStatus::fail("Invalid value(s) found");
}
1652
// Walks the whole output allocation and checks every element against the set of
// behaviours allowed by robust buffer access. When splitAccess is true, 64-bit
// elements are re-checked as two independent 32-bit accesses (the spec permits
// decomposing multiple-of-32-bit accesses into individually bounds-checked
// 32-bit accesses).
bool AccessInstance::verifyResult (bool splitAccess)
{
	std::ostringstream	logMsg;
	tcu::TestLog&		log					= m_context.getTestContext().getLog();
	const bool			isReadAccess		= (m_bufferAccessType == BUFFER_ACCESS_TYPE_READ_FROM_STORAGE);
	const void*			inDataPtr			= m_inBufferAlloc->getHostPtr();
	const void*			outDataPtr			= m_outBufferAlloc->getHostPtr();
	bool				allOk				= true;
	deUint32			valueNdx			= 0;
	const VkDeviceSize	maxAccessRange		= isReadAccess ? m_inBufferAccess.maxAccessRange : m_outBufferAccess.maxAccessRange;
	const bool			isR64				= (m_bufferFormat == VK_FORMAT_R64_UINT || m_bufferFormat == VK_FORMAT_R64_SINT);
	// Size of one shader-visible element; when splitAccess is set, 64-bit data is
	// still verified 4 bytes at a time.
	const deUint32		unsplitElementSize	= (isR64 ? 8u : 4u);
	const deUint32		elementSize			= ((isR64 && !splitAccess) ? 8u : 4u);

	for (VkDeviceSize offsetInBytes = 0; offsetInBytes < m_outBufferAccess.allocSize; offsetInBytes += elementSize)
	{
		const deUint8*	outValuePtr		= static_cast<const deUint8*>(outDataPtr) + offsetInBytes;
		// Clamp the compared size at the end of the allocation.
		const size_t	outValueSize	= static_cast<size_t>(deMinu64(elementSize, (m_outBufferAccess.allocSize - offsetInBytes)));

		if (offsetInBytes >= RobustAccessWithPointersTest::s_numberOfBytesAccessed)
		{
			// The shader will only write 16 values into the result buffer. The rest of the values
			// should remain unchanged or may be modified if we are writing out of bounds.
			if (!isOutBufferValueUnchanged(offsetInBytes, outValueSize)
				&& (isReadAccess || !isValueWithinBufferOrZero(inDataPtr, m_inBufferAccess.allocSize, outValuePtr, 4)))
			{
				logMsg << "\nValue " << valueNdx++ << " has been modified with an unknown value: " << *(static_cast<const deUint32*>(static_cast<const void*>(outValuePtr)));
				allOk = false;
			}
		}
		else
		{
			// How many bytes remain before the access range ends; <= 0 means the
			// whole element is out of bounds.
			const deInt32	distanceToOutOfBounds	= static_cast<deInt32>(maxAccessRange) - static_cast<deInt32>(offsetInBytes);
			bool			isOutOfBoundsAccess		= false;

			logMsg << "\n" << valueNdx++ << ": ";

			logValue(logMsg, outValuePtr, m_bufferFormat, outValueSize);

			// When accessing entirely outside the backing memory, every access is out of bounds.
			if (m_accessOutOfBackingMemory)
				isOutOfBoundsAccess = true;

			// Check if the shader operation accessed an operand located less than 16 bytes away
			// from the out of bounds address. Less than 32 bytes away for 64 bit accesses.
			if (!isOutOfBoundsAccess && distanceToOutOfBounds < (isR64 ? 32 : 16))
			{
				deUint32 operandSize = 0;

				// The whole operand (scalar/vec4/mat4) is treated as out of bounds
				// if any part of it crosses the access range.
				switch (m_shaderType)
				{
					case SHADER_TYPE_SCALAR_COPY:
						operandSize		= unsplitElementSize; // Size of scalar
						break;

					case SHADER_TYPE_VECTOR_COPY:
						operandSize		= unsplitElementSize * 4; // Size of vec4
						break;

					case SHADER_TYPE_MATRIX_COPY:
						operandSize		= unsplitElementSize * 16; // Size of mat4
						break;

					default:
						DE_ASSERT(false);
				}

				isOutOfBoundsAccess	= (((offsetInBytes / operandSize) + 1) * operandSize > maxAccessRange);
			}

			if (isOutOfBoundsAccess)
			{
				logMsg << " (out of bounds " << (isReadAccess ? "read": "write") << ")";

				const bool	isValuePartiallyOutOfBounds	= ((distanceToOutOfBounds > 0) && ((deUint32)distanceToOutOfBounds < elementSize));
				bool		isValidValue				= false;

				if (isValuePartiallyOutOfBounds && !m_accessOutOfBackingMemory)
				{
					// The value is partially out of bounds

					bool	isOutOfBoundsPartOk		= true;
					bool	isWithinBoundsPartOk	= true;

					deUint32 inBoundPartSize = distanceToOutOfBounds;

					// For cases that partial element is out of bound, the part within the buffer allocated memory can be buffer content per spec.
					// We need to check it as a whole part.
					if (offsetInBytes + elementSize > m_inBufferAccess.allocSize)
					{
						inBoundPartSize = static_cast<deInt32>(m_inBufferAccess.allocSize) - static_cast<deInt32>(offsetInBytes);
					}

					if (isReadAccess)
					{
						isWithinBoundsPartOk	= isValueWithinBufferOrZero(inDataPtr, m_inBufferAccess.allocSize, outValuePtr, inBoundPartSize);
						isOutOfBoundsPartOk		= isValueWithinBufferOrZero(inDataPtr, m_inBufferAccess.allocSize, (deUint8*)outValuePtr + inBoundPartSize, outValueSize - inBoundPartSize);
					}
					else
					{
						// For writes, each part may also simply have been left untouched.
						isWithinBoundsPartOk	= isValueWithinBufferOrZero(inDataPtr, m_inBufferAccess.allocSize, outValuePtr, inBoundPartSize)
												  || isOutBufferValueUnchanged(offsetInBytes, inBoundPartSize);

						isOutOfBoundsPartOk		= isValueWithinBufferOrZero(inDataPtr, m_inBufferAccess.allocSize, (deUint8*)outValuePtr + inBoundPartSize, outValueSize - inBoundPartSize)
												  || isOutBufferValueUnchanged(offsetInBytes + inBoundPartSize, outValueSize - inBoundPartSize);
					}

					logMsg << ", first " << distanceToOutOfBounds << " byte(s) " << (isWithinBoundsPartOk ? "OK": "wrong");
					logMsg << ", last " << outValueSize - distanceToOutOfBounds << " byte(s) " << (isOutOfBoundsPartOk ? "OK": "wrong");

					isValidValue	= isWithinBoundsPartOk && isOutOfBoundsPartOk;
				}
				else
				{
					if (isReadAccess)
					{
						isValidValue	= isValueWithinBufferOrZero(inDataPtr, m_inBufferAccess.allocSize, outValuePtr, outValueSize);
					}
					else
					{
						isValidValue	= isOutBufferValueUnchanged(offsetInBytes, outValueSize);

						if (!isValidValue)
						{
							// Out of bounds writes may modify values within the memory ranges bound to the buffer
							isValidValue	= isValueWithinBufferOrZero(inDataPtr, m_inBufferAccess.allocSize, outValuePtr, outValueSize);

							if (isValidValue)
								logMsg << ", OK, written within the memory range bound to the buffer";
						}
					}
				}

				if (!isValidValue && !splitAccess)
				{
					// Check if we are satisfying the [0, 0, 0, x] pattern, where x may be either 0 or 1,
					// or the maximum representable positive integer value (if the format is integer-based).

					const bool	canMatchVec4Pattern	= (isReadAccess
													&& !isValuePartiallyOutOfBounds
													&& (m_shaderType == SHADER_TYPE_VECTOR_COPY)
													&& (offsetInBytes / elementSize + 1) % 4 == 0);
					bool		matchesVec4Pattern	= false;

					if (canMatchVec4Pattern)
					{
						// Look back at the whole vec4 this element terminates.
						matchesVec4Pattern	= verifyOutOfBoundsVec4(outValuePtr - 3u * elementSize, m_bufferFormat);
					}

					if (!canMatchVec4Pattern || !matchesVec4Pattern)
					{
						logMsg << ". Failed: ";

						if (isReadAccess)
						{
							logMsg << "expected value within the buffer range or 0";

							if (canMatchVec4Pattern)
								logMsg << ", or the [0, 0, 0, x] pattern";
						}
						else
						{
							logMsg << "written out of the range";
						}

						allOk = false;
					}
				}
			}
			else // We are within bounds
			{
				if (isReadAccess)
				{
					if (!isExpectedValueFromInBuffer(offsetInBytes, outValuePtr, elementSize))
					{
						logMsg << ", Failed: unexpected value";
						allOk = false;
					}
				}
				else
				{
					// Out of bounds writes may change values within the bounds.
					if (!isValueWithinBufferOrZero(inDataPtr, m_inBufferAccess.accessRange, outValuePtr, elementSize))
					{
						logMsg << ", Failed: unexpected value";
						allOk = false;
					}
				}
			}
		}
	}

	log << tcu::TestLog::Message << logMsg.str() << tcu::TestLog::EndMessage;

	if (!allOk && unsplitElementSize > 4u && !splitAccess)
	{
		// "Non-atomic accesses to storage buffers that are a multiple of 32 bits may be decomposed into 32-bit accesses that are individually bounds-checked."
		return verifyResult(true/*splitAccess*/);
	}

	return allOk;
}
1854
// ReadInstance

// Test instance reading s_numberOfBytesAccessed bytes from a storage buffer
// whose descriptor only exposes inBufferAccessRange bytes; reads past that
// range (or past the backing memory, if accessOutOfBackingMemory) must be robust.
ReadInstance::ReadInstance (Context&			context,
							Move<VkDevice>		device,
#ifndef CTS_USES_VULKANSC
							de::MovePtr<vk::DeviceDriver>	deviceDriver,
#else
							de::MovePtr<vk::DeviceDriverSC, vk::DeinitDeviceDeleter>	deviceDriver,
#endif // CTS_USES_VULKANSC
							ShaderType			shaderType,
							VkShaderStageFlags	shaderStage,
							VkFormat			bufferFormat,
							//bool				readFromStorage,
							VkDeviceSize		inBufferAccessRange,
							bool				accessOutOfBackingMemory)

	: AccessInstance	(context, device, deviceDriver, shaderType, shaderStage, bufferFormat,
						 BUFFER_ACCESS_TYPE_READ_FROM_STORAGE,
						 inBufferAccessRange, RobustAccessWithPointersTest::s_numberOfBytesAccessed,
						 accessOutOfBackingMemory)
{
}
1877
// WriteInstance

// Test instance writing s_numberOfBytesAccessed bytes into a storage buffer
// whose descriptor only exposes writeBufferAccessRange bytes; writes past that
// range (or past the backing memory, if accessOutOfBackingMemory) must be robust.
WriteInstance::WriteInstance (Context&				context,
							  Move<VkDevice>		device,
#ifndef CTS_USES_VULKANSC
							  de::MovePtr<vk::DeviceDriver>	deviceDriver,
#else
							  de::MovePtr<vk::DeviceDriverSC, vk::DeinitDeviceDeleter>	deviceDriver,
#endif // CTS_USES_VULKANSC
							  ShaderType			shaderType,
							  VkShaderStageFlags	shaderStage,
							  VkFormat				bufferFormat,
							  VkDeviceSize			writeBufferAccessRange,
							  bool					accessOutOfBackingMemory)

	: AccessInstance	(context, device, deviceDriver, shaderType, shaderStage, bufferFormat,
						 BUFFER_ACCESS_TYPE_WRITE_TO_STORAGE,
						 RobustAccessWithPointersTest::s_numberOfBytesAccessed, writeBufferAccessRange,
						 accessOutOfBackingMemory)
{
}
1899
1900 } // unnamed namespace
1901
createBufferAccessWithVariablePointersTests(tcu::TestContext & testCtx)1902 tcu::TestCaseGroup* createBufferAccessWithVariablePointersTests(tcu::TestContext& testCtx)
1903 {
1904 // Lets make group for the tests
1905 de::MovePtr<tcu::TestCaseGroup> bufferAccessWithVariablePointersTests (new tcu::TestCaseGroup(testCtx, "through_pointers", ""));
1906
1907 // Lets add subgroups to better organise tests
1908 de::MovePtr<tcu::TestCaseGroup> computeWithVariablePointersTests (new tcu::TestCaseGroup(testCtx, "compute", ""));
1909 de::MovePtr<tcu::TestCaseGroup> computeReads (new tcu::TestCaseGroup(testCtx, "reads", ""));
1910 de::MovePtr<tcu::TestCaseGroup> computeWrites (new tcu::TestCaseGroup(testCtx, "writes", ""));
1911
1912 de::MovePtr<tcu::TestCaseGroup> graphicsWithVariablePointersTests (new tcu::TestCaseGroup(testCtx, "graphics", ""));
1913 de::MovePtr<tcu::TestCaseGroup> graphicsReads (new tcu::TestCaseGroup(testCtx, "reads", ""));
1914 de::MovePtr<tcu::TestCaseGroup> graphicsReadsVertex (new tcu::TestCaseGroup(testCtx, "vertex", ""));
1915 de::MovePtr<tcu::TestCaseGroup> graphicsReadsFragment (new tcu::TestCaseGroup(testCtx, "fragment", ""));
1916 de::MovePtr<tcu::TestCaseGroup> graphicsWrites (new tcu::TestCaseGroup(testCtx, "writes", ""));
1917 de::MovePtr<tcu::TestCaseGroup> graphicsWritesVertex (new tcu::TestCaseGroup(testCtx, "vertex", ""));
1918 de::MovePtr<tcu::TestCaseGroup> graphicsWritesFragment (new tcu::TestCaseGroup(testCtx, "fragment", ""));
1919
1920 // A struct for describing formats
1921 struct Formats
1922 {
1923 const VkFormat value;
1924 const char * const name;
1925 };
1926
1927 const Formats bufferFormats[] =
1928 {
1929 { VK_FORMAT_R32_SINT, "s32" },
1930 { VK_FORMAT_R32_UINT, "u32" },
1931 { VK_FORMAT_R32_SFLOAT, "f32" },
1932 { VK_FORMAT_R64_SINT, "s64" },
1933 { VK_FORMAT_R64_UINT, "u64" },
1934 };
1935 const deUint8 bufferFormatsCount = static_cast<deUint8>(DE_LENGTH_OF_ARRAY(bufferFormats));
1936
1937 // Amounts of data to copy
1938 const VkDeviceSize rangeSizes[] =
1939 {
1940 1ull, 3ull, 4ull, 16ull, 32ull
1941 };
1942 const deUint8 rangeSizesCount = static_cast<deUint8>(DE_LENGTH_OF_ARRAY(rangeSizes));
1943
1944 // gather above data into one array
1945 const struct ShaderTypes
1946 {
1947 const ShaderType value;
1948 const char * const name;
1949 const Formats* const formats;
1950 const deUint8 formatsCount;
1951 const VkDeviceSize* const sizes;
1952 const deUint8 sizesCount;
1953 } types[] =
1954 {
1955 { SHADER_TYPE_VECTOR_COPY, "vec4", bufferFormats, bufferFormatsCount, rangeSizes, rangeSizesCount },
1956 { SHADER_TYPE_SCALAR_COPY, "scalar", bufferFormats, bufferFormatsCount, rangeSizes, rangeSizesCount }
1957 };
1958
1959 // Specify to which subgroups put various tests
1960 const struct ShaderStages
1961 {
1962 VkShaderStageFlags stage;
1963 de::MovePtr<tcu::TestCaseGroup>& reads;
1964 de::MovePtr<tcu::TestCaseGroup>& writes;
1965 } stages[] =
1966 {
1967 { VK_SHADER_STAGE_VERTEX_BIT, graphicsReadsVertex, graphicsWritesVertex },
1968 { VK_SHADER_STAGE_FRAGMENT_BIT, graphicsReadsFragment, graphicsWritesFragment },
1969 { VK_SHADER_STAGE_COMPUTE_BIT, computeReads, computeWrites }
1970 };
1971
1972 // Eventually specify if memory used should be in the "inaccesible" portion of buffer or entirely outside of buffer
1973 const char* const backingMemory[] = { "in_memory", "out_of_memory" };
1974
1975 for (deInt32 stageId = 0; stageId < DE_LENGTH_OF_ARRAY(stages); ++stageId)
1976 for (int i = 0; i < DE_LENGTH_OF_ARRAY(types); ++i)
1977 for (int j = 0; j < types[i].formatsCount; ++j)
1978 for (int k = 0; k < types[i].sizesCount; ++k)
1979 for (int s = 0; s < DE_LENGTH_OF_ARRAY(backingMemory); ++s)
1980 {
1981 std::ostringstream name;
1982 name << types[i].sizes[k] << "B_" << backingMemory[s] << "_with_" << types[i].name << '_' << types[i].formats[j].name;
1983 stages[stageId].reads->addChild(new RobustReadTest(testCtx, name.str().c_str(), "", stages[stageId].stage, types[i].value, types[i].formats[j].value, types[i].sizes[k], s != 0));
1984 }
1985
1986 for (deInt32 stageId = 0; stageId < DE_LENGTH_OF_ARRAY(stages); ++stageId)
1987 for (int i=0; i<DE_LENGTH_OF_ARRAY(types); ++i)
1988 for (int j=0; j<types[i].formatsCount; ++j)
1989 for (int k = 0; k<types[i].sizesCount; ++k)
1990 for (int s = 0; s < DE_LENGTH_OF_ARRAY(backingMemory); ++s)
1991 {
1992 std::ostringstream name;
1993 name << types[i].sizes[k] << "B_" << backingMemory[s] << "_with_" << types[i].name << '_' << types[i].formats[j].name;
1994 stages[stageId].writes->addChild(new RobustWriteTest(testCtx, name.str().c_str(), "", stages[stageId].stage, types[i].value, types[i].formats[j].value, types[i].sizes[k], s != 0));
1995 }
1996
1997 graphicsReads->addChild(graphicsReadsVertex.release());
1998 graphicsReads->addChild(graphicsReadsFragment.release());
1999
2000 graphicsWrites->addChild(graphicsWritesVertex.release());
2001 graphicsWrites->addChild(graphicsWritesFragment.release());
2002
2003 graphicsWithVariablePointersTests->addChild(graphicsReads.release());
2004 graphicsWithVariablePointersTests->addChild(graphicsWrites.release());
2005
2006 computeWithVariablePointersTests->addChild(computeReads.release());
2007 computeWithVariablePointersTests->addChild(computeWrites.release());
2008
2009 bufferAccessWithVariablePointersTests->addChild(graphicsWithVariablePointersTests.release());
2010 bufferAccessWithVariablePointersTests->addChild(computeWithVariablePointersTests.release());
2011
2012 return bufferAccessWithVariablePointersTests.release();
2013 }
2014
2015 } // robustness
2016 } // vkt
2017