/*------------------------------------------------------------------------
 * Vulkan Conformance Tests
 * ------------------------
 *
 * Copyright (c) 2017-2019 The Khronos Group Inc.
 * Copyright (c) 2018-2020 NVIDIA Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*!
 * \file
 * \brief Vulkan robustness2 tests
 *//*--------------------------------------------------------------------*/

#include "vktRobustnessExtsTests.hpp"

#include "vkBufferWithMemory.hpp"
#include "vkImageWithMemory.hpp"
#include "vkImageUtil.hpp"
#include "vkQueryUtil.hpp"
#include "vkDeviceUtil.hpp"
#include "vkBuilderUtil.hpp"
#include "vkCmdUtil.hpp"
#include "vkTypeUtil.hpp"
#include "vkObjUtil.hpp"
#include "vkBarrierUtil.hpp"
#include "vktRobustnessUtil.hpp"

#include "vktTestGroupUtil.hpp"
#include "vktTestCase.hpp"

#include "deDefs.h"
#include "deMath.h"
#include "deRandom.h"
#include "deSharedPtr.hpp"
#include "deString.h"

#include "tcuVectorType.hpp"
#include "tcuTestCase.hpp"
#include "tcuTestLog.hpp"
#include "tcuImageCompare.hpp"

#include <string>
#include <sstream>
#include <algorithm>
#include <limits>

namespace vkt
{
namespace robustness
{
namespace
{
using namespace vk;
using namespace std;
using de::SharedPtr;
using BufferWithMemoryPtr = de::MovePtr<BufferWithMemory>;

enum RobustnessFeatureBits
{
    RF_IMG_ROBUSTNESS      = (1),
    RF_ROBUSTNESS2         = (1 << 1),
    RF_PIPELINE_ROBUSTNESS = (1 << 2),
};

using RobustnessFeatures = uint32_t;

// Class to wrap a singleton device with the indicated robustness features.
template <RobustnessFeatures FEATURES>
class SingletonDevice
{
    SingletonDevice(Context &context)
        : m_context(context)
#ifdef CTS_USES_VULKANSC
        , m_customInstance(createCustomInstanceFromContext(context))
#endif // CTS_USES_VULKANSC
        , m_logicalDevice()
    {
        // Note we are already checking the needed features are available in checkSupport().
        VkPhysicalDeviceExtendedDynamicStateFeaturesEXT edsFeatures = initVulkanStructure();
        VkPhysicalDeviceScalarBlockLayoutFeatures scalarBlockLayoutFeatures = initVulkanStructure();
        VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT shaderImageAtomicInt64Features = initVulkanStructure();
        VkPhysicalDeviceBufferDeviceAddressFeatures bufferDeviceAddressFeatures = initVulkanStructure();
        VkPhysicalDeviceRobustness2FeaturesEXT robustness2Features = initVulkanStructure();
        VkPhysicalDeviceImageRobustnessFeaturesEXT imageRobustnessFeatures = initVulkanStructure();
#ifndef CTS_USES_VULKANSC
        VkPhysicalDeviceRayTracingPipelineFeaturesKHR rayTracingPipelineFeatures = initVulkanStructure();
        VkPhysicalDeviceAccelerationStructureFeaturesKHR accelerationStructureFeatures = initVulkanStructure();
        VkPhysicalDevicePipelineRobustnessFeaturesEXT pipelineRobustnessFeatures = initVulkanStructure();
        VkPhysicalDeviceGraphicsPipelineLibraryFeaturesEXT gplFeatures = initVulkanStructure();
#endif // CTS_USES_VULKANSC
        VkPhysicalDeviceFeatures2 features2 = initVulkanStructure();

        const auto addFeatures = makeStructChainAdder(&features2);

        // Enable these ones if supported, as they're needed in some tests.
        if (context.isDeviceFunctionalitySupported("VK_EXT_extended_dynamic_state"))
            addFeatures(&edsFeatures);

        if (context.isDeviceFunctionalitySupported("VK_EXT_scalar_block_layout"))
            addFeatures(&scalarBlockLayoutFeatures);

        if (context.isDeviceFunctionalitySupported("VK_EXT_shader_image_atomic_int64"))
            addFeatures(&shaderImageAtomicInt64Features);

#ifndef CTS_USES_VULKANSC
        if (context.isDeviceFunctionalitySupported("VK_KHR_ray_tracing_pipeline"))
        {
            addFeatures(&accelerationStructureFeatures);
            addFeatures(&rayTracingPipelineFeatures);
        }

        if (context.isDeviceFunctionalitySupported("VK_EXT_graphics_pipeline_library"))
            addFeatures(&gplFeatures);
#endif // CTS_USES_VULKANSC

        if (context.isDeviceFunctionalitySupported("VK_KHR_buffer_device_address"))
            addFeatures(&bufferDeviceAddressFeatures);

        if (FEATURES & RF_IMG_ROBUSTNESS)
        {
            DE_ASSERT(context.isDeviceFunctionalitySupported("VK_EXT_image_robustness"));

            if (!(FEATURES & RF_PIPELINE_ROBUSTNESS))
                addFeatures(&imageRobustnessFeatures);
        }

        if (FEATURES & RF_ROBUSTNESS2)
        {
            DE_ASSERT(context.isDeviceFunctionalitySupported("VK_EXT_robustness2"));

            if (!(FEATURES & RF_PIPELINE_ROBUSTNESS))
                addFeatures(&robustness2Features);
        }

#ifndef CTS_USES_VULKANSC
        if (FEATURES & RF_PIPELINE_ROBUSTNESS)
            addFeatures(&pipelineRobustnessFeatures);
#endif

        const auto &vki = m_context.getInstanceInterface();
        const auto instance = m_context.getInstance();
        const auto physicalDevice = chooseDevice(vki, instance, context.getTestContext().getCommandLine());

        vki.getPhysicalDeviceFeatures2(physicalDevice, &features2);

#ifndef CTS_USES_VULKANSC
        if (FEATURES & RF_PIPELINE_ROBUSTNESS)
            features2.features.robustBufferAccess = VK_FALSE;
#endif
        m_logicalDevice = createRobustBufferAccessDevice(context,
#ifdef CTS_USES_VULKANSC
                                                         m_customInstance,
#endif // CTS_USES_VULKANSC
                                                         &features2);

#ifndef CTS_USES_VULKANSC
        m_deviceDriver = de::MovePtr<DeviceDriver>(new DeviceDriver(context.getPlatformInterface(), instance,
                                                                    *m_logicalDevice, context.getUsedApiVersion(),
                                                                    context.getTestContext().getCommandLine()));
#else
        m_deviceDriver = de::MovePtr<DeviceDriverSC, DeinitDeviceDeleter>(
            new DeviceDriverSC(context.getPlatformInterface(), instance, *m_logicalDevice,
                               context.getTestContext().getCommandLine(), context.getResourceInterface(),
                               m_context.getDeviceVulkanSC10Properties(), m_context.getDeviceProperties(),
                               context.getUsedApiVersion()),
            vk::DeinitDeviceDeleter(context.getResourceInterface().get(), *m_logicalDevice));
#endif // CTS_USES_VULKANSC
    }

public:
    ~SingletonDevice()
    {
    }

    static VkDevice getDevice(Context &context)
    {
        if (!m_singletonDevice)
            m_singletonDevice = SharedPtr<SingletonDevice>(new SingletonDevice(context));
        DE_ASSERT(m_singletonDevice);
        return m_singletonDevice->m_logicalDevice.get();
    }
    static const DeviceInterface &getDeviceInterface(Context &context)
    {
        if (!m_singletonDevice)
            m_singletonDevice = SharedPtr<SingletonDevice>(new SingletonDevice(context));
        DE_ASSERT(m_singletonDevice);
        return *(m_singletonDevice->m_deviceDriver.get());
    }

    static void destroy()
    {
        m_singletonDevice.clear();
    }

private:
    const Context &m_context;
#ifndef CTS_USES_VULKANSC
    Move<vk::VkDevice> m_logicalDevice;
    de::MovePtr<vk::DeviceDriver> m_deviceDriver;
#else
    // Construction needs to happen in this exact order to ensure proper resource destruction
    CustomInstance m_customInstance;
    Move<vk::VkDevice> m_logicalDevice;
    de::MovePtr<vk::DeviceDriverSC, vk::DeinitDeviceDeleter> m_deviceDriver;
#endif // CTS_USES_VULKANSC

    static SharedPtr<SingletonDevice<FEATURES>> m_singletonDevice;
};

template <RobustnessFeatures FEATURES>
SharedPtr<SingletonDevice<FEATURES>> SingletonDevice<FEATURES>::m_singletonDevice;

using ImageRobustnessSingleton = SingletonDevice<RF_IMG_ROBUSTNESS>;
using Robustness2Singleton = SingletonDevice<RF_ROBUSTNESS2>;

using PipelineRobustnessImageRobustnessSingleton = SingletonDevice<RF_IMG_ROBUSTNESS | RF_PIPELINE_ROBUSTNESS>;
using PipelineRobustnessRobustness2Singleton = SingletonDevice<RF_ROBUSTNESS2 | RF_PIPELINE_ROBUSTNESS>;

// Render target / compute grid dimensions
static const uint32_t DIM = 8;

// treated as a phony VkDescriptorType value
#define VERTEX_ATTRIBUTE_FETCH 999
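// The value 999 is deliberately far outside the valid VkDescriptorType range, so CaseDef can
// carry "vertex attribute fetch" through the same switch statements as the real descriptor types.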

typedef enum
{
    STAGE_COMPUTE = 0,
    STAGE_VERTEX,
    STAGE_FRAGMENT,
    STAGE_RAYGEN
} Stage;

enum class PipelineRobustnessCase
{
    DISABLED = 0,
    ENABLED_MONOLITHIC,
    ENABLED_FAST_GPL,
    ENABLED_OPTIMIZED_GPL,
};

PipelineConstructionType getConstructionTypeFromRobustnessCase(PipelineRobustnessCase prCase)
{
    if (prCase == PipelineRobustnessCase::ENABLED_FAST_GPL)
        return PIPELINE_CONSTRUCTION_TYPE_FAST_LINKED_LIBRARY;
    if (prCase == PipelineRobustnessCase::ENABLED_OPTIMIZED_GPL)
        return PIPELINE_CONSTRUCTION_TYPE_LINK_TIME_OPTIMIZED_LIBRARY;
    return PIPELINE_CONSTRUCTION_TYPE_MONOLITHIC;
}

struct CaseDef
{
    VkFormat format;
    Stage stage;
    VkFlags allShaderStages;
    VkFlags allPipelineStages;
    int /*VkDescriptorType*/ descriptorType;
    VkImageViewType viewType;
    VkSampleCountFlagBits samples;
    int bufferLen;
    bool unroll;
    bool vol;
    bool nullDescriptor;
    bool useTemplate;
    bool formatQualifier;
    bool pushDescriptor;
    bool testRobustness2;
    PipelineRobustnessCase pipelineRobustnessCase;
    uint32_t imageDim[3]; // width, height, depth or layers
    bool readOnly;

    bool needsScalarBlockLayout() const
    {
        bool scalarNeeded = false;

        switch (descriptorType)
        {
        case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
        case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
        case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
        case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
            scalarNeeded = true;
            break;
        default:
            scalarNeeded = false;
            break;
        }

        return scalarNeeded;
    }

    bool needsPipelineRobustness(void) const
    {
        return (pipelineRobustnessCase != PipelineRobustnessCase::DISABLED);
    }
};

static bool formatIsR64(const VkFormat &f)
{
    switch (f)
    {
    case VK_FORMAT_R64_SINT:
    case VK_FORMAT_R64_UINT:
        return true;
    default:
        return false;
    }
}

// Returns the appropriate singleton device for the given case.
VkDevice getLogicalDevice(Context &ctx, const bool testRobustness2, const bool testPipelineRobustness)
{
    if (testPipelineRobustness)
    {
        if (testRobustness2)
            return PipelineRobustnessRobustness2Singleton::getDevice(ctx);
        return PipelineRobustnessImageRobustnessSingleton::getDevice(ctx);
    }

    if (testRobustness2)
        return Robustness2Singleton::getDevice(ctx);
    return ImageRobustnessSingleton::getDevice(ctx);
}

// Returns the appropriate singleton device driver for the given case.
const DeviceInterface &getDeviceInterface(Context &ctx, const bool testRobustness2, const bool testPipelineRobustness)
{
    if (testPipelineRobustness)
    {
        if (testRobustness2)
            return PipelineRobustnessRobustness2Singleton::getDeviceInterface(ctx);
        return PipelineRobustnessImageRobustnessSingleton::getDeviceInterface(ctx);
    }

    if (testRobustness2)
        return Robustness2Singleton::getDeviceInterface(ctx);
    return ImageRobustnessSingleton::getDeviceInterface(ctx);
}

class Layout
{
public:
    vector<VkDescriptorSetLayoutBinding> layoutBindings;
    vector<uint8_t> refData;
};

class RobustnessExtsTestInstance : public TestInstance
{
public:
    RobustnessExtsTestInstance(Context &context, const CaseDef &data);
    ~RobustnessExtsTestInstance(void);
    tcu::TestStatus iterate(void);

private:
    CaseDef m_data;
};

RobustnessExtsTestInstance::RobustnessExtsTestInstance(Context &context, const CaseDef &data)
    : vkt::TestInstance(context)
    , m_data(data)
{
}

RobustnessExtsTestInstance::~RobustnessExtsTestInstance(void)
{
}

class RobustnessExtsTestCase : public TestCase
{
public:
    RobustnessExtsTestCase(tcu::TestContext &context, const std::string &name, const CaseDef data);
    ~RobustnessExtsTestCase(void);
    virtual void initPrograms(SourceCollections &programCollection) const;
    virtual TestInstance *createInstance(Context &context) const;
    virtual void checkSupport(Context &context) const;

private:
    CaseDef m_data;
};

RobustnessExtsTestCase::RobustnessExtsTestCase(tcu::TestContext &context, const std::string &name, const CaseDef data)
    : vkt::TestCase(context, name)
    , m_data(data)
{
}

RobustnessExtsTestCase::~RobustnessExtsTestCase(void)
{
}

static bool formatIsFloat(const VkFormat &f)
{
    switch (f)
    {
    case VK_FORMAT_R32_SFLOAT:
    case VK_FORMAT_R32G32_SFLOAT:
    case VK_FORMAT_R32G32B32A32_SFLOAT:
        return true;
    default:
        return false;
    }
}

static bool formatIsSignedInt(const VkFormat &f)
{
    switch (f)
    {
    case VK_FORMAT_R32_SINT:
    case VK_FORMAT_R64_SINT:
    case VK_FORMAT_R32G32_SINT:
    case VK_FORMAT_R32G32B32A32_SINT:
        return true;
    default:
        return false;
    }
}

static bool supportsStores(int descriptorType)
{
    switch (descriptorType)
    {
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
    case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
    case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
        return true;
    default:
        return false;
    }
}

#ifndef CTS_USES_VULKANSC
static VkPipelineRobustnessCreateInfoEXT getPipelineRobustnessInfo(bool robustness2, int descriptorType)
{
    VkPipelineRobustnessCreateInfoEXT robustnessCreateInfo = initVulkanStructure();
    robustnessCreateInfo.storageBuffers = VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DISABLED_EXT;
    robustnessCreateInfo.uniformBuffers = VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DISABLED_EXT;
    robustnessCreateInfo.vertexInputs = VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DISABLED_EXT;
    robustnessCreateInfo.images = VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_DISABLED_EXT;

    switch (descriptorType)
    {
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
    case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
        robustnessCreateInfo.storageBuffers =
            (robustness2 ? VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_2_EXT :
                           VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_EXT);
        break;

    case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
    case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
        robustnessCreateInfo.images = (robustness2 ? VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_ROBUST_IMAGE_ACCESS_2_EXT :
                                                     VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_ROBUST_IMAGE_ACCESS_EXT);
        break;

    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
    case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
        robustnessCreateInfo.uniformBuffers =
            (robustness2 ? VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_2_EXT :
                           VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_EXT);
        break;

    case VERTEX_ATTRIBUTE_FETCH:
        robustnessCreateInfo.vertexInputs =
            (robustness2 ? VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_2_EXT :
                           VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_EXT);
        break;

    default:
        DE_ASSERT(0);
    }

    return robustnessCreateInfo;
}
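// Per VK_EXT_pipeline_robustness, the structure built above is meant to be chained into the
// pNext of VkGraphicsPipelineCreateInfo, VkComputePipelineCreateInfo,
// VkRayTracingPipelineCreateInfoKHR or a single VkPipelineShaderStageCreateInfo to override
// robustness behavior for that pipeline or stage.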
#endif

void RobustnessExtsTestCase::checkSupport(Context &context) const
{
    const auto &vki = context.getInstanceInterface();
    const auto physicalDevice = context.getPhysicalDevice();

    checkPipelineConstructionRequirements(vki, physicalDevice,
                                          getConstructionTypeFromRobustnessCase(m_data.pipelineRobustnessCase));

    // We need to query some features using the physical device instead of using the reported context features because
    // robustness2 and image robustness are always disabled in the default device, but they may be available.
    VkPhysicalDeviceRobustness2FeaturesEXT robustness2Features = initVulkanStructure();
    VkPhysicalDeviceImageRobustnessFeaturesEXT imageRobustnessFeatures = initVulkanStructure();
    VkPhysicalDeviceScalarBlockLayoutFeatures scalarLayoutFeatures = initVulkanStructure();
    VkPhysicalDeviceFeatures2 features2 = initVulkanStructure();

    context.requireInstanceFunctionality("VK_KHR_get_physical_device_properties2");
    const auto addFeatures = makeStructChainAdder(&features2);

    if (context.isDeviceFunctionalitySupported("VK_EXT_scalar_block_layout"))
        addFeatures(&scalarLayoutFeatures);

    if (context.isDeviceFunctionalitySupported("VK_EXT_image_robustness"))
        addFeatures(&imageRobustnessFeatures);

    if (context.isDeviceFunctionalitySupported("VK_EXT_robustness2"))
        addFeatures(&robustness2Features);

#ifndef CTS_USES_VULKANSC
    VkPhysicalDevicePipelineRobustnessFeaturesEXT pipelineRobustnessFeatures = initVulkanStructure();
    if (context.isDeviceFunctionalitySupported("VK_EXT_pipeline_robustness"))
        addFeatures(&pipelineRobustnessFeatures);
#endif

    vki.getPhysicalDeviceFeatures2(physicalDevice, &features2);

    if (formatIsR64(m_data.format))
    {
        context.requireDeviceFunctionality("VK_EXT_shader_image_atomic_int64");

        VkFormatProperties formatProperties;
        vki.getPhysicalDeviceFormatProperties(physicalDevice, m_data.format, &formatProperties);

#ifndef CTS_USES_VULKANSC
        const VkFormatProperties3KHR formatProperties3 = context.getFormatProperties(m_data.format);
#endif // CTS_USES_VULKANSC

        switch (m_data.descriptorType)
        {
        case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
            if ((formatProperties.bufferFeatures & VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT) !=
                VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT)
                TCU_THROW(NotSupportedError, "VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT is not supported");
            break;
        case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
            if ((formatProperties.bufferFeatures & VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT) !=
                VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT)
                TCU_THROW(NotSupportedError, "VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT is not supported");
#ifndef CTS_USES_VULKANSC
            if ((formatProperties3.bufferFeatures & VK_FORMAT_FEATURE_2_STORAGE_READ_WITHOUT_FORMAT_BIT_KHR) !=
                VK_FORMAT_FEATURE_2_STORAGE_READ_WITHOUT_FORMAT_BIT_KHR)
                TCU_THROW(NotSupportedError, "VK_FORMAT_FEATURE_2_STORAGE_READ_WITHOUT_FORMAT_BIT is not supported");
#endif // CTS_USES_VULKANSC
            break;
        case VERTEX_ATTRIBUTE_FETCH:
            if ((formatProperties.bufferFeatures & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) !=
                VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT)
                TCU_THROW(NotSupportedError, "VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT is not supported");
            break;
        case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
            if ((formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT) !=
                VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)
                TCU_THROW(NotSupportedError, "VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT is not supported");
            break;
        default:
            DE_ASSERT(true);
        }

        if (m_data.samples > VK_SAMPLE_COUNT_1_BIT)
        {
            if ((formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT) !=
                VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)
                TCU_THROW(NotSupportedError, "VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT is not supported");
        }
    }

    // Check needed properties and features
    if (m_data.needsScalarBlockLayout() && !scalarLayoutFeatures.scalarBlockLayout)
        TCU_THROW(NotSupportedError, "Scalar block layout not supported");

    if (m_data.stage == STAGE_VERTEX && !features2.features.vertexPipelineStoresAndAtomics)
        TCU_THROW(NotSupportedError, "Vertex pipeline stores and atomics not supported");

    if (m_data.stage == STAGE_FRAGMENT && !features2.features.fragmentStoresAndAtomics)
        TCU_THROW(NotSupportedError, "Fragment shader stores not supported");

    if (m_data.stage == STAGE_RAYGEN)
        context.requireDeviceFunctionality("VK_KHR_ray_tracing_pipeline");

    switch (m_data.descriptorType)
    {
    default:
        DE_ASSERT(0); // Fallthrough
    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
    case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
    case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
    case VERTEX_ATTRIBUTE_FETCH:
        if (m_data.testRobustness2)
        {
            if (!robustness2Features.robustBufferAccess2)
                TCU_THROW(NotSupportedError, "robustBufferAccess2 not supported");
        }
        else
        {
            // This case is not tested here.
            DE_ASSERT(false);
        }
        break;
    case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
    case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
        if (m_data.testRobustness2)
        {
            if (!robustness2Features.robustImageAccess2)
                TCU_THROW(NotSupportedError, "robustImageAccess2 not supported");
        }
        else
        {
            if (!imageRobustnessFeatures.robustImageAccess)
                TCU_THROW(NotSupportedError, "robustImageAccess not supported");
        }
        break;
    }

    if (m_data.nullDescriptor && !robustness2Features.nullDescriptor)
        TCU_THROW(NotSupportedError, "nullDescriptor not supported");

    // The fill shader for 64-bit multisample image tests uses a storage image.
    if (m_data.samples > VK_SAMPLE_COUNT_1_BIT && formatIsR64(m_data.format) &&
        !features2.features.shaderStorageImageMultisample)
        TCU_THROW(NotSupportedError, "shaderStorageImageMultisample not supported");

    if ((m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) && m_data.samples != VK_SAMPLE_COUNT_1_BIT &&
        !features2.features.shaderStorageImageMultisample)
        TCU_THROW(NotSupportedError, "shaderStorageImageMultisample not supported");

    if ((m_data.useTemplate || formatIsR64(m_data.format)) && !context.contextSupports(vk::ApiVersion(0, 1, 1, 0)))
        TCU_THROW(NotSupportedError, "Vulkan 1.1 not supported");

#ifndef CTS_USES_VULKANSC
    if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE && !m_data.formatQualifier)
    {
        const VkFormatProperties3 formatProperties = context.getFormatProperties(m_data.format);
        if (!(formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_2_STORAGE_READ_WITHOUT_FORMAT_BIT_KHR))
            TCU_THROW(NotSupportedError, "Format does not support reading without format");
        if (!(formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_2_STORAGE_WRITE_WITHOUT_FORMAT_BIT_KHR))
            TCU_THROW(NotSupportedError, "Format does not support writing without format");
    }
    else if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER && !m_data.formatQualifier)
    {
        const VkFormatProperties3 formatProperties = context.getFormatProperties(m_data.format);
        if (!(formatProperties.bufferFeatures & VK_FORMAT_FEATURE_2_STORAGE_READ_WITHOUT_FORMAT_BIT_KHR))
            TCU_THROW(NotSupportedError, "Format does not support reading without format");
        if (!(formatProperties.bufferFeatures & VK_FORMAT_FEATURE_2_STORAGE_WRITE_WITHOUT_FORMAT_BIT_KHR))
            TCU_THROW(NotSupportedError, "Format does not support writing without format");
    }
#else
    if ((m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER ||
         m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) &&
        !m_data.formatQualifier &&
        (!features2.features.shaderStorageImageReadWithoutFormat ||
         !features2.features.shaderStorageImageWriteWithoutFormat))
        TCU_THROW(NotSupportedError,
                  "shaderStorageImageReadWithoutFormat or shaderStorageImageWriteWithoutFormat not supported");
#endif // CTS_USES_VULKANSC

    if (m_data.pushDescriptor)
        context.requireDeviceFunctionality("VK_KHR_push_descriptor");

    if (m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY && !features2.features.imageCubeArray)
        TCU_THROW(NotSupportedError, "Cube array image view type not supported");

    if (context.isDeviceFunctionalitySupported("VK_KHR_portability_subset") &&
        !context.getDeviceFeatures().robustBufferAccess)
        TCU_THROW(NotSupportedError,
                  "VK_KHR_portability_subset: robustBufferAccess not supported by this implementation");

#ifndef CTS_USES_VULKANSC
    if (m_data.needsPipelineRobustness() && !pipelineRobustnessFeatures.pipelineRobustness)
        TCU_THROW(NotSupportedError, "pipelineRobustness not supported");
#endif
}

void generateLayout(Layout &layout, const CaseDef &caseDef)
{
    vector<VkDescriptorSetLayoutBinding> &bindings = layout.layoutBindings;
    int numBindings = caseDef.descriptorType != VERTEX_ATTRIBUTE_FETCH ? 2 : 1;
    bindings = vector<VkDescriptorSetLayoutBinding>(numBindings);

    for (uint32_t b = 0; b < layout.layoutBindings.size(); ++b)
    {
        VkDescriptorSetLayoutBinding &binding = bindings[b];
        binding.binding = b;
        binding.pImmutableSamplers = NULL;
        binding.stageFlags = caseDef.allShaderStages;
        binding.descriptorCount = 1;

        // Output image
        if (b == 0)
            binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
        else if (caseDef.descriptorType != VERTEX_ATTRIBUTE_FETCH)
            binding.descriptorType = (VkDescriptorType)caseDef.descriptorType;
    }

    if (caseDef.nullDescriptor)
        return;

    if (caseDef.bufferLen == 0)
    {
        // Clear color values for image tests
        static uint32_t urefData[4] = {0x12345678, 0x23456789, 0x34567890, 0x45678901};
        static uint64_t urefData64[4] = {0x1234567887654321, 0x234567899, 0x345678909, 0x456789019};
        static float frefData[4] = {123.f, 234.f, 345.f, 456.f};

        if (formatIsR64(caseDef.format))
        {
            layout.refData.resize(32);
            uint64_t *ptr = (uint64_t *)layout.refData.data();

            for (unsigned int i = 0; i < 4; ++i)
            {
                ptr[i] = urefData64[i];
            }
        }
        else
        {
            layout.refData.resize(16);
            deMemcpy(layout.refData.data(),
                     formatIsFloat(caseDef.format) ? (const void *)frefData : (const void *)urefData, sizeof(frefData));
        }
    }
    else
    {
        layout.refData.resize(caseDef.bufferLen & (formatIsR64(caseDef.format) ? ~7 : ~3));
        for (unsigned int i = 0;
             i < caseDef.bufferLen / (formatIsR64(caseDef.format) ? sizeof(uint64_t) : sizeof(uint32_t)); ++i)
        {
            if (formatIsFloat(caseDef.format))
            {
                float *f = (float *)layout.refData.data() + i;
                *f = 2.0f * (float)i + 3.0f;
            }
            else if (formatIsR64(caseDef.format))
            {
                uint64_t *u = (uint64_t *)layout.refData.data() + i;
                *u = 2 * i + 3;
            }
            else
            {
                int *u = (int *)layout.refData.data() + i;
                *u = 2 * i + 3;
            }
        }
    }
}

static string genFetch(const CaseDef &caseDef, int numComponents, const string &vecType, const string &coord,
                       const string &lod)
{
    std::stringstream s;
    // Fetch from the descriptor.
    switch (caseDef.descriptorType)
    {
    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
        s << vecType << "(ubo0_1.val[" << coord << "]";
        for (int i = numComponents; i < 4; ++i)
            s << ", 0";
        s << ")";
        break;
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
        s << vecType << "(ssbo0_1.val[" << coord << "]";
        for (int i = numComponents; i < 4; ++i)
            s << ", 0";
        s << ")";
        break;
    case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
        s << "texelFetch(texbo0_1, " << coord << ")";
        break;
    case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
        s << "imageLoad(image0_1, " << coord << ")";
        break;
    case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
        if (caseDef.samples > VK_SAMPLE_COUNT_1_BIT)
            s << "texelFetch(texture0_1, " << coord << ")";
        else
            s << "texelFetch(texture0_1, " << coord << ", " << lod << ")";
        break;
    case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
        s << "imageLoad(image0_1, " << coord << ")";
        break;
    case VERTEX_ATTRIBUTE_FETCH:
        s << "attr";
        break;
    default:
        DE_ASSERT(0);
    }
    return s.str();
}
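// Example of the GLSL this produces: for VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER with
// numComponents == 2, vecType "ivec4" and coord "idx", genFetch() returns
// "ivec4(ubo0_1.val[idx], 0, 0)".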

static const int storeValue = 123;

// Get the value stored by genStore.
static string getStoreValue(int descriptorType, int numComponents, const string &vecType, const string &bufType)
{
    std::stringstream s;
    switch (descriptorType)
    {
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
        s << vecType << "(" << bufType << "(" << storeValue << ")";
        for (int i = numComponents; i < 4; ++i)
            s << ", 0";
        s << ")";
        break;
    case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
        s << vecType << "(" << storeValue << ")";
        break;
    case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
        s << vecType << "(" << storeValue << ")";
        break;
    default:
        DE_ASSERT(0);
    }
    return s.str();
}

static string genStore(int descriptorType, const string &vecType, const string &bufType, const string &coord)
{
    std::stringstream s;
    // Store to the descriptor.
    switch (descriptorType)
    {
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
        s << "ssbo0_1.val[" << coord << "] = " << bufType << "(" << storeValue << ")";
        break;
    case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
        s << "imageStore(image0_1, " << coord << ", " << vecType << "(" << storeValue << "))";
        break;
    case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
        s << "imageStore(image0_1, " << coord << ", " << vecType << "(" << storeValue << "))";
        break;
    default:
        DE_ASSERT(0);
    }
    return s.str();
}
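// Example: genStore(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, "ivec4", "int", "idx") returns the GLSL
// statement "ssbo0_1.val[idx] = int(123)".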

static string genAtomic(int descriptorType, const string &bufType, const string &coord)
{
    std::stringstream s;
    // Atomically add to the descriptor. The value doesn't matter, since we only test out of bounds coordinates.
    switch (descriptorType)
    {
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
        s << "atomicAdd(ssbo0_1.val[" << coord << "], " << bufType << "(10))";
        break;
    case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
        s << "imageAtomicAdd(image0_1, " << coord << ", " << bufType << "(10))";
        break;
    case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
        s << "imageAtomicAdd(image0_1, " << coord << ", " << bufType << "(10))";
        break;
    default:
        DE_ASSERT(0);
    }
    return s.str();
}

static std::string getShaderImageFormatQualifier(const tcu::TextureFormat &format)
{
    const char *orderPart;
    const char *typePart;

    switch (format.order)
    {
    case tcu::TextureFormat::R:
        orderPart = "r";
        break;
    case tcu::TextureFormat::RG:
        orderPart = "rg";
        break;
    case tcu::TextureFormat::RGB:
        orderPart = "rgb";
        break;
    case tcu::TextureFormat::RGBA:
        orderPart = "rgba";
        break;

    default:
        DE_FATAL("Impossible");
        orderPart = DE_NULL;
    }

    switch (format.type)
    {
    case tcu::TextureFormat::FLOAT:
        typePart = "32f";
        break;
    case tcu::TextureFormat::HALF_FLOAT:
        typePart = "16f";
        break;

    case tcu::TextureFormat::UNSIGNED_INT64:
        typePart = "64ui";
        break;
    case tcu::TextureFormat::UNSIGNED_INT32:
        typePart = "32ui";
        break;
    case tcu::TextureFormat::UNSIGNED_INT16:
        typePart = "16ui";
        break;
    case tcu::TextureFormat::UNSIGNED_INT8:
        typePart = "8ui";
        break;

    case tcu::TextureFormat::SIGNED_INT64:
        typePart = "64i";
        break;
    case tcu::TextureFormat::SIGNED_INT32:
        typePart = "32i";
        break;
    case tcu::TextureFormat::SIGNED_INT16:
        typePart = "16i";
        break;
    case tcu::TextureFormat::SIGNED_INT8:
        typePart = "8i";
        break;

    case tcu::TextureFormat::UNORM_INT16:
        typePart = "16";
        break;
    case tcu::TextureFormat::UNORM_INT8:
        typePart = "8";
        break;

    case tcu::TextureFormat::SNORM_INT16:
        typePart = "16_snorm";
        break;
    case tcu::TextureFormat::SNORM_INT8:
        typePart = "8_snorm";
        break;

    default:
        DE_FATAL("Impossible");
        typePart = DE_NULL;
    }

    return std::string() + orderPart + typePart;
}
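// Examples: VK_FORMAT_R32_SFLOAT maps to "r32f" and VK_FORMAT_R32G32B32A32_UINT to "rgba32ui",
// matching the GLSL image format layout qualifiers.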

string genCoord(string c, int numCoords, VkSampleCountFlagBits samples, int dim)
{
    if (numCoords == 1)
        return c;

    if (samples != VK_SAMPLE_COUNT_1_BIT)
        numCoords--;

    string coord = "ivec" + to_string(numCoords) + "(";

    for (int i = 0; i < numCoords; ++i)
    {
        if (i == dim)
            coord += c;
        else
            coord += "0";
        if (i < numCoords - 1)
            coord += ", ";
    }
    coord += ")";

    // Append sample coordinate
    if (samples != VK_SAMPLE_COUNT_1_BIT)
    {
        coord += ", ";
        if (dim == numCoords)
            coord += c;
        else
            coord += "0";
    }
    return coord;
}
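// Example: genCoord("c", 3, VK_SAMPLE_COUNT_1_BIT, 1) returns "ivec3(0, c, 0)", while
// genCoord("c", 3, VK_SAMPLE_COUNT_4_BIT, 2) returns "ivec2(0, 0), c", i.e. a 2D coordinate
// plus a separate sample index argument for texelFetch.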

// Normalized coordinates. Divide by "imageDim" and add 0.25 so we're not on a pixel boundary.
string genCoordNorm(const CaseDef &caseDef, string c, int numCoords, int numNormalizedCoords, int dim)
{
    // dim can be 3 for cube_array. Reuse the number of layers in that case.
    dim = std::min(dim, 2);

    if (numCoords == 1)
        return c + " / float(" + to_string(caseDef.imageDim[dim]) + ")";

    string coord = "vec" + to_string(numCoords) + "(";

    for (int i = 0; i < numCoords; ++i)
    {
        if (i == dim)
            coord += c;
        else
            coord += "0.25";
        if (i < numNormalizedCoords)
            coord += " / float(" + to_string(caseDef.imageDim[dim]) + ")";
        if (i < numCoords - 1)
            coord += ", ";
    }
    coord += ")";
    return coord;
}
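// Example: genCoordNorm(caseDef, "(c+0.25)", 2, 2, 0) returns
// "vec2((c+0.25) / float(W), 0.25 / float(W))", where W is caseDef.imageDim[0].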

void RobustnessExtsTestCase::initPrograms(SourceCollections &programCollection) const
{
    VkFormat format = m_data.format;

    Layout layout;
    generateLayout(layout, m_data);

    if (layout.layoutBindings.size() > 1 &&
        layout.layoutBindings[1].descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
    {
        if (format == VK_FORMAT_R64_SINT)
            format = VK_FORMAT_R32G32_SINT;

        if (format == VK_FORMAT_R64_UINT)
            format = VK_FORMAT_R32G32_UINT;
    }

    std::stringstream decls, checks;

    const string r64 = formatIsR64(format) ? "64" : "";
    const string i64Type = formatIsR64(format) ? "64_t" : "";
    const string vecType =
        formatIsFloat(format) ? "vec4" : (formatIsSignedInt(format) ? ("i" + r64 + "vec4") : ("u" + r64 + "vec4"));
    const string qLevelType = vecType == "vec4" ? "float" :
                              ((vecType == "ivec4") || (vecType == "i64vec4")) ? ("int" + i64Type) :
                                                                                 ("uint" + i64Type);

    decls << "uvec4 abs(uvec4 x) { return x; }\n";
    if (formatIsR64(format))
        decls << "u64vec4 abs(u64vec4 x) { return x; }\n";
    decls << "int smod(int a, int b) { if (a < 0) a += b*(abs(a)/b+1); return a%b; }\n";
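    // smod() above is a non-negative modulo for the generated GLSL: e.g. smod(-3, 4) evaluates
    // to 1, so negative loop counters still index safely into the shader-side refData[] array.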

    const int componentSize = (formatIsR64(format) ? 8 : 4);
    int refDataNumElements = deIntRoundToPow2(((int)layout.refData.size() / componentSize), 4);
    // Pad reference data to include zeros, up to max value of robustUniformBufferAccessSizeAlignment (256).
    // robustStorageBufferAccessSizeAlignment is 4, so no extra padding needed.
    if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
        m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC)
    {
        refDataNumElements = deIntRoundToPow2(refDataNumElements, 256 / (formatIsR64(format) ? 8 : 4));
    }
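    // Example: a 20-byte uint32 buffer holds 5 elements, rounded up to 8 above and then, for
    // UBOs, padded further to 64 elements (256 bytes).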
1055 if (m_data.nullDescriptor)
1056 refDataNumElements = 4;
1057
1058 if (formatIsFloat(format))
1059 {
1060 decls << "float refData[" << refDataNumElements << "] = {";
1061 int i;
1062 for (i = 0; i < (int)layout.refData.size() / 4; ++i)
1063 {
1064 if (i != 0)
1065 decls << ", ";
1066 decls << ((const float *)layout.refData.data())[i];
1067 }
1068 while (i < refDataNumElements)
1069 {
1070 if (i != 0)
1071 decls << ", ";
1072 decls << "0";
1073 i++;
1074 }
1075 }
1076 else if (formatIsR64(format))
1077 {
1078 decls << "int" << i64Type << " refData[" << refDataNumElements << "] = {";
1079 int i;
1080 for (i = 0; i < (int)layout.refData.size() / 8; ++i)
1081 {
1082 if (i != 0)
1083 decls << ", ";
1084 decls << ((const uint64_t *)layout.refData.data())[i] << "l";
1085 }
1086 while (i < refDataNumElements)
1087 {
1088 if (i != 0)
1089 decls << ", ";
1090 decls << "0l";
1091 i++;
1092 }
1093 }
1094 else
1095 {
1096 decls << "int"
1097 << " refData[" << refDataNumElements << "] = {";
1098 int i;
1099 for (i = 0; i < (int)layout.refData.size() / 4; ++i)
1100 {
1101 if (i != 0)
1102 decls << ", ";
1103 decls << ((const int *)layout.refData.data())[i];
1104 }
1105 while (i < refDataNumElements)
1106 {
1107 if (i != 0)
1108 decls << ", ";
1109 decls << "0";
1110 i++;
1111 }
1112 }
1113
1114 decls << "};\n";
1115 decls << vecType << " zzzz = " << vecType << "(0);\n";
1116 decls << vecType << " zzzo = " << vecType << "(0, 0, 0, 1);\n";
1117 decls << vecType << " expectedIB;\n";
1118
1119 string imgprefix = (formatIsFloat(format) ? "" : formatIsSignedInt(format) ? "i" : "u") + r64;
1120 string imgqualif = (m_data.formatQualifier) ? getShaderImageFormatQualifier(mapVkFormat(format)) + ", " : "";
1121 string outputimgqualif = getShaderImageFormatQualifier(mapVkFormat(format));
1122
1123 string imageDim = "";
1124 int numCoords, numNormalizedCoords;
1125 bool layered = false;
1126 switch (m_data.viewType)
1127 {
1128 default:
1129 DE_ASSERT(0); // Fallthrough
1130 case VK_IMAGE_VIEW_TYPE_1D:
1131 imageDim = "1D";
1132 numCoords = 1;
1133 numNormalizedCoords = 1;
1134 break;
1135 case VK_IMAGE_VIEW_TYPE_1D_ARRAY:
1136 imageDim = "1DArray";
1137 numCoords = 2;
1138 numNormalizedCoords = 1;
1139 layered = true;
1140 break;
1141 case VK_IMAGE_VIEW_TYPE_2D:
1142 imageDim = "2D";
1143 numCoords = 2;
1144 numNormalizedCoords = 2;
1145 break;
1146 case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
1147 imageDim = "2DArray";
1148 numCoords = 3;
1149 numNormalizedCoords = 2;
1150 layered = true;
1151 break;
1152 case VK_IMAGE_VIEW_TYPE_3D:
1153 imageDim = "3D";
1154 numCoords = 3;
1155 numNormalizedCoords = 3;
1156 break;
1157 case VK_IMAGE_VIEW_TYPE_CUBE:
1158 imageDim = "Cube";
1159 numCoords = 3;
1160 numNormalizedCoords = 3;
1161 break;
1162 case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:
1163 imageDim = "CubeArray";
1164 numCoords = 4;
1165 numNormalizedCoords = 3;
1166 layered = true;
1167 break;
1168 }
1169 if (m_data.samples > VK_SAMPLE_COUNT_1_BIT)
1170 {
1171 switch (m_data.viewType)
1172 {
1173 default:
1174 DE_ASSERT(0); // Fallthrough
1175 case VK_IMAGE_VIEW_TYPE_2D:
1176 imageDim = "2DMS";
1177 break;
1178 case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
1179 imageDim = "2DMSArray";
1180 break;
1181 }
1182 numCoords++;
1183 }
1184 bool dataDependsOnLayer =
1185 (m_data.viewType == VK_IMAGE_VIEW_TYPE_1D_ARRAY || m_data.viewType == VK_IMAGE_VIEW_TYPE_2D_ARRAY) &&
1186 !m_data.nullDescriptor;
1187
1188 // Special case imageLoad(imageCubeArray, ...) which uses ivec3
1189 if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE && m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)
1190 {
1191 numCoords = 3;
1192 }
1193
1194 int numComponents = tcu::getPixelSize(mapVkFormat(format)) / tcu::getChannelSize(mapVkFormat(format).type);
1195 string bufType;
1196 if (numComponents == 1)
1197 bufType = string(formatIsFloat(format) ? "float" : formatIsSignedInt(format) ? "int" : "uint") + i64Type;
1198 else
1199 bufType = imgprefix + "vec" + std::to_string(numComponents);
1200
1201 // For UBO's, which have a declared size in the shader, don't access outside that size.
1202 bool declaredSize = false;
1203 switch (m_data.descriptorType)
1204 {
1205 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1206 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
1207 declaredSize = true;
1208 break;
1209 default:
1210 break;
1211 }
1212
1213 checks << " int inboundcoords, clampedLayer;\n";
1214 checks << " " << vecType << " expectedIB2;\n";
1215 if (m_data.unroll)
1216 {
1217 if (declaredSize)
1218 checks << " [[unroll]] for (int c = 0; c <= 10; ++c) {\n";
1219 else
1220 checks << " [[unroll]] for (int c = -10; c <= 10; ++c) {\n";
1221 }
1222 else
1223 {
1224 if (declaredSize)
1225 checks << " [[dont_unroll]] for (int c = 1023; c >= 0; --c) {\n";
1226 else
1227 checks << " [[dont_unroll]] for (int c = 1050; c >= -1050; --c) {\n";
1228 }
1229
1230 if (m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
1231 checks << " int idx = smod(gl_VertexIndex * " << numComponents << ", " << refDataNumElements << ");\n";
1232 else
1233 checks << " int idx = smod(c * " << numComponents << ", " << refDataNumElements << ");\n";
1234
1235 decls << "layout(" << outputimgqualif << ", set = 0, binding = 0) uniform " << imgprefix << "image2D image0_0;\n";
1236
1237 const char *vol = m_data.vol ? "volatile " : "";
1238 const char *ro = m_data.readOnly ? "readonly " : "";
1239
1240 // Construct the declaration for the binding
1241 switch (m_data.descriptorType)
1242 {
1243 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1244 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
1245 decls << "layout(scalar, set = 0, binding = 1) uniform ubodef0_1 { " << bufType << " val[1024]; } ubo0_1;\n";
1246 break;
1247 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1248 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
1249 decls << "layout(scalar, set = 0, binding = 1) " << vol << ro << "buffer sbodef0_1 { " << bufType
1250 << " val[]; } ssbo0_1;\n";
1251 decls << "layout(scalar, set = 0, binding = 1) " << vol << ro << "buffer sbodef0_1_pad { vec4 pad; " << bufType
1252 << " val[]; } ssbo0_1_pad;\n";
1253 break;
1254 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
1255 switch (format)
1256 {
1257 case VK_FORMAT_R64_SINT:
1258 decls << "layout(set = 0, binding = 1) uniform itextureBuffer texbo0_1;\n";
1259 break;
1260 case VK_FORMAT_R64_UINT:
1261 decls << "layout(set = 0, binding = 1) uniform utextureBuffer texbo0_1;\n";
1262 break;
1263 default:
1264 decls << "layout(set = 0, binding = 1) uniform " << imgprefix << "textureBuffer texbo0_1;\n";
1265 }
1266 break;
1267 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
1268 decls << "layout(" << imgqualif << "set = 0, binding = 1) " << vol << "uniform " << imgprefix
1269 << "imageBuffer image0_1;\n";
1270 break;
1271 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
1272 decls << "layout(" << imgqualif << "set = 0, binding = 1) " << vol << "uniform " << imgprefix << "image"
1273 << imageDim << " image0_1;\n";
1274 break;
1275 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1276 switch (format)
1277 {
1278 case VK_FORMAT_R64_SINT:
1279 decls << "layout(set = 0, binding = 1) uniform isampler" << imageDim << " texture0_1; \n";
1280 break;
1281 case VK_FORMAT_R64_UINT:
1282 decls << "layout(set = 0, binding = 1) uniform usampler" << imageDim << " texture0_1; \n";
1283 break;
1284 default:
1285 decls << "layout(set = 0, binding = 1) uniform " << imgprefix << "sampler" << imageDim << " texture0_1;\n";
1286 break;
1287 }
1288 break;
1289 case VERTEX_ATTRIBUTE_FETCH:
1290 if (formatIsR64(format))
1291 {
1292 decls << "layout(location = 0) in " << (formatIsSignedInt(format) ? ("int64_t") : ("uint64_t"))
1293 << " attr;\n";
1294 }
1295 else
1296 {
1297 decls << "layout(location = 0) in " << vecType << " attr;\n";
1298 }
1299 break;
1300 default:
1301 DE_ASSERT(0);
1302 }
1303
1304 string expectedOOB;
1305 string defaultw;
1306
1307 switch (m_data.descriptorType)
1308 {
1309 default:
1310 DE_ASSERT(0); // Fallthrough
1311 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1312 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
1313 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1314 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
1315 expectedOOB = "zzzz";
1316 defaultw = "0";
1317 break;
1318 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
1319 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
1320 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
1321 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1322 case VERTEX_ATTRIBUTE_FETCH:
1323 if (numComponents == 1)
1324 {
1325 expectedOOB = "zzzo";
1326 }
1327 else if (numComponents == 2)
1328 {
1329 expectedOOB = "zzzo";
1330 }
1331 else
1332 {
1333 expectedOOB = "zzzz";
1334 }
1335 defaultw = "1";
1336 break;
1337 }
1338
1339 string idx;
1340 switch (m_data.descriptorType)
1341 {
1342 default:
1343 DE_ASSERT(0); // Fallthrough
1344 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1345 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
1346 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1347 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
1348 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
1349 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
1350 case VERTEX_ATTRIBUTE_FETCH:
1351 idx = "idx";
1352 break;
1353 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
1354 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1355 idx = "0";
1356 break;
1357 }
1358
1359 if (m_data.nullDescriptor)
1360 {
1361 checks << " expectedIB = zzzz;\n";
1362 checks << " inboundcoords = 0;\n";
1363 checks << " int paddedinboundcoords = 0;\n";
1364 // Vertex attribute fetch still gets format conversion applied
1365 if (m_data.descriptorType != VERTEX_ATTRIBUTE_FETCH)
1366 expectedOOB = "zzzz";
1367 }
1368 else
1369 {
1370 checks << " expectedIB.x = refData[" << idx << "];\n";
1371 if (numComponents > 1)
1372 {
1373 checks << " expectedIB.y = refData[" << idx << "+1];\n";
1374 }
1375 else
1376 {
1377 checks << " expectedIB.y = 0;\n";
1378 }
1379 if (numComponents > 2)
1380 {
1381 checks << " expectedIB.z = refData[" << idx << "+2];\n";
1382 checks << " expectedIB.w = refData[" << idx << "+3];\n";
1383 }
1384 else
1385 {
1386 checks << " expectedIB.z = 0;\n";
1387 checks << " expectedIB.w = " << defaultw << ";\n";
1388 }
1389
1390 switch (m_data.descriptorType)
1391 {
1392 default:
1393 DE_ASSERT(0); // Fallthrough
1394 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1395 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
1396 // UBOs can either strictly bounds check against inboundcoords, or can
1397 // return the contents from memory for the range padded up to paddedinboundcoords.
1398 checks << " int paddedinboundcoords = " << refDataNumElements / numComponents << ";\n";
1399 // fallthrough
1400 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1401 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
1402 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
1403 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
1404 case VERTEX_ATTRIBUTE_FETCH:
1405 checks << " inboundcoords = "
1406 << layout.refData.size() / (formatIsR64(format) ? sizeof(uint64_t) : sizeof(uint32_t)) /
1407 numComponents
1408 << ";\n";
1409 break;
1410 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
1411 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1412 // set per-component below
1413 break;
1414 }
1415 }
1416
1417 if ((m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE ||
1418 m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER ||
1419 m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
1420 m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) &&
1421 !m_data.readOnly)
1422 {
1423 for (int i = 0; i < numCoords; ++i)
1424 {
1425 // Treat i==3 coord (cube array layer) like i == 2
1426 uint32_t coordDim = m_data.imageDim[i == 3 ? 2 : i];
1427 if (!m_data.nullDescriptor && m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE)
1428 checks << " inboundcoords = " << coordDim << ";\n";
1429
1430 string coord = genCoord("c", numCoords, m_data.samples, i);
1431 string inboundcoords = m_data.nullDescriptor ? "0" :
1432 (m_data.samples > VK_SAMPLE_COUNT_1_BIT && i == numCoords - 1) ?
1433 to_string(m_data.samples) :
1434 "inboundcoords";
1435
1436 checks << " if (c < 0 || c >= " << inboundcoords << ") "
1437 << genStore(m_data.descriptorType, vecType, bufType, coord) << ";\n";
1438 if (m_data.formatQualifier && (format == VK_FORMAT_R32_SINT || format == VK_FORMAT_R32_UINT))
1439 {
1440 checks << " if (c < 0 || c >= " << inboundcoords << ") "
1441 << genAtomic(m_data.descriptorType, bufType, coord) << ";\n";
1442 }
1443 }
1444 }
1445
1446 for (int i = 0; i < numCoords; ++i)
1447 {
1448 // Treat i==3 coord (cube array layer) like i == 2
1449 uint32_t coordDim = m_data.imageDim[i == 3 ? 2 : i];
1450 if (!m_data.nullDescriptor)
1451 {
1452 switch (m_data.descriptorType)
1453 {
1454 default:
1455 break;
1456 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
1457 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1458 checks << " inboundcoords = " << coordDim << ";\n";
1459 break;
1460 }
1461 }
1462
1463 string coord = genCoord("c", numCoords, m_data.samples, i);
1464
1465 if (m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
1466 {
1467 if (formatIsR64(format))
1468 {
1469 checks << " temp.x = attr;\n";
1470 checks << " temp.y = 0l;\n";
1471 checks << " temp.z = 0l;\n";
1472 checks << " temp.w = 0l;\n";
1473 checks << " if (gl_VertexIndex >= 0 && gl_VertexIndex < inboundcoords) temp.x -= expectedIB.x; else "
1474 "temp -= zzzz;\n";
1475 }
1476 else
1477 {
1478 checks << " temp = " << genFetch(m_data, numComponents, vecType, coord, "0") << ";\n";
1479 checks << " if (gl_VertexIndex >= 0 && gl_VertexIndex < inboundcoords) temp -= expectedIB; else "
1480 "temp -= "
1481 << expectedOOB << ";\n";
1482 }
1483 // Accumulate any incorrect values.
1484 checks << " accum += abs(temp);\n";
1485 }
1486 // Skip texelFetch testing for cube(array) - texelFetch doesn't support it
1487 if (m_data.descriptorType != VERTEX_ATTRIBUTE_FETCH &&
1488 !(m_data.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER &&
1489 (m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE || m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)))
1490 {
1491 checks << " temp = " << genFetch(m_data, numComponents, vecType, coord, "0") << ";\n";
1492
1493 checks << " expectedIB2 = expectedIB;\n";
1494
1495 // Expected data is a function of layer, for array images. Subtract out the layer value for in-bounds coordinates.
1496 if (dataDependsOnLayer && i == numNormalizedCoords)
1497 checks << " if (c >= 0 && c < inboundcoords) expectedIB2 += " << vecType << "(c, 0, 0, 0);\n";
1498
1499 if (m_data.samples > VK_SAMPLE_COUNT_1_BIT && i == numCoords - 1)
1500 {
1501 if (m_data.nullDescriptor && m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE)
1502 {
1503 checks << " if (temp == zzzz) temp = " << vecType << "(0);\n";
1504 if (m_data.formatQualifier && numComponents < 4)
1505 checks << " else if (temp == zzzo) temp = " << vecType << "(0);\n";
1506 checks << " else temp = " << vecType << "(1);\n";
1507 }
1508 else
1509 // multisample coord doesn't have defined behavior for OOB, so just set temp to 0.
1510 checks << " if (c >= 0 && c < " << m_data.samples
1511 << ") temp -= expectedIB2; else temp = " << vecType << "(0);\n";
1512 }
1513 else
1514 {
1515 // Storage buffers may be split into per-component loads. Generate a second
1516 // expected out of bounds value where some subset of the components are
1517 // actually in-bounds. If both loads and stores are split into per-component
1518 // accesses, then the result value can be a mix of storeValue and zero.
1519 string expectedOOB2 = expectedOOB;
1520 string expectedOOB3 = expectedOOB;
1521 if ((m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
1522 m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) &&
1523 !m_data.nullDescriptor)
1524 {
1525 int len = m_data.bufferLen & (formatIsR64(format) ? ~7 : ~3);
1526 int mod =
1527 (int)((len / (formatIsR64(format) ? sizeof(uint64_t) : sizeof(uint32_t))) % numComponents);
1528 string sstoreValue = de::toString(storeValue);
1529 switch (mod)
1530 {
1531 case 0:
1532 break;
1533 case 1:
1534 expectedOOB2 = vecType + "(expectedIB2.x, 0, 0, 0)";
1535 expectedOOB3 = vecType + "(" + sstoreValue + ", 0, 0, 0)";
1536 break;
1537 case 2:
1538 expectedOOB2 = vecType + "(expectedIB2.xy, 0, 0)";
1539 expectedOOB3 = vecType + "(" + sstoreValue + ", " + sstoreValue + ", 0, 0)";
1540 break;
1541 case 3:
1542 expectedOOB2 = vecType + "(expectedIB2.xyz, 0)";
1543 expectedOOB3 = vecType + "(" + sstoreValue + ", " + sstoreValue + ", " + sstoreValue + ", 0)";
1544 break;
1545 }
1546 }
1547
1548 // Entirely in-bounds.
1549 checks << " if (c >= 0 && c < inboundcoords) {\n"
1550 " if (temp == expectedIB2) temp = "
1551 << vecType << "(0); else temp = " << vecType
1552 << "(1);\n"
1553 " }\n";
1554
1555 // normal out-of-bounds value
1556 if (m_data.testRobustness2)
1557 checks << " else if (temp == " << expectedOOB << ") temp = " << vecType << "(0);\n";
1558 else
1559 // image_robustness relaxes alpha which is allowed to be zero or one
1560 checks << " else if (temp == zzzz || temp == zzzo) temp = " << vecType << "(0);\n";
1561
1562 if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
1563 m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC)
1564 {
1565 checks << " else if (c >= 0 && c < paddedinboundcoords && temp == expectedIB2) temp = "
1566 << vecType << "(0);\n";
1567 }
1568
1569 // null descriptor loads with image format layout qualifier that doesn't include alpha may return alpha=1
1570 if (m_data.nullDescriptor && m_data.formatQualifier &&
1571 (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE ||
1572 m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER) &&
1573 numComponents < 4)
1574 checks << " else if (temp == zzzo) temp = " << vecType << "(0);\n";
1575
1576 // non-volatile value replaced with stored value
1577 if (supportsStores(m_data.descriptorType) && !m_data.vol)
1578 {
1579 checks << " else if (temp == "
1580 << getStoreValue(m_data.descriptorType, numComponents, vecType, bufType)
1581 << ") temp = " << vecType << "(0);\n";
1582
1583 if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC ||
1584 m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER)
1585 {
1586
1587 for (int mask = (numComponents * numComponents) - 2; mask > 0; mask--)
1588 {
1589 checks << " else if (temp == " << vecType << "(";
1590 for (int vecIdx = 0; vecIdx < 4; vecIdx++)
1591 {
1592 if (mask & (1 << vecIdx))
1593 checks << storeValue;
1594 else
1595 checks << "0";
1596
1597 if (vecIdx != 3)
1598 checks << ",";
1599 }
1600 checks << ")) temp = " << vecType << "(0);\n";
1601 }
1602 }
1603 }
1604
1605 // value straddling the boundary, returning a partial vector
1606 if (expectedOOB2 != expectedOOB)
1607 checks << " else if (c == inboundcoords && temp == " << expectedOOB2 << ") temp = " << vecType
1608 << "(0);\n";
1609 if (expectedOOB3 != expectedOOB)
1610 checks << " else if (c == inboundcoords && temp == " << expectedOOB3 << ") temp = " << vecType
1611 << "(0);\n";
1612
1613 // failure
1614 checks << " else temp = " << vecType << "(1);\n";
1615 }
1616 // Accumulate any incorrect values.
1617 checks << " accum += abs(temp);\n";
1618
1619 // Only the full robustness2 extension provides guarantees about out-of-bounds mip levels.
1620 if (m_data.testRobustness2 && m_data.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER &&
1621 m_data.samples == VK_SAMPLE_COUNT_1_BIT)
1622 {
1623 // Fetch from an out of bounds mip level. Expect this to always return the OOB value.
1624 string coord0 = genCoord("0", numCoords, m_data.samples, i);
1625 checks << " if (c != 0) temp = " << genFetch(m_data, numComponents, vecType, coord0, "c")
1626 << "; else temp = " << vecType << "(0);\n";
1627 checks << " if (c != 0) temp -= " << expectedOOB << ";\n";
1628 checks << " accum += abs(temp);\n";
1629 }
1630 }
1631 if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER &&
1632 m_data.samples == VK_SAMPLE_COUNT_1_BIT)
1633 {
1634 string coordNorm = genCoordNorm(m_data, "(c+0.25)", numCoords, numNormalizedCoords, i);
1635
1636 checks << " expectedIB2 = expectedIB;\n";
1637
1638 // Data is a function of layer, for array images. Subtract out the layer value for in-bounds coordinates.
1639 if (dataDependsOnLayer && i == numNormalizedCoords)
1640 {
1641 checks << " clampedLayer = clamp(c, 0, " << coordDim - 1 << ");\n";
1642 checks << " expectedIB2 += " << vecType << "(clampedLayer, 0, 0, 0);\n";
1643 }
1644
1645 stringstream normexpected;
1646 // Cubemap fetches are always in-bounds. The layer coordinate is clamped, so it is always in-bounds as well.
1647 if (m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE || m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY ||
1648 (layered && i == numCoords - 1))
1649 normexpected << " temp -= expectedIB2;\n";
1650 else
1651 {
1652 normexpected << " if (c >= 0 && c < inboundcoords)\n";
1653 normexpected << " temp -= expectedIB2;\n";
1654 normexpected << " else\n";
1655 if (m_data.testRobustness2)
1656 normexpected << " temp -= " << expectedOOB << ";\n";
1657 else
1658 // image_robustness relaxes the requirement on alpha, which is allowed to be either zero or one
1659 normexpected << " temp = " << vecType << "((temp == zzzz || temp == zzzo) ? 0 : 1);\n";
1660 }
1661
1662 checks << " temp = texture(texture0_1, " << coordNorm << ");\n";
1663 checks << normexpected.str();
1664 checks << " accum += abs(temp);\n";
1665 checks << " temp = textureLod(texture0_1, " << coordNorm << ", 0.0f);\n";
1666 checks << normexpected.str();
1667 checks << " accum += abs(temp);\n";
1668 checks << " temp = textureGrad(texture0_1, " << coordNorm << ", "
1669 << genCoord("1.0", numNormalizedCoords, m_data.samples, i) << ", "
1670 << genCoord("1.0", numNormalizedCoords, m_data.samples, i) << ");\n";
1671 checks << normexpected.str();
1672 checks << " accum += abs(temp);\n";
1673 }
1674 if (m_data.nullDescriptor)
1675 {
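// Size queries on a null descriptor must return zero. The swizzles below expand
// the query result to four components according to the view dimensionality.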
1676 const char *sizeswiz;
1677 switch (m_data.viewType)
1678 {
1679 default:
1680 DE_ASSERT(0); // Fallthrough
1681 case VK_IMAGE_VIEW_TYPE_1D:
1682 sizeswiz = ".xxxx";
1683 break;
1684 case VK_IMAGE_VIEW_TYPE_1D_ARRAY:
1685 sizeswiz = ".xyxx";
1686 break;
1687 case VK_IMAGE_VIEW_TYPE_2D:
1688 sizeswiz = ".xyxx";
1689 break;
1690 case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
1691 sizeswiz = ".xyzx";
1692 break;
1693 case VK_IMAGE_VIEW_TYPE_3D:
1694 sizeswiz = ".xyzx";
1695 break;
1696 case VK_IMAGE_VIEW_TYPE_CUBE:
1697 sizeswiz = ".xyxx";
1698 break;
1699 case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:
1700 sizeswiz = ".xyzx";
1701 break;
1702 }
1703 if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
1704 {
1705 if (m_data.samples == VK_SAMPLE_COUNT_1_BIT)
1706 {
1707 checks << " temp = textureSize(texture0_1, 0)" << sizeswiz << ";\n";
1708 checks << " accum += abs(temp);\n";
1709
1710 // Check textureSize with clearly out-of-range LOD values.
1711 checks << " temp = textureSize(texture0_1, " << -i << ")" << sizeswiz << ";\n";
1712 checks << " accum += abs(temp);\n";
1713 checks << " temp = textureSize(texture0_1, " << (std::numeric_limits<int32_t>::max() - i) << ")"
1714 << sizeswiz << ";\n";
1715 checks << " accum += abs(temp);\n";
1716 }
1717 else
1718 {
1719 checks << " temp = textureSize(texture0_1)" << sizeswiz << ";\n";
1720 checks << " accum += abs(temp);\n";
1721 checks << " temp = textureSamples(texture0_1).xxxx;\n";
1722 checks << " accum += abs(temp);\n";
1723 }
1724 }
1725 if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE)
1726 {
1727 if (m_data.samples == VK_SAMPLE_COUNT_1_BIT)
1728 {
1729 checks << " temp = imageSize(image0_1)" << sizeswiz << ";\n";
1730 checks << " accum += abs(temp);\n";
1731 }
1732 else
1733 {
1734 checks << " temp = imageSize(image0_1)" << sizeswiz << ";\n";
1735 checks << " accum += abs(temp);\n";
1736 checks << " temp = imageSamples(image0_1).xxxx;\n";
1737 checks << " accum += abs(temp);\n";
1738 }
1739 }
1740 if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
1741 m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
1742 {
1743 // Expect zero for the runtime-sized array's .length().
1744 checks << " temp = " << vecType << "(ssbo0_1.val.length());\n";
1745 checks << " accum += abs(temp);\n";
1746 checks << " temp = " << vecType << "(ssbo0_1_pad.val.length());\n";
1747 checks << " accum += abs(temp);\n";
1748 }
1749 }
1750 }
1751 checks << " }\n";
1752
1753 // Outside the coordinate loop because it only needs to be checked once.
1754 if (m_data.nullDescriptor && m_data.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER &&
1755 m_data.samples == VK_SAMPLE_COUNT_1_BIT)
1756 {
1757 checks << " temp_ql = " << qLevelType << "(textureQueryLevels(texture0_1));\n";
1758 checks << " temp = " << vecType << "(temp_ql);\n";
1759 checks << " accum += abs(temp);\n";
1760
1761 if (m_data.stage == STAGE_FRAGMENT)
1762 {
1763 // Here we only want to check that textureQueryLod returns 0 when
1764 // texture0_1 is null, so we don't need the actual texture coordinates
1765 // (which would require modifying the vertex shader below). Any coordinates
1766 // are fine; gl_FragCoord was picked arbitrarily rather than, say, 0.
1767 std::string lod_str = (numNormalizedCoords == 1) ? ");" : (numNormalizedCoords == 2) ? "y);" : "yz);";
1768 checks << " vec2 lod = textureQueryLod(texture0_1, gl_FragCoord.x" << lod_str << "\n";
1769 checks << " temp_ql = " << qLevelType << "(ceil(abs(lod.x) + abs(lod.y)));\n";
1770 checks << " temp = " << vecType << "(temp_ql);\n";
1771 checks << " accum += abs(temp);\n";
1772 }
1773 }
1774
1775 const bool needsScalarLayout = m_data.needsScalarBlockLayout();
1776 const uint32_t shaderBuildOptions =
1777 (needsScalarLayout ? static_cast<uint32_t>(vk::ShaderBuildOptions::FLAG_ALLOW_SCALAR_OFFSETS) : 0u);
1778
1779 const bool is64BitFormat = formatIsR64(m_data.format);
1780 std::string support =
1781 "#version 460 core\n"
1782 "#extension GL_EXT_nonuniform_qualifier : enable\n" +
1783 (needsScalarLayout ? std::string("#extension GL_EXT_scalar_block_layout : enable\n") : std::string()) +
1784 "#extension GL_EXT_samplerless_texture_functions : enable\n"
1785 "#extension GL_EXT_control_flow_attributes : enable\n"
1786 "#extension GL_EXT_shader_image_load_formatted : enable\n";
1787 std::string SupportR64 = "#extension GL_EXT_shader_explicit_arithmetic_types_int64 : require\n"
1788 "#extension GL_EXT_shader_image_int64 : require\n";
1789 if (is64BitFormat)
1790 support += SupportR64;
1791 if (m_data.stage == STAGE_RAYGEN)
1792 support += "#extension GL_EXT_ray_tracing : require\n";
1793
1794 std::string code = " " + vecType + " accum = " + vecType +
1795 "(0);\n"
1796 " " +
1797 vecType +
1798 " temp;\n"
1799 " " +
1800 qLevelType + " temp_ql;\n" + checks.str() + " " + vecType + " color = (accum != " + vecType +
1801 "(0)) ? " + vecType + "(0,0,0,0) : " + vecType + "(1,0,0,1);\n";
1802
1803 switch (m_data.stage)
1804 {
1805 default:
1806 DE_ASSERT(0); // Fallthrough
1807 case STAGE_COMPUTE:
1808 {
1809 std::stringstream css;
1810 css << support << decls.str()
1811 << "layout(local_size_x = 1, local_size_y = 1) in;\n"
1812 "void main()\n"
1813 "{\n"
1814 << code
1815 << " imageStore(image0_0, ivec2(gl_GlobalInvocationID.xy), color);\n"
1816 "}\n";
1817
1818 programCollection.glslSources.add("test")
1819 << glu::ComputeSource(css.str())
1820 << vk::ShaderBuildOptions(programCollection.usedVulkanVersion,
1821 is64BitFormat ? vk::SPIRV_VERSION_1_3 : vk::SPIRV_VERSION_1_0,
1822 shaderBuildOptions);
1823 break;
1824 }
1825 case STAGE_RAYGEN:
1826 {
1827 std::stringstream css;
1828 css << support << decls.str()
1829 << "void main()\n"
1830 "{\n"
1831 << code
1832 << " imageStore(image0_0, ivec2(gl_LaunchIDEXT.xy), color);\n"
1833 "}\n";
1834
1835 programCollection.glslSources.add("test")
1836 << glu::RaygenSource(css.str())
1837 << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_4, shaderBuildOptions,
1838 true);
1839 break;
1840 }
1841 case STAGE_VERTEX:
1842 {
1843 std::stringstream vss;
1844 vss << support << decls.str()
1845 << "void main()\n"
1846 "{\n"
1847 << code << " imageStore(image0_0, ivec2(gl_VertexIndex % " << DIM << ", gl_VertexIndex / " << DIM
1848 << "), color);\n"
1849 " gl_PointSize = 1.0f;\n"
1850 " gl_Position = vec4(0.0f, 0.0f, 0.0f, 1.0f);\n"
1851 "}\n";
1852
1853 programCollection.glslSources.add("test")
1854 << glu::VertexSource(vss.str())
1855 << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_0, shaderBuildOptions);
1856 break;
1857 }
1858 case STAGE_FRAGMENT:
1859 {
1860 std::stringstream vss;
1861 vss << "#version 450 core\n"
1862 "void main()\n"
1863 "{\n"
1864 // full-viewport quad
1865 " gl_Position = vec4( 2.0*float(gl_VertexIndex&2) - 1.0, 4.0*(gl_VertexIndex&1)-1.0, 1.0 - 2.0 * "
1866 "float(gl_VertexIndex&1), 1);\n"
1867 "}\n";
1868
1869 programCollection.glslSources.add("vert")
1870 << glu::VertexSource(vss.str())
1871 << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_0, shaderBuildOptions);
1872
1873 std::stringstream fss;
1874 fss << support << decls.str()
1875 << "void main()\n"
1876 "{\n"
1877 << code
1878 << " imageStore(image0_0, ivec2(gl_FragCoord.x, gl_FragCoord.y), color);\n"
1879 "}\n";
1880
1881 programCollection.glslSources.add("test")
1882 << glu::FragmentSource(fss.str())
1883 << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_0, shaderBuildOptions);
1884 break;
1885 }
1886 }
1887
1888 // The is64BitFormat conditions below are redundant given the check above. Could this fill shader also support non-64-bit formats?
1889 if ((m_data.samples > VK_SAMPLE_COUNT_1_BIT) && is64BitFormat)
1890 {
1891 const std::string ivecCords =
1892 (m_data.viewType == VK_IMAGE_VIEW_TYPE_2D ? "ivec2(gx, gy)" : "ivec3(gx, gy, gz)");
1893 std::stringstream fillShader;
1894
1895 fillShader
1896 << "#version 450\n"
1897 << SupportR64
1898 << "\n"
1899 "layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n"
1900 "layout (" +
1901 getShaderImageFormatQualifier(mapVkFormat(m_data.format)) + ", binding=0) volatile uniform "
1902 << string(formatIsSignedInt(m_data.format) ? "i" : "u") + string(is64BitFormat ? "64" : "") << "image"
1903 << imageDim
1904 << +" u_resultImage;\n"
1905 "\n"
1906 "layout(std430, binding = 1) buffer inputBuffer\n"
1907 "{\n"
1908 " int"
1909 << (is64BitFormat ? "64_t" : "")
1910 << " data[];\n"
1911 "} inBuffer;\n"
1912 "\n"
1913 "void main(void)\n"
1914 "{\n"
1915 " int gx = int(gl_GlobalInvocationID.x);\n"
1916 " int gy = int(gl_GlobalInvocationID.y);\n"
1917 " int gz = int(gl_GlobalInvocationID.z);\n"
1918 " uint index = gx + (gy * gl_NumWorkGroups.x) + (gz *gl_NumWorkGroups.x * gl_NumWorkGroups.y);\n";
1919
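// Write the same per-texel value to every sample of the multisampled image.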
1920 for (int ndx = 0; ndx < static_cast<int>(m_data.samples); ++ndx)
1921 {
1922 fillShader << " imageStore(u_resultImage, " << ivecCords << ", " << ndx
1923 << ", i64vec4(inBuffer.data[index]));\n";
1924 }
1925
1926 fillShader << "}\n";
1927
1928 programCollection.glslSources.add("fillShader")
1929 << glu::ComputeSource(fillShader.str())
1930 << vk::ShaderBuildOptions(programCollection.usedVulkanVersion,
1931 is64BitFormat ? vk::SPIRV_VERSION_1_3 : vk::SPIRV_VERSION_1_0,
1932 shaderBuildOptions);
1933 }
1934 }
1935
1936 VkImageType imageViewTypeToImageType(VkImageViewType type)
1937 {
1938 switch (type)
1939 {
1940 case VK_IMAGE_VIEW_TYPE_1D:
1941 case VK_IMAGE_VIEW_TYPE_1D_ARRAY:
1942 return VK_IMAGE_TYPE_1D;
1943 case VK_IMAGE_VIEW_TYPE_2D:
1944 case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
1945 case VK_IMAGE_VIEW_TYPE_CUBE:
1946 case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:
1947 return VK_IMAGE_TYPE_2D;
1948 case VK_IMAGE_VIEW_TYPE_3D:
1949 return VK_IMAGE_TYPE_3D;
1950 default:
1951 DE_ASSERT(false);
1952 }
1953
1954 return VK_IMAGE_TYPE_2D;
1955 }
1956
1957 TestInstance *RobustnessExtsTestCase::createInstance(Context &context) const
1958 {
1959 return new RobustnessExtsTestInstance(context, m_data);
1960 }
1961
1962 tcu::TestStatus RobustnessExtsTestInstance::iterate(void)
1963 {
1964 const VkInstance instance = m_context.getInstance();
1965 const InstanceInterface &vki = m_context.getInstanceInterface();
1966 const VkDevice device = getLogicalDevice(m_context, m_data.testRobustness2, m_data.needsPipelineRobustness());
1967 const vk::DeviceInterface &vk =
1968 getDeviceInterface(m_context, m_data.testRobustness2, m_data.needsPipelineRobustness());
1969 const VkPhysicalDevice physicalDevice = chooseDevice(vki, instance, m_context.getTestContext().getCommandLine());
1970 SimpleAllocator allocator(vk, device, getPhysicalDeviceMemoryProperties(vki, physicalDevice));
1971
1972 Layout layout;
1973 generateLayout(layout, m_data);
1974
1975 // Get needed properties.
1976 VkPhysicalDeviceProperties2 properties = initVulkanStructure();
1977
1978 #ifndef CTS_USES_VULKANSC
1979 VkPhysicalDeviceRayTracingPipelinePropertiesKHR rayTracingProperties = initVulkanStructure();
1980 #endif
1981
1982 VkPhysicalDeviceRobustness2PropertiesEXT robustness2Properties = initVulkanStructure();
1983
1984 #ifndef CTS_USES_VULKANSC
1985 if (m_context.isDeviceFunctionalitySupported("VK_KHR_ray_tracing_pipeline"))
1986 {
1987 rayTracingProperties.pNext = properties.pNext;
1988 properties.pNext = &rayTracingProperties;
1989 }
1990 #endif
1991
1992 if (m_context.isDeviceFunctionalitySupported("VK_EXT_robustness2"))
1993 {
1994 robustness2Properties.pNext = properties.pNext;
1995 properties.pNext = &robustness2Properties;
1996 }
1997
1998 vki.getPhysicalDeviceProperties2(physicalDevice, &properties);
1999
2000 if (m_data.testRobustness2)
2001 {
2002 if (robustness2Properties.robustStorageBufferAccessSizeAlignment != 1 &&
2003 robustness2Properties.robustStorageBufferAccessSizeAlignment != 4)
2004 return tcu::TestStatus(QP_TEST_RESULT_FAIL, "robustStorageBufferAccessSizeAlignment must be 1 or 4");
2005
2006 if (robustness2Properties.robustUniformBufferAccessSizeAlignment < 1 ||
2007 robustness2Properties.robustUniformBufferAccessSizeAlignment > 256 ||
2008 !deIntIsPow2((int)robustness2Properties.robustUniformBufferAccessSizeAlignment))
2009 return tcu::TestStatus(QP_TEST_RESULT_FAIL,
2010 "robustUniformBufferAccessSizeAlignment must be a power of two in [1,256]");
2011 }
2012
2013 VkPipelineBindPoint bindPoint;
2014
2015 switch (m_data.stage)
2016 {
2017 case STAGE_COMPUTE:
2018 bindPoint = VK_PIPELINE_BIND_POINT_COMPUTE;
2019 break;
2020 #ifndef CTS_USES_VULKANSC
2021 case STAGE_RAYGEN:
2022 bindPoint = VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR;
2023 break;
2024 #endif
2025 default:
2026 bindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
2027 break;
2028 }
2029
2030 Move<vk::VkDescriptorSetLayout> descriptorSetLayout;
2031 Move<vk::VkDescriptorPool> descriptorPool;
2032 Move<vk::VkDescriptorSet> descriptorSet;
2033
2034 int formatBytes = tcu::getPixelSize(mapVkFormat(m_data.format));
2035 int numComponents = formatBytes / tcu::getChannelSize(mapVkFormat(m_data.format).type);
2036
2037 vector<VkDescriptorSetLayoutBinding> &bindings = layout.layoutBindings;
2038
2039 VkDescriptorPoolCreateFlags poolCreateFlags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
2040
2041 #ifndef CTS_USES_VULKANSC
2042 VkDescriptorSetLayoutCreateFlags layoutCreateFlags =
2043 m_data.pushDescriptor ? VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR : 0;
2044 #else
2045 VkDescriptorSetLayoutCreateFlags layoutCreateFlags = 0;
2046 #endif
2047
2048 // Create a layout and allocate a descriptor set for it.
2049
2050 const VkDescriptorSetLayoutCreateInfo setLayoutCreateInfo = {
2051 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO, DE_NULL,
2052
2053 layoutCreateFlags, (uint32_t)bindings.size(), bindings.empty() ? DE_NULL : bindings.data()};
2054
2055 descriptorSetLayout = vk::createDescriptorSetLayout(vk, device, &setLayoutCreateInfo);
2056
2057 vk::DescriptorPoolBuilder poolBuilder;
2058 poolBuilder.addType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1);
2059 poolBuilder.addType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, 1);
2060 poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1);
2061 poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC, 1);
2062 poolBuilder.addType(VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, 1);
2063 poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, 1);
2064 poolBuilder.addType(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1);
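// Two storage images: binding 0 is always the output image, and binding 1 may
// itself be a storage image under test.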
2065 poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 2);
2066
2067 descriptorPool = poolBuilder.build(vk, device, poolCreateFlags, 1u, DE_NULL);
2068
2069 const void *pNext = DE_NULL;
2070
2071 if (!m_data.pushDescriptor)
2072 descriptorSet = makeDescriptorSet(vk, device, *descriptorPool, *descriptorSetLayout, pNext);
2073
2074 BufferWithMemoryPtr buffer;
2075
2076 uint8_t *bufferPtr = DE_NULL;
2077 if (!m_data.nullDescriptor)
2078 {
2079 // Create a buffer to hold data for all descriptors.
2080 VkDeviceSize size = de::max((VkDeviceSize)(m_data.bufferLen ? m_data.bufferLen : 1), (VkDeviceSize)256);
2081
2082 VkBufferUsageFlags usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
2083 if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
2084 m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC)
2085 {
2086 size = deIntRoundToPow2((int)size, (int)robustness2Properties.robustUniformBufferAccessSizeAlignment);
2087 usage |= VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
2088 }
2089 else if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
2090 m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
2091 {
2092 size = deIntRoundToPow2((int)size, (int)robustness2Properties.robustStorageBufferAccessSizeAlignment);
2093 usage |= VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
2094 }
2095 else if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER)
2096 {
2097 usage |= VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT;
2098 }
2099 else if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER)
2100 {
2101 usage |= VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT;
2102 }
2103 else if (m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
2104 {
2105 size = m_data.bufferLen;
2106 }
2107
2108 buffer = BufferWithMemoryPtr(new BufferWithMemory(vk, device, allocator, makeBufferCreateInfo(size, usage),
2109 MemoryRequirement::HostVisible));
2110 bufferPtr = (uint8_t *)buffer->getAllocation().getHostPtr();
2111
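// Fill the whole allocation with a recognizable nonzero pattern, then zero the
// test range (rounded up to the robust access size alignment below); an
// out-of-bounds read that returns the pattern is then detectable as a failure.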
2112 deMemset(bufferPtr, 0x3f, (size_t)size);
2113
2114 deMemset(bufferPtr, 0, m_data.bufferLen);
2115 if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
2116 m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC)
2117 {
2118 deMemset(
2119 bufferPtr, 0,
2120 deIntRoundToPow2(m_data.bufferLen, (int)robustness2Properties.robustUniformBufferAccessSizeAlignment));
2121 }
2122 else if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
2123 m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
2124 {
2125 deMemset(
2126 bufferPtr, 0,
2127 deIntRoundToPow2(m_data.bufferLen, (int)robustness2Properties.robustStorageBufferAccessSizeAlignment));
2128 }
2129 }
2130
2131 const uint32_t queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();
2132
2133 Move<VkDescriptorSetLayout> descriptorSetLayoutR64;
2134 Move<VkDescriptorPool> descriptorPoolR64;
2135 Move<VkDescriptorSet> descriptorSetFillImage;
2136 Move<VkShaderModule> shaderModuleFillImage;
2137 Move<VkPipelineLayout> pipelineLayoutFillImage;
2138 Move<VkPipeline> pipelineFillImage;
2139
2140 Move<VkCommandPool> cmdPool = createCommandPool(vk, device, 0, queueFamilyIndex);
2141 Move<VkCommandBuffer> cmdBuffer = allocateCommandBuffer(vk, device, *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY);
2142 VkQueue queue;
2143
2144 vk.getDeviceQueue(device, queueFamilyIndex, 0, &queue);
2145
2146 const VkImageSubresourceRange barrierRange = {
2147 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
2148 0u, // uint32_t baseMipLevel;
2149 VK_REMAINING_MIP_LEVELS, // uint32_t levelCount;
2150 0u, // uint32_t baseArrayLayer;
2151 VK_REMAINING_ARRAY_LAYERS // uint32_t layerCount;
2152 };
2153
2154 VkImageMemoryBarrier preImageBarrier = {
2155 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType
2156 DE_NULL, // const void* pNext
2157 0u, // VkAccessFlags srcAccessMask
2158 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags dstAccessMask
2159 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout oldLayout
2160 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // VkImageLayout newLayout
2161 VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex
2162 VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex
2163 DE_NULL, // VkImage image
2164 barrierRange, // VkImageSubresourceRange subresourceRange;
2165 };
2166
2167 VkImageMemoryBarrier postImageBarrier = {
2168 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
2169 DE_NULL, // const void* pNext;
2170 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask;
2171 VK_ACCESS_SHADER_READ_BIT, // VkAccessFlags dstAccessMask;
2172 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // VkImageLayout oldLayout;
2173 VK_IMAGE_LAYOUT_GENERAL, // VkImageLayout newLayout;
2174 VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex;
2175 VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex;
2176 DE_NULL, // VkImage image;
2177 barrierRange, // VkImageSubresourceRange subresourceRange;
2178 };
2179
2180 vk::VkClearColorValue clearValue;
2181 clearValue.uint32[0] = 0u;
2182 clearValue.uint32[1] = 0u;
2183 clearValue.uint32[2] = 0u;
2184 clearValue.uint32[3] = 0u;
2185
2186 beginCommandBuffer(vk, *cmdBuffer, 0u);
2187
2188 typedef vk::Unique<vk::VkBufferView> BufferViewHandleUp;
2189 typedef de::SharedPtr<BufferViewHandleUp> BufferViewHandleSp;
2190 typedef de::SharedPtr<ImageWithMemory> ImageWithMemorySp;
2191 typedef de::SharedPtr<Unique<VkImageView>> VkImageViewSp;
2192
2193 vector<BufferViewHandleSp> bufferViews(1);
2194
2195 VkImageCreateFlags mutableFormatFlag = 0;
2196 // The 64-bit image tests use a view format which differs from the image.
2197 if (formatIsR64(m_data.format))
2198 mutableFormatFlag = VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
2199 VkImageCreateFlags imageCreateFlags = mutableFormatFlag;
2200 if (m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE || m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)
2201 imageCreateFlags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
2202
2203 const bool featureSampledImage =
2204 ((getPhysicalDeviceFormatProperties(vki, physicalDevice, m_data.format).optimalTilingFeatures &
2205 VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT) == VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT);
2206
2207 const VkImageUsageFlags usageSampledImage =
2208 (featureSampledImage ? VK_IMAGE_USAGE_SAMPLED_BIT : (VkImageUsageFlagBits)0);
2209
2210 const VkImageCreateInfo outputImageCreateInfo = {
2211 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
2212 DE_NULL, // const void* pNext;
2213 mutableFormatFlag, // VkImageCreateFlags flags;
2214 VK_IMAGE_TYPE_2D, // VkImageType imageType;
2215 m_data.format, // VkFormat format;
2216 {
2217 DIM, // uint32_t width;
2218 DIM, // uint32_t height;
2219 1u // uint32_t depth;
2220 }, // VkExtent3D extent;
2221 1u, // uint32_t mipLevels;
2222 1u, // uint32_t arrayLayers;
2223 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples;
2224 VK_IMAGE_TILING_OPTIMAL, // VkImageTiling tiling;
2225 VK_IMAGE_USAGE_STORAGE_BIT | usageSampledImage | VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
2226 VK_IMAGE_USAGE_TRANSFER_DST_BIT, // VkImageUsageFlags usage;
2227 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
2228 0u, // uint32_t queueFamilyIndexCount;
2229 DE_NULL, // const uint32_t* pQueueFamilyIndices;
2230 VK_IMAGE_LAYOUT_UNDEFINED // VkImageLayout initialLayout;
2231 };
2232
2233 uint32_t width = m_data.imageDim[0];
2234 uint32_t height = m_data.viewType != VK_IMAGE_VIEW_TYPE_1D && m_data.viewType != VK_IMAGE_VIEW_TYPE_1D_ARRAY ?
2235 m_data.imageDim[1] :
2236 1;
2237 uint32_t depth = m_data.viewType == VK_IMAGE_VIEW_TYPE_3D ? m_data.imageDim[2] : 1;
2238 uint32_t layers = m_data.viewType == VK_IMAGE_VIEW_TYPE_1D_ARRAY ? m_data.imageDim[1] :
2239 m_data.viewType != VK_IMAGE_VIEW_TYPE_1D && m_data.viewType != VK_IMAGE_VIEW_TYPE_2D &&
2240 m_data.viewType != VK_IMAGE_VIEW_TYPE_3D ?
2241 m_data.imageDim[2] :
2242 1;
2243
2244 const VkImageUsageFlags usageImage =
2245 (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE ? VK_IMAGE_USAGE_STORAGE_BIT :
2246 (VkImageUsageFlagBits)0);
2247
2248 const VkImageCreateInfo imageCreateInfo = {
2249 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
2250 DE_NULL, // const void* pNext;
2251 imageCreateFlags, // VkImageCreateFlags flags;
2252 imageViewTypeToImageType(m_data.viewType), // VkImageType imageType;
2253 m_data.format, // VkFormat format;
2254 {
2255 width, // uint32_t width;
2256 height, // uint32_t height;
2257 depth // uint32_t depth;
2258 }, // VkExtent3D extent;
2259 1u, // uint32_t mipLevels;
2260 layers, // uint32_t arrayLayers;
2261 m_data.samples, // VkSampleCountFlagBits samples;
2262 VK_IMAGE_TILING_OPTIMAL, // VkImageTiling tiling;
2263 usageImage | usageSampledImage | VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
2264 VK_IMAGE_USAGE_TRANSFER_DST_BIT, // VkImageUsageFlags usage;
2265 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
2266 0u, // uint32_t queueFamilyIndexCount;
2267 DE_NULL, // const uint32_t* pQueueFamilyIndices;
2268 VK_IMAGE_LAYOUT_UNDEFINED // VkImageLayout initialLayout;
2269 };
2270
2271 VkImageViewCreateInfo imageViewCreateInfo = {
2272 VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, // VkStructureType sType;
2273 DE_NULL, // const void* pNext;
2274 (VkImageViewCreateFlags)0u, // VkImageViewCreateFlags flags;
2275 DE_NULL, // VkImage image;
2276 VK_IMAGE_VIEW_TYPE_2D, // VkImageViewType viewType;
2277 m_data.format, // VkFormat format;
2278 {VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY,
2279 VK_COMPONENT_SWIZZLE_IDENTITY}, // VkComponentMapping components;
2280 {
2281 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
2282 0u, // uint32_t baseMipLevel;
2283 VK_REMAINING_MIP_LEVELS, // uint32_t levelCount;
2284 0u, // uint32_t baseArrayLayer;
2285 VK_REMAINING_ARRAY_LAYERS // uint32_t layerCount;
2286 } // VkImageSubresourceRange subresourceRange;
2287 };
2288
2289 vector<ImageWithMemorySp> images(2);
2290 vector<VkImageViewSp> imageViews(2);
2291
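// For vertex attribute fetch the reference data is fed through the vertex buffer
// rather than through a descriptor.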
2292 if (m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
2293 {
2294 uint32_t *ptr = (uint32_t *)bufferPtr;
2295 deMemcpy(ptr, layout.refData.data(), layout.refData.size());
2296 }
2297
2298 BufferWithMemoryPtr bufferImageR64;
2299 BufferWithMemoryPtr bufferOutputImageR64;
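// 64-bit images are initialized through staging buffers; the sizes below assume 8 bytes per texel.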
2300 const VkDeviceSize sizeOutputR64 = 8 * outputImageCreateInfo.extent.width * outputImageCreateInfo.extent.height *
2301 outputImageCreateInfo.extent.depth;
2302 const VkDeviceSize sizeOneLayers =
2303 8 * imageCreateInfo.extent.width * imageCreateInfo.extent.height * imageCreateInfo.extent.depth;
2304 const VkDeviceSize sizeImageR64 = sizeOneLayers * layers;
2305
2306 if (formatIsR64(m_data.format))
2307 {
2308 bufferOutputImageR64 = BufferWithMemoryPtr(new BufferWithMemory(
2309 vk, device, allocator, makeBufferCreateInfo(sizeOutputR64, VK_BUFFER_USAGE_TRANSFER_SRC_BIT),
2310 MemoryRequirement::HostVisible));
2311
2312 uint64_t *bufferUint64Ptr = (uint64_t *)bufferOutputImageR64->getAllocation().getHostPtr();
2313
2314 for (int ndx = 0; ndx < static_cast<int>(sizeOutputR64 / 8); ++ndx)
2315 {
2316 bufferUint64Ptr[ndx] = 0;
2317 }
2318 flushAlloc(vk, device, bufferOutputImageR64->getAllocation());
2319
2320 bufferImageR64 = BufferWithMemoryPtr(new BufferWithMemory(
2321 vk, device, allocator,
2322 makeBufferCreateInfo(sizeImageR64, VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT),
2323 MemoryRequirement::HostVisible));
2324
2325 for (uint32_t layerNdx = 0; layerNdx < layers; ++layerNdx)
2326 {
2327 bufferUint64Ptr = (uint64_t *)bufferImageR64->getAllocation().getHostPtr();
2328 bufferUint64Ptr = bufferUint64Ptr + ((sizeOneLayers * layerNdx) / 8);
2329
2330 for (int ndx = 0; ndx < static_cast<int>(sizeOneLayers / 8); ++ndx)
2331 {
2332 bufferUint64Ptr[ndx] = 0x1234567887654321 + ((m_data.viewType != VK_IMAGE_VIEW_TYPE_CUBE &&
2333 m_data.viewType != VK_IMAGE_VIEW_TYPE_CUBE_ARRAY) ?
2334 layerNdx :
2335 0);
2336 }
2337 }
2338 flushAlloc(vk, device, bufferImageR64->getAllocation());
2339 }
2340
2341 for (size_t b = 0; b < bindings.size(); ++b)
2342 {
2343 VkDescriptorSetLayoutBinding &binding = bindings[b];
2344
2345 if (binding.descriptorCount == 0)
2346 continue;
2347 if (b == 1 && m_data.nullDescriptor)
2348 continue;
2349
2350 DE_ASSERT(binding.descriptorCount == 1);
2351 switch (binding.descriptorType)
2352 {
2353 default:
2354 DE_ASSERT(0); // Fallthrough
2355 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
2356 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
2357 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
2358 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
2359 {
2360 uint32_t *ptr = (uint32_t *)bufferPtr;
2361 deMemcpy(ptr, layout.refData.data(), layout.refData.size());
2362 }
2363 break;
2364 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
2365 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
2366 {
2367 uint32_t *ptr = (uint32_t *)bufferPtr;
2368 deMemcpy(ptr, layout.refData.data(), layout.refData.size());
2369
2370 const vk::VkBufferViewCreateInfo viewCreateInfo = {
2371 vk::VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
2372 DE_NULL,
2373 (vk::VkBufferViewCreateFlags)0,
2374 **buffer, // buffer
2375 m_data.format, // format
2376 (vk::VkDeviceSize)0, // offset
2377 (vk::VkDeviceSize)m_data.bufferLen // range
2378 };
2379 vk::Move<vk::VkBufferView> bufferView = vk::createBufferView(vk, device, &viewCreateInfo);
2380 bufferViews[0] = BufferViewHandleSp(new BufferViewHandleUp(bufferView));
2381 }
2382 break;
2383 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
2384 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
2385 {
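// 64-bit formats cannot be sampled directly, so they are viewed through the
// equivalent two-channel 32-bit format. This relies on
// VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT, which is set above for R64 formats.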
2386 if (bindings.size() > 1 && bindings[1].descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
2387 {
2388 if (m_data.format == VK_FORMAT_R64_SINT)
2389 imageViewCreateInfo.format = VK_FORMAT_R32G32_SINT;
2390
2391 if (m_data.format == VK_FORMAT_R64_UINT)
2392 imageViewCreateInfo.format = VK_FORMAT_R32G32_UINT;
2393 }
2394
2395 if (b == 0)
2396 {
2397 images[b] = ImageWithMemorySp(
2398 new ImageWithMemory(vk, device, allocator, outputImageCreateInfo, MemoryRequirement::Any));
2399 imageViewCreateInfo.viewType = VK_IMAGE_VIEW_TYPE_2D;
2400 }
2401 else
2402 {
2403 images[b] = ImageWithMemorySp(
2404 new ImageWithMemory(vk, device, allocator, imageCreateInfo, MemoryRequirement::Any));
2405 imageViewCreateInfo.viewType = m_data.viewType;
2406 }
2407 imageViewCreateInfo.image = **images[b];
2408 imageViews[b] =
2409 VkImageViewSp(new Unique<VkImageView>(createImageView(vk, device, &imageViewCreateInfo, NULL)));
2410
2411 VkImage img = **images[b];
2412 const VkBuffer &bufferR64 = ((b == 0) ? *(*bufferOutputImageR64) : *(*(bufferImageR64)));
2413 const VkImageCreateInfo &imageInfo = ((b == 0) ? outputImageCreateInfo : imageCreateInfo);
2414 const uint32_t clearLayers = b == 0 ? 1 : layers;
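// Note: bufferR64 is only meaningful for 64-bit formats; images with other
// formats are initialized with vkCmdClearColorImage below instead.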
2415
2416 if (!formatIsR64(m_data.format))
2417 {
2418 preImageBarrier.image = img;
2419 if (b == 1)
2420 {
2421 if (formatIsFloat(m_data.format))
2422 {
2423 deMemcpy(&clearValue.float32[0], layout.refData.data(), layout.refData.size());
2424 }
2425 else if (formatIsSignedInt(m_data.format))
2426 {
2427 deMemcpy(&clearValue.int32[0], layout.refData.data(), layout.refData.size());
2428 }
2429 else
2430 {
2431 deMemcpy(&clearValue.uint32[0], layout.refData.data(), layout.refData.size());
2432 }
2433 }
2434 postImageBarrier.image = img;
2435
2436 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
2437 (VkDependencyFlags)0, 0, (const VkMemoryBarrier *)DE_NULL, 0,
2438 (const VkBufferMemoryBarrier *)DE_NULL, 1, &preImageBarrier);
2439
2440 for (unsigned int i = 0; i < clearLayers; ++i)
2441 {
2442 const VkImageSubresourceRange clearRange = {
2443 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
2444 0u, // uint32_t baseMipLevel;
2445 VK_REMAINING_MIP_LEVELS, // uint32_t levelCount;
2446 i, // uint32_t baseArrayLayer;
2447 1 // uint32_t layerCount;
2448 };
2449
2450 vk.cmdClearColorImage(*cmdBuffer, img, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clearValue, 1,
2451 &clearRange);
2452
2453 // Use the same data for all faces of a cube (array); otherwise make the value a function of the layer.
2454 if (m_data.viewType != VK_IMAGE_VIEW_TYPE_CUBE && m_data.viewType != VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)
2455 {
2456 if (formatIsFloat(m_data.format))
2457 clearValue.float32[0] += 1;
2458 else if (formatIsSignedInt(m_data.format))
2459 clearValue.int32[0] += 1;
2460 else
2461 clearValue.uint32[0] += 1;
2462 }
2463 }
2464 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
2465 (VkDependencyFlags)0, 0, (const VkMemoryBarrier *)DE_NULL, 0,
2466 (const VkBufferMemoryBarrier *)DE_NULL, 1, &postImageBarrier);
2467 }
2468 else
2469 {
2470 if ((m_data.samples > VK_SAMPLE_COUNT_1_BIT) && (b == 1))
2471 {
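// Multisampled images cannot be initialized with a buffer copy, so run the
// fillShader compute pipeline to write every sample instead.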
2472 const VkImageSubresourceRange subresourceRange =
2473 makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, clearLayers);
2474 const VkImageMemoryBarrier imageBarrierPre =
2475 makeImageMemoryBarrier(0, VK_ACCESS_SHADER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED,
2476 VK_IMAGE_LAYOUT_GENERAL, img, subresourceRange);
2477 const VkImageMemoryBarrier imageBarrierPost =
2478 makeImageMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT,
2479 VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_GENERAL, img, subresourceRange);
2480
2481 descriptorSetLayoutR64 =
2482 DescriptorSetLayoutBuilder()
2483 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
2484 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
2485 .build(vk, device);
2486
2487 descriptorPoolR64 = DescriptorPoolBuilder()
2488 .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1)
2489 .addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1)
2490 .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 2u);
2491
2492 descriptorSetFillImage = makeDescriptorSet(vk, device, *descriptorPoolR64, *descriptorSetLayoutR64);
2493
2494 shaderModuleFillImage =
2495 createShaderModule(vk, device, m_context.getBinaryCollection().get("fillShader"), 0);
2496 pipelineLayoutFillImage = makePipelineLayout(vk, device, *descriptorSetLayoutR64);
2497 pipelineFillImage =
2498 makeComputePipeline(vk, device, *pipelineLayoutFillImage, *shaderModuleFillImage);
2499
2500 const VkDescriptorImageInfo descResultImageInfo =
2501 makeDescriptorImageInfo(DE_NULL, **imageViews[b], VK_IMAGE_LAYOUT_GENERAL);
2502 const VkDescriptorBufferInfo descResultBufferInfo =
2503 makeDescriptorBufferInfo(bufferR64, 0, sizeImageR64);
2504
2505 DescriptorSetUpdateBuilder()
2506 .writeSingle(*descriptorSetFillImage, DescriptorSetUpdateBuilder::Location::binding(0u),
2507 VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descResultImageInfo)
2508 .writeSingle(*descriptorSetFillImage, DescriptorSetUpdateBuilder::Location::binding(1u),
2509 VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &descResultBufferInfo)
2510 .update(vk, device);
2511
2512 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
2513 (VkDependencyFlags)0, 0, (const VkMemoryBarrier *)DE_NULL, 0,
2514 (const VkBufferMemoryBarrier *)DE_NULL, 1, &imageBarrierPre);
2515
2516 vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineFillImage);
2517 vk.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayoutFillImage, 0u,
2518 1u, &(*descriptorSetFillImage), 0u, DE_NULL);
2519
2520 vk.cmdDispatch(*cmdBuffer, imageInfo.extent.width, imageInfo.extent.height, clearLayers);
2521
2522 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
2523 VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, (VkDependencyFlags)0, 0,
2524 (const VkMemoryBarrier *)DE_NULL, 0, (const VkBufferMemoryBarrier *)DE_NULL,
2525 1, &imageBarrierPost);
2526 }
2527 else
2528 {
2529 VkDeviceSize size = ((b == 0) ? sizeOutputR64 : sizeImageR64);
2530 const vector<VkBufferImageCopy> bufferImageCopy(
2531 1, makeBufferImageCopy(imageInfo.extent, makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT,
2532 0, 0, clearLayers)));
2533
2534 copyBufferToImage(vk, *cmdBuffer, bufferR64, size, bufferImageCopy, VK_IMAGE_ASPECT_COLOR_BIT, 1,
2535 clearLayers, img, VK_IMAGE_LAYOUT_GENERAL, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT);
2536 }
2537 }
2538 }
2539 break;
2540 }
2541 }
2542
2543 const VkSamplerCreateInfo samplerParams = {
2544 VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO, // VkStructureType sType;
2545 DE_NULL, // const void* pNext;
2546 0, // VkSamplerCreateFlags flags;
2547 VK_FILTER_NEAREST, // VkFilter magFilter;
2548 VK_FILTER_NEAREST, // VkFilter minFilter;
2549 VK_SAMPLER_MIPMAP_MODE_NEAREST, // VkSamplerMipmapMode mipmapMode;
2550 VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER, // VkSamplerAddressMode addressModeU;
2551 VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER, // VkSamplerAddressMode addressModeV;
2552 VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER, // VkSamplerAddressMode addressModeW;
2553 0.0f, // float mipLodBias;
2554 VK_FALSE, // VkBool32 anisotropyEnable;
2555 1.0f, // float maxAnisotropy;
2556 VK_FALSE, // VkBool32 compareEnable;
2557 VK_COMPARE_OP_ALWAYS, // VkCompareOp compareOp;
2558 0.0f, // float minLod;
2559 0.0f, // float maxLod;
2560 formatIsFloat(m_data.format) ? VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK :
2561 VK_BORDER_COLOR_INT_TRANSPARENT_BLACK, // VkBorderColor borderColor;
2562 VK_FALSE // VkBool32 unnormalizedCoordinates;
2563 };
2564
2565 Move<VkSampler> sampler(createSampler(vk, device, &samplerParams));
2566
2567 // Flush modified memory.
2568 if (!m_data.nullDescriptor)
2569 flushAlloc(vk, device, buffer->getAllocation());
2570
2571 const VkPipelineLayoutCreateInfo pipelineLayoutCreateInfo = {
2572 VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, // sType
2573 DE_NULL, // pNext
2574 (VkPipelineLayoutCreateFlags)0,
2575 1u, // setLayoutCount
2576 &descriptorSetLayout.get(), // pSetLayouts
2577 0u, // pushConstantRangeCount
2578 DE_NULL, // pPushConstantRanges
2579 };
2580
2581 Move<VkPipelineLayout> pipelineLayout = createPipelineLayout(vk, device, &pipelineLayoutCreateInfo, NULL);
2582
2583 BufferWithMemoryPtr copyBuffer;
2584 copyBuffer = BufferWithMemoryPtr(new BufferWithMemory(
2585 vk, device, allocator, makeBufferCreateInfo(DIM * DIM * 16, VK_BUFFER_USAGE_TRANSFER_DST_BIT),
2586 MemoryRequirement::HostVisible));
2587
2588 {
2589 vector<VkDescriptorBufferInfo> bufferInfoVec(2);
2590 vector<VkDescriptorImageInfo> imageInfoVec(2);
2591 vector<VkBufferView> bufferViewVec(2);
2592 vector<VkWriteDescriptorSet> writesBeforeBindVec(0);
2593 int vecIndex = 0;
2594 int numDynamic = 0;
2595
2596 #ifndef CTS_USES_VULKANSC
2597 vector<VkDescriptorUpdateTemplateEntry> imgTemplateEntriesBefore, bufTemplateEntriesBefore,
2598 texelBufTemplateEntriesBefore;
2599 #endif
2600
2601 for (size_t b = 0; b < bindings.size(); ++b)
2602 {
2603 VkDescriptorSetLayoutBinding &binding = bindings[b];
2604 // Fill in the descriptor info and the write for this binding.
2605 if (binding.descriptorCount > 0)
2606 {
2608 switch (binding.descriptorType)
2609 {
2610 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
2611 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
2612 // Image descriptors: binding 0 is the output image, binding 1 the descriptor under test.
2613 if (b == 1 && m_data.nullDescriptor)
2614 imageInfoVec[vecIndex] = makeDescriptorImageInfo(*sampler, DE_NULL, VK_IMAGE_LAYOUT_GENERAL);
2615 else
2616 imageInfoVec[vecIndex] =
2617 makeDescriptorImageInfo(*sampler, **imageViews[b], VK_IMAGE_LAYOUT_GENERAL);
2618 break;
2619 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
2620 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
2621 if (b == 1 && m_data.nullDescriptor)
2622 bufferViewVec[vecIndex] = DE_NULL;
2623 else
2624 bufferViewVec[vecIndex] = **bufferViews[0];
2625 break;
2626 default:
2627 // Other descriptor types.
2628 if (b == 1 && m_data.nullDescriptor)
2629 bufferInfoVec[vecIndex] = makeDescriptorBufferInfo(DE_NULL, 0, VK_WHOLE_SIZE);
2630 else
2631 bufferInfoVec[vecIndex] = makeDescriptorBufferInfo(**buffer, 0, layout.refData.size());
2632 break;
2633 }
2634
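// All three resource pointers are set; the implementation only reads the one
// matching descriptorType and ignores the others.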
2635 VkWriteDescriptorSet w = {
2636 VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, // sType
2637 DE_NULL, // pNext
2638 m_data.pushDescriptor ? DE_NULL : *descriptorSet, // dstSet
2639 (uint32_t)b, // binding
2640 0, // dstArrayElement
2641 1u, // descriptorCount
2642 binding.descriptorType, // descriptorType
2643 &imageInfoVec[vecIndex], // pImageInfo
2644 &bufferInfoVec[vecIndex], // pBufferInfo
2645 &bufferViewVec[vecIndex], // pTexelBufferView
2646 };
2647
2648 #ifndef CTS_USES_VULKANSC
2649 VkDescriptorUpdateTemplateEntry templateEntry = {
2650 (uint32_t)b, // uint32_t dstBinding;
2651 0, // uint32_t dstArrayElement;
2652 1u, // uint32_t descriptorCount;
2653 binding.descriptorType, // VkDescriptorType descriptorType;
2654 0, // size_t offset;
2655 0, // size_t stride;
2656 };
2657
2658 switch (binding.descriptorType)
2659 {
2660 default:
2661 DE_ASSERT(0); // Fallthrough
2662 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
2663 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
2664 templateEntry.offset = vecIndex * sizeof(VkDescriptorImageInfo);
2665 imgTemplateEntriesBefore.push_back(templateEntry);
2666 break;
2667 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
2668 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
2669 templateEntry.offset = vecIndex * sizeof(VkBufferView);
2670 texelBufTemplateEntriesBefore.push_back(templateEntry);
2671 break;
2672 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
2673 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
2674 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
2675 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
2676 templateEntry.offset = vecIndex * sizeof(VkDescriptorBufferInfo);
2677 bufTemplateEntriesBefore.push_back(templateEntry);
2678 break;
2679 }
2680 #endif
2681
2682 vecIndex++;
2683
2684 writesBeforeBindVec.push_back(w);
2685
2686 // Count the number of dynamic descriptors in this set.
2687 if (binding.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
2688 binding.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
2689 {
2690 numDynamic++;
2691 }
2692 }
2693 }
2694
2695 // Make zeros have at least one element so &zeros[0] works
2696 vector<uint32_t> zeros(de::max(1, numDynamic));
2697 deMemset(&zeros[0], 0, numDynamic * sizeof(uint32_t));
2698
2699 // Select between vkUpdateDescriptorSets and vkUpdateDescriptorSetWithTemplate, as chosen by the test configuration.
2700 if (m_data.useTemplate)
2701 {
2702 #ifndef CTS_USES_VULKANSC
2703 VkDescriptorUpdateTemplateCreateInfo templateCreateInfo = {
2704 VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO, // VkStructureType sType;
2705 NULL, // const void* pNext;
2706 0, // VkDescriptorUpdateTemplateCreateFlags flags;
2707 0, // uint32_t descriptorUpdateEntryCount;
2708 DE_NULL, // const VkDescriptorUpdateTemplateEntry* pDescriptorUpdateEntries;
2709 m_data.pushDescriptor ?
2710 VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR :
2711 VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET, // VkDescriptorUpdateTemplateType templateType;
2712 descriptorSetLayout.get(), // VkDescriptorSetLayout descriptorSetLayout;
2713 bindPoint, // VkPipelineBindPoint pipelineBindPoint;
2714 *pipelineLayout, // VkPipelineLayout pipelineLayout;
2715 0, // uint32_t set;
2716 };
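// descriptorUpdateEntryCount and pDescriptorUpdateEntries are patched below,
// once per descriptor class that actually has entries.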
2717
2718 void *templateVectorData[] = {
2719 imageInfoVec.data(),
2720 bufferInfoVec.data(),
2721 bufferViewVec.data(),
2722 };
2723
2724 vector<VkDescriptorUpdateTemplateEntry> *templateVectorsBefore[] = {
2725 &imgTemplateEntriesBefore,
2726 &bufTemplateEntriesBefore,
2727 &texelBufTemplateEntriesBefore,
2728 };
2729
2730 if (m_data.pushDescriptor)
2731 {
2732 for (size_t i = 0; i < DE_LENGTH_OF_ARRAY(templateVectorsBefore); ++i)
2733 {
2734 if (templateVectorsBefore[i]->size())
2735 {
2736 templateCreateInfo.descriptorUpdateEntryCount = (uint32_t)templateVectorsBefore[i]->size();
2737 templateCreateInfo.pDescriptorUpdateEntries = templateVectorsBefore[i]->data();
2738 Move<VkDescriptorUpdateTemplate> descriptorUpdateTemplate =
2739 createDescriptorUpdateTemplate(vk, device, &templateCreateInfo, NULL);
2740 vk.cmdPushDescriptorSetWithTemplateKHR(*cmdBuffer, *descriptorUpdateTemplate, *pipelineLayout,
2741 0, templateVectorData[i]);
2742 }
2743 }
2744 }
2745 else
2746 {
2747 for (size_t i = 0; i < DE_LENGTH_OF_ARRAY(templateVectorsBefore); ++i)
2748 {
2749 if (templateVectorsBefore[i]->size())
2750 {
2751 templateCreateInfo.descriptorUpdateEntryCount = (uint32_t)templateVectorsBefore[i]->size();
2752 templateCreateInfo.pDescriptorUpdateEntries = templateVectorsBefore[i]->data();
2753 Move<VkDescriptorUpdateTemplate> descriptorUpdateTemplate =
2754 createDescriptorUpdateTemplate(vk, device, &templateCreateInfo, NULL);
2755 vk.updateDescriptorSetWithTemplate(device, descriptorSet.get(), *descriptorUpdateTemplate,
2756 templateVectorData[i]);
2757 }
2758 }
2759
2760 vk.cmdBindDescriptorSets(*cmdBuffer, bindPoint, *pipelineLayout, 0, 1, &descriptorSet.get(), numDynamic,
2761 &zeros[0]);
2762 }
2763 #endif
2764 }
2765 else
2766 {
2767 if (m_data.pushDescriptor)
2768 {
2769 #ifndef CTS_USES_VULKANSC
2770 if (writesBeforeBindVec.size())
2771 {
2772 vk.cmdPushDescriptorSetKHR(*cmdBuffer, bindPoint, *pipelineLayout, 0,
2773 (uint32_t)writesBeforeBindVec.size(), &writesBeforeBindVec[0]);
2774 }
2775 #endif
2776 }
2777 else
2778 {
2779 if (writesBeforeBindVec.size())
2780 {
2781 vk.updateDescriptorSets(device, (uint32_t)writesBeforeBindVec.size(), &writesBeforeBindVec[0], 0,
2782 NULL);
2783 }
2784
2785 vk.cmdBindDescriptorSets(*cmdBuffer, bindPoint, *pipelineLayout, 0, 1, &descriptorSet.get(), numDynamic,
2786 &zeros[0]);
2787 }
2788 }
2789 }
2790
2791 #ifndef CTS_USES_VULKANSC
2792 // For graphics pipeline library cases.
2793 Move<VkPipeline> vertexInputLib;
2794 Move<VkPipeline> preRasterShaderLib;
2795 Move<VkPipeline> fragShaderLib;
2796 Move<VkPipeline> fragOutputLib;
2797 #endif // CTS_USES_VULKANSC
2798
2799 Move<VkPipeline> pipeline;
2800 Move<VkRenderPass> renderPass;
2801 Move<VkFramebuffer> framebuffer;
2802
2803 #ifndef CTS_USES_VULKANSC
2804 BufferWithMemoryPtr sbtBuffer;
2805 const auto sbtFlags = (VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_SHADER_BINDING_TABLE_BIT_KHR |
2806 VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT);
2807 VkStridedDeviceAddressRegionKHR rgenSBTRegion = makeStridedDeviceAddressRegionKHR(0ull, 0, 0);
2808 VkStridedDeviceAddressRegionKHR missSBTRegion = makeStridedDeviceAddressRegionKHR(0ull, 0, 0);
2809 VkStridedDeviceAddressRegionKHR hitSBTRegion = makeStridedDeviceAddressRegionKHR(0ull, 0, 0);
2810 VkStridedDeviceAddressRegionKHR callSBTRegion = makeStridedDeviceAddressRegionKHR(0ull, 0, 0);
2811 const auto sgHandleSize = rayTracingProperties.shaderGroupHandleSize;
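// Only the raygen region is populated below; leaving the miss/hit/callable
// regions null is valid here because the raygen shader does not trace any rays.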
2812 #endif // CTS_USES_VULKANSC
2813
2814 if (m_data.stage == STAGE_COMPUTE)
2815 {
2816 const Unique<VkShaderModule> shader(
2817 createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0));
2818
2819 const VkPipelineShaderStageCreateInfo pipelineShaderStageParams = {
2820 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // VkStructureType sType;
2821 nullptr, // const void* pNext;
2822 static_cast<VkPipelineShaderStageCreateFlags>(0u), // VkPipelineShaderStageCreateFlags flags;
2823 VK_SHADER_STAGE_COMPUTE_BIT, // VkShaderStageFlagBits stage;
2824 *shader, // VkShaderModule module;
2825 "main", // const char* pName;
2826 nullptr, // const VkSpecializationInfo* pSpecializationInfo;
2827 };
2828
2829 VkComputePipelineCreateInfo pipelineCreateInfo = {
2830 VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO, // VkStructureType sType;
2831 nullptr, // const void* pNext;
2832 static_cast<VkPipelineCreateFlags>(0u), // VkPipelineCreateFlags flags;
2833 pipelineShaderStageParams, // VkPipelineShaderStageCreateInfo stage;
2834 *pipelineLayout, // VkPipelineLayout layout;
2835 DE_NULL, // VkPipeline basePipelineHandle;
2836 0, // int32_t basePipelineIndex;
2837 };
2838
2839 #ifndef CTS_USES_VULKANSC
2840 VkPipelineRobustnessCreateInfoEXT pipelineRobustnessInfo;
2841 if (m_data.needsPipelineRobustness())
2842 {
2843 pipelineRobustnessInfo = getPipelineRobustnessInfo(m_data.testRobustness2, m_data.descriptorType);
2844 pipelineCreateInfo.pNext = &pipelineRobustnessInfo;
2845 }
2846 #endif
2847
2848 pipeline = createComputePipeline(vk, device, DE_NULL, &pipelineCreateInfo);
2849 }
2850 #ifndef CTS_USES_VULKANSC
2851 else if (m_data.stage == STAGE_RAYGEN)
2852 {
2853 const Unique<VkShaderModule> shader(
2854 createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0));
2855
2856 const VkPipelineShaderStageCreateInfo shaderCreateInfo = {
2857 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
2858 nullptr,
2859 0u, // flags
2860 VK_SHADER_STAGE_RAYGEN_BIT_KHR, // stage
2861 *shader, // shader
2862 "main",
2863 nullptr, // pSpecializationInfo
2864 };
2865
2866 VkRayTracingShaderGroupCreateInfoKHR group = {
2867 VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_KHR,
2868 nullptr,
2869 VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_KHR, // type
2870 0, // generalShader
2871 VK_SHADER_UNUSED_KHR, // closestHitShader
2872 VK_SHADER_UNUSED_KHR, // anyHitShader
2873 VK_SHADER_UNUSED_KHR, // intersectionShader
2874 nullptr, // pShaderGroupCaptureReplayHandle
2875 };
2876
2877 VkRayTracingPipelineCreateInfoKHR pipelineCreateInfo = {
2878 VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_KHR, // sType
2879 nullptr, // pNext
2880 0u, // flags
2881 1u, // stageCount
2882 &shaderCreateInfo, // pStages
2883 1u, // groupCount
2884 &group, // pGroups
2885 0, // maxPipelineRayRecursionDepth
2886 nullptr, // pLibraryInfo
2887 nullptr, // pLibraryInterface
2888 nullptr, // pDynamicState
2889 *pipelineLayout, // layout
2890 (vk::VkPipeline)0, // basePipelineHandle
2891 0u, // basePipelineIndex
2892 };
2893
2894 VkPipelineRobustnessCreateInfoEXT pipelineRobustnessInfo;
2895 if (m_data.needsPipelineRobustness())
2896 {
2897 pipelineRobustnessInfo = getPipelineRobustnessInfo(m_data.testRobustness2, m_data.descriptorType);
2898 pipelineCreateInfo.pNext = &pipelineRobustnessInfo;
2899 }
2900
2901 pipeline = createRayTracingPipelineKHR(vk, device, VK_NULL_HANDLE, VK_NULL_HANDLE, &pipelineCreateInfo);
2902
2903 sbtBuffer = BufferWithMemoryPtr(
2904 new BufferWithMemory(vk, device, allocator, makeBufferCreateInfo(sgHandleSize, sbtFlags),
2905 (MemoryRequirement::HostVisible | MemoryRequirement::DeviceAddress)));
2906
2907 uint32_t *ptr = (uint32_t *)sbtBuffer->getAllocation().getHostPtr();
2908 invalidateAlloc(vk, device, sbtBuffer->getAllocation());
2909
2910 vk.getRayTracingShaderGroupHandlesKHR(device, *pipeline, 0, 1, sgHandleSize, ptr);
2911
2912 const VkBufferDeviceAddressInfo deviceAddressInfo{
2913 VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO, // VkStructureType sType
2914 nullptr, // const void* pNext
2915 sbtBuffer->get() // VkBuffer buffer;
2916 };
2917 const auto sbtAddress = vk.getBufferDeviceAddress(device, &deviceAddressInfo);
2918 rgenSBTRegion = makeStridedDeviceAddressRegionKHR(sbtAddress, sgHandleSize, sgHandleSize);
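// With a single raygen group, the region's stride and size are both one shader group handle.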
2919 }
2920 #endif
2921 else
2922 {
2923 const VkSubpassDescription subpassDesc = {
2924 (VkSubpassDescriptionFlags)0, // VkSubpassDescriptionFlags flags
2925 VK_PIPELINE_BIND_POINT_GRAPHICS, // VkPipelineBindPoint pipelineBindPoint
2926 0u, // uint32_t inputAttachmentCount
2927 DE_NULL, // const VkAttachmentReference* pInputAttachments
2928 0u, // uint32_t colorAttachmentCount
2929 DE_NULL, // const VkAttachmentReference* pColorAttachments
2930 DE_NULL, // const VkAttachmentReference* pResolveAttachments
2931 DE_NULL, // const VkAttachmentReference* pDepthStencilAttachment
2932 0u, // uint32_t preserveAttachmentCount
2933 DE_NULL // const uint32_t* pPreserveAttachments
2934 };
2935
2936 const std::vector<VkSubpassDependency> subpassDependencies = {
2937 makeSubpassDependency(VK_SUBPASS_EXTERNAL, // uint32_t srcSubpass
2938 0, // uint32_t dstSubpass
2939 VK_PIPELINE_STAGE_TRANSFER_BIT, // VkPipelineStageFlags srcStageMask
2940 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, // VkPipelineStageFlags dstStageMask
2941 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask
2942 VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, // dstAccessMask
2943 VK_DEPENDENCY_BY_REGION_BIT // VkDependencyFlags dependencyFlags
2944 ),
2945 makeSubpassDependency(0, 0, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
2946 ((m_data.stage == STAGE_VERTEX) ? VK_PIPELINE_STAGE_VERTEX_SHADER_BIT :
2947 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT),
2948 VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT, VK_ACCESS_SHADER_WRITE_BIT, 0u),
2949 };
2950
2951 const VkRenderPassCreateInfo renderPassParams = {
2952 VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, // VkStructureType sType
2953 DE_NULL, // const void* pNext
2954 (VkRenderPassCreateFlags)0, // VkRenderPassCreateFlags flags
2955 0u, // uint32_t attachmentCount
2956 DE_NULL, // const VkAttachmentDescription* pAttachments
2957 1u, // uint32_t subpassCount
2958 &subpassDesc, // const VkSubpassDescription* pSubpasses
2959 de::sizeU32(subpassDependencies), // uint32_t dependencyCount
2960 de::dataOrNull(subpassDependencies), // const VkSubpassDependency* pDependencies
2961 };
2962
2963 renderPass = createRenderPass(vk, device, &renderPassParams);
2964
2965 const vk::VkFramebufferCreateInfo framebufferParams = {
2966 vk::VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, // sType
2967 DE_NULL, // pNext
2968 (vk::VkFramebufferCreateFlags)0,
2969 *renderPass, // renderPass
2970 0u, // attachmentCount
2971 DE_NULL, // pAttachments
2972 DIM, // width
2973 DIM, // height
2974 1u, // layers
2975 };
2976
2977 framebuffer = createFramebuffer(vk, device, &framebufferParams);
2978
2979 const VkVertexInputBindingDescription vertexInputBindingDescription = {
2980 0u, // uint32_t binding
2981 (uint32_t)formatBytes, // uint32_t stride
2982 VK_VERTEX_INPUT_RATE_VERTEX, // VkVertexInputRate inputRate
2983 };
2984
2985 const VkVertexInputAttributeDescription vertexInputAttributeDescription = {
2986 0u, // uint32_t location
2987 0u, // uint32_t binding
2988 m_data.format, // VkFormat format
2989 0u // uint32_t offset
2990 };
2991
2992 uint32_t numAttribs = m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH ? 1u : 0u;
2993
2994 VkPipelineVertexInputStateCreateInfo vertexInputStateCreateInfo = {
2995 VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, // VkStructureType sType;
2996 DE_NULL, // const void* pNext;
2997 (VkPipelineVertexInputStateCreateFlags)0, // VkPipelineVertexInputStateCreateFlags flags;
2998 numAttribs, // uint32_t vertexBindingDescriptionCount;
2999 &vertexInputBindingDescription, // const VkVertexInputBindingDescription* pVertexBindingDescriptions;
3000 numAttribs, // uint32_t vertexAttributeDescriptionCount;
3001 &vertexInputAttributeDescription // const VkVertexInputAttributeDescription* pVertexAttributeDescriptions;
3002 };
3003
3004 const VkPipelineInputAssemblyStateCreateInfo inputAssemblyStateCreateInfo = {
3005 VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, // VkStructureType sType;
3006 DE_NULL, // const void* pNext;
3007 (VkPipelineInputAssemblyStateCreateFlags)0, // VkPipelineInputAssemblyStateCreateFlags flags;
3008 (m_data.stage == STAGE_VERTEX) ? VK_PRIMITIVE_TOPOLOGY_POINT_LIST :
3009 VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP, // VkPrimitiveTopology topology;
3010 VK_FALSE // VkBool32 primitiveRestartEnable;
3011 };
3012
3013 const VkPipelineRasterizationStateCreateInfo rasterizationStateCreateInfo = {
3014 VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO, // VkStructureType sType;
3015 DE_NULL, // const void* pNext;
3016 (VkPipelineRasterizationStateCreateFlags)0, // VkPipelineRasterizationStateCreateFlags flags;
3017 VK_FALSE, // VkBool32 depthClampEnable;
3018 (m_data.stage == STAGE_VERTEX) ? VK_TRUE : VK_FALSE, // VkBool32 rasterizerDiscardEnable;
3019 VK_POLYGON_MODE_FILL, // VkPolygonMode polygonMode;
3020 VK_CULL_MODE_NONE, // VkCullModeFlags cullMode;
3021 VK_FRONT_FACE_CLOCKWISE, // VkFrontFace frontFace;
3022 VK_FALSE, // VkBool32 depthBiasEnable;
3023 0.0f, // float depthBiasConstantFactor;
3024 0.0f, // float depthBiasClamp;
3025 0.0f, // float depthBiasSlopeFactor;
3026 1.0f // float lineWidth;
3027 };
3028
3029 const VkPipelineMultisampleStateCreateInfo multisampleStateCreateInfo = {
3030 VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO, // VkStructureType sType
3031 DE_NULL, // const void* pNext
3032 0u, // VkPipelineMultisampleStateCreateFlags flags
3033 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits rasterizationSamples
3034 VK_FALSE, // VkBool32 sampleShadingEnable
3035 1.0f, // float minSampleShading
3036 DE_NULL, // const VkSampleMask* pSampleMask
3037 VK_FALSE, // VkBool32 alphaToCoverageEnable
3038 VK_FALSE // VkBool32 alphaToOneEnable
3039 };
3040
3041 VkViewport viewport = makeViewport(DIM, DIM);
3042 VkRect2D scissor = makeRect2D(DIM, DIM);
3043
3044 const VkPipelineViewportStateCreateInfo viewportStateCreateInfo = {
3045 VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO, // VkStructureType sType
3046 DE_NULL, // const void* pNext
3047 (VkPipelineViewportStateCreateFlags)0, // VkPipelineViewportStateCreateFlags flags
3048 1u, // uint32_t viewportCount
3049 &viewport, // const VkViewport* pViewports
3050 1u, // uint32_t scissorCount
3051 &scissor // const VkRect2D* pScissors
3052 };
3053
3054 Move<VkShaderModule> fs;
3055 Move<VkShaderModule> vs;
3056
3057 uint32_t numStages;
3058 if (m_data.stage == STAGE_VERTEX)
3059 {
3060 vs = createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0);
3061 fs = createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0); // bogus module: never bound, since numStages is 1 below
3062 numStages = 1u;
3063 }
3064 else
3065 {
3066 vs = createShaderModule(vk, device, m_context.getBinaryCollection().get("vert"), 0);
3067 fs = createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0);
3068 numStages = 2u;
3069 }
3070
3071 VkPipelineShaderStageCreateInfo shaderCreateInfo[2] = {
3072 {
3073 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, DE_NULL, (VkPipelineShaderStageCreateFlags)0,
3074 VK_SHADER_STAGE_VERTEX_BIT, // stage
3075 *vs, // shader
3076 "main",
3077 DE_NULL, // pSpecializationInfo
3078 },
3079 {
3080 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, DE_NULL, (VkPipelineShaderStageCreateFlags)0,
3081 VK_SHADER_STAGE_FRAGMENT_BIT, // stage
3082 *fs, // shader
3083 "main",
3084 DE_NULL, // pSpecializationInfo
3085 }};
3086
3087 // Base structure with everything for the monolithic case.
3088 VkGraphicsPipelineCreateInfo graphicsPipelineCreateInfo = {
3089 VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO, // VkStructureType sType;
3090 nullptr, // const void* pNext;
3091 0u, // VkPipelineCreateFlags flags;
3092 numStages, // uint32_t stageCount;
3093 &shaderCreateInfo[0], // const VkPipelineShaderStageCreateInfo* pStages;
3094 &vertexInputStateCreateInfo, // const VkPipelineVertexInputStateCreateInfo* pVertexInputState;
3095 &inputAssemblyStateCreateInfo, // const VkPipelineInputAssemblyStateCreateInfo* pInputAssemblyState;
3096 nullptr, // const VkPipelineTessellationStateCreateInfo* pTessellationState;
3097 &viewportStateCreateInfo, // const VkPipelineViewportStateCreateInfo* pViewportState;
3098 &rasterizationStateCreateInfo, // const VkPipelineRasterizationStateCreateInfo* pRasterizationState;
3099 &multisampleStateCreateInfo, // const VkPipelineMultisampleStateCreateInfo* pMultisampleState;
3100 nullptr, // const VkPipelineDepthStencilStateCreateInfo* pDepthStencilState;
3101 nullptr, // const VkPipelineColorBlendStateCreateInfo* pColorBlendState;
3102 nullptr, // const VkPipelineDynamicStateCreateInfo* pDynamicState;
3103 pipelineLayout.get(), // VkPipelineLayout layout;
3104 renderPass.get(), // VkRenderPass renderPass;
3105 0u, // uint32_t subpass;
3106 VK_NULL_HANDLE, // VkPipeline basePipelineHandle;
3107 0 // int basePipelineIndex;
3108 };
3109
3110 #ifndef CTS_USES_VULKANSC
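// When testing pipeline robustness, VkPipelineRobustnessCreateInfoEXT is chained either into the whole pipeline (for vertex attribute fetch) or into the specific shader stage under test.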
3111 VkPipelineRobustnessCreateInfoEXT pipelineRobustnessInfo;
3112 if (m_data.needsPipelineRobustness())
3113 {
3114 pipelineRobustnessInfo = getPipelineRobustnessInfo(m_data.testRobustness2, m_data.descriptorType);
3115
3116 if (m_data.pipelineRobustnessCase == PipelineRobustnessCase::ENABLED_MONOLITHIC)
3117 {
3118 if (m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
3119 graphicsPipelineCreateInfo.pNext = &pipelineRobustnessInfo;
3120 else if (m_data.stage == STAGE_VERTEX)
3121 shaderCreateInfo[0].pNext = &pipelineRobustnessInfo;
3122 else
3123 shaderCreateInfo[1].pNext = &pipelineRobustnessInfo;
3124 }
3125 else // Fast or Optimized graphics pipeline libraries.
3126 {
3127 VkPipelineCreateFlags libCreationFlags = VK_PIPELINE_CREATE_LIBRARY_BIT_KHR;
3128 VkPipelineCreateFlags linkFlags = 0u;
3129
3130 if (m_data.pipelineRobustnessCase == PipelineRobustnessCase::ENABLED_OPTIMIZED_GPL)
3131 {
3132 libCreationFlags |= VK_PIPELINE_CREATE_RETAIN_LINK_TIME_OPTIMIZATION_INFO_BIT_EXT;
3133 linkFlags |= VK_PIPELINE_CREATE_LINK_TIME_OPTIMIZATION_BIT_EXT;
3134 }
3135
3136 // Vertex input state library. When testing robust vertex shaders (rather than vertex attribute fetch), the vertex input state is merged into the pre-rasterization library below instead.
3137 if (m_data.stage != STAGE_VERTEX || m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
3138 {
3139 VkGraphicsPipelineLibraryCreateInfoEXT vertexInputLibInfo = initVulkanStructure();
3140 VkGraphicsPipelineCreateInfo vertexInputPipelineInfo = initVulkanStructure();
3141
3142 vertexInputPipelineInfo.pNext = &vertexInputLibInfo;
3143 if (m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
3144 vertexInputLibInfo.pNext = &pipelineRobustnessInfo;
3145
3146 vertexInputLibInfo.flags |= VK_GRAPHICS_PIPELINE_LIBRARY_VERTEX_INPUT_INTERFACE_BIT_EXT;
3147 vertexInputPipelineInfo.flags = libCreationFlags;
3148 vertexInputPipelineInfo.pVertexInputState = graphicsPipelineCreateInfo.pVertexInputState;
3149 vertexInputPipelineInfo.pInputAssemblyState = graphicsPipelineCreateInfo.pInputAssemblyState;
3150
3151 vertexInputLib = createGraphicsPipeline(vk, device, VK_NULL_HANDLE, &vertexInputPipelineInfo);
3152 }
3153
3154 // Pre-rasterization shader state library.
3155 {
3156 VkGraphicsPipelineLibraryCreateInfoEXT preRasterShaderLibInfo = initVulkanStructure();
3157 preRasterShaderLibInfo.flags |= VK_GRAPHICS_PIPELINE_LIBRARY_PRE_RASTERIZATION_SHADERS_BIT_EXT;
3158
3159 VkGraphicsPipelineCreateInfo preRasterShaderPipelineInfo =
3160 initVulkanStructure(&preRasterShaderLibInfo);
3161 preRasterShaderPipelineInfo.flags = libCreationFlags;
3162 preRasterShaderPipelineInfo.layout = graphicsPipelineCreateInfo.layout;
3163 preRasterShaderPipelineInfo.pViewportState = graphicsPipelineCreateInfo.pViewportState;
3164 preRasterShaderPipelineInfo.pRasterizationState = graphicsPipelineCreateInfo.pRasterizationState;
3165 preRasterShaderPipelineInfo.pTessellationState = graphicsPipelineCreateInfo.pTessellationState;
3166 preRasterShaderPipelineInfo.renderPass = graphicsPipelineCreateInfo.renderPass;
3167 preRasterShaderPipelineInfo.subpass = graphicsPipelineCreateInfo.subpass;
3168
3169 VkPipelineShaderStageCreateInfo vertexStageInfo = shaderCreateInfo[0];
3170 if (m_data.stage == STAGE_VERTEX && m_data.descriptorType != VERTEX_ATTRIBUTE_FETCH)
3171 {
3172 preRasterShaderPipelineInfo.pVertexInputState = graphicsPipelineCreateInfo.pVertexInputState;
3173 preRasterShaderPipelineInfo.pInputAssemblyState =
3174 graphicsPipelineCreateInfo.pInputAssemblyState;
3175 preRasterShaderLibInfo.flags |= VK_GRAPHICS_PIPELINE_LIBRARY_VERTEX_INPUT_INTERFACE_BIT_EXT;
3176 vertexStageInfo.pNext = &pipelineRobustnessInfo;
3177 }
3178
3179 preRasterShaderPipelineInfo.stageCount = 1u;
3180 preRasterShaderPipelineInfo.pStages = &vertexStageInfo;
3181
3182 preRasterShaderLib =
3183 createGraphicsPipeline(vk, device, VK_NULL_HANDLE, &preRasterShaderPipelineInfo);
3184 }
3185
3186 // Fragment shader stage library.
3187 {
3188 VkGraphicsPipelineLibraryCreateInfoEXT fragShaderLibInfo = initVulkanStructure();
3189 fragShaderLibInfo.flags |= VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_SHADER_BIT_EXT;
3190
3191 VkGraphicsPipelineCreateInfo fragShaderPipelineInfo = initVulkanStructure(&fragShaderLibInfo);
3192 fragShaderPipelineInfo.flags = libCreationFlags;
3193 fragShaderPipelineInfo.layout = graphicsPipelineCreateInfo.layout;
3194 fragShaderPipelineInfo.pMultisampleState = graphicsPipelineCreateInfo.pMultisampleState;
3195 fragShaderPipelineInfo.pDepthStencilState = graphicsPipelineCreateInfo.pDepthStencilState;
3196 fragShaderPipelineInfo.renderPass = graphicsPipelineCreateInfo.renderPass;
3197 fragShaderPipelineInfo.subpass = graphicsPipelineCreateInfo.subpass;
3198
3199 std::vector<VkPipelineShaderStageCreateInfo> shaderStages;
3200 if (m_data.stage != STAGE_VERTEX)
3201 {
3202 shaderStages.push_back(shaderCreateInfo[1]);
3203 if (m_data.descriptorType != VERTEX_ATTRIBUTE_FETCH)
3204 shaderStages.back().pNext = &pipelineRobustnessInfo;
3205 }
3206
3207 fragShaderPipelineInfo.stageCount = de::sizeU32(shaderStages);
3208 fragShaderPipelineInfo.pStages = de::dataOrNull(shaderStages);
3209
3210 fragShaderLib = createGraphicsPipeline(vk, device, VK_NULL_HANDLE, &fragShaderPipelineInfo);
3211 }
3212
3213 // Fragment output library.
3214 {
3215 VkGraphicsPipelineLibraryCreateInfoEXT fragOutputLibInfo = initVulkanStructure();
3216 fragOutputLibInfo.flags |= VK_GRAPHICS_PIPELINE_LIBRARY_FRAGMENT_OUTPUT_INTERFACE_BIT_EXT;
3217
3218 VkGraphicsPipelineCreateInfo fragOutputPipelineInfo = initVulkanStructure(&fragOutputLibInfo);
3219 fragOutputPipelineInfo.flags = libCreationFlags;
3220 fragOutputPipelineInfo.pColorBlendState = graphicsPipelineCreateInfo.pColorBlendState;
3221 fragOutputPipelineInfo.renderPass = graphicsPipelineCreateInfo.renderPass;
3222 fragOutputPipelineInfo.subpass = graphicsPipelineCreateInfo.subpass;
3223 fragOutputPipelineInfo.pMultisampleState = graphicsPipelineCreateInfo.pMultisampleState;
3224
3225 fragOutputLib = createGraphicsPipeline(vk, device, VK_NULL_HANDLE, &fragOutputPipelineInfo);
3226 }
3227
3228 // Linked pipeline.
3229 std::vector<VkPipeline> libraryHandles;
3230 if (*vertexInputLib != VK_NULL_HANDLE)
3231 libraryHandles.push_back(*vertexInputLib);
3232 if (*preRasterShaderLib != VK_NULL_HANDLE)
3233 libraryHandles.push_back(*preRasterShaderLib);
3234 if (*fragShaderLib != VK_NULL_HANDLE)
3235 libraryHandles.push_back(*fragShaderLib);
3236 if (*fragOutputLib != VK_NULL_HANDLE)
3237 libraryHandles.push_back(*fragOutputLib);
3238
3239 VkPipelineLibraryCreateInfoKHR linkedPipelineLibraryInfo = initVulkanStructure();
3240 linkedPipelineLibraryInfo.libraryCount = de::sizeU32(libraryHandles);
3241 linkedPipelineLibraryInfo.pLibraries = de::dataOrNull(libraryHandles);
3242
3243 VkGraphicsPipelineCreateInfo linkedPipelineInfo = initVulkanStructure(&linkedPipelineLibraryInfo);
3244 linkedPipelineInfo.flags = linkFlags;
3245 linkedPipelineInfo.layout = graphicsPipelineCreateInfo.layout;
3246
3247 pipeline = createGraphicsPipeline(vk, device, VK_NULL_HANDLE, &linkedPipelineInfo);
3248 }
3249 }
3250 #endif
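// Monolithic fallback: used whenever the GPL path above did not create the pipeline (including all Vulkan SC builds).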
3251 if (*pipeline == VK_NULL_HANDLE)
3252 pipeline = createGraphicsPipeline(vk, device, DE_NULL, &graphicsPipelineCreateInfo);
3253 }
3254
3255 const VkImageMemoryBarrier imageBarrier = {VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType
3256 DE_NULL, // const void* pNext
3257 0u, // VkAccessFlags srcAccessMask
3258 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags dstAccessMask
3259 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout oldLayout
3260 VK_IMAGE_LAYOUT_GENERAL, // VkImageLayout newLayout
3261 VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex
3262 VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex
3263 **images[0], // VkImage image
3264 {
3265 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask
3266 0u, // uint32_t baseMipLevel
3267 1u, // uint32_t levelCount
3268 0u, // uint32_t baseArrayLayer
3269 1u, // uint32_t layerCount
3270 }};
3271
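// Transition the output image from UNDEFINED to GENERAL so the transfer below can clear or fill it.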
3272 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
3273 (VkDependencyFlags)0, 0, (const VkMemoryBarrier *)DE_NULL, 0,
3274 (const VkBufferMemoryBarrier *)DE_NULL, 1, &imageBarrier);
3275
3276 vk.cmdBindPipeline(*cmdBuffer, bindPoint, *pipeline);
3277
3278 if (!formatIsR64(m_data.format))
3279 {
3280 VkImageSubresourceRange range = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u);
3281 VkClearValue clearColor = makeClearValueColorU32(0, 0, 0, 0);
3282
3283 vk.cmdClearColorImage(*cmdBuffer, **images[0], VK_IMAGE_LAYOUT_GENERAL, &clearColor.color, 1, &range);
3284 }
3285 else
3286 {
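// VkClearColorValue only holds 32-bit channels, so 64-bit formats are initialized with a buffer-to-image copy instead of a clear.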
3287 const vector<VkBufferImageCopy> bufferImageCopy(
3288 1, makeBufferImageCopy(outputImageCreateInfo.extent,
3289 makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1)));
3290 copyBufferToImage(vk, *cmdBuffer, *(*bufferOutputImageR64), sizeOutputR64, bufferImageCopy,
3291 VK_IMAGE_ASPECT_COLOR_BIT, 1, 1, **images[0], VK_IMAGE_LAYOUT_GENERAL,
3292 VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT);
3293 }
3294
3295 VkMemoryBarrier memBarrier = {
3296 VK_STRUCTURE_TYPE_MEMORY_BARRIER, // sType
3297 DE_NULL, // pNext
3298 0u, // srcAccessMask
3299 0u, // dstAccessMask
3300 };
3301
3302 memBarrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
3303 memBarrier.dstAccessMask =
3304 VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT | VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
3305 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, m_data.allPipelineStages, 0, 1, &memBarrier, 0,
3306 DE_NULL, 0, DE_NULL);
3307
3308 if (m_data.stage == STAGE_COMPUTE)
3309 {
3310 vk.cmdDispatch(*cmdBuffer, DIM, DIM, 1);
3311 }
3312 #ifndef CTS_USES_VULKANSC
3313 else if (m_data.stage == STAGE_RAYGEN)
3314 {
3315 vk.cmdTraceRaysKHR(*cmdBuffer, &rgenSBTRegion, &missSBTRegion, &hitSBTRegion, &callSBTRegion, DIM, DIM, 1u);
3316 }
3317 #endif
3318 else
3319 {
3320 beginRenderPass(vk, *cmdBuffer, *renderPass, *framebuffer, makeRect2D(DIM, DIM), 0, DE_NULL,
3321 VK_SUBPASS_CONTENTS_INLINE);
3322 // Draw a point cloud for vertex shader testing, and a single quad for fragment shader testing
3323 if (m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
3324 {
3325 VkDeviceSize zeroOffset = 0;
3326 VkBuffer b = m_data.nullDescriptor ? DE_NULL : **buffer;
3327 vk.cmdBindVertexBuffers(*cmdBuffer, 0u, 1u, &b, &zeroOffset);
3328 vk.cmdDraw(*cmdBuffer, 1000u, 1u, 0u, 0u);
3329
3330 // This barrier corresponds to the second subpass dependency.
3331 const auto writeStage = ((m_data.stage == STAGE_VERTEX) ? VK_PIPELINE_STAGE_VERTEX_SHADER_BIT :
3332 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT);
3333 const auto postDrawBarrier =
3334 makeMemoryBarrier(VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT, VK_ACCESS_SHADER_WRITE_BIT);
3335 cmdPipelineMemoryBarrier(vk, *cmdBuffer, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, writeStage, &postDrawBarrier);
3336 }
3337 if (m_data.stage == STAGE_VERTEX)
3338 {
3339 vk.cmdDraw(*cmdBuffer, DIM * DIM, 1u, 0u, 0u);
3340 }
3341 else
3342 {
3343 vk.cmdDraw(*cmdBuffer, 4u, 1u, 0u, 0u);
3344 }
3345 endRenderPass(vk, *cmdBuffer);
3346 }
3347
3348 memBarrier.srcAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
3349 memBarrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT;
3350 vk.cmdPipelineBarrier(*cmdBuffer, m_data.allPipelineStages, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 1, &memBarrier, 0,
3351 DE_NULL, 0, DE_NULL);
3352
3353 const VkBufferImageCopy copyRegion = makeBufferImageCopy(
3354 makeExtent3D(DIM, DIM, 1u), makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, 1u));
3355 vk.cmdCopyImageToBuffer(*cmdBuffer, **images[0], VK_IMAGE_LAYOUT_GENERAL, **copyBuffer, 1u, &copyRegion);
3356
3357 memBarrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
3358 memBarrier.dstAccessMask = VK_ACCESS_HOST_READ_BIT;
3359 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0, 1, &memBarrier, 0,
3360 DE_NULL, 0, DE_NULL);
3361
3362 endCommandBuffer(vk, *cmdBuffer);
3363
3364 submitCommandsAndWait(vk, device, queue, cmdBuffer.get());
3365
3366 void *ptr = copyBuffer->getAllocation().getHostPtr();
3367
3368 invalidateAlloc(vk, device, copyBuffer->getAllocation());
3369
3370 qpTestResult res = QP_TEST_RESULT_PASS;
3371
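// Every pixel's first component is expected to contain 1; any other value fails the test.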
3372 for (uint32_t i = 0; i < DIM * DIM; ++i)
3373 {
3374 if (formatIsFloat(m_data.format))
3375 {
3376 if (((float *)ptr)[i * numComponents] != 1.0f)
3377 {
3378 res = QP_TEST_RESULT_FAIL;
3379 }
3380 }
3381 else if (formatIsR64(m_data.format))
3382 {
3383 if (((uint64_t *)ptr)[i * numComponents] != 1)
3384 {
3385 res = QP_TEST_RESULT_FAIL;
3386 }
3387 }
3388 else
3389 {
3390 if (((uint32_t *)ptr)[i * numComponents] != 1)
3391 {
3392 res = QP_TEST_RESULT_FAIL;
3393 }
3394 }
3395 }
3396
3397 return tcu::TestStatus(res, qpGetTestResultName(res));
3398 }
3399
3400 // Out of bounds stride tests.
3401 //
3402 // The goal is checking the following situation:
3403 //
3404 // - The vertex buffer size is not a multiple of the vertex binding stride.
3405 // - In other words, the last chunk goes partially beyond the end of the buffer.
3406 // - However, in this last chunk there will be an attribute that will be completely inside the buffer's range.
3407 // - With robustBufferAccess2, the implementation has to consider the attribute in-bounds and use it properly.
3408 // - Without robustBufferAccess2, the implementation is allowed to work at the chunk level instead of the attribute level.
3409 // - In other words, it can consider the attribute out of bounds because the chunk is out of bounds.
3410 //
3411 // The test checks that robustBufferAccess2 is applied correctly in this situation.
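//
// Layout sketch (each vertex is a vec4 of 16 bytes; the stride is 32 bytes):
//
//   | vtx 0 | pad 0 | vtx 1 | pad 1 | ... | vtx N-1 |        <-- buffer ends here
//                                         '-- the last stride chunk extends past the
//                                             buffer end, but the vtx N-1 attribute
//                                             itself is fully in-bounds.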
3412
3413 struct OutOfBoundsStrideParams
3414 {
3415 const bool pipelineRobustness;
3416 const bool dynamicStride;
3417
3418 OutOfBoundsStrideParams(const bool pipelineRobustness_, const bool dynamicStride_)
3419 : pipelineRobustness(pipelineRobustness_)
3420 , dynamicStride(dynamicStride_)
3421 {
3422 }
3423 };
3424
3425 class OutOfBoundsStrideInstance : public vkt::TestInstance
3426 {
3427 public:
3428 OutOfBoundsStrideInstance(Context &context, const OutOfBoundsStrideParams &params)
3429 : vkt::TestInstance(context)
3430 , m_params(params)
3431 {
3432 }
3433 virtual ~OutOfBoundsStrideInstance(void)
3434 {
3435 }
3436
3437 tcu::TestStatus iterate(void) override;
3438
3439 protected:
3440 const OutOfBoundsStrideParams m_params;
3441 };
3442
3443 class OutOfBoundsStrideCase : public vkt::TestCase
3444 {
3445 public:
3446 OutOfBoundsStrideCase(tcu::TestContext &testCtx, const std::string &name, const OutOfBoundsStrideParams &params);
3447 virtual ~OutOfBoundsStrideCase(void)
3448 {
3449 }
3450
3451 void initPrograms(vk::SourceCollections &programCollection) const override;
3452 TestInstance *createInstance(Context &context) const override
3453 {
3454 return new OutOfBoundsStrideInstance(context, m_params);
3455 }
3456 void checkSupport(Context &context) const override;
3457
3458 protected:
3459 const OutOfBoundsStrideParams m_params;
3460 };
3461
3462 OutOfBoundsStrideCase::OutOfBoundsStrideCase(tcu::TestContext &testCtx, const std::string &name,
3463 const OutOfBoundsStrideParams &params)
3464 : vkt::TestCase(testCtx, name)
3465 , m_params(params)
3466 {
3467 #ifdef CTS_USES_VULKANSC
3468 DE_ASSERT(!m_params.pipelineRobustness);
3469 #endif // CTS_USES_VULKANSC
3470 }
3471
3472 void OutOfBoundsStrideCase::checkSupport(Context &context) const
3473 {
3474 context.requireInstanceFunctionality("VK_KHR_get_physical_device_properties2");
3475
3476 const auto &vki = context.getInstanceInterface();
3477 const auto physicalDevice = context.getPhysicalDevice();
3478
3479 // We need to query feature support using the physical device instead of using the reported context features because robustness
3480 // features are disabled in the default device.
3481 VkPhysicalDeviceFeatures2 features2 = initVulkanStructure();
3482 VkPhysicalDeviceRobustness2FeaturesEXT robustness2Features = initVulkanStructure();
3483 #ifndef CTS_USES_VULKANSC
3484 VkPhysicalDevicePipelineRobustnessFeaturesEXT pipelineRobustnessFeatures = initVulkanStructure();
3485 #endif // CTS_USES_VULKANSC
3486 VkPhysicalDeviceExtendedDynamicStateFeaturesEXT edsFeatures = initVulkanStructure();
3487
3488 const auto addFeatures = makeStructChainAdder(&features2);
3489
3490 if (context.isDeviceFunctionalitySupported("VK_EXT_robustness2"))
3491 addFeatures(&robustness2Features);
3492
3493 #ifndef CTS_USES_VULKANSC
3494 if (context.isDeviceFunctionalitySupported("VK_EXT_pipeline_robustness"))
3495 addFeatures(&pipelineRobustnessFeatures);
3496 #endif // CTS_USES_VULKANSC
3497
3498 if (context.isDeviceFunctionalitySupported("VK_EXT_extended_dynamic_state"))
3499 addFeatures(&edsFeatures);
3500
3501 vki.getPhysicalDeviceFeatures2(physicalDevice, &features2);
3502
3503 if (!robustness2Features.robustBufferAccess2)
3504 TCU_THROW(NotSupportedError, "robustBufferAccess2 not supported");
3505
3506 #ifndef CTS_USES_VULKANSC
3507 if (m_params.pipelineRobustness && !pipelineRobustnessFeatures.pipelineRobustness)
3508 TCU_THROW(NotSupportedError, "pipelineRobustness not supported");
3509 #endif // CTS_USES_VULKANSC
3510
3511 if (m_params.dynamicStride && !edsFeatures.extendedDynamicState)
3512 TCU_THROW(NotSupportedError, "extendedDynamicState not supported");
3513 }
3514
3515 void OutOfBoundsStrideCase::initPrograms(vk::SourceCollections &programCollection) const
3516 {
3517 std::ostringstream vert;
3518 vert << "#version 460\n"
3519 << "layout (location=0) in vec4 inPos;\n"
3520 << "void main (void) {\n"
3521 << " gl_Position = inPos;\n"
3522 << " gl_PointSize = 1.0;\n"
3523 << "}\n";
3524 programCollection.glslSources.add("vert") << glu::VertexSource(vert.str());
3525
3526 std::ostringstream frag;
3527 frag << "#version 460\n"
3528 << "layout (location=0) out vec4 outColor;\n"
3529 << "void main (void) {\n"
3530 << " outColor = vec4(0.0, 0.0, 1.0, 1.0);\n"
3531 << "}\n";
3532 programCollection.glslSources.add("frag") << glu::FragmentSource(frag.str());
3533 }
3534
3535 tcu::TestStatus OutOfBoundsStrideInstance::iterate(void)
3536 {
3537 const auto &vki = m_context.getInstanceInterface();
3538 const auto physicalDevice = m_context.getPhysicalDevice();
3539 const auto &vkd = getDeviceInterface(m_context, true, m_params.pipelineRobustness);
3540 const auto device = getLogicalDevice(m_context, true, m_params.pipelineRobustness);
3541 SimpleAllocator allocator(vkd, device, getPhysicalDeviceMemoryProperties(vki, physicalDevice));
3542 const auto qfIndex = m_context.getUniversalQueueFamilyIndex();
3543 const tcu::IVec3 fbDim(8, 8, 1);
3544 const auto fbExtent = makeExtent3D(fbDim);
3545 const auto colorFormat = VK_FORMAT_R8G8B8A8_UNORM;
3546 const auto colorUsage = (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT);
3547 const auto colorSRR = makeDefaultImageSubresourceRange();
3548 const auto colorSRL = makeDefaultImageSubresourceLayers();
3549 const auto v4Size = static_cast<uint32_t>(sizeof(tcu::Vec4));
3550 VkQueue queue;
3551
3552 // Retrieve queue manually.
3553 vkd.getDeviceQueue(device, qfIndex, 0u, &queue);
3554
3555 // Color buffer for the test.
3556 ImageWithBuffer colorBuffer(vkd, device, allocator, fbExtent, colorFormat, colorUsage, VK_IMAGE_TYPE_2D);
3557
3558 // We will use points, one per pixel, inserting padding after each point.
3559 // The last padding element falls outside the buffer, but the point itself lies fully inside it.
3560
3561 // One point per pixel.
3562 const auto pointCount = fbExtent.width * fbExtent.height * fbExtent.depth;
3563 std::vector<tcu::Vec4> points;
3564
3565 points.reserve(pointCount);
3566 for (uint32_t y = 0u; y < fbExtent.height; ++y)
3567 for (uint32_t x = 0u; x < fbExtent.width; ++x)
3568 {
3569 const auto xCoord = ((static_cast<float>(x) + 0.5f) / static_cast<float>(fbExtent.width)) * 2.0f - 1.0f;
3570 const auto yCoord = ((static_cast<float>(y) + 0.5f) / static_cast<float>(fbExtent.height)) * 2.0f - 1.0f;
3571 const tcu::Vec4 coords(xCoord, yCoord, 0.0f, 1.0f);
3572
3573 points.push_back(coords);
3574 }
3575
3576 // Add paddings.
3577 std::vector<tcu::Vec4> vertexBufferData;
3578 vertexBufferData.reserve(points.size() * 2u);
3579 for (const auto &point : points)
3580 {
3581 vertexBufferData.push_back(point);
3582 vertexBufferData.push_back(tcu::Vec4(0.0f, 0.0f, 0.0f, 0.0f));
3583 }
3584
3585 // Prepare vertex buffer. With N points the data holds 2*N vec4s; the size below excludes the last padding, i.e. (2*N - 1) * 16 bytes, which is not a multiple of the 32-byte stride.
3586 const auto vertexBufferSize = static_cast<VkDeviceSize>(de::dataSize(vertexBufferData) - v4Size);
3587 const auto vertexBufferUsage = (VK_BUFFER_USAGE_VERTEX_BUFFER_BIT);
3588 const auto vertexBufferInfo = makeBufferCreateInfo(vertexBufferSize, vertexBufferUsage);
3589 const auto vertexBufferOffset = VkDeviceSize{0};
3590 const auto vertexBufferStride = static_cast<VkDeviceSize>(2u * v4Size);
3591
3592 BufferWithMemory vertexBuffer(vkd, device, allocator, vertexBufferInfo, MemoryRequirement::HostVisible);
3593 auto &vertexBufferAlloc = vertexBuffer.getAllocation();
3594 void *vertexBufferPtr = vertexBufferAlloc.getHostPtr();
3595
3596 deMemcpy(vertexBufferPtr, de::dataOrNull(vertexBufferData), static_cast<size_t>(vertexBufferSize));
flushAlloc(vkd, device, vertexBufferAlloc); // flush, since host-visible memory is not guaranteed to be coherent
3597
3598 // Create the pipeline.
3599 const auto &binaries = m_context.getBinaryCollection();
3600 const auto vertModule = createShaderModule(vkd, device, binaries.get("vert"));
3601 const auto fragModule = createShaderModule(vkd, device, binaries.get("frag"));
3602 const auto renderPass = makeRenderPass(vkd, device, colorFormat);
3603 const auto framebuffer =
3604 makeFramebuffer(vkd, device, renderPass.get(), colorBuffer.getImageView(), fbExtent.width, fbExtent.height);
3605 const auto pipelineLayout = makePipelineLayout(vkd, device);
3606
3607 const std::vector<VkViewport> viewports(1u, makeViewport(fbExtent));
3608 const std::vector<VkRect2D> scissors(1u, makeRect2D(fbExtent));
3609
3610 // Input state, which contains the right stride.
3611 const auto bindingStride = v4Size * 2u; // Vertex and padding.
3612 const auto bindingDescription = makeVertexInputBindingDescription(0u, bindingStride, VK_VERTEX_INPUT_RATE_VERTEX);
3613 const auto attributeDescription = makeVertexInputAttributeDescription(0u, 0u, vk::VK_FORMAT_R32G32B32A32_SFLOAT,
3614 0u); // Vertex at the start of each item.
3615
3616 const VkPipelineVertexInputStateCreateInfo inputStateCreateInfo = {
3617 VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, // VkStructureType sType;
3618 nullptr, // const void* pNext;
3619 0u, // VkPipelineVertexInputStateCreateFlags flags;
3620 1u, // uint32_t vertexBindingDescriptionCount;
3621 &bindingDescription, // const VkVertexInputBindingDescription* pVertexBindingDescriptions;
3622 1u, // uint32_t vertexAttributeDescriptionCount;
3623 &attributeDescription, // const VkVertexInputAttributeDescription* pVertexAttributeDescriptions;
3624 };
3625
3626 std::vector<VkDynamicState> dynamicStates;
3627 if (m_params.dynamicStride)
3628 dynamicStates.push_back(VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT);
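// With the stride made dynamic, the stride baked into the pipeline is ignored and the one passed to vkCmdBindVertexBuffers2 below takes effect.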
3629
3630 const VkPipelineDynamicStateCreateInfo dynamicStateCreateInfo = {
3631 VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO, // VkStructureType sType;
3632 nullptr, // const void* pNext;
3633 0u, // VkPipelineDynamicStateCreateFlags flags;
3634 de::sizeU32(dynamicStates), // uint32_t dynamicStateCount;
3635 de::dataOrNull(dynamicStates), // const VkDynamicState* pDynamicStates;
3636 };
3637
3638 const auto pipeline = makeGraphicsPipeline(
3639 vkd, device, pipelineLayout.get(), vertModule.get(), VK_NULL_HANDLE, VK_NULL_HANDLE, VK_NULL_HANDLE,
3640 fragModule.get(), renderPass.get(), viewports, scissors, VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, 0u,
3641 &inputStateCreateInfo, nullptr, nullptr, nullptr, nullptr, &dynamicStateCreateInfo);
3642
3643 // Command pool and buffer.
3644 const CommandPoolWithBuffer cmd(vkd, device, qfIndex);
3645 const auto cmdBuffer = cmd.cmdBuffer.get();
3646
3647 const auto clearColor = makeClearValueColor(tcu::Vec4(0.0f, 0.0f, 0.0f, 0.0f));
3648 beginCommandBuffer(vkd, cmdBuffer);
3649 beginRenderPass(vkd, cmdBuffer, renderPass.get(), framebuffer.get(), scissors.at(0u), clearColor);
3650 if (m_params.dynamicStride)
3651 {
3652 #ifndef CTS_USES_VULKANSC
3653 vkd.cmdBindVertexBuffers2(cmdBuffer, 0u, 1u, &vertexBuffer.get(), &vertexBufferOffset, nullptr,
3654 &vertexBufferStride);
3655 #else
3656 vkd.cmdBindVertexBuffers2EXT(cmdBuffer, 0u, 1u, &vertexBuffer.get(), &vertexBufferOffset, nullptr,
3657 &vertexBufferStride);
3658 #endif // CTS_USES_VULKANSC
3659 }
3660 else
3661 {
3662 vkd.cmdBindVertexBuffers(cmdBuffer, 0u, 1u, &vertexBuffer.get(), &vertexBufferOffset);
3663 }
3664 vkd.cmdBindPipeline(cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline.get());
3665 vkd.cmdDraw(cmdBuffer, pointCount, 1u, 0u, 0u);
3666 endRenderPass(vkd, cmdBuffer);
3667
3668 // Copy image to verification buffer.
3669 const auto color2Transfer = makeImageMemoryBarrier(
3670 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
3671 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, colorBuffer.getImage(), colorSRR);
3672
3673 cmdPipelineImageMemoryBarrier(vkd, cmdBuffer, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
3674 VK_PIPELINE_STAGE_TRANSFER_BIT, &color2Transfer);
3675
3676 const auto copyRegion = makeBufferImageCopy(fbExtent, colorSRL);
3677 vkd.cmdCopyImageToBuffer(cmdBuffer, colorBuffer.getImage(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
3678 colorBuffer.getBuffer(), 1u, &copyRegion);
3679
3680 const auto transfer2Host = makeMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT);
3681 cmdPipelineMemoryBarrier(vkd, cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT,
3682 &transfer2Host);
3683
3684 endCommandBuffer(vkd, cmdBuffer);
3685 submitCommandsAndWait(vkd, device, queue, cmdBuffer);
3686
3687 // Verify color buffer.
3688 invalidateAlloc(vkd, device, colorBuffer.getBufferAllocation());
3689
3690 const tcu::Vec4 refColor(0.0f, 0.0f, 1.0f, 1.0f); // Must match frag shader.
3691 const tcu::Vec4 threshold(0.0f, 0.0f, 0.0f, 0.0f);
3692 const void *resultData = colorBuffer.getBufferAllocation().getHostPtr();
3693 const auto tcuFormat = mapVkFormat(colorFormat);
3694 const tcu::ConstPixelBufferAccess resultAccess(tcuFormat, fbDim, resultData);
3695 auto &log = m_context.getTestContext().getLog();
3696
3697 if (!tcu::floatThresholdCompare(log, "Result", "", refColor, resultAccess, threshold, tcu::COMPARE_LOG_ON_ERROR))
3698 return tcu::TestStatus::fail("Unexpected results in the color buffer -- check log for details");
3699
3700 return tcu::TestStatus::pass("Pass");
3701 }
3702
3703 std::string getGPLSuffix(PipelineRobustnessCase prCase)
3704 {
3705 if (prCase == PipelineRobustnessCase::ENABLED_FAST_GPL)
3706 return "_fast_gpl";
3707 if (prCase == PipelineRobustnessCase::ENABLED_OPTIMIZED_GPL)
3708 return "_optimized_gpl";
3709 return "";
3710 }
3711
3712 } // namespace
3713
3714 static void createTests(tcu::TestCaseGroup *group, bool robustness2, bool pipelineRobustness)
3715 {
3716 tcu::TestContext &testCtx = group->getTestContext();
3717
3718 typedef struct
3719 {
3720 uint32_t count;
3721 const char *name;
3722 } TestGroupCase;
3723
3724 TestGroupCase fmtCases[] = {
3725 {VK_FORMAT_R32_SINT, "r32i"},
3726 {VK_FORMAT_R32_UINT, "r32ui"},
3727 {VK_FORMAT_R32_SFLOAT, "r32f"},
3728 {VK_FORMAT_R32G32_SINT, "rg32i"},
3729 {VK_FORMAT_R32G32_UINT, "rg32ui"},
3730 {VK_FORMAT_R32G32_SFLOAT, "rg32f"},
3731 {VK_FORMAT_R32G32B32A32_SINT, "rgba32i"},
3732 {VK_FORMAT_R32G32B32A32_UINT, "rgba32ui"},
3733 {VK_FORMAT_R32G32B32A32_SFLOAT, "rgba32f"},
3734 {VK_FORMAT_R64_SINT, "r64i"},
3735 {VK_FORMAT_R64_UINT, "r64ui"},
3736 };
3737
3738 TestGroupCase fullDescCases[] = {
3739 {VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, "uniform_buffer"},
3740 {VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, "storage_buffer"},
3741 {VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, "uniform_buffer_dynamic"},
3742 {VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC, "storage_buffer_dynamic"},
3743 {VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, "uniform_texel_buffer"},
3744 {VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, "storage_texel_buffer"},
3745 {VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, "storage_image"},
3746 {VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, "sampled_image"},
3747 {VERTEX_ATTRIBUTE_FETCH, "vertex_attribute_fetch"},
3748 };
3749
3750 TestGroupCase imgDescCases[] = {
3751 {VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, "storage_image"},
3752 {VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, "sampled_image"},
3753 };
3754
3755 TestGroupCase fullLenCases32Bit[] = {
3756 {~0U, "null_descriptor"}, {0, "img"}, {4, "len_4"}, {8, "len_8"}, {12, "len_12"}, {16, "len_16"},
3757 {20, "len_20"}, {31, "len_31"}, {32, "len_32"}, {33, "len_33"}, {35, "len_35"}, {36, "len_36"},
3758 {39, "len_39"}, {40, "len_41"}, {252, "len_252"}, {256, "len_256"}, {260, "len_260"},
3759 };
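// Lengths here (and in the 64-bit variant below) straddle the 32- and 256-byte boundaries so bounds checks are exercised right at the edges.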
3760
3761 TestGroupCase fullLenCases64Bit[] = {
3762 {~0U, "null_descriptor"}, {0, "img"}, {8, "len_8"}, {16, "len_16"}, {24, "len_24"}, {32, "len_32"},
3763 {40, "len_40"}, {62, "len_62"}, {64, "len_64"}, {66, "len_66"}, {70, "len_70"}, {72, "len_72"},
3764 {78, "len_78"}, {80, "len_80"}, {504, "len_504"}, {512, "len_512"}, {520, "len_520"},
3765 };
3766
3767 TestGroupCase imgLenCases[] = {
3768 {0, "img"},
3769 };
3770
3771 TestGroupCase viewCases[] = {
3772 {VK_IMAGE_VIEW_TYPE_1D, "1d"},
3773 {VK_IMAGE_VIEW_TYPE_2D, "2d"},
3774 {VK_IMAGE_VIEW_TYPE_3D, "3d"},
3775 {VK_IMAGE_VIEW_TYPE_CUBE, "cube"},
3776 {VK_IMAGE_VIEW_TYPE_1D_ARRAY, "1d_array"},
3777 {VK_IMAGE_VIEW_TYPE_2D_ARRAY, "2d_array"},
3778 {VK_IMAGE_VIEW_TYPE_CUBE_ARRAY, "cube_array"},
3779 };
3780
3781 TestGroupCase sampCases[] = {
3782 {VK_SAMPLE_COUNT_1_BIT, "samples_1"},
3783 {VK_SAMPLE_COUNT_4_BIT, "samples_4"},
3784 };
3785
3786 TestGroupCase stageCases[] = {
3787 // compute
3788 {STAGE_COMPUTE, "comp"},
3789 // fragment
3790 {STAGE_FRAGMENT, "frag"},
3791 // vertex
3792 {STAGE_VERTEX, "vert"},
3793 #ifndef CTS_USES_VULKANSC
3794 // raygen
3795 {STAGE_RAYGEN, "rgen"},
3796 #endif
3797 };
3798
3799 TestGroupCase volCases[] = {
3800 {0, "nonvolatile"},
3801 {1, "volatile"},
3802 };
3803
3804 TestGroupCase unrollCases[] = {
3805 {0, "dontunroll"},
3806 {1, "unroll"},
3807 };
3808
3809 TestGroupCase tempCases[] = {
3810 {0, "notemplate"},
3811 #ifndef CTS_USES_VULKANSC
3812 {1, "template"},
3813 #endif
3814 };
3815
3816 TestGroupCase pushCases[] = {
3817 {0, "bind"},
3818 #ifndef CTS_USES_VULKANSC
3819 {1, "push"},
3820 #endif
3821 };
3822
3823 TestGroupCase fmtQualCases[] = {
3824 {0, "no_fmt_qual"},
3825 {1, "fmt_qual"},
3826 };
3827
3828 TestGroupCase readOnlyCases[] = {
3829 {0, "readwrite"},
3830 {1, "readonly"},
3831 };
3832
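// The nested loops below walk the cartesian product of all the dimensions above, pruning invalid or redundant combinations along the way.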
3833 for (int pushNdx = 0; pushNdx < DE_LENGTH_OF_ARRAY(pushCases); pushNdx++)
3834 {
3835 de::MovePtr<tcu::TestCaseGroup> pushGroup(new tcu::TestCaseGroup(testCtx, pushCases[pushNdx].name));
3836 for (int tempNdx = 0; tempNdx < DE_LENGTH_OF_ARRAY(tempCases); tempNdx++)
3837 {
3838 de::MovePtr<tcu::TestCaseGroup> tempGroup(new tcu::TestCaseGroup(testCtx, tempCases[tempNdx].name));
3839 for (int fmtNdx = 0; fmtNdx < DE_LENGTH_OF_ARRAY(fmtCases); fmtNdx++)
3840 {
3841 de::MovePtr<tcu::TestCaseGroup> fmtGroup(new tcu::TestCaseGroup(testCtx, fmtCases[fmtNdx].name));
3842
3843 // Avoid too much duplication by excluding certain test cases
3844 if (pipelineRobustness && !(fmtCases[fmtNdx].count == VK_FORMAT_R32_UINT ||
3845 fmtCases[fmtNdx].count == VK_FORMAT_R32G32B32A32_SFLOAT ||
3846 fmtCases[fmtNdx].count == VK_FORMAT_R64_SINT))
3847 {
3848 continue;
3849 }
3850
3851 int fmtSize = tcu::getPixelSize(mapVkFormat((VkFormat)fmtCases[fmtNdx].count));
3852
3853 for (int unrollNdx = 0; unrollNdx < DE_LENGTH_OF_ARRAY(unrollCases); unrollNdx++)
3854 {
3855 de::MovePtr<tcu::TestCaseGroup> unrollGroup(
3856 new tcu::TestCaseGroup(testCtx, unrollCases[unrollNdx].name));
3857
3858 // Avoid too much duplication by excluding certain test cases
3859 if (unrollNdx > 0 && pipelineRobustness)
3860 continue;
3861
3862 for (int volNdx = 0; volNdx < DE_LENGTH_OF_ARRAY(volCases); volNdx++)
3863 {
3864 de::MovePtr<tcu::TestCaseGroup> volGroup(
3865 new tcu::TestCaseGroup(testCtx, volCases[volNdx].name));
3866
3867 int numDescCases =
3868 robustness2 ? DE_LENGTH_OF_ARRAY(fullDescCases) : DE_LENGTH_OF_ARRAY(imgDescCases);
3869 TestGroupCase *descCases = robustness2 ? fullDescCases : imgDescCases;
3870
3871 for (int descNdx = 0; descNdx < numDescCases; descNdx++)
3872 {
3873 de::MovePtr<tcu::TestCaseGroup> descGroup(
3874 new tcu::TestCaseGroup(testCtx, descCases[descNdx].name));
3875
3876 // Avoid too much duplication by excluding certain test cases
3877 if (pipelineRobustness &&
3878 !(descCases[descNdx].count == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
3879 descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
3880 descCases[descNdx].count == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER ||
3881 descCases[descNdx].count == VERTEX_ATTRIBUTE_FETCH))
3882 {
3883 continue;
3884 }
3885
3886 for (int roNdx = 0; roNdx < DE_LENGTH_OF_ARRAY(readOnlyCases); roNdx++)
3887 {
3888 de::MovePtr<tcu::TestCaseGroup> rwGroup(
3889 new tcu::TestCaseGroup(testCtx, readOnlyCases[roNdx].name));
3890
3891 // readonly cases apply only to storage buffers
3892 if (readOnlyCases[roNdx].count != 0 &&
3893 descCases[descNdx].count != VK_DESCRIPTOR_TYPE_STORAGE_BUFFER &&
3894 descCases[descNdx].count != VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
3895 continue;
3896
3897 if (pipelineRobustness && readOnlyCases[roNdx].count != 0)
3898 {
3899 continue;
3900 }
3901
3902 for (int fmtQualNdx = 0; fmtQualNdx < DE_LENGTH_OF_ARRAY(fmtQualCases); fmtQualNdx++)
3903 {
3904 de::MovePtr<tcu::TestCaseGroup> fmtQualGroup(
3905 new tcu::TestCaseGroup(testCtx, fmtQualCases[fmtQualNdx].name));
3906
3907 // The format qualifier is only used for storage images and storage texel buffers
3908 if (fmtQualCases[fmtQualNdx].count &&
3909 !(descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER ||
3910 descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE))
3911 continue;
3912
3913 if (pushCases[pushNdx].count &&
3914 (descCases[descNdx].count == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
3915 descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC ||
3916 descCases[descNdx].count == VERTEX_ATTRIBUTE_FETCH))
3917 continue;
3918
3919 const bool isR64 = formatIsR64((VkFormat)fmtCases[fmtNdx].count);
3920 int numLenCases =
3921 robustness2 ?
3922 DE_LENGTH_OF_ARRAY((isR64 ? fullLenCases64Bit : fullLenCases32Bit)) :
3923 DE_LENGTH_OF_ARRAY(imgLenCases);
3924 TestGroupCase *lenCases =
3925 robustness2 ? (isR64 ? fullLenCases64Bit : fullLenCases32Bit) : imgLenCases;
3926
3927 for (int lenNdx = 0; lenNdx < numLenCases; lenNdx++)
3928 {
3929 if (lenCases[lenNdx].count != ~0U)
3930 {
3931 bool bufferLen = lenCases[lenNdx].count != 0;
3932 bool bufferDesc =
3933 descCases[descNdx].count != VK_DESCRIPTOR_TYPE_STORAGE_IMAGE &&
3934 descCases[descNdx].count != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
3935 if (bufferLen != bufferDesc)
3936 continue;
3937
3938 // Add template test cases only for null_descriptor cases
3939 if (tempCases[tempNdx].count)
3940 continue;
3941 }
3942
3943 if ((descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER ||
3944 descCases[descNdx].count == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) &&
3945 ((lenCases[lenNdx].count % fmtSize) != 0) && lenCases[lenNdx].count != ~0U)
3946 {
3947 continue;
3948 }
3949
3950 // Avoid too much duplication by excluding certain test cases
3951 if (pipelineRobustness && robustness2 &&
3952 (lenCases[lenNdx].count == 0 ||
3953 ((lenCases[lenNdx].count & (lenCases[lenNdx].count - 1)) != 0)))
3954 {
3955 continue;
3956 }
3957
3958 // "volatile" only applies to storage images/buffers
3959 if (volCases[volNdx].count && !supportsStores(descCases[descNdx].count))
3960 continue;
3961
3962 de::MovePtr<tcu::TestCaseGroup> lenGroup(
3963 new tcu::TestCaseGroup(testCtx, lenCases[lenNdx].name));
3964 for (int sampNdx = 0; sampNdx < DE_LENGTH_OF_ARRAY(sampCases); sampNdx++)
3965 {
3966 de::MovePtr<tcu::TestCaseGroup> sampGroup(
3967 new tcu::TestCaseGroup(testCtx, sampCases[sampNdx].name));
3968
3969 // Avoid too much duplication by excluding certain test cases
3970 if (pipelineRobustness && sampCases[sampNdx].count != VK_SAMPLE_COUNT_1_BIT)
3971 continue;
3972
3973 for (int viewNdx = 0; viewNdx < DE_LENGTH_OF_ARRAY(viewCases); viewNdx++)
3974 {
3975 if (viewCases[viewNdx].count != VK_IMAGE_VIEW_TYPE_1D &&
3976 descCases[descNdx].count != VK_DESCRIPTOR_TYPE_STORAGE_IMAGE &&
3977 descCases[descNdx].count !=
3978 VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
3979 {
3980 // Buffer descriptors don't have different dimensionalities; only test "1D".
3981 continue;
3982 }
3983
3984 if (viewCases[viewNdx].count != VK_IMAGE_VIEW_TYPE_2D &&
3985 viewCases[viewNdx].count != VK_IMAGE_VIEW_TYPE_2D_ARRAY &&
3986 sampCases[sampNdx].count != VK_SAMPLE_COUNT_1_BIT)
3987 {
3988 continue;
3989 }
3990
3991 // Avoid too much duplication by excluding certain test cases
3992 if (pipelineRobustness &&
3993 !(viewCases[viewNdx].count == VK_IMAGE_VIEW_TYPE_1D ||
3994 viewCases[viewNdx].count == VK_IMAGE_VIEW_TYPE_2D ||
3995 viewCases[viewNdx].count == VK_IMAGE_VIEW_TYPE_2D_ARRAY))
3996 {
3997 continue;
3998 }
3999
4000 de::MovePtr<tcu::TestCaseGroup> viewGroup(
4001 new tcu::TestCaseGroup(testCtx, viewCases[viewNdx].name));
4002 for (int stageNdx = 0; stageNdx < DE_LENGTH_OF_ARRAY(stageCases);
4003 stageNdx++)
4004 {
4005 Stage currentStage = static_cast<Stage>(stageCases[stageNdx].count);
4006 VkFlags allShaderStages = VK_SHADER_STAGE_COMPUTE_BIT |
4007 VK_SHADER_STAGE_VERTEX_BIT |
4008 VK_SHADER_STAGE_FRAGMENT_BIT;
4009 VkFlags allPipelineStages = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT |
4010 VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
4011 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
4012 VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
4013 #ifndef CTS_USES_VULKANSC
4014 if ((Stage)stageCases[stageNdx].count == STAGE_RAYGEN)
4015 {
4016 allShaderStages |= VK_SHADER_STAGE_RAYGEN_BIT_KHR;
4017 allPipelineStages |=
4018 VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_KHR;
4019
4020 if (pipelineRobustness)
4021 continue;
4022 }
4023 #endif // CTS_USES_VULKANSC
4024 if ((lenCases[lenNdx].count == ~0U) && pipelineRobustness)
4025 continue;
4026
4027 if (descCases[descNdx].count == VERTEX_ATTRIBUTE_FETCH &&
4028 currentStage != STAGE_VERTEX)
4029 continue;
4030
4031 uint32_t imageDim[3] = {5, 11, 6};
4032 if (viewCases[viewNdx].count == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY ||
4033 viewCases[viewNdx].count == VK_IMAGE_VIEW_TYPE_CUBE)
4034 imageDim[1] = imageDim[0];
4035
4036 #ifndef CTS_USES_VULKANSC
4037 std::vector<PipelineRobustnessCase> pipelineRobustnessCases;
4038 if (!pipelineRobustness)
4039 pipelineRobustnessCases.push_back(
4040 PipelineRobustnessCase::DISABLED);
4041 else
4042 {
4043 pipelineRobustnessCases.push_back(
4044 PipelineRobustnessCase::ENABLED_MONOLITHIC);
4045 if (currentStage != STAGE_RAYGEN &&
4046 currentStage != STAGE_COMPUTE)
4047 {
4048 pipelineRobustnessCases.push_back(
4049 PipelineRobustnessCase::ENABLED_FAST_GPL);
4050 pipelineRobustnessCases.push_back(
4051 PipelineRobustnessCase::ENABLED_OPTIMIZED_GPL);
4052 }
4053 }
4054 #else
4055 const std::vector<PipelineRobustnessCase> pipelineRobustnessCases(
4056 1u, (pipelineRobustness ?
4057 PipelineRobustnessCase::ENABLED_MONOLITHIC :
4058 PipelineRobustnessCase::DISABLED));
4059 #endif // CTS_USES_VULKANSC
4060
4061 for (const auto &pipelineRobustnessCase : pipelineRobustnessCases)
4062 {
4063 CaseDef c = {
4064 (VkFormat)fmtCases[fmtNdx].count, // VkFormat format;
4065 currentStage, // Stage stage;
4066 allShaderStages, // VkFlags allShaderStages;
4067 allPipelineStages, // VkFlags allPipelineStages;
4068 (int)descCases[descNdx]
4069 .count, // VkDescriptorType descriptorType;
4070 (VkImageViewType)viewCases[viewNdx]
4071 .count, // VkImageViewType viewType;
4072 (VkSampleCountFlagBits)sampCases[sampNdx]
4073 .count, // VkSampleCountFlagBits samples;
4074 (int)lenCases[lenNdx].count, // int bufferLen;
4075 (bool)unrollCases[unrollNdx].count, // bool unroll;
4076 (bool)volCases[volNdx].count, // bool vol;
4077 (bool)(lenCases[lenNdx].count ==
4078 ~0U), // bool nullDescriptor
4079 (bool)tempCases[tempNdx].count, // bool useTemplate
4080 (bool)fmtQualCases[fmtQualNdx]
4081 .count, // bool formatQualifier
4082 (bool)pushCases[pushNdx].count, // bool pushDescriptor;
4083 (bool)robustness2, // bool testRobustness2;
4084 pipelineRobustnessCase, // PipelineRobustnessCase pipelineRobustnessCase;
4085 {imageDim[0], imageDim[1],
4086 imageDim[2]}, // uint32_t imageDim[3];
4087 (bool)(readOnlyCases[roNdx].count == 1), // bool readOnly;
4088 };
4089
4090 const auto name = stageCases[stageNdx].name +
4091 getGPLSuffix(pipelineRobustnessCase);
4092 viewGroup->addChild(
4093 new RobustnessExtsTestCase(testCtx, name, c));
4094 }
4095 }
4096 sampGroup->addChild(viewGroup.release());
4097 }
4098 lenGroup->addChild(sampGroup.release());
4099 }
4100 fmtQualGroup->addChild(lenGroup.release());
4101 }
4102 // Put storage_buffer tests in separate readonly vs readwrite groups. Other types
4103 // go directly into descGroup
4104 if (descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
4105 descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
4106 {
4107 rwGroup->addChild(fmtQualGroup.release());
4108 }
4109 else
4110 {
4111 descGroup->addChild(fmtQualGroup.release());
4112 }
4113 }
4114 if (descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
4115 descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
4116 {
4117 descGroup->addChild(rwGroup.release());
4118 }
4119 }
4120 volGroup->addChild(descGroup.release());
4121 }
4122 unrollGroup->addChild(volGroup.release());
4123 }
4124 fmtGroup->addChild(unrollGroup.release());
4125 }
4126 tempGroup->addChild(fmtGroup.release());
4127 }
4128 pushGroup->addChild(tempGroup.release());
4129 }
4130 group->addChild(pushGroup.release());
4131 }
4132
4133 if (robustness2)
4134 {
4135 de::MovePtr<tcu::TestCaseGroup> miscGroup(new tcu::TestCaseGroup(testCtx, "misc"));
4136
4137 for (const auto dynamicStride : {false, true})
4138 {
4139 const OutOfBoundsStrideParams params(pipelineRobustness, dynamicStride);
4140 const std::string nameSuffix(dynamicStride ? "_dynamic_stride" : "");
4141 const std::string testName("out_of_bounds_stride" + nameSuffix);
4142
4143 miscGroup->addChild(new OutOfBoundsStrideCase(testCtx, testName, params));
4144 }
4145
4146 group->addChild(miscGroup.release());
4147 }
4148 }
4149
4150 static void createRobustness2Tests(tcu::TestCaseGroup *group)
4151 {
4152 createTests(group, /*robustness2=*/true, /*pipelineRobustness=*/false);
4153 }
4154
4155 static void createImageRobustnessTests(tcu::TestCaseGroup *group)
4156 {
4157 createTests(group, /*robustness2=*/false, /*pipelineRobustness=*/false);
4158 }
4159
4160 #ifndef CTS_USES_VULKANSC
4161 static void createPipelineRobustnessTests(tcu::TestCaseGroup *group)
4162 {
4163 tcu::TestContext &testCtx = group->getTestContext();
4164
4165 tcu::TestCaseGroup *robustness2Group = new tcu::TestCaseGroup(testCtx, "robustness2");
4166
4167 createTests(robustness2Group, /*robustness2=*/true, /*pipelineRobustness=*/true);
4168
4169 group->addChild(robustness2Group);
4170
4171 tcu::TestCaseGroup *imageRobustness2Group = new tcu::TestCaseGroup(testCtx, "image_robustness");
4172
4173 createTests(imageRobustness2Group, /*robustness2=*/false, /*pipelineRobustness=*/true);
4174
4175 group->addChild(imageRobustness2Group);
4176 }
4177 #endif
4178
4179 static void cleanupGroup(tcu::TestCaseGroup *group)
4180 {
4181 DE_UNREF(group);
4182 // Destroy singleton objects.
4183 ImageRobustnessSingleton::destroy();
4184 Robustness2Singleton::destroy();
4185 PipelineRobustnessImageRobustnessSingleton::destroy();
4186 PipelineRobustnessRobustness2Singleton::destroy();
4187 }
4188
4189 tcu::TestCaseGroup *createRobustness2Tests(tcu::TestContext &testCtx)
4190 {
4191 return createTestGroup(testCtx, "robustness2", createRobustness2Tests, cleanupGroup);
4192 }
4193
4194 tcu::TestCaseGroup *createImageRobustnessTests(tcu::TestContext &testCtx)
4195 {
4196 return createTestGroup(testCtx, "image_robustness", createImageRobustnessTests, cleanupGroup);
4197 }
4198
4199 #ifndef CTS_USES_VULKANSC
4200 tcu::TestCaseGroup *createPipelineRobustnessTests(tcu::TestContext &testCtx)
4201 {
4202 return createTestGroup(testCtx, "pipeline_robustness", createPipelineRobustnessTests, cleanupGroup);
4203 }
4204 #endif
4205
4206 } // namespace robustness
4207 } // namespace vkt
4208