/*-------------------------------------------------------------------------
 * Vulkan Conformance Tests
 * ------------------------
 *
 * Copyright (c) 2021 The Khronos Group Inc.
 * Copyright (c) 2021 Valve Corporation.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*!
 * \file
 * \brief Tests for VK_VALVE_mutable_descriptor_type and VK_EXT_mutable_descriptor_type.
 *//*--------------------------------------------------------------------*/
#include "vktBindingMutableTests.hpp"
#include "vktTestCase.hpp"
#include "vktTestGroupUtil.hpp"
#include "vktCustomInstancesDevices.hpp"

#include "tcuCommandLine.hpp"

#include "vkDefs.hpp"
#include "vkRefUtil.hpp"
#include "vkQueryUtil.hpp"
#include "vkImageWithMemory.hpp"
#include "vkBufferWithMemory.hpp"
#include "vkTypeUtil.hpp"
#include "vkObjUtil.hpp"
#include "vkBarrierUtil.hpp"
#include "vkCmdUtil.hpp"
#include "vkBuilderUtil.hpp"
#include "vkRayTracingUtil.hpp"

#include "deUniquePtr.hpp"
#include "deSTLUtil.hpp"
#include "deStringUtil.hpp"

#include <cstdint>
#include <string>
#include <vector>
#include <algorithm>
#include <iterator>
#include <set>
#include <sstream>
#include <limits>

namespace vkt
{
namespace BindingModel
{

namespace
{

using namespace vk;

de::SharedPtr<Move<vk::VkDevice>> g_singletonDevice;

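// Returns a singleton custom device shared by all tests in this file. On first use it creates the device with a
// universal queue and, when supported, enables the acceleration structure, buffer device address, ray tracing
// pipeline, ray query, mutable descriptor type and descriptor indexing features.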
VkDevice getDevice(Context &context)
{
    if (!g_singletonDevice)
    {
        const float queuePriority = 1.0f;

        // Create a universal queue that supports graphics and compute.
        const VkDeviceQueueCreateInfo queueParams{
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // VkStructureType sType;
            nullptr,                                    // const void* pNext;
            0u,                                         // VkDeviceQueueCreateFlags flags;
            context.getUniversalQueueFamilyIndex(),     // uint32_t queueFamilyIndex;
            1u,                                         // uint32_t queueCount;
            &queuePriority                              // const float* pQueuePriorities;
        };

        // \note Extensions in core are not explicitly enabled even though
        //       they are in the extension list advertised to tests.
        const auto &extensionPtrs = context.getDeviceCreationExtensions();

        VkPhysicalDeviceAccelerationStructureFeaturesKHR accelerationStructureFeatures = initVulkanStructure();
        VkPhysicalDeviceBufferDeviceAddressFeatures bufferDeviceAddressFeatures = initVulkanStructure();
        VkPhysicalDeviceRayTracingPipelineFeaturesKHR rayTracingPipelineFeatures = initVulkanStructure();
        VkPhysicalDeviceRayQueryFeaturesKHR rayQueryFeatures = initVulkanStructure();
        VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT mutableDescriptorTypeFeatures = initVulkanStructure();
        VkPhysicalDeviceDescriptorIndexingFeatures descriptorIndexingFeatures = initVulkanStructure();
        VkPhysicalDeviceFeatures2 features2 = initVulkanStructure();

        const auto addFeatures = makeStructChainAdder(&features2);

        if (context.isDeviceFunctionalitySupported("VK_KHR_acceleration_structure"))
            addFeatures(&accelerationStructureFeatures);

        if (context.isDeviceFunctionalitySupported("VK_KHR_buffer_device_address"))
            addFeatures(&bufferDeviceAddressFeatures);

        if (context.isDeviceFunctionalitySupported("VK_KHR_ray_tracing_pipeline"))
            addFeatures(&rayTracingPipelineFeatures);

        if (context.isDeviceFunctionalitySupported("VK_KHR_ray_query"))
            addFeatures(&rayQueryFeatures);

        if (context.isDeviceFunctionalitySupported("VK_VALVE_mutable_descriptor_type") ||
            context.isDeviceFunctionalitySupported("VK_EXT_mutable_descriptor_type"))
            addFeatures(&mutableDescriptorTypeFeatures);

        if (context.isDeviceFunctionalitySupported("VK_EXT_descriptor_indexing"))
            addFeatures(&descriptorIndexingFeatures);

        context.getInstanceInterface().getPhysicalDeviceFeatures2(context.getPhysicalDevice(), &features2);
        features2.features.robustBufferAccess = VK_FALSE; // Disable robustness features.

        const VkDeviceCreateInfo deviceCreateInfo{
            VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, // VkStructureType sType;
            &features2,                           // const void* pNext;
            (VkDeviceCreateFlags)0u,              // VkDeviceCreateFlags flags;
            1u,                                   // uint32_t queueCreateInfoCount;
            &queueParams,                         // const VkDeviceQueueCreateInfo* pQueueCreateInfos;
            0u,                                   // uint32_t enabledLayerCount;
            nullptr,                              // const char* const* ppEnabledLayerNames;
            de::sizeU32(extensionPtrs),           // uint32_t enabledExtensionCount;
            de::dataOrNull(extensionPtrs),        // const char* const* ppEnabledExtensionNames;
            nullptr,                              // const VkPhysicalDeviceFeatures* pEnabledFeatures;
        };

        Move<VkDevice> device = createCustomDevice(
            context.getTestContext().getCommandLine().isValidationEnabled(), context.getPlatformInterface(),
            context.getInstance(), context.getInstanceInterface(), context.getPhysicalDevice(), &deviceCreateInfo);
        g_singletonDevice = de::SharedPtr<Move<VkDevice>>(new Move<VkDevice>(device));
    }

    return g_singletonDevice->get();
}

uint32_t getDescriptorNumericValue(uint32_t iteration, uint32_t bindingIdx, uint32_t descriptorIdx = 0u)
{
    // When assigning numeric values for the descriptor contents, each descriptor will get 0x5aIIBBDD, where II is an
    // octet containing the iteration index, BB is an octet containing the binding index and DD is an octet containing
    // the descriptor index inside that binding.
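    // For example, iteration 1, binding 2 and descriptor 3 yield 0x5a010203u.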
    constexpr uint32_t kNumericValueBase = 0x5a000000u;

    return (kNumericValueBase | ((iteration & 0xFFu) << 16) | ((bindingIdx & 0xFFu) << 8) | (descriptorIdx & 0xFFu));
}

uint16_t getAccelerationStructureOffsetX(uint32_t descriptorNumericValue)
{
    // Keep the lowest 16 bits (binding and descriptor idx) as the offset.
    return static_cast<uint16_t>(descriptorNumericValue);
}

// Value that will be stored in the output buffer to signal that values were read successfully.
uint32_t getExpectedOutputBufferValue()
{
    return 2u;
}

// This value will be stored in an image to be sampled when checking descriptors containing samplers alone.
uint32_t getExternalSampledImageValue()
{
    return 0x41322314u;
}

// Value that will be ORed with the descriptor value before writing.
uint32_t getStoredValueMask()
{
    return 0xFF000000u;
}

VkFormat getDescriptorImageFormat()
{
    return VK_FORMAT_R32_UINT;
}

VkExtent3D getDefaultExtent()
{
    return makeExtent3D(1u, 1u, 1u);
}

// Convert value to hexadecimal.
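// For example, toHex(0x5a010203u) returns the string "0x5a010203u".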
std::string toHex(uint32_t val)
{
    std::ostringstream s;
    s << "0x" << std::hex << val << "u";
    return s.str();
}

// Returns the list of descriptor types that cannot be part of a mutable descriptor.
std::vector<VkDescriptorType> getForbiddenMutableTypes()
{
    return std::vector<VkDescriptorType>{
        VK_DESCRIPTOR_TYPE_MUTABLE_EXT,
        VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC,
        VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC,
        VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT,
    };
}

// Returns the list of descriptor types that are mandatory for the extension.
std::vector<VkDescriptorType> getMandatoryMutableTypes()
{
    return std::vector<VkDescriptorType>{
        VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,        VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
        VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
        VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,       VK_DESCRIPTOR_TYPE_STORAGE_BUFFER};
}

// This helps quickly transform a vector of descriptor types into a bitmask, which makes it easier to check some conditions.
enum DescriptorTypeFlagBits
{
    DTFB_SAMPLER                    = (1 << 0),
    DTFB_COMBINED_IMAGE_SAMPLER     = (1 << 1),
    DTFB_SAMPLED_IMAGE              = (1 << 2),
    DTFB_STORAGE_IMAGE              = (1 << 3),
    DTFB_UNIFORM_TEXEL_BUFFER       = (1 << 4),
    DTFB_STORAGE_TEXEL_BUFFER       = (1 << 5),
    DTFB_UNIFORM_BUFFER             = (1 << 6),
    DTFB_STORAGE_BUFFER             = (1 << 7),
    DTFB_UNIFORM_BUFFER_DYNAMIC     = (1 << 8),
    DTFB_STORAGE_BUFFER_DYNAMIC     = (1 << 9),
    DTFB_INPUT_ATTACHMENT           = (1 << 10),
    DTFB_INLINE_UNIFORM_BLOCK_EXT   = (1 << 11),
    DTFB_ACCELERATION_STRUCTURE_KHR = (1 << 12),
    DTFB_ACCELERATION_STRUCTURE_NV  = (1 << 13),
    DTFB_MUTABLE                    = (1 << 14),
};

using DescriptorTypeFlags = uint32_t;

// Convert type to its corresponding flag bit.
DescriptorTypeFlagBits toDescriptorTypeFlagBit(VkDescriptorType descriptorType)
{
    switch (descriptorType)
    {
    case VK_DESCRIPTOR_TYPE_SAMPLER:
        return DTFB_SAMPLER;
    case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
        return DTFB_COMBINED_IMAGE_SAMPLER;
    case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
        return DTFB_SAMPLED_IMAGE;
    case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
        return DTFB_STORAGE_IMAGE;
    case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
        return DTFB_UNIFORM_TEXEL_BUFFER;
    case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
        return DTFB_STORAGE_TEXEL_BUFFER;
    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
        return DTFB_UNIFORM_BUFFER;
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
        return DTFB_STORAGE_BUFFER;
    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
        return DTFB_UNIFORM_BUFFER_DYNAMIC;
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
        return DTFB_STORAGE_BUFFER_DYNAMIC;
    case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
        return DTFB_INPUT_ATTACHMENT;
    case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT:
        return DTFB_INLINE_UNIFORM_BLOCK_EXT;
    case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR:
        return DTFB_ACCELERATION_STRUCTURE_KHR;
    case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV:
        return DTFB_ACCELERATION_STRUCTURE_NV;
    case VK_DESCRIPTOR_TYPE_MUTABLE_EXT:
        return DTFB_MUTABLE;
    default:
        break;
    }

    // Unreachable.
    DE_ASSERT(false);
    return DTFB_SAMPLER;
}

// Convert vector of descriptor types to a bitfield.
DescriptorTypeFlags toDescriptorTypeFlags(const std::vector<VkDescriptorType> &types)
{
    DescriptorTypeFlags result = 0u;
    for (const auto &t : types)
        result |= toDescriptorTypeFlagBit(t);
    return result;
}

// Convert bitfield to vector of descriptor types.
std::vector<VkDescriptorType> toDescriptorTypeVector(DescriptorTypeFlags bitfield)
{
    std::vector<VkDescriptorType> result;

    if (bitfield & DTFB_SAMPLER)
        result.push_back(VK_DESCRIPTOR_TYPE_SAMPLER);
    if (bitfield & DTFB_COMBINED_IMAGE_SAMPLER)
        result.push_back(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER);
    if (bitfield & DTFB_SAMPLED_IMAGE)
        result.push_back(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE);
    if (bitfield & DTFB_STORAGE_IMAGE)
        result.push_back(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE);
    if (bitfield & DTFB_UNIFORM_TEXEL_BUFFER)
        result.push_back(VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER);
    if (bitfield & DTFB_STORAGE_TEXEL_BUFFER)
        result.push_back(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER);
    if (bitfield & DTFB_UNIFORM_BUFFER)
        result.push_back(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);
    if (bitfield & DTFB_STORAGE_BUFFER)
        result.push_back(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
    if (bitfield & DTFB_UNIFORM_BUFFER_DYNAMIC)
        result.push_back(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC);
    if (bitfield & DTFB_STORAGE_BUFFER_DYNAMIC)
        result.push_back(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC);
    if (bitfield & DTFB_INPUT_ATTACHMENT)
        result.push_back(VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT);
    if (bitfield & DTFB_INLINE_UNIFORM_BLOCK_EXT)
        result.push_back(VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT);
    if (bitfield & DTFB_ACCELERATION_STRUCTURE_KHR)
        result.push_back(VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR);
    if (bitfield & DTFB_ACCELERATION_STRUCTURE_NV)
        result.push_back(VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV);
    if (bitfield & DTFB_MUTABLE)
        result.push_back(VK_DESCRIPTOR_TYPE_MUTABLE_EXT);

    return result;
}

// How to create the source set when copying descriptors from another set.
// * MUTABLE means to transform bindings into mutable bindings.
// * NONMUTABLE means to transform bindings into non-mutable bindings.
// * NO_SOURCE means no source set is used.
enum class SourceSetStrategy
{
    MUTABLE = 0,
    NONMUTABLE,
    NO_SOURCE,
};

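// How the lists of mutable descriptor types are passed to the descriptor pool at creation time.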
enum class PoolMutableStrategy
{
    KEEP_TYPES = 0,
    EXPAND_TYPES,
    NO_TYPES,
    KEEP_NO_MUTABLE_TYPES // Mutable descriptor type out-of-range case: do not keep the type list of a mutable descriptor in the array of type lists for allocation in the descriptor pool.
};

// Type of information that's present in VkWriteDescriptorSet.
enum class WriteType
{
    IMAGE_INFO = 0,
    BUFFER_INFO,
    BUFFER_VIEW,
    ACCELERATION_STRUCTURE_INFO,
};

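// Tagged union wrapping the information structure needed by VkWriteDescriptorSet for a given write type.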
struct WriteInfo
{
    WriteType writeType;
    union
    {
        VkDescriptorImageInfo imageInfo;
        VkDescriptorBufferInfo bufferInfo;
        VkBufferView bufferView;
        VkWriteDescriptorSetAccelerationStructureKHR asInfo;
    };

    explicit WriteInfo(const VkDescriptorImageInfo &info_) : writeType(WriteType::IMAGE_INFO), imageInfo(info_)
    {
    }

    explicit WriteInfo(const VkDescriptorBufferInfo &info_) : writeType(WriteType::BUFFER_INFO), bufferInfo(info_)
    {
    }

    explicit WriteInfo(VkBufferView view_) : writeType(WriteType::BUFFER_VIEW), bufferView(view_)
    {
    }

    explicit WriteInfo(const VkWriteDescriptorSetAccelerationStructureKHR &asInfo_)
        : writeType(WriteType::ACCELERATION_STRUCTURE_INFO)
        , asInfo(asInfo_)
    {
    }
};

// Resource backing up a single binding.
enum class ResourceType
{
    SAMPLER = 0,
    IMAGE,
    COMBINED_IMAGE_SAMPLER,
    BUFFER,
    BUFFER_VIEW,
    ACCELERATION_STRUCTURE,
};

// Type of resource backing up a particular descriptor type.
ResourceType toResourceType(VkDescriptorType descriptorType)
{
    ResourceType resourceType = ResourceType::SAMPLER;
    switch (descriptorType)
    {
    case VK_DESCRIPTOR_TYPE_SAMPLER:
        resourceType = ResourceType::SAMPLER;
        break;

    case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
        resourceType = ResourceType::COMBINED_IMAGE_SAMPLER;
        break;

    case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
    case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
    case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
        resourceType = ResourceType::IMAGE;
        break;

    case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
    case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
        resourceType = ResourceType::BUFFER_VIEW;
        break;

    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
        resourceType = ResourceType::BUFFER;
        break;

    case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR:
        resourceType = ResourceType::ACCELERATION_STRUCTURE;
        break;

    default:
        DE_ASSERT(false);
        break;
    }

    return resourceType;
}

bool isShaderWritable(VkDescriptorType descriptorType)
{
    return (descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER || descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE ||
            descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER);
}

Move<VkSampler> makeDefaultSampler(const DeviceInterface &vkd, VkDevice device)
{
    const VkSamplerCreateInfo samplerCreateInfo = {
        VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO, // VkStructureType sType;
        nullptr,                               // const void* pNext;
        0u,                                    // VkSamplerCreateFlags flags;
        VK_FILTER_NEAREST,                     // VkFilter magFilter;
        VK_FILTER_NEAREST,                     // VkFilter minFilter;
        VK_SAMPLER_MIPMAP_MODE_NEAREST,        // VkSamplerMipmapMode mipmapMode;
        VK_SAMPLER_ADDRESS_MODE_REPEAT,        // VkSamplerAddressMode addressModeU;
        VK_SAMPLER_ADDRESS_MODE_REPEAT,        // VkSamplerAddressMode addressModeV;
        VK_SAMPLER_ADDRESS_MODE_REPEAT,        // VkSamplerAddressMode addressModeW;
        0.f,                                   // float mipLodBias;
        VK_FALSE,                              // VkBool32 anisotropyEnable;
        1.f,                                   // float maxAnisotropy;
        VK_FALSE,                              // VkBool32 compareEnable;
        VK_COMPARE_OP_ALWAYS,                  // VkCompareOp compareOp;
        0.f,                                   // float minLod;
        0.f,                                   // float maxLod;
        VK_BORDER_COLOR_INT_TRANSPARENT_BLACK, // VkBorderColor borderColor;
        VK_FALSE,                              // VkBool32 unnormalizedCoordinates;
    };

    return createSampler(vkd, device, &samplerCreateInfo);
}

de::MovePtr<ImageWithMemory> makeDefaultImage(const DeviceInterface &vkd, VkDevice device, Allocator &alloc)
{
    const auto extent = makeExtent3D(1u, 1u, 1u);
    const VkImageUsageFlags usageFlags =
        (VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT |
         VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT);

    const VkImageCreateInfo imageCreateInfo = {
        VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
        nullptr,                             // const void* pNext;
        0u,                                  // VkImageCreateFlags flags;
        VK_IMAGE_TYPE_2D,                    // VkImageType imageType;
        getDescriptorImageFormat(),          // VkFormat format;
        extent,                              // VkExtent3D extent;
        1u,                                  // uint32_t mipLevels;
        1u,                                  // uint32_t arrayLayers;
        VK_SAMPLE_COUNT_1_BIT,               // VkSampleCountFlagBits samples;
        VK_IMAGE_TILING_OPTIMAL,             // VkImageTiling tiling;
        usageFlags,                          // VkImageUsageFlags usage;
        VK_SHARING_MODE_EXCLUSIVE,           // VkSharingMode sharingMode;
        0u,                                  // uint32_t queueFamilyIndexCount;
        nullptr,                             // const uint32_t* pQueueFamilyIndices;
        VK_IMAGE_LAYOUT_UNDEFINED,           // VkImageLayout initialLayout;
    };
    return de::MovePtr<ImageWithMemory>(
        new ImageWithMemory(vkd, device, alloc, imageCreateInfo, MemoryRequirement::Any));
}

Move<VkImageView> makeDefaultImageView(const DeviceInterface &vkd, VkDevice device, VkImage image)
{
    const auto subresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u);
    return makeImageView(vkd, device, image, VK_IMAGE_VIEW_TYPE_2D, getDescriptorImageFormat(), subresourceRange);
}

de::MovePtr<BufferWithMemory> makeDefaultBuffer(const DeviceInterface &vkd, VkDevice device, Allocator &alloc,
                                                uint32_t numElements = 1u)
{
    const VkBufferUsageFlags bufferUsage =
        (VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
         VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT |
         VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT);

    const auto bufferSize = static_cast<VkDeviceSize>(sizeof(uint32_t) * static_cast<size_t>(numElements));

    const auto bufferCreateInfo = makeBufferCreateInfo(bufferSize, bufferUsage);

    return de::MovePtr<BufferWithMemory>(
        new BufferWithMemory(vkd, device, alloc, bufferCreateInfo, MemoryRequirement::HostVisible));
}

Move<VkBufferView> makeDefaultBufferView(const DeviceInterface &vkd, VkDevice device, VkBuffer buffer)
{
    const auto bufferOffset = static_cast<VkDeviceSize>(0);
    const auto bufferSize = static_cast<VkDeviceSize>(sizeof(uint32_t));

    return makeBufferView(vkd, device, buffer, getDescriptorImageFormat(), bufferOffset, bufferSize);
}

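// Owns the top- and bottom-level acceleration structures backing a single ray tracing descriptor. Move-only.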
struct AccelerationStructureData
{
    using TLASPtr = de::MovePtr<TopLevelAccelerationStructure>;
    using BLASPtr = de::MovePtr<BottomLevelAccelerationStructure>;

    TLASPtr tlas;
    BLASPtr blas;

    void swap(AccelerationStructureData &other)
    {
        auto myTlasPtr = tlas.release();
        auto myBlasPtr = blas.release();

        auto otherTlasPtr = other.tlas.release();
        auto otherBlasPtr = other.blas.release();

        tlas = TLASPtr(otherTlasPtr);
        blas = BLASPtr(otherBlasPtr);

        other.tlas = TLASPtr(myTlasPtr);
        other.blas = BLASPtr(myBlasPtr);
    }

    AccelerationStructureData() : tlas(), blas()
    {
    }

    AccelerationStructureData(AccelerationStructureData &&other) : AccelerationStructureData()
    {
        swap(other);
    }

    AccelerationStructureData &operator=(AccelerationStructureData &&other)
    {
        swap(other);
        return *this;
    }
};

AccelerationStructureData makeDefaultAccelerationStructure(const DeviceInterface &vkd, VkDevice device,
                                                           VkCommandBuffer cmdBuffer, Allocator &alloc, bool triangles,
                                                           uint16_t offsetX)
{
    AccelerationStructureData data;

    // Triangle (or AABB, when not using triangles) around (offsetX, 0) with depth 5.0.
    const float middleX = static_cast<float>(offsetX);
    const float leftX = middleX - 0.5f;
    const float rightX = middleX + 0.5f;
    const float topY = 0.5f;
    const float bottomY = -0.5f;
    const float depth = 5.0f;

    std::vector<tcu::Vec3> vertices;

    if (triangles)
    {
        vertices.reserve(3u);
        vertices.emplace_back(middleX, topY, depth);
        vertices.emplace_back(rightX, bottomY, depth);
        vertices.emplace_back(leftX, bottomY, depth);
    }
    else
    {
        // Two opposite corners defining the AABB.
        vertices.reserve(2u);
        vertices.emplace_back(leftX, bottomY, depth);
        vertices.emplace_back(rightX, topY, depth);
    }

    data.tlas = makeTopLevelAccelerationStructure();
    data.blas = makeBottomLevelAccelerationStructure();

    VkGeometryInstanceFlagsKHR instanceFlags = 0u;
    if (triangles)
        instanceFlags |= VK_GEOMETRY_INSTANCE_TRIANGLE_FACING_CULL_DISABLE_BIT_KHR;

    data.blas->addGeometry(vertices, triangles, VK_GEOMETRY_NO_DUPLICATE_ANY_HIT_INVOCATION_BIT_KHR);
    data.blas->createAndBuild(vkd, device, cmdBuffer, alloc);

    de::SharedPtr<BottomLevelAccelerationStructure> blasSharedPtr(data.blas.release());
    data.tlas->setInstanceCount(1u);
    data.tlas->addInstance(blasSharedPtr, identityMatrix3x4, 0u, 0xFFu, 0u, instanceFlags);
    data.tlas->createAndBuild(vkd, device, cmdBuffer, alloc);

    return data;
}

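// Access mask covering both shader reads and writes, used in the barriers below.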
const auto kShaderAccess = (VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT);

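// Backing resources for a single descriptor: depending on the descriptor type, a sampler, image, buffer, buffer view
// or acceleration structure is created and initialized to a known value derived from initialValue.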
struct Resource
{
    VkDescriptorType descriptorType;
    ResourceType resourceType;
    Move<VkSampler> sampler;
    de::MovePtr<ImageWithMemory> imageWithMemory;
    Move<VkImageView> imageView;
    de::MovePtr<BufferWithMemory> bufferWithMemory;
    Move<VkBufferView> bufferView;
    AccelerationStructureData asData;
    uint32_t initialValue;

    Resource(VkDescriptorType descriptorType_, const DeviceInterface &vkd, VkDevice device, Allocator &alloc,
             uint32_t qIndex, VkQueue queue, bool useAABBs, uint32_t initialValue_, uint32_t numElements = 1u)
        : descriptorType(descriptorType_)
        , resourceType(toResourceType(descriptorType))
        , sampler()
        , imageWithMemory()
        , imageView()
        , bufferWithMemory()
        , bufferView()
        , asData()
        , initialValue(initialValue_)
    {
        if (numElements != 1u)
            DE_ASSERT(resourceType == ResourceType::BUFFER);

        switch (resourceType)
        {
        case ResourceType::SAMPLER:
            sampler = makeDefaultSampler(vkd, device);
            break;

        case ResourceType::IMAGE:
            imageWithMemory = makeDefaultImage(vkd, device, alloc);
            imageView = makeDefaultImageView(vkd, device, imageWithMemory->get());
            break;

        case ResourceType::COMBINED_IMAGE_SAMPLER:
            sampler = makeDefaultSampler(vkd, device);
            imageWithMemory = makeDefaultImage(vkd, device, alloc);
            imageView = makeDefaultImageView(vkd, device, imageWithMemory->get());
            break;

        case ResourceType::BUFFER:
            bufferWithMemory = makeDefaultBuffer(vkd, device, alloc, numElements);
            break;

        case ResourceType::BUFFER_VIEW:
            bufferWithMemory = makeDefaultBuffer(vkd, device, alloc);
            bufferView = makeDefaultBufferView(vkd, device, bufferWithMemory->get());
            break;

        case ResourceType::ACCELERATION_STRUCTURE:
        {
            const auto cmdPool = makeCommandPool(vkd, device, qIndex);
            const auto cmdBufferPtr =
                allocateCommandBuffer(vkd, device, cmdPool.get(), VK_COMMAND_BUFFER_LEVEL_PRIMARY);
            const auto cmdBuffer = cmdBufferPtr.get();
            const bool triangles = !useAABBs;

            beginCommandBuffer(vkd, cmdBuffer);
            asData = makeDefaultAccelerationStructure(vkd, device, cmdBuffer, alloc, triangles,
                                                      getAccelerationStructureOffsetX(initialValue));
            endCommandBuffer(vkd, cmdBuffer);
            submitCommandsAndWait(vkd, device, queue, cmdBuffer);
        }
        break;

        default:
            DE_ASSERT(false);
            break;
        }

        if (imageWithMemory || bufferWithMemory)
        {
            const auto cmdPool = makeCommandPool(vkd, device, qIndex);
            const auto cmdBufferPtr =
                allocateCommandBuffer(vkd, device, cmdPool.get(), VK_COMMAND_BUFFER_LEVEL_PRIMARY);
            const auto cmdBuffer = cmdBufferPtr.get();

            if (imageWithMemory)
            {
                // Prepare staging buffer.
                const auto bufferSize = static_cast<VkDeviceSize>(sizeof(initialValue));
                const VkBufferUsageFlags bufferUsage = (VK_BUFFER_USAGE_TRANSFER_SRC_BIT);
                const auto stagingBufferInfo = makeBufferCreateInfo(bufferSize, bufferUsage);

                BufferWithMemory stagingBuffer(vkd, device, alloc, stagingBufferInfo, MemoryRequirement::HostVisible);
                auto &bufferAlloc = stagingBuffer.getAllocation();
                void *bufferData = bufferAlloc.getHostPtr();

                deMemcpy(bufferData, &initialValue, sizeof(initialValue));
                flushAlloc(vkd, device, bufferAlloc);

                beginCommandBuffer(vkd, cmdBuffer);

                // Transition and copy image.
                const auto copyRegion = makeBufferImageCopy(
                    makeExtent3D(1u, 1u, 1u), makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, 1u));

                // Switch image to TRANSFER_DST_OPTIMAL before copying data to it.
                const auto subresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u);

                const auto preTransferBarrier = makeImageMemoryBarrier(
                    0u, VK_ACCESS_TRANSFER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                    imageWithMemory->get(), subresourceRange);

                vkd.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u,
                                       0u, nullptr, 0u, nullptr, 1u, &preTransferBarrier);

                // Copy data to image.
                vkd.cmdCopyBufferToImage(cmdBuffer, stagingBuffer.get(), imageWithMemory->get(),
                                         VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1u, &copyRegion);

                // Switch image to the GENERAL layout before reading or writing to it from shaders.
                const auto postTransferBarrier = makeImageMemoryBarrier(
                    VK_ACCESS_TRANSFER_WRITE_BIT, kShaderAccess, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                    VK_IMAGE_LAYOUT_GENERAL, imageWithMemory->get(), subresourceRange);

                vkd.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
                                       0u, 0u, nullptr, 0u, nullptr, 1u, &postTransferBarrier);

                endCommandBuffer(vkd, cmdBuffer);
                submitCommandsAndWait(vkd, device, queue, cmdBuffer);
            }

            if (bufferWithMemory)
            {
                auto &bufferAlloc = bufferWithMemory->getAllocation();
                void *bufferData = bufferAlloc.getHostPtr();

                const std::vector<uint32_t> bufferValues(numElements, initialValue);
                deMemcpy(bufferData, bufferValues.data(), de::dataSize(bufferValues));
                flushAlloc(vkd, device, bufferAlloc);

                beginCommandBuffer(vkd, cmdBuffer);

                // Make sure host writes happen before shader reads/writes. Note: this barrier is not needed in theory.
                const auto hostToShaderBarrier = makeMemoryBarrier(VK_ACCESS_HOST_WRITE_BIT, kShaderAccess);

                vkd.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0u,
                                       1u, &hostToShaderBarrier, 0u, nullptr, 0u, nullptr);

                endCommandBuffer(vkd, cmdBuffer);
                submitCommandsAndWait(vkd, device, queue, cmdBuffer);
            }
        }
    }

    // Remove problematic copy constructor.
    Resource(const Resource &) = delete;

    // Make it movable.
    Resource(Resource &&other) noexcept
        : descriptorType(other.descriptorType)
        , resourceType(other.resourceType)
        , sampler(other.sampler)
        , imageWithMemory(other.imageWithMemory.release())
        , imageView(other.imageView)
        , bufferWithMemory(other.bufferWithMemory.release())
        , bufferView(other.bufferView)
        , asData(std::move(other.asData))
        , initialValue(other.initialValue)
    {
    }

    ~Resource()
    {
    }

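    // Builds the VkWriteDescriptorSet payload needed to write this resource into a descriptor set.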
    WriteInfo makeWriteInfo() const
    {
        using WriteInfoPtr = de::MovePtr<WriteInfo>;

        WriteInfoPtr writeInfo;

        switch (resourceType)
        {
        case ResourceType::SAMPLER:
        {
            const VkDescriptorImageInfo imageInfo = {sampler.get(), VK_NULL_HANDLE, VK_IMAGE_LAYOUT_UNDEFINED};
            writeInfo = WriteInfoPtr(new WriteInfo(imageInfo));
        }
        break;

        case ResourceType::IMAGE:
        {
            const VkDescriptorImageInfo imageInfo = {VK_NULL_HANDLE, imageView.get(), VK_IMAGE_LAYOUT_GENERAL};
            writeInfo = WriteInfoPtr(new WriteInfo(imageInfo));
        }
        break;

        case ResourceType::COMBINED_IMAGE_SAMPLER:
        {
            const VkDescriptorImageInfo imageInfo = {sampler.get(), imageView.get(), VK_IMAGE_LAYOUT_GENERAL};
            writeInfo = WriteInfoPtr(new WriteInfo(imageInfo));
        }
        break;

        case ResourceType::BUFFER:
        {
            const VkDescriptorBufferInfo bufferInfo = {bufferWithMemory->get(), 0ull,
                                                       static_cast<VkDeviceSize>(sizeof(uint32_t))};
            writeInfo = WriteInfoPtr(new WriteInfo(bufferInfo));
        }
        break;

        case ResourceType::BUFFER_VIEW:
            writeInfo = WriteInfoPtr(new WriteInfo(bufferView.get()));
            break;

        case ResourceType::ACCELERATION_STRUCTURE:
        {
            VkWriteDescriptorSetAccelerationStructureKHR asWrite = initVulkanStructure();
            asWrite.accelerationStructureCount = 1u;
            asWrite.pAccelerationStructures = asData.tlas.get()->getPtr();
            writeInfo = WriteInfoPtr(new WriteInfo(asWrite));
        }
        break;

        default:
            DE_ASSERT(false);
            break;
        }

        return *writeInfo;
    }

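    // Reads back the value stored in the resource, copying it to a host-visible staging buffer for images or reading
    // the host-visible allocation directly for buffers. Returns nothing for resources without image or buffer backing.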
    tcu::Maybe<uint32_t> getStoredValue(const DeviceInterface &vkd, VkDevice device, Allocator &alloc, uint32_t qIndex,
                                        VkQueue queue, uint32_t position = 0u) const
    {
        if (position != 0u)
            DE_ASSERT(static_cast<bool>(bufferWithMemory));

        if (imageWithMemory || bufferWithMemory)
        {
            // Command pool and buffer.
            const auto cmdPool = makeCommandPool(vkd, device, qIndex);
            const auto cmdBufferPtr =
                allocateCommandBuffer(vkd, device, cmdPool.get(), VK_COMMAND_BUFFER_LEVEL_PRIMARY);
            const auto cmdBuffer = cmdBufferPtr.get();

            if (imageWithMemory)
            {
                // Prepare staging buffer.
                uint32_t result;
                const auto bufferSize = static_cast<VkDeviceSize>(sizeof(result));
                const VkBufferUsageFlags bufferUsage = (VK_BUFFER_USAGE_TRANSFER_DST_BIT);
                const auto stagingBufferInfo = makeBufferCreateInfo(bufferSize, bufferUsage);

                BufferWithMemory stagingBuffer(vkd, device, alloc, stagingBufferInfo, MemoryRequirement::HostVisible);
                auto &bufferAlloc = stagingBuffer.getAllocation();
                void *bufferData = bufferAlloc.getHostPtr();

                // Copy image value to staging buffer.
                beginCommandBuffer(vkd, cmdBuffer);

                // Make sure shader accesses happen before transfers and prepare image for transfer.
                const auto colorResourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u);

                const auto preTransferBarrier = makeImageMemoryBarrier(
                    kShaderAccess, VK_ACCESS_TRANSFER_READ_BIT, VK_IMAGE_LAYOUT_GENERAL,
                    VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, imageWithMemory->get(), colorResourceRange);

                vkd.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
                                       0u, 0u, nullptr, 0u, nullptr, 1u, &preTransferBarrier);

                // Copy image contents to staging buffer.
                const auto copyRegion = makeBufferImageCopy(
                    makeExtent3D(1u, 1u, 1u), makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, 1u));
                vkd.cmdCopyImageToBuffer(cmdBuffer, imageWithMemory->get(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                         stagingBuffer.get(), 1u, &copyRegion);

                // Make sure writes are visible from the host.
                const auto postTransferBarrier =
                    makeMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT);
                vkd.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u, 1u,
                                       &postTransferBarrier, 0u, nullptr, 0u, nullptr);

                endCommandBuffer(vkd, cmdBuffer);
                submitCommandsAndWait(vkd, device, queue, cmdBuffer);

                // Get value from staging buffer.
                invalidateAlloc(vkd, device, bufferAlloc);
                deMemcpy(&result, bufferData, sizeof(result));
                return tcu::just(result);
            }

            if (bufferWithMemory)
            {
                auto &bufferAlloc = bufferWithMemory->getAllocation();
                auto bufferData = reinterpret_cast<const char *>(bufferAlloc.getHostPtr());
                uint32_t result;

                // Make sure shader writes are visible from the host.
                beginCommandBuffer(vkd, cmdBuffer);

                const auto shaderToHostBarrier = makeMemoryBarrier(kShaderAccess, VK_ACCESS_HOST_READ_BIT);
                vkd.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u,
                                       1u, &shaderToHostBarrier, 0u, nullptr, 0u, nullptr);

                endCommandBuffer(vkd, cmdBuffer);
                submitCommandsAndWait(vkd, device, queue, cmdBuffer);

                invalidateAlloc(vkd, device, bufferAlloc);
                deMemcpy(&result, bufferData + sizeof(uint32_t) * static_cast<size_t>(position), sizeof(result));
                return tcu::just(result);
            }
        }

        return tcu::Nothing;
    }
};

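// Interface for a descriptor set binding as used in these tests: it knows which descriptor types it uses on each
// iteration and can produce its backing resources, GLSL declarations and GLSL check statements.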
struct BindingInterface
{
    virtual ~BindingInterface()
    {
    }

    // Minimum number of iterations to test all mutable types.
    virtual uint32_t maxTypes() const = 0;

    // Types that will be used by the binding at a given iteration.
    virtual std::vector<VkDescriptorType> typesAtIteration(uint32_t iteration) const = 0;

    // Binding's main type.
    virtual VkDescriptorType mainType() const = 0;

    // Binding's list of mutable types, if present.
    virtual std::vector<VkDescriptorType> mutableTypes() const = 0;

    // Descriptor count in the binding.
    virtual size_t size() const = 0;

    // Is the binding an array binding?
    virtual bool isArray() const = 0;

    // Is the binding an unbounded array?
    virtual bool isUnbounded() const = 0;

    // Will the binding use different descriptor types in a given iteration?
    virtual bool needsAliasing(uint32_t iteration) const
    {
        const auto typesVec = typesAtIteration(iteration);
        std::set<VkDescriptorType> descTypes(begin(typesVec), end(typesVec));
        return (descTypes.size() > 1u);
    }

    // Will the binding need aliasing on any iteration up to a given number?
    virtual bool needsAliasingUpTo(uint32_t numIterations) const
    {
        std::vector<bool> needsAliasingFlags;
        needsAliasingFlags.reserve(numIterations);

        for (uint32_t iter = 0u; iter < numIterations; ++iter)
            needsAliasingFlags.push_back(needsAliasing(iter));

        return std::any_of(begin(needsAliasingFlags), end(needsAliasingFlags), [](bool f) { return f; });
    }

private:
    virtual bool hasDescriptorType(uint32_t iteration, VkDescriptorType descriptorType) const
    {
        const auto typesVec = typesAtIteration(iteration);
        return (std::find(begin(typesVec), end(typesVec), descriptorType) != end(typesVec));
    }

public:
    // Convert one particular binding to a mutable or non-mutable equivalent binding, returning the equivalent binding.
    virtual de::MovePtr<BindingInterface> toMutable(uint32_t iteration) const = 0;
    virtual de::MovePtr<BindingInterface> toNonMutable(uint32_t iteration) const = 0;

    // Create resources needed to back up this binding.
    virtual std::vector<Resource> createResources(const DeviceInterface &vkd, VkDevice device, Allocator &alloc,
                                                  uint32_t qIndex, VkQueue queue, uint32_t iteration, bool useAABBs,
                                                  uint32_t baseValue) const = 0;

    // Get GLSL binding declarations. Note: no array size means no array; if size is < 0 it means unbounded array.
    virtual std::string glslDeclarations(uint32_t iteration, uint32_t setNum, uint32_t bindingNum,
                                         uint32_t inputAttachmentIdx, tcu::Maybe<int32_t> arraySize) const = 0;

    // Get GLSL statements to check this binding.
    virtual std::string glslCheckStatements(uint32_t iteration, uint32_t setNum, uint32_t bindingNum,
                                            uint32_t baseValue, tcu::Maybe<uint32_t> arrayIndex,
                                            bool usePushConstants) const = 0;
};

// Represents a single binding that will be used in a test.
class SingleBinding : public BindingInterface
{
private:
    VkDescriptorType type;                         // The descriptor type.
    std::vector<VkDescriptorType> mutableTypesVec; // The types that will be used for each iteration of a test if mutable.

public:
    SingleBinding(VkDescriptorType type_, std::vector<VkDescriptorType> mutableTypes_)
        : type(type_)
        , mutableTypesVec(std::move(mutableTypes_))
    {
        static const auto kForbiddenMutableTypes = getForbiddenMutableTypes();
        const auto kBeginForbidden = begin(kForbiddenMutableTypes);
        const auto kEndForbidden = end(kForbiddenMutableTypes);

        // For release builds.
        DE_UNREF(kBeginForbidden);
        DE_UNREF(kEndForbidden);

        if (type != VK_DESCRIPTOR_TYPE_MUTABLE_EXT)
        {
            DE_ASSERT(mutableTypesVec.empty());
        }
        else
        {
            DE_ASSERT(!mutableTypesVec.empty());
            DE_ASSERT(std::none_of(begin(mutableTypesVec), end(mutableTypesVec),
                                   [&kBeginForbidden, &kEndForbidden](VkDescriptorType t) -> bool
                                   { return std::find(kBeginForbidden, kEndForbidden, t) != kEndForbidden; }));
        }
    }

    uint32_t maxTypes() const override
    {
        if (type != VK_DESCRIPTOR_TYPE_MUTABLE_EXT)
            return 1u;
        const auto vecSize = mutableTypesVec.size();
        DE_ASSERT(vecSize <= std::numeric_limits<uint32_t>::max());
        return static_cast<uint32_t>(vecSize);
    }

    VkDescriptorType typeAtIteration(uint32_t iteration) const
    {
        return typesAtIteration(iteration)[0];
    }

    std::vector<VkDescriptorType> usedTypes() const
    {
        if (type != VK_DESCRIPTOR_TYPE_MUTABLE_EXT)
            return std::vector<VkDescriptorType>(1u, type);
        return mutableTypesVec;
    }

    std::vector<VkDescriptorType> typesAtIteration(uint32_t iteration) const override
    {
        const auto typesVec = usedTypes();
        return std::vector<VkDescriptorType>(1u, typesVec[static_cast<size_t>(iteration) % typesVec.size()]);
    }

    VkDescriptorType mainType() const override
    {
        return type;
    }

    std::vector<VkDescriptorType> mutableTypes() const override
    {
        return mutableTypesVec;
    }

    size_t size() const override
    {
        return size_t{1u};
    }

    bool isArray() const override
    {
        return false;
    }

    bool isUnbounded() const override
    {
        return false;
    }

    de::MovePtr<BindingInterface> toMutable(uint32_t iteration) const override
    {
        DE_UNREF(iteration);

        static const auto kMandatoryMutableTypeFlags = toDescriptorTypeFlags(getMandatoryMutableTypes());
        if (type == VK_DESCRIPTOR_TYPE_MUTABLE_EXT)
        {
            const auto descFlags = toDescriptorTypeFlags(mutableTypesVec);
            return de::MovePtr<BindingInterface>(new SingleBinding(type, toDescriptorTypeVector(descFlags)));
        }

        // Make sure it's not a forbidden mutable type.
        static const auto kForbiddenMutableTypes = getForbiddenMutableTypes();
        DE_ASSERT(std::find(begin(kForbiddenMutableTypes), end(kForbiddenMutableTypes), type) ==
                  end(kForbiddenMutableTypes));

        // Convert the binding to mutable using a wider set of descriptor types if possible, including the binding type.
        const auto descFlags = (kMandatoryMutableTypeFlags | toDescriptorTypeFlagBit(type));

        return de::MovePtr<BindingInterface>(
            new SingleBinding(VK_DESCRIPTOR_TYPE_MUTABLE_EXT, toDescriptorTypeVector(descFlags)));
    }

    de::MovePtr<BindingInterface> toNonMutable(uint32_t iteration) const override
    {
        return de::MovePtr<BindingInterface>(
            new SingleBinding(typeAtIteration(iteration), std::vector<VkDescriptorType>()));
    }

    std::vector<Resource> createResources(const DeviceInterface &vkd, VkDevice device, Allocator &alloc,
                                          uint32_t qIndex, VkQueue queue, uint32_t iteration, bool useAABBs,
                                          uint32_t baseValue) const override
    {
        const auto descriptorType = typeAtIteration(iteration);

        std::vector<Resource> resources;
        resources.emplace_back(descriptorType, vkd, device, alloc, qIndex, queue, useAABBs, baseValue);
        return resources;
    }

    std::string glslDeclarations(uint32_t iteration, uint32_t setNum, uint32_t bindingNum, uint32_t inputAttachmentIdx,
                                 tcu::Maybe<int32_t> arraySize) const override
    {
        const auto descriptorType = typeAtIteration(iteration);
        const std::string arraySuffix =
            ((static_cast<bool>(arraySize)) ?
                 ((arraySize.get() < 0) ? "[]" : ("[" + de::toString(arraySize.get()) + "]")) :
                 "");
        const std::string layoutAttribs = "set=" + de::toString(setNum) + ", binding=" + de::toString(bindingNum);
        const std::string bindingSuffix = "_" + de::toString(setNum) + "_" + de::toString(bindingNum);
        const std::string nameSuffix = bindingSuffix + arraySuffix;
        std::ostringstream declarations;

        declarations << "layout (";

        switch (descriptorType)
        {
        case VK_DESCRIPTOR_TYPE_SAMPLER:
            declarations << layoutAttribs << ") uniform sampler sampler" << nameSuffix;
            break;

        case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
            declarations << layoutAttribs << ") uniform usampler2D combinedSampler" << nameSuffix;
            break;

        case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
            declarations << layoutAttribs << ") uniform utexture2D sampledImage" << nameSuffix;
            break;

        case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
            declarations << layoutAttribs << ") uniform uboBlock" << bindingSuffix << " { uint val; } ubo"
                         << nameSuffix;
            break;

        case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
            declarations << layoutAttribs << ") buffer sboBlock" << bindingSuffix << " { uint val; } ssbo"
                         << nameSuffix;
            break;

        case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
            declarations << layoutAttribs << ") uniform utextureBuffer uniformTexel" << nameSuffix;
            break;

        case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
            declarations << layoutAttribs << ", r32ui) uniform uimageBuffer storageTexel" << nameSuffix;
            break;

        case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
            declarations << layoutAttribs << ", r32ui) uniform uimage2D storageImage" << nameSuffix;
            break;

        case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
            declarations << layoutAttribs << ", input_attachment_index=" << inputAttachmentIdx
                         << ") uniform usubpassInput inputAttachment" << nameSuffix;
            break;

        case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR:
            declarations << layoutAttribs << ") uniform accelerationStructureEXT accelerationStructure" << nameSuffix;
            break;

        default:
            DE_ASSERT(false);
            break;
        }

        declarations << ";\n";

        return declarations.str();
    }

    std::string glslCheckStatements(uint32_t iteration, uint32_t setNum, uint32_t bindingNum, uint32_t baseValue_,
                                    tcu::Maybe<uint32_t> arrayIndex, bool usePushConstants) const override
    {
        const auto descriptorType = typeAtIteration(iteration);
        const std::string bindingSuffix = "_" + de::toString(setNum) + "_" + de::toString(bindingNum);

        std::string indexSuffix;
        if (arrayIndex)
        {
            indexSuffix = de::toString(arrayIndex.get());
            if (usePushConstants)
                indexSuffix += " + pc.zero";
            indexSuffix = "[" + indexSuffix + "]";
        }

        const std::string nameSuffix = bindingSuffix + indexSuffix;
        const std::string baseValue = toHex(baseValue_);
        const std::string externalImageValue = toHex(getExternalSampledImageValue());
        const std::string mask = toHex(getStoredValueMask());

        std::ostringstream checks;

        // Note: all of these depend on an external anyError uint variable.
        switch (descriptorType)
        {
        case VK_DESCRIPTOR_TYPE_SAMPLER:
            // Note this depends on an "externalSampledImage" binding.
            checks << " {\n";
            checks << " uint readValue = texture(usampler2D(externalSampledImage, sampler" << nameSuffix
                   << "), vec2(0, 0)).r;\n";
            checks << " debugPrintfEXT(\"iteration-" << iteration << nameSuffix << ": 0x%xu\\n\", readValue);\n";
            checks << " anyError |= ((readValue == " << externalImageValue << ") ? 0u : 1u);\n";
            //checks << " anyError = readValue;\n";
            checks << " }\n";
            break;

        case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
            checks << " {\n";
            checks << " uint readValue = texture(combinedSampler" << nameSuffix << ", vec2(0, 0)).r;\n";
            checks << " debugPrintfEXT(\"iteration-" << iteration << nameSuffix << ": 0x%xu\\n\", readValue);\n";
            checks << " anyError |= ((readValue == " << baseValue << ") ? 0u : 1u);\n";
            //checks << " anyError = readValue;\n";
            checks << " }\n";
            break;

        case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
            // Note this depends on an "externalSampler" binding.
            checks << " {\n";
            checks << " uint readValue = texture(usampler2D(sampledImage" << nameSuffix
                   << ", externalSampler), vec2(0, 0)).r;\n";
            checks << " debugPrintfEXT(\"iteration-" << iteration << nameSuffix << ": 0x%xu\\n\", readValue);\n";
            checks << " anyError |= ((readValue == " << baseValue << ") ? 0u : 1u);\n";
            //checks << " anyError = readValue;\n";
            checks << " }\n";
            break;

        case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
            checks << " {\n";
            checks << " uint readValue = ubo" << nameSuffix << ".val;\n";
            checks << " debugPrintfEXT(\"iteration-" << iteration << nameSuffix << ": 0x%xu\\n\", readValue);\n";
            checks << " anyError |= ((readValue == " << baseValue << ") ? 0u : 1u);\n";
            //checks << " anyError = readValue;\n";
            checks << " }\n";
            break;

        case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
            checks << " {\n";
            checks << " uint readValue = ssbo" << nameSuffix << ".val;\n";
            checks << " debugPrintfEXT(\"iteration-" << iteration << nameSuffix << ": 0x%xu\\n\", readValue);\n";
            checks << " anyError |= ((readValue == " << baseValue << ") ? 0u : 1u);\n";
            //checks << " anyError = readValue;\n";
            // Check writes.
            checks << " ssbo" << nameSuffix << ".val = (readValue | " << mask << ");\n";
            checks << " }\n";
            break;

        case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
            checks << " {\n";
            checks << " uint readValue = texelFetch(uniformTexel" << nameSuffix << ", 0).x;\n";
            checks << " debugPrintfEXT(\"iteration-" << iteration << nameSuffix << ": 0x%xu\\n\", readValue);\n";
            checks << " anyError |= ((readValue == " << baseValue << ") ? 0u : 1u);\n";
            //checks << " anyError = readValue;\n";
            checks << " }\n";
            break;

        case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
            checks << " {\n";
            checks << " uint readValue = imageLoad(storageTexel" << nameSuffix << ", 0).x;\n";
            checks << " debugPrintfEXT(\"iteration-" << iteration << nameSuffix << ": 0x%xu\\n\", readValue);\n";
            checks << " anyError |= ((readValue == " << baseValue << ") ? 0u : 1u);\n";
            //checks << " anyError = readValue;\n";
            checks << " readValue |= " << mask << ";\n";
            // Check writes.
            checks << " imageStore(storageTexel" << nameSuffix << ", 0, uvec4(readValue, 0, 0, 0));\n";
            checks << " }\n";
            break;

        case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
            checks << " {\n";
            checks << " uint readValue = imageLoad(storageImage" << nameSuffix << ", ivec2(0, 0)).x;\n";
            checks << " debugPrintfEXT(\"iteration-" << iteration << nameSuffix << ": 0x%xu\\n\", readValue);\n";
            checks << " anyError |= ((readValue == " << baseValue << ") ? 0u : 1u);\n";
            //checks << " anyError = readValue;\n";
            checks << " readValue |= " << mask << ";\n";
            // Check writes.
            checks << " imageStore(storageImage" << nameSuffix << ", ivec2(0, 0), uvec4(readValue, 0, 0, 0));\n";
            checks << " }\n";
            break;

        case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
            checks << " {\n";
            checks << " uint readValue = subpassLoad(inputAttachment" << nameSuffix << ").x;\n";
            checks << " debugPrintfEXT(\"iteration-" << iteration << nameSuffix << ": 0x%xu\\n\", readValue);\n";
            checks << " anyError |= ((readValue == " << baseValue << ") ? 0u : 1u);\n";
            //checks << " anyError = readValue;\n";
            checks << " }\n";
            break;

        case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR:
            checks << " {\n";
            checks << " const uint cullMask = 0xFF;\n";
            checks << " const vec3 origin = vec3(" << getAccelerationStructureOffsetX(baseValue_)
                   << ".0, 0.0, 0.0);\n";
            checks << " const vec3 direction = vec3(0.0, 0.0, 1.0);\n";
            checks << " const float tmin = 1.0;\n";
            checks << " const float tmax = 10.0;\n";
            checks << " uint candidateFound = 0u;\n";
            checks << " rayQueryEXT rq;\n";
            checks << " rayQueryInitializeEXT(rq, accelerationStructure" << nameSuffix
                   << ", gl_RayFlagsNoneEXT, cullMask, origin, tmin, direction, tmax);\n";
            checks << " while (rayQueryProceedEXT(rq)) {\n";
            checks << " const uint candidateType = rayQueryGetIntersectionTypeEXT(rq, false);\n";
            checks << " if (candidateType == gl_RayQueryCandidateIntersectionTriangleEXT || candidateType == "
                      "gl_RayQueryCandidateIntersectionAABBEXT) {\n";
            checks << " candidateFound = 1u;\n";
            checks << " }\n";
            checks << " }\n";
            checks << " anyError |= ((candidateFound == 1u) ? 0u : 1u);\n";
            checks << " }\n";
            break;

        default:
            DE_ASSERT(false);
            break;
        }

        return checks.str();
    }
};
1350
1351 // Represents an array of bindings. Individual bindings are stored as SingleBindings because each one of them may take a different
1352 // type in each iteration (i.e. they can all have different descriptor type vectors).
1353 class ArrayBinding : public BindingInterface
1354 {
1355 private:
1356 bool unbounded;
1357 std::vector<SingleBinding> bindings;
1358
1359 public:
ArrayBinding(bool unbounded_,std::vector<SingleBinding> bindings_)1360 ArrayBinding(bool unbounded_, std::vector<SingleBinding> bindings_)
1361 : unbounded(unbounded_)
1362 , bindings(std::move(bindings_))
1363 {
1364 // We need to check all single bindings have the same effective type, even if mutable descriptors have different orders.
1365 DE_ASSERT(!bindings.empty());
1366
1367 std::set<VkDescriptorType> basicTypes;
1368 std::set<DescriptorTypeFlags> bindingTypes;
1369
1370 for (const auto &b : bindings)
1371 {
1372 basicTypes.insert(b.mainType());
1373 bindingTypes.insert(toDescriptorTypeFlags(b.usedTypes()));
1374 }
1375
1376 DE_ASSERT(basicTypes.size() == 1u);
1377 DE_ASSERT(bindingTypes.size() == 1u);
1378
1379 // For release builds.
1380 DE_UNREF(basicTypes);
1381 DE_UNREF(bindingTypes);
1382 }
1383
maxTypes() const1384 uint32_t maxTypes() const override
1385 {
1386 // Each binding may have the same effective type but a different number of iterations due to repeated types.
1387 std::vector<size_t> bindingSizes;
1388 bindingSizes.reserve(bindings.size());
1389
1390 std::transform(begin(bindings), end(bindings), std::back_inserter(bindingSizes),
1391 [](const SingleBinding &b) { return b.usedTypes().size(); });
1392
1393 const auto maxElement = std::max_element(begin(bindingSizes), end(bindingSizes));
1394 DE_ASSERT(maxElement != end(bindingSizes));
1395 DE_ASSERT(*maxElement <= std::numeric_limits<uint32_t>::max());
1396 return static_cast<uint32_t>(*maxElement);
1397 }
1398
typesAtIteration(uint32_t iteration) const1399 std::vector<VkDescriptorType> typesAtIteration(uint32_t iteration) const override
1400 {
1401 std::vector<VkDescriptorType> result;
1402 result.reserve(bindings.size());
1403
1404 for (const auto &b : bindings)
1405 result.push_back(b.typeAtIteration(iteration));
1406
1407 return result;
1408 }
1409
mainType() const1410 VkDescriptorType mainType() const override
1411 {
1412 return bindings[0].mainType();
1413 }
1414
mutableTypes() const1415 std::vector<VkDescriptorType> mutableTypes() const override
1416 {
1417 return bindings[0].mutableTypes();
1418 }
1419
size() const1420 size_t size() const override
1421 {
1422 return bindings.size();
1423 }
1424
isArray() const1425 bool isArray() const override
1426 {
1427 return true;
1428 }
1429
isUnbounded() const1430 bool isUnbounded() const override
1431 {
1432 return unbounded;
1433 }
1434
toMutable(uint32_t iteration) const1435 de::MovePtr<BindingInterface> toMutable(uint32_t iteration) const override
1436 {
1437 // Replicate the first binding once converted, as all are equivalent.
1438 const auto firstBindingPtr = bindings[0].toMutable(iteration);
1439 const auto firstBinding = *dynamic_cast<SingleBinding *>(firstBindingPtr.get());
1440 const std::vector<SingleBinding> newBindings(bindings.size(), firstBinding);
1441
1442 return de::MovePtr<BindingInterface>(new ArrayBinding(unbounded, newBindings));
1443 }
1444
toNonMutable(uint32_t iteration) const1445 de::MovePtr<BindingInterface> toNonMutable(uint32_t iteration) const override
1446 {
1447 // Make sure this binding can be converted to a non-mutable one for the given iteration.
1448 DE_ASSERT(!needsAliasing(iteration));
1449
1450 // We could call each SingleBinding's toNonMutable(), but the result would be the same.
1451 const auto descType = bindings[0].typeAtIteration(iteration);
1452 const SingleBinding firstBinding(descType, std::vector<VkDescriptorType>());
1453 const std::vector<SingleBinding> newBindings(bindings.size(), firstBinding);
1454
1455 return de::MovePtr<BindingInterface>(new ArrayBinding(unbounded, newBindings));
1456 }
1457
1458 std::vector<Resource> createResources(const DeviceInterface &vkd, VkDevice device, Allocator &alloc,
1459 uint32_t qIndex, VkQueue queue, uint32_t iteration, bool useAABBs,
1460 uint32_t baseValue) const override
1461 {
1462 std::vector<Resource> resources;
1463 const auto numBindings = static_cast<uint32_t>(bindings.size());
1464
1465 for (uint32_t i = 0u; i < numBindings; ++i)
1466 {
1467 auto resourceVec =
1468 bindings[i].createResources(vkd, device, alloc, qIndex, queue, iteration, useAABBs, baseValue + i);
1469 resources.emplace_back(std::move(resourceVec[0]));
1470 }
1471
1472 return resources;
1473 }
1474
1475 // We will ignore the array size parameter.
1476 std::string glslDeclarations(uint32_t iteration, uint32_t setNum, uint32_t bindingNum, uint32_t inputAttachmentIdx,
1477 tcu::Maybe<int32_t> arraySize) const override
1478 {
1479 const auto descriptorCount = bindings.size();
1480 const auto arraySizeVal =
1481 (isUnbounded() ? tcu::just(int32_t{-1}) : tcu::just(static_cast<int32_t>(descriptorCount)));
1482
1483 DE_UNREF(arraySize);
1484 DE_ASSERT(descriptorCount < static_cast<size_t>(std::numeric_limits<int32_t>::max()));
1485
1486 // If no aliasing is needed, a single declaration is enough.
1487 if (!needsAliasing(iteration))
1488 return bindings[0].glslDeclarations(iteration, setNum, bindingNum, inputAttachmentIdx, arraySizeVal);
1489
1490 // Aliasing needed. Avoid reusing types.
1491 const auto descriptorTypes = typesAtIteration(iteration);
1492 std::set<VkDescriptorType> usedTypes;
1493 std::ostringstream declarations;
1494
1495 for (size_t descriptorIdx = 0u; descriptorIdx < descriptorCount; ++descriptorIdx)
1496 {
1497 const auto &descriptorType = descriptorTypes[descriptorIdx];
1498 if (usedTypes.count(descriptorType) > 0)
1499 continue;
1500
1501 usedTypes.insert(descriptorType);
1502 declarations << bindings[descriptorIdx].glslDeclarations(iteration, setNum, bindingNum, inputAttachmentIdx,
1503 arraySizeVal);
1504 }
1505
1506 return declarations.str();
1507 }
1508
1509 std::string glslCheckStatements(uint32_t iteration, uint32_t setNum, uint32_t bindingNum, uint32_t baseValue_,
1510 tcu::Maybe<uint32_t> arrayIndex, bool usePushConstants) const override
1511 {
1512 DE_ASSERT(!arrayIndex);
1513 DE_UNREF(arrayIndex); // For release builds.
1514
1515 std::ostringstream checks;
1516 const auto numDescriptors = static_cast<uint32_t>(bindings.size());
1517
1518 for (uint32_t descriptorIdx = 0u; descriptorIdx < numDescriptors; ++descriptorIdx)
1519 {
1520 const auto &binding = bindings[descriptorIdx];
1521 checks << binding.glslCheckStatements(iteration, setNum, bindingNum, baseValue_ + descriptorIdx,
1522 tcu::just(descriptorIdx), usePushConstants);
1523 }
1524
1525 return checks.str();
1526 }
1527 };
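// A minimal construction sketch (not compiled; it only assumes the SingleBinding and ArrayBinding constructors seen
// above): two array elements with the same effective mutable type whose type lists are ordered differently, which is
// the situation the constructor's asserts allow and which forces aliasing in some iterations.
#if 0
const std::vector<VkDescriptorType> orderA{VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE};
const std::vector<VkDescriptorType> orderB{VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE};
const SingleBinding elementA(VK_DESCRIPTOR_TYPE_MUTABLE_EXT, orderA);
const SingleBinding elementB(VK_DESCRIPTOR_TYPE_MUTABLE_EXT, orderB);
const ArrayBinding twoElementArray(false /*unbounded*/, std::vector<SingleBinding>{elementA, elementB});
#endif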
1528
1529 class DescriptorSet;
1530
1531 using DescriptorSetPtr = de::SharedPtr<DescriptorSet>;
1532
1533 class DescriptorSet
1534 {
1535 public:
1536 using BindingInterfacePtr = de::MovePtr<BindingInterface>;
1537 using BindingPtrVector = std::vector<BindingInterfacePtr>;
1538
1539 private:
1540 BindingPtrVector bindings;
1541
1542 public:
1543 explicit DescriptorSet(BindingPtrVector &bindings_) : bindings(std::move(bindings_))
1544 {
1545 DE_ASSERT(!bindings.empty());
1546 }
1547
1548 size_t numBindings() const
1549 {
1550 return bindings.size();
1551 }
1552
1553 const BindingInterface *getBinding(size_t bindingIdx) const
1554 {
1555 return bindings.at(bindingIdx).get();
1556 }
1557
1558 // Maximum number of descriptor types used by any binding in the set.
1559 uint32_t maxTypes() const
1560 {
1561 std::vector<uint32_t> maxSizes;
1562 maxSizes.reserve(bindings.size());
1563
1564 std::transform(begin(bindings), end(bindings), std::back_inserter(maxSizes),
1565 [](const BindingInterfacePtr &b) { return b->maxTypes(); });
1566
1567 const auto maxElement = std::max_element(begin(maxSizes), end(maxSizes));
1568 DE_ASSERT(maxElement != end(maxSizes));
1569 return *maxElement;
1570 }
1571
1572 // Create another descriptor set that can be the source for copies when setting descriptor values.
1573 DescriptorSetPtr genSourceSet(SourceSetStrategy strategy, uint32_t iteration) const
1574 {
1575 BindingPtrVector newBindings;
1576 for (const auto &b : bindings)
1577 {
1578 if (strategy == SourceSetStrategy::MUTABLE)
1579 newBindings.push_back(b->toMutable(iteration));
1580 else
1581 newBindings.push_back(b->toNonMutable(iteration));
1582 }
1583
1584 return DescriptorSetPtr(new DescriptorSet(newBindings));
1585 }
1586
1587 // Makes a descriptor pool that can be used when allocating descriptors for this set.
1588 Move<VkDescriptorPool> makeDescriptorPool(const DeviceInterface &vkd, VkDevice device, PoolMutableStrategy strategy,
1589 VkDescriptorPoolCreateFlags flags) const
1590 {
1591 std::vector<VkDescriptorPoolSize> poolSizes;
1592 std::vector<std::vector<VkDescriptorType>> mutableTypesVec;
1593 std::vector<VkMutableDescriptorTypeListEXT> mutableTypeLists;
1594 bool mutableDescriptorBefore = false;
1595 uint32_t countNonMutDescAfterMutDesc = 0;
1596
1597 // Make vector element addresses stable.
1598 const auto bindingCount = numBindings();
1599 poolSizes.reserve(bindingCount);
1600 mutableTypesVec.reserve(bindingCount);
1601 mutableTypeLists.reserve(bindingCount);
1602
1603 for (const auto &b : bindings)
1604 {
1605 const auto mainType = b->mainType();
1606 const VkDescriptorPoolSize poolSize = {
1607 mainType,
1608 static_cast<uint32_t>(b->size()),
1609 };
1610 poolSizes.push_back(poolSize);
1611
1612 if (strategy == PoolMutableStrategy::KEEP_TYPES || strategy == PoolMutableStrategy::EXPAND_TYPES ||
1613 strategy == PoolMutableStrategy::KEEP_NO_MUTABLE_TYPES)
1614 {
1615 if (mainType == VK_DESCRIPTOR_TYPE_MUTABLE_EXT)
1616 {
1617 if (strategy == PoolMutableStrategy::KEEP_NO_MUTABLE_TYPES)
1618 {
1619 mutableDescriptorBefore = true;
1620 countNonMutDescAfterMutDesc = 0;
1621 continue;
1622 }
1623 else if (strategy == PoolMutableStrategy::KEEP_TYPES)
1624 {
1625 mutableTypesVec.emplace_back(b->mutableTypes());
1626 }
1627 else
1628 {
1629 // Expand the type list with the mandatory types.
1630 static const auto mandatoryTypesFlags = toDescriptorTypeFlags(getMandatoryMutableTypes());
1631 const auto bindingTypes =
1632 toDescriptorTypeVector(mandatoryTypesFlags | toDescriptorTypeFlags(b->mutableTypes()));
1633
1634 mutableTypesVec.emplace_back(bindingTypes);
1635 }
1636
1637 const auto &lastVec = mutableTypesVec.back();
1638 const VkMutableDescriptorTypeListEXT typeList = {static_cast<uint32_t>(lastVec.size()),
1639 de::dataOrNull(lastVec)};
1640 mutableTypeLists.push_back(typeList);
1641 // Out-of-range case for mutable descriptor types: with KEEP_NO_MUTABLE_TYPES, mutableTypeLists must not include type lists for mutable descriptors.
1642 DE_ASSERT(!mutableDescriptorBefore || strategy != PoolMutableStrategy::KEEP_NO_MUTABLE_TYPES);
1643 }
1644 else
1645 {
1646 const VkMutableDescriptorTypeListEXT typeList = {0u, nullptr};
1647 mutableTypeLists.push_back(typeList);
1648 countNonMutDescAfterMutDesc++;
1649 }
1650 }
1651 else if (strategy == PoolMutableStrategy::NO_TYPES)
1652 ; // Do nothing, we will not use any type list.
1653 else
1654 DE_ASSERT(false);
1655 }
1656
1657 // mutable descriptor type out-of-range case:
1658 // there should be no non-mutable descriptor after the last mutable descriptor
1659 // and there should be at least 1 mutable descriptor in the binding list
1660 if (strategy == PoolMutableStrategy::KEEP_NO_MUTABLE_TYPES)
1661 {
1662 DE_ASSERT((mutableDescriptorBefore == true) && (countNonMutDescAfterMutDesc == 0));
1663 }
1664
1665 VkDescriptorPoolCreateInfo poolCreateInfo = initVulkanStructure();
1666
1667 poolCreateInfo.maxSets = 1u;
1668 poolCreateInfo.flags = flags;
1669 poolCreateInfo.poolSizeCount = static_cast<uint32_t>(poolSizes.size());
1670 poolCreateInfo.pPoolSizes = de::dataOrNull(poolSizes);
1671
1672 VkMutableDescriptorTypeCreateInfoEXT mutableInfo = initVulkanStructure();
1673
1674 if (strategy == PoolMutableStrategy::KEEP_TYPES || strategy == PoolMutableStrategy::EXPAND_TYPES ||
1675 ((strategy == PoolMutableStrategy::KEEP_NO_MUTABLE_TYPES) &&
1676 ((mutableDescriptorBefore == true) && (countNonMutDescAfterMutDesc == 0))))
1677 {
1678 mutableInfo.mutableDescriptorTypeListCount = static_cast<uint32_t>(mutableTypeLists.size());
1679 mutableInfo.pMutableDescriptorTypeLists = de::dataOrNull(mutableTypeLists);
1680 poolCreateInfo.pNext = &mutableInfo;
1681 }
1682
1683 return createDescriptorPool(vkd, device, &poolCreateInfo);
1684 }
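// Note on NO_TYPES: in that case no VkMutableDescriptorTypeCreateInfoEXT is chained into the pool create info, which
// the extension treats as mutable pool sizes without a restricting type list.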
1685
1686 private:
1687 // Building the descriptor set layout create info structure is cumbersome, so we'll reuse the same procedure to check support
1688 // and create the layout. This structure contains the result. "supported" is created as an enum to avoid the Move<> to bool
1689 // conversion cast in the constructors.
1690 struct DescriptorSetLayoutResult
1691 {
1692 enum class LayoutSupported
1693 {
1694 NO = 0,
1695 YES
1696 };
1697
1698 LayoutSupported supported;
1699 Move<VkDescriptorSetLayout> layout;
1700
1701 explicit DescriptorSetLayoutResult(Move<VkDescriptorSetLayout> &&layout_)
1702 : supported(LayoutSupported::YES)
1703 , layout(layout_)
1704 {
1705 }
1706
1707 explicit DescriptorSetLayoutResult(LayoutSupported supported_) : supported(supported_), layout()
1708 {
1709 }
1710 };
1711
1712 DescriptorSetLayoutResult makeOrCheckDescriptorSetLayout(bool checkOnly, const DeviceInterface &vkd,
1713 VkDevice device, VkShaderStageFlags stageFlags,
1714 VkDescriptorSetLayoutCreateFlags createFlags) const
1715 {
1716 const auto numIterations = maxTypes();
1717 std::vector<VkDescriptorSetLayoutBinding> bindingsVec;
1718 std::vector<std::vector<VkDescriptorType>> mutableTypesVec;
1719 std::vector<VkMutableDescriptorTypeListEXT> mutableTypeLists;
1720
1721 // Make vector element addresses stable.
1722 const auto bindingCount = numBindings();
1723 bindingsVec.reserve(bindingCount);
1724 mutableTypesVec.reserve(bindingCount);
1725 mutableTypeLists.reserve(bindingCount);
1726
1727 for (size_t bindingIdx = 0u; bindingIdx < bindings.size(); ++bindingIdx)
1728 {
1729 const auto &binding = bindings[bindingIdx];
1730 const auto mainType = binding->mainType();
1731
1732 const VkDescriptorSetLayoutBinding layoutBinding = {
1733 static_cast<uint32_t>(bindingIdx), // uint32_t binding;
1734 mainType, // VkDescriptorType descriptorType;
1735 static_cast<uint32_t>(binding->size()), // uint32_t descriptorCount;
1736 stageFlags, // VkShaderStageFlags stageFlags;
1737 nullptr, // const VkSampler* pImmutableSamplers;
1738 };
1739 bindingsVec.push_back(layoutBinding);
1740
1741 // This list may be empty for non-mutable types, which is fine.
1742 mutableTypesVec.push_back(binding->mutableTypes());
1743 const auto &lastVec = mutableTypesVec.back();
1744
1745 const VkMutableDescriptorTypeListEXT typeList = {
1746 static_cast<uint32_t>(lastVec.size()), // uint32_t descriptorTypeCount;
1747 de::dataOrNull(lastVec), // const VkDescriptorType* pDescriptorTypes;
1748 };
1749 mutableTypeLists.push_back(typeList);
1750 }
1751
1752 // Make sure to include the variable descriptor count and/or update after bind binding flags.
1753 const bool updateAfterBind = ((createFlags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT) != 0u);
1754 bool lastIsUnbounded = false;
1755 bool aliasingNeeded = false;
1756 std::vector<bool> bindingNeedsAliasing(bindings.size(), false);
1757
1758 for (size_t bindingIdx = 0; bindingIdx < bindings.size(); ++bindingIdx)
1759 {
1760 if (bindingIdx < bindings.size() - 1)
1761 DE_ASSERT(!bindings[bindingIdx]->isUnbounded());
1762 else
1763 lastIsUnbounded = bindings[bindingIdx]->isUnbounded();
1764
1765 if (bindings[bindingIdx]->needsAliasingUpTo(numIterations))
1766 {
1767 bindingNeedsAliasing[bindingIdx] = true;
1768 aliasingNeeded = true;
1769 }
1770 }
1771
1772 using FlagsCreateInfoPtr = de::MovePtr<VkDescriptorSetLayoutBindingFlagsCreateInfo>;
1773 using BindingFlagsVecPtr = de::MovePtr<std::vector<VkDescriptorBindingFlags>>;
1774
1775 FlagsCreateInfoPtr flagsCreateInfo;
1776 BindingFlagsVecPtr bindingFlagsVec;
1777
1778 if (updateAfterBind || lastIsUnbounded || aliasingNeeded)
1779 {
1780 flagsCreateInfo = FlagsCreateInfoPtr(new VkDescriptorSetLayoutBindingFlagsCreateInfo);
1781 *flagsCreateInfo = initVulkanStructure();
1782
1783 bindingFlagsVec = BindingFlagsVecPtr(new std::vector<VkDescriptorBindingFlags>(
1784 bindingsVec.size(), (updateAfterBind ? VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT : 0)));
1785 if (lastIsUnbounded)
1786 bindingFlagsVec->back() |= VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT;
1787
1788 for (size_t bindingIdx = 0; bindingIdx < bindings.size(); ++bindingIdx)
1789 {
1790 if (bindingNeedsAliasing[bindingIdx])
1791 bindingFlagsVec->at(bindingIdx) |= VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT;
1792 }
1793
1794 flagsCreateInfo->bindingCount = static_cast<uint32_t>(bindingFlagsVec->size());
1795 flagsCreateInfo->pBindingFlags = de::dataOrNull(*bindingFlagsVec);
1796 }
1797
1798 const VkMutableDescriptorTypeCreateInfoEXT createInfoMutable = {
1799 VK_STRUCTURE_TYPE_MUTABLE_DESCRIPTOR_TYPE_CREATE_INFO_EXT,
1800 flagsCreateInfo.get(),
1801 static_cast<uint32_t>(mutableTypeLists.size()),
1802 de::dataOrNull(mutableTypeLists),
1803 };
1804
1805 const VkDescriptorSetLayoutCreateInfo layoutCreateInfo = {
1806 VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO, // VkStructureType sType;
1807 &createInfoMutable, // const void* pNext;
1808 createFlags, // VkDescriptorSetLayoutCreateFlags flags;
1809 static_cast<uint32_t>(bindingsVec.size()), // uint32_t bindingCount;
1810 de::dataOrNull(bindingsVec), // const VkDescriptorSetLayoutBinding* pBindings;
1811 };
1812
1813 if (checkOnly)
1814 {
1815 VkDescriptorSetLayoutSupport support = initVulkanStructure();
1816 vkd.getDescriptorSetLayoutSupport(device, &layoutCreateInfo, &support);
1817 DescriptorSetLayoutResult result((support.supported == VK_TRUE) ?
1818 DescriptorSetLayoutResult::LayoutSupported::YES :
1819 DescriptorSetLayoutResult::LayoutSupported::NO);
1820 return result;
1821 }
1822 else
1823 {
1824 DescriptorSetLayoutResult result(createDescriptorSetLayout(vkd, device, &layoutCreateInfo));
1825 return result;
1826 }
1827 }
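// Note: the pNext chain built above is VkDescriptorSetLayoutCreateInfo -> VkMutableDescriptorTypeCreateInfoEXT ->
// VkDescriptorSetLayoutBindingFlagsCreateInfo, with the binding flags structure present only when update-after-bind,
// an unbounded last binding or partially-bound aliasing requires it.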
1828
1829 public:
1830 Move<VkDescriptorSetLayout> makeDescriptorSetLayout(const DeviceInterface &vkd, VkDevice device,
1831 VkShaderStageFlags stageFlags,
1832 VkDescriptorSetLayoutCreateFlags createFlags) const
1833 {
1834 return makeOrCheckDescriptorSetLayout(false /*checkOnly*/, vkd, device, stageFlags, createFlags).layout;
1835 }
1836
1837 bool checkDescriptorSetLayout(const DeviceInterface &vkd, VkDevice device, VkShaderStageFlags stageFlags,
1838 VkDescriptorSetLayoutCreateFlags createFlags) const
1839 {
1840 return (makeOrCheckDescriptorSetLayout(true /*checkOnly*/, vkd, device, stageFlags, createFlags).supported ==
1841 DescriptorSetLayoutResult::LayoutSupported::YES);
1842 }
1843
1844 size_t numDescriptors() const
1845 {
1846 size_t total = 0;
1847 for (const auto &b : bindings)
1848 total += b->size();
1849 return total;
1850 }
1851
1852 std::vector<Resource> createResources(const DeviceInterface &vkd, VkDevice device, Allocator &alloc,
1853 uint32_t qIndex, VkQueue queue, uint32_t iteration, bool useAABBs) const
1854 {
1855 // Create resources for each binding.
1856 std::vector<Resource> result;
1857 result.reserve(numDescriptors());
1858
1859 const auto bindingsCount = static_cast<uint32_t>(bindings.size());
1860
1861 for (uint32_t bindingIdx = 0u; bindingIdx < bindingsCount; ++bindingIdx)
1862 {
1863 const auto &binding = bindings[bindingIdx];
1864 auto bindingResources = binding->createResources(vkd, device, alloc, qIndex, queue, iteration, useAABBs,
1865 getDescriptorNumericValue(iteration, bindingIdx));
1866
1867 for (auto &resource : bindingResources)
1868 result.emplace_back(std::move(resource));
1869 }
1870
1871 return result;
1872 }
1873
1874 // Updates a descriptor set with the given resources. Note: the set must have been created with a layout that's compatible with this object.
1875 void updateDescriptorSet(const DeviceInterface &vkd, VkDevice device, VkDescriptorSet set, uint32_t iteration,
1876 const std::vector<Resource> &resources) const
1877 {
1878 // Make sure the number of resources is correct.
1879 const auto numResources = resources.size();
1880 DE_ASSERT(numDescriptors() == numResources);
1881
1882 std::vector<VkWriteDescriptorSet> descriptorWrites;
1883 descriptorWrites.reserve(numResources);
1884
1885 std::vector<VkDescriptorImageInfo> imageInfoVec;
1886 std::vector<VkDescriptorBufferInfo> bufferInfoVec;
1887 std::vector<VkBufferView> bufferViewVec;
1888 std::vector<VkWriteDescriptorSetAccelerationStructureKHR> asWriteVec;
1889 size_t resourceIdx = 0;
1890
1891 // We'll be storing pointers to elements of these vectors as we're appending elements, so we need their addresses to be stable.
1892 imageInfoVec.reserve(numResources);
1893 bufferInfoVec.reserve(numResources);
1894 bufferViewVec.reserve(numResources);
1895 asWriteVec.reserve(numResources);
1896
1897 for (size_t bindingIdx = 0; bindingIdx < bindings.size(); ++bindingIdx)
1898 {
1899 const auto &binding = bindings[bindingIdx];
1900 const auto descriptorTypes = binding->typesAtIteration(iteration);
1901
1902 for (size_t descriptorIdx = 0; descriptorIdx < binding->size(); ++descriptorIdx)
1903 {
1904 // Make sure the resource type matches the expected value.
1905 const auto &resource = resources[resourceIdx];
1906 const auto &descriptorType = descriptorTypes[descriptorIdx];
1907
1908 DE_ASSERT(resource.descriptorType == descriptorType);
1909
1910 // Obtain the descriptor write info for the resource.
1911 const auto writeInfo = resource.makeWriteInfo();
1912
1913 switch (writeInfo.writeType)
1914 {
1915 case WriteType::IMAGE_INFO:
1916 imageInfoVec.push_back(writeInfo.imageInfo);
1917 break;
1918 case WriteType::BUFFER_INFO:
1919 bufferInfoVec.push_back(writeInfo.bufferInfo);
1920 break;
1921 case WriteType::BUFFER_VIEW:
1922 bufferViewVec.push_back(writeInfo.bufferView);
1923 break;
1924 case WriteType::ACCELERATION_STRUCTURE_INFO:
1925 asWriteVec.push_back(writeInfo.asInfo);
1926 break;
1927 default:
1928 DE_ASSERT(false);
1929 break;
1930 }
1931
1932 // Add a new VkWriteDescriptorSet struct or extend the last one with more info. This helps us exercise different implementation code paths.
1933 bool extended = false;
1934
1935 if (!descriptorWrites.empty() && descriptorIdx > 0)
1936 {
1937 auto &last = descriptorWrites.back();
1938 if (last.dstSet == set /* this should always be true */ && last.dstBinding == bindingIdx &&
1939 (last.dstArrayElement + last.descriptorCount) == descriptorIdx &&
1940 last.descriptorType == descriptorType &&
1941 writeInfo.writeType != WriteType::ACCELERATION_STRUCTURE_INFO)
1942 {
1943 // The new write should be in the same vector (imageInfoVec, bufferInfoVec or bufferViewVec) so increasing the count works.
1944 ++last.descriptorCount;
1945 extended = true;
1946 }
1947 }
1948
1949 if (!extended)
1950 {
1951 const VkWriteDescriptorSet write = {
1952 VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
1953 ((writeInfo.writeType == WriteType::ACCELERATION_STRUCTURE_INFO) ? &asWriteVec.back() :
1954 nullptr),
1955 set,
1956 static_cast<uint32_t>(bindingIdx),
1957 static_cast<uint32_t>(descriptorIdx),
1958 1u,
1959 descriptorType,
1960 (writeInfo.writeType == WriteType::IMAGE_INFO ? &imageInfoVec.back() : nullptr),
1961 (writeInfo.writeType == WriteType::BUFFER_INFO ? &bufferInfoVec.back() : nullptr),
1962 (writeInfo.writeType == WriteType::BUFFER_VIEW ? &bufferViewVec.back() : nullptr),
1963 };
1964 descriptorWrites.push_back(write);
1965 }
1966
1967 ++resourceIdx;
1968 }
1969 }
1970
1971 // Finally, update descriptor set with all the writes.
1972 vkd.updateDescriptorSets(device, static_cast<uint32_t>(descriptorWrites.size()),
1973 de::dataOrNull(descriptorWrites), 0u, nullptr);
1974 }
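// As an example of the merging above: three consecutive descriptors of the same type in one binding collapse into a
// single VkWriteDescriptorSet with descriptorCount == 3 pointing at three consecutive elements of the corresponding
// info vector, while acceleration structures always get their own write because their info lives in the pNext chain.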
1975
1976 // Copies between descriptor sets. They must be compatible and related to this set.
1977 void copyDescriptorSet(const DeviceInterface &vkd, VkDevice device, VkDescriptorSet srcSet,
1978 VkDescriptorSet dstSet) const
1979 {
1980 std::vector<VkCopyDescriptorSet> copies;
1981
1982 for (size_t bindingIdx = 0; bindingIdx < numBindings(); ++bindingIdx)
1983 {
1984 const auto &binding = getBinding(bindingIdx);
1985 const auto bindingNumber = static_cast<uint32_t>(bindingIdx);
1986 const auto descriptorCount = static_cast<uint32_t>(binding->size());
1987
1988 const VkCopyDescriptorSet copy = {
1989 VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET,
1990 nullptr,
1991 // set, binding, array element.
1992 srcSet,
1993 bindingNumber,
1994 0u,
1995 dstSet,
1996 bindingNumber,
1997 0u,
1998 descriptorCount,
1999 };
2000
2001 copies.push_back(copy);
2002 }
2003
2004 vkd.updateDescriptorSets(device, 0u, nullptr, static_cast<uint32_t>(copies.size()), de::dataOrNull(copies));
2005 }
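// Note: each VkCopyDescriptorSet above copies a whole binding (array element 0, descriptorCount equal to the binding
// size); per-element copies are unnecessary because both sets are built from the same binding list.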
2006
2007 // Does any binding in the set need aliasing in a given iteration?
2008 bool needsAliasing(uint32_t iteration) const
2009 {
2010 std::vector<bool> aliasingNeededFlags;
2011 aliasingNeededFlags.reserve(bindings.size());
2012
2013 std::transform(begin(bindings), end(bindings), std::back_inserter(aliasingNeededFlags),
2014 [iteration](const BindingInterfacePtr &b) { return b->needsAliasing(iteration); });
2015 return std::any_of(begin(aliasingNeededFlags), end(aliasingNeededFlags), [](bool f) { return f; });
2016 }
2017
2018 // Does any binding in the set need aliasing in any iteration?
2019 bool needsAnyAliasing() const
2020 {
2021 const auto numIterations = maxTypes();
2022 std::vector<bool> aliasingNeededFlags(numIterations, false);
2023
2024 for (uint32_t iteration = 0; iteration < numIterations; ++iteration)
2025 aliasingNeededFlags[iteration] = needsAliasing(iteration);
2026
2027 return std::any_of(begin(aliasingNeededFlags), end(aliasingNeededFlags), [](bool f) { return f; });
2028 }
2029
2030 // Is the last binding an unbounded array?
2031 bool lastBindingIsUnbounded() const
2032 {
2033 if (bindings.empty())
2034 return false;
2035 return bindings.back()->isUnbounded();
2036 }
2037
2038 // Get the variable descriptor count for the last binding if any.
2039 tcu::Maybe<uint32_t> getVariableDescriptorCount() const
2040 {
2041 if (lastBindingIsUnbounded())
2042 return tcu::just(static_cast<uint32_t>(bindings.back()->size()));
2043 return tcu::Nothing;
2044 }
2045
2046 // Check if the set contains a descriptor of the given type at the given iteration.
2047 bool containsTypeAtIteration(VkDescriptorType descriptorType, uint32_t iteration) const
2048 {
2049 return std::any_of(begin(bindings), end(bindings),
2050 [descriptorType, iteration](const BindingInterfacePtr &b)
2051 {
2052 const auto types = b->typesAtIteration(iteration);
2053 return de::contains(begin(types), end(types), descriptorType);
2054 });
2055 }
2056
2057 // Is any binding an array?
2058 bool hasArrays() const
2059 {
2060 return std::any_of(begin(bindings), end(bindings), [](const BindingInterfacePtr &b) { return b->isArray(); });
2061 }
2062 };
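// Typical call sequence for the class above (illustrative sketch, not compiled and not part of the test flow; vkd,
// device, alloc, qIndex, queue and iteration are assumed to come from the surrounding context):
#if 0
const auto pool = set.makeDescriptorPool(vkd, device, PoolMutableStrategy::KEEP_TYPES,
                                         VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT);
const auto layout = set.makeDescriptorSetLayout(vkd, device, VK_SHADER_STAGE_COMPUTE_BIT, 0u);
const auto setLayout = layout.get();

VkDescriptorSetAllocateInfo allocInfo = initVulkanStructure();
allocInfo.descriptorPool = pool.get();
allocInfo.descriptorSetCount = 1u;
allocInfo.pSetLayouts = &setLayout;

VkDescriptorSet descriptorSet = VK_NULL_HANDLE;
VK_CHECK(vkd.allocateDescriptorSets(device, &allocInfo, &descriptorSet));

// Back the descriptors with resources and write them into the set for one iteration.
const auto resources = set.createResources(vkd, device, alloc, qIndex, queue, iteration, false /*useAABBs*/);
set.updateDescriptorSet(vkd, device, descriptorSet, iteration, resources);
#endif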
2063
2064 enum class UpdateType
2065 {
2066 WRITE = 0,
2067 COPY,
2068 };
2069
2070 enum class SourceSetType
2071 {
2072 NORMAL = 0,
2073 HOST_ONLY,
2074 NO_SOURCE,
2075 };
2076
2077 enum class UpdateMoment
2078 {
2079 NORMAL = 0,
2080 UPDATE_AFTER_BIND,
2081 };
2082
2083 enum class TestingStage
2084 {
2085 COMPUTE = 0,
2086 VERTEX,
2087 TESS_EVAL,
2088 TESS_CONTROL,
2089 GEOMETRY,
2090 FRAGMENT,
2091 RAY_GEN,
2092 INTERSECTION,
2093 ANY_HIT,
2094 CLOSEST_HIT,
2095 MISS,
2096 CALLABLE,
2097 };
2098
2099 enum class ArrayAccessType
2100 {
2101 CONSTANT = 0,
2102 PUSH_CONSTANT,
2103 NO_ARRAY,
2104 };
2105
2106 // Are we testing a ray tracing pipeline stage?
2107 bool isRayTracingStage(TestingStage stage)
2108 {
2109 switch (stage)
2110 {
2111 case TestingStage::RAY_GEN:
2112 case TestingStage::INTERSECTION:
2113 case TestingStage::ANY_HIT:
2114 case TestingStage::CLOSEST_HIT:
2115 case TestingStage::MISS:
2116 case TestingStage::CALLABLE:
2117 return true;
2118 default:
2119 break;
2120 }
2121
2122 return false;
2123 }
2124
2125 struct TestParams
2126 {
2127 DescriptorSetPtr descriptorSet;
2128 UpdateType updateType;
2129 SourceSetStrategy sourceSetStrategy;
2130 SourceSetType sourceSetType;
2131 PoolMutableStrategy poolMutableStrategy;
2132 UpdateMoment updateMoment;
2133 ArrayAccessType arrayAccessType;
2134 TestingStage testingStage;
2135
2136 VkShaderStageFlags getStageFlags() const
2137 {
2138 VkShaderStageFlags flags = 0u;
2139
2140 switch (testingStage)
2141 {
2142 case TestingStage::COMPUTE:
2143 flags |= VK_SHADER_STAGE_COMPUTE_BIT;
2144 break;
2145 case TestingStage::VERTEX:
2146 flags |= VK_SHADER_STAGE_VERTEX_BIT;
2147 break;
2148 case TestingStage::TESS_EVAL:
2149 flags |= VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT;
2150 break;
2151 case TestingStage::TESS_CONTROL:
2152 flags |= VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT;
2153 break;
2154 case TestingStage::GEOMETRY:
2155 flags |= VK_SHADER_STAGE_GEOMETRY_BIT;
2156 break;
2157 case TestingStage::FRAGMENT:
2158 flags |= VK_SHADER_STAGE_FRAGMENT_BIT;
2159 break;
2160 case TestingStage::RAY_GEN:
2161 flags |= VK_SHADER_STAGE_RAYGEN_BIT_KHR;
2162 break;
2163 case TestingStage::INTERSECTION:
2164 flags |= VK_SHADER_STAGE_INTERSECTION_BIT_KHR;
2165 break;
2166 case TestingStage::ANY_HIT:
2167 flags |= VK_SHADER_STAGE_ANY_HIT_BIT_KHR;
2168 break;
2169 case TestingStage::CLOSEST_HIT:
2170 flags |= VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR;
2171 break;
2172 case TestingStage::MISS:
2173 flags |= VK_SHADER_STAGE_MISS_BIT_KHR;
2174 break;
2175 case TestingStage::CALLABLE:
2176 flags |= VK_SHADER_STAGE_CALLABLE_BIT_KHR;
2177 break;
2178 default:
2179 DE_ASSERT(false);
2180 break;
2181 }
2182
2183 return flags;
2184 }
2185
2186 VkPipelineStageFlags getPipelineWriteStage() const
2187 {
2188 VkPipelineStageFlags flags = 0u;
2189
2190 switch (testingStage)
2191 {
2192 case TestingStage::COMPUTE:
2193 flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
2194 break;
2195 case TestingStage::VERTEX:
2196 flags |= VK_PIPELINE_STAGE_VERTEX_SHADER_BIT;
2197 break;
2198 case TestingStage::TESS_EVAL:
2199 flags |= VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT;
2200 break;
2201 case TestingStage::TESS_CONTROL:
2202 flags |= VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT;
2203 break;
2204 case TestingStage::GEOMETRY:
2205 flags |= VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT;
2206 break;
2207 case TestingStage::FRAGMENT:
2208 flags |= VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
2209 break;
2210 case TestingStage::RAY_GEN: // fallthrough
2211 case TestingStage::INTERSECTION: // fallthrough
2212 case TestingStage::ANY_HIT: // fallthrough
2213 case TestingStage::CLOSEST_HIT: // fallthrough
2214 case TestingStage::MISS: // fallthrough
2215 case TestingStage::CALLABLE:
2216 flags |= VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_KHR;
2217 break;
2218 default:
2219 DE_ASSERT(false);
2220 break;
2221 }
2222
2223 return flags;
2224 }
2225
2226 private:
2227 VkDescriptorSetLayoutCreateFlags getLayoutCreateFlags(bool isSourceSet) const
2228 {
2229 // UPDATE_AFTER_BIND cannot be used with HOST_ONLY sets.
2230 //DE_ASSERT(!(updateMoment == UpdateMoment::UPDATE_AFTER_BIND && sourceSetType == SourceSetType::HOST_ONLY));
2231
2232 VkDescriptorSetLayoutCreateFlags createFlags = 0u;
2233
2234 if ((!isSourceSet || sourceSetType != SourceSetType::HOST_ONLY) &&
2235 updateMoment == UpdateMoment::UPDATE_AFTER_BIND)
2236 createFlags |= VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT;
2237
2238 if (isSourceSet && sourceSetType == SourceSetType::HOST_ONLY)
2239 createFlags |= VK_DESCRIPTOR_SET_LAYOUT_CREATE_HOST_ONLY_POOL_BIT_EXT;
2240
2241 return createFlags;
2242 }
2243
2244 public:
2245 VkDescriptorSetLayoutCreateFlags getSrcLayoutCreateFlags() const
2246 {
2247 return getLayoutCreateFlags(true);
2248 }
2249
2250 VkDescriptorSetLayoutCreateFlags getDstLayoutCreateFlags() const
2251 {
2252 return getLayoutCreateFlags(false);
2253 }
2254
2255 private:
2256 VkDescriptorPoolCreateFlags getPoolCreateFlags(bool isSourceSet) const
2257 {
2258 // UPDATE_AFTER_BIND cannot be used with HOST_ONLY sets.
2259 //DE_ASSERT(!(updateMoment == UpdateMoment::UPDATE_AFTER_BIND && sourceSetType == SourceSetType::HOST_ONLY));
2260
2261 VkDescriptorPoolCreateFlags poolCreateFlags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
2262
2263 if ((!isSourceSet || sourceSetType != SourceSetType::HOST_ONLY) &&
2264 updateMoment == UpdateMoment::UPDATE_AFTER_BIND)
2265 poolCreateFlags |= VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT;
2266
2267 if (isSourceSet && sourceSetType == SourceSetType::HOST_ONLY)
2268 poolCreateFlags |= VK_DESCRIPTOR_POOL_CREATE_HOST_ONLY_BIT_EXT;
2269
2270 return poolCreateFlags;
2271 }
2272
2273 public:
2274 VkDescriptorPoolCreateFlags getSrcPoolCreateFlags() const
2275 {
2276 return getPoolCreateFlags(true);
2277 }
2278
2279 VkDescriptorPoolCreateFlags getDstPoolCreateFlags() const
2280 {
2281 return getPoolCreateFlags(false);
2282 }
2283
2284 VkPipelineBindPoint getBindPoint() const
2285 {
2286 if (testingStage == TestingStage::COMPUTE)
2287 return VK_PIPELINE_BIND_POINT_COMPUTE;
2288 if (isRayTracingStage(testingStage))
2289 return VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR;
2290 return VK_PIPELINE_BIND_POINT_GRAPHICS;
2291 }
2292 };
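// Example: with sourceSetType == HOST_ONLY and updateMoment == UPDATE_AFTER_BIND, the source set and pool only get
// the HOST_ONLY_EXT creation bits (host-only pools cannot be combined with update-after-bind), while the destination
// set and pool get the UPDATE_AFTER_BIND bits.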
2293
2294 class MutableTypesTest : public TestCase
2295 {
2296 public:
2297 MutableTypesTest(tcu::TestContext &testCtx, const std::string &name, const TestParams &params)
2298 : TestCase(testCtx, name)
2299 , m_params(params)
2300 {
2301 }
2302
2303 ~MutableTypesTest() override = default;
2304
2305 void initPrograms(vk::SourceCollections &programCollection) const override;
2306 TestInstance *createInstance(Context &context) const override;
2307 void checkSupport(Context &context) const override;
2308
2309 private:
2310 TestParams m_params;
2311 };
2312
2313 class MutableTypesInstance : public TestInstance
2314 {
2315 public:
2316 MutableTypesInstance(Context &context, const TestParams &params) : TestInstance(context), m_params(params)
2317 {
2318 }
2319
2320 ~MutableTypesInstance() override = default;
2321
2322 tcu::TestStatus iterate() override;
2323
2324 private:
2325 TestParams m_params;
2326 };
2327
2328 // Check if a descriptor set contains a given descriptor type in any iteration up to maxTypes().
2329 bool containsAnyDescriptorType(const DescriptorSet &descriptorSet, VkDescriptorType descriptorType)
2330 {
2331 const auto numIterations = descriptorSet.maxTypes();
2332
2333 for (uint32_t iter = 0u; iter < numIterations; ++iter)
2334 {
2335 if (descriptorSet.containsTypeAtIteration(descriptorType, iter))
2336 return true;
2337 }
2338
2339 return false;
2340 }
2341
2342 // Check if testing this descriptor set needs an external image (for sampler descriptors).
2343 bool needsExternalImage(const DescriptorSet &descriptorSet)
2344 {
2345 return containsAnyDescriptorType(descriptorSet, VK_DESCRIPTOR_TYPE_SAMPLER);
2346 }
2347
2348 // Check if testing this descriptor set needs an external sampler (for sampled images).
2349 bool needsExternalSampler(const DescriptorSet &descriptorSet)
2350 {
2351 return containsAnyDescriptorType(descriptorSet, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE);
2352 }
2353
2354 // Check if this descriptor set contains input attachments.
2355 bool usesInputAttachments(const DescriptorSet &descriptorSet)
2356 {
2357 return containsAnyDescriptorType(descriptorSet, VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT);
2358 }
2359
2360 // Check if this descriptor set contains acceleration structures.
2361 bool usesAccelerationStructures(const DescriptorSet &descriptorSet)
2362 {
2363 return containsAnyDescriptorType(descriptorSet, VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR);
2364 }
2365
2366 std::string shaderName(uint32_t iteration)
2367 {
2368 return ("iteration-" + de::toString(iteration));
2369 }
2370
2371 void MutableTypesTest::initPrograms(vk::SourceCollections &programCollection) const
2372 {
2373 const bool usePushConstants = (m_params.arrayAccessType == ArrayAccessType::PUSH_CONSTANT);
2374 const bool useExternalImage = needsExternalImage(*m_params.descriptorSet);
2375 const bool useExternalSampler = needsExternalSampler(*m_params.descriptorSet);
2376 const bool rayQueries = usesAccelerationStructures(*m_params.descriptorSet);
2377 const bool rayTracing = isRayTracingStage(m_params.testingStage);
2378 const auto numIterations = m_params.descriptorSet->maxTypes();
2379 const auto numBindings = m_params.descriptorSet->numBindings();
2380 const vk::ShaderBuildOptions rtBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_4, 0u, true);
2381
2382 // Extra set and bindings for external resources.
2383 std::ostringstream extraSet;
2384 uint32_t extraBindings = 0u;
2385
2386 extraSet << "layout (set=1, binding=" << extraBindings++ << ") buffer OutputBufferBlock { uint value["
2387 << numIterations << "]; } outputBuffer;\n";
2388 if (useExternalImage)
2389 extraSet << "layout (set=1, binding=" << extraBindings++ << ") uniform utexture2D externalSampledImage;\n";
2390 if (useExternalSampler)
2391 extraSet << "layout (set=1, binding=" << extraBindings++ << ") uniform sampler externalSampler;\n";
2392 // The extra binding below will be declared in the "passthrough" ray generation shader.
2393 #if 0
2394 if (rayTracing)
2395 extraSet << "layout (set=1, binding=" << extraBindings++ << ") uniform accelerationStructureEXT externalAS;\n";
2396 #endif
2397
2398 // Common vertex preamble.
2399 std::ostringstream vertexPreamble;
2400 vertexPreamble << "vec2 vertexPositions[3] = vec2[](\n"
2401 << " vec2(0.0, -0.5),\n"
2402 << " vec2(0.5, 0.5),\n"
2403 << " vec2(-0.5, 0.5)\n"
2404 << ");\n";
2405
2406 // Vertex shader body common statements.
2407 std::ostringstream vertexBodyCommon;
2408 vertexBodyCommon << " gl_Position = vec4(vertexPositions[gl_VertexIndex], 0.0, 1.0);\n";
2409
2410 // Common tessellation control preamble.
2411 std::ostringstream tescPreamble;
2412 tescPreamble << "layout (vertices=3) out;\n"
2413 << "in gl_PerVertex\n"
2414 << "{\n"
2415 << " vec4 gl_Position;\n"
2416 << "} gl_in[gl_MaxPatchVertices];\n"
2417 << "out gl_PerVertex\n"
2418 << "{\n"
2419 << " vec4 gl_Position;\n"
2420 << "} gl_out[];\n";
2421
2422 // Common tessellation control body.
2423 std::ostringstream tescBodyCommon;
2424 tescBodyCommon << " gl_TessLevelInner[0] = 1.0;\n"
2425 << " gl_TessLevelInner[1] = 1.0;\n"
2426 << " gl_TessLevelOuter[0] = 1.0;\n"
2427 << " gl_TessLevelOuter[1] = 1.0;\n"
2428 << " gl_TessLevelOuter[2] = 1.0;\n"
2429 << " gl_TessLevelOuter[3] = 1.0;\n"
2430 << " gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;\n";
2431
2432 // Common tessellation evaluation preamble.
2433 std::ostringstream tesePreamble;
2434 tesePreamble << "layout (triangles, fractional_odd_spacing, cw) in;\n"
2435 << "in gl_PerVertex\n"
2436 << "{\n"
2437 << " vec4 gl_Position;\n"
2438 << "} gl_in[gl_MaxPatchVertices];\n"
2439 << "out gl_PerVertex\n"
2440 << "{\n"
2441 << " vec4 gl_Position;\n"
2442 << "};\n";
2443
2444 // Common tessellation evaluation body.
2445 std::ostringstream teseBodyCommon;
2446 teseBodyCommon << " gl_Position = (gl_TessCoord.x * gl_in[0].gl_Position) +\n"
2447 << " (gl_TessCoord.y * gl_in[1].gl_Position) +\n"
2448 << " (gl_TessCoord.z * gl_in[2].gl_Position);\n";
2449
2450 // Shader preamble.
2451 std::ostringstream preamble;
2452
2453 preamble << "#version 460\n"
2454 << "#extension GL_EXT_nonuniform_qualifier : enable\n"
2455 << "#extension GL_EXT_debug_printf : enable\n"
2456 << (rayTracing ? "#extension GL_EXT_ray_tracing : enable\n" : "")
2457 << (rayQueries ? "#extension GL_EXT_ray_query : enable\n" : "") << "\n";
2458
2459 if (m_params.testingStage == TestingStage::VERTEX)
2460 {
2461 preamble << vertexPreamble.str();
2462 }
2463 else if (m_params.testingStage == TestingStage::COMPUTE)
2464 {
2465 preamble << "layout (local_size_x=1, local_size_y=1, local_size_z=1) in;\n"
2466 << "\n";
2467 }
2468 else if (m_params.testingStage == TestingStage::GEOMETRY)
2469 {
2470 preamble << "layout (triangles) in;\n"
2471 << "layout (triangle_strip, max_vertices=3) out;\n"
2472 << "in gl_PerVertex\n"
2473 << "{\n"
2474 << " vec4 gl_Position;\n"
2475 << "} gl_in[3];\n"
2476 << "out gl_PerVertex\n"
2477 << "{\n"
2478 << " vec4 gl_Position;\n"
2479 << "};\n";
2480 }
2481 else if (m_params.testingStage == TestingStage::TESS_CONTROL)
2482 {
2483 preamble << tescPreamble.str();
2484 }
2485 else if (m_params.testingStage == TestingStage::TESS_EVAL)
2486 {
2487 preamble << tesePreamble.str();
2488 }
2489 else if (m_params.testingStage == TestingStage::CALLABLE)
2490 {
2491 preamble << "layout (location=0) callableDataInEXT float unusedCallableData;\n";
2492 }
2493 else if (m_params.testingStage == TestingStage::CLOSEST_HIT || m_params.testingStage == TestingStage::ANY_HIT ||
2494 m_params.testingStage == TestingStage::MISS)
2495 {
2496 preamble << "layout (location=0) rayPayloadInEXT float unusedRayPayload;\n";
2497 }
2498 else if (m_params.testingStage == TestingStage::INTERSECTION)
2499 {
2500 preamble << "hitAttributeEXT vec3 hitAttribute;\n";
2501 }
2502
2503 preamble << extraSet.str();
2504 if (usePushConstants)
2505 preamble << "layout (push_constant, std430) uniform PushConstantBlock { uint zero; } pc;\n";
2506 preamble << "\n";
2507
2508 // We need to create a shader per iteration.
2509 for (uint32_t iter = 0u; iter < numIterations; ++iter)
2510 {
2511 // Shader preamble.
2512 std::ostringstream shader;
2513 shader << preamble.str();
2514
2515 uint32_t inputAttachmentCount = 0u;
2516
2517 // Descriptor declarations for this iteration.
2518 for (size_t bindingIdx = 0; bindingIdx < numBindings; ++bindingIdx)
2519 {
2520 DE_ASSERT(bindingIdx <= std::numeric_limits<uint32_t>::max());
2521
2522 const auto binding = m_params.descriptorSet->getBinding(bindingIdx);
2523 const auto bindingTypes = binding->typesAtIteration(iter);
2524 const auto hasInputAttachment =
2525 de::contains(begin(bindingTypes), end(bindingTypes), VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT);
2526 const auto isArray = binding->isArray();
2527 const auto isUnbounded = binding->isUnbounded();
2528 const auto bindingSize = binding->size();
2529
2530 // If the binding is an input attachment, make sure it's not an array.
2531 DE_ASSERT(!hasInputAttachment || !isArray);
2532
2533 // Make sure the descriptor count fits in an int32_t if needed.
2534 DE_ASSERT(!isArray || isUnbounded ||
2535 bindingSize <= static_cast<size_t>(std::numeric_limits<int32_t>::max()));
2536
2537 const auto arraySize =
2538 (isArray ? (isUnbounded ? tcu::just(int32_t{-1}) : tcu::just(static_cast<int32_t>(bindingSize))) :
2539 tcu::Nothing);
2540
2541 shader << binding->glslDeclarations(iter, 0u, static_cast<uint32_t>(bindingIdx), inputAttachmentCount,
2542 arraySize);
2543
2544 if (hasInputAttachment)
2545 ++inputAttachmentCount;
2546 }
2547
2548 // Main body.
2549 shader << "\n"
2550 << "void main() {\n"
2551 // This checks if we are the first invocation to arrive here, so the checks are executed only once.
2552 << " const uint flag = atomicCompSwap(outputBuffer.value[" << iter << "], 0u, 1u);\n"
2553 << " if (flag == 0u) {\n"
2554 << " uint anyError = 0u;\n";
2555
2556 for (size_t bindingIdx = 0; bindingIdx < numBindings; ++bindingIdx)
2557 {
2558 const auto binding = m_params.descriptorSet->getBinding(bindingIdx);
2559 const auto idx32 = static_cast<uint32_t>(bindingIdx);
2560 shader << binding->glslCheckStatements(iter, 0u, idx32, getDescriptorNumericValue(iter, idx32),
2561 tcu::Nothing, usePushConstants);
2562 }
2563
2564 shader << " if (anyError == 0u) {\n"
2565 << " atomicAdd(outputBuffer.value[" << iter << "], 1u);\n"
2566 << " }\n"
2567 << " }\n" // Closes if (flag == 0u).
2568 ;
2569
2570 if (m_params.testingStage == TestingStage::VERTEX)
2571 {
2572 shader << vertexBodyCommon.str();
2573 }
2574 else if (m_params.testingStage == TestingStage::GEOMETRY)
2575 {
2576 shader << " gl_Position = gl_in[0].gl_Position; EmitVertex();\n"
2577 << " gl_Position = gl_in[1].gl_Position; EmitVertex();\n"
2578 << " gl_Position = gl_in[2].gl_Position; EmitVertex();\n";
2579 }
2580 else if (m_params.testingStage == TestingStage::TESS_CONTROL)
2581 {
2582 shader << tescBodyCommon.str();
2583 }
2584 else if (m_params.testingStage == TestingStage::TESS_EVAL)
2585 {
2586 shader << teseBodyCommon.str();
2587 }
2588
2589 shader << "}\n" // End of main().
2590 ;
2591
2592 {
2593 const auto shaderNameStr = shaderName(iter);
2594 const auto shaderStr = shader.str();
2595 auto &glslSource = programCollection.glslSources.add(shaderNameStr);
2596
2597 if (m_params.testingStage == TestingStage::COMPUTE)
2598 glslSource << glu::ComputeSource(shaderStr);
2599 else if (m_params.testingStage == TestingStage::VERTEX)
2600 glslSource << glu::VertexSource(shaderStr);
2601 else if (m_params.testingStage == TestingStage::FRAGMENT)
2602 glslSource << glu::FragmentSource(shaderStr);
2603 else if (m_params.testingStage == TestingStage::GEOMETRY)
2604 glslSource << glu::GeometrySource(shaderStr);
2605 else if (m_params.testingStage == TestingStage::TESS_CONTROL)
2606 glslSource << glu::TessellationControlSource(shaderStr);
2607 else if (m_params.testingStage == TestingStage::TESS_EVAL)
2608 glslSource << glu::TessellationEvaluationSource(shaderStr);
2609 else if (m_params.testingStage == TestingStage::RAY_GEN)
2610 glslSource << glu::RaygenSource(updateRayTracingGLSL(shaderStr));
2611 else if (m_params.testingStage == TestingStage::INTERSECTION)
2612 glslSource << glu::IntersectionSource(updateRayTracingGLSL(shaderStr));
2613 else if (m_params.testingStage == TestingStage::ANY_HIT)
2614 glslSource << glu::AnyHitSource(updateRayTracingGLSL(shaderStr));
2615 else if (m_params.testingStage == TestingStage::CLOSEST_HIT)
2616 glslSource << glu::ClosestHitSource(updateRayTracingGLSL(shaderStr));
2617 else if (m_params.testingStage == TestingStage::MISS)
2618 glslSource << glu::MissSource(updateRayTracingGLSL(shaderStr));
2619 else if (m_params.testingStage == TestingStage::CALLABLE)
2620 glslSource << glu::CallableSource(updateRayTracingGLSL(shaderStr));
2621 else
2622 DE_ASSERT(false);
2623
2624 if (rayTracing || rayQueries)
2625 glslSource << rtBuildOptions;
2626 }
2627 }
2628
2629 if (m_params.testingStage == TestingStage::FRAGMENT || m_params.testingStage == TestingStage::GEOMETRY ||
2630 m_params.testingStage == TestingStage::TESS_CONTROL || m_params.testingStage == TestingStage::TESS_EVAL)
2631 {
2632 // Add passthrough vertex shader that works for points.
2633 std::ostringstream vertPassthrough;
2634 vertPassthrough << "#version 460\n"
2635 << "out gl_PerVertex\n"
2636 << "{\n"
2637 << " vec4 gl_Position;\n"
2638 << "};\n"
2639 << vertexPreamble.str() << "void main() {\n"
2640 << vertexBodyCommon.str() << "}\n";
2641 programCollection.glslSources.add("vert") << glu::VertexSource(vertPassthrough.str());
2642 }
2643
2644 if (m_params.testingStage == TestingStage::TESS_CONTROL)
2645 {
2646 // Add passthrough tessellation evaluation shader.
2647 std::ostringstream tesePassthrough;
2648 tesePassthrough << "#version 460\n"
2649 << tesePreamble.str() << "void main (void)\n"
2650 << "{\n"
2651 << teseBodyCommon.str() << "}\n";
2652
2653 programCollection.glslSources.add("tese") << glu::TessellationEvaluationSource(tesePassthrough.str());
2654 }
2655
2656 if (m_params.testingStage == TestingStage::TESS_EVAL)
2657 {
2658 // Add passthrough tessellation control shader.
2659 std::ostringstream tescPassthrough;
2660 tescPassthrough << "#version 460\n"
2661 << tescPreamble.str() << "void main (void)\n"
2662 << "{\n"
2663 << tescBodyCommon.str() << "}\n";
2664
2665 programCollection.glslSources.add("tesc") << glu::TessellationControlSource(tescPassthrough.str());
2666 }
2667
2668 if (rayTracing && m_params.testingStage != TestingStage::RAY_GEN)
2669 {
2670 // Add a "passthrough" ray generation shader.
2671 std::ostringstream rgen;
2672 rgen << "#version 460 core\n"
2673 << "#extension GL_EXT_ray_tracing : require\n"
2674 << "layout (set=1, binding=" << extraBindings << ") uniform accelerationStructureEXT externalAS;\n"
2675 << ((m_params.testingStage == TestingStage::CALLABLE) ?
2676 "layout (location=0) callableDataEXT float unusedCallableData;\n" :
2677 "layout (location=0) rayPayloadEXT float unusedRayPayload;\n")
2678 << "\n"
2679 << "void main()\n"
2680 << "{\n";
2681
2682 if (m_params.testingStage == TestingStage::INTERSECTION || m_params.testingStage == TestingStage::ANY_HIT ||
2683 m_params.testingStage == TestingStage::CLOSEST_HIT || m_params.testingStage == TestingStage::MISS)
2684 {
2685 // We need to trace rays in this case to get hits or misses.
2686 const auto zDir = ((m_params.testingStage == TestingStage::MISS) ? "-1.0" : "1.0");
2687
2688 rgen << " const uint cullMask = 0xFF;\n"
2689 << " const float tMin = 1.0;\n"
2690 << " const float tMax = 10.0;\n"
2691 << " const vec3 origin = vec3(0.0, 0.0, 0.0);\n"
2692 << " const vec3 direction = vec3(0.0, 0.0, " << zDir << ");\n"
2693 << " traceRayEXT(externalAS, gl_RayFlagsNoneEXT, cullMask, 0, 0, 0, origin, tMin, direction, tMax, "
2694 "0);\n";
2695 }
2696 else if (m_params.testingStage == TestingStage::CALLABLE)
2697 {
2698 rgen << " executeCallableEXT(0, 0);\n";
2699 }
2700
2701 // End of main().
2702 rgen << "}\n";
2703
2704 programCollection.glslSources.add("rgen")
2705 << glu::RaygenSource(updateRayTracingGLSL(rgen.str())) << rtBuildOptions;
2706
2707 // Intersection shaders will ignore the intersection, so we need a passthrough miss shader.
2708 if (m_params.testingStage == TestingStage::INTERSECTION)
2709 {
2710 std::ostringstream miss;
2711 miss << "#version 460 core\n"
2712 << "#extension GL_EXT_ray_tracing : require\n"
2713 << "layout (location=0) rayPayloadEXT float unusedRayPayload;\n"
2714 << "\n"
2715 << "void main()\n"
2716 << "{\n"
2717 << "}\n";
2718
2719 programCollection.glslSources.add("miss")
2720 << glu::MissSource(updateRayTracingGLSL(miss.str())) << rtBuildOptions;
2721 }
2722 }
2723 }
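// To summarize initPrograms(): one "iteration-N" shader is generated per descriptor type iteration for the stage
// under test, plus the passthrough helpers that stage requires: "vert" for fragment/geometry/tessellation tests,
// "tese"/"tesc" for the tessellation counterparts, and "rgen" (with an extra "miss" for intersection tests) for the
// ray tracing stages.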
2724
2725 TestInstance *MutableTypesTest::createInstance(Context &context) const
2726 {
2727 return new MutableTypesInstance(context, m_params);
2728 }
2729
2730 void requirePartiallyBound(Context &context)
2731 {
2732 context.requireDeviceFunctionality("VK_EXT_descriptor_indexing");
2733 const auto &indexingFeatures = context.getDescriptorIndexingFeatures();
2734 if (!indexingFeatures.descriptorBindingPartiallyBound)
2735 TCU_THROW(NotSupportedError, "Partially bound bindings not supported");
2736 }
2737
2738 void requireVariableDescriptorCount(Context &context)
2739 {
2740 context.requireDeviceFunctionality("VK_EXT_descriptor_indexing");
2741 const auto &indexingFeatures = context.getDescriptorIndexingFeatures();
2742 if (!indexingFeatures.descriptorBindingVariableDescriptorCount)
2743 TCU_THROW(NotSupportedError, "Variable descriptor count not supported");
2744 }
2745
2746 // Calculates the set of used descriptor types for a given set and iteration count, for bindings matching a predicate.
2747 std::set<VkDescriptorType> getUsedDescriptorTypes(const DescriptorSet &descriptorSet, uint32_t numIterations,
2748 bool (*predicate)(const BindingInterface *binding))
2749 {
2750 std::set<VkDescriptorType> usedDescriptorTypes;
2751
2752 for (size_t bindingIdx = 0; bindingIdx < descriptorSet.numBindings(); ++bindingIdx)
2753 {
2754 const auto bindingPtr = descriptorSet.getBinding(bindingIdx);
2755 if (predicate(bindingPtr))
2756 {
2757 for (uint32_t iter = 0u; iter < numIterations; ++iter)
2758 {
2759 const auto descTypes = bindingPtr->typesAtIteration(iter);
2760 usedDescriptorTypes.insert(begin(descTypes), end(descTypes));
2761 }
2762 }
2763 }
2764
2765 return usedDescriptorTypes;
2766 }
2767
2768 std::set<VkDescriptorType> getAllUsedDescriptorTypes(const DescriptorSet &descriptorSet, uint32_t numIterations)
2769 {
2770 return getUsedDescriptorTypes(descriptorSet, numIterations, [](const BindingInterface *) { return true; });
2771 }
2772
2773 std::set<VkDescriptorType> getUsedArrayDescriptorTypes(const DescriptorSet &descriptorSet, uint32_t numIterations)
2774 {
2775 return getUsedDescriptorTypes(descriptorSet, numIterations, [](const BindingInterface *b) { return b->isArray(); });
2776 }
2777
2778 // Are we testing a vertex pipeline stage?
2779 bool isVertexStage(TestingStage stage)
2780 {
2781 switch (stage)
2782 {
2783 case TestingStage::VERTEX:
2784 case TestingStage::TESS_CONTROL:
2785 case TestingStage::TESS_EVAL:
2786 case TestingStage::GEOMETRY:
2787 return true;
2788 default:
2789 break;
2790 }
2791
2792 return false;
2793 }
2794
2795 void MutableTypesTest::checkSupport(Context &context) const
2796 {
2797 if (!context.isDeviceFunctionalitySupported("VK_VALVE_mutable_descriptor_type") &&
2798 !context.isDeviceFunctionalitySupported("VK_EXT_mutable_descriptor_type"))
2799
2800 TCU_THROW(NotSupportedError,
2801 "VK_VALVE_mutable_descriptor_type or VK_EXT_mutable_descriptor_type is not supported");
2802
2803 VkPhysicalDeviceMutableDescriptorTypeFeaturesEXT mutableDescriptorType = initVulkanStructure();
2804 VkPhysicalDeviceFeatures2KHR features2 = initVulkanStructure(&mutableDescriptorType);
2805
2806 context.getInstanceInterface().getPhysicalDeviceFeatures2(context.getPhysicalDevice(), &features2);
2807
2808 if (!mutableDescriptorType.mutableDescriptorType)
2809 TCU_THROW(NotSupportedError, "mutableDescriptorType feature is not supported");
2810
2811 // Check ray tracing if needed.
2812 const bool rayTracing = isRayTracingStage(m_params.testingStage);
2813
2814 if (rayTracing)
2815 {
2816 context.requireDeviceFunctionality("VK_KHR_acceleration_structure");
2817 context.requireDeviceFunctionality("VK_KHR_ray_tracing_pipeline");
2818 }
2819
2820 // Check if ray queries are needed. Ray queries are used to verify acceleration structure descriptors.
2821 const bool rayQueriesNeeded = usesAccelerationStructures(*m_params.descriptorSet);
2822 if (rayQueriesNeeded)
2823 {
2824 context.requireDeviceFunctionality("VK_KHR_acceleration_structure");
2825 context.requireDeviceFunctionality("VK_KHR_ray_query");
2826 }
2827
2828 // We'll use iterations to check each mutable type, as needed.
2829 const auto numIterations = m_params.descriptorSet->maxTypes();
2830
2831 if (m_params.descriptorSet->lastBindingIsUnbounded())
2832 requireVariableDescriptorCount(context);
2833
2834 for (uint32_t iter = 0u; iter < numIterations; ++iter)
2835 {
2836 if (m_params.descriptorSet->needsAliasing(iter))
2837 {
2838 requirePartiallyBound(context);
2839 break;
2840 }
2841 }
2842
2843 if (m_params.updateMoment == UpdateMoment::UPDATE_AFTER_BIND)
2844 {
2845 // Check update after bind for each used descriptor type.
2846 const auto &usedDescriptorTypes = getAllUsedDescriptorTypes(*m_params.descriptorSet, numIterations);
2847 const auto &indexingFeatures = context.getDescriptorIndexingFeatures();
2848
2849 for (const auto &descType : usedDescriptorTypes)
2850 {
2851 switch (descType)
2852 {
2853 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
2854 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
2855 if (!indexingFeatures.descriptorBindingUniformBufferUpdateAfterBind)
2856 TCU_THROW(NotSupportedError, "Update-after-bind not supported for uniform buffers");
2857 break;
2858
2859 case VK_DESCRIPTOR_TYPE_SAMPLER:
2860 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
2861 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
2862 if (!indexingFeatures.descriptorBindingSampledImageUpdateAfterBind)
2863 TCU_THROW(NotSupportedError, "Update-after-bind not supported for samplers and sampled images");
2864 break;
2865
2866 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
2867 if (!indexingFeatures.descriptorBindingStorageImageUpdateAfterBind)
2868 TCU_THROW(NotSupportedError, "Update-after-bind not supported for storage images");
2869 break;
2870
2871 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
2872 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
2873 if (!indexingFeatures.descriptorBindingStorageBufferUpdateAfterBind)
2874 TCU_THROW(NotSupportedError, "Update-after-bind not supported for storage buffers");
2875 break;
2876
2877 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
2878 if (!indexingFeatures.descriptorBindingUniformTexelBufferUpdateAfterBind)
2879 TCU_THROW(NotSupportedError, "Update-after-bind not supported for uniform texel buffers");
2880 break;
2881
2882 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
2883 if (!indexingFeatures.descriptorBindingStorageTexelBufferUpdateAfterBind)
2884 TCU_THROW(NotSupportedError, "Update-after-bind not supported for storage texel buffers");
2885 break;
2886
2887 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
2888 TCU_THROW(InternalError, "Tests do not support update-after-bind with input attachments");
2889
2890 case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT:
2891 {
2892 // Just in case we ever mix some of these in.
2893 context.requireDeviceFunctionality("VK_EXT_inline_uniform_block");
2894 const auto &iubFeatures = context.getInlineUniformBlockFeatures();
2895 if (!iubFeatures.descriptorBindingInlineUniformBlockUpdateAfterBind)
2896 TCU_THROW(NotSupportedError, "Update-after-bind not supported for inline uniform blocks");
2897 }
2898 break;
2899
2900 case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR:
2901 {
2902 // Just in case we ever mix some of these in.
2903 context.requireDeviceFunctionality("VK_KHR_acceleration_structure");
2904 const auto &asFeatures = context.getAccelerationStructureFeatures();
2905 if (!asFeatures.descriptorBindingAccelerationStructureUpdateAfterBind)
2906 TCU_THROW(NotSupportedError, "Update-after-bind not supported for acceleration structures");
2907 }
2908 break;
2909
2910 case VK_DESCRIPTOR_TYPE_MUTABLE_EXT:
2911 TCU_THROW(InternalError, "Found VK_DESCRIPTOR_TYPE_MUTABLE_EXT in list of used descriptor types");
2912
2913 default:
2914 TCU_THROW(InternalError, "Unexpected descriptor type found in list of used descriptor types: " +
2915 de::toString(descType));
2916 }
2917 }
2918 }
2919
2920 if (m_params.arrayAccessType == ArrayAccessType::PUSH_CONSTANT)
2921 {
2922 // These require dynamically uniform indices.
2923 const auto &usedDescriptorTypes = getUsedArrayDescriptorTypes(*m_params.descriptorSet, numIterations);
2924 const auto &features = context.getDeviceFeatures();
2925 const auto descriptorIndexingSupported = context.isDeviceFunctionalitySupported("VK_EXT_descriptor_indexing");
2926 const auto &indexingFeatures = context.getDescriptorIndexingFeatures();
2927
2928 for (const auto &descType : usedDescriptorTypes)
2929 {
2930 switch (descType)
2931 {
2932 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
2933 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
2934 if (!features.shaderUniformBufferArrayDynamicIndexing)
2935 TCU_THROW(NotSupportedError, "Dynamic indexing not supported for uniform buffers");
2936 break;
2937
2938 case VK_DESCRIPTOR_TYPE_SAMPLER:
2939 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
2940 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
2941 if (!features.shaderSampledImageArrayDynamicIndexing)
2942 TCU_THROW(NotSupportedError, "Dynamic indexing not supported for samplers and sampled images");
2943 break;
2944
2945 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
2946 if (!features.shaderStorageImageArrayDynamicIndexing)
2947 TCU_THROW(NotSupportedError, "Dynamic indexing not supported for storage images");
2948 break;
2949
2950 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
2951 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
2952 if (!features.shaderStorageBufferArrayDynamicIndexing)
2953 TCU_THROW(NotSupportedError, "Dynamic indexing not supported for storage buffers");
2954 break;
2955
2956 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
2957 if (!descriptorIndexingSupported || !indexingFeatures.shaderUniformTexelBufferArrayDynamicIndexing)
2958 TCU_THROW(NotSupportedError, "Dynamic indexing not supported for uniform texel buffers");
2959 break;
2960
2961 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
2962 if (!descriptorIndexingSupported || !indexingFeatures.shaderStorageTexelBufferArrayDynamicIndexing)
2963 TCU_THROW(NotSupportedError, "Dynamic indexing not supported for storage texel buffers");
2964 break;
2965
2966 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
2967 if (!descriptorIndexingSupported || !indexingFeatures.shaderInputAttachmentArrayDynamicIndexing)
2968 TCU_THROW(NotSupportedError, "Dynamic indexing not supported for input attachments");
2969 break;
2970
2971 case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR:
2972 context.requireDeviceFunctionality("VK_KHR_acceleration_structure");
2973 break;
2974
2975 case VK_DESCRIPTOR_TYPE_MUTABLE_EXT:
2976 TCU_THROW(InternalError, "Found VK_DESCRIPTOR_TYPE_MUTABLE_EXT in list of used array descriptor types");
2977
2978 default:
2979 TCU_THROW(InternalError, "Unexpected descriptor type found in list of used descriptor types: " +
2980 de::toString(descType));
2981 }
2982 }
2983 }
2984
2985 // Check layout support.
2986 {
2987 const auto &vkd = context.getDeviceInterface();
2988 const auto device = getDevice(context);
2989 const auto stageFlags = m_params.getStageFlags();
2990
2991 {
2992 const auto layoutCreateFlags = m_params.getDstLayoutCreateFlags();
2993 const auto supported =
2994 m_params.descriptorSet->checkDescriptorSetLayout(vkd, device, stageFlags, layoutCreateFlags);
2995
2996 if (!supported)
2997 TCU_THROW(NotSupportedError, "Required descriptor set layout not supported");
2998 }
2999
3000 if (m_params.updateType == UpdateType::COPY)
3001 {
3002 const auto layoutCreateFlags = m_params.getSrcLayoutCreateFlags();
3003 const auto supported =
3004 m_params.descriptorSet->checkDescriptorSetLayout(vkd, device, stageFlags, layoutCreateFlags);
3005
3006 if (!supported)
3007 TCU_THROW(NotSupportedError, "Required descriptor set layout for source set not supported");
3008
3009 // Check that the specific layouts for the different source sets are supported.
3010 for (uint32_t iter = 0u; iter < numIterations; ++iter)
3011 {
3012 const auto srcSet = m_params.descriptorSet->genSourceSet(m_params.sourceSetStrategy, iter);
3013 const auto srcLayoutSupported =
3014 srcSet->checkDescriptorSetLayout(vkd, device, stageFlags, layoutCreateFlags);
3015
3016 if (!srcLayoutSupported)
3017 TCU_THROW(NotSupportedError, "Descriptor set layout for source set at iteration " +
3018 de::toString(iter) + " not supported");
3019 }
3020 }
3021 }
3022
3023 // Check support for shader stores and for the tested stages.
3024 const bool vertexStage = isVertexStage(m_params.testingStage);
3025 const bool fragmentStage = (m_params.testingStage == TestingStage::FRAGMENT);
3026 const bool geometryStage = (m_params.testingStage == TestingStage::GEOMETRY);
3027 const bool tessellation =
3028 (m_params.testingStage == TestingStage::TESS_CONTROL || m_params.testingStage == TestingStage::TESS_EVAL);
3029
3030 const auto &features = context.getDeviceFeatures();
3031
3032 if (vertexStage && !features.vertexPipelineStoresAndAtomics)
3033 TCU_THROW(NotSupportedError, "Vertex pipeline stores and atomics not supported");
3034
3035 if (fragmentStage && !features.fragmentStoresAndAtomics)
3036 TCU_THROW(NotSupportedError, "Fragment shader stores and atomics not supported");
3037
3038 if (geometryStage && !features.geometryShader)
3039 TCU_THROW(NotSupportedError, "Geometry shader not supported");
3040
3041 if (tessellation && !features.tessellationShader)
3042 TCU_THROW(NotSupportedError, "Tessellation shaders not supported");
3043 }
3044
3045 // Steps to perform in each iteration. Their order depends on whether UPDATE_AFTER_BIND is being tested.
3046 enum class Step
3047 {
3048 UPDATE = 0,
3049 BIND,
3050 };
3051
3052 // Create render pass.
3053 Move<VkRenderPass> buildRenderPass(const DeviceInterface &vkd, VkDevice device, const std::vector<Resource> &resources)
3054 {
3055 const auto imageFormat = getDescriptorImageFormat();
3056
3057 std::vector<VkAttachmentDescription> attachmentDescriptions;
3058 std::vector<VkAttachmentReference> attachmentReferences;
3059 std::vector<uint32_t> attachmentIndices;
3060
3061 for (const auto &resource : resources)
3062 {
3063 if (resource.descriptorType == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT)
3064 {
3065 const auto nextIndex = static_cast<uint32_t>(attachmentDescriptions.size());
3066
3067 const VkAttachmentDescription description = {
3068 0u, // VkAttachmentDescriptionFlags flags;
3069 imageFormat, // VkFormat format;
3070 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples;
3071 VK_ATTACHMENT_LOAD_OP_LOAD, // VkAttachmentLoadOp loadOp;
3072 VK_ATTACHMENT_STORE_OP_DONT_CARE, // VkAttachmentStoreOp storeOp;
3073 VK_ATTACHMENT_LOAD_OP_DONT_CARE, // VkAttachmentLoadOp stencilLoadOp;
3074 VK_ATTACHMENT_STORE_OP_DONT_CARE, // VkAttachmentStoreOp stencilStoreOp;
3075 VK_IMAGE_LAYOUT_GENERAL, // VkImageLayout initialLayout;
3076 VK_IMAGE_LAYOUT_GENERAL, // VkImageLayout finalLayout;
3077 };
3078
3079 const VkAttachmentReference reference = {nextIndex, VK_IMAGE_LAYOUT_GENERAL};
3080
3081 attachmentIndices.push_back(nextIndex);
3082 attachmentDescriptions.push_back(description);
3083 attachmentReferences.push_back(reference);
3084 }
3085 }
3086
3087 const auto attachmentCount = static_cast<uint32_t>(attachmentDescriptions.size());
3088 DE_ASSERT(attachmentCount == static_cast<uint32_t>(attachmentIndices.size()));
3089 DE_ASSERT(attachmentCount == static_cast<uint32_t>(attachmentReferences.size()));
3090
3091 const VkSubpassDescription subpassDescription = {
3092 0u, // VkSubpassDescriptionFlags flags;
3093 VK_PIPELINE_BIND_POINT_GRAPHICS, // VkPipelineBindPoint pipelineBindPoint;
3094 attachmentCount, // uint32_t inputAttachmentCount;
3095 de::dataOrNull(attachmentReferences), // const VkAttachmentReference* pInputAttachments;
3096 0u, // uint32_t colorAttachmentCount;
3097 nullptr, // const VkAttachmentReference* pColorAttachments;
3098 nullptr, // const VkAttachmentReference* pResolveAttachments;
3099 nullptr, // const VkAttachmentReference* pDepthStencilAttachment;
3100 0u, // uint32_t preserveAttachmentCount;
3101 nullptr, // const uint32_t* pPreserveAttachments;
3102 };
3103
3104 const VkRenderPassCreateInfo renderPassCreateInfo = {
3105 VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, // VkStructureType sType;
3106 nullptr, // const void* pNext;
3107 0u, // VkRenderPassCreateFlags flags;
3108 static_cast<uint32_t>(attachmentDescriptions.size()), // uint32_t attachmentCount;
3109 de::dataOrNull(attachmentDescriptions), // const VkAttachmentDescription* pAttachments;
3110 1u, // uint32_t subpassCount;
3111 &subpassDescription, // const VkSubpassDescription* pSubpasses;
3112 0u, // uint32_t dependencyCount;
3113 nullptr, // const VkSubpassDependency* pDependencies;
3114 };
3115
3116 return createRenderPass(vkd, device, &renderPassCreateInfo);
3117 }
3118
3119 // Create a graphics pipeline.
3120 Move<VkPipeline> buildGraphicsPipeline(const DeviceInterface &vkd, VkDevice device, VkPipelineLayout pipelineLayout,
3121 VkShaderModule vertModule, VkShaderModule tescModule, VkShaderModule teseModule,
3122 VkShaderModule geomModule, VkShaderModule fragModule, VkRenderPass renderPass)
3123 {
3124 const auto extent = getDefaultExtent();
3125 const std::vector<VkViewport> viewports(1u, makeViewport(extent));
3126 const std::vector<VkRect2D> scissors(1u, makeRect2D(extent));
3127 const auto hasTess = (tescModule != VK_NULL_HANDLE || teseModule != VK_NULL_HANDLE);
3128 const auto topology = (hasTess ? VK_PRIMITIVE_TOPOLOGY_PATCH_LIST : VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST);
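// Tessellation requires patch primitives; every other graphics variant draws a plain triangle list.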
3129
3130 const VkPipelineVertexInputStateCreateInfo vertexInputStateCreateInfo = initVulkanStructure();
3131
3132 const VkPipelineInputAssemblyStateCreateInfo inputAssemblyStateCreateInfo = {
3133 VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, // VkStructureType sType;
3134 nullptr, // const void* pNext;
3135 0u, // VkPipelineInputAssemblyStateCreateFlags flags;
3136 topology, // VkPrimitiveTopology topology;
3137 VK_FALSE, // VkBool32 primitiveRestartEnable;
3138 };
3139
3140 const VkPipelineTessellationStateCreateInfo tessellationStateCreateInfo = {
3141 VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO, // VkStructureType sType;
3142 nullptr, // const void* pNext;
3143 0u, // VkPipelineTessellationStateCreateFlags flags;
3144 (hasTess ? 3u : 0u), // uint32_t patchControlPoints;
3145 };
3146
3147 const VkPipelineViewportStateCreateInfo viewportStateCreateInfo = {
3148 VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO, // VkStructureType sType;
3149 nullptr, // const void* pNext;
3150 0u, // VkPipelineViewportStateCreateFlags flags;
3151 static_cast<uint32_t>(viewports.size()), // uint32_t viewportCount;
3152 de::dataOrNull(viewports), // const VkViewport* pViewports;
3153 static_cast<uint32_t>(scissors.size()), // uint32_t scissorCount;
3154 de::dataOrNull(scissors), // const VkRect2D* pScissors;
3155 };
3156
3157 const VkPipelineRasterizationStateCreateInfo rasterizationStateCreateInfo = {
3158 VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO, // VkStructureType sType;
3159 nullptr, // const void* pNext;
3160 0u, // VkPipelineRasterizationStateCreateFlags flags;
3161 VK_FALSE, // VkBool32 depthClampEnable;
3162 (fragModule == VK_NULL_HANDLE ? VK_TRUE : VK_FALSE), // VkBool32 rasterizerDiscardEnable;
3163 VK_POLYGON_MODE_FILL, // VkPolygonMode polygonMode;
3164 VK_CULL_MODE_NONE, // VkCullModeFlags cullMode;
3165 VK_FRONT_FACE_CLOCKWISE, // VkFrontFace frontFace;
3166 VK_FALSE, // VkBool32 depthBiasEnable;
3167 0.0f, // float depthBiasConstantFactor;
3168 0.0f, // float depthBiasClamp;
3169 0.0f, // float depthBiasSlopeFactor;
3170 1.0f, // float lineWidth;
3171 };
3172
3173 const VkPipelineMultisampleStateCreateInfo multisampleStateCreateInfo = {
3174 VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO, // VkStructureType sType;
3175 nullptr, // const void* pNext;
3176 0u, // VkPipelineMultisampleStateCreateFlags flags;
3177 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits rasterizationSamples;
3178 VK_FALSE, // VkBool32 sampleShadingEnable;
3179 1.0f, // float minSampleShading;
3180 nullptr, // const VkSampleMask* pSampleMask;
3181 VK_FALSE, // VkBool32 alphaToCoverageEnable;
3182 VK_FALSE, // VkBool32 alphaToOneEnable;
3183 };
3184
3185 const VkPipelineDepthStencilStateCreateInfo depthStencilStateCreateInfo = initVulkanStructure();
3186
3187 const VkPipelineColorBlendStateCreateInfo colorBlendStateCreateInfo = initVulkanStructure();
3188
3189 return makeGraphicsPipeline(vkd, device, pipelineLayout, vertModule, tescModule, teseModule, geomModule, fragModule,
3190 renderPass, 0u, &vertexInputStateCreateInfo, &inputAssemblyStateCreateInfo,
3191 (hasTess ? &tessellationStateCreateInfo : nullptr), &viewportStateCreateInfo,
3192 &rasterizationStateCreateInfo, &multisampleStateCreateInfo,
3193 &depthStencilStateCreateInfo, &colorBlendStateCreateInfo, nullptr);
3194 }
3195
3196 Move<VkFramebuffer> buildFramebuffer(const DeviceInterface &vkd, VkDevice device, VkRenderPass renderPass,
3197 const std::vector<Resource> &resources)
3198 {
3199 const auto extent = getDefaultExtent();
3200
3201 std::vector<VkImageView> inputAttachments;
3202 for (const auto &resource : resources)
3203 {
3204 if (resource.descriptorType == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT)
3205 inputAttachments.push_back(resource.imageView.get());
3206 }
3207
3208 const VkFramebufferCreateInfo framebufferCreateInfo = {
3209 VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, // VkStructureType sType;
3210 nullptr, // const void* pNext;
3211 0u, // VkFramebufferCreateFlags flags;
3212 renderPass, // VkRenderPass renderPass;
3213 static_cast<uint32_t>(inputAttachments.size()), // uint32_t attachmentCount;
3214 de::dataOrNull(inputAttachments), // const VkImageView* pAttachments;
3215 extent.width, // uint32_t width;
3216 extent.height, // uint32_t height;
3217 extent.depth, // uint32_t layers;
3218 };
3219
3220 return createFramebuffer(vkd, device, &framebufferCreateInfo);
3221 }
3222
3223 tcu::TestStatus MutableTypesInstance::iterate()
3224 {
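// Overall flow: for each iteration, build a pipeline whose shader matches that iteration's descriptor types,
// update (or copy) the descriptor set, run a single shader invocation, and verify both the output buffer and
// the values the shader stored through each writable descriptor.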
3225 const auto &vki = m_context.getInstanceInterface();
3226 const auto &vkd = m_context.getDeviceInterface();
3227 const auto device = getDevice(m_context);
3228 const auto physDev = m_context.getPhysicalDevice();
3229 const auto qIndex = m_context.getUniversalQueueFamilyIndex();
3230 const auto queue = getDeviceQueue(vkd, device, qIndex, 0u);
3231
3232 SimpleAllocator alloc(
3233 vkd, device,
3234 getPhysicalDeviceMemoryProperties(vki, physDev));
3235
3236 const auto &paramSet = m_params.descriptorSet;
3237 const auto numIterations = paramSet->maxTypes();
3238 const bool useExternalImage = needsExternalImage(*m_params.descriptorSet);
3239 const bool useExternalSampler = needsExternalSampler(*m_params.descriptorSet);
3240 const auto stageFlags = m_params.getStageFlags();
3241 const bool srcSetNeeded = (m_params.updateType == UpdateType::COPY);
3242 const bool updateAfterBind = (m_params.updateMoment == UpdateMoment::UPDATE_AFTER_BIND);
3243 const auto bindPoint = m_params.getBindPoint();
3244 const bool rayTracing = isRayTracingStage(m_params.testingStage);
3245 const bool useAABBs = (m_params.testingStage == TestingStage::INTERSECTION);
3246
3247 // Resources for each iteration.
3248 std::vector<std::vector<Resource>> allResources;
3249 allResources.reserve(numIterations);
3250
3251 // Command pool.
3252 const auto cmdPool = makeCommandPool(vkd, device, qIndex);
3253
3254 // Descriptor pool and set for the active (dst) descriptor set.
3255 const auto dstPoolFlags = m_params.getDstPoolCreateFlags();
3256 const auto dstLayoutFlags = m_params.getDstLayoutCreateFlags();
3257
3258 const auto dstPool = paramSet->makeDescriptorPool(vkd, device, m_params.poolMutableStrategy, dstPoolFlags);
3259 const auto dstLayout = paramSet->makeDescriptorSetLayout(vkd, device, stageFlags, dstLayoutFlags);
3260 const auto varCount = paramSet->getVariableDescriptorCount();
3261
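// When the last binding is variable-sized, chain a variable descriptor count structure into the set allocation.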
3262 using VariableCountInfoPtr = de::MovePtr<VkDescriptorSetVariableDescriptorCountAllocateInfo>;
3263
3264 VariableCountInfoPtr dstVariableCountInfo;
3265 if (varCount)
3266 {
3267 dstVariableCountInfo = VariableCountInfoPtr(new VkDescriptorSetVariableDescriptorCountAllocateInfo);
3268 *dstVariableCountInfo = initVulkanStructure();
3269
3270 dstVariableCountInfo->descriptorSetCount = 1u;
3271 dstVariableCountInfo->pDescriptorCounts = &(varCount.get());
3272 }
3273 const auto dstSet = makeDescriptorSet(vkd, device, dstPool.get(), dstLayout.get(), dstVariableCountInfo.get());
3274
3275 // Source pool and set (optional).
3276 const auto srcPoolFlags = m_params.getSrcPoolCreateFlags();
3277 const auto srcLayoutFlags = m_params.getSrcLayoutCreateFlags();
3278 DescriptorSetPtr iterationSrcSet;
3279 Move<VkDescriptorPool> srcPool;
3280 Move<VkDescriptorSetLayout> srcLayout;
3281 Move<VkDescriptorSet> srcSet;
3282
3283 // Extra set for external resources and output buffer.
3284 std::vector<Resource> extraResources;
3285 extraResources.emplace_back(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, vkd, device, alloc, qIndex, queue, useAABBs, 0u,
3286 numIterations);
3287 if (useExternalImage)
3288 extraResources.emplace_back(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, vkd, device, alloc, qIndex, queue, useAABBs,
3289 getExternalSampledImageValue());
3290 if (useExternalSampler)
3291 extraResources.emplace_back(VK_DESCRIPTOR_TYPE_SAMPLER, vkd, device, alloc, qIndex, queue, useAABBs, 0u);
3292 if (rayTracing)
3293 extraResources.emplace_back(VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR, vkd, device, alloc, qIndex, queue,
3294 useAABBs, 0u);
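// Note the emplace order above determines the binding numbers used for the extra set below.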
3295
3296 Move<VkDescriptorPool> extraPool;
3297 {
3298 DescriptorPoolBuilder poolBuilder;
3299 poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
3300 if (useExternalImage)
3301 poolBuilder.addType(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE);
3302 if (useExternalSampler)
3303 poolBuilder.addType(VK_DESCRIPTOR_TYPE_SAMPLER);
3304 if (rayTracing)
3305 poolBuilder.addType(VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR);
3306 extraPool = poolBuilder.build(vkd, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
3307 }
3308
3309 Move<VkDescriptorSetLayout> extraLayout;
3310 {
3311 DescriptorSetLayoutBuilder layoutBuilder;
3312 layoutBuilder.addBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1u, stageFlags, nullptr);
3313 if (useExternalImage)
3314 layoutBuilder.addBinding(VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 1u, stageFlags, nullptr);
3315 if (useExternalSampler)
3316 layoutBuilder.addBinding(VK_DESCRIPTOR_TYPE_SAMPLER, 1u, stageFlags, nullptr);
3317 if (rayTracing)
3318 {
3319 // The extra acceleration structure is used from the ray generation shader only.
3320 layoutBuilder.addBinding(VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR, 1u, VK_SHADER_STAGE_RAYGEN_BIT_KHR,
3321 nullptr);
3322 }
3323 extraLayout = layoutBuilder.build(vkd, device);
3324 }
3325
3326 const auto extraSet = makeDescriptorSet(vkd, device, extraPool.get(), extraLayout.get());
3327
3328 // Update extra set.
3329 using DescriptorBufferInfoPtr = de::MovePtr<VkDescriptorBufferInfo>;
3330 using DescriptorImageInfoPtr = de::MovePtr<VkDescriptorImageInfo>;
3331 using DescriptorASInfoPtr = de::MovePtr<VkWriteDescriptorSetAccelerationStructureKHR>;
3332
3333 uint32_t bindingCount = 0u;
3334 DescriptorBufferInfoPtr bufferInfoPtr;
3335 DescriptorImageInfoPtr imageInfoPtr;
3336 DescriptorImageInfoPtr samplerInfoPtr;
3337 DescriptorASInfoPtr asWriteInfoPtr;
3338
3339 const auto outputBufferSize = static_cast<VkDeviceSize>(sizeof(uint32_t) * static_cast<size_t>(numIterations));
3340 bufferInfoPtr = DescriptorBufferInfoPtr(new VkDescriptorBufferInfo(
3341 makeDescriptorBufferInfo(extraResources[bindingCount++].bufferWithMemory->get(), 0ull, outputBufferSize)));
3342 if (useExternalImage)
3343 imageInfoPtr = DescriptorImageInfoPtr(new VkDescriptorImageInfo(makeDescriptorImageInfo(
3344 VK_NULL_HANDLE, extraResources[bindingCount++].imageView.get(), VK_IMAGE_LAYOUT_GENERAL)));
3345 if (useExternalSampler)
3346 samplerInfoPtr = DescriptorImageInfoPtr(new VkDescriptorImageInfo(makeDescriptorImageInfo(
3347 extraResources[bindingCount++].sampler.get(), VK_NULL_HANDLE, VK_IMAGE_LAYOUT_GENERAL)));
3348 if (rayTracing)
3349 {
3350 asWriteInfoPtr = DescriptorASInfoPtr(new VkWriteDescriptorSetAccelerationStructureKHR);
3351 *asWriteInfoPtr = initVulkanStructure();
3352 asWriteInfoPtr->accelerationStructureCount = 1u;
3353 asWriteInfoPtr->pAccelerationStructures = extraResources[bindingCount++].asData.tlas.get()->getPtr();
3354 }
3355
3356 {
3357 bindingCount = 0u;
3358 DescriptorSetUpdateBuilder updateBuilder;
3359 updateBuilder.writeSingle(extraSet.get(), DescriptorSetUpdateBuilder::Location::binding(bindingCount++),
3360 VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, bufferInfoPtr.get());
3361 if (useExternalImage)
3362 updateBuilder.writeSingle(extraSet.get(), DescriptorSetUpdateBuilder::Location::binding(bindingCount++),
3363 VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, imageInfoPtr.get());
3364 if (useExternalSampler)
3365 updateBuilder.writeSingle(extraSet.get(), DescriptorSetUpdateBuilder::Location::binding(bindingCount++),
3366 VK_DESCRIPTOR_TYPE_SAMPLER, samplerInfoPtr.get());
3367 if (rayTracing)
3368 updateBuilder.writeSingle(extraSet.get(), DescriptorSetUpdateBuilder::Location::binding(bindingCount++),
3369 VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR, asWriteInfoPtr.get());
3370 updateBuilder.update(vkd, device);
3371 }
3372
3373 // Push constants.
3374 const uint32_t zero = 0u;
3375 const VkPushConstantRange pcRange = {stageFlags, 0u /*offset*/, static_cast<uint32_t>(sizeof(zero)) /*size*/};
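// A zero is always pushed; the index_push_constant variants read it as their dynamically-uniform array index.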
3376
3377 // Needed for some test variants.
3378 Move<VkShaderModule> vertPassthrough;
3379 Move<VkShaderModule> tesePassthrough;
3380 Move<VkShaderModule> tescPassthrough;
3381 Move<VkShaderModule> rgenPassthrough;
3382 Move<VkShaderModule> missPassthrough;
3383
3384 if (m_params.testingStage == TestingStage::FRAGMENT || m_params.testingStage == TestingStage::GEOMETRY ||
3385 m_params.testingStage == TestingStage::TESS_CONTROL || m_params.testingStage == TestingStage::TESS_EVAL)
3386 {
3387 vertPassthrough = createShaderModule(vkd, device, m_context.getBinaryCollection().get("vert"), 0u);
3388 }
3389
3390 if (m_params.testingStage == TestingStage::TESS_CONTROL)
3391 {
3392 tesePassthrough = createShaderModule(vkd, device, m_context.getBinaryCollection().get("tese"), 0u);
3393 }
3394
3395 if (m_params.testingStage == TestingStage::TESS_EVAL)
3396 {
3397 tescPassthrough = createShaderModule(vkd, device, m_context.getBinaryCollection().get("tesc"), 0u);
3398 }
3399
3400 if (m_params.testingStage == TestingStage::CLOSEST_HIT || m_params.testingStage == TestingStage::ANY_HIT ||
3401 m_params.testingStage == TestingStage::INTERSECTION || m_params.testingStage == TestingStage::MISS ||
3402 m_params.testingStage == TestingStage::CALLABLE)
3403 {
3404 rgenPassthrough = createShaderModule(vkd, device, m_context.getBinaryCollection().get("rgen"), 0u);
3405 }
3406
3407 if (m_params.testingStage == TestingStage::INTERSECTION)
3408 {
3409 missPassthrough = createShaderModule(vkd, device, m_context.getBinaryCollection().get("miss"), 0u);
3410 }
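// The passthrough modules above complete the pipeline around the stage under test, whose module is created per iteration below.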
3411
3412 for (uint32_t iteration = 0u; iteration < numIterations; ++iteration)
3413 {
3414 // Generate source set for the current iteration.
3415 if (srcSetNeeded)
3416 {
3417 // Free previous descriptor set before rebuilding the pool.
3418 srcSet = Move<VkDescriptorSet>();
3419 iterationSrcSet = paramSet->genSourceSet(m_params.sourceSetStrategy, iteration);
3420 srcPool = iterationSrcSet->makeDescriptorPool(vkd, device, m_params.poolMutableStrategy, srcPoolFlags);
3421 srcLayout = iterationSrcSet->makeDescriptorSetLayout(vkd, device, stageFlags, srcLayoutFlags);
3422
3423 const auto srcVarCount = iterationSrcSet->getVariableDescriptorCount();
3424 VariableCountInfoPtr srcVariableCountInfo;
3425
3426 if (srcVarCount)
3427 {
3428 srcVariableCountInfo = VariableCountInfoPtr(new VkDescriptorSetVariableDescriptorCountAllocateInfo);
3429 *srcVariableCountInfo = initVulkanStructure();
3430
3431 srcVariableCountInfo->descriptorSetCount = 1u;
3432 srcVariableCountInfo->pDescriptorCounts = &(srcVarCount.get());
3433 }
3434
3435 srcSet = makeDescriptorSet(vkd, device, srcPool.get(), srcLayout.get(), srcVariableCountInfo.get());
3436 }
3437
3438 // Set layouts and sets used in the pipeline.
3439 const std::vector<VkDescriptorSetLayout> setLayouts = {dstLayout.get(), extraLayout.get()};
3440 const std::vector<VkDescriptorSet> usedSets = {dstSet.get(), extraSet.get()};
3441
3442 // Create resources.
3443 allResources.emplace_back(paramSet->createResources(vkd, device, alloc, qIndex, queue, iteration, useAABBs));
3444 const auto &resources = allResources.back();
3445
3446 // Make pipeline for the current iteration.
3447 const auto pipelineLayout = makePipelineLayout(vkd, device, static_cast<uint32_t>(setLayouts.size()),
3448 de::dataOrNull(setLayouts), 1u, &pcRange);
3449 const auto moduleName = shaderName(iteration);
3450 const auto shaderModule = createShaderModule(vkd, device, m_context.getBinaryCollection().get(moduleName), 0u);
3451
3452 Move<VkPipeline> pipeline;
3453 Move<VkRenderPass> renderPass;
3454 Move<VkFramebuffer> framebuffer;
3455
3456 uint32_t shaderGroupHandleSize = 0u;
3457 uint32_t shaderGroupBaseAlignment = 1u;
3458
3459 de::MovePtr<BufferWithMemory> raygenSBT;
3460 de::MovePtr<BufferWithMemory> missSBT;
3461 de::MovePtr<BufferWithMemory> hitSBT;
3462 de::MovePtr<BufferWithMemory> callableSBT;
3463
3464 VkStridedDeviceAddressRegionKHR raygenSBTRegion = makeStridedDeviceAddressRegionKHR(0, 0, 0);
3465 VkStridedDeviceAddressRegionKHR missSBTRegion = makeStridedDeviceAddressRegionKHR(0, 0, 0);
3466 VkStridedDeviceAddressRegionKHR hitSBTRegion = makeStridedDeviceAddressRegionKHR(0, 0, 0);
3467 VkStridedDeviceAddressRegionKHR callableSBTRegion = makeStridedDeviceAddressRegionKHR(0, 0, 0);
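// The SBT regions stay null unless the matching shader group is added to the ray tracing pipeline below.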
3468
3469 if (bindPoint == VK_PIPELINE_BIND_POINT_COMPUTE)
3470 pipeline = makeComputePipeline(vkd, device, pipelineLayout.get(), shaderModule.get());
3471 else if (bindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS)
3472 {
3473 VkShaderModule vertModule = VK_NULL_HANDLE;
3474 VkShaderModule teseModule = VK_NULL_HANDLE;
3475 VkShaderModule tescModule = VK_NULL_HANDLE;
3476 VkShaderModule geomModule = VK_NULL_HANDLE;
3477 VkShaderModule fragModule = VK_NULL_HANDLE;
3478
3479 if (m_params.testingStage == TestingStage::VERTEX)
3480 vertModule = shaderModule.get();
3481 else if (m_params.testingStage == TestingStage::FRAGMENT)
3482 {
3483 vertModule = vertPassthrough.get();
3484 fragModule = shaderModule.get();
3485 }
3486 else if (m_params.testingStage == TestingStage::GEOMETRY)
3487 {
3488 vertModule = vertPassthrough.get();
3489 geomModule = shaderModule.get();
3490 }
3491 else if (m_params.testingStage == TestingStage::TESS_CONTROL)
3492 {
3493 vertModule = vertPassthrough.get();
3494 teseModule = tesePassthrough.get();
3495 tescModule = shaderModule.get();
3496 }
3497 else if (m_params.testingStage == TestingStage::TESS_EVAL)
3498 {
3499 vertModule = vertPassthrough.get();
3500 tescModule = tescPassthrough.get();
3501 teseModule = shaderModule.get();
3502 }
3503 else
3504 DE_ASSERT(false);
3505
3506 renderPass = buildRenderPass(vkd, device, resources);
3507 pipeline = buildGraphicsPipeline(vkd, device, pipelineLayout.get(), vertModule, tescModule, teseModule,
3508 geomModule, fragModule, renderPass.get());
3509 framebuffer = buildFramebuffer(vkd, device, renderPass.get(), resources);
3510 }
3511 else if (bindPoint == VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR)
3512 {
3513 const auto rayTracingPipeline = de::newMovePtr<RayTracingPipeline>();
3514 const auto rayTracingPropertiesKHR = makeRayTracingProperties(vki, physDev);
3515 shaderGroupHandleSize = rayTracingPropertiesKHR->getShaderGroupHandleSize();
3516 shaderGroupBaseAlignment = rayTracingPropertiesKHR->getShaderGroupBaseAlignment();
3517
3518 VkShaderModule rgenModule = VK_NULL_HANDLE;
3519 VkShaderModule isecModule = VK_NULL_HANDLE;
3520 VkShaderModule ahitModule = VK_NULL_HANDLE;
3521 VkShaderModule chitModule = VK_NULL_HANDLE;
3522 VkShaderModule missModule = VK_NULL_HANDLE;
3523 VkShaderModule callModule = VK_NULL_HANDLE;
3524
3525 const uint32_t rgenGroup = 0u;
3526 uint32_t hitGroup = 0u;
3527 uint32_t missGroup = 0u;
3528 uint32_t callGroup = 0u;
3529
3530 if (m_params.testingStage == TestingStage::RAY_GEN)
3531 {
3532 rgenModule = shaderModule.get();
3533 rayTracingPipeline->addShader(VK_SHADER_STAGE_RAYGEN_BIT_KHR, rgenModule, rgenGroup);
3534 }
3535 else if (m_params.testingStage == TestingStage::INTERSECTION)
3536 {
3537 hitGroup = 1u;
3538 missGroup = 2u;
3539 rgenModule = rgenPassthrough.get();
3540 missModule = missPassthrough.get();
3541 isecModule = shaderModule.get();
3542 rayTracingPipeline->addShader(VK_SHADER_STAGE_RAYGEN_BIT_KHR, rgenModule, rgenGroup);
3543 rayTracingPipeline->addShader(VK_SHADER_STAGE_INTERSECTION_BIT_KHR, isecModule, hitGroup);
3544 rayTracingPipeline->addShader(VK_SHADER_STAGE_MISS_BIT_KHR, missModule, missGroup);
3545 }
3546 else if (m_params.testingStage == TestingStage::ANY_HIT)
3547 {
3548 hitGroup = 1u;
3549 rgenModule = rgenPassthrough.get();
3550 ahitModule = shaderModule.get();
3551 rayTracingPipeline->addShader(VK_SHADER_STAGE_RAYGEN_BIT_KHR, rgenModule, rgenGroup);
3552 rayTracingPipeline->addShader(VK_SHADER_STAGE_ANY_HIT_BIT_KHR, ahitModule, hitGroup);
3553 }
3554 else if (m_params.testingStage == TestingStage::CLOSEST_HIT)
3555 {
3556 hitGroup = 1u;
3557 rgenModule = rgenPassthrough.get();
3558 chitModule = shaderModule.get();
3559 rayTracingPipeline->addShader(VK_SHADER_STAGE_RAYGEN_BIT_KHR, rgenModule, rgenGroup);
3560 rayTracingPipeline->addShader(VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR, chitModule, hitGroup);
3561 }
3562 else if (m_params.testingStage == TestingStage::MISS)
3563 {
3564 missGroup = 1u;
3565 rgenModule = rgenPassthrough.get();
3566 missModule = shaderModule.get();
3567 rayTracingPipeline->addShader(VK_SHADER_STAGE_RAYGEN_BIT_KHR, rgenModule, rgenGroup);
3568 rayTracingPipeline->addShader(VK_SHADER_STAGE_MISS_BIT_KHR, missModule, missGroup);
3569 }
3570 else if (m_params.testingStage == TestingStage::CALLABLE)
3571 {
3572 callGroup = 1u;
3573 rgenModule = rgenPassthrough.get();
3574 callModule = shaderModule.get();
3575 rayTracingPipeline->addShader(VK_SHADER_STAGE_RAYGEN_BIT_KHR, rgenModule, rgenGroup);
3576 rayTracingPipeline->addShader(VK_SHADER_STAGE_CALLABLE_BIT_KHR, callModule, callGroup);
3577 }
3578 else
3579 DE_ASSERT(false);
3580
3581 pipeline = rayTracingPipeline->createPipeline(vkd, device, pipelineLayout.get());
3582
3583 raygenSBT = rayTracingPipeline->createShaderBindingTable(
3584 vkd, device, pipeline.get(), alloc, shaderGroupHandleSize, shaderGroupBaseAlignment, rgenGroup, 1u);
3585 raygenSBTRegion =
3586 makeStridedDeviceAddressRegionKHR(getBufferDeviceAddress(vkd, device, raygenSBT->get(), 0ull),
3587 shaderGroupHandleSize, shaderGroupHandleSize);
3588
3589 if (missGroup > 0u)
3590 {
3591 missSBT = rayTracingPipeline->createShaderBindingTable(
3592 vkd, device, pipeline.get(), alloc, shaderGroupHandleSize, shaderGroupBaseAlignment, missGroup, 1u);
3593 missSBTRegion =
3594 makeStridedDeviceAddressRegionKHR(getBufferDeviceAddress(vkd, device, missSBT->get(), 0ull),
3595 shaderGroupHandleSize, shaderGroupHandleSize);
3596 }
3597
3598 if (hitGroup > 0u)
3599 {
3600 hitSBT = rayTracingPipeline->createShaderBindingTable(
3601 vkd, device, pipeline.get(), alloc, shaderGroupHandleSize, shaderGroupBaseAlignment, hitGroup, 1u);
3602 hitSBTRegion =
3603 makeStridedDeviceAddressRegionKHR(getBufferDeviceAddress(vkd, device, hitSBT->get(), 0ull),
3604 shaderGroupHandleSize, shaderGroupHandleSize);
3605 }
3606
3607 if (callGroup > 0u)
3608 {
3609 callableSBT = rayTracingPipeline->createShaderBindingTable(
3610 vkd, device, pipeline.get(), alloc, shaderGroupHandleSize, shaderGroupBaseAlignment, callGroup, 1u);
3611 callableSBTRegion =
3612 makeStridedDeviceAddressRegionKHR(getBufferDeviceAddress(vkd, device, callableSBT->get(), 0ull),
3613 shaderGroupHandleSize, shaderGroupHandleSize);
3614 }
3615 }
3616 else
3617 DE_ASSERT(false);
3618
3619 // Command buffer for the current iteration.
3620 const auto cmdBufferPtr = allocateCommandBuffer(vkd, device, cmdPool.get(), VK_COMMAND_BUFFER_LEVEL_PRIMARY);
3621 const auto cmdBuffer = cmdBufferPtr.get();
3622
3623 beginCommandBuffer(vkd, cmdBuffer);
3624
3625 const Step steps[] = {(updateAfterBind ? Step::BIND : Step::UPDATE),
3626 (updateAfterBind ? Step::UPDATE : Step::BIND)};
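// With update-after-bind the set is bound first and written afterwards; otherwise it is written before binding.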
3627
3628 for (const auto &step : steps)
3629 {
3630 if (step == Step::BIND)
3631 {
3632 vkd.cmdBindPipeline(cmdBuffer, bindPoint, pipeline.get());
3633 vkd.cmdBindDescriptorSets(cmdBuffer, bindPoint, pipelineLayout.get(), 0u,
3634 static_cast<uint32_t>(usedSets.size()), de::dataOrNull(usedSets), 0u,
3635 nullptr);
3636 }
3637 else // Step::UPDATE
3638 {
3639 if (srcSetNeeded)
3640 {
3641 // Note: these operations need to be called on paramSet and not iterationSrcSet. The latter is a correct,
3642 // compatible set but, when a binding has been changed from non-mutable to mutable or to an extended
3643 // mutable type, the lists of descriptor types for its mutable bindings are not in iteration order the
3644 // way they are in the original set, so iterationSrcSet must not be used when updating or copying the
3645 // sets.
3646 paramSet->updateDescriptorSet(vkd, device, srcSet.get(), iteration, resources);
3647 paramSet->copyDescriptorSet(vkd, device, srcSet.get(), dstSet.get());
3648 }
3649 else
3650 {
3651 paramSet->updateDescriptorSet(vkd, device, dstSet.get(), iteration, resources);
3652 }
3653 }
3654 }
3655
3656 // Run shader.
3657 vkd.cmdPushConstants(cmdBuffer, pipelineLayout.get(), stageFlags, 0u, static_cast<uint32_t>(sizeof(zero)),
3658 &zero);
3659
3660 if (bindPoint == VK_PIPELINE_BIND_POINT_COMPUTE)
3661 vkd.cmdDispatch(cmdBuffer, 1u, 1u, 1u);
3662 else if (bindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS)
3663 {
3664 const auto extent = getDefaultExtent();
3665 const auto renderArea = makeRect2D(extent);
3666
3667 beginRenderPass(vkd, cmdBuffer, renderPass.get(), framebuffer.get(), renderArea);
3668 vkd.cmdDraw(cmdBuffer, 3u, 1u, 0u, 0u);
3669 endRenderPass(vkd, cmdBuffer);
3670 }
3671 else if (bindPoint == VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR)
3672 {
3673 vkd.cmdTraceRaysKHR(cmdBuffer, &raygenSBTRegion, &missSBTRegion, &hitSBTRegion, &callableSBTRegion, 1u, 1u,
3674 1u);
3675 }
3676 else
3677 DE_ASSERT(false);
3678
3679 endCommandBuffer(vkd, cmdBuffer);
3680 submitCommandsAndWait(vkd, device, queue, cmdBuffer);
3681
3682 // Verify output buffer.
3683 {
3684 const auto outputBufferVal = extraResources[0].getStoredValue(vkd, device, alloc, qIndex, queue, iteration);
3685 DE_ASSERT(static_cast<bool>(outputBufferVal));
3686
3687 const auto expectedValue = getExpectedOutputBufferValue();
3688 if (outputBufferVal.get() != expectedValue)
3689 {
3690 std::ostringstream msg;
3691 msg << "Iteration " << iteration << ": unexpected value found in output buffer (expected "
3692 << expectedValue << " and found " << outputBufferVal.get() << ")";
3693 TCU_FAIL(msg.str());
3694 }
3695 }
3696
3697 // Verify descriptor writes.
3698 {
3699 size_t resourcesOffset = 0;
3700 const auto writeMask = getStoredValueMask();
3701 const auto numBindings = paramSet->numBindings();
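// Resources are stored flattened in binding order, so resourcesOffset marks where the current binding's resources begin.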
3702
3703 for (uint32_t bindingIdx = 0u; bindingIdx < numBindings; ++bindingIdx)
3704 {
3705 const auto binding = paramSet->getBinding(bindingIdx);
3706 const auto bindingTypes = binding->typesAtIteration(iteration);
3707
3708 for (size_t descriptorIdx = 0; descriptorIdx < bindingTypes.size(); ++descriptorIdx)
3709 {
3710 const auto &descriptorType = bindingTypes[descriptorIdx];
3711 if (!isShaderWritable(descriptorType))
3712 continue;
3713
3714 const auto &resource = resources[resourcesOffset + descriptorIdx];
3715 const auto initialValue = resource.initialValue;
3716 const auto storedValuePtr = resource.getStoredValue(vkd, device, alloc, qIndex, queue);
3717
3718 DE_ASSERT(static_cast<bool>(storedValuePtr));
3719 const auto storedValue = storedValuePtr.get();
3720 const auto expectedValue = (initialValue | writeMask);
3721 if (expectedValue != storedValue)
3722 {
3723 std::ostringstream msg;
3724 msg << "Iteration " << iteration << ": descriptor at binding " << bindingIdx << " index "
3725 << descriptorIdx << " with type " << de::toString(descriptorType)
3726 << " contains unexpected value " << std::hex << storedValue << " (expected "
3727 << expectedValue << ")";
3728 TCU_FAIL(msg.str());
3729 }
3730 }
3731
3732 resourcesOffset += bindingTypes.size();
3733 }
3734 }
3735 }
3736
3737 return tcu::TestStatus::pass("Pass");
3738 }
3739
3740 using GroupPtr = de::MovePtr<tcu::TestCaseGroup>;
3741
3742 void createMutableTestVariants(tcu::TestContext &testCtx, tcu::TestCaseGroup *parentGroup,
3743 const DescriptorSetPtr &descriptorSet, const std::vector<TestingStage> &stagesToTest)
3744 {
3745 const struct
3746 {
3747 UpdateType updateType;
3748 const char *name;
3749 } updateTypes[] = {
3750 {UpdateType::WRITE, "update_write"},
3751 {UpdateType::COPY, "update_copy"},
3752 };
3753
3754 const struct
3755 {
3756 SourceSetStrategy sourceSetStrategy;
3757 const char *name;
3758 } sourceStrategies[] = {
3759 {SourceSetStrategy::MUTABLE, "mutable_source"},
3760 {SourceSetStrategy::NONMUTABLE, "nonmutable_source"},
3761 {SourceSetStrategy::NO_SOURCE, "no_source"},
3762 };
3763
3764 const struct
3765 {
3766 SourceSetType sourceSetType;
3767 const char *name;
3768 } sourceTypes[] = {
3769 {SourceSetType::NORMAL, "normal_source"},
3770 {SourceSetType::HOST_ONLY, "host_only_source"},
3771 {SourceSetType::NO_SOURCE, "no_source"},
3772 };
3773
3774 const struct
3775 {
3776 PoolMutableStrategy poolMutableStrategy;
3777 const char *name;
3778 } poolStrategies[] = {
3779 {PoolMutableStrategy::KEEP_TYPES, "pool_same_types"},
3780 {PoolMutableStrategy::NO_TYPES, "pool_no_types"},
3781 {PoolMutableStrategy::EXPAND_TYPES, "pool_expand_types"},
3782 };
3783
3784 const struct
3785 {
3786 UpdateMoment updateMoment;
3787 const char *name;
3788 } updateMoments[] = {
3789 {UpdateMoment::NORMAL, "pre_update"},
3790 {UpdateMoment::UPDATE_AFTER_BIND, "update_after_bind"},
3791 };
3792
3793 const struct
3794 {
3795 ArrayAccessType arrayAccessType;
3796 const char *name;
3797 } arrayAccessTypes[] = {
3798 {ArrayAccessType::CONSTANT, "index_constant"},
3799 {ArrayAccessType::PUSH_CONSTANT, "index_push_constant"},
3800 {ArrayAccessType::NO_ARRAY, "no_array"},
3801 };
3802
3803 const struct StageAndName
3804 {
3805 TestingStage testingStage;
3806 const char *name;
3807 } testStageList[] = {
3808 {TestingStage::COMPUTE, "comp"}, {TestingStage::VERTEX, "vert"}, {TestingStage::TESS_CONTROL, "tesc"},
3809 {TestingStage::TESS_EVAL, "tese"}, {TestingStage::GEOMETRY, "geom"}, {TestingStage::FRAGMENT, "frag"},
3810 {TestingStage::RAY_GEN, "rgen"}, {TestingStage::INTERSECTION, "isec"}, {TestingStage::ANY_HIT, "ahit"},
3811 {TestingStage::CLOSEST_HIT, "chit"}, {TestingStage::MISS, "miss"}, {TestingStage::CALLABLE, "call"},
3812 };
3813
3814 const bool hasArrays = descriptorSet->hasArrays();
3815 const bool hasInputAttachments = usesInputAttachments(*descriptorSet);
3816
3817 for (const auto &ut : updateTypes)
3818 {
3819 GroupPtr updateGroup(new tcu::TestCaseGroup(testCtx, ut.name));
3820
3821 for (const auto &srcStrategy : sourceStrategies)
3822 {
3823 // Skip combinations that make no sense.
3824 if (ut.updateType == UpdateType::WRITE && srcStrategy.sourceSetStrategy != SourceSetStrategy::NO_SOURCE)
3825 continue;
3826
3827 if (ut.updateType == UpdateType::COPY && srcStrategy.sourceSetStrategy == SourceSetStrategy::NO_SOURCE)
3828 continue;
3829
3830 if (srcStrategy.sourceSetStrategy == SourceSetStrategy::NONMUTABLE && descriptorSet->needsAnyAliasing())
3831 continue;
3832
3833 GroupPtr srcStrategyGroup(new tcu::TestCaseGroup(testCtx, srcStrategy.name));
3834
3835 for (const auto &srcType : sourceTypes)
3836 {
3837 // Skip combinations that make no sense.
3838 if (ut.updateType == UpdateType::WRITE && srcType.sourceSetType != SourceSetType::NO_SOURCE)
3839 continue;
3840
3841 if (ut.updateType == UpdateType::COPY && srcType.sourceSetType == SourceSetType::NO_SOURCE)
3842 continue;
3843
3844 GroupPtr srcTypeGroup(new tcu::TestCaseGroup(testCtx, srcType.name));
3845
3846 for (const auto &poolStrategy : poolStrategies)
3847 {
3848 GroupPtr poolStrategyGroup(new tcu::TestCaseGroup(testCtx, poolStrategy.name));
3849
3850 for (const auto &moment : updateMoments)
3851 {
3852 //if (moment.updateMoment == UpdateMoment::UPDATE_AFTER_BIND && srcType.sourceSetType == SourceSetType::HOST_ONLY)
3853 // continue;
3854
3855 if (moment.updateMoment == UpdateMoment::UPDATE_AFTER_BIND && hasInputAttachments)
3856 continue;
3857
3858 GroupPtr momentGroup(new tcu::TestCaseGroup(testCtx, moment.name));
3859
3860 for (const auto &accessType : arrayAccessTypes)
3861 {
3862 // Skip combinations that make no sense.
3863 if (hasArrays && accessType.arrayAccessType == ArrayAccessType::NO_ARRAY)
3864 continue;
3865
3866 if (!hasArrays && accessType.arrayAccessType != ArrayAccessType::NO_ARRAY)
3867 continue;
3868
3869 GroupPtr accessTypeGroup(new tcu::TestCaseGroup(testCtx, accessType.name));
3870
3871 for (const auto &testStage : stagesToTest)
3872 {
3873 const auto beginItr = std::begin(testStageList);
3874 const auto endItr = std::end(testStageList);
3875 const auto iter = std::find_if(beginItr, endItr,
3876 [testStage](const StageAndName &ts)
3877 { return ts.testingStage == testStage; });
3878
3879 DE_ASSERT(iter != endItr);
3880 const auto &stage = *iter;
3881
3882 if (hasInputAttachments && stage.testingStage != TestingStage::FRAGMENT)
3883 continue;
3884
3885 TestParams params = {
3886 descriptorSet,
3887 ut.updateType,
3888 srcStrategy.sourceSetStrategy,
3889 srcType.sourceSetType,
3890 poolStrategy.poolMutableStrategy,
3891 moment.updateMoment,
3892 accessType.arrayAccessType,
3893 stage.testingStage,
3894 };
3895
3896 accessTypeGroup->addChild(new MutableTypesTest(testCtx, stage.name, params));
3897 }
3898
3899 momentGroup->addChild(accessTypeGroup.release());
3900 }
3901
3902 poolStrategyGroup->addChild(momentGroup.release());
3903 }
3904
3905 srcTypeGroup->addChild(poolStrategyGroup.release());
3906 }
3907
3908 srcStrategyGroup->addChild(srcTypeGroup.release());
3909 }
3910
3911 updateGroup->addChild(srcStrategyGroup.release());
3912 }
3913
3914 parentGroup->addChild(updateGroup.release());
3915 }
3916 }
3917
3918 } // namespace
3919
3920 std::string descriptorTypeStr(VkDescriptorType descriptorType)
3921 {
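// E.g. VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE becomes "sampled_image", used below to name test groups.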
3922 static const auto prefixLen = std::string("VK_DESCRIPTOR_TYPE_").size();
3923 return de::toLower(de::toString(descriptorType).substr(prefixLen));
3924 }
3925
3926 static void createChildren(tcu::TestCaseGroup *testGroup);
3927
3928 static void cleanupGroup(tcu::TestCaseGroup *testGroup)
3929 {
3930 DE_UNREF(testGroup);
3931 // Destroy singleton objects.
3932 g_singletonDevice.clear();
3933 }
3934
3935 tcu::TestCaseGroup *createDescriptorMutableTests(tcu::TestContext &testCtx)
3936 {
3937 return createTestGroup(testCtx, "mutable_descriptor", createChildren, cleanupGroup);
3938 }
3939
3940 void createChildren(tcu::TestCaseGroup *mainGroup)
3941 {
3942 tcu::TestContext &testCtx = mainGroup->getTestContext();
3943
3944 const VkDescriptorType basicDescriptorTypes[] = {
3945 VK_DESCRIPTOR_TYPE_SAMPLER,
3946 VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
3947 VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
3948 VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
3949 VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
3950 VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
3951 VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
3952 VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
3953 VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT,
3954 VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR,
3955 };
3956
3957 static const auto mandatoryTypes = getMandatoryMutableTypes();
3958
3959 using StageVec = std::vector<TestingStage>;
3960
3961 const StageVec allStages = {
3962 TestingStage::COMPUTE, TestingStage::VERTEX, TestingStage::TESS_CONTROL, TestingStage::TESS_EVAL,
3963 TestingStage::GEOMETRY, TestingStage::FRAGMENT, TestingStage::RAY_GEN, TestingStage::INTERSECTION,
3964 TestingStage::ANY_HIT, TestingStage::CLOSEST_HIT, TestingStage::MISS, TestingStage::CALLABLE,
3965 };
3966
3967 const StageVec reducedStages = {
3968 TestingStage::COMPUTE,
3969 TestingStage::VERTEX,
3970 TestingStage::FRAGMENT,
3971 TestingStage::RAY_GEN,
3972 };
3973
3974 const StageVec computeOnly = {
3975 TestingStage::COMPUTE,
3976 };
3977
3978 // Basic tests with a single mutable descriptor.
3979 {
3980 GroupPtr singleCases(new tcu::TestCaseGroup(testCtx, "single"));
3981
3982 for (const auto &descriptorType : basicDescriptorTypes)
3983 {
3984 const auto groupName = descriptorTypeStr(descriptorType);
3985 const std::vector<VkDescriptorType> actualTypes(1u, descriptorType);
3986
3987 DescriptorSetPtr setPtr;
3988 {
3989 DescriptorSet::BindingPtrVector setBindings;
3990 setBindings.emplace_back(new SingleBinding(VK_DESCRIPTOR_TYPE_MUTABLE_EXT, actualTypes));
3991 setPtr = DescriptorSetPtr(new DescriptorSet(setBindings));
3992 }
3993
3994 GroupPtr subGroup(new tcu::TestCaseGroup(testCtx, groupName.c_str()));
3995 createMutableTestVariants(testCtx, subGroup.get(), setPtr, allStages);
3996
3997 singleCases->addChild(subGroup.release());
3998 }
3999
4000 // Case with a single descriptor that iterates several types.
4001 {
4002 DescriptorSetPtr setPtr;
4003 {
4004 DescriptorSet::BindingPtrVector setBindings;
4005 setBindings.emplace_back(new SingleBinding(VK_DESCRIPTOR_TYPE_MUTABLE_EXT, mandatoryTypes));
4006 setPtr = DescriptorSetPtr(new DescriptorSet(setBindings));
4007 }
4008
4009 GroupPtr subGroup(new tcu::TestCaseGroup(testCtx, "all_mandatory"));
4010 createMutableTestVariants(testCtx, subGroup.get(), setPtr, reducedStages);
4011
4012 singleCases->addChild(subGroup.release());
4013 }
4014
4015 // Cases that try to verify switching from any descriptor type to any other is possible.
4016 {
4017 GroupPtr subGroup(new tcu::TestCaseGroup(testCtx, "switches"));
4018
4019 for (const auto &initialDescriptorType : basicDescriptorTypes)
4020 {
4021 for (const auto &finalDescriptorType : basicDescriptorTypes)
4022 {
4023 if (initialDescriptorType == finalDescriptorType)
4024 continue;
4025
4026 const std::vector<VkDescriptorType> mutableTypes{initialDescriptorType, finalDescriptorType};
4027 DescriptorSet::BindingPtrVector setBindings;
4028 setBindings.emplace_back(new SingleBinding(VK_DESCRIPTOR_TYPE_MUTABLE_EXT, mutableTypes));
4029
4030 DescriptorSetPtr setPtr = DescriptorSetPtr(new DescriptorSet(setBindings));
4031
4032 const auto groupName =
4033 descriptorTypeStr(initialDescriptorType) + "_" + descriptorTypeStr(finalDescriptorType);
4034 GroupPtr combinationGroup(new tcu::TestCaseGroup(testCtx, groupName.c_str()));
4035 createMutableTestVariants(testCtx, combinationGroup.get(), setPtr, reducedStages);
4036 subGroup->addChild(combinationGroup.release());
4037 }
4038 }
4039
4040 singleCases->addChild(subGroup.release());
4041 }
4042
4043 mainGroup->addChild(singleCases.release());
4044 }
4045
4046 // Cases with a single non-mutable descriptor. This provides some basic checks to verify copying to non-mutable bindings works.
4047 {
4048 GroupPtr singleNonMutableGroup(new tcu::TestCaseGroup(testCtx, "single_nonmutable"));
4049
4050 for (const auto &descriptorType : basicDescriptorTypes)
4051 {
4052 DescriptorSet::BindingPtrVector bindings;
4053 bindings.emplace_back(new SingleBinding(descriptorType, std::vector<VkDescriptorType>()));
4054 DescriptorSetPtr descriptorSet(new DescriptorSet(bindings));
4055
4056 const auto groupName = descriptorTypeStr(descriptorType);
4057 GroupPtr descGroup(new tcu::TestCaseGroup(testCtx, groupName.c_str()));
4058
4059 createMutableTestVariants(testCtx, descGroup.get(), descriptorSet, reducedStages);
4060 singleNonMutableGroup->addChild(descGroup.release());
4061 }
4062
4063 mainGroup->addChild(singleNonMutableGroup.release());
4064 }
4065
4066 const struct
4067 {
4068 bool unbounded;
4069 const char *name;
4070 } unboundedCases[] = {
4071 {false, "constant_size"},
4072 {true, "unbounded"},
4073 };
4074
4075 const struct
4076 {
4077 bool aliasing;
4078 const char *name;
4079 } aliasingCases[] = {
4080 {false, "noaliasing"},
4081 {true, "aliasing"},
4082 };
4083
4084 const struct
4085 {
4086 bool oneArrayOnly;
4087 bool mixNonMutable;
4088 const char *groupName;
4089 } arrayCountGroups[] = {
4090 // Tests using an array of mutable descriptors
4091 {true, false, "one_array"},
4092 // Tests using multiple arrays of mutable descriptors
4093 {false, false, "multiple_arrays"},
4094 // Tests using multiple arrays of mutable descriptors mixed with arrays of nonmutable ones
4095 {false, true, "multiple_arrays_mixed"},
4096 };
4097
4098 for (const auto &variant : arrayCountGroups)
4099 {
4100 GroupPtr arrayGroup(new tcu::TestCaseGroup(testCtx, variant.groupName));
4101
4102 for (const auto &unboundedCase : unboundedCases)
4103 {
4104 GroupPtr unboundedGroup(new tcu::TestCaseGroup(testCtx, unboundedCase.name));
4105
4106 for (const auto &aliasingCase : aliasingCases)
4107 {
4108 GroupPtr aliasingGroup(new tcu::TestCaseGroup(testCtx, aliasingCase.name));
4109
4110 DescriptorSet::BindingPtrVector setBindings;
4111
4112 // Prepare descriptors for this test variant.
4113 for (size_t mandatoryTypesRotation = 0; mandatoryTypesRotation < mandatoryTypes.size();
4114 ++mandatoryTypesRotation)
4115 {
4116 const bool isLastBinding =
4117 (variant.oneArrayOnly || mandatoryTypesRotation == mandatoryTypes.size() - 1u);
4118 const bool isUnbounded = (unboundedCase.unbounded && isLastBinding);
4119
4120 // Create a rotation of the mandatory types for each mutable array binding.
4121 auto mandatoryTypesVector = mandatoryTypes;
4122 {
4123 const auto beginPtr = &mandatoryTypesVector[0];
4124 const auto endPtr = beginPtr + mandatoryTypesVector.size();
4125 std::rotate(beginPtr, &mandatoryTypesVector[mandatoryTypesRotation], endPtr);
4126 }
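// E.g. with types {A, B, C}, a rotation of 1 yields {B, C, A}, so each array binding starts on a different type.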
4127
4128 std::vector<SingleBinding> arrayBindings;
4129
4130 if (aliasingCase.aliasing)
4131 {
4132 // With aliasing, the descriptor types rotate in each descriptor.
4133 for (size_t typeIdx = 0; typeIdx < mandatoryTypesVector.size(); ++typeIdx)
4134 {
4135 auto rotatedTypes = mandatoryTypesVector;
4136 const auto beginPtr = &rotatedTypes[0];
4137 const auto endPtr = beginPtr + rotatedTypes.size();
4138
4139 std::rotate(beginPtr, &rotatedTypes[typeIdx], endPtr);
4140
4141 arrayBindings.emplace_back(VK_DESCRIPTOR_TYPE_MUTABLE_EXT, rotatedTypes);
4142 }
4143 }
4144 else
4145 {
4146 // Without aliasing, all descriptors use the same type at the same time.
4147 const SingleBinding noAliasingBinding(VK_DESCRIPTOR_TYPE_MUTABLE_EXT, mandatoryTypesVector);
4148 arrayBindings.resize(mandatoryTypesVector.size(), noAliasingBinding);
4149 }
4150
4151 setBindings.emplace_back(new ArrayBinding(isUnbounded, arrayBindings));
4152
4153 if (variant.mixNonMutable && !isUnbounded)
4154 {
4155 // Create a non-mutable array binding interleaved with the other ones.
4156 const SingleBinding nonMutableBinding(mandatoryTypes[mandatoryTypesRotation],
4157 std::vector<VkDescriptorType>());
4158 std::vector<SingleBinding> nonMutableBindings(mandatoryTypes.size(), nonMutableBinding);
4159 setBindings.emplace_back(new ArrayBinding(false, nonMutableBindings));
4160 }
4161
4162 if (variant.oneArrayOnly)
4163 break;
4164 }
4165
4166 DescriptorSetPtr descriptorSet(new DescriptorSet(setBindings));
4167 createMutableTestVariants(testCtx, aliasingGroup.get(), descriptorSet, computeOnly);
4168
4169 unboundedGroup->addChild(aliasingGroup.release());
4170 }
4171
4172 arrayGroup->addChild(unboundedGroup.release());
4173 }
4174
4175 mainGroup->addChild(arrayGroup.release());
4176 }
4177
4178 // Cases with a single mutable binding followed by an array of mutable bindings.
4179 // The array will use a single type beyond the mandatory ones.
4180 {
4181 GroupPtr singleAndArrayGroup(new tcu::TestCaseGroup(testCtx, "single_and_array"));
4182
4183 for (const auto &descriptorType : basicDescriptorTypes)
4184 {
4185 // Input attachments will not use arrays.
4186 if (descriptorType == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT)
4187 continue;
4188
4189 if (de::contains(begin(mandatoryTypes), end(mandatoryTypes), descriptorType))
4190 continue;
4191
4192 const auto groupName = descriptorTypeStr(descriptorType);
4193 GroupPtr descTypeGroup(new tcu::TestCaseGroup(testCtx, groupName.c_str()));
4194
4195 for (const auto &aliasingCase : aliasingCases)
4196 {
4197 GroupPtr aliasingGroup(new tcu::TestCaseGroup(testCtx, aliasingCase.name));
4198
4199 DescriptorSet::BindingPtrVector setBindings;
4200 std::vector<SingleBinding> arrayBindings;
4201
4202 // Add single type beyond the mandatory ones.
4203 auto arrayBindingDescTypes = mandatoryTypes;
4204 arrayBindingDescTypes.push_back(descriptorType);
4205
4206 // Single mutable descriptor as the first binding.
4207 setBindings.emplace_back(new SingleBinding(VK_DESCRIPTOR_TYPE_MUTABLE_EXT, arrayBindingDescTypes));
4208
4209 // Descriptor array as the second binding.
4210 if (aliasingCase.aliasing)
4211 {
4212 // With aliasing, the descriptor types rotate in each descriptor.
4213 for (size_t typeIdx = 0; typeIdx < arrayBindingDescTypes.size(); ++typeIdx)
4214 {
4215 auto rotatedTypes = arrayBindingDescTypes;
4216 const auto beginPtr = &rotatedTypes[0];
4217 const auto endPtr = beginPtr + rotatedTypes.size();
4218
4219 std::rotate(beginPtr, &rotatedTypes[typeIdx], endPtr);
4220
4221 arrayBindings.emplace_back(VK_DESCRIPTOR_TYPE_MUTABLE_EXT, rotatedTypes);
4222 }
4223 }
4224 else
4225 {
4226 // Without aliasing, all descriptors use the same type at the same time.
4227 const SingleBinding noAliasingBinding(VK_DESCRIPTOR_TYPE_MUTABLE_EXT, arrayBindingDescTypes);
4228 arrayBindings.resize(arrayBindingDescTypes.size(), noAliasingBinding);
4229 }
4230
4231 // Second binding: array binding.
4232 setBindings.emplace_back(new ArrayBinding(false /*unbounded*/, arrayBindings));
4233
4234 // Create set and test variants.
4235 DescriptorSetPtr descriptorSet(new DescriptorSet(setBindings));
4236 createMutableTestVariants(testCtx, aliasingGroup.get(), descriptorSet, computeOnly);
4237
4238 descTypeGroup->addChild(aliasingGroup.release());
4239 }
4240
4241 singleAndArrayGroup->addChild(descTypeGroup.release());
4242 }
4243
4244 mainGroup->addChild(singleAndArrayGroup.release());
4245 }
4246
4247 // Cases with several mutable non-array bindings.
4248 {
4249 GroupPtr multipleGroup(new tcu::TestCaseGroup(testCtx, "multiple"));
4250 GroupPtr mutableOnlyGroup(new tcu::TestCaseGroup(testCtx, "mutable_only"));
4251 GroupPtr mixedGroup(new tcu::TestCaseGroup(testCtx, "mixed"));
4252
4253 // Each descriptor will have a different type in every iteration, like in the one_array aliasing case.
4254 for (int groupIdx = 0; groupIdx < 2; ++groupIdx)
4255 {
4256 const bool mixed = (groupIdx == 1);
4257 DescriptorSet::BindingPtrVector setBindings;
4258
4259 for (size_t typeIdx = 0; typeIdx < mandatoryTypes.size(); ++typeIdx)
4260 {
4261 auto rotatedTypes = mandatoryTypes;
4262 const auto beginPtr = &rotatedTypes[0];
4263 const auto endPtr = beginPtr + rotatedTypes.size();
4264
4265 std::rotate(beginPtr, &rotatedTypes[typeIdx], endPtr);
4266 setBindings.emplace_back(new SingleBinding(VK_DESCRIPTOR_TYPE_MUTABLE_EXT, rotatedTypes));
4267
4268 // Additional non-mutable binding interleaved with the mutable ones.
4269 if (mixed)
4270 setBindings.emplace_back(new SingleBinding(rotatedTypes[0], std::vector<VkDescriptorType>()));
4271 }
4272 DescriptorSetPtr descriptorSet(new DescriptorSet(setBindings));
4273
4274 const auto dstGroup = (mixed ? mixedGroup.get() : mutableOnlyGroup.get());
4275 createMutableTestVariants(testCtx, dstGroup, descriptorSet, computeOnly);
4276 }
4277
4278 multipleGroup->addChild(mutableOnlyGroup.release());
4279 multipleGroup->addChild(mixedGroup.release());
4280 mainGroup->addChild(multipleGroup.release());
4281 }
4282
4283 // Corner cases
4284 {
4285 GroupPtr miscGroup(new tcu::TestCaseGroup(testCtx, "misc"));
4286 {
4287 TestParams params = {(DescriptorSetPtr) nullptr,
4288 UpdateType::WRITE,
4289 SourceSetStrategy::MUTABLE,
4290 SourceSetType::NORMAL,
4291 PoolMutableStrategy::KEEP_NO_MUTABLE_TYPES,
4292 UpdateMoment::NORMAL,
4293 ArrayAccessType::CONSTANT,
4294 TestingStage::COMPUTE};
4295
4296 const std::vector<VkDescriptorType> mutableTypes(1u, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
4297 const uint32_t maxNonMutableDescs = 2;
4298 const uint32_t maxMutableDescs = 2;
4299
4300 for (uint32_t numNonMutDescs = 0; numNonMutDescs <= maxNonMutableDescs; numNonMutDescs++)
4301 {
4302 for (uint32_t numMutDescs = 1; numMutDescs <= maxMutableDescs; numMutDescs++)
4303 {
4304 DescriptorSetPtr setPtr;
4305 {
4306 DescriptorSet::BindingPtrVector setBindings;
4307
4308 for (uint32_t cntNonMutDescs = 0; cntNonMutDescs < numNonMutDescs; cntNonMutDescs++)
4309 setBindings.emplace_back(
4310 new SingleBinding(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, std::vector<VkDescriptorType>()));
4311
4312 // Mutable descriptors are kept at the end so their binding indices fall outside the pool's mutable type list range.
4313 for (uint32_t cntMutDescs = 0; cntMutDescs < numMutDescs; cntMutDescs++)
4314 setBindings.emplace_back(new SingleBinding(VK_DESCRIPTOR_TYPE_MUTABLE_EXT, mutableTypes));
4315
4316 setPtr = DescriptorSetPtr(new DescriptorSet(setBindings));
4317 }
4318 params.descriptorSet = setPtr;
4319
4320 // Test mutable descriptor types that are out of range of the pool's type lists.
4321 {
4322 const std::string testName =
4323 "mutable_type_out_of_range_" + de::toString(numNonMutDescs) + de::toString(numMutDescs);
4324 miscGroup->addChild(new MutableTypesTest(testCtx, testName, params));
4325 }
4326 }
4327 }
4328 }
4329 mainGroup->addChild(miscGroup.release());
4330 }
4331 }
4332
4333 } // namespace BindingModel
4334 } // namespace vkt
4335