1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
4 *
5 * Copyright (c) 2016 The Khronos Group Inc.
6 *
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
10 *
11 * http://www.apache.org/licenses/LICENSE-2.0
12 *
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
18 *
19 *//*!
20 * \file
21 * \brief Synchronization operation abstraction
22 *//*--------------------------------------------------------------------*/
23
24 #include "vktSynchronizationOperation.hpp"
25 #include "vkDefs.hpp"
26 #include "vktTestCase.hpp"
27 #include "vktTestCaseUtil.hpp"
28 #include "vkRef.hpp"
29 #include "vkRefUtil.hpp"
30 #include "vkMemUtil.hpp"
31 #include "vkBarrierUtil.hpp"
32 #include "vkQueryUtil.hpp"
33 #include "vkTypeUtil.hpp"
34 #include "vkImageUtil.hpp"
35 #include "vkBuilderUtil.hpp"
36 #include "vkCmdUtil.hpp"
37 #include "vkObjUtil.hpp"
38 #include "deUniquePtr.hpp"
39 #include "tcuTestLog.hpp"
40 #include "tcuTextureUtil.hpp"
41 #include <vector>
42 #include <sstream>
43
44 namespace vkt
45 {
46 namespace synchronization
47 {
48 namespace
49 {
50 using namespace vk;
51
52 enum Constants
53 {
54 MAX_IMAGE_DIMENSION_2D = 0x1000u,
55 MAX_UBO_RANGE = 0x4000u,
56 MAX_UPDATE_BUFFER_SIZE = 0x10000u,
57 };
58
59 enum BufferType
60 {
61 BUFFER_TYPE_UNIFORM,
62 BUFFER_TYPE_STORAGE,
63 };
64
65 enum AccessMode
66 {
67 ACCESS_MODE_READ,
68 ACCESS_MODE_WRITE,
69 };
70
71 enum PipelineType
72 {
73 PIPELINE_TYPE_GRAPHICS,
74 PIPELINE_TYPE_COMPUTE,
75 };
76
77 static const char* const s_perVertexBlock = "gl_PerVertex {\n"
78 " vec4 gl_Position;\n"
79 "}";
80
81 //! A pipeline that can be embedded inside an operation.
82 class Pipeline
83 {
84 public:
virtual ~Pipeline (void) {}
86 virtual void recordCommands (OperationContext& context, const VkCommandBuffer cmdBuffer, const VkDescriptorSet descriptorSet) = 0;
87 };
88
89 //! Vertex data that covers the whole viewport with two triangles.
90 class VertexGrid
91 {
92 public:
VertexGrid (OperationContext& context)
94 : m_vertexFormat (VK_FORMAT_R32G32B32A32_SFLOAT)
95 , m_vertexStride (tcu::getPixelSize(mapVkFormat(m_vertexFormat)))
96 {
97 const DeviceInterface& vk = context.getDeviceInterface();
98 const VkDevice device = context.getDevice();
99 Allocator& allocator = context.getAllocator();
100
101 // Vertex positions
102 {
103 m_vertexData.push_back(tcu::Vec4( 1.0f, 1.0f, 0.0f, 1.0f));
104 m_vertexData.push_back(tcu::Vec4(-1.0f, 1.0f, 0.0f, 1.0f));
105 m_vertexData.push_back(tcu::Vec4(-1.0f, -1.0f, 0.0f, 1.0f));
106
107 m_vertexData.push_back(tcu::Vec4(-1.0f, -1.0f, 0.0f, 1.0f));
108 m_vertexData.push_back(tcu::Vec4( 1.0f, -1.0f, 0.0f, 1.0f));
109 m_vertexData.push_back(tcu::Vec4( 1.0f, 1.0f, 0.0f, 1.0f));
110 }
111
112 {
113 const VkDeviceSize vertexDataSizeBytes = m_vertexData.size() * sizeof(m_vertexData[0]);
114
115 m_vertexBuffer = de::MovePtr<Buffer>(new Buffer(vk, device, allocator, makeBufferCreateInfo(vertexDataSizeBytes, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT), MemoryRequirement::HostVisible));
116 DE_ASSERT(sizeof(m_vertexData[0]) == m_vertexStride);
117
118 {
119 const Allocation& alloc = m_vertexBuffer->getAllocation();
120
121 deMemcpy(alloc.getHostPtr(), &m_vertexData[0], static_cast<std::size_t>(vertexDataSizeBytes));
122 flushAlloc(vk, device, alloc);
123 }
124 }
125
126 // Indices
127 {
128 const VkDeviceSize indexBufferSizeBytes = sizeof(deUint32) * m_vertexData.size();
129 const deUint32 numIndices = static_cast<deUint32>(m_vertexData.size());
130
131 m_indexBuffer = de::MovePtr<Buffer>(new Buffer(vk, device, allocator, makeBufferCreateInfo(indexBufferSizeBytes, VK_BUFFER_USAGE_INDEX_BUFFER_BIT), MemoryRequirement::HostVisible));
132
133 {
134 const Allocation& alloc = m_indexBuffer->getAllocation();
135 deUint32* const pData = static_cast<deUint32*>(alloc.getHostPtr());
136
137 for (deUint32 i = 0; i < numIndices; ++i)
138 pData[i] = i;
139
140 flushAlloc(vk, device, alloc);
141 }
142 }
143 }
144
VkFormat getVertexFormat (void) const { return m_vertexFormat; }
deUint32 getVertexStride (void) const { return m_vertexStride; }
VkIndexType getIndexType (void) const { return VK_INDEX_TYPE_UINT32; }
deUint32 getNumVertices (void) const { return static_cast<deUint32>(m_vertexData.size()); }
deUint32 getNumIndices (void) const { return getNumVertices(); }
VkBuffer getVertexBuffer (void) const { return **m_vertexBuffer; }
VkBuffer getIndexBuffer (void) const { return **m_indexBuffer; }
152
153 private:
154 const VkFormat m_vertexFormat;
155 const deUint32 m_vertexStride;
156 std::vector<tcu::Vec4> m_vertexData;
157 de::MovePtr<Buffer> m_vertexBuffer;
158 de::MovePtr<Buffer> m_indexBuffer;
159 };
160
161 //! Add flags for all shader stages required to support a particular stage (e.g. fragment requires vertex as well).
VkShaderStageFlags getRequiredStages (const VkShaderStageFlagBits stage)
163 {
164 VkShaderStageFlags flags = 0;
165
166 DE_ASSERT(stage == VK_SHADER_STAGE_COMPUTE_BIT || (stage & VK_SHADER_STAGE_COMPUTE_BIT) == 0);
167
168 if (stage & VK_SHADER_STAGE_ALL_GRAPHICS)
169 flags |= VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT;
170
171 if (stage & (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT))
172 flags |= VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT;
173
174 if (stage & VK_SHADER_STAGE_GEOMETRY_BIT)
175 flags |= VK_SHADER_STAGE_GEOMETRY_BIT;
176
177 if (stage & VK_SHADER_STAGE_COMPUTE_BIT)
178 flags |= VK_SHADER_STAGE_COMPUTE_BIT;
179
180 return flags;
181 }
182
183 //! Check that SSBO read/write is available and that all shader stages are supported.
void requireFeaturesForSSBOAccess (OperationContext& context, const VkShaderStageFlags usedStages)
185 {
186 const InstanceInterface& vki = context.getInstanceInterface();
187 const VkPhysicalDevice physDevice = context.getPhysicalDevice();
188 FeatureFlags flags = (FeatureFlags)0;
189
190 if (usedStages & VK_SHADER_STAGE_FRAGMENT_BIT)
191 flags |= FEATURE_FRAGMENT_STORES_AND_ATOMICS;
192
193 if (usedStages & (VK_SHADER_STAGE_ALL_GRAPHICS & (~VK_SHADER_STAGE_FRAGMENT_BIT)))
194 flags |= FEATURE_VERTEX_PIPELINE_STORES_AND_ATOMICS;
195
196 if (usedStages & VK_SHADER_STAGE_GEOMETRY_BIT)
197 flags |= FEATURE_GEOMETRY_SHADER;
198
199 if (usedStages & (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT))
200 flags |= FEATURE_TESSELLATION_SHADER;
201
202 requireFeatures(vki, physDevice, flags);
203 }
204
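//! Return a host-visible view of the buffer contents; invalidates the allocation so device writes are visible to the host.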
Data getHostBufferData (const OperationContext& context, const Buffer& hostBuffer, const VkDeviceSize size)
206 {
207 const DeviceInterface& vk = context.getDeviceInterface();
208 const VkDevice device = context.getDevice();
209 const Allocation& alloc = hostBuffer.getAllocation();
210 const Data data =
211 {
212 static_cast<std::size_t>(size), // std::size_t size;
213 static_cast<deUint8*>(alloc.getHostPtr()), // const deUint8* data;
214 };
215
216 invalidateAlloc(vk, device, alloc);
217
218 return data;
219 }
220
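//! Sanity-check that the given stage is a single, supported shader stage bit.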
void assertValidShaderStage (const VkShaderStageFlagBits stage)
222 {
223 switch (stage)
224 {
225 case VK_SHADER_STAGE_VERTEX_BIT:
226 case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
227 case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
228 case VK_SHADER_STAGE_GEOMETRY_BIT:
229 case VK_SHADER_STAGE_FRAGMENT_BIT:
230 case VK_SHADER_STAGE_COMPUTE_BIT:
231 // OK
232 break;
233
234 default:
235 DE_FATAL("Invalid shader stage");
236 break;
237 }
238 }
239
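//! Map a shader stage to the pipeline stage used for barriers and sync info.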
VkPipelineStageFlags pipelineStageFlagsFromShaderStageFlagBits (const VkShaderStageFlagBits shaderStage)
241 {
242 switch (shaderStage)
243 {
244 case VK_SHADER_STAGE_VERTEX_BIT: return VK_PIPELINE_STAGE_VERTEX_SHADER_BIT;
245 case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT: return VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT;
246 case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT: return VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT;
247 case VK_SHADER_STAGE_GEOMETRY_BIT: return VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT;
248 case VK_SHADER_STAGE_FRAGMENT_BIT: return VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
249 case VK_SHADER_STAGE_COMPUTE_BIT: return VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
250
251 // Other usages are probably an error, so flag that.
252 default:
253 DE_FATAL("Invalid shader stage");
254 return (VkPipelineStageFlags)0;
255 }
256 }
257
258 //! Fill destination buffer with a repeating pattern.
void fillPattern (void* const pData, const VkDeviceSize size)
260 {
261 static const deUint8 pattern[] = { 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31 };
262 deUint8* const pBytes = static_cast<deUint8*>(pData);
263
264 for (deUint32 i = 0; i < size; ++i)
265 pBytes[i] = pattern[i % DE_LENGTH_OF_ARRAY(pattern)];
266 }
267
268 //! Get size in bytes of a pixel buffer with given extent.
VkDeviceSize getPixelBufferSize (const VkFormat format, const VkExtent3D& extent)
270 {
271 const int pixelSize = tcu::getPixelSize(mapVkFormat(format));
272 return (pixelSize * extent.width * extent.height * extent.depth);
273 }
274
275 //! Determine the size of a 2D image that can hold sizeBytes data.
VkExtent3D get2DImageExtentWithSize (const VkDeviceSize sizeBytes, const deUint32 pixelSize)
277 {
278 const deUint32 size = static_cast<deUint32>(sizeBytes / pixelSize);
279
280 DE_ASSERT(size <= MAX_IMAGE_DIMENSION_2D * MAX_IMAGE_DIMENSION_2D);
281
282 return makeExtent3D(
283 std::min(size, static_cast<deUint32>(MAX_IMAGE_DIMENSION_2D)),
284 (size / MAX_IMAGE_DIMENSION_2D) + (size % MAX_IMAGE_DIMENSION_2D != 0 ? 1u : 0u),
285 1u);
286 }
287
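//! Pick a clear value appropriate for the format (depth/stencil, integer or float color).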
VkClearValue makeClearValue (const VkFormat format)
289 {
290 if (isDepthStencilFormat(format))
291 return makeClearValueDepthStencil(0.4f, 21u);
292 else
293 {
294 if (isIntFormat(format) || isUintFormat(format))
295 return makeClearValueColorU32(8u, 16u, 24u, 32u);
296 else
297 return makeClearValueColorF32(0.25f, 0.49f, 0.75f, 1.0f);
298 }
299 }
300
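//! Fill a pixel buffer with the given clear value, handling depth, stencil, combined depth/stencil and color formats.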
void clearPixelBuffer (tcu::PixelBufferAccess& pixels, const VkClearValue& clearValue)
302 {
303 const tcu::TextureFormat format = pixels.getFormat();
304 const tcu::TextureChannelClass channelClass = tcu::getTextureChannelClass(format.type);
305
306 if (format.order == tcu::TextureFormat::D)
307 {
308 for (int z = 0; z < pixels.getDepth(); z++)
309 for (int y = 0; y < pixels.getHeight(); y++)
310 for (int x = 0; x < pixels.getWidth(); x++)
311 pixels.setPixDepth(clearValue.depthStencil.depth, x, y, z);
312 }
313 else if (format.order == tcu::TextureFormat::S)
314 {
315 for (int z = 0; z < pixels.getDepth(); z++)
316 for (int y = 0; y < pixels.getHeight(); y++)
317 for (int x = 0; x < pixels.getWidth(); x++)
318 pixels.setPixStencil(clearValue.depthStencil.stencil, x, y, z);
319 }
320 else if (format.order == tcu::TextureFormat::DS)
321 {
322 for (int z = 0; z < pixels.getDepth(); z++)
323 for (int y = 0; y < pixels.getHeight(); y++)
324 for (int x = 0; x < pixels.getWidth(); x++)
325 {
326 pixels.setPixDepth(clearValue.depthStencil.depth, x, y, z);
327 pixels.setPixStencil(clearValue.depthStencil.stencil, x, y, z);
328 }
329 }
330 else if (channelClass == tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER || channelClass == tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER)
331 {
332 const tcu::UVec4 color (clearValue.color.uint32);
333
334 for (int z = 0; z < pixels.getDepth(); z++)
335 for (int y = 0; y < pixels.getHeight(); y++)
336 for (int x = 0; x < pixels.getWidth(); x++)
337 pixels.setPixel(color, x, y, z);
338 }
339 else
340 {
341 const tcu::Vec4 color (clearValue.color.float32);
342
343 for (int z = 0; z < pixels.getDepth(); z++)
344 for (int y = 0; y < pixels.getHeight(); y++)
345 for (int x = 0; x < pixels.getWidth(); x++)
346 pixels.setPixel(color, x, y, z);
347 }
348 }
349
//! Storage image formats that require the StorageImageExtendedFormats SPIR-V capability (only Vulkan-defined formats are listed).
bool isStorageImageExtendedFormat (const VkFormat format)
352 {
353 switch (format)
354 {
355 case VK_FORMAT_R32G32_SFLOAT:
356 case VK_FORMAT_R32G32_SINT:
357 case VK_FORMAT_R32G32_UINT:
358 case VK_FORMAT_R16G16B16A16_UNORM:
359 case VK_FORMAT_R16G16B16A16_SNORM:
360 case VK_FORMAT_R16G16_SFLOAT:
361 case VK_FORMAT_R16G16_UNORM:
362 case VK_FORMAT_R16G16_SNORM:
363 case VK_FORMAT_R16G16_SINT:
364 case VK_FORMAT_R16G16_UINT:
365 case VK_FORMAT_R16_SFLOAT:
366 case VK_FORMAT_R16_UNORM:
367 case VK_FORMAT_R16_SNORM:
368 case VK_FORMAT_R16_SINT:
369 case VK_FORMAT_R16_UINT:
370 case VK_FORMAT_R8G8_UNORM:
371 case VK_FORMAT_R8G8_SNORM:
372 case VK_FORMAT_R8G8_SINT:
373 case VK_FORMAT_R8G8_UINT:
374 case VK_FORMAT_R8_UNORM:
375 case VK_FORMAT_R8_SNORM:
376 case VK_FORMAT_R8_SINT:
377 case VK_FORMAT_R8_UINT:
378 return true;
379
380 default:
381 return false;
382 }
383 }
384
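//! Image view type corresponding to a (non-array) image type.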
VkImageViewType getImageViewType (const VkImageType imageType)
386 {
387 switch (imageType)
388 {
389 case VK_IMAGE_TYPE_1D: return VK_IMAGE_VIEW_TYPE_1D;
390 case VK_IMAGE_TYPE_2D: return VK_IMAGE_VIEW_TYPE_2D;
391 case VK_IMAGE_TYPE_3D: return VK_IMAGE_VIEW_TYPE_3D;
392
393 default:
394 DE_FATAL("Unknown image type");
395 return VK_IMAGE_VIEW_TYPE_LAST;
396 }
397 }
398
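//! GLSL image type string (e.g. "uimage2D") for the given format and image type.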
std::string getShaderImageType (const VkFormat format, const VkImageType imageType)
400 {
401 const tcu::TextureFormat texFormat = mapVkFormat(format);
402 const std::string formatPart = tcu::getTextureChannelClass(texFormat.type) == tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER ? "u" :
403 tcu::getTextureChannelClass(texFormat.type) == tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER ? "i" : "";
404 switch (imageType)
405 {
406 case VK_IMAGE_TYPE_1D: return formatPart + "image1D";
407 case VK_IMAGE_TYPE_2D: return formatPart + "image2D";
408 case VK_IMAGE_TYPE_3D: return formatPart + "image3D";
409
410 default:
411 DE_FATAL("Unknown image type");
return std::string();
413 }
414 }
415
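//! GLSL format layout qualifier (e.g. "rgba32f") for the given format.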
std::string getShaderImageFormatQualifier (const VkFormat format)
417 {
418 const tcu::TextureFormat texFormat = mapVkFormat(format);
419 const char* orderPart = DE_NULL;
420 const char* typePart = DE_NULL;
421
422 switch (texFormat.order)
423 {
424 case tcu::TextureFormat::R: orderPart = "r"; break;
425 case tcu::TextureFormat::RG: orderPart = "rg"; break;
426 case tcu::TextureFormat::RGB: orderPart = "rgb"; break;
427 case tcu::TextureFormat::RGBA: orderPart = "rgba"; break;
428
429 default:
DE_FATAL("Unsupported texture channel order");
431 break;
432 }
433
434 switch (texFormat.type)
435 {
436 case tcu::TextureFormat::FLOAT: typePart = "32f"; break;
437 case tcu::TextureFormat::HALF_FLOAT: typePart = "16f"; break;
438
439 case tcu::TextureFormat::UNSIGNED_INT32: typePart = "32ui"; break;
440 case tcu::TextureFormat::UNSIGNED_INT16: typePart = "16ui"; break;
441 case tcu::TextureFormat::UNSIGNED_INT8: typePart = "8ui"; break;
442
443 case tcu::TextureFormat::SIGNED_INT32: typePart = "32i"; break;
444 case tcu::TextureFormat::SIGNED_INT16: typePart = "16i"; break;
445 case tcu::TextureFormat::SIGNED_INT8: typePart = "8i"; break;
446
447 case tcu::TextureFormat::UNORM_INT16: typePart = "16"; break;
448 case tcu::TextureFormat::UNORM_INT8: typePart = "8"; break;
449
450 case tcu::TextureFormat::SNORM_INT16: typePart = "16_snorm"; break;
451 case tcu::TextureFormat::SNORM_INT8: typePart = "8_snorm"; break;
452
453 default:
DE_FATAL("Unsupported texture channel type");
455 break;
456 }
457
458 return std::string(orderPart) + typePart;
459 }
460
461 namespace FillUpdateBuffer
462 {
463
464 enum BufferOp
465 {
466 BUFFER_OP_FILL,
467 BUFFER_OP_UPDATE,
468 };
469
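//! Writes to the test buffer with vkCmdFillBuffer or vkCmdUpdateBuffer, depending on the requested operation.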
470 class Implementation : public Operation
471 {
472 public:
Implementation (OperationContext& context, Resource& resource, const BufferOp bufferOp)
474 : m_context (context)
475 , m_resource (resource)
476 , m_fillValue (0x13)
477 , m_bufferOp (bufferOp)
478 {
479 DE_ASSERT((m_resource.getBuffer().size % sizeof(deUint32)) == 0);
480 DE_ASSERT(m_bufferOp == BUFFER_OP_FILL || m_resource.getBuffer().size <= MAX_UPDATE_BUFFER_SIZE);
481
482 m_data.resize(static_cast<size_t>(m_resource.getBuffer().size));
483
484 if (m_bufferOp == BUFFER_OP_FILL)
485 {
486 const std::size_t size = m_data.size() / sizeof(m_fillValue);
487 deUint32* const pData = reinterpret_cast<deUint32*>(&m_data[0]);
488
489 for (deUint32 i = 0; i < size; ++i)
490 pData[i] = m_fillValue;
491 }
492 else if (m_bufferOp == BUFFER_OP_UPDATE)
493 {
494 fillPattern(&m_data[0], m_data.size());
495 }
496 else
497 {
498 // \todo Really??
499 // Do nothing
500 }
501 }
502
void recordCommands (const VkCommandBuffer cmdBuffer)
504 {
505 const DeviceInterface& vk = m_context.getDeviceInterface();
506
507 if (m_bufferOp == BUFFER_OP_FILL)
508 vk.cmdFillBuffer(cmdBuffer, m_resource.getBuffer().handle, m_resource.getBuffer().offset, m_resource.getBuffer().size, m_fillValue);
509 else if (m_bufferOp == BUFFER_OP_UPDATE)
510 vk.cmdUpdateBuffer(cmdBuffer, m_resource.getBuffer().handle, m_resource.getBuffer().offset, m_resource.getBuffer().size, reinterpret_cast<deUint32*>(&m_data[0]));
511 else
512 {
513 // \todo Really??
514 // Do nothing
515 }
516 }
517
SyncInfo getSyncInfo (void) const
519 {
520 const SyncInfo syncInfo =
521 {
522 VK_PIPELINE_STAGE_TRANSFER_BIT, // VkPipelineStageFlags stageMask;
523 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags accessMask;
524 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout imageLayout;
525 };
526
527 return syncInfo;
528 }
529
Data getData (void) const
531 {
532 const Data data =
533 {
534 m_data.size(), // std::size_t size;
535 &m_data[0], // const deUint8* data;
536 };
537 return data;
538 }
539
540 private:
541 OperationContext& m_context;
542 Resource& m_resource;
543 std::vector<deUint8> m_data;
544 const deUint32 m_fillValue;
545 const BufferOp m_bufferOp;
546 };
547
548 class Support : public OperationSupport
549 {
550 public:
Support (const ResourceDescription& resourceDesc, const BufferOp bufferOp)
552 : m_resourceDesc (resourceDesc)
553 , m_bufferOp (bufferOp)
554 {
555 DE_ASSERT(m_bufferOp == BUFFER_OP_FILL || m_bufferOp == BUFFER_OP_UPDATE);
556 DE_ASSERT(m_resourceDesc.type == RESOURCE_TYPE_BUFFER);
557 }
558
deUint32 getResourceUsageFlags (void) const
560 {
561 return VK_BUFFER_USAGE_TRANSFER_DST_BIT;
562 }
563
VkQueueFlags getQueueFlags (const OperationContext& context) const
565 {
566 if (m_bufferOp == BUFFER_OP_FILL &&
567 !isDeviceExtensionSupported(context.getUsedApiVersion(), context.getDeviceExtensions(), "VK_KHR_maintenance1"))
568 {
569 return VK_QUEUE_COMPUTE_BIT | VK_QUEUE_GRAPHICS_BIT;
570 }
571
572 return VK_QUEUE_TRANSFER_BIT;
573 }
574
de::MovePtr<Operation> build (OperationContext& context, Resource& resource) const
576 {
577 return de::MovePtr<Operation>(new Implementation(context, resource, m_bufferOp));
578 }
579
580 private:
581 const ResourceDescription m_resourceDesc;
582 const BufferOp m_bufferOp;
583 };
584
585 } // FillUpdateBuffer ns
586
587 namespace CopyBuffer
588 {
589
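//! Copies between the resource buffer and a host-visible staging buffer; the direction depends on the access mode.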
590 class Implementation : public Operation
591 {
592 public:
Implementation (OperationContext& context, Resource& resource, const AccessMode mode)
594 : m_context (context)
595 , m_resource (resource)
596 , m_mode (mode)
597 {
598 const DeviceInterface& vk = m_context.getDeviceInterface();
599 const VkDevice device = m_context.getDevice();
600 Allocator& allocator = m_context.getAllocator();
601 const VkBufferUsageFlags hostBufferUsage = (m_mode == ACCESS_MODE_READ ? VK_BUFFER_USAGE_TRANSFER_DST_BIT : VK_BUFFER_USAGE_TRANSFER_SRC_BIT);
602
603 m_hostBuffer = de::MovePtr<Buffer>(new Buffer(vk, device, allocator, makeBufferCreateInfo(m_resource.getBuffer().size, hostBufferUsage), MemoryRequirement::HostVisible));
604
605 const Allocation& alloc = m_hostBuffer->getAllocation();
606
607 if (m_mode == ACCESS_MODE_READ)
608 deMemset(alloc.getHostPtr(), 0, static_cast<size_t>(m_resource.getBuffer().size));
609 else
610 fillPattern(alloc.getHostPtr(), m_resource.getBuffer().size);
611
612 flushAlloc(vk, device, alloc);
613 }
614
void recordCommands (const VkCommandBuffer cmdBuffer)
616 {
617 const DeviceInterface& vk = m_context.getDeviceInterface();
618 const VkBufferCopy copyRegion = makeBufferCopy(0u, 0u, m_resource.getBuffer().size);
619
620 if (m_mode == ACCESS_MODE_READ)
621 {
vk.cmdCopyBuffer(cmdBuffer, m_resource.getBuffer().handle, **m_hostBuffer, 1u, &copyRegion);
623
624 // Insert a barrier so copied data is available to the host
625 const VkBufferMemoryBarrier barrier = makeBufferMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT, **m_hostBuffer, 0u, m_resource.getBuffer().size);
626 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0, 0u, DE_NULL, 1u, &barrier, 0u, DE_NULL);
627 }
628 else
vk.cmdCopyBuffer(cmdBuffer, **m_hostBuffer, m_resource.getBuffer().handle, 1u, &copyRegion);
630 }
631
SyncInfo getSyncInfo (void) const
633 {
634 const VkAccessFlags access = (m_mode == ACCESS_MODE_READ ? VK_ACCESS_TRANSFER_READ_BIT : VK_ACCESS_TRANSFER_WRITE_BIT);
635 const SyncInfo syncInfo =
636 {
637 VK_PIPELINE_STAGE_TRANSFER_BIT, // VkPipelineStageFlags stageMask;
638 access, // VkAccessFlags accessMask;
639 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout imageLayout;
640 };
641 return syncInfo;
642 }
643
Data getData (void) const
645 {
646 return getHostBufferData(m_context, *m_hostBuffer, m_resource.getBuffer().size);
647 }
648
649 private:
650 OperationContext& m_context;
651 Resource& m_resource;
652 const AccessMode m_mode;
653 de::MovePtr<Buffer> m_hostBuffer;
654 };
655
656 class Support : public OperationSupport
657 {
658 public:
Support (const ResourceDescription& resourceDesc, const AccessMode mode)
660 : m_mode (mode)
661 {
662 DE_ASSERT(resourceDesc.type == RESOURCE_TYPE_BUFFER);
663 DE_UNREF(resourceDesc);
664 }
665
deUint32 getResourceUsageFlags (void) const
667 {
668 return (m_mode == ACCESS_MODE_READ ? VK_BUFFER_USAGE_TRANSFER_SRC_BIT : VK_BUFFER_USAGE_TRANSFER_DST_BIT);
669 }
670
VkQueueFlags getQueueFlags (const OperationContext& context) const
672 {
673 DE_UNREF(context);
674 return VK_QUEUE_TRANSFER_BIT;
675 }
676
de::MovePtr<Operation> build (OperationContext& context, Resource& resource) const
678 {
679 return de::MovePtr<Operation>(new Implementation(context, resource, m_mode));
680 }
681
682 private:
683 const AccessMode m_mode;
684 };
685
686 } // CopyBuffer ns
687
688 namespace CopyBlitImage
689 {
690
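//! Shared logic for copy/blit operations: moves data between a host buffer, a staging image and the resource image; the actual copy/blit command is supplied by a subclass.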
691 class ImplementationBase : public Operation
692 {
693 public:
694 //! Copy/Blit/Resolve etc. operation
695 virtual void recordCopyCommand (const VkCommandBuffer cmdBuffer) = 0;
696
ImplementationBase (OperationContext& context, Resource& resource, const AccessMode mode)
698 : m_context (context)
699 , m_resource (resource)
700 , m_mode (mode)
701 , m_bufferSize (getPixelBufferSize(m_resource.getImage().format, m_resource.getImage().extent))
702 {
703 const DeviceInterface& vk = m_context.getDeviceInterface();
704 const VkDevice device = m_context.getDevice();
705 Allocator& allocator = m_context.getAllocator();
706
707 m_hostBuffer = de::MovePtr<Buffer>(new Buffer(
708 vk, device, allocator, makeBufferCreateInfo(m_bufferSize, VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT),
709 MemoryRequirement::HostVisible));
710
711 const Allocation& alloc = m_hostBuffer->getAllocation();
712 if (m_mode == ACCESS_MODE_READ)
713 deMemset(alloc.getHostPtr(), 0, static_cast<size_t>(m_bufferSize));
714 else
715 fillPattern(alloc.getHostPtr(), m_bufferSize);
716 flushAlloc(vk, device, alloc);
717
718 // Staging image
719 m_image = de::MovePtr<Image>(new Image(
720 vk, device, allocator,
721 makeImageCreateInfo(m_resource.getImage().imageType, m_resource.getImage().extent, m_resource.getImage().format, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT),
722 MemoryRequirement::Any));
723 }
724
void recordCommands (const VkCommandBuffer cmdBuffer)
726 {
727 const DeviceInterface& vk = m_context.getDeviceInterface();
728 const VkBufferImageCopy bufferCopyRegion = makeBufferImageCopy(m_resource.getImage().subresourceLayers, m_resource.getImage().extent);
729
730 const VkImageMemoryBarrier stagingImageTransferSrcLayoutBarrier = makeImageMemoryBarrier(
731 VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT,
732 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
733 **m_image, m_resource.getImage().subresourceRange);
734
735 // Staging image layout
736 {
737 const VkImageMemoryBarrier layoutBarrier = makeImageMemoryBarrier(
738 (VkAccessFlags)0, VK_ACCESS_TRANSFER_WRITE_BIT,
739 VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
740 **m_image, m_resource.getImage().subresourceRange);
741
742 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0,
743 0u, DE_NULL, 0u, DE_NULL, 1u, &layoutBarrier);
744 }
745
746 if (m_mode == ACCESS_MODE_READ)
747 {
748 // Resource Image -> Staging image
749 recordCopyCommand(cmdBuffer);
750
751 // Staging image layout
752 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0,
753 0u, DE_NULL, 0u, DE_NULL, 1u, &stagingImageTransferSrcLayoutBarrier);
754
755 // Image -> Host buffer
756 vk.cmdCopyImageToBuffer(cmdBuffer, **m_image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, **m_hostBuffer, 1u, &bufferCopyRegion);
757
758 // Insert a barrier so copied data is available to the host
759 const VkBufferMemoryBarrier barrier = makeBufferMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT, **m_hostBuffer, 0u, m_bufferSize);
760 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0, 0u, DE_NULL, 1u, &barrier, 0u, DE_NULL);
761 }
762 else
763 {
764 // Host buffer -> Staging image
765 vk.cmdCopyBufferToImage(cmdBuffer, **m_hostBuffer, **m_image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1u, &bufferCopyRegion);
766
767 // Staging image layout
768 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0,
769 0u, DE_NULL, 0u, DE_NULL, 1u, &stagingImageTransferSrcLayoutBarrier);
770
771 // Resource image layout
772 {
773 const VkImageMemoryBarrier layoutBarrier = makeImageMemoryBarrier(
774 (VkAccessFlags)0, VK_ACCESS_TRANSFER_WRITE_BIT,
775 VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
776 m_resource.getImage().handle, m_resource.getImage().subresourceRange);
777
778 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0,
779 0u, DE_NULL, 0u, DE_NULL, 1u, &layoutBarrier);
780 }
781
782 // Staging image -> Resource Image
783 recordCopyCommand(cmdBuffer);
784 }
785 }
786
SyncInfo getSyncInfo (void) const
788 {
789 const VkAccessFlags access = (m_mode == ACCESS_MODE_READ ? VK_ACCESS_TRANSFER_READ_BIT : VK_ACCESS_TRANSFER_WRITE_BIT);
790 const VkImageLayout layout = (m_mode == ACCESS_MODE_READ ? VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL : VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
791 const SyncInfo syncInfo =
792 {
793 VK_PIPELINE_STAGE_TRANSFER_BIT, // VkPipelineStageFlags stageMask;
794 access, // VkAccessFlags accessMask;
795 layout, // VkImageLayout imageLayout;
796 };
797 return syncInfo;
798 }
799
Data getData (void) const
801 {
802 return getHostBufferData(m_context, *m_hostBuffer, m_bufferSize);
803 }
804
805 protected:
806 OperationContext& m_context;
807 Resource& m_resource;
808 const AccessMode m_mode;
809 const VkDeviceSize m_bufferSize;
810 de::MovePtr<Buffer> m_hostBuffer;
811 de::MovePtr<Image> m_image;
812 };
813
VkOffset3D makeExtentOffset (const Resource& resource)
815 {
816 DE_ASSERT(resource.getType() == RESOURCE_TYPE_IMAGE);
817 const VkExtent3D extent = resource.getImage().extent;
818
819 switch (resource.getImage().imageType)
820 {
821 case VK_IMAGE_TYPE_1D: return makeOffset3D(extent.width, 1, 1);
822 case VK_IMAGE_TYPE_2D: return makeOffset3D(extent.width, extent.height, 1);
823 case VK_IMAGE_TYPE_3D: return makeOffset3D(extent.width, extent.height, extent.depth);
824 default:
825 DE_ASSERT(0);
826 return VkOffset3D();
827 }
828 }
829
VkImageBlit makeBlitRegion (const Resource& resource)
831 {
832 const VkImageBlit blitRegion =
833 {
834 resource.getImage().subresourceLayers, // VkImageSubresourceLayers srcSubresource;
835 { makeOffset3D(0, 0, 0), makeExtentOffset(resource) }, // VkOffset3D srcOffsets[2];
836 resource.getImage().subresourceLayers, // VkImageSubresourceLayers dstSubresource;
837 { makeOffset3D(0, 0, 0), makeExtentOffset(resource) }, // VkOffset3D dstOffsets[2];
838 };
839 return blitRegion;
840 }
841
842 class BlitImplementation : public ImplementationBase
843 {
844 public:
BlitImplementation (OperationContext& context, Resource& resource, const AccessMode mode)
846 : ImplementationBase (context, resource, mode)
847 , m_blitRegion (makeBlitRegion(m_resource))
848 {
849 const InstanceInterface& vki = m_context.getInstanceInterface();
850 const VkPhysicalDevice physDevice = m_context.getPhysicalDevice();
851 const VkFormatProperties formatProps = getPhysicalDeviceFormatProperties(vki, physDevice, m_resource.getImage().format);
852 const VkFormatFeatureFlags requiredFlags = (VK_FORMAT_FEATURE_BLIT_SRC_BIT | VK_FORMAT_FEATURE_BLIT_DST_BIT);
853
// Blit support is required for both SRC and DST because both images use the same format.
855 if ((formatProps.optimalTilingFeatures & requiredFlags) != requiredFlags)
856 TCU_THROW(NotSupportedError, "Format doesn't support blits");
857 }
858
void recordCopyCommand (const VkCommandBuffer cmdBuffer)
860 {
861 const DeviceInterface& vk = m_context.getDeviceInterface();
862
863 if (m_mode == ACCESS_MODE_READ)
864 {
865 // Resource Image -> Staging image
866 vk.cmdBlitImage(cmdBuffer, m_resource.getImage().handle, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, **m_image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
867 1u, &m_blitRegion, VK_FILTER_NEAREST);
868 }
869 else
870 {
871 // Staging image -> Resource Image
872 vk.cmdBlitImage(cmdBuffer, **m_image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, m_resource.getImage().handle, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
873 1u, &m_blitRegion, VK_FILTER_NEAREST);
874 }
875 }
876
877 private:
878 const VkImageBlit m_blitRegion;
879 };
880
VkImageCopy makeImageCopyRegion (const Resource& resource)
882 {
883 const VkImageCopy imageCopyRegion =
884 {
885 resource.getImage().subresourceLayers, // VkImageSubresourceLayers srcSubresource;
886 makeOffset3D(0, 0, 0), // VkOffset3D srcOffset;
887 resource.getImage().subresourceLayers, // VkImageSubresourceLayers dstSubresource;
888 makeOffset3D(0, 0, 0), // VkOffset3D dstOffset;
889 resource.getImage().extent, // VkExtent3D extent;
890 };
891 return imageCopyRegion;
892 }
893
894 class CopyImplementation : public ImplementationBase
895 {
896 public:
CopyImplementation (OperationContext& context, Resource& resource, const AccessMode mode)
898 : ImplementationBase (context, resource, mode)
899 , m_imageCopyRegion (makeImageCopyRegion(m_resource))
900 {
901 }
902
void recordCopyCommand (const VkCommandBuffer cmdBuffer)
904 {
905 const DeviceInterface& vk = m_context.getDeviceInterface();
906
907 if (m_mode == ACCESS_MODE_READ)
908 {
909 // Resource Image -> Staging image
910 vk.cmdCopyImage(cmdBuffer, m_resource.getImage().handle, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, **m_image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1u, &m_imageCopyRegion);
911 }
912 else
913 {
914 // Staging image -> Resource Image
915 vk.cmdCopyImage(cmdBuffer, **m_image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, m_resource.getImage().handle, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1u, &m_imageCopyRegion);
916 }
917 }
918
919 private:
920 const VkImageCopy m_imageCopyRegion;
921 };
922
923 enum Type
924 {
925 TYPE_COPY,
926 TYPE_BLIT,
927 };
928
929 class Support : public OperationSupport
930 {
931 public:
Support (const ResourceDescription& resourceDesc, const Type type, const AccessMode mode)
933 : m_type (type)
934 , m_mode (mode)
935 {
936 DE_ASSERT(resourceDesc.type == RESOURCE_TYPE_IMAGE);
937
938 const bool isDepthStencil = isDepthStencilFormat(resourceDesc.imageFormat);
939 m_requiredQueueFlags = (isDepthStencil || m_type == TYPE_BLIT ? VK_QUEUE_GRAPHICS_BIT : VK_QUEUE_TRANSFER_BIT);
940
941 // Don't blit depth/stencil images.
942 DE_ASSERT(m_type != TYPE_BLIT || !isDepthStencil);
943 }
944
deUint32 getResourceUsageFlags (void) const
946 {
947 return (m_mode == ACCESS_MODE_READ ? VK_BUFFER_USAGE_TRANSFER_SRC_BIT : VK_BUFFER_USAGE_TRANSFER_DST_BIT);
948 }
949
VkQueueFlags getQueueFlags (const OperationContext& context) const
951 {
952 DE_UNREF(context);
953 return m_requiredQueueFlags;
954 }
955
de::MovePtr<Operation> build (OperationContext& context, Resource& resource) const
957 {
958 if (m_type == TYPE_COPY)
959 return de::MovePtr<Operation>(new CopyImplementation(context, resource, m_mode));
960 else
961 return de::MovePtr<Operation>(new BlitImplementation(context, resource, m_mode));
962 }
963
964 private:
965 const Type m_type;
966 const AccessMode m_mode;
967 VkQueueFlags m_requiredQueueFlags;
968 };
969
970 } // CopyBlitImage ns
971
972 namespace ShaderAccess
973 {
974
975 enum DispatchCall
976 {
977 DISPATCH_CALL_DISPATCH,
978 DISPATCH_CALL_DISPATCH_INDIRECT,
979 };
980
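//! Graphics pipeline that renders a full-viewport quad and runs the tested access in the selected shader stage.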
981 class GraphicsPipeline : public Pipeline
982 {
983 public:
GraphicsPipeline (OperationContext& context, const VkShaderStageFlagBits stage, const std::string& shaderPrefix, const VkDescriptorSetLayout descriptorSetLayout)
985 : m_vertices (context)
986 {
987 const DeviceInterface& vk = context.getDeviceInterface();
988 const VkDevice device = context.getDevice();
989 Allocator& allocator = context.getAllocator();
990 const VkShaderStageFlags requiredStages = getRequiredStages(stage);
991
992 // Color attachment
993
994 m_colorFormat = VK_FORMAT_R8G8B8A8_UNORM;
995 m_colorImageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u);
996 m_colorImageExtent = makeExtent3D(16u, 16u, 1u);
997 m_colorAttachmentImage = de::MovePtr<Image>(new Image(vk, device, allocator,
998 makeImageCreateInfo(VK_IMAGE_TYPE_2D, m_colorImageExtent, m_colorFormat, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT),
999 MemoryRequirement::Any));
1000
1001 // Pipeline
1002
1003 m_colorAttachmentView = makeImageView (vk, device, **m_colorAttachmentImage, VK_IMAGE_VIEW_TYPE_2D, m_colorFormat, m_colorImageSubresourceRange);
1004 m_renderPass = makeRenderPass (vk, device, m_colorFormat);
1005 m_framebuffer = makeFramebuffer (vk, device, *m_renderPass, *m_colorAttachmentView, m_colorImageExtent.width, m_colorImageExtent.height, 1u);
1006 m_pipelineLayout = makePipelineLayout(vk, device, descriptorSetLayout);
1007
1008 GraphicsPipelineBuilder pipelineBuilder;
1009 pipelineBuilder
1010 .setRenderSize (tcu::IVec2(m_colorImageExtent.width, m_colorImageExtent.height))
1011 .setVertexInputSingleAttribute (m_vertices.getVertexFormat(), m_vertices.getVertexStride())
1012 .setShader (vk, device, VK_SHADER_STAGE_VERTEX_BIT, context.getBinaryCollection().get(shaderPrefix + "vert"), DE_NULL)
1013 .setShader (vk, device, VK_SHADER_STAGE_FRAGMENT_BIT, context.getBinaryCollection().get(shaderPrefix + "frag"), DE_NULL);
1014
1015 if (requiredStages & (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT))
1016 pipelineBuilder
1017 .setPatchControlPoints (m_vertices.getNumVertices())
1018 .setShader (vk, device, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT, context.getBinaryCollection().get(shaderPrefix + "tesc"), DE_NULL)
1019 .setShader (vk, device, VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT, context.getBinaryCollection().get(shaderPrefix + "tese"), DE_NULL);
1020
1021 if (requiredStages & VK_SHADER_STAGE_GEOMETRY_BIT)
1022 pipelineBuilder
1023 .setShader (vk, device, VK_SHADER_STAGE_GEOMETRY_BIT, context.getBinaryCollection().get(shaderPrefix + "geom"), DE_NULL);
1024
1025 m_pipeline = pipelineBuilder.build(vk, device, *m_pipelineLayout, *m_renderPass, context.getPipelineCacheData());
1026 }
1027
void recordCommands (OperationContext& context, const VkCommandBuffer cmdBuffer, const VkDescriptorSet descriptorSet)
1029 {
1030 const DeviceInterface& vk = context.getDeviceInterface();
1031
1032 // Change color attachment image layout
1033 {
1034 const VkImageMemoryBarrier colorAttachmentLayoutBarrier = makeImageMemoryBarrier(
1035 (VkAccessFlags)0, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
1036 VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
1037 **m_colorAttachmentImage, m_colorImageSubresourceRange);
1038
1039 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, (VkDependencyFlags)0,
1040 0u, DE_NULL, 0u, DE_NULL, 1u, &colorAttachmentLayoutBarrier);
1041 }
1042
1043 {
1044 const VkRect2D renderArea = makeRect2D(m_colorImageExtent);
1045 const tcu::Vec4 clearColor = tcu::Vec4(0.0f, 0.0f, 0.0f, 1.0f);
1046
1047 beginRenderPass(vk, cmdBuffer, *m_renderPass, *m_framebuffer, renderArea, clearColor);
1048 }
1049
1050 vk.cmdBindPipeline(cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *m_pipeline);
1051 vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *m_pipelineLayout, 0u, 1u, &descriptorSet, 0u, DE_NULL);
1052 {
1053 const VkDeviceSize vertexBufferOffset = 0ull;
1054 const VkBuffer vertexBuffer = m_vertices.getVertexBuffer();
1055 vk.cmdBindVertexBuffers(cmdBuffer, 0u, 1u, &vertexBuffer, &vertexBufferOffset);
1056 }
1057
1058 vk.cmdDraw(cmdBuffer, m_vertices.getNumVertices(), 1u, 0u, 0u);
1059 endRenderPass(vk, cmdBuffer);
1060 }
1061
1062 private:
1063 const VertexGrid m_vertices;
1064 VkFormat m_colorFormat;
1065 de::MovePtr<Image> m_colorAttachmentImage;
1066 Move<VkImageView> m_colorAttachmentView;
1067 VkExtent3D m_colorImageExtent;
1068 VkImageSubresourceRange m_colorImageSubresourceRange;
1069 Move<VkRenderPass> m_renderPass;
1070 Move<VkFramebuffer> m_framebuffer;
1071 Move<VkPipelineLayout> m_pipelineLayout;
1072 Move<VkPipeline> m_pipeline;
1073 };
1074
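//! Compute pipeline dispatched either directly or through an indirect buffer.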
1075 class ComputePipeline : public Pipeline
1076 {
1077 public:
ComputePipeline (OperationContext& context, const DispatchCall dispatchCall, const std::string& shaderPrefix, const VkDescriptorSetLayout descriptorSetLayout)
1079 : m_dispatchCall (dispatchCall)
1080 {
1081 const DeviceInterface& vk = context.getDeviceInterface();
1082 const VkDevice device = context.getDevice();
1083 Allocator& allocator = context.getAllocator();
1084
1085 if (m_dispatchCall == DISPATCH_CALL_DISPATCH_INDIRECT)
1086 {
1087 m_indirectBuffer = de::MovePtr<Buffer>(new Buffer(vk, device, allocator,
1088 makeBufferCreateInfo(sizeof(VkDispatchIndirectCommand), VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT), MemoryRequirement::HostVisible));
1089
1090 const Allocation& alloc = m_indirectBuffer->getAllocation();
1091 VkDispatchIndirectCommand* const pIndirectCommand = static_cast<VkDispatchIndirectCommand*>(alloc.getHostPtr());
1092
1093 pIndirectCommand->x = 1u;
1094 pIndirectCommand->y = 1u;
1095 pIndirectCommand->z = 1u;
1096
1097 flushAlloc(vk, device, alloc);
1098 }
1099
1100 const Unique<VkShaderModule> shaderModule(createShaderModule(vk, device, context.getBinaryCollection().get(shaderPrefix + "comp"), (VkShaderModuleCreateFlags)0));
1101
1102 m_pipelineLayout = makePipelineLayout(vk, device, descriptorSetLayout);
1103 m_pipeline = makeComputePipeline(vk, device, *m_pipelineLayout, *shaderModule, DE_NULL, context.getPipelineCacheData());
1104 }
1105
void recordCommands (OperationContext& context, const VkCommandBuffer cmdBuffer, const VkDescriptorSet descriptorSet)
1107 {
1108 const DeviceInterface& vk = context.getDeviceInterface();
1109
1110 vk.cmdBindPipeline(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *m_pipeline);
1111 vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *m_pipelineLayout, 0u, 1u, &descriptorSet, 0u, DE_NULL);
1112
1113 if (m_dispatchCall == DISPATCH_CALL_DISPATCH_INDIRECT)
1114 vk.cmdDispatchIndirect(cmdBuffer, **m_indirectBuffer, 0u);
1115 else
1116 vk.cmdDispatch(cmdBuffer, 1u, 1u, 1u);
1117 }
1118
1119 private:
1120 const DispatchCall m_dispatchCall;
1121 de::MovePtr<Buffer> m_indirectBuffer;
1122 Move<VkPipelineLayout> m_pipelineLayout;
1123 Move<VkPipeline> m_pipeline;
1124 };
1125
1126 //! Read/write operation on a UBO/SSBO in graphics/compute pipeline.
1127 class BufferImplementation : public Operation
1128 {
1129 public:
BufferImplementation (OperationContext& context,
1131 Resource& resource,
1132 const VkShaderStageFlagBits stage,
1133 const BufferType bufferType,
1134 const std::string& shaderPrefix,
1135 const AccessMode mode,
1136 const PipelineType pipelineType,
1137 const DispatchCall dispatchCall)
1138 : m_context (context)
1139 , m_resource (resource)
1140 , m_stage (stage)
1141 , m_pipelineStage (pipelineStageFlagsFromShaderStageFlagBits(m_stage))
1142 , m_bufferType (bufferType)
1143 , m_mode (mode)
1144 , m_dispatchCall (dispatchCall)
1145 {
1146 requireFeaturesForSSBOAccess (m_context, m_stage);
1147
1148 const DeviceInterface& vk = m_context.getDeviceInterface();
1149 const VkDevice device = m_context.getDevice();
1150 Allocator& allocator = m_context.getAllocator();
1151
1152 m_hostBuffer = de::MovePtr<Buffer>(new Buffer(
1153 vk, device, allocator, makeBufferCreateInfo(m_resource.getBuffer().size, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT), MemoryRequirement::HostVisible));
1154
1155 // Init host buffer data
1156 {
1157 const Allocation& alloc = m_hostBuffer->getAllocation();
1158 if (m_mode == ACCESS_MODE_READ)
1159 deMemset(alloc.getHostPtr(), 0, static_cast<size_t>(m_resource.getBuffer().size));
1160 else
1161 fillPattern(alloc.getHostPtr(), m_resource.getBuffer().size);
1162 flushAlloc(vk, device, alloc);
1163 }
1164
1165 // Prepare descriptors
1166 {
1167 const VkDescriptorType bufferDescriptorType = (m_bufferType == BUFFER_TYPE_UNIFORM ? VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER : VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
1168
1169 m_descriptorSetLayout = DescriptorSetLayoutBuilder()
1170 .addSingleBinding(bufferDescriptorType, m_stage)
1171 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, m_stage)
1172 .build(vk, device);
1173
1174 m_descriptorPool = DescriptorPoolBuilder()
1175 .addType(bufferDescriptorType)
1176 .addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER)
1177 .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
1178
1179 m_descriptorSet = makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout);
1180
1181 const VkDescriptorBufferInfo bufferInfo = makeDescriptorBufferInfo(m_resource.getBuffer().handle, m_resource.getBuffer().offset, m_resource.getBuffer().size);
1182 const VkDescriptorBufferInfo hostBufferInfo = makeDescriptorBufferInfo(**m_hostBuffer, 0u, m_resource.getBuffer().size);
1183
1184 if (m_mode == ACCESS_MODE_READ)
1185 {
1186 DescriptorSetUpdateBuilder()
1187 .writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), bufferDescriptorType, &bufferInfo)
1188 .writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &hostBufferInfo)
1189 .update(vk, device);
1190 }
1191 else
1192 {
1193 DescriptorSetUpdateBuilder()
1194 .writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &hostBufferInfo)
1195 .writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &bufferInfo)
1196 .update(vk, device);
1197 }
1198 }
1199
1200 // Create pipeline
1201 m_pipeline = (pipelineType == PIPELINE_TYPE_GRAPHICS ? de::MovePtr<Pipeline>(new GraphicsPipeline(context, stage, shaderPrefix, *m_descriptorSetLayout))
1202 : de::MovePtr<Pipeline>(new ComputePipeline(context, m_dispatchCall, shaderPrefix, *m_descriptorSetLayout)));
1203 }
1204
void recordCommands (const VkCommandBuffer cmdBuffer)
1206 {
1207 m_pipeline->recordCommands(m_context, cmdBuffer, *m_descriptorSet);
1208
1209 // Post draw/dispatch commands
1210
1211 if (m_mode == ACCESS_MODE_READ)
1212 {
1213 const DeviceInterface& vk = m_context.getDeviceInterface();
1214
1215 // Insert a barrier so data written by the shader is available to the host
1216 const VkBufferMemoryBarrier barrier = makeBufferMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT, **m_hostBuffer, 0u, m_resource.getBuffer().size);
1217 vk.cmdPipelineBarrier(cmdBuffer, m_pipelineStage, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0, 0u, DE_NULL, 1u, &barrier, 0u, DE_NULL);
1218 }
1219 }
1220
SyncInfo getSyncInfo (void) const
1222 {
1223 const VkAccessFlags accessFlags = (m_mode == ACCESS_MODE_READ ? (m_bufferType == BUFFER_TYPE_UNIFORM ? VK_ACCESS_UNIFORM_READ_BIT
1224 : VK_ACCESS_SHADER_READ_BIT)
1225 : VK_ACCESS_SHADER_WRITE_BIT);
1226 const SyncInfo syncInfo =
1227 {
1228 m_pipelineStage, // VkPipelineStageFlags stageMask;
1229 accessFlags, // VkAccessFlags accessMask;
1230 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout imageLayout;
1231 };
1232 return syncInfo;
1233 }
1234
Data getData (void) const
1236 {
1237 return getHostBufferData(m_context, *m_hostBuffer, m_resource.getBuffer().size);
1238 }
1239
1240 private:
1241 OperationContext& m_context;
1242 Resource& m_resource;
1243 const VkShaderStageFlagBits m_stage;
1244 const VkPipelineStageFlags m_pipelineStage;
1245 const BufferType m_bufferType;
1246 const AccessMode m_mode;
1247 const DispatchCall m_dispatchCall;
1248 de::MovePtr<Buffer> m_hostBuffer;
1249 Move<VkDescriptorPool> m_descriptorPool;
1250 Move<VkDescriptorSetLayout> m_descriptorSetLayout;
1251 Move<VkDescriptorSet> m_descriptorSet;
1252 de::MovePtr<Pipeline> m_pipeline;
1253 };
1254
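//! Read/write operation on a storage image in graphics/compute pipeline.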
1255 class ImageImplementation : public Operation
1256 {
1257 public:
ImageImplementation (OperationContext& context,
1259 Resource& resource,
1260 const VkShaderStageFlagBits stage,
1261 const std::string& shaderPrefix,
1262 const AccessMode mode,
1263 const PipelineType pipelineType,
1264 const DispatchCall dispatchCall)
1265 : m_context (context)
1266 , m_resource (resource)
1267 , m_stage (stage)
1268 , m_pipelineStage (pipelineStageFlagsFromShaderStageFlagBits(m_stage))
1269 , m_mode (mode)
1270 , m_dispatchCall (dispatchCall)
1271 , m_hostBufferSizeBytes (getPixelBufferSize(m_resource.getImage().format, m_resource.getImage().extent))
1272 {
1273 const DeviceInterface& vk = m_context.getDeviceInterface();
1274 const InstanceInterface& vki = m_context.getInstanceInterface();
1275 const VkDevice device = m_context.getDevice();
1276 const VkPhysicalDevice physDevice = m_context.getPhysicalDevice();
1277 Allocator& allocator = m_context.getAllocator();
1278
1279 // Image stores are always required, in either access mode.
1280 requireFeaturesForSSBOAccess(m_context, m_stage);
1281
1282 // Some storage image formats require additional capability.
1283 if (isStorageImageExtendedFormat(m_resource.getImage().format))
1284 requireFeatures(vki, physDevice, FEATURE_SHADER_STORAGE_IMAGE_EXTENDED_FORMATS);
1285
1286 m_hostBuffer = de::MovePtr<Buffer>(new Buffer(
1287 vk, device, allocator, makeBufferCreateInfo(m_hostBufferSizeBytes, VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT),
1288 MemoryRequirement::HostVisible));
1289
1290 // Init host buffer data
1291 {
1292 const Allocation& alloc = m_hostBuffer->getAllocation();
1293 if (m_mode == ACCESS_MODE_READ)
1294 deMemset(alloc.getHostPtr(), 0, static_cast<size_t>(m_hostBufferSizeBytes));
1295 else
1296 fillPattern(alloc.getHostPtr(), m_hostBufferSizeBytes);
1297 flushAlloc(vk, device, alloc);
1298 }
1299
1300 // Image resources
1301 {
1302 m_image = de::MovePtr<Image>(new Image(vk, device, allocator,
1303 makeImageCreateInfo(m_resource.getImage().imageType, m_resource.getImage().extent, m_resource.getImage().format,
1304 VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_STORAGE_BIT),
1305 MemoryRequirement::Any));
1306
1307 if (m_mode == ACCESS_MODE_READ)
1308 {
1309 m_srcImage = &m_resource.getImage().handle;
1310 m_dstImage = &(**m_image);
1311 }
1312 else
1313 {
1314 m_srcImage = &(**m_image);
1315 m_dstImage = &m_resource.getImage().handle;
1316 }
1317
1318 const VkImageViewType viewType = getImageViewType(m_resource.getImage().imageType);
1319
1320 m_srcImageView = makeImageView(vk, device, *m_srcImage, viewType, m_resource.getImage().format, m_resource.getImage().subresourceRange);
1321 m_dstImageView = makeImageView(vk, device, *m_dstImage, viewType, m_resource.getImage().format, m_resource.getImage().subresourceRange);
1322 }
1323
1324 // Prepare descriptors
1325 {
1326 m_descriptorSetLayout = DescriptorSetLayoutBuilder()
1327 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, m_stage)
1328 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, m_stage)
1329 .build(vk, device);
1330
1331 m_descriptorPool = DescriptorPoolBuilder()
1332 .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE)
1333 .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE)
1334 .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
1335
1336 m_descriptorSet = makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout);
1337
1338 const VkDescriptorImageInfo srcImageInfo = makeDescriptorImageInfo(DE_NULL, *m_srcImageView, VK_IMAGE_LAYOUT_GENERAL);
1339 const VkDescriptorImageInfo dstImageInfo = makeDescriptorImageInfo(DE_NULL, *m_dstImageView, VK_IMAGE_LAYOUT_GENERAL);
1340
1341 DescriptorSetUpdateBuilder()
1342 .writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &srcImageInfo)
1343 .writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &dstImageInfo)
1344 .update(vk, device);
1345 }
1346
1347 // Create pipeline
1348 m_pipeline = (pipelineType == PIPELINE_TYPE_GRAPHICS ? de::MovePtr<Pipeline>(new GraphicsPipeline(context, stage, shaderPrefix, *m_descriptorSetLayout))
1349 : de::MovePtr<Pipeline>(new ComputePipeline(context, m_dispatchCall, shaderPrefix, *m_descriptorSetLayout)));
1350 }
1351
void recordCommands (const VkCommandBuffer cmdBuffer)
1353 {
1354 const DeviceInterface& vk = m_context.getDeviceInterface();
1355 const VkBufferImageCopy bufferCopyRegion = makeBufferImageCopy(m_resource.getImage().subresourceLayers, m_resource.getImage().extent);
1356
1357 // Destination image layout
1358 {
1359 const VkImageMemoryBarrier barrier = makeImageMemoryBarrier(
1360 (VkAccessFlags)0, VK_ACCESS_SHADER_WRITE_BIT,
1361 VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL,
1362 *m_dstImage, m_resource.getImage().subresourceRange);
1363
1364 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, m_pipelineStage, (VkDependencyFlags)0,
1365 0u, DE_NULL, 0u, DE_NULL, 1u, &barrier);
1366 }
1367
1368 // In write mode, source image must be filled with data.
1369 if (m_mode == ACCESS_MODE_WRITE)
1370 {
1371 // Layout for transfer
1372 {
1373 const VkImageMemoryBarrier barrier = makeImageMemoryBarrier(
1374 (VkAccessFlags)0, VK_ACCESS_TRANSFER_WRITE_BIT,
1375 VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1376 *m_srcImage, m_resource.getImage().subresourceRange);
1377
1378 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0,
1379 0u, DE_NULL, 0u, DE_NULL, 1u, &barrier);
1380 }
1381
1382 // Host buffer -> Src image
1383 vk.cmdCopyBufferToImage(cmdBuffer, **m_hostBuffer, *m_srcImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1u, &bufferCopyRegion);
1384
1385 // Layout for shader reading
1386 {
1387 const VkImageMemoryBarrier barrier = makeImageMemoryBarrier(
1388 VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT,
1389 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL,
1390 *m_srcImage, m_resource.getImage().subresourceRange);
1391
1392 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, m_pipelineStage, (VkDependencyFlags)0,
1393 0u, DE_NULL, 0u, DE_NULL, 1u, &barrier);
1394 }
1395 }
1396
1397 // Execute shaders
1398
1399 m_pipeline->recordCommands(m_context, cmdBuffer, *m_descriptorSet);
1400
1401 // Post draw/dispatch commands
1402
1403 if (m_mode == ACCESS_MODE_READ)
1404 {
1405 // Layout for transfer
1406 {
1407 const VkImageMemoryBarrier barrier = makeImageMemoryBarrier(
1408 VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT,
1409 VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
1410 *m_dstImage, m_resource.getImage().subresourceRange);
1411
1412 vk.cmdPipelineBarrier(cmdBuffer, m_pipelineStage, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0,
1413 0u, DE_NULL, 0u, DE_NULL, 1u, &barrier);
1414 }
1415
1416 // Dst image -> Host buffer
1417 vk.cmdCopyImageToBuffer(cmdBuffer, *m_dstImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, **m_hostBuffer, 1u, &bufferCopyRegion);
1418
1419 // Insert a barrier so the data copied into the host buffer is available to the host
1420 {
1421 const VkBufferMemoryBarrier barrier = makeBufferMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT, **m_hostBuffer, 0u, m_hostBufferSizeBytes);
1422 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0, 0u, DE_NULL, 1u, &barrier, 0u, DE_NULL);
1423 }
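// Note: the buffer barrier makes the copied data available to the host domain, but the host must
// still wait for the submission to complete (e.g. on a fence) and, for non-coherent allocations,
// invalidate the mapped range before reading.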
1424 }
1425 }
1426
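// Describes how the synchronization framework should synchronize against this operation: the pipeline
// stage and access mask of the resource access, and the image layout the resource is left in.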
1427 SyncInfo getSyncInfo (void) const
1428 {
1429 const VkAccessFlags accessFlags = (m_mode == ACCESS_MODE_READ ? VK_ACCESS_SHADER_READ_BIT : VK_ACCESS_SHADER_WRITE_BIT);
1430 const SyncInfo syncInfo =
1431 {
1432 m_pipelineStage, // VkPipelineStageFlags stageMask;
1433 accessFlags, // VkAccessFlags accessMask;
1434 VK_IMAGE_LAYOUT_GENERAL, // VkImageLayout imageLayout;
1435 };
1436 return syncInfo;
1437 }
1438
1439 Data getData (void) const
1440 {
1441 return getHostBufferData(m_context, *m_hostBuffer, m_hostBufferSizeBytes);
1442 }
1443
1444 private:
1445 OperationContext& m_context;
1446 Resource& m_resource;
1447 const VkShaderStageFlagBits m_stage;
1448 const VkPipelineStageFlags m_pipelineStage;
1449 const AccessMode m_mode;
1450 const DispatchCall m_dispatchCall;
1451 const VkDeviceSize m_hostBufferSizeBytes;
1452 de::MovePtr<Buffer> m_hostBuffer;
1453 de::MovePtr<Image> m_image; //! Additional image used as src or dst depending on operation mode.
1454 const VkImage* m_srcImage;
1455 const VkImage* m_dstImage;
1456 Move<VkImageView> m_srcImageView;
1457 Move<VkImageView> m_dstImageView;
1458 Move<VkDescriptorPool> m_descriptorPool;
1459 Move<VkDescriptorSetLayout> m_descriptorSetLayout;
1460 Move<VkDescriptorSet> m_descriptorSet;
1461 de::MovePtr<Pipeline> m_pipeline;
1462 };
1463
1464 //! Create generic passthrough shaders with bits of custom code inserted in a specific shader stage.
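//! For example, requesting VK_SHADER_STAGE_FRAGMENT_BIT is expected to emit a passthrough "<prefix>vert"
//! shader plus a "<prefix>frag" shader that carries declCode/mainCode; the exact set of emitted stages
//! comes from getRequiredStages().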
1465 void initPassthroughPrograms (SourceCollections& programCollection,
1466 const std::string& shaderPrefix,
1467 const std::string& declCode,
1468 const std::string& mainCode,
1469 const VkShaderStageFlagBits stage)
1470 {
1471 const VkShaderStageFlags requiredStages = getRequiredStages(stage);
1472
1473 if (requiredStages & VK_SHADER_STAGE_VERTEX_BIT)
1474 {
1475 std::ostringstream src;
1476 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
1477 << "\n"
1478 << "layout(location = 0) in vec4 v_in_position;\n"
1479 << "\n"
1480 << "out " << s_perVertexBlock << ";\n"
1481 << "\n"
1482 << (stage & VK_SHADER_STAGE_VERTEX_BIT ? declCode + "\n" : "")
1483 << "void main (void)\n"
1484 << "{\n"
1485 << " gl_Position = v_in_position;\n"
1486 << (stage & VK_SHADER_STAGE_VERTEX_BIT ? mainCode : "")
1487 << "}\n";
1488
1489 programCollection.glslSources.add(shaderPrefix + "vert") << glu::VertexSource(src.str());
1490 }
1491
1492 if (requiredStages & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT)
1493 {
1494 std::ostringstream src;
1495 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
1496 << "\n"
1497 << "layout(vertices = 3) out;\n"
1498 << "\n"
1499 << "in " << s_perVertexBlock << " gl_in[gl_MaxPatchVertices];\n"
1500 << "\n"
1501 << "out " << s_perVertexBlock << " gl_out[];\n"
1502 << "\n"
1503 << (stage & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT ? declCode + "\n" : "")
1504 << "void main (void)\n"
1505 << "{\n"
1506 << " gl_TessLevelInner[0] = 1.0;\n"
1507 << " gl_TessLevelInner[1] = 1.0;\n"
1508 << "\n"
1509 << " gl_TessLevelOuter[0] = 1.0;\n"
1510 << " gl_TessLevelOuter[1] = 1.0;\n"
1511 << " gl_TessLevelOuter[2] = 1.0;\n"
1512 << " gl_TessLevelOuter[3] = 1.0;\n"
1513 << "\n"
1514 << " gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;\n"
1515 << (stage & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT ? "\n" + mainCode : "")
1516 << "}\n";
1517
1518 programCollection.glslSources.add(shaderPrefix + "tesc") << glu::TessellationControlSource(src.str());
1519 }
1520
1521 if (requiredStages & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)
1522 {
1523 std::ostringstream src;
1524 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
1525 << "\n"
1526 << "layout(triangles, equal_spacing, ccw) in;\n"
1527 << "\n"
1528 << "in " << s_perVertexBlock << " gl_in[gl_MaxPatchVertices];\n"
1529 << "\n"
1530 << "out " << s_perVertexBlock << ";\n"
1531 << "\n"
1532 << (stage & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT ? declCode + "\n" : "")
1533 << "void main (void)\n"
1534 << "{\n"
1535 << " vec3 px = gl_TessCoord.x * gl_in[0].gl_Position.xyz;\n"
1536 << " vec3 py = gl_TessCoord.y * gl_in[1].gl_Position.xyz;\n"
1537 << " vec3 pz = gl_TessCoord.z * gl_in[2].gl_Position.xyz;\n"
1538 << " gl_Position = vec4(px + py + pz, 1.0);\n"
1539 << (stage & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT ? mainCode : "")
1540 << "}\n";
1541
1542 programCollection.glslSources.add(shaderPrefix + "tese") << glu::TessellationEvaluationSource(src.str());
1543 }
1544
1545 if (requiredStages & VK_SHADER_STAGE_GEOMETRY_BIT)
1546 {
1547 std::ostringstream src;
1548 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
1549 << "\n"
1550 << "layout(triangles) in;\n"
1551 << "layout(triangle_strip, max_vertices = 3) out;\n"
1552 << "\n"
1553 << "in " << s_perVertexBlock << " gl_in[];\n"
1554 << "\n"
1555 << "out " << s_perVertexBlock << ";\n"
1556 << "\n"
1557 << (stage & VK_SHADER_STAGE_GEOMETRY_BIT ? declCode + "\n" : "")
1558 << "void main (void)\n"
1559 << "{\n"
1560 << " gl_Position = gl_in[0].gl_Position;\n"
1561 << " EmitVertex();\n"
1562 << "\n"
1563 << " gl_Position = gl_in[1].gl_Position;\n"
1564 << " EmitVertex();\n"
1565 << "\n"
1566 << " gl_Position = gl_in[2].gl_Position;\n"
1567 << " EmitVertex();\n"
1568 << (stage & VK_SHADER_STAGE_GEOMETRY_BIT ? "\n" + mainCode : "")
1569 << "}\n";
1570
1571 programCollection.glslSources.add(shaderPrefix + "geom") << glu::GeometrySource(src.str());
1572 }
1573
1574 if (requiredStages & VK_SHADER_STAGE_FRAGMENT_BIT)
1575 {
1576 std::ostringstream src;
1577 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
1578 << "\n"
1579 << "layout(location = 0) out vec4 o_color;\n"
1580 << "\n"
1581 << (stage & VK_SHADER_STAGE_FRAGMENT_BIT ? declCode + "\n" : "")
1582 << "void main (void)\n"
1583 << "{\n"
1584 << " o_color = vec4(1.0);\n"
1585 << (stage & VK_SHADER_STAGE_FRAGMENT_BIT ? "\n" + mainCode : "")
1586 << "}\n";
1587
1588 programCollection.glslSources.add(shaderPrefix + "frag") << glu::FragmentSource(src.str());
1589 }
1590
1591 if (requiredStages & VK_SHADER_STAGE_COMPUTE_BIT)
1592 {
1593 std::ostringstream src;
1594 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
1595 << "\n"
1596 << "layout(local_size_x = 1) in;\n"
1597 << "\n"
1598 << (stage & VK_SHADER_STAGE_COMPUTE_BIT ? declCode + "\n" : "")
1599 << "void main (void)\n"
1600 << "{\n"
1601 << (stage & VK_SHADER_STAGE_COMPUTE_BIT ? mainCode : "")
1602 << "}\n";
1603
1604 programCollection.glslSources.add(shaderPrefix + "comp") << glu::ComputeSource(src.str());
1605 }
1606 }
1607
1608 class BufferSupport : public OperationSupport
1609 {
1610 public:
1611 BufferSupport (const ResourceDescription& resourceDesc,
1612 const BufferType bufferType,
1613 const AccessMode mode,
1614 const VkShaderStageFlagBits stage,
1615 const DispatchCall dispatchCall = DISPATCH_CALL_DISPATCH)
1616 : m_resourceDesc (resourceDesc)
1617 , m_bufferType (bufferType)
1618 , m_mode (mode)
1619 , m_stage (stage)
1620 , m_shaderPrefix (std::string(m_mode == ACCESS_MODE_READ ? "read_" : "write_") + (m_bufferType == BUFFER_TYPE_UNIFORM ? "ubo_" : "ssbo_"))
1621 , m_dispatchCall (dispatchCall)
1622 {
1623 DE_ASSERT(m_resourceDesc.type == RESOURCE_TYPE_BUFFER);
1624 DE_ASSERT(m_bufferType == BUFFER_TYPE_UNIFORM || m_bufferType == BUFFER_TYPE_STORAGE);
1625 DE_ASSERT(m_mode == ACCESS_MODE_READ || m_mode == ACCESS_MODE_WRITE);
1626 DE_ASSERT(m_mode == ACCESS_MODE_READ || m_bufferType == BUFFER_TYPE_STORAGE);
1627 DE_ASSERT(m_bufferType != BUFFER_TYPE_UNIFORM || m_resourceDesc.size.x() <= MAX_UBO_RANGE);
1628 DE_ASSERT(m_dispatchCall == DISPATCH_CALL_DISPATCH || m_dispatchCall == DISPATCH_CALL_DISPATCH_INDIRECT);
1629
1630 assertValidShaderStage(m_stage);
1631 }
1632
1633 void initPrograms (SourceCollections& programCollection) const
1634 {
1635 DE_ASSERT((m_resourceDesc.size.x() % sizeof(tcu::UVec4)) == 0);
1636
1637 const std::string bufferTypeStr = (m_bufferType == BUFFER_TYPE_UNIFORM ? "uniform" : "buffer");
1638 const int numVecElements = static_cast<int>(m_resourceDesc.size.x() / sizeof(tcu::UVec4)); // std140 must be aligned to a multiple of 16
1639
1640 std::ostringstream declSrc;
1641 declSrc << "layout(set = 0, binding = 0, std140) readonly " << bufferTypeStr << " Input {\n"
1642 << " uvec4 data[" << numVecElements << "];\n"
1643 << "} b_in;\n"
1644 << "\n"
1645 << "layout(set = 0, binding = 1, std140) writeonly buffer Output {\n"
1646 << " uvec4 data[" << numVecElements << "];\n"
1647 << "} b_out;\n";
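// With std140 the uvec4 elements have a 16-byte stride, which is why the resource size is required to
// be a multiple of sizeof(tcu::UVec4) (see the assertion above).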
1648
1649 std::ostringstream copySrc;
1650 copySrc << " for (int i = 0; i < " << numVecElements << "; ++i) {\n"
1651 << " b_out.data[i] = b_in.data[i];\n"
1652 << " }\n";
1653
1654 initPassthroughPrograms(programCollection, m_shaderPrefix, declSrc.str(), copySrc.str(), m_stage);
1655 }
1656
1657 deUint32 getResourceUsageFlags (void) const
1658 {
1659 return (m_bufferType == BUFFER_TYPE_UNIFORM ? VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT : VK_BUFFER_USAGE_STORAGE_BUFFER_BIT);
1660 }
1661
1662 VkQueueFlags getQueueFlags (const OperationContext& context) const
1663 {
1664 DE_UNREF(context);
1665 return (m_stage == VK_SHADER_STAGE_COMPUTE_BIT ? VK_QUEUE_COMPUTE_BIT : VK_QUEUE_GRAPHICS_BIT);
1666 }
1667
1668 de::MovePtr<Operation> build (OperationContext& context, Resource& resource) const
1669 {
1670 if (m_stage & VK_SHADER_STAGE_COMPUTE_BIT)
1671 return de::MovePtr<Operation>(new BufferImplementation(context, resource, m_stage, m_bufferType, m_shaderPrefix, m_mode, PIPELINE_TYPE_COMPUTE, m_dispatchCall));
1672 else
1673 return de::MovePtr<Operation>(new BufferImplementation(context, resource, m_stage, m_bufferType, m_shaderPrefix, m_mode, PIPELINE_TYPE_GRAPHICS, m_dispatchCall));
1674 }
1675
1676 private:
1677 const ResourceDescription m_resourceDesc;
1678 const BufferType m_bufferType;
1679 const AccessMode m_mode;
1680 const VkShaderStageFlagBits m_stage;
1681 const std::string m_shaderPrefix;
1682 const DispatchCall m_dispatchCall;
1683 };
1684
1685 class ImageSupport : public OperationSupport
1686 {
1687 public:
1688 ImageSupport (const ResourceDescription& resourceDesc,
1689 const AccessMode mode,
1690 const VkShaderStageFlagBits stage,
1691 const DispatchCall dispatchCall = DISPATCH_CALL_DISPATCH)
1692 : m_resourceDesc (resourceDesc)
1693 , m_mode (mode)
1694 , m_stage (stage)
1695 , m_shaderPrefix (m_mode == ACCESS_MODE_READ ? "read_image_" : "write_image_")
1696 , m_dispatchCall (dispatchCall)
1697 {
1698 DE_ASSERT(m_resourceDesc.type == RESOURCE_TYPE_IMAGE);
1699 DE_ASSERT(m_mode == ACCESS_MODE_READ || m_mode == ACCESS_MODE_WRITE);
1700 DE_ASSERT(m_dispatchCall == DISPATCH_CALL_DISPATCH || m_dispatchCall == DISPATCH_CALL_DISPATCH_INDIRECT);
1701
1702 assertValidShaderStage(m_stage);
1703 }
1704
1705 void initPrograms (SourceCollections& programCollection) const
1706 {
1707 const std::string imageFormat = getShaderImageFormatQualifier(m_resourceDesc.imageFormat);
1708 const std::string imageType = getShaderImageType(m_resourceDesc.imageFormat, m_resourceDesc.imageType);
1709
1710 std::ostringstream declSrc;
1711 declSrc << "layout(set = 0, binding = 0, " << imageFormat << ") readonly uniform " << imageType << " srcImg;\n"
1712 << "layout(set = 0, binding = 1, " << imageFormat << ") writeonly uniform " << imageType << " dstImg;\n";
1713
1714 std::ostringstream mainSrc;
1715 if (m_resourceDesc.imageType == VK_IMAGE_TYPE_1D)
1716 mainSrc << " for (int x = 0; x < " << m_resourceDesc.size.x() << "; ++x)\n"
1717 << " imageStore(dstImg, x, imageLoad(srcImg, x));\n";
1718 else if (m_resourceDesc.imageType == VK_IMAGE_TYPE_2D)
1719 mainSrc << " for (int y = 0; y < " << m_resourceDesc.size.y() << "; ++y)\n"
1720 << " for (int x = 0; x < " << m_resourceDesc.size.x() << "; ++x)\n"
1721 << " imageStore(dstImg, ivec2(x, y), imageLoad(srcImg, ivec2(x, y)));\n";
1722 else if (m_resourceDesc.imageType == VK_IMAGE_TYPE_3D)
1723 mainSrc << " for (int z = 0; z < " << m_resourceDesc.size.z() << "; ++z)\n"
1724 << " for (int y = 0; y < " << m_resourceDesc.size.y() << "; ++y)\n"
1725 << " for (int x = 0; x < " << m_resourceDesc.size.x() << "; ++x)\n"
1726 << " imageStore(dstImg, ivec3(x, y, z), imageLoad(srcImg, ivec3(x, y, z)));\n";
1727 else
1728 DE_ASSERT(0);
1729
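// Each invocation of the selected shader stage copies the entire image texel by texel. The copy is
// idempotent, so the result does not depend on how many invocations of that stage actually run.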
1730 initPassthroughPrograms(programCollection, m_shaderPrefix, declSrc.str(), mainSrc.str(), m_stage);
1731 }
1732
1733 deUint32 getResourceUsageFlags (void) const
1734 {
1735 return VK_IMAGE_USAGE_STORAGE_BIT;
1736 }
1737
1738 VkQueueFlags getQueueFlags (const OperationContext& context) const
1739 {
1740 DE_UNREF(context);
1741 return (m_stage == VK_SHADER_STAGE_COMPUTE_BIT ? VK_QUEUE_COMPUTE_BIT : VK_QUEUE_GRAPHICS_BIT);
1742 }
1743
1744 de::MovePtr<Operation> build (OperationContext& context, Resource& resource) const
1745 {
1746 if (m_stage & VK_SHADER_STAGE_COMPUTE_BIT)
1747 return de::MovePtr<Operation>(new ImageImplementation(context, resource, m_stage, m_shaderPrefix, m_mode, PIPELINE_TYPE_COMPUTE, m_dispatchCall));
1748 else
1749 return de::MovePtr<Operation>(new ImageImplementation(context, resource, m_stage, m_shaderPrefix, m_mode, PIPELINE_TYPE_GRAPHICS, m_dispatchCall));
1750 }
1751
1752 private:
1753 const ResourceDescription m_resourceDesc;
1754 const AccessMode m_mode;
1755 const VkShaderStageFlagBits m_stage;
1756 const std::string m_shaderPrefix;
1757 const DispatchCall m_dispatchCall;
1758 };
1759
1760 } // ShaderAccess ns
1761
1762 namespace CopyBufferToImage
1763 {
1764
1765 class WriteImplementation : public Operation
1766 {
1767 public:
1768 WriteImplementation (OperationContext& context, Resource& resource)
1769 : m_context (context)
1770 , m_resource (resource)
1771 , m_bufferSize (getPixelBufferSize(m_resource.getImage().format, m_resource.getImage().extent))
1772 {
1773 DE_ASSERT(m_resource.getType() == RESOURCE_TYPE_IMAGE);
1774
1775 const DeviceInterface& vk = m_context.getDeviceInterface();
1776 const VkDevice device = m_context.getDevice();
1777 Allocator& allocator = m_context.getAllocator();
1778
1779 m_hostBuffer = de::MovePtr<Buffer>(new Buffer(
1780 vk, device, allocator, makeBufferCreateInfo(m_bufferSize, VK_BUFFER_USAGE_TRANSFER_SRC_BIT), MemoryRequirement::HostVisible));
1781
1782 const Allocation& alloc = m_hostBuffer->getAllocation();
1783 fillPattern(alloc.getHostPtr(), m_bufferSize);
1784 flushAlloc(vk, device, alloc);
1785 }
1786
1787 void recordCommands (const VkCommandBuffer cmdBuffer)
1788 {
1789 const DeviceInterface& vk = m_context.getDeviceInterface();
1790 const VkBufferImageCopy copyRegion = makeBufferImageCopy(m_resource.getImage().subresourceLayers, m_resource.getImage().extent);
1791
1792 const VkImageMemoryBarrier layoutBarrier = makeImageMemoryBarrier(
1793 (VkAccessFlags)0, VK_ACCESS_TRANSFER_WRITE_BIT,
1794 VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1795 m_resource.getImage().handle, m_resource.getImage().subresourceRange);
1796 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0u, DE_NULL, 0u, DE_NULL, 1u, &layoutBarrier);
1797
1798 vk.cmdCopyBufferToImage(cmdBuffer, **m_hostBuffer, m_resource.getImage().handle, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1u, &copyRegion);
1799 }
1800
1801 SyncInfo getSyncInfo (void) const
1802 {
1803 const SyncInfo syncInfo =
1804 {
1805 VK_PIPELINE_STAGE_TRANSFER_BIT, // VkPipelineStageFlags stageMask;
1806 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags accessMask;
1807 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // VkImageLayout imageLayout;
1808 };
1809 return syncInfo;
1810 }
1811
1812 Data getData (void) const
1813 {
1814 return getHostBufferData(m_context, *m_hostBuffer, m_bufferSize);
1815 }
1816
1817 private:
1818 OperationContext& m_context;
1819 Resource& m_resource;
1820 de::MovePtr<Buffer> m_hostBuffer;
1821 const VkDeviceSize m_bufferSize;
1822 };
1823
1824 class ReadImplementation : public Operation
1825 {
1826 public:
1827 ReadImplementation (OperationContext& context, Resource& resource)
1828 : m_context (context)
1829 , m_resource (resource)
1830 , m_subresourceRange (makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u))
1831 , m_subresourceLayers (makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, 1u))
1832 {
1833 DE_ASSERT(m_resource.getType() == RESOURCE_TYPE_BUFFER);
1834
1835 const DeviceInterface& vk = m_context.getDeviceInterface();
1836 const VkDevice device = m_context.getDevice();
1837 Allocator& allocator = m_context.getAllocator();
1838 const VkFormat format = VK_FORMAT_R8G8B8A8_UNORM;
1839 const deUint32 pixelSize = tcu::getPixelSize(mapVkFormat(format));
1840
1841 DE_ASSERT((m_resource.getBuffer().size % pixelSize) == 0);
1842 m_imageExtent = get2DImageExtentWithSize(m_resource.getBuffer().size, pixelSize); // there may be some unused space at the end
1843
1844 // Copy destination image.
1845 m_image = de::MovePtr<Image>(new Image(
1846 vk, device, allocator, makeImageCreateInfo(VK_IMAGE_TYPE_2D, m_imageExtent, format, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT), MemoryRequirement::Any));
1847
1848 // Image data will be copied here, so it can be read on the host.
1849 m_hostBuffer = de::MovePtr<Buffer>(new Buffer(
1850 vk, device, allocator, makeBufferCreateInfo(m_resource.getBuffer().size, VK_BUFFER_USAGE_TRANSFER_DST_BIT), MemoryRequirement::HostVisible));
1851 }
1852
1853 void recordCommands (const VkCommandBuffer cmdBuffer)
1854 {
1855 const DeviceInterface& vk = m_context.getDeviceInterface();
1856 const VkBufferImageCopy copyRegion = makeBufferImageCopy(m_subresourceLayers, m_imageExtent);
1857
1858 // Resource -> Image
1859 {
1860 const VkImageMemoryBarrier layoutBarrier = makeImageMemoryBarrier(
1861 (VkAccessFlags)0, VK_ACCESS_TRANSFER_WRITE_BIT,
1862 VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1863 **m_image, m_subresourceRange);
1864 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0u, DE_NULL, 0u, DE_NULL, 1u, &layoutBarrier);
1865
1866 vk.cmdCopyBufferToImage(cmdBuffer, m_resource.getBuffer().handle, **m_image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1u, &copyRegion);
1867 }
1868 // Image -> Host buffer
1869 {
1870 const VkImageMemoryBarrier layoutBarrier = makeImageMemoryBarrier(
1871 VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT,
1872 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
1873 **m_image, m_subresourceRange);
1874 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0u, DE_NULL, 0u, DE_NULL, 1u, &layoutBarrier);
1875
1876 vk.cmdCopyImageToBuffer(cmdBuffer, **m_image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, **m_hostBuffer, 1u, &copyRegion);
1877
1878 const VkBufferMemoryBarrier barrier = makeBufferMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT, **m_hostBuffer, 0u, m_resource.getBuffer().size);
1879 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0, 0u, DE_NULL, 1u, &barrier, 0u, DE_NULL);
1880 }
1881 }
1882
1883 SyncInfo getSyncInfo (void) const
1884 {
1885 const SyncInfo syncInfo =
1886 {
1887 VK_PIPELINE_STAGE_TRANSFER_BIT, // VkPipelineStageFlags stageMask;
1888 VK_ACCESS_TRANSFER_READ_BIT, // VkAccessFlags accessMask;
1889 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout imageLayout;
1890 };
1891 return syncInfo;
1892 }
1893
1894 Data getData (void) const
1895 {
1896 return getHostBufferData(m_context, *m_hostBuffer, m_resource.getBuffer().size);
1897 }
1898
1899 private:
1900 OperationContext& m_context;
1901 Resource& m_resource;
1902 const VkImageSubresourceRange m_subresourceRange;
1903 const VkImageSubresourceLayers m_subresourceLayers;
1904 de::MovePtr<Buffer> m_hostBuffer;
1905 de::MovePtr<Image> m_image;
1906 VkExtent3D m_imageExtent;
1907 };
1908
1909 class Support : public OperationSupport
1910 {
1911 public:
1912 Support (const ResourceDescription& resourceDesc, const AccessMode mode)
1913 : m_mode (mode)
1914 , m_requiredQueueFlags (resourceDesc.type == RESOURCE_TYPE_IMAGE && isDepthStencilFormat(resourceDesc.imageFormat) ? VK_QUEUE_GRAPHICS_BIT : VK_QUEUE_TRANSFER_BIT)
1915 {
1916 // From spec:
1917 // Because depth or stencil aspect buffer to image copies may require format conversions on some implementations,
1918 // they are not supported on queues that do not support graphics.
1919
1920 DE_ASSERT(m_mode == ACCESS_MODE_READ || m_mode == ACCESS_MODE_WRITE);
1921 DE_ASSERT(m_mode == ACCESS_MODE_READ || resourceDesc.type != RESOURCE_TYPE_BUFFER);
1922 DE_ASSERT(m_mode == ACCESS_MODE_WRITE || resourceDesc.type != RESOURCE_TYPE_IMAGE);
1923 }
1924
1925 deUint32 getResourceUsageFlags (void) const
1926 {
1927 if (m_mode == ACCESS_MODE_READ)
1928 return VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
1929 else
1930 return VK_IMAGE_USAGE_TRANSFER_DST_BIT;
1931 }
1932
1933 VkQueueFlags getQueueFlags (const OperationContext& context) const
1934 {
1935 DE_UNREF(context);
1936 return m_requiredQueueFlags;
1937 }
1938
1939 de::MovePtr<Operation> build (OperationContext& context, Resource& resource) const
1940 {
1941 if (m_mode == ACCESS_MODE_READ)
1942 return de::MovePtr<Operation>(new ReadImplementation(context, resource));
1943 else
1944 return de::MovePtr<Operation>(new WriteImplementation(context, resource));
1945 }
1946
1947 private:
1948 const AccessMode m_mode;
1949 const VkQueueFlags m_requiredQueueFlags;
1950 };
1951
1952 } // CopyBufferToImage ns
1953
1954 namespace CopyImageToBuffer
1955 {
1956
1957 class WriteImplementation : public Operation
1958 {
1959 public:
1960 WriteImplementation (OperationContext& context, Resource& resource)
1961 : m_context (context)
1962 , m_resource (resource)
1963 , m_subresourceRange (makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u))
1964 , m_subresourceLayers (makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, 1u))
1965 {
1966 DE_ASSERT(m_resource.getType() == RESOURCE_TYPE_BUFFER);
1967
1968 const DeviceInterface& vk = m_context.getDeviceInterface();
1969 const VkDevice device = m_context.getDevice();
1970 Allocator& allocator = m_context.getAllocator();
1971 const VkFormat format = VK_FORMAT_R8G8B8A8_UNORM;
1972 const deUint32 pixelSize = tcu::getPixelSize(mapVkFormat(format));
1973
1974 DE_ASSERT((m_resource.getBuffer().size % pixelSize) == 0);
1975 m_imageExtent = get2DImageExtentWithSize(m_resource.getBuffer().size, pixelSize);
1976
1977 // Source data staging buffer
1978 m_hostBuffer = de::MovePtr<Buffer>(new Buffer(
1979 vk, device, allocator, makeBufferCreateInfo(m_resource.getBuffer().size, VK_BUFFER_USAGE_TRANSFER_SRC_BIT), MemoryRequirement::HostVisible));
1980
1981 const Allocation& alloc = m_hostBuffer->getAllocation();
1982 fillPattern(alloc.getHostPtr(), m_resource.getBuffer().size);
1983 flushAlloc(vk, device, alloc);
1984
1985 // Source data image
1986 m_image = de::MovePtr<Image>(new Image(
1987 vk, device, allocator, makeImageCreateInfo(VK_IMAGE_TYPE_2D, m_imageExtent, format, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT), MemoryRequirement::Any));
1988 }
1989
1990 void recordCommands (const VkCommandBuffer cmdBuffer)
1991 {
1992 const DeviceInterface& vk = m_context.getDeviceInterface();
1993 const VkBufferImageCopy copyRegion = makeBufferImageCopy(m_subresourceLayers, m_imageExtent);
1994
1995 // Host buffer -> Image
1996 {
1997 const VkImageMemoryBarrier layoutBarrier = makeImageMemoryBarrier(
1998 (VkAccessFlags)0, VK_ACCESS_TRANSFER_WRITE_BIT,
1999 VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2000 **m_image, m_subresourceRange);
2001 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0u, DE_NULL, 0u, DE_NULL, 1u, &layoutBarrier);
2002
2003 vk.cmdCopyBufferToImage(cmdBuffer, **m_hostBuffer, **m_image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1u, &copyRegion);
2004 }
2005 // Image -> Resource
2006 {
2007 const VkImageMemoryBarrier layoutBarrier = makeImageMemoryBarrier(
2008 VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT,
2009 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2010 **m_image, m_subresourceRange);
2011 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0u, DE_NULL, 0u, DE_NULL, 1u, &layoutBarrier);
2012
2013 vk.cmdCopyImageToBuffer(cmdBuffer, **m_image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, m_resource.getBuffer().handle, 1u, &copyRegion);
2014 }
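// No barrier is recorded after the copy into the resource buffer; the dependency towards the paired
// read operation is expected to be added by the test based on getSyncInfo() below.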
2015 }
2016
2017 SyncInfo getSyncInfo (void) const
2018 {
2019 const SyncInfo syncInfo =
2020 {
2021 VK_PIPELINE_STAGE_TRANSFER_BIT, // VkPipelineStageFlags stageMask;
2022 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags accessMask;
2023 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout imageLayout;
2024 };
2025 return syncInfo;
2026 }
2027
2028 Data getData (void) const
2029 {
2030 return getHostBufferData(m_context, *m_hostBuffer, m_resource.getBuffer().size);
2031 }
2032
2033 private:
2034 OperationContext& m_context;
2035 Resource& m_resource;
2036 const VkImageSubresourceRange m_subresourceRange;
2037 const VkImageSubresourceLayers m_subresourceLayers;
2038 de::MovePtr<Buffer> m_hostBuffer;
2039 de::MovePtr<Image> m_image;
2040 VkExtent3D m_imageExtent;
2041 };
2042
2043 class ReadImplementation : public Operation
2044 {
2045 public:
2046 ReadImplementation (OperationContext& context, Resource& resource)
2047 : m_context (context)
2048 , m_resource (resource)
2049 , m_bufferSize (getPixelBufferSize(m_resource.getImage().format, m_resource.getImage().extent))
2050 {
2051 DE_ASSERT(m_resource.getType() == RESOURCE_TYPE_IMAGE);
2052
2053 const DeviceInterface& vk = m_context.getDeviceInterface();
2054 const VkDevice device = m_context.getDevice();
2055 Allocator& allocator = m_context.getAllocator();
2056
2057 m_hostBuffer = de::MovePtr<Buffer>(new Buffer(
2058 vk, device, allocator, makeBufferCreateInfo(m_bufferSize, VK_BUFFER_USAGE_TRANSFER_DST_BIT), MemoryRequirement::HostVisible));
2059
2060 const Allocation& alloc = m_hostBuffer->getAllocation();
2061 deMemset(alloc.getHostPtr(), 0, static_cast<size_t>(m_bufferSize));
2062 flushAlloc(vk, device, alloc);
2063 }
2064
2065 void recordCommands (const VkCommandBuffer cmdBuffer)
2066 {
2067 const DeviceInterface& vk = m_context.getDeviceInterface();
2068 const VkBufferImageCopy copyRegion = makeBufferImageCopy(m_resource.getImage().subresourceLayers, m_resource.getImage().extent);
2069
2070 vk.cmdCopyImageToBuffer(cmdBuffer, m_resource.getImage().handle, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, **m_hostBuffer, 1u, &copyRegion);
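// The resource image is assumed to already be in VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL here, which is
// the layout this operation advertises via getSyncInfo() below.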
2071 }
2072
2073 SyncInfo getSyncInfo (void) const
2074 {
2075 const SyncInfo syncInfo =
2076 {
2077 VK_PIPELINE_STAGE_TRANSFER_BIT, // VkPipelineStageFlags stageMask;
2078 VK_ACCESS_TRANSFER_READ_BIT, // VkAccessFlags accessMask;
2079 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, // VkImageLayout imageLayout;
2080 };
2081 return syncInfo;
2082 }
2083
2084 Data getData (void) const
2085 {
2086 return getHostBufferData(m_context, *m_hostBuffer, m_bufferSize);
2087 }
2088
2089 private:
2090 OperationContext& m_context;
2091 Resource& m_resource;
2092 de::MovePtr<Buffer> m_hostBuffer;
2093 const VkDeviceSize m_bufferSize;
2094 };
2095
2096 class Support : public OperationSupport
2097 {
2098 public:
2099 Support (const ResourceDescription& resourceDesc, const AccessMode mode)
2100 : m_mode (mode)
2101 , m_requiredQueueFlags (resourceDesc.type == RESOURCE_TYPE_IMAGE && isDepthStencilFormat(resourceDesc.imageFormat) ? VK_QUEUE_GRAPHICS_BIT : VK_QUEUE_TRANSFER_BIT)
2102 {
2103 DE_ASSERT(m_mode == ACCESS_MODE_READ || m_mode == ACCESS_MODE_WRITE);
2104 DE_ASSERT(m_mode == ACCESS_MODE_READ || resourceDesc.type != RESOURCE_TYPE_IMAGE);
2105 DE_ASSERT(m_mode == ACCESS_MODE_WRITE || resourceDesc.type != RESOURCE_TYPE_BUFFER);
2106 }
2107
2108 deUint32 getResourceUsageFlags (void) const
2109 {
2110 if (m_mode == ACCESS_MODE_READ)
2111 return VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
2112 else
2113 return VK_BUFFER_USAGE_TRANSFER_DST_BIT;
2114 }
2115
2116 VkQueueFlags getQueueFlags (const OperationContext& context) const
2117 {
2118 DE_UNREF(context);
2119 return m_requiredQueueFlags;
2120 }
2121
2122 de::MovePtr<Operation> build (OperationContext& context, Resource& resource) const
2123 {
2124 if (m_mode == ACCESS_MODE_READ)
2125 return de::MovePtr<Operation>(new ReadImplementation(context, resource));
2126 else
2127 return de::MovePtr<Operation>(new WriteImplementation(context, resource));
2128 }
2129
2130 private:
2131 const AccessMode m_mode;
2132 const VkQueueFlags m_requiredQueueFlags;
2133 };
2134
2135 } // CopyImageToBuffer ns
2136
2137 namespace ClearImage
2138 {
2139
2140 enum ClearMode
2141 {
2142 CLEAR_MODE_COLOR,
2143 CLEAR_MODE_DEPTH_STENCIL,
2144 };
2145
2146 class Implementation : public Operation
2147 {
2148 public:
2149 Implementation (OperationContext& context, Resource& resource, const ClearMode mode)
2150 : m_context (context)
2151 , m_resource (resource)
2152 , m_clearValue (makeClearValue(m_resource.getImage().format))
2153 , m_mode (mode)
2154 {
2155 const VkDeviceSize size = getPixelBufferSize(m_resource.getImage().format, m_resource.getImage().extent);
2156 const VkExtent3D& extent = m_resource.getImage().extent;
2157 const VkFormat format = m_resource.getImage().format;
2158 const tcu::TextureFormat texFormat = mapVkFormat(format);
2159
2160 m_data.resize(static_cast<std::size_t>(size));
2161 tcu::PixelBufferAccess imagePixels(texFormat, extent.width, extent.height, extent.depth, &m_data[0]);
2162 clearPixelBuffer(imagePixels, m_clearValue);
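// m_data now holds the expected texel values after the clear; getData() exposes them so the test can
// compare against what a paired read operation observes.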
2163 }
2164
2165 void recordCommands (const VkCommandBuffer cmdBuffer)
2166 {
2167 const DeviceInterface& vk = m_context.getDeviceInterface();
2168
2169 const VkImageMemoryBarrier layoutBarrier = makeImageMemoryBarrier(
2170 (VkAccessFlags)0, VK_ACCESS_TRANSFER_WRITE_BIT,
2171 VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2172 m_resource.getImage().handle, m_resource.getImage().subresourceRange);
2173
2174 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0u, DE_NULL, 0u, DE_NULL, 1u, &layoutBarrier);
2175
2176 if (m_mode == CLEAR_MODE_COLOR)
2177 vk.cmdClearColorImage(cmdBuffer, m_resource.getImage().handle, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &m_clearValue.color, 1u, &m_resource.getImage().subresourceRange);
2178 else
2179 vk.cmdClearDepthStencilImage(cmdBuffer, m_resource.getImage().handle, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &m_clearValue.depthStencil, 1u, &m_resource.getImage().subresourceRange);
2180 }
2181
2182 SyncInfo getSyncInfo (void) const
2183 {
2184 const SyncInfo syncInfo =
2185 {
2186 VK_PIPELINE_STAGE_TRANSFER_BIT, // VkPipelineStageFlags stageMask;
2187 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags accessMask;
2188 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // VkImageLayout imageLayout;
2189 };
2190 return syncInfo;
2191 }
2192
2193 Data getData (void) const
2194 {
2195 const Data data =
2196 {
2197 m_data.size(), // std::size_t size;
2198 &m_data[0], // const deUint8* data;
2199 };
2200 return data;
2201 }
2202
2203 private:
2204 OperationContext& m_context;
2205 Resource& m_resource;
2206 std::vector<deUint8> m_data;
2207 const VkClearValue m_clearValue;
2208 const ClearMode m_mode;
2209 };
2210
2211 class Support : public OperationSupport
2212 {
2213 public:
2214 Support (const ResourceDescription& resourceDesc, const ClearMode mode)
2215 : m_resourceDesc (resourceDesc)
2216 , m_mode (mode)
2217 {
2218 DE_ASSERT(m_mode == CLEAR_MODE_COLOR || m_mode == CLEAR_MODE_DEPTH_STENCIL);
2219 DE_ASSERT(m_resourceDesc.type == RESOURCE_TYPE_IMAGE);
2220 DE_ASSERT(m_resourceDesc.imageAspect == VK_IMAGE_ASPECT_COLOR_BIT || (m_mode != CLEAR_MODE_COLOR));
2221 DE_ASSERT((m_resourceDesc.imageAspect & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) || (m_mode != CLEAR_MODE_DEPTH_STENCIL));
2222 }
2223
2224 deUint32 getResourceUsageFlags (void) const
2225 {
2226 return VK_IMAGE_USAGE_TRANSFER_DST_BIT;
2227 }
2228
2229 VkQueueFlags getQueueFlags (const OperationContext& context) const
2230 {
2231 DE_UNREF(context);
2232 if (m_mode == CLEAR_MODE_COLOR)
2233 return VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT;
2234 else
2235 return VK_QUEUE_GRAPHICS_BIT;
2236 }
2237
2238 de::MovePtr<Operation> build (OperationContext& context, Resource& resource) const
2239 {
2240 return de::MovePtr<Operation>(new Implementation(context, resource, m_mode));
2241 }
2242
2243 private:
2244 const ResourceDescription m_resourceDesc;
2245 const ClearMode m_mode;
2246 };
2247
2248 } // ClearImage ns
2249
2250 namespace Draw
2251 {
2252
2253 enum DrawCall
2254 {
2255 DRAW_CALL_DRAW,
2256 DRAW_CALL_DRAW_INDEXED,
2257 DRAW_CALL_DRAW_INDIRECT,
2258 DRAW_CALL_DRAW_INDEXED_INDIRECT,
2259 };
2260
2261 //! A write operation that is a result of drawing to an image.
2262 //! \todo Add support for depth/stencil too?
2263 class Implementation : public Operation
2264 {
2265 public:
2266 Implementation (OperationContext& context, Resource& resource, const DrawCall drawCall)
2267 : m_context (context)
2268 , m_resource (resource)
2269 , m_drawCall (drawCall)
2270 , m_vertices (context)
2271 {
2272 const DeviceInterface& vk = context.getDeviceInterface();
2273 const VkDevice device = context.getDevice();
2274 Allocator& allocator = context.getAllocator();
2275
2276 // Indirect buffer
2277
2278 if (m_drawCall == DRAW_CALL_DRAW_INDIRECT)
2279 {
2280 m_indirectBuffer = de::MovePtr<Buffer>(new Buffer(vk, device, allocator,
2281 makeBufferCreateInfo(sizeof(VkDrawIndirectCommand), VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT), MemoryRequirement::HostVisible));
2282
2283 const Allocation& alloc = m_indirectBuffer->getAllocation();
2284 VkDrawIndirectCommand* const pIndirectCommand = static_cast<VkDrawIndirectCommand*>(alloc.getHostPtr());
2285
2286 pIndirectCommand->vertexCount = m_vertices.getNumVertices();
2287 pIndirectCommand->instanceCount = 1u;
2288 pIndirectCommand->firstVertex = 0u;
2289 pIndirectCommand->firstInstance = 0u;
2290
2291 flushAlloc(vk, device, alloc);
2292 }
2293 else if (m_drawCall == DRAW_CALL_DRAW_INDEXED_INDIRECT)
2294 {
2295 m_indirectBuffer = de::MovePtr<Buffer>(new Buffer(vk, device, allocator,
2296 makeBufferCreateInfo(sizeof(VkDrawIndexedIndirectCommand), VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT), MemoryRequirement::HostVisible));
2297
2298 const Allocation& alloc = m_indirectBuffer->getAllocation();
2299 VkDrawIndexedIndirectCommand* const pIndirectCommand = static_cast<VkDrawIndexedIndirectCommand*>(alloc.getHostPtr());
2300
2301 pIndirectCommand->indexCount = m_vertices.getNumIndices();
2302 pIndirectCommand->instanceCount = 1u;
2303 pIndirectCommand->firstIndex = 0u;
2304 pIndirectCommand->vertexOffset = 0u;
2305 pIndirectCommand->firstInstance = 0u;
2306
2307 flushAlloc(vk, device, alloc);
2308 }
2309
2310 // Resource image is the color attachment
2311
2312 m_colorFormat = m_resource.getImage().format;
2313 m_colorSubresourceRange = m_resource.getImage().subresourceRange;
2314 m_colorImage = m_resource.getImage().handle;
2315 m_attachmentExtent = m_resource.getImage().extent;
2316
2317 // Pipeline
2318
2319 m_colorAttachmentView = makeImageView (vk, device, m_colorImage, VK_IMAGE_VIEW_TYPE_2D, m_colorFormat, m_colorSubresourceRange);
2320 m_renderPass = makeRenderPass (vk, device, m_colorFormat);
2321 m_framebuffer = makeFramebuffer (vk, device, *m_renderPass, *m_colorAttachmentView, m_attachmentExtent.width, m_attachmentExtent.height, 1u);
2322 m_pipelineLayout = makePipelineLayoutWithoutDescriptors(vk, device);
2323
2324 GraphicsPipelineBuilder pipelineBuilder;
2325 pipelineBuilder
2326 .setRenderSize (tcu::IVec2(m_attachmentExtent.width, m_attachmentExtent.height))
2327 .setVertexInputSingleAttribute (m_vertices.getVertexFormat(), m_vertices.getVertexStride())
2328 .setShader (vk, device, VK_SHADER_STAGE_VERTEX_BIT, context.getBinaryCollection().get("draw_vert"), DE_NULL)
2329 .setShader (vk, device, VK_SHADER_STAGE_FRAGMENT_BIT, context.getBinaryCollection().get("draw_frag"), DE_NULL);
2330
2331 m_pipeline = pipelineBuilder.build(vk, device, *m_pipelineLayout, *m_renderPass, context.getPipelineCacheData());
2332
2333 // Set expected draw values
2334
2335 m_expectedData.resize(static_cast<size_t>(getPixelBufferSize(m_resource.getImage().format, m_resource.getImage().extent)));
2336 tcu::PixelBufferAccess imagePixels(mapVkFormat(m_colorFormat), m_attachmentExtent.width, m_attachmentExtent.height, m_attachmentExtent.depth, &m_expectedData[0]);
2337 clearPixelBuffer(imagePixels, makeClearValue(m_colorFormat));
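// The "draw_frag" shader writes exactly makeClearValue(m_colorFormat) (see Support::initPrograms), so
// the expected data computed here is the whole image set to that single value.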
2338 }
2339
2340 void recordCommands (const VkCommandBuffer cmdBuffer)
2341 {
2342 const DeviceInterface& vk = m_context.getDeviceInterface();
2343
2344 // Change color attachment image layout
2345 {
2346 const VkImageMemoryBarrier colorAttachmentLayoutBarrier = makeImageMemoryBarrier(
2347 (VkAccessFlags)0, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
2348 VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
2349 m_colorImage, m_colorSubresourceRange);
2350
2351 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, (VkDependencyFlags)0,
2352 0u, DE_NULL, 0u, DE_NULL, 1u, &colorAttachmentLayoutBarrier);
2353 }
2354
2355 {
2356 const VkRect2D renderArea = makeRect2D(m_attachmentExtent);
2357 const tcu::Vec4 clearColor = tcu::Vec4(0.0f, 0.0f, 0.0f, 1.0f);
2358
2359 beginRenderPass(vk, cmdBuffer, *m_renderPass, *m_framebuffer, renderArea, clearColor);
2360 }
2361
2362 vk.cmdBindPipeline(cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *m_pipeline);
2363 {
2364 const VkDeviceSize vertexBufferOffset = 0ull;
2365 const VkBuffer vertexBuffer = m_vertices.getVertexBuffer();
2366 vk.cmdBindVertexBuffers(cmdBuffer, 0u, 1u, &vertexBuffer, &vertexBufferOffset);
2367 }
2368
2369 if (m_drawCall == DRAW_CALL_DRAW_INDEXED || m_drawCall == DRAW_CALL_DRAW_INDEXED_INDIRECT)
2370 vk.cmdBindIndexBuffer(cmdBuffer, m_vertices.getIndexBuffer(), 0u, m_vertices.getIndexType());
2371
2372 switch (m_drawCall)
2373 {
2374 case DRAW_CALL_DRAW:
2375 vk.cmdDraw(cmdBuffer, m_vertices.getNumVertices(), 1u, 0u, 0u);
2376 break;
2377
2378 case DRAW_CALL_DRAW_INDEXED:
2379 vk.cmdDrawIndexed(cmdBuffer, m_vertices.getNumIndices(), 1u, 0u, 0, 0u);
2380 break;
2381
2382 case DRAW_CALL_DRAW_INDIRECT:
2383 vk.cmdDrawIndirect(cmdBuffer, **m_indirectBuffer, 0u, 1u, 0u);
2384 break;
2385
2386 case DRAW_CALL_DRAW_INDEXED_INDIRECT:
2387 vk.cmdDrawIndexedIndirect(cmdBuffer, **m_indirectBuffer, 0u, 1u, 0u);
2388 break;
2389 }
2390
2391 endRenderPass(vk, cmdBuffer);
2392 }
2393
2394 SyncInfo getSyncInfo (void) const
2395 {
2396 const SyncInfo syncInfo =
2397 {
2398 VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, // VkPipelineStageFlags stageMask;
2399 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // VkAccessFlags accessMask;
2400 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout imageLayout;
2401 };
2402 return syncInfo;
2403 }
2404
2405 Data getData (void) const
2406 {
2407 const Data data =
2408 {
2409 m_expectedData.size(), // std::size_t size;
2410 &m_expectedData[0], // const deUint8* data;
2411 };
2412 return data;
2413 }
2414
2415 private:
2416 OperationContext& m_context;
2417 Resource& m_resource;
2418 const DrawCall m_drawCall;
2419 const VertexGrid m_vertices;
2420 std::vector<deUint8> m_expectedData;
2421 de::MovePtr<Buffer> m_indirectBuffer;
2422 VkFormat m_colorFormat;
2423 VkImage m_colorImage;
2424 Move<VkImageView> m_colorAttachmentView;
2425 VkImageSubresourceRange m_colorSubresourceRange;
2426 VkExtent3D m_attachmentExtent;
2427 Move<VkRenderPass> m_renderPass;
2428 Move<VkFramebuffer> m_framebuffer;
2429 Move<VkPipelineLayout> m_pipelineLayout;
2430 Move<VkPipeline> m_pipeline;
2431 };
2432
2433 template<typename T, std::size_t N>
2434 std::string toString (const T (&values)[N])
2435 {
2436 std::ostringstream str;
2437 for (std::size_t i = 0; i < N; ++i)
2438 str << (i != 0 ? ", " : "") << values[i];
2439 return str.str();
2440 }
2441
2442 class Support : public OperationSupport
2443 {
2444 public:
2445 Support (const ResourceDescription& resourceDesc, const DrawCall drawCall)
2446 : m_resourceDesc (resourceDesc)
2447 , m_drawCall (drawCall)
2448 {
2449 DE_ASSERT(m_resourceDesc.type == RESOURCE_TYPE_IMAGE && m_resourceDesc.imageType == VK_IMAGE_TYPE_2D);
2450 DE_ASSERT(!isDepthStencilFormat(m_resourceDesc.imageFormat));
2451 }
2452
2453 void initPrograms (SourceCollections& programCollection) const
2454 {
2455 // Vertex
2456 {
2457 std::ostringstream src;
2458 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
2459 << "\n"
2460 << "layout(location = 0) in vec4 v_in_position;\n"
2461 << "\n"
2462 << "out " << s_perVertexBlock << ";\n"
2463 << "\n"
2464 << "void main (void)\n"
2465 << "{\n"
2466 << " gl_Position = v_in_position;\n"
2467 << "}\n";
2468
2469 programCollection.glslSources.add("draw_vert") << glu::VertexSource(src.str());
2470 }
2471
2472 // Fragment
2473 {
2474 const VkClearValue clearValue = makeClearValue(m_resourceDesc.imageFormat);
2475 const bool isIntegerFormat = isIntFormat(m_resourceDesc.imageFormat) || isUintFormat(m_resourceDesc.imageFormat);
2476 const std::string colorType = (isIntegerFormat ? "uvec4" : "vec4");
2477
2478 std::ostringstream src;
2479 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
2480 << "\n"
2481 << "layout(location = 0) out " << colorType << " o_color;\n"
2482 << "\n"
2483 << "void main (void)\n"
2484 << "{\n"
2485 << " o_color = " << colorType << "(" << (isIntegerFormat ? toString(clearValue.color.uint32) : toString(clearValue.color.float32)) << ");\n"
2486 << "}\n";
2487
2488 programCollection.glslSources.add("draw_frag") << glu::FragmentSource(src.str());
2489 }
2490 }
2491
2492 deUint32 getResourceUsageFlags (void) const
2493 {
2494 return VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
2495 }
2496
2497 VkQueueFlags getQueueFlags (const OperationContext& context) const
2498 {
2499 DE_UNREF(context);
2500 return VK_QUEUE_GRAPHICS_BIT;
2501 }
2502
2503 de::MovePtr<Operation> build (OperationContext& context, Resource& resource) const
2504 {
2505 return de::MovePtr<Operation>(new Implementation(context, resource, m_drawCall));
2506 }
2507
2508 private:
2509 const ResourceDescription m_resourceDesc;
2510 const DrawCall m_drawCall;
2511 };
2512
2513 } // Draw ns
2514
2515 namespace ClearAttachments
2516 {
2517
2518 class Implementation : public Operation
2519 {
2520 public:
2521 Implementation (OperationContext& context, Resource& resource)
2522 : m_context (context)
2523 , m_resource (resource)
2524 , m_clearValue (makeClearValue(m_resource.getImage().format))
2525 {
2526 const DeviceInterface& vk = context.getDeviceInterface();
2527 const VkDevice device = context.getDevice();
2528
2529 const VkDeviceSize size = getPixelBufferSize(m_resource.getImage().format, m_resource.getImage().extent);
2530 const VkExtent3D& extent = m_resource.getImage().extent;
2531 const VkFormat format = m_resource.getImage().format;
2532 const tcu::TextureFormat texFormat = mapVkFormat(format);
2533 const SyncInfo syncInfo = getSyncInfo();
2534
2535 m_data.resize(static_cast<std::size_t>(size));
2536 tcu::PixelBufferAccess imagePixels(texFormat, extent.width, extent.height, extent.depth, &m_data[0]);
2537 clearPixelBuffer(imagePixels, m_clearValue);
2538
2539 m_attachmentView = makeImageView(vk, device, m_resource.getImage().handle, getImageViewType(m_resource.getImage().imageType), m_resource.getImage().format, m_resource.getImage().subresourceRange);
2540
2541 switch (m_resource.getImage().subresourceRange.aspectMask)
2542 {
2543 case VK_IMAGE_ASPECT_COLOR_BIT:
2544 m_renderPass = makeRenderPass(vk, device, m_resource.getImage().format, VK_FORMAT_UNDEFINED, VK_ATTACHMENT_LOAD_OP_DONT_CARE, syncInfo.imageLayout);
2545 break;
2546 case VK_IMAGE_ASPECT_STENCIL_BIT:
2547 case VK_IMAGE_ASPECT_DEPTH_BIT:
2548 m_renderPass = makeRenderPass(vk, device, VK_FORMAT_UNDEFINED, m_resource.getImage().format, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, syncInfo.imageLayout);
2549 break;
2550 default:
2551 DE_ASSERT(0);
2552 break;
2553 }
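// VK_ATTACHMENT_LOAD_OP_DONT_CARE is sufficient because vkCmdClearAttachments overwrites the whole
// attachment inside the render pass, and the render pass final layout is taken from getSyncInfo() so
// it matches the layout this operation advertises.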
2554
2555 m_frameBuffer = makeFramebuffer(vk, device, *m_renderPass, *m_attachmentView, m_resource.getImage().extent.width, m_resource.getImage().extent.height, 1u);
2556 }
2557
2558 void recordCommands (const VkCommandBuffer cmdBuffer)
2559 {
2560 const DeviceInterface& vk = m_context.getDeviceInterface();
2561 beginRenderPass(vk, cmdBuffer, *m_renderPass, *m_frameBuffer, makeRect2D(0, 0, m_resource.getImage().extent.width, m_resource.getImage().extent.height), m_clearValue);
2562
2563 const VkClearAttachment clearAttachment =
2564 {
2565 m_resource.getImage().subresourceRange.aspectMask, // VkImageAspectFlags aspectMask;
2566 0, // deUint32 colorAttachment;
2567 m_clearValue // VkClearValue clearValue;
2568 };
2569
2570 const VkRect2D rect2D = makeRect2D(m_resource.getImage().extent);
2571
2572 const VkClearRect clearRect =
2573 {
2574 rect2D, // VkRect2D rect;
2575 0u, // deUint32 baseArrayLayer;
2576 m_resource.getImage().subresourceLayers.layerCount // deUint32 layerCount;
2577 };
2578
2579 vk.cmdClearAttachments(cmdBuffer, 1, &clearAttachment, 1, &clearRect);
2580
2581 endRenderPass(vk, cmdBuffer);
2582 }
2583
2584 SyncInfo getSyncInfo (void) const
2585 {
2586 SyncInfo syncInfo;
2587 syncInfo.stageMask = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
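// VK_PIPELINE_STAGE_ALL_COMMANDS_BIT is used as a deliberately conservative stage mask covering the
// in-render-pass clear.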
2588
2589 switch (m_resource.getImage().subresourceRange.aspectMask)
2590 {
2591 case VK_IMAGE_ASPECT_COLOR_BIT:
2592 syncInfo.accessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
2593 syncInfo.imageLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
2594 break;
2595 case VK_IMAGE_ASPECT_STENCIL_BIT:
2596 case VK_IMAGE_ASPECT_DEPTH_BIT:
2597 syncInfo.accessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
2598 syncInfo.imageLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
2599 break;
2600 default:
2601 DE_ASSERT(0);
2602 break;
2603 }
2604
2605 return syncInfo;
2606 }
2607
2608 Data getData (void) const
2609 {
2610 const Data data =
2611 {
2612 m_data.size(), // std::size_t size;
2613 &m_data[0], // const deUint8* data;
2614 };
2615 return data;
2616 }
2617
2618 private:
2619 OperationContext& m_context;
2620 Resource& m_resource;
2621 std::vector<deUint8> m_data;
2622 const VkClearValue m_clearValue;
2623 Move<VkImageView> m_attachmentView;
2624 Move<VkRenderPass> m_renderPass;
2625 Move<VkFramebuffer> m_frameBuffer;
2626 };
2627
2628 class Support : public OperationSupport
2629 {
2630 public:
2631 Support (const ResourceDescription& resourceDesc)
2632 : m_resourceDesc (resourceDesc)
2633 {
2634 DE_ASSERT(m_resourceDesc.type == RESOURCE_TYPE_IMAGE);
2635 }
2636
2637 deUint32 getResourceUsageFlags (void) const
2638 {
2639 switch (m_resourceDesc.imageAspect)
2640 {
2641 case VK_IMAGE_ASPECT_COLOR_BIT:
2642 return VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
2643 case VK_IMAGE_ASPECT_STENCIL_BIT:
2644 case VK_IMAGE_ASPECT_DEPTH_BIT:
2645 return VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
2646 default:
2647 DE_ASSERT(0);
2648 }
2649 return 0u;
2650 }
2651
2652 VkQueueFlags getQueueFlags (const OperationContext& context) const
2653 {
2654 DE_UNREF(context);
2655 return VK_QUEUE_GRAPHICS_BIT;
2656 }
2657
2658 de::MovePtr<Operation> build (OperationContext& context, Resource& resource) const
2659 {
2660 return de::MovePtr<Operation>(new Implementation(context, resource));
2661 }
2662
2663 private:
2664 const ResourceDescription m_resourceDesc;
2665 };
2666
2667 } // ClearAttachments ns
2668
2669 namespace IndirectBuffer
2670 {
2671
2672 class GraphicsPipeline : public Pipeline
2673 {
2674 public:
2675 GraphicsPipeline (OperationContext& context,
2676 const ResourceType resourceType,
2677 const VkBuffer indirectBuffer,
2678 const std::string& shaderPrefix,
2679 const VkDescriptorSetLayout descriptorSetLayout)
2680 : m_resourceType (resourceType)
2681 , m_indirectBuffer (indirectBuffer)
2682 , m_vertices (context)
2683 {
2684 const DeviceInterface& vk = context.getDeviceInterface();
2685 const VkDevice device = context.getDevice();
2686 Allocator& allocator = context.getAllocator();
2687
2688 // Color attachment
2689
2690 m_colorFormat = VK_FORMAT_R8G8B8A8_UNORM;
2691 m_colorImageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u);
2692 m_colorImageExtent = makeExtent3D(16u, 16u, 1u);
2693 m_colorAttachmentImage = de::MovePtr<Image>(new Image(vk, device, allocator,
2694 makeImageCreateInfo(VK_IMAGE_TYPE_2D, m_colorImageExtent, m_colorFormat, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT),
2695 MemoryRequirement::Any));
2696
2697 // Pipeline
2698
2699 m_colorAttachmentView = makeImageView (vk, device, **m_colorAttachmentImage, VK_IMAGE_VIEW_TYPE_2D, m_colorFormat, m_colorImageSubresourceRange);
2700 m_renderPass = makeRenderPass (vk, device, m_colorFormat);
2701 m_framebuffer = makeFramebuffer (vk, device, *m_renderPass, *m_colorAttachmentView, m_colorImageExtent.width, m_colorImageExtent.height, 1u);
2702 m_pipelineLayout = makePipelineLayout(vk, device, descriptorSetLayout);
2703
2704 GraphicsPipelineBuilder pipelineBuilder;
2705 pipelineBuilder
2706 .setRenderSize (tcu::IVec2(m_colorImageExtent.width, m_colorImageExtent.height))
2707 .setVertexInputSingleAttribute (m_vertices.getVertexFormat(), m_vertices.getVertexStride())
2708 .setShader (vk, device, VK_SHADER_STAGE_VERTEX_BIT, context.getBinaryCollection().get(shaderPrefix + "vert"), DE_NULL)
2709 .setShader (vk, device, VK_SHADER_STAGE_FRAGMENT_BIT, context.getBinaryCollection().get(shaderPrefix + "frag"), DE_NULL);
2710
2711 m_pipeline = pipelineBuilder.build(vk, device, *m_pipelineLayout, *m_renderPass, context.getPipelineCacheData());
2712 }
2713
2714 void recordCommands (OperationContext& context, const VkCommandBuffer cmdBuffer, const VkDescriptorSet descriptorSet)
2715 {
2716 const DeviceInterface& vk = context.getDeviceInterface();
2717
2718 // Change color attachment image layout
2719 {
2720 const VkImageMemoryBarrier colorAttachmentLayoutBarrier = makeImageMemoryBarrier(
2721 (VkAccessFlags)0, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
2722 VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
2723 **m_colorAttachmentImage, m_colorImageSubresourceRange);
2724
2725 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, (VkDependencyFlags)0,
2726 0u, DE_NULL, 0u, DE_NULL, 1u, &colorAttachmentLayoutBarrier);
2727 }
2728
2729 {
2730 const VkRect2D renderArea = makeRect2D(m_colorImageExtent);
2731 const tcu::Vec4 clearColor = tcu::Vec4(0.0f, 0.0f, 0.0f, 1.0f);
2732
2733 beginRenderPass(vk, cmdBuffer, *m_renderPass, *m_framebuffer, renderArea, clearColor);
2734 }
2735
2736 vk.cmdBindPipeline(cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *m_pipeline);
2737 vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *m_pipelineLayout, 0u, 1u, &descriptorSet, 0u, DE_NULL);
2738 {
2739 const VkDeviceSize vertexBufferOffset = 0ull;
2740 const VkBuffer vertexBuffer = m_vertices.getVertexBuffer();
2741 vk.cmdBindVertexBuffers(cmdBuffer, 0u, 1u, &vertexBuffer, &vertexBufferOffset);
2742 }
2743
2744 switch (m_resourceType)
2745 {
2746 case RESOURCE_TYPE_INDIRECT_BUFFER_DRAW:
2747 vk.cmdDrawIndirect(cmdBuffer, m_indirectBuffer, 0u, 1u, 0u);
2748 break;
2749
2750 case RESOURCE_TYPE_INDIRECT_BUFFER_DRAW_INDEXED:
2751 vk.cmdBindIndexBuffer(cmdBuffer, m_vertices.getIndexBuffer(), 0u, m_vertices.getIndexType());
2752 vk.cmdDrawIndexedIndirect(cmdBuffer, m_indirectBuffer, 0u, 1u, 0u);
2753 break;
2754
2755 default:
2756 DE_ASSERT(0);
2757 break;
2758 }
2759 endRenderPass(vk, cmdBuffer);
2760 }
2761
2762 private:
2763 const ResourceType m_resourceType;
2764 const VkBuffer m_indirectBuffer;
2765 const VertexGrid m_vertices;
2766 VkFormat m_colorFormat;
2767 de::MovePtr<Image> m_colorAttachmentImage;
2768 Move<VkImageView> m_colorAttachmentView;
2769 VkExtent3D m_colorImageExtent;
2770 VkImageSubresourceRange m_colorImageSubresourceRange;
2771 Move<VkRenderPass> m_renderPass;
2772 Move<VkFramebuffer> m_framebuffer;
2773 Move<VkPipelineLayout> m_pipelineLayout;
2774 Move<VkPipeline> m_pipeline;
2775 };
2776
2777 class ComputePipeline : public Pipeline
2778 {
2779 public:
2780 ComputePipeline (OperationContext& context,
2781 const VkBuffer indirectBuffer,
2782 const std::string& shaderPrefix,
2783 const VkDescriptorSetLayout descriptorSetLayout)
2784 : m_indirectBuffer (indirectBuffer)
2785 {
2786 const DeviceInterface& vk = context.getDeviceInterface();
2787 const VkDevice device = context.getDevice();
2788
2789 const Unique<VkShaderModule> shaderModule(createShaderModule(vk, device, context.getBinaryCollection().get(shaderPrefix + "comp"), (VkShaderModuleCreateFlags)0));
2790
2791 m_pipelineLayout = makePipelineLayout(vk, device, descriptorSetLayout);
2792 m_pipeline = makeComputePipeline(vk, device, *m_pipelineLayout, *shaderModule, DE_NULL, context.getPipelineCacheData());
2793 }
2794
2795 	void recordCommands (OperationContext& context, const VkCommandBuffer cmdBuffer, const VkDescriptorSet descriptorSet)
2796 {
2797 const DeviceInterface& vk = context.getDeviceInterface();
2798
2799 vk.cmdBindPipeline(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *m_pipeline);
2800 vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *m_pipelineLayout, 0u, 1u, &descriptorSet, 0u, DE_NULL);
2801 vk.cmdDispatchIndirect(cmdBuffer, m_indirectBuffer, 0u);
2802 }
2803
2804 private:
2805 const VkBuffer m_indirectBuffer;
2806 Move<VkPipelineLayout> m_pipelineLayout;
2807 Move<VkPipeline> m_pipeline;
2808 };
2809
2810 //! Read indirect buffer by executing an indirect draw or dispatch command.
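//! Each shader invocation launched by the indirect command atomically increments a counter in a
//! host-visible buffer, so the final counter value shows that the indirect parameters were consumed.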
2811 class ReadImplementation : public Operation
2812 {
2813 public:
2814 	ReadImplementation (OperationContext& context, Resource& resource)
2815 : m_context (context)
2816 , m_resource (resource)
2817 , m_stage (resource.getType() == RESOURCE_TYPE_INDIRECT_BUFFER_DISPATCH ? VK_SHADER_STAGE_COMPUTE_BIT : VK_SHADER_STAGE_VERTEX_BIT)
2818 , m_pipelineStage (pipelineStageFlagsFromShaderStageFlagBits(m_stage))
2819 , m_hostBufferSizeBytes (sizeof(deUint32))
2820 {
2821 requireFeaturesForSSBOAccess (m_context, m_stage);
2822
2823 const DeviceInterface& vk = m_context.getDeviceInterface();
2824 const VkDevice device = m_context.getDevice();
2825 Allocator& allocator = m_context.getAllocator();
2826
2827 m_hostBuffer = de::MovePtr<Buffer>(new Buffer(
2828 vk, device, allocator, makeBufferCreateInfo(m_hostBufferSizeBytes, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT), MemoryRequirement::HostVisible));
2829
2830 // Init host buffer data
2831 {
2832 const Allocation& alloc = m_hostBuffer->getAllocation();
2833 deMemset(alloc.getHostPtr(), 0, static_cast<size_t>(m_hostBufferSizeBytes));
2834 flushAlloc(vk, device, alloc);
2835 }
2836
2837 // Prepare descriptors
2838 {
2839 m_descriptorSetLayout = DescriptorSetLayoutBuilder()
2840 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, m_stage)
2841 .build(vk, device);
2842
2843 m_descriptorPool = DescriptorPoolBuilder()
2844 .addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER)
2845 .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
2846
2847 m_descriptorSet = makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout);
2848
2849 const VkDescriptorBufferInfo hostBufferInfo = makeDescriptorBufferInfo(**m_hostBuffer, 0u, m_hostBufferSizeBytes);
2850
2851 DescriptorSetUpdateBuilder()
2852 .writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &hostBufferInfo)
2853 .update(vk, device);
2854 }
2855
2856 // Create pipeline
2857 m_pipeline = (m_resource.getType() == RESOURCE_TYPE_INDIRECT_BUFFER_DISPATCH
2858 ? de::MovePtr<Pipeline>(new ComputePipeline(context, m_resource.getBuffer().handle, "read_ib_", *m_descriptorSetLayout))
2859 : de::MovePtr<Pipeline>(new GraphicsPipeline(context, m_resource.getType(), m_resource.getBuffer().handle, "read_ib_", *m_descriptorSetLayout)));
2860 }
2861
2862 	void recordCommands (const VkCommandBuffer cmdBuffer)
2863 {
2864 const DeviceInterface& vk = m_context.getDeviceInterface();
2865
2866 m_pipeline->recordCommands(m_context, cmdBuffer, *m_descriptorSet);
2867
2868 // Insert a barrier so data written by the shader is available to the host
2869 const VkBufferMemoryBarrier barrier = makeBufferMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT, **m_hostBuffer, 0u, m_hostBufferSizeBytes);
2870 vk.cmdPipelineBarrier(cmdBuffer, m_pipelineStage, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0, 0u, DE_NULL, 1u, &barrier, 0u, DE_NULL);
2871 }
2872
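	// The indirect buffer itself is consumed by the indirect command fetch, hence the
	// DRAW_INDIRECT stage and INDIRECT_COMMAND_READ access reported below.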
2873 	SyncInfo getSyncInfo (void) const
2874 {
2875 const SyncInfo syncInfo =
2876 {
2877 VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT, // VkPipelineStageFlags stageMask;
2878 VK_ACCESS_INDIRECT_COMMAND_READ_BIT, // VkAccessFlags accessMask;
2879 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout imageLayout;
2880 };
2881 return syncInfo;
2882 }
2883
2884 	Data getData (void) const
2885 {
2886 return getHostBufferData(m_context, *m_hostBuffer, m_hostBufferSizeBytes);
2887 }
2888
2889 private:
2890 OperationContext& m_context;
2891 Resource& m_resource;
2892 const VkShaderStageFlagBits m_stage;
2893 const VkPipelineStageFlags m_pipelineStage;
2894 const VkDeviceSize m_hostBufferSizeBytes;
2895 de::MovePtr<Buffer> m_hostBuffer;
2896 Move<VkDescriptorPool> m_descriptorPool;
2897 Move<VkDescriptorSetLayout> m_descriptorSetLayout;
2898 Move<VkDescriptorSet> m_descriptorSet;
2899 de::MovePtr<Pipeline> m_pipeline;
2900 };
2901
2902 //! Prepare indirect buffer for a draw/dispatch call.
2903 class WriteImplementation : public Operation
2904 {
2905 public:
2906 	WriteImplementation (OperationContext& context, Resource& resource)
2907 : m_context (context)
2908 , m_resource (resource)
2909 {
2910 switch (m_resource.getType())
2911 {
2912 case RESOURCE_TYPE_INDIRECT_BUFFER_DRAW:
2913 {
2914 m_drawIndirect.vertexCount = 6u;
2915 m_drawIndirect.instanceCount = 1u;
2916 m_drawIndirect.firstVertex = 0u;
2917 m_drawIndirect.firstInstance = 0u;
2918
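				// Drawing 6 vertices runs the read-side vertex shader 6 times; each invocation adds 1,
				// so the counter is expected to reach 6.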
2919 m_indirectData = reinterpret_cast<deUint32*>(&m_drawIndirect);
2920 m_expectedValue = 6u;
2921 }
2922 break;
2923
2924 case RESOURCE_TYPE_INDIRECT_BUFFER_DRAW_INDEXED:
2925 {
2926 m_drawIndexedIndirect.indexCount = 6u;
2927 m_drawIndexedIndirect.instanceCount = 1u;
2928 m_drawIndexedIndirect.firstIndex = 0u;
2929 m_drawIndexedIndirect.vertexOffset = 0u;
2930 m_drawIndexedIndirect.firstInstance = 0u;
2931
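				// Indexed variant: 6 indices, one counter increment per vertex shader invocation, expected value 6.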
2932 m_indirectData = reinterpret_cast<deUint32*>(&m_drawIndexedIndirect);
2933 m_expectedValue = 6u;
2934 }
2935 break;
2936
2937 case RESOURCE_TYPE_INDIRECT_BUFFER_DISPATCH:
2938 {
2939 m_dispatchIndirect.x = 7u;
2940 m_dispatchIndirect.y = 2u;
2941 m_dispatchIndirect.z = 1u;
2942
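				// An indirect dispatch of 7 x 2 x 1 workgroups with local_size_x = 1 yields 14 invocations,
				// each adding 1 to the counter, so the expected value is 14.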
2943 m_indirectData = reinterpret_cast<deUint32*>(&m_dispatchIndirect);
2944 m_expectedValue = 14u;
2945 }
2946 break;
2947
2948 default:
2949 DE_ASSERT(0);
2950 break;
2951 }
2952 }
2953
2954 	void recordCommands (const VkCommandBuffer cmdBuffer)
2955 {
2956 const DeviceInterface& vk = m_context.getDeviceInterface();
2957
2958 vk.cmdUpdateBuffer(cmdBuffer, m_resource.getBuffer().handle, m_resource.getBuffer().offset, m_resource.getBuffer().size, m_indirectData);
2959 }
2960
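	// vkCmdUpdateBuffer is a transfer operation, so the write happens at the TRANSFER stage
	// with TRANSFER_WRITE access.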
2961 	SyncInfo getSyncInfo (void) const
2962 {
2963 const SyncInfo syncInfo =
2964 {
2965 VK_PIPELINE_STAGE_TRANSFER_BIT, // VkPipelineStageFlags stageMask;
2966 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags accessMask;
2967 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout imageLayout;
2968 };
2969 return syncInfo;
2970 }
2971
2972 	Data getData (void) const
2973 {
2974 const Data data =
2975 {
2976 sizeof(deUint32), // std::size_t size;
2977 reinterpret_cast<const deUint8*>(&m_expectedValue), // const deUint8* data;
2978 };
2979 return data;
2980 }
2981
2982 private:
2983 OperationContext& m_context;
2984 Resource& m_resource;
2985 VkDrawIndirectCommand m_drawIndirect;
2986 VkDrawIndexedIndirectCommand m_drawIndexedIndirect;
2987 VkDispatchIndirectCommand m_dispatchIndirect;
2988 deUint32* m_indirectData;
2989 deUint32 m_expectedValue; //! Side-effect value expected to be computed by a read (draw/dispatch) command.
2990 };
2991
2992 class ReadSupport : public OperationSupport
2993 {
2994 public:
2995 	ReadSupport (const ResourceDescription& resourceDesc)
2996 : m_resourceDesc (resourceDesc)
2997 {
2998 DE_ASSERT(isIndirectBuffer(m_resourceDesc.type));
2999 }
3000
3001 	void initPrograms (SourceCollections& programCollection) const
3002 {
3003 std::ostringstream decl;
3004 decl << "layout(set = 0, binding = 0, std140) coherent buffer Data {\n"
3005 << " uint value;\n"
3006 << "} sb_out;\n";
3007
3008 std::ostringstream main;
3009 main << " atomicAdd(sb_out.value, 1u);\n";
3010
3011 // Vertex
3012 {
3013 std::ostringstream src;
3014 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
3015 << "\n"
3016 << "layout(location = 0) in vec4 v_in_position;\n"
3017 << "\n"
3018 << "out " << s_perVertexBlock << ";\n"
3019 << "\n"
3020 << decl.str()
3021 << "\n"
3022 << "void main (void)\n"
3023 << "{\n"
3024 << " gl_Position = v_in_position;\n"
3025 << main.str()
3026 << "}\n";
3027
3028 programCollection.glslSources.add("read_ib_vert") << glu::VertexSource(src.str());
3029 }
3030
3031 // Fragment
3032 {
3033 std::ostringstream src;
3034 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
3035 << "\n"
3036 << "layout(location = 0) out vec4 o_color;\n"
3037 << "\n"
3038 << "void main (void)\n"
3039 << "{\n"
3040 << " o_color = vec4(1.0);\n"
3041 << "}\n";
3042
3043 programCollection.glslSources.add("read_ib_frag") << glu::FragmentSource(src.str());
3044 }
3045
3046 // Compute
3047 {
3048 std::ostringstream src;
3049 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
3050 << "\n"
3051 << "layout(local_size_x = 1) in;\n"
3052 << "\n"
3053 << decl.str()
3054 << "\n"
3055 << "void main (void)\n"
3056 << "{\n"
3057 << main.str()
3058 << "}\n";
3059
3060 programCollection.glslSources.add("read_ib_comp") << glu::ComputeSource(src.str());
3061 }
3062 }
3063
3064 	deUint32 getResourceUsageFlags (void) const
3065 {
3066 return VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT;
3067 }
3068
3069 	VkQueueFlags getQueueFlags (const OperationContext& context) const
3070 {
3071 DE_UNREF(context);
3072 return (m_resourceDesc.type == RESOURCE_TYPE_INDIRECT_BUFFER_DISPATCH ? VK_QUEUE_COMPUTE_BIT : VK_QUEUE_GRAPHICS_BIT);
3073 }
3074
3075 	de::MovePtr<Operation> build (OperationContext& context, Resource& resource) const
3076 {
3077 return de::MovePtr<Operation>(new ReadImplementation(context, resource));
3078 }
3079
3080 private:
3081 const ResourceDescription m_resourceDesc;
3082 };
3083
3084
3085 class WriteSupport : public OperationSupport
3086 {
3087 public:
3088 	WriteSupport (const ResourceDescription& resourceDesc)
3089 {
3090 DE_ASSERT(isIndirectBuffer(resourceDesc.type));
3091 DE_UNREF(resourceDesc);
3092 }
3093
3094 	deUint32 getResourceUsageFlags (void) const
3095 {
3096 return VK_BUFFER_USAGE_TRANSFER_DST_BIT;
3097 }
3098
3099 	VkQueueFlags getQueueFlags (const OperationContext& context) const
3100 {
3101 DE_UNREF(context);
3102 return VK_QUEUE_TRANSFER_BIT;
3103 }
3104
3105 	de::MovePtr<Operation> build (OperationContext& context, Resource& resource) const
3106 {
3107 return de::MovePtr<Operation>(new WriteImplementation(context, resource));
3108 }
3109 };
3110
3111 } // IndirectBuffer ns
3112
3113 namespace VertexInput
3114 {
3115
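//! Read a buffer through the fixed-function vertex input stage: the resource is bound as a vertex
//! buffer and each fetched uvec4 attribute is copied to a host-visible output buffer by the vertex shader.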
3116 class Implementation : public Operation
3117 {
3118 public:
3119 	Implementation (OperationContext& context, Resource& resource)
3120 : m_context (context)
3121 , m_resource (resource)
3122 {
3123 requireFeaturesForSSBOAccess (m_context, VK_SHADER_STAGE_VERTEX_BIT);
3124
3125 const DeviceInterface& vk = context.getDeviceInterface();
3126 const VkDevice device = context.getDevice();
3127 Allocator& allocator = context.getAllocator();
3128 const VkDeviceSize dataSizeBytes = m_resource.getBuffer().size;
3129
3130 m_outputBuffer = de::MovePtr<Buffer>(new Buffer(vk, device, allocator,
3131 makeBufferCreateInfo(dataSizeBytes, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT), MemoryRequirement::HostVisible));
3132
3133 {
3134 const Allocation& alloc = m_outputBuffer->getAllocation();
3135 deMemset(alloc.getHostPtr(), 0, static_cast<size_t>(dataSizeBytes));
3136 flushAlloc(vk, device, alloc);
3137 }
3138
3139 m_descriptorSetLayout = DescriptorSetLayoutBuilder()
3140 .addSingleBinding (VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_VERTEX_BIT)
3141 .build (vk, device);
3142
3143 m_descriptorPool = DescriptorPoolBuilder()
3144 .addType (VK_DESCRIPTOR_TYPE_STORAGE_BUFFER)
3145 .build (vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
3146
3147 m_descriptorSet = makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout);
3148
3149 const VkDescriptorBufferInfo outputBufferDescriptorInfo = makeDescriptorBufferInfo(m_outputBuffer->get(), 0ull, dataSizeBytes);
3150 DescriptorSetUpdateBuilder()
3151 .writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &outputBufferDescriptorInfo)
3152 .update (vk, device);
3153
3154 // Color attachment
3155 m_colorFormat = VK_FORMAT_R8G8B8A8_UNORM;
3156 m_colorImageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u);
3157 m_colorImageExtent = makeExtent3D(16u, 16u, 1u);
3158 m_colorAttachmentImage = de::MovePtr<Image>(new Image(vk, device, allocator,
3159 makeImageCreateInfo(VK_IMAGE_TYPE_2D, m_colorImageExtent, m_colorFormat, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT),
3160 MemoryRequirement::Any));
3161
3162 // Pipeline
3163 m_colorAttachmentView = makeImageView (vk, device, **m_colorAttachmentImage, VK_IMAGE_VIEW_TYPE_2D, m_colorFormat, m_colorImageSubresourceRange);
3164 m_renderPass = makeRenderPass (vk, device, m_colorFormat);
3165 m_framebuffer = makeFramebuffer (vk, device, *m_renderPass, *m_colorAttachmentView, m_colorImageExtent.width, m_colorImageExtent.height, 1u);
3166 m_pipelineLayout = makePipelineLayout(vk, device, *m_descriptorSetLayout);
3167
3168 m_pipeline = GraphicsPipelineBuilder()
3169 .setPrimitiveTopology (VK_PRIMITIVE_TOPOLOGY_POINT_LIST)
3170 .setRenderSize (tcu::IVec2(static_cast<int>(m_colorImageExtent.width), static_cast<int>(m_colorImageExtent.height)))
3171 .setVertexInputSingleAttribute (VK_FORMAT_R32G32B32A32_UINT, tcu::getPixelSize(mapVkFormat(VK_FORMAT_R32G32B32A32_UINT)))
3172 .setShader (vk, device, VK_SHADER_STAGE_VERTEX_BIT, context.getBinaryCollection().get("input_vert"), DE_NULL)
3173 .setShader (vk, device, VK_SHADER_STAGE_FRAGMENT_BIT, context.getBinaryCollection().get("input_frag"), DE_NULL)
3174 .build (vk, device, *m_pipelineLayout, *m_renderPass, context.getPipelineCacheData());
3175 }
3176
3177 	void recordCommands (const VkCommandBuffer cmdBuffer)
3178 {
3179 const DeviceInterface& vk = m_context.getDeviceInterface();
3180 const VkDeviceSize dataSizeBytes = m_resource.getBuffer().size;
3181
3182 // Change color attachment image layout
3183 {
3184 const VkImageMemoryBarrier colorAttachmentLayoutBarrier = makeImageMemoryBarrier(
3185 (VkAccessFlags)0, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
3186 VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
3187 **m_colorAttachmentImage, m_colorImageSubresourceRange);
3188
3189 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, (VkDependencyFlags)0,
3190 0u, DE_NULL, 0u, DE_NULL, 1u, &colorAttachmentLayoutBarrier);
3191 }
3192
3193 {
3194 const VkRect2D renderArea = makeRect2D(m_colorImageExtent);
3195 const tcu::Vec4 clearColor = tcu::Vec4(0.0f, 0.0f, 0.0f, 1.0f);
3196
3197 beginRenderPass(vk, cmdBuffer, *m_renderPass, *m_framebuffer, renderArea, clearColor);
3198 }
3199
3200 vk.cmdBindPipeline(cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *m_pipeline);
3201 vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *m_pipelineLayout, 0u, 1u, &m_descriptorSet.get(), 0u, DE_NULL);
3202 {
3203 const VkDeviceSize vertexBufferOffset = 0ull;
3204 vk.cmdBindVertexBuffers(cmdBuffer, 0u, 1u, &m_resource.getBuffer().handle, &vertexBufferOffset);
3205 }
3206
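		// Draw one point per 16-byte uvec4 element; each vertex shader invocation copies its
		// attribute to the output SSBO.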
3207 vk.cmdDraw(cmdBuffer, static_cast<deUint32>(dataSizeBytes / sizeof(tcu::UVec4)), 1u, 0u, 0u);
3208
3209 endRenderPass(vk, cmdBuffer);
3210
3211 // Insert a barrier so data written by the shader is available to the host
3212 {
3213 const VkBufferMemoryBarrier barrier = makeBufferMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT, **m_outputBuffer, 0u, m_resource.getBuffer().size);
3214 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0, 0u, DE_NULL, 1u, &barrier, 0u, DE_NULL);
3215 }
3216 }
3217
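	// The resource buffer is read as a vertex attribute, hence the VERTEX_INPUT stage and
	// VERTEX_ATTRIBUTE_READ access below.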
3218 	SyncInfo getSyncInfo (void) const
3219 {
3220 const SyncInfo syncInfo =
3221 {
3222 VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, // VkPipelineStageFlags stageMask;
3223 VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT, // VkAccessFlags accessMask;
3224 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout imageLayout;
3225 };
3226 return syncInfo;
3227 }
3228
3229 	Data getData (void) const
3230 {
3231 return getHostBufferData(m_context, *m_outputBuffer, m_resource.getBuffer().size);
3232 }
3233
3234 private:
3235 OperationContext& m_context;
3236 Resource& m_resource;
3237 de::MovePtr<Buffer> m_outputBuffer;
3238 de::MovePtr<Buffer> m_indexBuffer;
3239 de::MovePtr<Buffer> m_indirectBuffer;
3240 Move<VkRenderPass> m_renderPass;
3241 Move<VkFramebuffer> m_framebuffer;
3242 Move<VkPipelineLayout> m_pipelineLayout;
3243 Move<VkPipeline> m_pipeline;
3244 VkFormat m_colorFormat;
3245 de::MovePtr<Image> m_colorAttachmentImage;
3246 Move<VkImageView> m_colorAttachmentView;
3247 VkExtent3D m_colorImageExtent;
3248 VkImageSubresourceRange m_colorImageSubresourceRange;
3249 Move<VkDescriptorPool> m_descriptorPool;
3250 Move<VkDescriptorSetLayout> m_descriptorSetLayout;
3251 Move<VkDescriptorSet> m_descriptorSet;
3252 };
3253
3254 class Support : public OperationSupport
3255 {
3256 public:
3257 	Support (const ResourceDescription& resourceDesc)
3258 : m_resourceDesc (resourceDesc)
3259 {
3260 DE_ASSERT(m_resourceDesc.type == RESOURCE_TYPE_BUFFER);
3261 }
3262
3263 	void initPrograms (SourceCollections& programCollection) const
3264 {
3265 // Vertex
3266 {
3267 int vertexStride = sizeof(tcu::UVec4);
3268 std::ostringstream src;
3269 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
3270 << "\n"
3271 << "layout(location = 0) in uvec4 v_in_data;\n"
3272 << "layout(set = 0, binding = 0, std140) writeonly buffer Output {\n"
3273 << " uvec4 data[" << m_resourceDesc.size.x()/vertexStride << "];\n"
3274 << "} b_out;\n"
3275 << "\n"
3276 << "void main (void)\n"
3277 << "{\n"
3278 << " b_out.data[gl_VertexIndex] = v_in_data;\n"
3279 << " gl_PointSize = 1.0f;\n"
3280 << "}\n";
3281 programCollection.glslSources.add("input_vert") << glu::VertexSource(src.str());
3282 }
3283
3284 // Fragment
3285 {
3286 std::ostringstream src;
3287 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
3288 << "\n"
3289 << "layout(location = 0) out vec4 o_color;\n"
3290 << "\n"
3291 << "void main (void)\n"
3292 << "{\n"
3293 << " o_color = vec4(1.0);\n"
3294 << "}\n";
3295 programCollection.glslSources.add("input_frag") << glu::FragmentSource(src.str());
3296 }
3297 }
3298
3299 	deUint32 getResourceUsageFlags (void) const
3300 {
3301 return VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
3302 }
3303
3304 	VkQueueFlags getQueueFlags (const OperationContext& context) const
3305 {
3306 DE_UNREF(context);
3307 return VK_QUEUE_GRAPHICS_BIT;
3308 }
3309
3310 	de::MovePtr<Operation> build (OperationContext& context, Resource& resource) const
3311 {
3312 return de::MovePtr<Operation>(new Implementation(context, resource));
3313 }
3314
3315 private:
3316 const ResourceDescription m_resourceDesc;
3317 };
3318
3319 } // VertexInput
3320
3321 } // anonymous ns
3322
3323 OperationContext::OperationContext (Context& context, PipelineCacheData& pipelineCacheData)
3324 : m_vki (context.getInstanceInterface())
3325 , m_vk (context.getDeviceInterface())
3326 , m_physicalDevice (context.getPhysicalDevice())
3327 , m_device (context.getDevice())
3328 , m_allocator (context.getDefaultAllocator())
3329 , m_progCollection (context.getBinaryCollection())
3330 , m_pipelineCacheData (pipelineCacheData)
3331 , m_deviceExtensions (context.getDeviceExtensions())
3332 , m_usedApiVersion (context.getUsedApiVersion())
3333 {
3334 }
3335
3336 OperationContext::OperationContext (Context& context, PipelineCacheData& pipelineCacheData, const DeviceInterface& vk, const VkDevice device, vk::Allocator& allocator)
3337 : m_vki (context.getInstanceInterface())
3338 , m_vk (vk)
3339 , m_physicalDevice (context.getPhysicalDevice())
3340 , m_device (device)
3341 , m_allocator (allocator)
3342 , m_progCollection (context.getBinaryCollection())
3343 , m_pipelineCacheData (pipelineCacheData)
3344 , m_deviceExtensions (context.getDeviceExtensions())
3345 , m_usedApiVersion (context.getUsedApiVersion())
3346 {
3347 }
3348
3349 OperationContext::OperationContext (const deUint32 apiVersion,
3350 const vk::InstanceInterface& vki,
3351 const vk::DeviceInterface& vkd,
3352 vk::VkPhysicalDevice physicalDevice,
3353 vk::VkDevice device,
3354 vk::Allocator& allocator,
3355 const std::vector<std::string>& deviceExtensions,
3356 vk::BinaryCollection& programCollection,
3357 PipelineCacheData& pipelineCacheData)
3358 : m_vki (vki)
3359 , m_vk (vkd)
3360 , m_physicalDevice (physicalDevice)
3361 , m_device (device)
3362 , m_allocator (allocator)
3363 , m_progCollection (programCollection)
3364 , m_pipelineCacheData (pipelineCacheData)
3365 , m_deviceExtensions (deviceExtensions)
3366 , m_usedApiVersion (apiVersion)
3367 {
3368 }
3369
3370 Resource::Resource (OperationContext& context, const ResourceDescription& desc, const deUint32 usage, const vk::VkSharingMode sharingMode, const std::vector<deUint32>& queueFamilyIndex)
3371 : m_type (desc.type)
3372 {
3373 const DeviceInterface& vk = context.getDeviceInterface();
3374 const InstanceInterface& vki = context.getInstanceInterface();
3375 const VkDevice device = context.getDevice();
3376 const VkPhysicalDevice physDevice = context.getPhysicalDevice();
3377 Allocator& allocator = context.getAllocator();
3378
3379 if (m_type == RESOURCE_TYPE_BUFFER || isIndirectBuffer(m_type))
3380 {
3381 m_bufferData.offset = 0u;
3382 m_bufferData.size = static_cast<VkDeviceSize>(desc.size.x());
3383 VkBufferCreateInfo bufferCreateInfo = makeBufferCreateInfo(m_bufferData.size, usage);
3384 bufferCreateInfo.sharingMode = sharingMode;
3385 if (queueFamilyIndex.size() > 0)
3386 {
3387 bufferCreateInfo.queueFamilyIndexCount = static_cast<deUint32>(queueFamilyIndex.size());
3388 bufferCreateInfo.pQueueFamilyIndices = &queueFamilyIndex[0];
3389 }
3390 m_buffer = de::MovePtr<Buffer>(new Buffer(vk, device, allocator, bufferCreateInfo, MemoryRequirement::Any));
3391 m_bufferData.handle = **m_buffer;
3392 }
3393 else if (m_type == RESOURCE_TYPE_IMAGE)
3394 {
3395 m_imageData.extent = makeExtent3D(desc.size.x(), std::max(1, desc.size.y()), std::max(1, desc.size.z()));
3396 m_imageData.imageType = desc.imageType;
3397 m_imageData.format = desc.imageFormat;
3398 m_imageData.subresourceRange = makeImageSubresourceRange(desc.imageAspect, 0u, 1u, 0u, 1u);
3399 m_imageData.subresourceLayers = makeImageSubresourceLayers(desc.imageAspect, 0u, 0u, 1u);
3400 VkImageCreateInfo imageInfo = makeImageCreateInfo(m_imageData.imageType, m_imageData.extent, m_imageData.format, usage);
3401 imageInfo.sharingMode = sharingMode;
3402 if (queueFamilyIndex.size() > 0)
3403 {
3404 imageInfo.queueFamilyIndexCount = static_cast<deUint32>(queueFamilyIndex.size());
3405 imageInfo.pQueueFamilyIndices = &queueFamilyIndex[0];
3406 }
3407
3408 VkImageFormatProperties imageFormatProperties;
3409 const VkResult formatResult = vki.getPhysicalDeviceImageFormatProperties(physDevice, imageInfo.format, imageInfo.imageType, imageInfo.tiling, imageInfo.usage, imageInfo.flags, &imageFormatProperties);
3410
3411 if (formatResult != VK_SUCCESS)
3412 TCU_THROW(NotSupportedError, "Image format is not supported");
3413
3414 m_image = de::MovePtr<Image>(new Image(vk, device, allocator, imageInfo, MemoryRequirement::Any));
3415 m_imageData.handle = **m_image;
3416 }
3417 else
3418 DE_ASSERT(0);
3419 }
3420
3421 Resource::Resource (ResourceType type,
3422 vk::Move<vk::VkBuffer> buffer,
3423 de::MovePtr<vk::Allocation> allocation,
3424 vk::VkDeviceSize offset,
3425 vk::VkDeviceSize size)
3426 : m_type (type)
3427 , m_buffer (new Buffer(buffer, allocation))
3428 {
3429 DE_ASSERT(type != RESOURCE_TYPE_IMAGE);
3430
3431 m_bufferData.handle = m_buffer->get();
3432 m_bufferData.offset = offset;
3433 m_bufferData.size = size;
3434 }
3435
3436 Resource::Resource (vk::Move<vk::VkImage> image,
3437 de::MovePtr<vk::Allocation> allocation,
3438 const vk::VkExtent3D& extent,
3439 vk::VkImageType imageType,
3440 vk::VkFormat format,
3441 vk::VkImageSubresourceRange subresourceRange,
3442 vk::VkImageSubresourceLayers subresourceLayers)
3443 : m_type (RESOURCE_TYPE_IMAGE)
3444 , m_image (new Image(image, allocation))
3445 {
3446 m_imageData.handle = m_image->get();
3447 m_imageData.extent = extent;
3448 m_imageData.imageType = imageType;
3449 m_imageData.format = format;
3450 m_imageData.subresourceRange = subresourceRange;
3451 m_imageData.subresourceLayers = subresourceLayers;
3452 }
3453
3454 vk::VkDeviceMemory Resource::getMemory (void) const
3455 {
3456 if (m_type == RESOURCE_TYPE_IMAGE)
3457 return m_image->getAllocation().getMemory();
3458 else
3459 return m_buffer->getAllocation().getMemory();
3460 }
3461
3462 //! \note This function exists for performance reasons. We create a large number of tests, and checking requirements here,
3463 //! before an OperationSupport object is created, is faster.
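//!
//! Typical use (a sketch, not a prescribed flow): check support first, then create the
//! OperationSupport object via makeOperationSupport() below, e.g.
//!     if (isResourceSupported(opName, resourceDesc))
//!         de::MovePtr<OperationSupport> support = makeOperationSupport(opName, resourceDesc);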
3464 bool isResourceSupported (const OperationName opName, const ResourceDescription& resourceDesc)
3465 {
3466 switch (opName)
3467 {
3468 case OPERATION_NAME_WRITE_FILL_BUFFER:
3469 case OPERATION_NAME_WRITE_COPY_BUFFER:
3470 case OPERATION_NAME_WRITE_COPY_IMAGE_TO_BUFFER:
3471 case OPERATION_NAME_WRITE_SSBO_VERTEX:
3472 case OPERATION_NAME_WRITE_SSBO_TESSELLATION_CONTROL:
3473 case OPERATION_NAME_WRITE_SSBO_TESSELLATION_EVALUATION:
3474 case OPERATION_NAME_WRITE_SSBO_GEOMETRY:
3475 case OPERATION_NAME_WRITE_SSBO_FRAGMENT:
3476 case OPERATION_NAME_WRITE_SSBO_COMPUTE:
3477 case OPERATION_NAME_WRITE_SSBO_COMPUTE_INDIRECT:
3478 case OPERATION_NAME_READ_COPY_BUFFER:
3479 case OPERATION_NAME_READ_COPY_BUFFER_TO_IMAGE:
3480 case OPERATION_NAME_READ_SSBO_VERTEX:
3481 case OPERATION_NAME_READ_SSBO_TESSELLATION_CONTROL:
3482 case OPERATION_NAME_READ_SSBO_TESSELLATION_EVALUATION:
3483 case OPERATION_NAME_READ_SSBO_GEOMETRY:
3484 case OPERATION_NAME_READ_SSBO_FRAGMENT:
3485 case OPERATION_NAME_READ_SSBO_COMPUTE:
3486 case OPERATION_NAME_READ_SSBO_COMPUTE_INDIRECT:
3487 case OPERATION_NAME_READ_VERTEX_INPUT:
3488 return resourceDesc.type == RESOURCE_TYPE_BUFFER;
3489
3490 case OPERATION_NAME_WRITE_INDIRECT_BUFFER_DRAW:
3491 case OPERATION_NAME_READ_INDIRECT_BUFFER_DRAW:
3492 return resourceDesc.type == RESOURCE_TYPE_INDIRECT_BUFFER_DRAW;
3493
3494 case OPERATION_NAME_WRITE_INDIRECT_BUFFER_DRAW_INDEXED:
3495 case OPERATION_NAME_READ_INDIRECT_BUFFER_DRAW_INDEXED:
3496 return resourceDesc.type == RESOURCE_TYPE_INDIRECT_BUFFER_DRAW_INDEXED;
3497
3498 case OPERATION_NAME_WRITE_INDIRECT_BUFFER_DISPATCH:
3499 case OPERATION_NAME_READ_INDIRECT_BUFFER_DISPATCH:
3500 return resourceDesc.type == RESOURCE_TYPE_INDIRECT_BUFFER_DISPATCH;
3501
3502 case OPERATION_NAME_WRITE_UPDATE_BUFFER:
3503 return resourceDesc.type == RESOURCE_TYPE_BUFFER && resourceDesc.size.x() <= MAX_UPDATE_BUFFER_SIZE;
3504
3505 case OPERATION_NAME_WRITE_COPY_IMAGE:
3506 case OPERATION_NAME_WRITE_COPY_BUFFER_TO_IMAGE:
3507 case OPERATION_NAME_READ_COPY_IMAGE:
3508 case OPERATION_NAME_READ_COPY_IMAGE_TO_BUFFER:
3509 return resourceDesc.type == RESOURCE_TYPE_IMAGE;
3510
3511 case OPERATION_NAME_WRITE_CLEAR_ATTACHMENTS:
3512 return resourceDesc.type == RESOURCE_TYPE_IMAGE && resourceDesc.imageType != VK_IMAGE_TYPE_3D;
3513
3514 case OPERATION_NAME_WRITE_BLIT_IMAGE:
3515 case OPERATION_NAME_READ_BLIT_IMAGE:
3516 case OPERATION_NAME_WRITE_IMAGE_VERTEX:
3517 case OPERATION_NAME_WRITE_IMAGE_TESSELLATION_CONTROL:
3518 case OPERATION_NAME_WRITE_IMAGE_TESSELLATION_EVALUATION:
3519 case OPERATION_NAME_WRITE_IMAGE_GEOMETRY:
3520 case OPERATION_NAME_WRITE_IMAGE_FRAGMENT:
3521 case OPERATION_NAME_WRITE_IMAGE_COMPUTE:
3522 case OPERATION_NAME_WRITE_IMAGE_COMPUTE_INDIRECT:
3523 case OPERATION_NAME_READ_IMAGE_VERTEX:
3524 case OPERATION_NAME_READ_IMAGE_TESSELLATION_CONTROL:
3525 case OPERATION_NAME_READ_IMAGE_TESSELLATION_EVALUATION:
3526 case OPERATION_NAME_READ_IMAGE_GEOMETRY:
3527 case OPERATION_NAME_READ_IMAGE_FRAGMENT:
3528 case OPERATION_NAME_READ_IMAGE_COMPUTE:
3529 case OPERATION_NAME_READ_IMAGE_COMPUTE_INDIRECT:
3530 return resourceDesc.type == RESOURCE_TYPE_IMAGE && resourceDesc.imageAspect == VK_IMAGE_ASPECT_COLOR_BIT;
3531
3532 case OPERATION_NAME_READ_UBO_VERTEX:
3533 case OPERATION_NAME_READ_UBO_TESSELLATION_CONTROL:
3534 case OPERATION_NAME_READ_UBO_TESSELLATION_EVALUATION:
3535 case OPERATION_NAME_READ_UBO_GEOMETRY:
3536 case OPERATION_NAME_READ_UBO_FRAGMENT:
3537 case OPERATION_NAME_READ_UBO_COMPUTE:
3538 case OPERATION_NAME_READ_UBO_COMPUTE_INDIRECT:
3539 return resourceDesc.type == RESOURCE_TYPE_BUFFER && resourceDesc.size.x() <= MAX_UBO_RANGE;
3540
3541 case OPERATION_NAME_WRITE_CLEAR_COLOR_IMAGE:
3542 return resourceDesc.type == RESOURCE_TYPE_IMAGE && resourceDesc.imageAspect == VK_IMAGE_ASPECT_COLOR_BIT;
3543
3544 case OPERATION_NAME_WRITE_CLEAR_DEPTH_STENCIL_IMAGE:
3545 return resourceDesc.type == RESOURCE_TYPE_IMAGE && (resourceDesc.imageAspect & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT));
3546
3547 case OPERATION_NAME_WRITE_DRAW:
3548 case OPERATION_NAME_WRITE_DRAW_INDEXED:
3549 case OPERATION_NAME_WRITE_DRAW_INDIRECT:
3550 case OPERATION_NAME_WRITE_DRAW_INDEXED_INDIRECT:
3551 return resourceDesc.type == RESOURCE_TYPE_IMAGE && resourceDesc.imageType == VK_IMAGE_TYPE_2D
3552 && (resourceDesc.imageAspect & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) == 0;
3553
3554 default:
3555 DE_ASSERT(0);
3556 return false;
3557 }
3558 }
3559
3560 std::string getOperationName (const OperationName opName)
3561 {
3562 switch (opName)
3563 {
3564 case OPERATION_NAME_WRITE_FILL_BUFFER: return "write_fill_buffer";
3565 case OPERATION_NAME_WRITE_UPDATE_BUFFER: return "write_update_buffer";
3566 case OPERATION_NAME_WRITE_COPY_BUFFER: return "write_copy_buffer";
3567 case OPERATION_NAME_WRITE_COPY_BUFFER_TO_IMAGE: return "write_copy_buffer_to_image";
3568 case OPERATION_NAME_WRITE_COPY_IMAGE_TO_BUFFER: return "write_copy_image_to_buffer";
3569 case OPERATION_NAME_WRITE_COPY_IMAGE: return "write_copy_image";
3570 case OPERATION_NAME_WRITE_BLIT_IMAGE: return "write_blit_image";
3571 case OPERATION_NAME_WRITE_SSBO_VERTEX: return "write_ssbo_vertex";
3572 case OPERATION_NAME_WRITE_SSBO_TESSELLATION_CONTROL: return "write_ssbo_tess_control";
3573 case OPERATION_NAME_WRITE_SSBO_TESSELLATION_EVALUATION: return "write_ssbo_tess_eval";
3574 case OPERATION_NAME_WRITE_SSBO_GEOMETRY: return "write_ssbo_geometry";
3575 case OPERATION_NAME_WRITE_SSBO_FRAGMENT: return "write_ssbo_fragment";
3576 case OPERATION_NAME_WRITE_SSBO_COMPUTE: return "write_ssbo_compute";
3577 case OPERATION_NAME_WRITE_SSBO_COMPUTE_INDIRECT: return "write_ssbo_compute_indirect";
3578 case OPERATION_NAME_WRITE_IMAGE_VERTEX: return "write_image_vertex";
3579 case OPERATION_NAME_WRITE_IMAGE_TESSELLATION_CONTROL: return "write_image_tess_control";
3580 case OPERATION_NAME_WRITE_IMAGE_TESSELLATION_EVALUATION: return "write_image_tess_eval";
3581 case OPERATION_NAME_WRITE_IMAGE_GEOMETRY: return "write_image_geometry";
3582 case OPERATION_NAME_WRITE_IMAGE_FRAGMENT: return "write_image_fragment";
3583 case OPERATION_NAME_WRITE_IMAGE_COMPUTE: return "write_image_compute";
3584 case OPERATION_NAME_WRITE_IMAGE_COMPUTE_INDIRECT: return "write_image_compute_indirect";
3585 case OPERATION_NAME_WRITE_CLEAR_COLOR_IMAGE: return "write_clear_color_image";
3586 case OPERATION_NAME_WRITE_CLEAR_DEPTH_STENCIL_IMAGE: return "write_clear_depth_stencil_image";
3587 case OPERATION_NAME_WRITE_DRAW: return "write_draw";
3588 case OPERATION_NAME_WRITE_DRAW_INDEXED: return "write_draw_indexed";
3589 case OPERATION_NAME_WRITE_DRAW_INDIRECT: return "write_draw_indirect";
3590 case OPERATION_NAME_WRITE_DRAW_INDEXED_INDIRECT: return "write_draw_indexed_indirect";
3591 case OPERATION_NAME_WRITE_CLEAR_ATTACHMENTS: return "write_clear_attachments";
3592 case OPERATION_NAME_WRITE_INDIRECT_BUFFER_DRAW: return "write_indirect_buffer_draw";
3593 case OPERATION_NAME_WRITE_INDIRECT_BUFFER_DRAW_INDEXED: return "write_indirect_buffer_draw_indexed";
3594 case OPERATION_NAME_WRITE_INDIRECT_BUFFER_DISPATCH: return "write_indirect_buffer_dispatch";
3595
3596 case OPERATION_NAME_READ_COPY_BUFFER: return "read_copy_buffer";
3597 case OPERATION_NAME_READ_COPY_BUFFER_TO_IMAGE: return "read_copy_buffer_to_image";
3598 case OPERATION_NAME_READ_COPY_IMAGE_TO_BUFFER: return "read_copy_image_to_buffer";
3599 case OPERATION_NAME_READ_COPY_IMAGE: return "read_copy_image";
3600 case OPERATION_NAME_READ_BLIT_IMAGE: return "read_blit_image";
3601 case OPERATION_NAME_READ_UBO_VERTEX: return "read_ubo_vertex";
3602 case OPERATION_NAME_READ_UBO_TESSELLATION_CONTROL: return "read_ubo_tess_control";
3603 case OPERATION_NAME_READ_UBO_TESSELLATION_EVALUATION: return "read_ubo_tess_eval";
3604 case OPERATION_NAME_READ_UBO_GEOMETRY: return "read_ubo_geometry";
3605 case OPERATION_NAME_READ_UBO_FRAGMENT: return "read_ubo_fragment";
3606 case OPERATION_NAME_READ_UBO_COMPUTE: return "read_ubo_compute";
3607 case OPERATION_NAME_READ_UBO_COMPUTE_INDIRECT: return "read_ubo_compute_indirect";
3608 case OPERATION_NAME_READ_SSBO_VERTEX: return "read_ssbo_vertex";
3609 case OPERATION_NAME_READ_SSBO_TESSELLATION_CONTROL: return "read_ssbo_tess_control";
3610 case OPERATION_NAME_READ_SSBO_TESSELLATION_EVALUATION: return "read_ssbo_tess_eval";
3611 case OPERATION_NAME_READ_SSBO_GEOMETRY: return "read_ssbo_geometry";
3612 case OPERATION_NAME_READ_SSBO_FRAGMENT: return "read_ssbo_fragment";
3613 case OPERATION_NAME_READ_SSBO_COMPUTE: return "read_ssbo_compute";
3614 case OPERATION_NAME_READ_SSBO_COMPUTE_INDIRECT: return "read_ssbo_compute_indirect";
3615 case OPERATION_NAME_READ_IMAGE_VERTEX: return "read_image_vertex";
3616 case OPERATION_NAME_READ_IMAGE_TESSELLATION_CONTROL: return "read_image_tess_control";
3617 case OPERATION_NAME_READ_IMAGE_TESSELLATION_EVALUATION: return "read_image_tess_eval";
3618 case OPERATION_NAME_READ_IMAGE_GEOMETRY: return "read_image_geometry";
3619 case OPERATION_NAME_READ_IMAGE_FRAGMENT: return "read_image_fragment";
3620 case OPERATION_NAME_READ_IMAGE_COMPUTE: return "read_image_compute";
3621 case OPERATION_NAME_READ_IMAGE_COMPUTE_INDIRECT: return "read_image_compute_indirect";
3622 case OPERATION_NAME_READ_INDIRECT_BUFFER_DRAW: return "read_indirect_buffer_draw";
3623 case OPERATION_NAME_READ_INDIRECT_BUFFER_DRAW_INDEXED: return "read_indirect_buffer_draw_indexed";
3624 case OPERATION_NAME_READ_INDIRECT_BUFFER_DISPATCH: return "read_indirect_buffer_dispatch";
3625 case OPERATION_NAME_READ_VERTEX_INPUT: return "read_vertex_input";
3626
3627 default:
3628 DE_ASSERT(0);
3629 return "";
3630 }
3631 }
3632
3633 de::MovePtr<OperationSupport> makeOperationSupport (const OperationName opName, const ResourceDescription& resourceDesc)
3634 {
3635 switch (opName)
3636 {
3637 case OPERATION_NAME_WRITE_FILL_BUFFER: return de::MovePtr<OperationSupport>(new FillUpdateBuffer ::Support (resourceDesc, FillUpdateBuffer::BUFFER_OP_FILL));
3638 case OPERATION_NAME_WRITE_UPDATE_BUFFER: return de::MovePtr<OperationSupport>(new FillUpdateBuffer ::Support (resourceDesc, FillUpdateBuffer::BUFFER_OP_UPDATE));
3639 case OPERATION_NAME_WRITE_COPY_BUFFER: return de::MovePtr<OperationSupport>(new CopyBuffer ::Support (resourceDesc, ACCESS_MODE_WRITE));
3640 case OPERATION_NAME_WRITE_COPY_BUFFER_TO_IMAGE: return de::MovePtr<OperationSupport>(new CopyBufferToImage ::Support (resourceDesc, ACCESS_MODE_WRITE));
3641 case OPERATION_NAME_WRITE_COPY_IMAGE_TO_BUFFER: return de::MovePtr<OperationSupport>(new CopyImageToBuffer ::Support (resourceDesc, ACCESS_MODE_WRITE));
3642 case OPERATION_NAME_WRITE_COPY_IMAGE: return de::MovePtr<OperationSupport>(new CopyBlitImage ::Support (resourceDesc, CopyBlitImage::TYPE_COPY, ACCESS_MODE_WRITE));
3643 case OPERATION_NAME_WRITE_BLIT_IMAGE: return de::MovePtr<OperationSupport>(new CopyBlitImage ::Support (resourceDesc, CopyBlitImage::TYPE_BLIT, ACCESS_MODE_WRITE));
3644 case OPERATION_NAME_WRITE_SSBO_VERTEX: return de::MovePtr<OperationSupport>(new ShaderAccess ::BufferSupport (resourceDesc, BUFFER_TYPE_STORAGE, ACCESS_MODE_WRITE, VK_SHADER_STAGE_VERTEX_BIT));
3645 case OPERATION_NAME_WRITE_SSBO_TESSELLATION_CONTROL: return de::MovePtr<OperationSupport>(new ShaderAccess ::BufferSupport (resourceDesc, BUFFER_TYPE_STORAGE, ACCESS_MODE_WRITE, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT));
3646 case OPERATION_NAME_WRITE_SSBO_TESSELLATION_EVALUATION: return de::MovePtr<OperationSupport>(new ShaderAccess ::BufferSupport (resourceDesc, BUFFER_TYPE_STORAGE, ACCESS_MODE_WRITE, VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT));
3647 case OPERATION_NAME_WRITE_SSBO_GEOMETRY: return de::MovePtr<OperationSupport>(new ShaderAccess ::BufferSupport (resourceDesc, BUFFER_TYPE_STORAGE, ACCESS_MODE_WRITE, VK_SHADER_STAGE_GEOMETRY_BIT));
3648 case OPERATION_NAME_WRITE_SSBO_FRAGMENT: return de::MovePtr<OperationSupport>(new ShaderAccess ::BufferSupport (resourceDesc, BUFFER_TYPE_STORAGE, ACCESS_MODE_WRITE, VK_SHADER_STAGE_FRAGMENT_BIT));
3649 case OPERATION_NAME_WRITE_SSBO_COMPUTE: return de::MovePtr<OperationSupport>(new ShaderAccess ::BufferSupport (resourceDesc, BUFFER_TYPE_STORAGE, ACCESS_MODE_WRITE, VK_SHADER_STAGE_COMPUTE_BIT));
3650 case OPERATION_NAME_WRITE_SSBO_COMPUTE_INDIRECT: return de::MovePtr<OperationSupport>(new ShaderAccess ::BufferSupport (resourceDesc, BUFFER_TYPE_STORAGE, ACCESS_MODE_WRITE, VK_SHADER_STAGE_COMPUTE_BIT, ShaderAccess::DISPATCH_CALL_DISPATCH_INDIRECT));
3651 case OPERATION_NAME_WRITE_IMAGE_VERTEX: return de::MovePtr<OperationSupport>(new ShaderAccess ::ImageSupport (resourceDesc, ACCESS_MODE_WRITE, VK_SHADER_STAGE_VERTEX_BIT));
3652 case OPERATION_NAME_WRITE_IMAGE_TESSELLATION_CONTROL: return de::MovePtr<OperationSupport>(new ShaderAccess ::ImageSupport (resourceDesc, ACCESS_MODE_WRITE, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT));
3653 case OPERATION_NAME_WRITE_IMAGE_TESSELLATION_EVALUATION: return de::MovePtr<OperationSupport>(new ShaderAccess ::ImageSupport (resourceDesc, ACCESS_MODE_WRITE, VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT));
3654 case OPERATION_NAME_WRITE_IMAGE_GEOMETRY: return de::MovePtr<OperationSupport>(new ShaderAccess ::ImageSupport (resourceDesc, ACCESS_MODE_WRITE, VK_SHADER_STAGE_GEOMETRY_BIT));
3655 case OPERATION_NAME_WRITE_IMAGE_FRAGMENT: return de::MovePtr<OperationSupport>(new ShaderAccess ::ImageSupport (resourceDesc, ACCESS_MODE_WRITE, VK_SHADER_STAGE_FRAGMENT_BIT));
3656 case OPERATION_NAME_WRITE_IMAGE_COMPUTE: return de::MovePtr<OperationSupport>(new ShaderAccess ::ImageSupport (resourceDesc, ACCESS_MODE_WRITE, VK_SHADER_STAGE_COMPUTE_BIT));
3657 case OPERATION_NAME_WRITE_IMAGE_COMPUTE_INDIRECT: return de::MovePtr<OperationSupport>(new ShaderAccess ::ImageSupport (resourceDesc, ACCESS_MODE_WRITE, VK_SHADER_STAGE_COMPUTE_BIT, ShaderAccess::DISPATCH_CALL_DISPATCH_INDIRECT));
3658 case OPERATION_NAME_WRITE_CLEAR_COLOR_IMAGE: return de::MovePtr<OperationSupport>(new ClearImage ::Support (resourceDesc, ClearImage::CLEAR_MODE_COLOR));
3659 case OPERATION_NAME_WRITE_CLEAR_DEPTH_STENCIL_IMAGE: return de::MovePtr<OperationSupport>(new ClearImage ::Support (resourceDesc, ClearImage::CLEAR_MODE_DEPTH_STENCIL));
3660 case OPERATION_NAME_WRITE_DRAW: return de::MovePtr<OperationSupport>(new Draw ::Support (resourceDesc, Draw::DRAW_CALL_DRAW));
3661 case OPERATION_NAME_WRITE_DRAW_INDEXED: return de::MovePtr<OperationSupport>(new Draw ::Support (resourceDesc, Draw::DRAW_CALL_DRAW_INDEXED));
3662 case OPERATION_NAME_WRITE_DRAW_INDIRECT: return de::MovePtr<OperationSupport>(new Draw ::Support (resourceDesc, Draw::DRAW_CALL_DRAW_INDIRECT));
3663 case OPERATION_NAME_WRITE_DRAW_INDEXED_INDIRECT: return de::MovePtr<OperationSupport>(new Draw ::Support (resourceDesc, Draw::DRAW_CALL_DRAW_INDEXED_INDIRECT));
3664 case OPERATION_NAME_WRITE_CLEAR_ATTACHMENTS: return de::MovePtr<OperationSupport>(new ClearAttachments ::Support (resourceDesc));
3665 case OPERATION_NAME_WRITE_INDIRECT_BUFFER_DRAW: return de::MovePtr<OperationSupport>(new IndirectBuffer ::WriteSupport (resourceDesc));
3666 case OPERATION_NAME_WRITE_INDIRECT_BUFFER_DRAW_INDEXED: return de::MovePtr<OperationSupport>(new IndirectBuffer ::WriteSupport (resourceDesc));
3667 case OPERATION_NAME_WRITE_INDIRECT_BUFFER_DISPATCH: return de::MovePtr<OperationSupport>(new IndirectBuffer ::WriteSupport (resourceDesc));
3668
3669 case OPERATION_NAME_READ_COPY_BUFFER: return de::MovePtr<OperationSupport>(new CopyBuffer ::Support (resourceDesc, ACCESS_MODE_READ));
3670 case OPERATION_NAME_READ_COPY_BUFFER_TO_IMAGE: return de::MovePtr<OperationSupport>(new CopyBufferToImage ::Support (resourceDesc, ACCESS_MODE_READ));
3671 case OPERATION_NAME_READ_COPY_IMAGE_TO_BUFFER: return de::MovePtr<OperationSupport>(new CopyImageToBuffer ::Support (resourceDesc, ACCESS_MODE_READ));
3672 case OPERATION_NAME_READ_COPY_IMAGE: return de::MovePtr<OperationSupport>(new CopyBlitImage ::Support (resourceDesc, CopyBlitImage::TYPE_COPY, ACCESS_MODE_READ));
3673 case OPERATION_NAME_READ_BLIT_IMAGE: return de::MovePtr<OperationSupport>(new CopyBlitImage ::Support (resourceDesc, CopyBlitImage::TYPE_BLIT, ACCESS_MODE_READ));
3674 case OPERATION_NAME_READ_UBO_VERTEX: return de::MovePtr<OperationSupport>(new ShaderAccess ::BufferSupport (resourceDesc, BUFFER_TYPE_UNIFORM, ACCESS_MODE_READ, VK_SHADER_STAGE_VERTEX_BIT));
3675 case OPERATION_NAME_READ_UBO_TESSELLATION_CONTROL: return de::MovePtr<OperationSupport>(new ShaderAccess ::BufferSupport (resourceDesc, BUFFER_TYPE_UNIFORM, ACCESS_MODE_READ, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT));
3676 case OPERATION_NAME_READ_UBO_TESSELLATION_EVALUATION: return de::MovePtr<OperationSupport>(new ShaderAccess ::BufferSupport (resourceDesc, BUFFER_TYPE_UNIFORM, ACCESS_MODE_READ, VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT));
3677 case OPERATION_NAME_READ_UBO_GEOMETRY: return de::MovePtr<OperationSupport>(new ShaderAccess ::BufferSupport (resourceDesc, BUFFER_TYPE_UNIFORM, ACCESS_MODE_READ, VK_SHADER_STAGE_GEOMETRY_BIT));
3678 case OPERATION_NAME_READ_UBO_FRAGMENT: return de::MovePtr<OperationSupport>(new ShaderAccess ::BufferSupport (resourceDesc, BUFFER_TYPE_UNIFORM, ACCESS_MODE_READ, VK_SHADER_STAGE_FRAGMENT_BIT));
3679 case OPERATION_NAME_READ_UBO_COMPUTE: return de::MovePtr<OperationSupport>(new ShaderAccess ::BufferSupport (resourceDesc, BUFFER_TYPE_UNIFORM, ACCESS_MODE_READ, VK_SHADER_STAGE_COMPUTE_BIT));
3680 case OPERATION_NAME_READ_UBO_COMPUTE_INDIRECT: return de::MovePtr<OperationSupport>(new ShaderAccess ::BufferSupport (resourceDesc, BUFFER_TYPE_UNIFORM, ACCESS_MODE_READ, VK_SHADER_STAGE_COMPUTE_BIT, ShaderAccess::DISPATCH_CALL_DISPATCH_INDIRECT));
3681 case OPERATION_NAME_READ_SSBO_VERTEX: return de::MovePtr<OperationSupport>(new ShaderAccess ::BufferSupport (resourceDesc, BUFFER_TYPE_STORAGE, ACCESS_MODE_READ, VK_SHADER_STAGE_VERTEX_BIT));
3682 case OPERATION_NAME_READ_SSBO_TESSELLATION_CONTROL: return de::MovePtr<OperationSupport>(new ShaderAccess ::BufferSupport (resourceDesc, BUFFER_TYPE_STORAGE, ACCESS_MODE_READ, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT));
3683 case OPERATION_NAME_READ_SSBO_TESSELLATION_EVALUATION: return de::MovePtr<OperationSupport>(new ShaderAccess ::BufferSupport (resourceDesc, BUFFER_TYPE_STORAGE, ACCESS_MODE_READ, VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT));
3684 case OPERATION_NAME_READ_SSBO_GEOMETRY: return de::MovePtr<OperationSupport>(new ShaderAccess ::BufferSupport (resourceDesc, BUFFER_TYPE_STORAGE, ACCESS_MODE_READ, VK_SHADER_STAGE_GEOMETRY_BIT));
3685 case OPERATION_NAME_READ_SSBO_FRAGMENT: return de::MovePtr<OperationSupport>(new ShaderAccess ::BufferSupport (resourceDesc, BUFFER_TYPE_STORAGE, ACCESS_MODE_READ, VK_SHADER_STAGE_FRAGMENT_BIT));
3686 case OPERATION_NAME_READ_SSBO_COMPUTE: return de::MovePtr<OperationSupport>(new ShaderAccess ::BufferSupport (resourceDesc, BUFFER_TYPE_STORAGE, ACCESS_MODE_READ, VK_SHADER_STAGE_COMPUTE_BIT));
3687 case OPERATION_NAME_READ_SSBO_COMPUTE_INDIRECT: return de::MovePtr<OperationSupport>(new ShaderAccess ::BufferSupport (resourceDesc, BUFFER_TYPE_STORAGE, ACCESS_MODE_READ, VK_SHADER_STAGE_COMPUTE_BIT, ShaderAccess::DISPATCH_CALL_DISPATCH_INDIRECT));
3688 case OPERATION_NAME_READ_IMAGE_VERTEX: return de::MovePtr<OperationSupport>(new ShaderAccess ::ImageSupport (resourceDesc, ACCESS_MODE_READ, VK_SHADER_STAGE_VERTEX_BIT));
3689 case OPERATION_NAME_READ_IMAGE_TESSELLATION_CONTROL: return de::MovePtr<OperationSupport>(new ShaderAccess ::ImageSupport (resourceDesc, ACCESS_MODE_READ, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT));
3690 case OPERATION_NAME_READ_IMAGE_TESSELLATION_EVALUATION: return de::MovePtr<OperationSupport>(new ShaderAccess ::ImageSupport (resourceDesc, ACCESS_MODE_READ, VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT));
3691 case OPERATION_NAME_READ_IMAGE_GEOMETRY: return de::MovePtr<OperationSupport>(new ShaderAccess ::ImageSupport (resourceDesc, ACCESS_MODE_READ, VK_SHADER_STAGE_GEOMETRY_BIT));
3692 case OPERATION_NAME_READ_IMAGE_FRAGMENT: return de::MovePtr<OperationSupport>(new ShaderAccess ::ImageSupport (resourceDesc, ACCESS_MODE_READ, VK_SHADER_STAGE_FRAGMENT_BIT));
3693 case OPERATION_NAME_READ_IMAGE_COMPUTE: return de::MovePtr<OperationSupport>(new ShaderAccess ::ImageSupport (resourceDesc, ACCESS_MODE_READ, VK_SHADER_STAGE_COMPUTE_BIT));
3694 case OPERATION_NAME_READ_IMAGE_COMPUTE_INDIRECT: return de::MovePtr<OperationSupport>(new ShaderAccess ::ImageSupport (resourceDesc, ACCESS_MODE_READ, VK_SHADER_STAGE_COMPUTE_BIT, ShaderAccess::DISPATCH_CALL_DISPATCH_INDIRECT));
3695 case OPERATION_NAME_READ_INDIRECT_BUFFER_DRAW: return de::MovePtr<OperationSupport>(new IndirectBuffer ::ReadSupport (resourceDesc));
3696 case OPERATION_NAME_READ_INDIRECT_BUFFER_DRAW_INDEXED: return de::MovePtr<OperationSupport>(new IndirectBuffer ::ReadSupport (resourceDesc));
3697 case OPERATION_NAME_READ_INDIRECT_BUFFER_DISPATCH: return de::MovePtr<OperationSupport>(new IndirectBuffer ::ReadSupport (resourceDesc));
3698 case OPERATION_NAME_READ_VERTEX_INPUT: return de::MovePtr<OperationSupport>(new VertexInput ::Support (resourceDesc));
3699
3700 default:
3701 DE_ASSERT(0);
3702 return de::MovePtr<OperationSupport>();
3703 }
3704 }
3705
3706 } // synchronization
3707 } // vkt
3708