1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
4 *
5 * Copyright (c) 2016 The Khronos Group Inc.
6 *
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
10 *
11 * http://www.apache.org/licenses/LICENSE-2.0
12 *
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
18 *
19 *//*!
20 * \file
21 * \brief Synchronization operation abstraction
22 *//*--------------------------------------------------------------------*/
23
24 #include "vktSynchronizationOperation.hpp"
25 #include "vkDefs.hpp"
26 #include "vktTestCase.hpp"
27 #include "vktTestCaseUtil.hpp"
28 #include "vkRef.hpp"
29 #include "vkRefUtil.hpp"
30 #include "vkMemUtil.hpp"
31 #include "vkQueryUtil.hpp"
32 #include "vkTypeUtil.hpp"
33 #include "vkImageUtil.hpp"
34 #include "vkBuilderUtil.hpp"
35 #include "deUniquePtr.hpp"
36 #include "tcuTestLog.hpp"
37 #include "tcuTextureUtil.hpp"
38 #include <vector>
39 #include <sstream>
40
41 namespace vkt
42 {
43 namespace synchronization
44 {
45 namespace
46 {
47 using namespace vk;
48
49 enum Constants
50 {
51 MAX_IMAGE_DIMENSION_2D = 0x1000u,
52 MAX_UBO_RANGE = 0x4000u,
53 MAX_UPDATE_BUFFER_SIZE = 0x10000u,
54 };
55
56 enum BufferType
57 {
58 BUFFER_TYPE_UNIFORM,
59 BUFFER_TYPE_STORAGE,
60 };
61
62 enum AccessMode
63 {
64 ACCESS_MODE_READ,
65 ACCESS_MODE_WRITE,
66 };
67
68 enum PipelineType
69 {
70 PIPELINE_TYPE_GRAPHICS,
71 PIPELINE_TYPE_COMPUTE,
72 };
73
74 static const char* const s_perVertexBlock = "gl_PerVertex {\n"
75 " vec4 gl_Position;\n"
76 "}";
77
78 //! A pipeline that can be embedded inside an operation.
79 class Pipeline
80 {
81 public:
virtual ~Pipeline (void) {}
83 virtual void recordCommands (OperationContext& context, const VkCommandBuffer cmdBuffer, const VkDescriptorSet descriptorSet) = 0;
84 };
85
86 //! Vertex data that covers the whole viewport with two triangles.
87 class VertexGrid
88 {
89 public:
VertexGrid (OperationContext& context)
91 {
92 const DeviceInterface& vk = context.getDeviceInterface();
93 const VkDevice device = context.getDevice();
94 Allocator& allocator = context.getAllocator();
95
96 // Vertex positions
97 {
98 m_vertexData.push_back(tcu::Vec4( 1.0f, 1.0f, 0.0f, 1.0f));
99 m_vertexData.push_back(tcu::Vec4(-1.0f, 1.0f, 0.0f, 1.0f));
100 m_vertexData.push_back(tcu::Vec4(-1.0f, -1.0f, 0.0f, 1.0f));
101
102 m_vertexData.push_back(tcu::Vec4(-1.0f, -1.0f, 0.0f, 1.0f));
103 m_vertexData.push_back(tcu::Vec4( 1.0f, -1.0f, 0.0f, 1.0f));
104 m_vertexData.push_back(tcu::Vec4( 1.0f, 1.0f, 0.0f, 1.0f));
105 }
106 m_vertexFormat = VK_FORMAT_R32G32B32A32_SFLOAT;
107 m_vertexStride = tcu::getPixelSize(mapVkFormat(m_vertexFormat));
108 const VkDeviceSize vertexDataSizeBytes = m_vertexData.size() * sizeof(m_vertexData[0]);
109
110 m_vertexBuffer = de::MovePtr<Buffer>(new Buffer(vk, device, allocator,
111 makeBufferCreateInfo(vertexDataSizeBytes, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT), MemoryRequirement::HostVisible));
112
113 DE_ASSERT(sizeof(m_vertexData[0]) == m_vertexStride);
114
115 {
116 const Allocation& alloc = m_vertexBuffer->getAllocation();
117 deMemcpy(alloc.getHostPtr(), &m_vertexData[0], static_cast<std::size_t>(vertexDataSizeBytes));
118 flushMappedMemoryRange(vk, device, alloc.getMemory(), alloc.getOffset(), vertexDataSizeBytes);
119 }
120
121 // Indices
122 {
123 const VkDeviceSize indexBufferSizeBytes = sizeof(deUint32) * m_vertexData.size();
124 const deUint32 numIndices = static_cast<deUint32>(m_vertexData.size());
125 m_indexBuffer = de::MovePtr<Buffer>(new Buffer(vk, device, allocator,
126 makeBufferCreateInfo(indexBufferSizeBytes, VK_BUFFER_USAGE_INDEX_BUFFER_BIT), MemoryRequirement::HostVisible));
127
128 const Allocation& alloc = m_indexBuffer->getAllocation();
129 deUint32* const pData = static_cast<deUint32*>(alloc.getHostPtr());
130
131 for (deUint32 i = 0; i < numIndices; ++i)
132 pData[i] = i;
133
134 flushMappedMemoryRange(vk, device, alloc.getMemory(), alloc.getOffset(), indexBufferSizeBytes);
135 }
136 }
137
VkFormat getVertexFormat (void) const { return m_vertexFormat; }
deUint32 getVertexStride (void) const { return m_vertexStride; }
VkIndexType getIndexType (void) const { return VK_INDEX_TYPE_UINT32; }
deUint32 getNumVertices (void) const { return static_cast<deUint32>(m_vertexData.size()); }
deUint32 getNumIndices (void) const { return getNumVertices(); }
VkBuffer getVertexBuffer (void) const { return **m_vertexBuffer; }
VkBuffer getIndexBuffer (void) const { return **m_indexBuffer; }
145
146 private:
147 VkFormat m_vertexFormat;
148 deUint32 m_vertexStride;
149 std::vector<tcu::Vec4> m_vertexData;
150 de::MovePtr<Buffer> m_vertexBuffer;
151 de::MovePtr<Buffer> m_indexBuffer;
152 };
153
154 //! Add flags for all shader stages required to support a particular stage (e.g. fragment requires vertex as well).
VkShaderStageFlags getRequiredStages (const VkShaderStageFlagBits stage)
156 {
157 if (stage & VK_SHADER_STAGE_COMPUTE_BIT)
158 {
159 DE_ASSERT(stage == VK_SHADER_STAGE_COMPUTE_BIT);
160 return stage;
161 }
162 else
163 DE_ASSERT((stage & VK_SHADER_STAGE_COMPUTE_BIT) == 0);
164
165 VkShaderStageFlags flags = 0u;
166
167 if (stage & VK_SHADER_STAGE_ALL_GRAPHICS)
168 flags |= VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT;
169 if (stage & (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT))
170 flags |= VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT;
171 if (stage & VK_SHADER_STAGE_GEOMETRY_BIT)
172 flags |= VK_SHADER_STAGE_GEOMETRY_BIT;
173
174 return flags;
175 }
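// getRequiredStages() example (illustrative): requesting VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT
// expands to VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT |
// VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT,
// while VK_SHADER_STAGE_COMPUTE_BIT maps only to itself.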
176
177 //! Check that SSBO read/write is available and that all shader stages are supported.
void requireFeaturesForSSBOAccess (OperationContext& context, const VkShaderStageFlags usedStages)
179 {
180 const InstanceInterface& vki = context.getInstanceInterface();
181 const VkPhysicalDevice physDevice = context.getPhysicalDevice();
182 FeatureFlags flags = (FeatureFlags)0;
183
184 if (usedStages & VK_SHADER_STAGE_FRAGMENT_BIT)
185 flags |= FEATURE_FRAGMENT_STORES_AND_ATOMICS;
186 if (usedStages & (VK_SHADER_STAGE_ALL_GRAPHICS & (~VK_SHADER_STAGE_FRAGMENT_BIT)))
187 flags |= FEATURE_VERTEX_PIPELINE_STORES_AND_ATOMICS;
188 if (usedStages & VK_SHADER_STAGE_GEOMETRY_BIT)
189 flags |= FEATURE_GEOMETRY_SHADER;
190 if (usedStages & (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT))
191 flags |= FEATURE_TESSELLATION_SHADER;
192
193 requireFeatures(vki, physDevice, flags);
194 }
195
Data getHostBufferData (const OperationContext& context, const Buffer& hostBuffer, const VkDeviceSize size)
197 {
198 const DeviceInterface& vk = context.getDeviceInterface();
199 const VkDevice device = context.getDevice();
200 const Allocation& alloc = hostBuffer.getAllocation();
201
202 invalidateMappedMemoryRange(vk, device, alloc.getMemory(), alloc.getOffset(), size);
203
204 const Data data =
205 {
206 static_cast<std::size_t>(size), // std::size_t size;
207 static_cast<deUint8*>(alloc.getHostPtr()), // const deUint8* data;
208 };
209 return data;
210 }
211
void assertValidShaderStage (const VkShaderStageFlagBits stage)
213 {
214 switch (stage)
215 {
216 case VK_SHADER_STAGE_VERTEX_BIT:
217 case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
218 case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
219 case VK_SHADER_STAGE_GEOMETRY_BIT:
220 case VK_SHADER_STAGE_FRAGMENT_BIT:
221 case VK_SHADER_STAGE_COMPUTE_BIT:
222 // OK
223 break;
224
225 default:
226 DE_ASSERT(0);
227 break;
228 }
229 }
230
VkPipelineStageFlags pipelineStageFlagsFromShaderStageFlagBits (const VkShaderStageFlagBits shaderStage)
232 {
233 switch (shaderStage)
234 {
235 case VK_SHADER_STAGE_VERTEX_BIT: return VK_PIPELINE_STAGE_VERTEX_SHADER_BIT;
236 case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT: return VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT;
237 case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT: return VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT;
238 case VK_SHADER_STAGE_GEOMETRY_BIT: return VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT;
239 case VK_SHADER_STAGE_FRAGMENT_BIT: return VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
240 case VK_SHADER_STAGE_COMPUTE_BIT: return VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
241
242 // Other usages are probably an error, so flag that.
243 default:
244 DE_ASSERT(0);
245 return (VkPipelineStageFlags)0;
246 }
247 }
248
249 //! Fill destination buffer with a repeating pattern.
void fillPattern (void* const pData, const VkDeviceSize size)
251 {
252 static const deUint8 pattern[] = { 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31 };
253 deUint8* const pBytes = static_cast<deUint8*>(pData);
254
255 for (deUint32 i = 0; i < size; ++i)
256 pBytes[i] = pattern[i % DE_LENGTH_OF_ARRAY(pattern)];
257 }
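// fillPattern() example (illustrative): for size == 13 the destination receives the bytes
// 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 2, 3 (the 11-byte prime pattern repeated
// byte-wise until 'size' bytes have been written).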
258
259 //! Get size in bytes of a pixel buffer with given extent.
VkDeviceSize getPixelBufferSize (const VkFormat format, const VkExtent3D& extent)
261 {
262 const int pixelSize = tcu::getPixelSize(mapVkFormat(format));
263 return (pixelSize * extent.width * extent.height * extent.depth);
264 }
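// getPixelBufferSize() example (illustrative): a 16x16x1 VK_FORMAT_R8G8B8A8_UNORM image has
// 4-byte pixels, so the required buffer size is 4 * 16 * 16 * 1 = 1024 bytes.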
265
266 //! Determine the size of a 2D image that can hold sizeBytes data.
VkExtent3D get2DImageExtentWithSize (const VkDeviceSize sizeBytes, const deUint32 pixelSize)
268 {
269 const deUint32 size = static_cast<deUint32>(sizeBytes / pixelSize);
270
271 DE_ASSERT(size <= MAX_IMAGE_DIMENSION_2D * MAX_IMAGE_DIMENSION_2D);
272
273 return makeExtent3D(
274 std::min(size, static_cast<deUint32>(MAX_IMAGE_DIMENSION_2D)),
275 (size / MAX_IMAGE_DIMENSION_2D) + (size % MAX_IMAGE_DIMENSION_2D != 0 ? 1u : 0u),
276 1u);
277 }
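// get2DImageExtentWithSize() example (illustrative): 1 MiB of data with 4-byte pixels gives
// size = 262144 pixels, so the returned extent is 4096 x 64 x 1 (width capped at
// MAX_IMAGE_DIMENSION_2D, height rounded up so that width * height >= size).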
278
VkClearValue makeClearValue (const VkFormat format)
280 {
281 if (isDepthStencilFormat(format))
282 return makeClearValueDepthStencil(0.4f, 21u);
283 else
284 {
285 if (isIntFormat(format) || isUintFormat(format))
286 return makeClearValueColorU32(8u, 16u, 24u, 32u);
287 else
288 return makeClearValueColorF32(0.25f, 0.49f, 0.75f, 1.0f);
289 }
290 }
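// makeClearValue() summary (for reference): depth/stencil formats clear to depth 0.4 and
// stencil 21, integer color formats to (8, 16, 24, 32), and all other color formats to
// (0.25, 0.49, 0.75, 1.0).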
291
void clearPixelBuffer (tcu::PixelBufferAccess& pixels, const VkClearValue& clearValue)
293 {
294 const tcu::TextureFormat format = pixels.getFormat();
295 const tcu::TextureChannelClass channelClass = tcu::getTextureChannelClass(format.type);
296
297 if (format.order == tcu::TextureFormat::D)
298 {
299 for (int z = 0; z < pixels.getDepth(); z++)
300 for (int y = 0; y < pixels.getHeight(); y++)
301 for (int x = 0; x < pixels.getWidth(); x++)
302 pixels.setPixDepth(clearValue.depthStencil.depth, x, y, z);
303 }
304 else if (format.order == tcu::TextureFormat::S)
305 {
306 for (int z = 0; z < pixels.getDepth(); z++)
307 for (int y = 0; y < pixels.getHeight(); y++)
308 for (int x = 0; x < pixels.getWidth(); x++)
309 pixels.setPixStencil(clearValue.depthStencil.stencil, x, y, z);
310 }
311 else if (format.order == tcu::TextureFormat::DS)
312 {
313 for (int z = 0; z < pixels.getDepth(); z++)
314 for (int y = 0; y < pixels.getHeight(); y++)
315 for (int x = 0; x < pixels.getWidth(); x++)
316 {
317 pixels.setPixDepth(clearValue.depthStencil.depth, x, y, z);
318 pixels.setPixStencil(clearValue.depthStencil.stencil, x, y, z);
319 }
320 }
321 else if (channelClass == tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER || channelClass == tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER)
322 {
323 const tcu::UVec4 color (clearValue.color.uint32);
324
325 for (int z = 0; z < pixels.getDepth(); z++)
326 for (int y = 0; y < pixels.getHeight(); y++)
327 for (int x = 0; x < pixels.getWidth(); x++)
328 pixels.setPixel(color, x, y, z);
329 }
330 else
331 {
332 const tcu::Vec4 color (clearValue.color.float32);
333
334 for (int z = 0; z < pixels.getDepth(); z++)
335 for (int y = 0; y < pixels.getHeight(); y++)
336 for (int x = 0; x < pixels.getWidth(); x++)
337 pixels.setPixel(color, x, y, z);
338 }
339 }
340
341 //! Storage image format that requires StorageImageExtendedFormats SPIR-V capability (listed only Vulkan-defined formats).
bool isStorageImageExtendedFormat (const VkFormat format)
343 {
344 switch (format)
345 {
346 case VK_FORMAT_R32G32_SFLOAT:
347 case VK_FORMAT_R32G32_SINT:
348 case VK_FORMAT_R32G32_UINT:
349 case VK_FORMAT_R16G16B16A16_UNORM:
350 case VK_FORMAT_R16G16B16A16_SNORM:
351 case VK_FORMAT_R16G16_SFLOAT:
352 case VK_FORMAT_R16G16_UNORM:
353 case VK_FORMAT_R16G16_SNORM:
354 case VK_FORMAT_R16G16_SINT:
355 case VK_FORMAT_R16G16_UINT:
356 case VK_FORMAT_R16_SFLOAT:
357 case VK_FORMAT_R16_UNORM:
358 case VK_FORMAT_R16_SNORM:
359 case VK_FORMAT_R16_SINT:
360 case VK_FORMAT_R16_UINT:
361 case VK_FORMAT_R8G8_UNORM:
362 case VK_FORMAT_R8G8_SNORM:
363 case VK_FORMAT_R8G8_SINT:
364 case VK_FORMAT_R8G8_UINT:
365 case VK_FORMAT_R8_UNORM:
366 case VK_FORMAT_R8_SNORM:
367 case VK_FORMAT_R8_SINT:
368 case VK_FORMAT_R8_UINT:
369 return true;
370
371 default:
372 return false;
373 }
374 }
375
VkImageViewType getImageViewType (const VkImageType imageType)
377 {
378 switch (imageType)
379 {
380 case VK_IMAGE_TYPE_1D: return VK_IMAGE_VIEW_TYPE_1D;
381 case VK_IMAGE_TYPE_2D: return VK_IMAGE_VIEW_TYPE_2D;
382 case VK_IMAGE_TYPE_3D: return VK_IMAGE_VIEW_TYPE_3D;
383
384 default:
385 DE_ASSERT(0);
386 return VK_IMAGE_VIEW_TYPE_LAST;
387 }
388 }
389
std::string getShaderImageType (const VkFormat format, const VkImageType imageType)
391 {
392 const tcu::TextureFormat texFormat = mapVkFormat(format);
393 const std::string formatPart = tcu::getTextureChannelClass(texFormat.type) == tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER ? "u" :
394 tcu::getTextureChannelClass(texFormat.type) == tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER ? "i" : "";
395 switch (imageType)
396 {
397 case VK_IMAGE_TYPE_1D: return formatPart + "image1D";
398 case VK_IMAGE_TYPE_2D: return formatPart + "image2D";
399 case VK_IMAGE_TYPE_3D: return formatPart + "image3D";
400
401 default:
402 DE_ASSERT(false);
403 return DE_NULL;
404 }
405 }
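// getShaderImageType() example (illustrative): VK_FORMAT_R32_UINT with VK_IMAGE_TYPE_2D
// yields "uimage2D", while VK_FORMAT_R16G16_SFLOAT with VK_IMAGE_TYPE_3D yields "image3D".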
406
std::string getShaderImageFormatQualifier (const VkFormat format)
408 {
409 const tcu::TextureFormat texFormat = mapVkFormat(format);
410 const char* orderPart = DE_NULL;
411 const char* typePart = DE_NULL;
412
413 switch (texFormat.order)
414 {
415 case tcu::TextureFormat::R: orderPart = "r"; break;
416 case tcu::TextureFormat::RG: orderPart = "rg"; break;
417 case tcu::TextureFormat::RGB: orderPart = "rgb"; break;
418 case tcu::TextureFormat::RGBA: orderPart = "rgba"; break;
419
420 default:
421 DE_ASSERT(false);
422 break;
423 }
424
425 switch (texFormat.type)
426 {
427 case tcu::TextureFormat::FLOAT: typePart = "32f"; break;
428 case tcu::TextureFormat::HALF_FLOAT: typePart = "16f"; break;
429
430 case tcu::TextureFormat::UNSIGNED_INT32: typePart = "32ui"; break;
431 case tcu::TextureFormat::UNSIGNED_INT16: typePart = "16ui"; break;
432 case tcu::TextureFormat::UNSIGNED_INT8: typePart = "8ui"; break;
433
434 case tcu::TextureFormat::SIGNED_INT32: typePart = "32i"; break;
435 case tcu::TextureFormat::SIGNED_INT16: typePart = "16i"; break;
436 case tcu::TextureFormat::SIGNED_INT8: typePart = "8i"; break;
437
438 case tcu::TextureFormat::UNORM_INT16: typePart = "16"; break;
439 case tcu::TextureFormat::UNORM_INT8: typePart = "8"; break;
440
441 case tcu::TextureFormat::SNORM_INT16: typePart = "16_snorm"; break;
442 case tcu::TextureFormat::SNORM_INT8: typePart = "8_snorm"; break;
443
444 default:
445 DE_ASSERT(false);
446 break;
447 }
448
449 return std::string() + orderPart + typePart;
450 }
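// getShaderImageFormatQualifier() example (illustrative): VK_FORMAT_R32G32B32A32_SFLOAT
// maps to "rgba32f" and VK_FORMAT_R16_UINT maps to "r16ui", i.e. the GLSL image format
// layout qualifier spellings.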
451
452 namespace FillUpdateBuffer
453 {
454
455 enum BufferOp
456 {
457 BUFFER_OP_FILL,
458 BUFFER_OP_UPDATE,
459 };
460
461 class Implementation : public Operation
462 {
463 public:
Implementation (OperationContext& context, Resource& resource, const BufferOp bufferOp)
465 : m_context (context)
466 , m_resource (resource)
467 , m_fillValue (0x13)
468 , m_bufferOp (bufferOp)
469 {
470 DE_ASSERT((m_resource.getBuffer().size % sizeof(deUint32)) == 0);
471 DE_ASSERT(m_bufferOp == BUFFER_OP_FILL || m_resource.getBuffer().size <= MAX_UPDATE_BUFFER_SIZE);
472
473 m_data.resize(static_cast<size_t>(m_resource.getBuffer().size));
474
475 if (m_bufferOp == BUFFER_OP_FILL)
476 {
477 const std::size_t size = m_data.size() / sizeof(m_fillValue);
478 deUint32* pData = reinterpret_cast<deUint32*>(&m_data[0]);
479 for (deUint32 i = 0; i < size; ++i)
480 pData[i] = m_fillValue;
481 }
482 else if (m_bufferOp == BUFFER_OP_UPDATE)
483 fillPattern(&m_data[0], m_data.size());
484 }
485
void recordCommands (const VkCommandBuffer cmdBuffer)
487 {
488 const DeviceInterface& vk = m_context.getDeviceInterface();
489
490 if (m_bufferOp == BUFFER_OP_FILL)
491 vk.cmdFillBuffer(cmdBuffer, m_resource.getBuffer().handle, m_resource.getBuffer().offset, m_resource.getBuffer().size, m_fillValue);
492 else if (m_bufferOp == BUFFER_OP_UPDATE)
493 vk.cmdUpdateBuffer(cmdBuffer, m_resource.getBuffer().handle, m_resource.getBuffer().offset, m_resource.getBuffer().size, reinterpret_cast<deUint32*>(&m_data[0]));
494 }
495
SyncInfo getSyncInfo (void) const
497 {
498 const SyncInfo syncInfo =
499 {
500 VK_PIPELINE_STAGE_TRANSFER_BIT, // VkPipelineStageFlags stageMask;
501 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags accessMask;
502 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout imageLayout;
503 };
504 return syncInfo;
505 }
506
Data getData (void) const
508 {
509 const Data data =
510 {
511 m_data.size(), // std::size_t size;
512 &m_data[0], // const deUint8* data;
513 };
514 return data;
515 }
516
517 private:
518 OperationContext& m_context;
519 Resource& m_resource;
520 std::vector<deUint8> m_data;
521 const deUint32 m_fillValue;
522 const BufferOp m_bufferOp;
523 };
524
525 class Support : public OperationSupport
526 {
527 public:
Support (const ResourceDescription& resourceDesc, const BufferOp bufferOp)
529 : m_resourceDesc (resourceDesc)
530 , m_bufferOp (bufferOp)
531 {
532 DE_ASSERT(m_bufferOp == BUFFER_OP_FILL || m_bufferOp == BUFFER_OP_UPDATE);
533 DE_ASSERT(m_resourceDesc.type == RESOURCE_TYPE_BUFFER);
534 }
535
deUint32 getResourceUsageFlags (void) const
537 {
538 return VK_BUFFER_USAGE_TRANSFER_DST_BIT;
539 }
540
VkQueueFlags getQueueFlags (const OperationContext& context) const
542 {
543 if (std::find(context.getDeviceExtensions().begin(), context.getDeviceExtensions().end(), "VK_KHR_maintenance1") == context.getDeviceExtensions().end() ||
544 BUFFER_OP_UPDATE != m_bufferOp)
545 return VK_QUEUE_COMPUTE_BIT | VK_QUEUE_GRAPHICS_BIT;
546
547 return VK_QUEUE_TRANSFER_BIT;
548 }
549
de::MovePtr<Operation> build (OperationContext& context, Resource& resource) const
551 {
552 return de::MovePtr<Operation>(new Implementation(context, resource, m_bufferOp));
553 }
554
555 private:
556 const ResourceDescription m_resourceDesc;
557 const BufferOp m_bufferOp;
558 };
559
560 } // FillUpdateBuffer ns
561
562 namespace CopyBuffer
563 {
564
565 class Implementation : public Operation
566 {
567 public:
Implementation (OperationContext& context, Resource& resource, const AccessMode mode)
569 : m_context (context)
570 , m_resource (resource)
571 , m_mode (mode)
572 {
573 const DeviceInterface& vk = m_context.getDeviceInterface();
574 const VkDevice device = m_context.getDevice();
575 Allocator& allocator = m_context.getAllocator();
576
577 const VkBufferUsageFlags hostBufferUsage = (m_mode == ACCESS_MODE_READ ? VK_BUFFER_USAGE_TRANSFER_DST_BIT : VK_BUFFER_USAGE_TRANSFER_SRC_BIT);
578
579 m_hostBuffer = de::MovePtr<Buffer>(new Buffer(
580 vk, device, allocator, makeBufferCreateInfo(m_resource.getBuffer().size, hostBufferUsage), MemoryRequirement::HostVisible));
581
582 const Allocation& alloc = m_hostBuffer->getAllocation();
583 if (m_mode == ACCESS_MODE_READ)
584 deMemset(alloc.getHostPtr(), 0, static_cast<size_t>(m_resource.getBuffer().size));
585 else
586 fillPattern(alloc.getHostPtr(), m_resource.getBuffer().size);
587 flushMappedMemoryRange(vk, device, alloc.getMemory(), alloc.getOffset(), m_resource.getBuffer().size);
588 }
589
void recordCommands (const VkCommandBuffer cmdBuffer)
591 {
592 const DeviceInterface& vk = m_context.getDeviceInterface();
593 const VkBufferCopy copyRegion = makeBufferCopy(0u, 0u, m_resource.getBuffer().size);
594
595 if (m_mode == ACCESS_MODE_READ)
596 {
vk.cmdCopyBuffer(cmdBuffer, m_resource.getBuffer().handle, **m_hostBuffer, 1u, &copyRegion);
598
599 // Insert a barrier so copied data is available to the host
600 const VkBufferMemoryBarrier barrier = makeBufferMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT, **m_hostBuffer, 0u, m_resource.getBuffer().size);
601 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0, 0u, DE_NULL, 1u, &barrier, 0u, DE_NULL);
602 }
603 else
vk.cmdCopyBuffer(cmdBuffer, **m_hostBuffer, m_resource.getBuffer().handle, 1u, &copyRegion);
605 }
606
SyncInfo getSyncInfo (void) const
608 {
609 const VkAccessFlags access = (m_mode == ACCESS_MODE_READ ? VK_ACCESS_TRANSFER_READ_BIT : VK_ACCESS_TRANSFER_WRITE_BIT);
610 const SyncInfo syncInfo =
611 {
612 VK_PIPELINE_STAGE_TRANSFER_BIT, // VkPipelineStageFlags stageMask;
613 access, // VkAccessFlags accessMask;
614 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout imageLayout;
615 };
616 return syncInfo;
617 }
618
Data getData (void) const
620 {
621 return getHostBufferData(m_context, *m_hostBuffer, m_resource.getBuffer().size);
622 }
623
624 private:
625 OperationContext& m_context;
626 Resource& m_resource;
627 const AccessMode m_mode;
628 de::MovePtr<Buffer> m_hostBuffer;
629 };
630
631 class Support : public OperationSupport
632 {
633 public:
Support (const ResourceDescription& resourceDesc, const AccessMode mode)
635 : m_mode (mode)
636 {
637 DE_ASSERT(resourceDesc.type == RESOURCE_TYPE_BUFFER);
638 DE_UNREF(resourceDesc);
639 }
640
deUint32 getResourceUsageFlags (void) const
642 {
643 return (m_mode == ACCESS_MODE_READ ? VK_BUFFER_USAGE_TRANSFER_SRC_BIT : VK_BUFFER_USAGE_TRANSFER_DST_BIT);
644 }
645
VkQueueFlags getQueueFlags (const OperationContext& context) const
647 {
648 DE_UNREF(context);
649 return VK_QUEUE_TRANSFER_BIT;
650 }
651
de::MovePtr<Operation> build (OperationContext& context, Resource& resource) const
653 {
654 return de::MovePtr<Operation>(new Implementation(context, resource, m_mode));
655 }
656
657 private:
658 const AccessMode m_mode;
659 };
660
661 } // CopyBuffer ns
662
663 namespace CopyBlitImage
664 {
665
666 class ImplementationBase : public Operation
667 {
668 public:
669 //! Copy/Blit/Resolve etc. operation
670 virtual void recordCopyCommand (const VkCommandBuffer cmdBuffer) = 0;
671
ImplementationBase (OperationContext& context, Resource& resource, const AccessMode mode)
673 : m_context (context)
674 , m_resource (resource)
675 , m_mode (mode)
676 , m_bufferSize (getPixelBufferSize(m_resource.getImage().format, m_resource.getImage().extent))
677 {
678 const DeviceInterface& vk = m_context.getDeviceInterface();
679 const VkDevice device = m_context.getDevice();
680 Allocator& allocator = m_context.getAllocator();
681
682 m_hostBuffer = de::MovePtr<Buffer>(new Buffer(
683 vk, device, allocator, makeBufferCreateInfo(m_bufferSize, VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT),
684 MemoryRequirement::HostVisible));
685
686 const Allocation& alloc = m_hostBuffer->getAllocation();
687 if (m_mode == ACCESS_MODE_READ)
688 deMemset(alloc.getHostPtr(), 0, static_cast<size_t>(m_bufferSize));
689 else
690 fillPattern(alloc.getHostPtr(), m_bufferSize);
691 flushMappedMemoryRange(vk, device, alloc.getMemory(), alloc.getOffset(), m_bufferSize);
692
693 // Staging image
694 m_image = de::MovePtr<Image>(new Image(
695 vk, device, allocator,
696 makeImageCreateInfo(m_resource.getImage().imageType, m_resource.getImage().extent, m_resource.getImage().format, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT),
697 MemoryRequirement::Any));
698 }
699
void recordCommands (const VkCommandBuffer cmdBuffer)
701 {
702 const DeviceInterface& vk = m_context.getDeviceInterface();
703 const VkBufferImageCopy bufferCopyRegion = makeBufferImageCopy(m_resource.getImage().subresourceLayers, m_resource.getImage().extent);
704
705 const VkImageMemoryBarrier stagingImageTransferSrcLayoutBarrier = makeImageMemoryBarrier(
706 VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT,
707 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
708 **m_image, m_resource.getImage().subresourceRange);
709
710 // Staging image layout
711 {
712 const VkImageMemoryBarrier layoutBarrier = makeImageMemoryBarrier(
713 (VkAccessFlags)0, VK_ACCESS_TRANSFER_WRITE_BIT,
714 VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
715 **m_image, m_resource.getImage().subresourceRange);
716
717 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0,
718 0u, DE_NULL, 0u, DE_NULL, 1u, &layoutBarrier);
719 }
720
721 if (m_mode == ACCESS_MODE_READ)
722 {
723 // Resource Image -> Staging image
724 recordCopyCommand(cmdBuffer);
725
726 // Staging image layout
727 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0,
728 0u, DE_NULL, 0u, DE_NULL, 1u, &stagingImageTransferSrcLayoutBarrier);
729
730 // Image -> Host buffer
731 vk.cmdCopyImageToBuffer(cmdBuffer, **m_image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, **m_hostBuffer, 1u, &bufferCopyRegion);
732
733 // Insert a barrier so copied data is available to the host
734 const VkBufferMemoryBarrier barrier = makeBufferMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT, **m_hostBuffer, 0u, m_bufferSize);
735 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0, 0u, DE_NULL, 1u, &barrier, 0u, DE_NULL);
736 }
737 else
738 {
739 // Host buffer -> Staging image
740 vk.cmdCopyBufferToImage(cmdBuffer, **m_hostBuffer, **m_image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1u, &bufferCopyRegion);
741
742 // Staging image layout
743 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0,
744 0u, DE_NULL, 0u, DE_NULL, 1u, &stagingImageTransferSrcLayoutBarrier);
745
746 // Resource image layout
747 {
748 const VkImageMemoryBarrier layoutBarrier = makeImageMemoryBarrier(
749 (VkAccessFlags)0, VK_ACCESS_TRANSFER_WRITE_BIT,
750 VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
751 m_resource.getImage().handle, m_resource.getImage().subresourceRange);
752
753 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0,
754 0u, DE_NULL, 0u, DE_NULL, 1u, &layoutBarrier);
755 }
756
757 // Staging image -> Resource Image
758 recordCopyCommand(cmdBuffer);
759 }
760 }
761
SyncInfo getSyncInfo (void) const
763 {
764 const VkAccessFlags access = (m_mode == ACCESS_MODE_READ ? VK_ACCESS_TRANSFER_READ_BIT : VK_ACCESS_TRANSFER_WRITE_BIT);
765 const VkImageLayout layout = (m_mode == ACCESS_MODE_READ ? VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL : VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
766 const SyncInfo syncInfo =
767 {
768 VK_PIPELINE_STAGE_TRANSFER_BIT, // VkPipelineStageFlags stageMask;
769 access, // VkAccessFlags accessMask;
770 layout, // VkImageLayout imageLayout;
771 };
772 return syncInfo;
773 }
774
Data getData (void) const
776 {
777 return getHostBufferData(m_context, *m_hostBuffer, m_bufferSize);
778 }
779
780 protected:
781 OperationContext& m_context;
782 Resource& m_resource;
783 const AccessMode m_mode;
784 const VkDeviceSize m_bufferSize;
785 de::MovePtr<Buffer> m_hostBuffer;
786 de::MovePtr<Image> m_image;
787 };
788
VkOffset3D makeExtentOffset (const Resource& resource)
790 {
791 DE_ASSERT(resource.getType() == RESOURCE_TYPE_IMAGE);
792 const VkExtent3D extent = resource.getImage().extent;
793
794 switch (resource.getImage().imageType)
795 {
796 case VK_IMAGE_TYPE_1D: return makeOffset3D(extent.width, 1, 1);
797 case VK_IMAGE_TYPE_2D: return makeOffset3D(extent.width, extent.height, 1);
798 case VK_IMAGE_TYPE_3D: return makeOffset3D(extent.width, extent.height, extent.depth);
799 default:
800 DE_ASSERT(0);
801 return VkOffset3D();
802 }
803 }
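// makeExtentOffset() example (illustrative): a 2D image with extent 16x16 yields the
// offset (16, 16, 1), used as the far corner of a full-image blit region below.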
804
VkImageBlit makeBlitRegion (const Resource& resource)
806 {
807 const VkImageBlit blitRegion =
808 {
809 resource.getImage().subresourceLayers, // VkImageSubresourceLayers srcSubresource;
810 { makeOffset3D(0, 0, 0), makeExtentOffset(resource) }, // VkOffset3D srcOffsets[2];
811 resource.getImage().subresourceLayers, // VkImageSubresourceLayers dstSubresource;
812 { makeOffset3D(0, 0, 0), makeExtentOffset(resource) }, // VkOffset3D dstOffsets[2];
813 };
814 return blitRegion;
815 }
816
817 class BlitImplementation : public ImplementationBase
818 {
819 public:
BlitImplementation (OperationContext& context, Resource& resource, const AccessMode mode)
821 : ImplementationBase (context, resource, mode)
822 , m_blitRegion (makeBlitRegion(m_resource))
823 {
824 const InstanceInterface& vki = m_context.getInstanceInterface();
825 const VkPhysicalDevice physDevice = m_context.getPhysicalDevice();
826 const VkFormatProperties formatProps = getPhysicalDeviceFormatProperties(vki, physDevice, m_resource.getImage().format);
827 const VkFormatFeatureFlags requiredFlags = (VK_FORMAT_FEATURE_BLIT_SRC_BIT | VK_FORMAT_FEATURE_BLIT_DST_BIT);
828
829 // SRC and DST blit is required because both images are using the same format.
830 if ((formatProps.optimalTilingFeatures & requiredFlags) != requiredFlags)
831 TCU_THROW(NotSupportedError, "Format doesn't support blits");
832 }
833
void recordCopyCommand (const VkCommandBuffer cmdBuffer)
835 {
836 const DeviceInterface& vk = m_context.getDeviceInterface();
837
838 if (m_mode == ACCESS_MODE_READ)
839 {
840 // Resource Image -> Staging image
841 vk.cmdBlitImage(cmdBuffer, m_resource.getImage().handle, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, **m_image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
842 1u, &m_blitRegion, VK_FILTER_NEAREST);
843 }
844 else
845 {
846 // Staging image -> Resource Image
847 vk.cmdBlitImage(cmdBuffer, **m_image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, m_resource.getImage().handle, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
848 1u, &m_blitRegion, VK_FILTER_NEAREST);
849 }
850 }
851
852 private:
853 const VkImageBlit m_blitRegion;
854 };
855
VkImageCopy makeImageCopyRegion (const Resource& resource)
857 {
858 const VkImageCopy imageCopyRegion =
859 {
860 resource.getImage().subresourceLayers, // VkImageSubresourceLayers srcSubresource;
861 makeOffset3D(0, 0, 0), // VkOffset3D srcOffset;
862 resource.getImage().subresourceLayers, // VkImageSubresourceLayers dstSubresource;
863 makeOffset3D(0, 0, 0), // VkOffset3D dstOffset;
864 resource.getImage().extent, // VkExtent3D extent;
865 };
866 return imageCopyRegion;
867 }
868
869 class CopyImplementation : public ImplementationBase
870 {
871 public:
CopyImplementation (OperationContext& context, Resource& resource, const AccessMode mode)
873 : ImplementationBase (context, resource, mode)
874 , m_imageCopyRegion (makeImageCopyRegion(m_resource))
875 {
876 }
877
void recordCopyCommand (const VkCommandBuffer cmdBuffer)
879 {
880 const DeviceInterface& vk = m_context.getDeviceInterface();
881
882 if (m_mode == ACCESS_MODE_READ)
883 {
884 // Resource Image -> Staging image
885 vk.cmdCopyImage(cmdBuffer, m_resource.getImage().handle, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, **m_image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1u, &m_imageCopyRegion);
886 }
887 else
888 {
889 // Staging image -> Resource Image
890 vk.cmdCopyImage(cmdBuffer, **m_image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, m_resource.getImage().handle, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1u, &m_imageCopyRegion);
891 }
892 }
893
894 private:
895 const VkImageCopy m_imageCopyRegion;
896 };
897
898 enum Type
899 {
900 TYPE_COPY,
901 TYPE_BLIT,
902 };
903
904 class Support : public OperationSupport
905 {
906 public:
Support (const ResourceDescription& resourceDesc, const Type type, const AccessMode mode)
908 : m_type (type)
909 , m_mode (mode)
910 {
911 DE_ASSERT(resourceDesc.type == RESOURCE_TYPE_IMAGE);
912
913 const bool isDepthStencil = isDepthStencilFormat(resourceDesc.imageFormat);
914 m_requiredQueueFlags = (isDepthStencil || m_type == TYPE_BLIT ? VK_QUEUE_GRAPHICS_BIT : VK_QUEUE_TRANSFER_BIT);
915
916 // Don't blit depth/stencil images.
917 DE_ASSERT(m_type != TYPE_BLIT || !isDepthStencil);
918 }
919
deUint32 getResourceUsageFlags (void) const
921 {
922 return (m_mode == ACCESS_MODE_READ ? VK_BUFFER_USAGE_TRANSFER_SRC_BIT : VK_BUFFER_USAGE_TRANSFER_DST_BIT);
923 }
924
VkQueueFlags getQueueFlags (const OperationContext& context) const
926 {
927 DE_UNREF(context);
928 return m_requiredQueueFlags;
929 }
930
de::MovePtr<Operation> build (OperationContext& context, Resource& resource) const
932 {
933 if (m_type == TYPE_COPY)
934 return de::MovePtr<Operation>(new CopyImplementation(context, resource, m_mode));
935 else
936 return de::MovePtr<Operation>(new BlitImplementation(context, resource, m_mode));
937 }
938
939 private:
940 const Type m_type;
941 const AccessMode m_mode;
942 VkQueueFlags m_requiredQueueFlags;
943 };
944
945 } // CopyBlitImage ns
946
947 namespace ShaderAccess
948 {
949
950 enum DispatchCall
951 {
952 DISPATCH_CALL_DISPATCH,
953 DISPATCH_CALL_DISPATCH_INDIRECT,
954 };
955
956 class GraphicsPipeline : public Pipeline
957 {
958 public:
GraphicsPipeline (OperationContext& context, const VkShaderStageFlagBits stage, const std::string& shaderPrefix, const VkDescriptorSetLayout descriptorSetLayout)
960 : m_vertices (context)
961 {
962 const DeviceInterface& vk = context.getDeviceInterface();
963 const VkDevice device = context.getDevice();
964 Allocator& allocator = context.getAllocator();
965 const VkShaderStageFlags requiredStages = getRequiredStages(stage);
966
967 // Color attachment
968
969 m_colorFormat = VK_FORMAT_R8G8B8A8_UNORM;
970 m_colorImageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u);
971 m_colorImageExtent = makeExtent3D(16u, 16u, 1u);
972 m_colorAttachmentImage = de::MovePtr<Image>(new Image(vk, device, allocator,
973 makeImageCreateInfo(VK_IMAGE_TYPE_2D, m_colorImageExtent, m_colorFormat, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT),
974 MemoryRequirement::Any));
975
976 // Pipeline
977
978 m_colorAttachmentView = makeImageView (vk, device, **m_colorAttachmentImage, VK_IMAGE_VIEW_TYPE_2D, m_colorFormat, m_colorImageSubresourceRange);
979 m_renderPass = makeRenderPass (vk, device, m_colorFormat);
980 m_framebuffer = makeFramebuffer (vk, device, *m_renderPass, *m_colorAttachmentView, m_colorImageExtent.width, m_colorImageExtent.height, 1u);
981 m_pipelineLayout = makePipelineLayout(vk, device, descriptorSetLayout);
982
983 GraphicsPipelineBuilder pipelineBuilder;
984 pipelineBuilder
985 .setRenderSize (tcu::IVec2(m_colorImageExtent.width, m_colorImageExtent.height))
986 .setVertexInputSingleAttribute (m_vertices.getVertexFormat(), m_vertices.getVertexStride())
987 .setShader (vk, device, VK_SHADER_STAGE_VERTEX_BIT, context.getBinaryCollection().get(shaderPrefix + "vert"), DE_NULL)
988 .setShader (vk, device, VK_SHADER_STAGE_FRAGMENT_BIT, context.getBinaryCollection().get(shaderPrefix + "frag"), DE_NULL);
989
990 if (requiredStages & (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT))
991 pipelineBuilder
992 .setPatchControlPoints (m_vertices.getNumVertices())
993 .setShader (vk, device, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT, context.getBinaryCollection().get(shaderPrefix + "tesc"), DE_NULL)
994 .setShader (vk, device, VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT, context.getBinaryCollection().get(shaderPrefix + "tese"), DE_NULL);
995
996 if (requiredStages & VK_SHADER_STAGE_GEOMETRY_BIT)
997 pipelineBuilder
998 .setShader (vk, device, VK_SHADER_STAGE_GEOMETRY_BIT, context.getBinaryCollection().get(shaderPrefix + "geom"), DE_NULL);
999
1000 m_pipeline = pipelineBuilder.build(vk, device, *m_pipelineLayout, *m_renderPass, context.getPipelineCacheData());
1001 }
1002
void recordCommands (OperationContext& context, const VkCommandBuffer cmdBuffer, const VkDescriptorSet descriptorSet)
1004 {
1005 const DeviceInterface& vk = context.getDeviceInterface();
1006
1007 // Change color attachment image layout
1008 {
1009 const VkImageMemoryBarrier colorAttachmentLayoutBarrier = makeImageMemoryBarrier(
1010 (VkAccessFlags)0, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
1011 VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
1012 **m_colorAttachmentImage, m_colorImageSubresourceRange);
1013
1014 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, (VkDependencyFlags)0,
1015 0u, DE_NULL, 0u, DE_NULL, 1u, &colorAttachmentLayoutBarrier);
1016 }
1017
1018 {
1019 const VkRect2D renderArea = {
1020 makeOffset2D(0, 0),
1021 makeExtent2D(m_colorImageExtent.width, m_colorImageExtent.height),
1022 };
1023 const tcu::Vec4 clearColor = tcu::Vec4(0.0f, 0.0f, 0.0f, 1.0f);
1024
1025 beginRenderPass(vk, cmdBuffer, *m_renderPass, *m_framebuffer, renderArea, clearColor);
1026 }
1027
1028 vk.cmdBindPipeline(cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *m_pipeline);
1029 vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *m_pipelineLayout, 0u, 1u, &descriptorSet, 0u, DE_NULL);
1030 {
1031 const VkDeviceSize vertexBufferOffset = 0ull;
1032 const VkBuffer vertexBuffer = m_vertices.getVertexBuffer();
1033 vk.cmdBindVertexBuffers(cmdBuffer, 0u, 1u, &vertexBuffer, &vertexBufferOffset);
1034 }
1035
1036 vk.cmdDraw(cmdBuffer, m_vertices.getNumVertices(), 1u, 0u, 0u);
1037 endRenderPass(vk, cmdBuffer);
1038 }
1039
1040 private:
1041 const VertexGrid m_vertices;
1042 VkFormat m_colorFormat;
1043 de::MovePtr<Image> m_colorAttachmentImage;
1044 Move<VkImageView> m_colorAttachmentView;
1045 VkExtent3D m_colorImageExtent;
1046 VkImageSubresourceRange m_colorImageSubresourceRange;
1047 Move<VkRenderPass> m_renderPass;
1048 Move<VkFramebuffer> m_framebuffer;
1049 Move<VkPipelineLayout> m_pipelineLayout;
1050 Move<VkPipeline> m_pipeline;
1051 };
1052
1053 class ComputePipeline : public Pipeline
1054 {
1055 public:
ComputePipeline (OperationContext& context, const DispatchCall dispatchCall, const std::string& shaderPrefix, const VkDescriptorSetLayout descriptorSetLayout)
1057 : m_dispatchCall (dispatchCall)
1058 {
1059 const DeviceInterface& vk = context.getDeviceInterface();
1060 const VkDevice device = context.getDevice();
1061 Allocator& allocator = context.getAllocator();
1062
1063 if (m_dispatchCall == DISPATCH_CALL_DISPATCH_INDIRECT)
1064 {
1065 m_indirectBuffer = de::MovePtr<Buffer>(new Buffer(vk, device, allocator,
1066 makeBufferCreateInfo(sizeof(VkDispatchIndirectCommand), VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT), MemoryRequirement::HostVisible));
1067
1068 const Allocation& alloc = m_indirectBuffer->getAllocation();
1069 VkDispatchIndirectCommand* const pIndirectCommand = static_cast<VkDispatchIndirectCommand*>(alloc.getHostPtr());
1070
1071 pIndirectCommand->x = 1u;
1072 pIndirectCommand->y = 1u;
1073 pIndirectCommand->z = 1u;
1074
1075 flushMappedMemoryRange(vk, device, alloc.getMemory(), alloc.getOffset(), sizeof(VkDispatchIndirectCommand));
1076 }
1077
1078 const Unique<VkShaderModule> shaderModule(createShaderModule(vk, device, context.getBinaryCollection().get(shaderPrefix + "comp"), (VkShaderModuleCreateFlags)0));
1079
1080 m_pipelineLayout = makePipelineLayout(vk, device, descriptorSetLayout);
1081 m_pipeline = makeComputePipeline(vk, device, *m_pipelineLayout, *shaderModule, DE_NULL, context.getPipelineCacheData());
1082 }
1083
void recordCommands (OperationContext& context, const VkCommandBuffer cmdBuffer, const VkDescriptorSet descriptorSet)
1085 {
1086 const DeviceInterface& vk = context.getDeviceInterface();
1087
1088 vk.cmdBindPipeline(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *m_pipeline);
1089 vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *m_pipelineLayout, 0u, 1u, &descriptorSet, 0u, DE_NULL);
1090
1091 if (m_dispatchCall == DISPATCH_CALL_DISPATCH_INDIRECT)
1092 vk.cmdDispatchIndirect(cmdBuffer, **m_indirectBuffer, 0u);
1093 else
1094 vk.cmdDispatch(cmdBuffer, 1u, 1u, 1u);
1095 }
1096
1097 private:
1098 const DispatchCall m_dispatchCall;
1099 de::MovePtr<Buffer> m_indirectBuffer;
1100 Move<VkPipelineLayout> m_pipelineLayout;
1101 Move<VkPipeline> m_pipeline;
1102 };
1103
1104 //! Read/write operation on a UBO/SSBO in graphics/compute pipeline.
1105 class BufferImplementation : public Operation
1106 {
1107 public:
BufferImplementation (OperationContext& context,
1109 Resource& resource,
1110 const VkShaderStageFlagBits stage,
1111 const BufferType bufferType,
1112 const std::string& shaderPrefix,
1113 const AccessMode mode,
1114 const PipelineType pipelineType,
1115 const DispatchCall dispatchCall)
1116 : m_context (context)
1117 , m_resource (resource)
1118 , m_stage (stage)
1119 , m_pipelineStage (pipelineStageFlagsFromShaderStageFlagBits(m_stage))
1120 , m_bufferType (bufferType)
1121 , m_mode (mode)
1122 , m_dispatchCall (dispatchCall)
1123 {
1124 requireFeaturesForSSBOAccess (m_context, m_stage);
1125
1126 const DeviceInterface& vk = m_context.getDeviceInterface();
1127 const VkDevice device = m_context.getDevice();
1128 Allocator& allocator = m_context.getAllocator();
1129
1130 m_hostBuffer = de::MovePtr<Buffer>(new Buffer(
1131 vk, device, allocator, makeBufferCreateInfo(m_resource.getBuffer().size, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT), MemoryRequirement::HostVisible));
1132
1133 // Init host buffer data
1134 {
1135 const Allocation& alloc = m_hostBuffer->getAllocation();
1136 if (m_mode == ACCESS_MODE_READ)
1137 deMemset(alloc.getHostPtr(), 0, static_cast<size_t>(m_resource.getBuffer().size));
1138 else
1139 fillPattern(alloc.getHostPtr(), m_resource.getBuffer().size);
1140 flushMappedMemoryRange(vk, device, alloc.getMemory(), alloc.getOffset(), m_resource.getBuffer().size);
1141 }
1142
1143 // Prepare descriptors
1144 {
1145 const VkDescriptorType bufferDescriptorType = (m_bufferType == BUFFER_TYPE_UNIFORM ? VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER : VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
1146
1147 m_descriptorSetLayout = DescriptorSetLayoutBuilder()
1148 .addSingleBinding(bufferDescriptorType, m_stage)
1149 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, m_stage)
1150 .build(vk, device);
1151
1152 m_descriptorPool = DescriptorPoolBuilder()
1153 .addType(bufferDescriptorType)
1154 .addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER)
1155 .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
1156
1157 m_descriptorSet = makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout);
1158
1159 const VkDescriptorBufferInfo bufferInfo = makeDescriptorBufferInfo(m_resource.getBuffer().handle, m_resource.getBuffer().offset, m_resource.getBuffer().size);
1160 const VkDescriptorBufferInfo hostBufferInfo = makeDescriptorBufferInfo(**m_hostBuffer, 0u, m_resource.getBuffer().size);
1161
1162 if (m_mode == ACCESS_MODE_READ)
1163 {
1164 DescriptorSetUpdateBuilder()
1165 .writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), bufferDescriptorType, &bufferInfo)
1166 .writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &hostBufferInfo)
1167 .update(vk, device);
1168 }
1169 else
1170 {
1171 DescriptorSetUpdateBuilder()
1172 .writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &hostBufferInfo)
1173 .writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &bufferInfo)
1174 .update(vk, device);
1175 }
1176 }
1177
1178 // Create pipeline
1179 m_pipeline = (pipelineType == PIPELINE_TYPE_GRAPHICS ? de::MovePtr<Pipeline>(new GraphicsPipeline(context, stage, shaderPrefix, *m_descriptorSetLayout))
1180 : de::MovePtr<Pipeline>(new ComputePipeline(context, m_dispatchCall, shaderPrefix, *m_descriptorSetLayout)));
1181 }
1182
void recordCommands (const VkCommandBuffer cmdBuffer)
1184 {
1185 m_pipeline->recordCommands(m_context, cmdBuffer, *m_descriptorSet);
1186
1187 // Post draw/dispatch commands
1188
1189 if (m_mode == ACCESS_MODE_READ)
1190 {
1191 const DeviceInterface& vk = m_context.getDeviceInterface();
1192
1193 // Insert a barrier so data written by the shader is available to the host
1194 const VkBufferMemoryBarrier barrier = makeBufferMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT, **m_hostBuffer, 0u, m_resource.getBuffer().size);
1195 vk.cmdPipelineBarrier(cmdBuffer, m_pipelineStage, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0, 0u, DE_NULL, 1u, &barrier, 0u, DE_NULL);
1196 }
1197 }
1198
SyncInfo getSyncInfo (void) const
1200 {
1201 const VkAccessFlags accessFlags = (m_mode == ACCESS_MODE_READ ? (m_bufferType == BUFFER_TYPE_UNIFORM ? VK_ACCESS_UNIFORM_READ_BIT
1202 : VK_ACCESS_SHADER_READ_BIT)
1203 : VK_ACCESS_SHADER_WRITE_BIT);
1204 const SyncInfo syncInfo =
1205 {
1206 m_pipelineStage, // VkPipelineStageFlags stageMask;
1207 accessFlags, // VkAccessFlags accessMask;
1208 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout imageLayout;
1209 };
1210 return syncInfo;
1211 }
1212
Data getData (void) const
1214 {
1215 return getHostBufferData(m_context, *m_hostBuffer, m_resource.getBuffer().size);
1216 }
1217
1218 private:
1219 OperationContext& m_context;
1220 Resource& m_resource;
1221 const VkShaderStageFlagBits m_stage;
1222 const VkPipelineStageFlags m_pipelineStage;
1223 const BufferType m_bufferType;
1224 const AccessMode m_mode;
1225 const DispatchCall m_dispatchCall;
1226 de::MovePtr<Buffer> m_hostBuffer;
1227 Move<VkDescriptorPool> m_descriptorPool;
1228 Move<VkDescriptorSetLayout> m_descriptorSetLayout;
1229 Move<VkDescriptorSet> m_descriptorSet;
1230 de::MovePtr<Pipeline> m_pipeline;
1231 };
1232
1233 class ImageImplementation : public Operation
1234 {
1235 public:
ImageImplementation (OperationContext& context,
1237 Resource& resource,
1238 const VkShaderStageFlagBits stage,
1239 const std::string& shaderPrefix,
1240 const AccessMode mode,
1241 const PipelineType pipelineType,
1242 const DispatchCall dispatchCall)
1243 : m_context (context)
1244 , m_resource (resource)
1245 , m_stage (stage)
1246 , m_pipelineStage (pipelineStageFlagsFromShaderStageFlagBits(m_stage))
1247 , m_mode (mode)
1248 , m_dispatchCall (dispatchCall)
1249 , m_hostBufferSizeBytes (getPixelBufferSize(m_resource.getImage().format, m_resource.getImage().extent))
1250 {
1251 const DeviceInterface& vk = m_context.getDeviceInterface();
1252 const InstanceInterface& vki = m_context.getInstanceInterface();
1253 const VkDevice device = m_context.getDevice();
1254 const VkPhysicalDevice physDevice = m_context.getPhysicalDevice();
1255 Allocator& allocator = m_context.getAllocator();
1256
1257 // Image stores are always required, in either access mode.
1258 requireFeaturesForSSBOAccess(m_context, m_stage);
1259
1260 // Some storage image formats require additional capability.
1261 if (isStorageImageExtendedFormat(m_resource.getImage().format))
1262 requireFeatures(vki, physDevice, FEATURE_SHADER_STORAGE_IMAGE_EXTENDED_FORMATS);
1263
1264 m_hostBuffer = de::MovePtr<Buffer>(new Buffer(
1265 vk, device, allocator, makeBufferCreateInfo(m_hostBufferSizeBytes, VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT),
1266 MemoryRequirement::HostVisible));
1267
1268 // Init host buffer data
1269 {
1270 const Allocation& alloc = m_hostBuffer->getAllocation();
1271 if (m_mode == ACCESS_MODE_READ)
1272 deMemset(alloc.getHostPtr(), 0, static_cast<size_t>(m_hostBufferSizeBytes));
1273 else
1274 fillPattern(alloc.getHostPtr(), m_hostBufferSizeBytes);
1275 flushMappedMemoryRange(vk, device, alloc.getMemory(), alloc.getOffset(), m_hostBufferSizeBytes);
1276 }
1277
1278 // Image resources
1279 {
1280 m_image = de::MovePtr<Image>(new Image(vk, device, allocator,
1281 makeImageCreateInfo(m_resource.getImage().imageType, m_resource.getImage().extent, m_resource.getImage().format,
1282 VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_STORAGE_BIT),
1283 MemoryRequirement::Any));
1284
1285 if (m_mode == ACCESS_MODE_READ)
1286 {
1287 m_srcImage = &m_resource.getImage().handle;
1288 m_dstImage = &(**m_image);
1289 }
1290 else
1291 {
1292 m_srcImage = &(**m_image);
1293 m_dstImage = &m_resource.getImage().handle;
1294 }
1295
1296 const VkImageViewType viewType = getImageViewType(m_resource.getImage().imageType);
1297
1298 m_srcImageView = makeImageView(vk, device, *m_srcImage, viewType, m_resource.getImage().format, m_resource.getImage().subresourceRange);
1299 m_dstImageView = makeImageView(vk, device, *m_dstImage, viewType, m_resource.getImage().format, m_resource.getImage().subresourceRange);
1300 }
1301
1302 // Prepare descriptors
1303 {
1304 m_descriptorSetLayout = DescriptorSetLayoutBuilder()
1305 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, m_stage)
1306 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, m_stage)
1307 .build(vk, device);
1308
1309 m_descriptorPool = DescriptorPoolBuilder()
1310 .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE)
1311 .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE)
1312 .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
1313
1314 m_descriptorSet = makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout);
1315
1316 const VkDescriptorImageInfo srcImageInfo = makeDescriptorImageInfo(DE_NULL, *m_srcImageView, VK_IMAGE_LAYOUT_GENERAL);
1317 const VkDescriptorImageInfo dstImageInfo = makeDescriptorImageInfo(DE_NULL, *m_dstImageView, VK_IMAGE_LAYOUT_GENERAL);
1318
1319 DescriptorSetUpdateBuilder()
1320 .writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &srcImageInfo)
1321 .writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &dstImageInfo)
1322 .update(vk, device);
1323 }
1324
1325 // Create pipeline
1326 m_pipeline = (pipelineType == PIPELINE_TYPE_GRAPHICS ? de::MovePtr<Pipeline>(new GraphicsPipeline(context, stage, shaderPrefix, *m_descriptorSetLayout))
1327 : de::MovePtr<Pipeline>(new ComputePipeline(context, m_dispatchCall, shaderPrefix, *m_descriptorSetLayout)));
1328 }
1329
void recordCommands (const VkCommandBuffer cmdBuffer)
1331 {
1332 const DeviceInterface& vk = m_context.getDeviceInterface();
1333 const VkBufferImageCopy bufferCopyRegion = makeBufferImageCopy(m_resource.getImage().subresourceLayers, m_resource.getImage().extent);
1334
1335 // Destination image layout
1336 {
1337 const VkImageMemoryBarrier barrier = makeImageMemoryBarrier(
1338 (VkAccessFlags)0, VK_ACCESS_SHADER_WRITE_BIT,
1339 VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL,
1340 *m_dstImage, m_resource.getImage().subresourceRange);
1341
1342 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, m_pipelineStage, (VkDependencyFlags)0,
1343 0u, DE_NULL, 0u, DE_NULL, 1u, &barrier);
1344 }
1345
1346 // In write mode, source image must be filled with data.
1347 if (m_mode == ACCESS_MODE_WRITE)
1348 {
1349 // Layout for transfer
1350 {
1351 const VkImageMemoryBarrier barrier = makeImageMemoryBarrier(
1352 (VkAccessFlags)0, VK_ACCESS_TRANSFER_WRITE_BIT,
1353 VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1354 *m_srcImage, m_resource.getImage().subresourceRange);
1355
1356 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0,
1357 0u, DE_NULL, 0u, DE_NULL, 1u, &barrier);
1358 }
1359
1360 // Host buffer -> Src image
1361 vk.cmdCopyBufferToImage(cmdBuffer, **m_hostBuffer, *m_srcImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1u, &bufferCopyRegion);
1362
1363 // Layout for shader reading
1364 {
1365 const VkImageMemoryBarrier barrier = makeImageMemoryBarrier(
1366 VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT,
1367 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL,
1368 *m_srcImage, m_resource.getImage().subresourceRange);
1369
1370 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, m_pipelineStage, (VkDependencyFlags)0,
1371 0u, DE_NULL, 0u, DE_NULL, 1u, &barrier);
1372 }
1373 }
1374
1375 // Execute shaders
1376
1377 m_pipeline->recordCommands(m_context, cmdBuffer, *m_descriptorSet);
1378
1379 // Post draw/dispatch commands
1380
1381 if (m_mode == ACCESS_MODE_READ)
1382 {
1383 // Layout for transfer
1384 {
1385 const VkImageMemoryBarrier barrier = makeImageMemoryBarrier(
1386 VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT,
1387 VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
1388 *m_dstImage, m_resource.getImage().subresourceRange);
1389
1390 vk.cmdPipelineBarrier(cmdBuffer, m_pipelineStage, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0,
1391 0u, DE_NULL, 0u, DE_NULL, 1u, &barrier);
1392 }
1393
1394 // Dst image -> Host buffer
1395 vk.cmdCopyImageToBuffer(cmdBuffer, *m_dstImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, **m_hostBuffer, 1u, &bufferCopyRegion);
1396
1397 // Insert a barrier so data written by the shader is available to the host
1398 {
1399 const VkBufferMemoryBarrier barrier = makeBufferMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT, **m_hostBuffer, 0u, m_hostBufferSizeBytes);
1400 vk.cmdPipelineBarrier(cmdBuffer, m_pipelineStage, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0, 0u, DE_NULL, 1u, &barrier, 0u, DE_NULL);
1401 }
1402 }
1403 }
1404
1405 SyncInfo getSyncInfo (void) const
1406 {
1407 const VkAccessFlags accessFlags = (m_mode == ACCESS_MODE_READ ? VK_ACCESS_SHADER_READ_BIT : VK_ACCESS_SHADER_WRITE_BIT);
1408 const SyncInfo syncInfo =
1409 {
1410 m_pipelineStage, // VkPipelineStageFlags stageMask;
1411 accessFlags, // VkAccessFlags accessMask;
1412 VK_IMAGE_LAYOUT_GENERAL, // VkImageLayout imageLayout;
1413 };
1414 return syncInfo;
1415 }
1416
1417 Data getData (void) const
1418 {
1419 return getHostBufferData(m_context, *m_hostBuffer, m_hostBufferSizeBytes);
1420 }
1421
1422 private:
1423 OperationContext& m_context;
1424 Resource& m_resource;
1425 const VkShaderStageFlagBits m_stage;
1426 const VkPipelineStageFlags m_pipelineStage;
1427 const AccessMode m_mode;
1428 const DispatchCall m_dispatchCall;
1429 const VkDeviceSize m_hostBufferSizeBytes;
1430 de::MovePtr<Buffer> m_hostBuffer;
1431 de::MovePtr<Image> m_image; //! Additional image used as src or dst depending on operation mode.
1432 const VkImage* m_srcImage;
1433 const VkImage* m_dstImage;
1434 Move<VkImageView> m_srcImageView;
1435 Move<VkImageView> m_dstImageView;
1436 Move<VkDescriptorPool> m_descriptorPool;
1437 Move<VkDescriptorSetLayout> m_descriptorSetLayout;
1438 Move<VkDescriptorSet> m_descriptorSet;
1439 de::MovePtr<Pipeline> m_pipeline;
1440 };
1441
1442 //! Create generic passthrough shaders with bits of custom code inserted in a specific shader stage.
1443 void initPassthroughPrograms (SourceCollections& programCollection,
1444 const std::string& shaderPrefix,
1445 const std::string& declCode,
1446 const std::string& mainCode,
1447 const VkShaderStageFlagBits stage)
1448 {
1449 const VkShaderStageFlags requiredStages = getRequiredStages(stage);
1450
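// The custom decl/main code is injected only into the requested stage; the other required stages stay plain passthrough.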
1451 if (requiredStages & VK_SHADER_STAGE_VERTEX_BIT)
1452 {
1453 std::ostringstream src;
1454 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
1455 << "\n"
1456 << "layout(location = 0) in vec4 v_in_position;\n"
1457 << "\n"
1458 << "out " << s_perVertexBlock << ";\n"
1459 << "\n"
1460 << (stage & VK_SHADER_STAGE_VERTEX_BIT ? declCode + "\n" : "")
1461 << "void main (void)\n"
1462 << "{\n"
1463 << " gl_Position = v_in_position;\n"
1464 << (stage & VK_SHADER_STAGE_VERTEX_BIT ? mainCode : "")
1465 << "}\n";
1466
1467 programCollection.glslSources.add(shaderPrefix + "vert") << glu::VertexSource(src.str());
1468 }
1469
1470 if (requiredStages & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT)
1471 {
1472 std::ostringstream src;
1473 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
1474 << "\n"
1475 << "layout(vertices = 3) out;\n"
1476 << "\n"
1477 << "in " << s_perVertexBlock << " gl_in[gl_MaxPatchVertices];\n"
1478 << "\n"
1479 << "out " << s_perVertexBlock << " gl_out[];\n"
1480 << "\n"
1481 << (stage & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT ? declCode + "\n" : "")
1482 << "void main (void)\n"
1483 << "{\n"
1484 << " gl_TessLevelInner[0] = 1.0;\n"
1485 << " gl_TessLevelInner[1] = 1.0;\n"
1486 << "\n"
1487 << " gl_TessLevelOuter[0] = 1.0;\n"
1488 << " gl_TessLevelOuter[1] = 1.0;\n"
1489 << " gl_TessLevelOuter[2] = 1.0;\n"
1490 << " gl_TessLevelOuter[3] = 1.0;\n"
1491 << "\n"
1492 << " gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;\n"
1493 << (stage & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT ? "\n" + mainCode : "")
1494 << "}\n";
1495
1496 programCollection.glslSources.add(shaderPrefix + "tesc") << glu::TessellationControlSource(src.str());
1497 }
1498
1499 if (requiredStages & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)
1500 {
1501 std::ostringstream src;
1502 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
1503 << "\n"
1504 << "layout(triangles, equal_spacing, ccw) in;\n"
1505 << "\n"
1506 << "in " << s_perVertexBlock << " gl_in[gl_MaxPatchVertices];\n"
1507 << "\n"
1508 << "out " << s_perVertexBlock << ";\n"
1509 << "\n"
1510 << (stage & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT ? declCode + "\n" : "")
1511 << "void main (void)\n"
1512 << "{\n"
1513 << " vec3 px = gl_TessCoord.x * gl_in[0].gl_Position.xyz;\n"
1514 << " vec3 py = gl_TessCoord.y * gl_in[1].gl_Position.xyz;\n"
1515 << " vec3 pz = gl_TessCoord.z * gl_in[2].gl_Position.xyz;\n"
1516 << " gl_Position = vec4(px + py + pz, 1.0);\n"
1517 << (stage & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT ? mainCode : "")
1518 << "}\n";
1519
1520 programCollection.glslSources.add(shaderPrefix + "tese") << glu::TessellationEvaluationSource(src.str());
1521 }
1522
1523 if (requiredStages & VK_SHADER_STAGE_GEOMETRY_BIT)
1524 {
1525 std::ostringstream src;
1526 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
1527 << "\n"
1528 << "layout(triangles) in;\n"
1529 << "layout(triangle_strip, max_vertices = 3) out;\n"
1530 << "\n"
1531 << "in " << s_perVertexBlock << " gl_in[];\n"
1532 << "\n"
1533 << "out " << s_perVertexBlock << ";\n"
1534 << "\n"
1535 << (stage & VK_SHADER_STAGE_GEOMETRY_BIT ? declCode + "\n" : "")
1536 << "void main (void)\n"
1537 << "{\n"
1538 << " gl_Position = gl_in[0].gl_Position;\n"
1539 << " EmitVertex();\n"
1540 << "\n"
1541 << " gl_Position = gl_in[1].gl_Position;\n"
1542 << " EmitVertex();\n"
1543 << "\n"
1544 << " gl_Position = gl_in[2].gl_Position;\n"
1545 << " EmitVertex();\n"
1546 << (stage & VK_SHADER_STAGE_GEOMETRY_BIT ? "\n" + mainCode : "")
1547 << "}\n";
1548
1549 programCollection.glslSources.add(shaderPrefix + "geom") << glu::GeometrySource(src.str());
1550 }
1551
1552 if (requiredStages & VK_SHADER_STAGE_FRAGMENT_BIT)
1553 {
1554 std::ostringstream src;
1555 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
1556 << "\n"
1557 << "layout(location = 0) out vec4 o_color;\n"
1558 << "\n"
1559 << (stage & VK_SHADER_STAGE_FRAGMENT_BIT ? declCode + "\n" : "")
1560 << "void main (void)\n"
1561 << "{\n"
1562 << " o_color = vec4(1.0);\n"
1563 << (stage & VK_SHADER_STAGE_FRAGMENT_BIT ? "\n" + mainCode : "")
1564 << "}\n";
1565
1566 programCollection.glslSources.add(shaderPrefix + "frag") << glu::FragmentSource(src.str());
1567 }
1568
1569 if (requiredStages & VK_SHADER_STAGE_COMPUTE_BIT)
1570 {
1571 std::ostringstream src;
1572 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
1573 << "\n"
1574 << "layout(local_size_x = 1) in;\n"
1575 << "\n"
1576 << (stage & VK_SHADER_STAGE_COMPUTE_BIT ? declCode + "\n" : "")
1577 << "void main (void)\n"
1578 << "{\n"
1579 << (stage & VK_SHADER_STAGE_COMPUTE_BIT ? mainCode : "")
1580 << "}\n";
1581
1582 programCollection.glslSources.add(shaderPrefix + "comp") << glu::ComputeSource(src.str());
1583 }
1584 }
1585
1586 class BufferSupport : public OperationSupport
1587 {
1588 public:
1589 BufferSupport (const ResourceDescription& resourceDesc,
1590 const BufferType bufferType,
1591 const AccessMode mode,
1592 const VkShaderStageFlagBits stage,
1593 const DispatchCall dispatchCall = DISPATCH_CALL_DISPATCH)
1594 : m_resourceDesc (resourceDesc)
1595 , m_bufferType (bufferType)
1596 , m_mode (mode)
1597 , m_stage (stage)
1598 , m_shaderPrefix (std::string(m_mode == ACCESS_MODE_READ ? "read_" : "write_") + (m_bufferType == BUFFER_TYPE_UNIFORM ? "ubo_" : "ssbo_"))
1599 , m_dispatchCall (dispatchCall)
1600 {
1601 DE_ASSERT(m_resourceDesc.type == RESOURCE_TYPE_BUFFER);
1602 DE_ASSERT(m_bufferType == BUFFER_TYPE_UNIFORM || m_bufferType == BUFFER_TYPE_STORAGE);
1603 DE_ASSERT(m_mode == ACCESS_MODE_READ || m_mode == ACCESS_MODE_WRITE);
1604 DE_ASSERT(m_mode == ACCESS_MODE_READ || m_bufferType == BUFFER_TYPE_STORAGE);
1605 DE_ASSERT(m_bufferType != BUFFER_TYPE_UNIFORM || m_resourceDesc.size.x() <= MAX_UBO_RANGE);
1606 DE_ASSERT(m_dispatchCall == DISPATCH_CALL_DISPATCH || m_dispatchCall == DISPATCH_CALL_DISPATCH_INDIRECT);
1607
1608 assertValidShaderStage(m_stage);
1609 }
1610
1611 void initPrograms (SourceCollections& programCollection) const
1612 {
1613 DE_ASSERT((m_resourceDesc.size.x() % sizeof(tcu::UVec4)) == 0);
1614
1615 const std::string bufferTypeStr = (m_bufferType == BUFFER_TYPE_UNIFORM ? "uniform" : "buffer");
1616 const int numVecElements = static_cast<int>(m_resourceDesc.size.x() / sizeof(tcu::UVec4)); // uvec4 elements, because std140 aligns array elements to 16 bytes
1617
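// The generated shader declares a read-only input buffer and a write-only output buffer and copies the full contents across.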
1618 std::ostringstream declSrc;
1619 declSrc << "layout(set = 0, binding = 0, std140) readonly " << bufferTypeStr << " Input {\n"
1620 << " uvec4 data[" << numVecElements << "];\n"
1621 << "} b_in;\n"
1622 << "\n"
1623 << "layout(set = 0, binding = 1, std140) writeonly buffer Output {\n"
1624 << " uvec4 data[" << numVecElements << "];\n"
1625 << "} b_out;\n";
1626
1627 std::ostringstream copySrc;
1628 copySrc << " for (int i = 0; i < " << numVecElements << "; ++i) {\n"
1629 << " b_out.data[i] = b_in.data[i];\n"
1630 << " }\n";
1631
1632 initPassthroughPrograms(programCollection, m_shaderPrefix, declSrc.str(), copySrc.str(), m_stage);
1633 }
1634
1635 deUint32 getResourceUsageFlags (void) const
1636 {
1637 return (m_bufferType == BUFFER_TYPE_UNIFORM ? VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT : VK_BUFFER_USAGE_STORAGE_BUFFER_BIT);
1638 }
1639
1640 VkQueueFlags getQueueFlags (const OperationContext& context) const
1641 {
1642 DE_UNREF(context);
1643 return (m_stage == VK_SHADER_STAGE_COMPUTE_BIT ? VK_QUEUE_COMPUTE_BIT : VK_QUEUE_GRAPHICS_BIT);
1644 }
1645
1646 de::MovePtr<Operation> build (OperationContext& context, Resource& resource) const
1647 {
1648 if (m_stage & VK_SHADER_STAGE_COMPUTE_BIT)
1649 return de::MovePtr<Operation>(new BufferImplementation(context, resource, m_stage, m_bufferType, m_shaderPrefix, m_mode, PIPELINE_TYPE_COMPUTE, m_dispatchCall));
1650 else
1651 return de::MovePtr<Operation>(new BufferImplementation(context, resource, m_stage, m_bufferType, m_shaderPrefix, m_mode, PIPELINE_TYPE_GRAPHICS, m_dispatchCall));
1652 }
1653
1654 private:
1655 const ResourceDescription m_resourceDesc;
1656 const BufferType m_bufferType;
1657 const AccessMode m_mode;
1658 const VkShaderStageFlagBits m_stage;
1659 const std::string m_shaderPrefix;
1660 const DispatchCall m_dispatchCall;
1661 };
1662
1663 class ImageSupport : public OperationSupport
1664 {
1665 public:
1666 ImageSupport (const ResourceDescription& resourceDesc,
1667 const AccessMode mode,
1668 const VkShaderStageFlagBits stage,
1669 const DispatchCall dispatchCall = DISPATCH_CALL_DISPATCH)
1670 : m_resourceDesc (resourceDesc)
1671 , m_mode (mode)
1672 , m_stage (stage)
1673 , m_shaderPrefix (m_mode == ACCESS_MODE_READ ? "read_image_" : "write_image_")
1674 , m_dispatchCall (dispatchCall)
1675 {
1676 DE_ASSERT(m_resourceDesc.type == RESOURCE_TYPE_IMAGE);
1677 DE_ASSERT(m_mode == ACCESS_MODE_READ || m_mode == ACCESS_MODE_WRITE);
1678 DE_ASSERT(m_dispatchCall == DISPATCH_CALL_DISPATCH || m_dispatchCall == DISPATCH_CALL_DISPATCH_INDIRECT);
1679
1680 assertValidShaderStage(m_stage);
1681 }
1682
1683 void initPrograms (SourceCollections& programCollection) const
1684 {
1685 const std::string imageFormat = getShaderImageFormatQualifier(m_resourceDesc.imageFormat);
1686 const std::string imageType = getShaderImageType(m_resourceDesc.imageFormat, m_resourceDesc.imageType);
1687
1688 std::ostringstream declSrc;
1689 declSrc << "layout(set = 0, binding = 0, " << imageFormat << ") readonly uniform " << imageType << " srcImg;\n"
1690 << "layout(set = 0, binding = 1, " << imageFormat << ") writeonly uniform " << imageType << " dstImg;\n";
1691
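// Each invocation of the modified stage copies the whole image from srcImg to dstImg, texel by texel.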
1692 std::ostringstream mainSrc;
1693 if (m_resourceDesc.imageType == VK_IMAGE_TYPE_1D)
1694 mainSrc << " for (int x = 0; x < " << m_resourceDesc.size.x() << "; ++x)\n"
1695 << " imageStore(dstImg, x, imageLoad(srcImg, x));\n";
1696 else if (m_resourceDesc.imageType == VK_IMAGE_TYPE_2D)
1697 mainSrc << " for (int y = 0; y < " << m_resourceDesc.size.y() << "; ++y)\n"
1698 << " for (int x = 0; x < " << m_resourceDesc.size.x() << "; ++x)\n"
1699 << " imageStore(dstImg, ivec2(x, y), imageLoad(srcImg, ivec2(x, y)));\n";
1700 else if (m_resourceDesc.imageType == VK_IMAGE_TYPE_3D)
1701 mainSrc << " for (int z = 0; z < " << m_resourceDesc.size.z() << "; ++z)\n"
1702 << " for (int y = 0; y < " << m_resourceDesc.size.y() << "; ++y)\n"
1703 << " for (int x = 0; x < " << m_resourceDesc.size.x() << "; ++x)\n"
1704 << " imageStore(dstImg, ivec3(x, y, z), imageLoad(srcImg, ivec3(x, y, z)));\n";
1705 else
1706 DE_ASSERT(0);
1707
1708 initPassthroughPrograms(programCollection, m_shaderPrefix, declSrc.str(), mainSrc.str(), m_stage);
1709 }
1710
1711 deUint32 getResourceUsageFlags (void) const
1712 {
1713 return VK_IMAGE_USAGE_STORAGE_BIT;
1714 }
1715
1716 VkQueueFlags getQueueFlags (const OperationContext& context) const
1717 {
1718 DE_UNREF(context);
1719 return (m_stage == VK_SHADER_STAGE_COMPUTE_BIT ? VK_QUEUE_COMPUTE_BIT : VK_QUEUE_GRAPHICS_BIT);
1720 }
1721
1722 de::MovePtr<Operation> build (OperationContext& context, Resource& resource) const
1723 {
1724 if (m_stage & VK_SHADER_STAGE_COMPUTE_BIT)
1725 return de::MovePtr<Operation>(new ImageImplementation(context, resource, m_stage, m_shaderPrefix, m_mode, PIPELINE_TYPE_COMPUTE, m_dispatchCall));
1726 else
1727 return de::MovePtr<Operation>(new ImageImplementation(context, resource, m_stage, m_shaderPrefix, m_mode, PIPELINE_TYPE_GRAPHICS, m_dispatchCall));
1728 }
1729
1730 private:
1731 const ResourceDescription m_resourceDesc;
1732 const AccessMode m_mode;
1733 const VkShaderStageFlagBits m_stage;
1734 const std::string m_shaderPrefix;
1735 const DispatchCall m_dispatchCall;
1736 };
1737
1738 } // ShaderAccess ns
1739
1740 namespace CopyBufferToImage
1741 {
1742
1743 class WriteImplementation : public Operation
1744 {
1745 public:
1746 WriteImplementation (OperationContext& context, Resource& resource)
1747 : m_context (context)
1748 , m_resource (resource)
1749 , m_bufferSize (getPixelBufferSize(m_resource.getImage().format, m_resource.getImage().extent))
1750 {
1751 DE_ASSERT(m_resource.getType() == RESOURCE_TYPE_IMAGE);
1752
1753 const DeviceInterface& vk = m_context.getDeviceInterface();
1754 const VkDevice device = m_context.getDevice();
1755 Allocator& allocator = m_context.getAllocator();
1756
1757 m_hostBuffer = de::MovePtr<Buffer>(new Buffer(
1758 vk, device, allocator, makeBufferCreateInfo(m_bufferSize, VK_BUFFER_USAGE_TRANSFER_SRC_BIT), MemoryRequirement::HostVisible));
1759
1760 const Allocation& alloc = m_hostBuffer->getAllocation();
1761 fillPattern(alloc.getHostPtr(), m_bufferSize);
1762 flushMappedMemoryRange(vk, device, alloc.getMemory(), alloc.getOffset(), m_bufferSize);
1763 }
1764
1765 void recordCommands (const VkCommandBuffer cmdBuffer)
1766 {
1767 const DeviceInterface& vk = m_context.getDeviceInterface();
1768 const VkBufferImageCopy copyRegion = makeBufferImageCopy(m_resource.getImage().subresourceLayers, m_resource.getImage().extent);
1769
1770 const VkImageMemoryBarrier layoutBarrier = makeImageMemoryBarrier(
1771 (VkAccessFlags)0, VK_ACCESS_TRANSFER_WRITE_BIT,
1772 VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1773 m_resource.getImage().handle, m_resource.getImage().subresourceRange);
1774 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0u, DE_NULL, 0u, DE_NULL, 1u, &layoutBarrier);
1775
1776 vk.cmdCopyBufferToImage(cmdBuffer, **m_hostBuffer, m_resource.getImage().handle, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1u, &copyRegion);
1777 }
1778
1779 SyncInfo getSyncInfo (void) const
1780 {
1781 const SyncInfo syncInfo =
1782 {
1783 VK_PIPELINE_STAGE_TRANSFER_BIT, // VkPipelineStageFlags stageMask;
1784 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags accessMask;
1785 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // VkImageLayout imageLayout;
1786 };
1787 return syncInfo;
1788 }
1789
1790 Data getData (void) const
1791 {
1792 return getHostBufferData(m_context, *m_hostBuffer, m_bufferSize);
1793 }
1794
1795 private:
1796 OperationContext& m_context;
1797 Resource& m_resource;
1798 de::MovePtr<Buffer> m_hostBuffer;
1799 const VkDeviceSize m_bufferSize;
1800 };
1801
1802 class ReadImplementation : public Operation
1803 {
1804 public:
1805 ReadImplementation (OperationContext& context, Resource& resource)
1806 : m_context (context)
1807 , m_resource (resource)
1808 , m_subresourceRange (makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u))
1809 , m_subresourceLayers (makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, 1u))
1810 {
1811 DE_ASSERT(m_resource.getType() == RESOURCE_TYPE_BUFFER);
1812
1813 const DeviceInterface& vk = m_context.getDeviceInterface();
1814 const VkDevice device = m_context.getDevice();
1815 Allocator& allocator = m_context.getAllocator();
1816 const VkFormat format = VK_FORMAT_R8G8B8A8_UNORM;
1817 const deUint32 pixelSize = tcu::getPixelSize(mapVkFormat(format));
1818
1819 DE_ASSERT((m_resource.getBuffer().size % pixelSize) == 0);
1820 m_imageExtent = get2DImageExtentWithSize(m_resource.getBuffer().size, pixelSize); // there may be some unused space at the end
1821
1822 // Copy destination image.
1823 m_image = de::MovePtr<Image>(new Image(
1824 vk, device, allocator, makeImageCreateInfo(VK_IMAGE_TYPE_2D, m_imageExtent, format, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT), MemoryRequirement::Any));
1825
1826 // Image data will be copied here, so it can be read on the host.
1827 m_hostBuffer = de::MovePtr<Buffer>(new Buffer(
1828 vk, device, allocator, makeBufferCreateInfo(m_resource.getBuffer().size, VK_BUFFER_USAGE_TRANSFER_DST_BIT), MemoryRequirement::HostVisible));
1829 }
1830
1831 void recordCommands (const VkCommandBuffer cmdBuffer)
1832 {
1833 const DeviceInterface& vk = m_context.getDeviceInterface();
1834 const VkBufferImageCopy copyRegion = makeBufferImageCopy(m_subresourceLayers, m_imageExtent);
1835
1836 // Resource -> Image
1837 {
1838 const VkImageMemoryBarrier layoutBarrier = makeImageMemoryBarrier(
1839 (VkAccessFlags)0, VK_ACCESS_TRANSFER_WRITE_BIT,
1840 VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1841 **m_image, m_subresourceRange);
1842 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0u, DE_NULL, 0u, DE_NULL, 1u, &layoutBarrier);
1843
1844 vk.cmdCopyBufferToImage(cmdBuffer, m_resource.getBuffer().handle, **m_image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1u, &copyRegion);
1845 }
1846 // Image -> Host buffer
1847 {
1848 const VkImageMemoryBarrier layoutBarrier = makeImageMemoryBarrier(
1849 VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT,
1850 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
1851 **m_image, m_subresourceRange);
1852 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0u, DE_NULL, 0u, DE_NULL, 1u, &layoutBarrier);
1853
1854 vk.cmdCopyImageToBuffer(cmdBuffer, **m_image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, **m_hostBuffer, 1u, &copyRegion);
1855
1856 const VkBufferMemoryBarrier barrier = makeBufferMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT, **m_hostBuffer, 0u, m_resource.getBuffer().size);
1857 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0, 0u, DE_NULL, 1u, &barrier, 0u, DE_NULL);
1858 }
1859 }
1860
1861 SyncInfo getSyncInfo (void) const
1862 {
1863 const SyncInfo syncInfo =
1864 {
1865 VK_PIPELINE_STAGE_TRANSFER_BIT, // VkPipelineStageFlags stageMask;
1866 VK_ACCESS_TRANSFER_READ_BIT, // VkAccessFlags accessMask;
1867 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout imageLayout;
1868 };
1869 return syncInfo;
1870 }
1871
1872 Data getData (void) const
1873 {
1874 return getHostBufferData(m_context, *m_hostBuffer, m_resource.getBuffer().size);
1875 }
1876
1877 private:
1878 OperationContext& m_context;
1879 Resource& m_resource;
1880 const VkImageSubresourceRange m_subresourceRange;
1881 const VkImageSubresourceLayers m_subresourceLayers;
1882 de::MovePtr<Buffer> m_hostBuffer;
1883 de::MovePtr<Image> m_image;
1884 VkExtent3D m_imageExtent;
1885 };
1886
1887 class Support : public OperationSupport
1888 {
1889 public:
1890 Support (const ResourceDescription& resourceDesc, const AccessMode mode)
1891 : m_mode (mode)
1892 , m_requiredQueueFlags (resourceDesc.type == RESOURCE_TYPE_IMAGE && isDepthStencilFormat(resourceDesc.imageFormat) ? VK_QUEUE_GRAPHICS_BIT : VK_QUEUE_TRANSFER_BIT)
1893 {
1894 // From spec:
1895 // Because depth or stencil aspect buffer to image copies may require format conversions on some implementations,
1896 // they are not supported on queues that do not support graphics.
1897
1898 DE_ASSERT(m_mode == ACCESS_MODE_READ || m_mode == ACCESS_MODE_WRITE);
1899 DE_ASSERT(m_mode == ACCESS_MODE_READ || resourceDesc.type != RESOURCE_TYPE_BUFFER);
1900 DE_ASSERT(m_mode == ACCESS_MODE_WRITE || resourceDesc.type != RESOURCE_TYPE_IMAGE);
1901 }
1902
1903 deUint32 getResourceUsageFlags (void) const
1904 {
1905 if (m_mode == ACCESS_MODE_READ)
1906 return VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
1907 else
1908 return VK_IMAGE_USAGE_TRANSFER_DST_BIT;
1909 }
1910
1911 VkQueueFlags getQueueFlags (const OperationContext& context) const
1912 {
1913 DE_UNREF(context);
1914 return m_requiredQueueFlags;
1915 }
1916
1917 de::MovePtr<Operation> build (OperationContext& context, Resource& resource) const
1918 {
1919 if (m_mode == ACCESS_MODE_READ)
1920 return de::MovePtr<Operation>(new ReadImplementation(context, resource));
1921 else
1922 return de::MovePtr<Operation>(new WriteImplementation(context, resource));
1923 }
1924
1925 private:
1926 const AccessMode m_mode;
1927 const VkQueueFlags m_requiredQueueFlags;
1928 };
1929
1930 } // CopyBufferToImage ns
1931
1932 namespace CopyImageToBuffer
1933 {
1934
1935 class WriteImplementation : public Operation
1936 {
1937 public:
1938 WriteImplementation (OperationContext& context, Resource& resource)
1939 : m_context (context)
1940 , m_resource (resource)
1941 , m_subresourceRange (makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u))
1942 , m_subresourceLayers (makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, 1u))
1943 {
1944 DE_ASSERT(m_resource.getType() == RESOURCE_TYPE_BUFFER);
1945
1946 const DeviceInterface& vk = m_context.getDeviceInterface();
1947 const VkDevice device = m_context.getDevice();
1948 Allocator& allocator = m_context.getAllocator();
1949 const VkFormat format = VK_FORMAT_R8G8B8A8_UNORM;
1950 const deUint32 pixelSize = tcu::getPixelSize(mapVkFormat(format));
1951
1952 DE_ASSERT((m_resource.getBuffer().size % pixelSize) == 0);
1953 m_imageExtent = get2DImageExtentWithSize(m_resource.getBuffer().size, pixelSize);
1954
1955 // Source data staging buffer
1956 m_hostBuffer = de::MovePtr<Buffer>(new Buffer(
1957 vk, device, allocator, makeBufferCreateInfo(m_resource.getBuffer().size, VK_BUFFER_USAGE_TRANSFER_SRC_BIT), MemoryRequirement::HostVisible));
1958
1959 const Allocation& alloc = m_hostBuffer->getAllocation();
1960 fillPattern(alloc.getHostPtr(), m_resource.getBuffer().size);
1961 flushMappedMemoryRange(vk, device, alloc.getMemory(), alloc.getOffset(), m_resource.getBuffer().size);
1962
1963 // Source data image
1964 m_image = de::MovePtr<Image>(new Image(
1965 vk, device, allocator, makeImageCreateInfo(VK_IMAGE_TYPE_2D, m_imageExtent, format, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT), MemoryRequirement::Any));
1966 }
1967
1968 void recordCommands (const VkCommandBuffer cmdBuffer)
1969 {
1970 const DeviceInterface& vk = m_context.getDeviceInterface();
1971 const VkBufferImageCopy copyRegion = makeBufferImageCopy(m_subresourceLayers, m_imageExtent);
1972
1973 // Host buffer -> Image
1974 {
1975 const VkImageMemoryBarrier layoutBarrier = makeImageMemoryBarrier(
1976 (VkAccessFlags)0, VK_ACCESS_TRANSFER_WRITE_BIT,
1977 VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1978 **m_image, m_subresourceRange);
1979 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0u, DE_NULL, 0u, DE_NULL, 1u, &layoutBarrier);
1980
1981 vk.cmdCopyBufferToImage(cmdBuffer, **m_hostBuffer, **m_image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1u, &copyRegion);
1982 }
1983 // Image -> Resource
1984 {
1985 const VkImageMemoryBarrier layoutBarrier = makeImageMemoryBarrier(
1986 VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT,
1987 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
1988 **m_image, m_subresourceRange);
1989 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0u, DE_NULL, 0u, DE_NULL, 1u, &layoutBarrier);
1990
1991 vk.cmdCopyImageToBuffer(cmdBuffer, **m_image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, m_resource.getBuffer().handle, 1u, &copyRegion);
1992 }
1993 }
1994
1995 SyncInfo getSyncInfo (void) const
1996 {
1997 const SyncInfo syncInfo =
1998 {
1999 VK_PIPELINE_STAGE_TRANSFER_BIT, // VkPipelineStageFlags stageMask;
2000 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags accessMask;
2001 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout imageLayout;
2002 };
2003 return syncInfo;
2004 }
2005
2006 Data getData (void) const
2007 {
2008 return getHostBufferData(m_context, *m_hostBuffer, m_resource.getBuffer().size);
2009 }
2010
2011 private:
2012 OperationContext& m_context;
2013 Resource& m_resource;
2014 const VkImageSubresourceRange m_subresourceRange;
2015 const VkImageSubresourceLayers m_subresourceLayers;
2016 de::MovePtr<Buffer> m_hostBuffer;
2017 de::MovePtr<Image> m_image;
2018 VkExtent3D m_imageExtent;
2019 };
2020
2021 class ReadImplementation : public Operation
2022 {
2023 public:
2024 ReadImplementation (OperationContext& context, Resource& resource)
2025 : m_context (context)
2026 , m_resource (resource)
2027 , m_bufferSize (getPixelBufferSize(m_resource.getImage().format, m_resource.getImage().extent))
2028 {
2029 DE_ASSERT(m_resource.getType() == RESOURCE_TYPE_IMAGE);
2030
2031 const DeviceInterface& vk = m_context.getDeviceInterface();
2032 const VkDevice device = m_context.getDevice();
2033 Allocator& allocator = m_context.getAllocator();
2034
2035 m_hostBuffer = de::MovePtr<Buffer>(new Buffer(
2036 vk, device, allocator, makeBufferCreateInfo(m_bufferSize, VK_BUFFER_USAGE_TRANSFER_DST_BIT), MemoryRequirement::HostVisible));
2037
2038 const Allocation& alloc = m_hostBuffer->getAllocation();
2039 deMemset(alloc.getHostPtr(), 0, static_cast<size_t>(m_bufferSize));
2040 flushMappedMemoryRange(vk, device, alloc.getMemory(), alloc.getOffset(), m_bufferSize);
2041 }
2042
2043 void recordCommands (const VkCommandBuffer cmdBuffer)
2044 {
2045 const DeviceInterface& vk = m_context.getDeviceInterface();
2046 const VkBufferImageCopy copyRegion = makeBufferImageCopy(m_resource.getImage().subresourceLayers, m_resource.getImage().extent);
2047
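// The resource image is expected to already be in TRANSFER_SRC_OPTIMAL layout here, as advertised by getSyncInfo() below.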
2048 vk.cmdCopyImageToBuffer(cmdBuffer, m_resource.getImage().handle, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, **m_hostBuffer, 1u, &copyRegion);
2049 }
2050
2051 SyncInfo getSyncInfo (void) const
2052 {
2053 const SyncInfo syncInfo =
2054 {
2055 VK_PIPELINE_STAGE_TRANSFER_BIT, // VkPipelineStageFlags stageMask;
2056 VK_ACCESS_TRANSFER_READ_BIT, // VkAccessFlags accessMask;
2057 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, // VkImageLayout imageLayout;
2058 };
2059 return syncInfo;
2060 }
2061
2062 Data getData (void) const
2063 {
2064 return getHostBufferData(m_context, *m_hostBuffer, m_bufferSize);
2065 }
2066
2067 private:
2068 OperationContext& m_context;
2069 Resource& m_resource;
2070 de::MovePtr<Buffer> m_hostBuffer;
2071 const VkDeviceSize m_bufferSize;
2072 };
2073
2074 class Support : public OperationSupport
2075 {
2076 public:
2077 Support (const ResourceDescription& resourceDesc, const AccessMode mode)
2078 : m_mode (mode)
2079 , m_requiredQueueFlags (resourceDesc.type == RESOURCE_TYPE_IMAGE && isDepthStencilFormat(resourceDesc.imageFormat) ? VK_QUEUE_GRAPHICS_BIT : VK_QUEUE_TRANSFER_BIT)
2080 {
2081 DE_ASSERT(m_mode == ACCESS_MODE_READ || m_mode == ACCESS_MODE_WRITE);
2082 DE_ASSERT(m_mode == ACCESS_MODE_READ || resourceDesc.type != RESOURCE_TYPE_IMAGE);
2083 DE_ASSERT(m_mode == ACCESS_MODE_WRITE || resourceDesc.type != RESOURCE_TYPE_BUFFER);
2084 }
2085
2086 deUint32 getResourceUsageFlags (void) const
2087 {
2088 if (m_mode == ACCESS_MODE_READ)
2089 return VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
2090 else
2091 return VK_BUFFER_USAGE_TRANSFER_DST_BIT;
2092 }
2093
2094 VkQueueFlags getQueueFlags (const OperationContext& context) const
2095 {
2096 DE_UNREF(context);
2097 return m_requiredQueueFlags;
2098 }
2099
2100 de::MovePtr<Operation> build (OperationContext& context, Resource& resource) const
2101 {
2102 if (m_mode == ACCESS_MODE_READ)
2103 return de::MovePtr<Operation>(new ReadImplementation(context, resource));
2104 else
2105 return de::MovePtr<Operation>(new WriteImplementation(context, resource));
2106 }
2107
2108 private:
2109 const AccessMode m_mode;
2110 const VkQueueFlags m_requiredQueueFlags;
2111 };
2112
2113 } // CopyImageToBuffer ns
2114
2115 namespace ClearImage
2116 {
2117
2118 enum ClearMode
2119 {
2120 CLEAR_MODE_COLOR,
2121 CLEAR_MODE_DEPTH_STENCIL,
2122 };
2123
2124 class Implementation : public Operation
2125 {
2126 public:
2127 Implementation (OperationContext& context, Resource& resource, const ClearMode mode)
2128 : m_context (context)
2129 , m_resource (resource)
2130 , m_clearValue (makeClearValue(m_resource.getImage().format))
2131 , m_mode (mode)
2132 {
2133 const VkDeviceSize size = getPixelBufferSize(m_resource.getImage().format, m_resource.getImage().extent);
2134 const VkExtent3D& extent = m_resource.getImage().extent;
2135 const VkFormat format = m_resource.getImage().format;
2136 const tcu::TextureFormat texFormat = mapVkFormat(format);
2137
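// Precompute the expected result: the whole image filled with the clear value.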
2138 m_data.resize(static_cast<std::size_t>(size));
2139 tcu::PixelBufferAccess imagePixels(texFormat, extent.width, extent.height, extent.depth, &m_data[0]);
2140 clearPixelBuffer(imagePixels, m_clearValue);
2141 }
2142
2143 void recordCommands (const VkCommandBuffer cmdBuffer)
2144 {
2145 const DeviceInterface& vk = m_context.getDeviceInterface();
2146
2147 const VkImageMemoryBarrier layoutBarrier = makeImageMemoryBarrier(
2148 (VkAccessFlags)0, VK_ACCESS_TRANSFER_WRITE_BIT,
2149 VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2150 m_resource.getImage().handle, m_resource.getImage().subresourceRange);
2151
2152 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0u, DE_NULL, 0u, DE_NULL, 1u, &layoutBarrier);
2153
2154 if (m_mode == CLEAR_MODE_COLOR)
2155 vk.cmdClearColorImage(cmdBuffer, m_resource.getImage().handle, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &m_clearValue.color, 1u, &m_resource.getImage().subresourceRange);
2156 else
2157 vk.cmdClearDepthStencilImage(cmdBuffer, m_resource.getImage().handle, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &m_clearValue.depthStencil, 1u, &m_resource.getImage().subresourceRange);
2158 }
2159
2160 SyncInfo getSyncInfo (void) const
2161 {
2162 const SyncInfo syncInfo =
2163 {
2164 VK_PIPELINE_STAGE_TRANSFER_BIT, // VkPipelineStageFlags stageMask;
2165 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags accessMask;
2166 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // VkImageLayout imageLayout;
2167 };
2168 return syncInfo;
2169 }
2170
2171 Data getData (void) const
2172 {
2173 const Data data =
2174 {
2175 m_data.size(), // std::size_t size;
2176 &m_data[0], // const deUint8* data;
2177 };
2178 return data;
2179 }
2180
2181 private:
2182 OperationContext& m_context;
2183 Resource& m_resource;
2184 std::vector<deUint8> m_data;
2185 const VkClearValue m_clearValue;
2186 const ClearMode m_mode;
2187 };
2188
2189 class Support : public OperationSupport
2190 {
2191 public:
2192 Support (const ResourceDescription& resourceDesc, const ClearMode mode)
2193 : m_resourceDesc (resourceDesc)
2194 , m_mode (mode)
2195 {
2196 DE_ASSERT(m_mode == CLEAR_MODE_COLOR || m_mode == CLEAR_MODE_DEPTH_STENCIL);
2197 DE_ASSERT(m_resourceDesc.type == RESOURCE_TYPE_IMAGE);
2198 DE_ASSERT(m_resourceDesc.imageAspect == VK_IMAGE_ASPECT_COLOR_BIT || (m_mode != CLEAR_MODE_COLOR));
2199 DE_ASSERT((m_resourceDesc.imageAspect & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) || (m_mode != CLEAR_MODE_DEPTH_STENCIL));
2200 }
2201
2202 deUint32 getResourceUsageFlags (void) const
2203 {
2204 return VK_IMAGE_USAGE_TRANSFER_DST_BIT;
2205 }
2206
2207 VkQueueFlags getQueueFlags (const OperationContext& context) const
2208 {
2209 DE_UNREF(context);
2210 if (m_mode == CLEAR_MODE_COLOR)
2211 return VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT;
2212 else
2213 return VK_QUEUE_GRAPHICS_BIT;
2214 }
2215
2216 de::MovePtr<Operation> build (OperationContext& context, Resource& resource) const
2217 {
2218 return de::MovePtr<Operation>(new Implementation(context, resource, m_mode));
2219 }
2220
2221 private:
2222 const ResourceDescription m_resourceDesc;
2223 const ClearMode m_mode;
2224 };
2225
2226 } // ClearImage ns
2227
2228 namespace Draw
2229 {
2230
2231 enum DrawCall
2232 {
2233 DRAW_CALL_DRAW,
2234 DRAW_CALL_DRAW_INDEXED,
2235 DRAW_CALL_DRAW_INDIRECT,
2236 DRAW_CALL_DRAW_INDEXED_INDIRECT,
2237 };
2238
2239 //! A write operation that is a result of drawing to an image.
2240 //! \todo Add support for depth/stencil too?
2241 class Implementation : public Operation
2242 {
2243 public:
2244 Implementation (OperationContext& context, Resource& resource, const DrawCall drawCall)
2245 : m_context (context)
2246 , m_resource (resource)
2247 , m_drawCall (drawCall)
2248 , m_vertices (context)
2249 {
2250 const DeviceInterface& vk = context.getDeviceInterface();
2251 const VkDevice device = context.getDevice();
2252 Allocator& allocator = context.getAllocator();
2253
2254 // Indirect buffer
2255
2256 if (m_drawCall == DRAW_CALL_DRAW_INDIRECT)
2257 {
2258 m_indirectBuffer = de::MovePtr<Buffer>(new Buffer(vk, device, allocator,
2259 makeBufferCreateInfo(sizeof(VkDrawIndirectCommand), VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT), MemoryRequirement::HostVisible));
2260
2261 const Allocation& alloc = m_indirectBuffer->getAllocation();
2262 VkDrawIndirectCommand* const pIndirectCommand = static_cast<VkDrawIndirectCommand*>(alloc.getHostPtr());
2263
2264 pIndirectCommand->vertexCount = m_vertices.getNumVertices();
2265 pIndirectCommand->instanceCount = 1u;
2266 pIndirectCommand->firstVertex = 0u;
2267 pIndirectCommand->firstInstance = 0u;
2268
2269 flushMappedMemoryRange(vk, device, alloc.getMemory(), alloc.getOffset(), sizeof(VkDrawIndirectCommand));
2270 }
2271 else if (m_drawCall == DRAW_CALL_DRAW_INDEXED_INDIRECT)
2272 {
2273 m_indirectBuffer = de::MovePtr<Buffer>(new Buffer(vk, device, allocator,
2274 makeBufferCreateInfo(sizeof(VkDrawIndexedIndirectCommand), VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT), MemoryRequirement::HostVisible));
2275
2276 const Allocation& alloc = m_indirectBuffer->getAllocation();
2277 VkDrawIndexedIndirectCommand* const pIndirectCommand = static_cast<VkDrawIndexedIndirectCommand*>(alloc.getHostPtr());
2278
2279 pIndirectCommand->indexCount = m_vertices.getNumIndices();
2280 pIndirectCommand->instanceCount = 1u;
2281 pIndirectCommand->firstIndex = 0u;
2282 pIndirectCommand->vertexOffset = 0u;
2283 pIndirectCommand->firstInstance = 0u;
2284
2285 flushMappedMemoryRange(vk, device, alloc.getMemory(), alloc.getOffset(), sizeof(VkDrawIndexedIndirectCommand));
2286 }
2287
2288 // Resource image is the color attachment
2289
2290 m_colorFormat = m_resource.getImage().format;
2291 m_colorSubresourceRange = m_resource.getImage().subresourceRange;
2292 m_colorImage = m_resource.getImage().handle;
2293 m_attachmentExtent = m_resource.getImage().extent;
2294
2295 // Pipeline
2296
2297 m_colorAttachmentView = makeImageView (vk, device, m_colorImage, VK_IMAGE_VIEW_TYPE_2D, m_colorFormat, m_colorSubresourceRange);
2298 m_renderPass = makeRenderPass (vk, device, m_colorFormat);
2299 m_framebuffer = makeFramebuffer (vk, device, *m_renderPass, *m_colorAttachmentView, m_attachmentExtent.width, m_attachmentExtent.height, 1u);
2300 m_pipelineLayout = makePipelineLayoutWithoutDescriptors(vk, device);
2301
2302 GraphicsPipelineBuilder pipelineBuilder;
2303 pipelineBuilder
2304 .setRenderSize (tcu::IVec2(m_attachmentExtent.width, m_attachmentExtent.height))
2305 .setVertexInputSingleAttribute (m_vertices.getVertexFormat(), m_vertices.getVertexStride())
2306 .setShader (vk, device, VK_SHADER_STAGE_VERTEX_BIT, context.getBinaryCollection().get("draw_vert"), DE_NULL)
2307 .setShader (vk, device, VK_SHADER_STAGE_FRAGMENT_BIT, context.getBinaryCollection().get("draw_frag"), DE_NULL);
2308
2309 m_pipeline = pipelineBuilder.build(vk, device, *m_pipelineLayout, *m_renderPass, context.getPipelineCacheData());
2310
2311 // Set expected draw values
2312
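// The drawn geometry covers the whole attachment and the fragment shader outputs the format's clear value, so the expected data matches a cleared image.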
2313 m_expectedData.resize(static_cast<size_t>(getPixelBufferSize(m_resource.getImage().format, m_resource.getImage().extent)));
2314 tcu::PixelBufferAccess imagePixels(mapVkFormat(m_colorFormat), m_attachmentExtent.width, m_attachmentExtent.height, m_attachmentExtent.depth, &m_expectedData[0]);
2315 clearPixelBuffer(imagePixels, makeClearValue(m_colorFormat));
2316 }
2317
2318 void recordCommands (const VkCommandBuffer cmdBuffer)
2319 {
2320 const DeviceInterface& vk = m_context.getDeviceInterface();
2321
2322 // Change color attachment image layout
2323 {
2324 const VkImageMemoryBarrier colorAttachmentLayoutBarrier = makeImageMemoryBarrier(
2325 (VkAccessFlags)0, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
2326 VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
2327 m_colorImage, m_colorSubresourceRange);
2328
2329 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, (VkDependencyFlags)0,
2330 0u, DE_NULL, 0u, DE_NULL, 1u, &colorAttachmentLayoutBarrier);
2331 }
2332
2333 {
2334 const VkRect2D renderArea = {
2335 makeOffset2D(0, 0),
2336 makeExtent2D(m_attachmentExtent.width, m_attachmentExtent.height),
2337 };
2338 const tcu::Vec4 clearColor = tcu::Vec4(0.0f, 0.0f, 0.0f, 1.0f);
2339
2340 beginRenderPass(vk, cmdBuffer, *m_renderPass, *m_framebuffer, renderArea, clearColor);
2341 }
2342
2343 vk.cmdBindPipeline(cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *m_pipeline);
2344 {
2345 const VkDeviceSize vertexBufferOffset = 0ull;
2346 const VkBuffer vertexBuffer = m_vertices.getVertexBuffer();
2347 vk.cmdBindVertexBuffers(cmdBuffer, 0u, 1u, &vertexBuffer, &vertexBufferOffset);
2348 }
2349
2350 if (m_drawCall == DRAW_CALL_DRAW_INDEXED || m_drawCall == DRAW_CALL_DRAW_INDEXED_INDIRECT)
2351 vk.cmdBindIndexBuffer(cmdBuffer, m_vertices.getIndexBuffer(), 0u, m_vertices.getIndexType());
2352
2353 switch (m_drawCall)
2354 {
2355 case DRAW_CALL_DRAW:
2356 vk.cmdDraw(cmdBuffer, m_vertices.getNumVertices(), 1u, 0u, 0u);
2357 break;
2358
2359 case DRAW_CALL_DRAW_INDEXED:
2360 vk.cmdDrawIndexed(cmdBuffer, m_vertices.getNumIndices(), 1u, 0u, 0, 0u);
2361 break;
2362
2363 case DRAW_CALL_DRAW_INDIRECT:
2364 vk.cmdDrawIndirect(cmdBuffer, **m_indirectBuffer, 0u, 1u, 0u);
2365 break;
2366
2367 case DRAW_CALL_DRAW_INDEXED_INDIRECT:
2368 vk.cmdDrawIndexedIndirect(cmdBuffer, **m_indirectBuffer, 0u, 1u, 0u);
2369 break;
2370 }
2371
2372 endRenderPass(vk, cmdBuffer);
2373 }
2374
2375 SyncInfo getSyncInfo (void) const
2376 {
2377 const SyncInfo syncInfo =
2378 {
2379 VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, // VkPipelineStageFlags stageMask;
2380 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // VkAccessFlags accessMask;
2381 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout imageLayout;
2382 };
2383 return syncInfo;
2384 }
2385
2386 Data getData (void) const
2387 {
2388 const Data data =
2389 {
2390 m_expectedData.size(), // std::size_t size;
2391 &m_expectedData[0], // const deUint8* data;
2392 };
2393 return data;
2394 }
2395
2396 private:
2397 OperationContext& m_context;
2398 Resource& m_resource;
2399 const DrawCall m_drawCall;
2400 const VertexGrid m_vertices;
2401 std::vector<deUint8> m_expectedData;
2402 de::MovePtr<Buffer> m_indirectBuffer;
2403 VkFormat m_colorFormat;
2404 VkImage m_colorImage;
2405 Move<VkImageView> m_colorAttachmentView;
2406 VkImageSubresourceRange m_colorSubresourceRange;
2407 VkExtent3D m_attachmentExtent;
2408 Move<VkRenderPass> m_renderPass;
2409 Move<VkFramebuffer> m_framebuffer;
2410 Move<VkPipelineLayout> m_pipelineLayout;
2411 Move<VkPipeline> m_pipeline;
2412 };
2413
2414 template<typename T, std::size_t N>
2415 std::string toString (const T (&values)[N])
2416 {
2417 std::ostringstream str;
2418 for (std::size_t i = 0; i < N; ++i)
2419 str << (i != 0 ? ", " : "") << values[i];
2420 return str.str();
2421 }
2422
2423 class Support : public OperationSupport
2424 {
2425 public:
2426 Support (const ResourceDescription& resourceDesc, const DrawCall drawCall)
2427 : m_resourceDesc (resourceDesc)
2428 , m_drawCall (drawCall)
2429 {
2430 DE_ASSERT(m_resourceDesc.type == RESOURCE_TYPE_IMAGE && m_resourceDesc.imageType == VK_IMAGE_TYPE_2D);
2431 DE_ASSERT(!isDepthStencilFormat(m_resourceDesc.imageFormat));
2432 }
2433
2434 void initPrograms (SourceCollections& programCollection) const
2435 {
2436 // Vertex
2437 {
2438 std::ostringstream src;
2439 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
2440 << "\n"
2441 << "layout(location = 0) in vec4 v_in_position;\n"
2442 << "\n"
2443 << "out " << s_perVertexBlock << ";\n"
2444 << "\n"
2445 << "void main (void)\n"
2446 << "{\n"
2447 << " gl_Position = v_in_position;\n"
2448 << "}\n";
2449
2450 programCollection.glslSources.add("draw_vert") << glu::VertexSource(src.str());
2451 }
2452
2453 // Fragment
2454 {
2455 const VkClearValue clearValue = makeClearValue(m_resourceDesc.imageFormat);
2456 const bool isIntegerFormat = isIntFormat(m_resourceDesc.imageFormat) || isUintFormat(m_resourceDesc.imageFormat);
2457 const std::string colorType = (isIntegerFormat ? "uvec4" : "vec4");
2458
2459 std::ostringstream src;
2460 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
2461 << "\n"
2462 << "layout(location = 0) out " << colorType << " o_color;\n"
2463 << "\n"
2464 << "void main (void)\n"
2465 << "{\n"
2466 << " o_color = " << colorType << "(" << (isIntegerFormat ? toString(clearValue.color.uint32) : toString(clearValue.color.float32)) << ");\n"
2467 << "}\n";
2468
2469 programCollection.glslSources.add("draw_frag") << glu::FragmentSource(src.str());
2470 }
2471 }
2472
2473 deUint32 getResourceUsageFlags (void) const
2474 {
2475 return VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
2476 }
2477
2478 VkQueueFlags getQueueFlags (const OperationContext& context) const
2479 {
2480 DE_UNREF(context);
2481 return VK_QUEUE_GRAPHICS_BIT;
2482 }
2483
2484 de::MovePtr<Operation> build (OperationContext& context, Resource& resource) const
2485 {
2486 return de::MovePtr<Operation>(new Implementation(context, resource, m_drawCall));
2487 }
2488
2489 private:
2490 const ResourceDescription m_resourceDesc;
2491 const DrawCall m_drawCall;
2492 };
2493
2494 } // Draw ns
2495
2496 namespace ClearAttachments
2497 {
2498
2499 class Implementation : public Operation
2500 {
2501 public:
2502 Implementation (OperationContext& context, Resource& resource)
2503 : m_context (context)
2504 , m_resource (resource)
2505 , m_clearValue (makeClearValue(m_resource.getImage().format))
2506 {
2507 const DeviceInterface& vk = context.getDeviceInterface();
2508 const VkDevice device = context.getDevice();
2509
2510 const VkDeviceSize size = getPixelBufferSize(m_resource.getImage().format, m_resource.getImage().extent);
2511 const VkExtent3D& extent = m_resource.getImage().extent;
2512 const VkFormat format = m_resource.getImage().format;
2513 const tcu::TextureFormat texFormat = mapVkFormat(format);
2514 const SyncInfo syncInfo = getSyncInfo();
2515
2516 m_data.resize(static_cast<std::size_t>(size));
2517 tcu::PixelBufferAccess imagePixels(texFormat, extent.width, extent.height, extent.depth, &m_data[0]);
2518 clearPixelBuffer(imagePixels, m_clearValue);
2519
2520 m_attachmentView = makeImageView(vk, device, m_resource.getImage().handle, getImageViewType(m_resource.getImage().imageType), m_resource.getImage().format, m_resource.getImage().subresourceRange);
2521
2522 const VkAttachmentDescription colorAttachmentDescription =
2523 {
2524 (VkAttachmentDescriptionFlags)0, // VkAttachmentDescriptionFlags flags;
2525 m_resource.getImage().format, // VkFormat format;
2526 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples;
2527 VK_ATTACHMENT_LOAD_OP_DONT_CARE, // VkAttachmentLoadOp loadOp;
2528 VK_ATTACHMENT_STORE_OP_STORE, // VkAttachmentStoreOp storeOp;
2529 VK_ATTACHMENT_LOAD_OP_DONT_CARE, // VkAttachmentLoadOp stencilLoadOp;
2530 VK_ATTACHMENT_STORE_OP_STORE, // VkAttachmentStoreOp stencilStoreOp;
2531 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout initialLayout;
2532 syncInfo.imageLayout // VkImageLayout finalLayout;
2533 };
2534
2535 const VkAttachmentReference colorAttachmentReference =
2536 {
2537 0u, // deUint32 attachment;
2538 syncInfo.imageLayout // VkImageLayout layout;
2539 };
2540
2541 const VkAttachmentReference depthStencilAttachmentReference =
2542 {
2543 0u, // deUint32 attachment;
2544 syncInfo.imageLayout // VkImageLayout layout;
2545 };
2546
2547 VkSubpassDescription subpassDescription =
2548 {
2549 (VkSubpassDescriptionFlags)0, // VkSubpassDescriptionFlags flags;
2550 VK_PIPELINE_BIND_POINT_GRAPHICS, // VkPipelineBindPoint pipelineBindPoint;
2551 0u, // deUint32 inputAttachmentCount;
2552 DE_NULL, // const VkAttachmentReference* pInputAttachments;
2553 0u, // deUint32 colorAttachmentCount;
2554 DE_NULL, // const VkAttachmentReference* pColorAttachments;
2555 DE_NULL, // const VkAttachmentReference* pResolveAttachments;
2556 DE_NULL, // const VkAttachmentReference* pDepthStencilAttachment;
2557 0u, // deUint32 preserveAttachmentCount;
2558 DE_NULL // const deUint32* pPreserveAttachments;
2559 };
2560
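// Hook the attachment up as either a color or a depth/stencil attachment, depending on the image aspect.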
2561 switch (m_resource.getImage().subresourceRange.aspectMask)
2562 {
2563 case VK_IMAGE_ASPECT_COLOR_BIT:
2564 subpassDescription.colorAttachmentCount = 1u;
2565 subpassDescription.pColorAttachments = &colorAttachmentReference;
2566 break;
2567 case VK_IMAGE_ASPECT_STENCIL_BIT:
2568 case VK_IMAGE_ASPECT_DEPTH_BIT:
2569 subpassDescription.pDepthStencilAttachment = &depthStencilAttachmentReference;
2570 break;
2571 default:
2572 DE_ASSERT(0);
2573 break;
2574 }
2575
2576 const VkRenderPassCreateInfo renderPassInfo =
2577 {
2578 VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, // VkStructureType sType;
2579 DE_NULL, // const void* pNext;
2580 (VkRenderPassCreateFlags)0, // VkRenderPassCreateFlags flags;
2581 1u, // deUint32 attachmentCount;
2582 &colorAttachmentDescription, // const VkAttachmentDescription* pAttachments;
2583 1u, // deUint32 subpassCount;
2584 &subpassDescription, // const VkSubpassDescription* pSubpasses;
2585 0u, // deUint32 dependencyCount;
2586 DE_NULL // const VkSubpassDependency* pDependencies;
2587 };
2588
2589 m_renderPass = createRenderPass(vk, device, &renderPassInfo);
2590 m_frameBuffer = makeFramebuffer(vk, device, *m_renderPass, *m_attachmentView, m_resource.getImage().extent.width, m_resource.getImage().extent.height, 1u);
2591 }
2592
2593 void recordCommands (const VkCommandBuffer cmdBuffer)
2594 {
2595 const DeviceInterface& vk = m_context.getDeviceInterface();
2596 const VkRenderPassBeginInfo renderPassBeginInfo =
2597 {
2598 VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, // VkStructureType sType;
2599 DE_NULL, // const void* pNext;
2600 *m_renderPass, // VkRenderPass renderPass;
2601 *m_frameBuffer, // VkFramebuffer framebuffer;
2602 {
2603 { 0, 0 }, // VkOffset2D offset;
2604 {
2605 m_resource.getImage().extent.width, // deUint32 width;
2606 m_resource.getImage().extent.height // deUint32 height;
2607 } // VkExtent2D extent;
2608 }, // VkRect2D renderArea;
2609 1u, // deUint32 clearValueCount;
2610 &m_clearValue // const VkClearValue* pClearValues;
2611 };
2612
2613 vk.cmdBeginRenderPass(cmdBuffer, &renderPassBeginInfo, VK_SUBPASS_CONTENTS_INLINE);
2614
2615 const VkClearAttachment clearAttachment =
2616 {
2617 m_resource.getImage().subresourceRange.aspectMask, // VkImageAspectFlags aspectMask;
2618 0, // deUint32 colorAttachment;
2619 m_clearValue // VkClearValue clearValue;
2620 };
2621
2622 const VkRect2D rect2D =
2623 {
2624 { 0u, 0u, }, // VkOffset2D offset;
2625 { m_resource.getImage().extent.width, m_resource.getImage().extent.height }, // VkExtent2D extent;
2626 };
2627
2628 const VkClearRect clearRect =
2629 {
2630 rect2D, // VkRect2D rect;
2631 0u, // deUint32 baseArrayLayer;
2632 m_resource.getImage().subresourceLayers.layerCount // deUint32 layerCount;
2633 };
2634
2635 vk.cmdClearAttachments(cmdBuffer, 1, &clearAttachment, 1, &clearRect);
2636
2637 vk.cmdEndRenderPass(cmdBuffer);
2638 }
2639
2640 SyncInfo getSyncInfo (void) const
2641 {
2642 SyncInfo syncInfo;
2643 syncInfo.stageMask = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
2644
2645 switch (m_resource.getImage().subresourceRange.aspectMask)
2646 {
2647 case VK_IMAGE_ASPECT_COLOR_BIT:
2648 syncInfo.accessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
2649 syncInfo.imageLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
2650 break;
2651 case VK_IMAGE_ASPECT_STENCIL_BIT:
2652 case VK_IMAGE_ASPECT_DEPTH_BIT:
2653 syncInfo.accessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
2654 syncInfo.imageLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
2655 break;
2656 default:
2657 DE_ASSERT(0);
2658 break;
2659 }
2660
2661 return syncInfo;
2662 }
2663
2664 Data getData (void) const
2665 {
2666 const Data data =
2667 {
2668 m_data.size(), // std::size_t size;
2669 &m_data[0], // const deUint8* data;
2670 };
2671 return data;
2672 }
2673
2674 private:
2675 OperationContext& m_context;
2676 Resource& m_resource;
2677 std::vector<deUint8> m_data;
2678 const VkClearValue m_clearValue;
2679 Move<VkImageView> m_attachmentView;
2680 Move<VkRenderPass> m_renderPass;
2681 Move<VkFramebuffer> m_frameBuffer;
2682 };
2683
2684 class Support : public OperationSupport
2685 {
2686 public:
2687 Support (const ResourceDescription& resourceDesc)
2688 : m_resourceDesc (resourceDesc)
2689 {
2690 DE_ASSERT(m_resourceDesc.type == RESOURCE_TYPE_IMAGE);
2691 }
2692
2693 deUint32 getResourceUsageFlags (void) const
2694 {
2695 switch (m_resourceDesc.imageAspect)
2696 {
2697 case VK_IMAGE_ASPECT_COLOR_BIT:
2698 return VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
2699 case VK_IMAGE_ASPECT_STENCIL_BIT:
2700 case VK_IMAGE_ASPECT_DEPTH_BIT:
2701 return VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
2702 default:
2703 DE_ASSERT(0);
2704 }
2705 return 0u;
2706 }
2707
2708 VkQueueFlags getQueueFlags (const OperationContext& context) const
2709 {
2710 DE_UNREF(context);
2711 return VK_QUEUE_GRAPHICS_BIT;
2712 }
2713
2714 de::MovePtr<Operation> build (OperationContext& context, Resource& resource) const
2715 {
2716 return de::MovePtr<Operation>(new Implementation(context, resource));
2717 }
2718
2719 private:
2720 const ResourceDescription m_resourceDesc;
2721 };
2722
2723 } // ClearAttachments ns
2724
2725 namespace IndirectBuffer
2726 {
2727
2728 class GraphicsPipeline : public Pipeline
2729 {
2730 public:
2731 GraphicsPipeline (OperationContext& context,
2732 const ResourceType resourceType,
2733 const VkBuffer indirectBuffer,
2734 const std::string& shaderPrefix,
2735 const VkDescriptorSetLayout descriptorSetLayout)
2736 : m_resourceType (resourceType)
2737 , m_indirectBuffer (indirectBuffer)
2738 , m_vertices (context)
2739 {
2740 const DeviceInterface& vk = context.getDeviceInterface();
2741 const VkDevice device = context.getDevice();
2742 Allocator& allocator = context.getAllocator();
2743
2744 // Color attachment
2745
2746 m_colorFormat = VK_FORMAT_R8G8B8A8_UNORM;
2747 m_colorImageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u);
2748 m_colorImageExtent = makeExtent3D(16u, 16u, 1u);
2749 m_colorAttachmentImage = de::MovePtr<Image>(new Image(vk, device, allocator,
2750 makeImageCreateInfo(VK_IMAGE_TYPE_2D, m_colorImageExtent, m_colorFormat, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT),
2751 MemoryRequirement::Any));
2752
2753 // Pipeline
2754
2755 m_colorAttachmentView = makeImageView (vk, device, **m_colorAttachmentImage, VK_IMAGE_VIEW_TYPE_2D, m_colorFormat, m_colorImageSubresourceRange);
2756 m_renderPass = makeRenderPass (vk, device, m_colorFormat);
2757 m_framebuffer = makeFramebuffer (vk, device, *m_renderPass, *m_colorAttachmentView, m_colorImageExtent.width, m_colorImageExtent.height, 1u);
2758 m_pipelineLayout = makePipelineLayout(vk, device, descriptorSetLayout);
2759
2760 GraphicsPipelineBuilder pipelineBuilder;
2761 pipelineBuilder
2762 .setRenderSize (tcu::IVec2(m_colorImageExtent.width, m_colorImageExtent.height))
2763 .setVertexInputSingleAttribute (m_vertices.getVertexFormat(), m_vertices.getVertexStride())
2764 .setShader (vk, device, VK_SHADER_STAGE_VERTEX_BIT, context.getBinaryCollection().get(shaderPrefix + "vert"), DE_NULL)
2765 .setShader (vk, device, VK_SHADER_STAGE_FRAGMENT_BIT, context.getBinaryCollection().get(shaderPrefix + "frag"), DE_NULL);
2766
2767 m_pipeline = pipelineBuilder.build(vk, device, *m_pipelineLayout, *m_renderPass, context.getPipelineCacheData());
2768 }
2769
2770 void recordCommands (OperationContext& context, const VkCommandBuffer cmdBuffer, const VkDescriptorSet descriptorSet)
2771 {
2772 const DeviceInterface& vk = context.getDeviceInterface();
2773
2774 // Change color attachment image layout
2775 {
2776 const VkImageMemoryBarrier colorAttachmentLayoutBarrier = makeImageMemoryBarrier(
2777 (VkAccessFlags)0, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
2778 VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
2779 **m_colorAttachmentImage, m_colorImageSubresourceRange);
2780
2781 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, (VkDependencyFlags)0,
2782 0u, DE_NULL, 0u, DE_NULL, 1u, &colorAttachmentLayoutBarrier);
2783 }
2784
2785 {
2786 const VkRect2D renderArea = {
2787 makeOffset2D(0, 0),
2788 makeExtent2D(m_colorImageExtent.width, m_colorImageExtent.height),
2789 };
2790 const tcu::Vec4 clearColor = tcu::Vec4(0.0f, 0.0f, 0.0f, 1.0f);
2791
2792 beginRenderPass(vk, cmdBuffer, *m_renderPass, *m_framebuffer, renderArea, clearColor);
2793 }
2794
2795 vk.cmdBindPipeline(cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *m_pipeline);
2796 vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *m_pipelineLayout, 0u, 1u, &descriptorSet, 0u, DE_NULL);
2797 {
2798 const VkDeviceSize vertexBufferOffset = 0ull;
2799 const VkBuffer vertexBuffer = m_vertices.getVertexBuffer();
2800 vk.cmdBindVertexBuffers(cmdBuffer, 0u, 1u, &vertexBuffer, &vertexBufferOffset);
2801 }
2802
2803 switch (m_resourceType)
2804 {
2805 case RESOURCE_TYPE_INDIRECT_BUFFER_DRAW:
2806 vk.cmdDrawIndirect(cmdBuffer, m_indirectBuffer, 0u, 1u, 0u);
2807 break;
2808
2809 case RESOURCE_TYPE_INDIRECT_BUFFER_DRAW_INDEXED:
2810 vk.cmdBindIndexBuffer(cmdBuffer, m_vertices.getIndexBuffer(), 0u, m_vertices.getIndexType());
2811 vk.cmdDrawIndexedIndirect(cmdBuffer, m_indirectBuffer, 0u, 1u, 0u);
2812 break;
2813
2814 default:
2815 DE_ASSERT(0);
2816 break;
2817 }
2818 endRenderPass(vk, cmdBuffer);
2819 }
2820
2821 private:
2822 const ResourceType m_resourceType;
2823 const VkBuffer m_indirectBuffer;
2824 const VertexGrid m_vertices;
2825 VkFormat m_colorFormat;
2826 de::MovePtr<Image> m_colorAttachmentImage;
2827 Move<VkImageView> m_colorAttachmentView;
2828 VkExtent3D m_colorImageExtent;
2829 VkImageSubresourceRange m_colorImageSubresourceRange;
2830 Move<VkRenderPass> m_renderPass;
2831 Move<VkFramebuffer> m_framebuffer;
2832 Move<VkPipelineLayout> m_pipelineLayout;
2833 Move<VkPipeline> m_pipeline;
2834 };
2835
2836 class ComputePipeline : public Pipeline
2837 {
2838 public:
2839 ComputePipeline (OperationContext& context,
2840 const VkBuffer indirectBuffer,
2841 const std::string& shaderPrefix,
2842 const VkDescriptorSetLayout descriptorSetLayout)
2843 : m_indirectBuffer (indirectBuffer)
2844 {
2845 const DeviceInterface& vk = context.getDeviceInterface();
2846 const VkDevice device = context.getDevice();
2847
2848 const Unique<VkShaderModule> shaderModule(createShaderModule(vk, device, context.getBinaryCollection().get(shaderPrefix + "comp"), (VkShaderModuleCreateFlags)0));
2849
2850 m_pipelineLayout = makePipelineLayout(vk, device, descriptorSetLayout);
2851 m_pipeline = makeComputePipeline(vk, device, *m_pipelineLayout, *shaderModule, DE_NULL, context.getPipelineCacheData());
2852 }
2853
2854 void recordCommands (OperationContext& context, const VkCommandBuffer cmdBuffer, const VkDescriptorSet descriptorSet)
2855 {
2856 const DeviceInterface& vk = context.getDeviceInterface();
2857
2858 vk.cmdBindPipeline(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *m_pipeline);
2859 vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *m_pipelineLayout, 0u, 1u, &descriptorSet, 0u, DE_NULL);
2860 vk.cmdDispatchIndirect(cmdBuffer, m_indirectBuffer, 0u);
2861 }
2862
2863 private:
2864 const VkBuffer m_indirectBuffer;
2865 Move<VkPipelineLayout> m_pipelineLayout;
2866 Move<VkPipeline> m_pipeline;
2867 };
2868
2869 //! Read indirect buffer by executing an indirect draw or dispatch command.
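//! The bound shaders atomically increment a counter in a host-visible storage buffer, so the value read
//! back by the host equals the number of shader invocations launched by the indirect command.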
2870 class ReadImplementation : public Operation
2871 {
2872 public:
2873 ReadImplementation (OperationContext& context, Resource& resource)
2874 : m_context (context)
2875 , m_resource (resource)
2876 , m_stage (resource.getType() == RESOURCE_TYPE_INDIRECT_BUFFER_DISPATCH ? VK_SHADER_STAGE_COMPUTE_BIT : VK_SHADER_STAGE_VERTEX_BIT)
2877 , m_pipelineStage (pipelineStageFlagsFromShaderStageFlagBits(m_stage))
2878 , m_hostBufferSizeBytes (sizeof(deUint32))
2879 {
2880 requireFeaturesForSSBOAccess (m_context, m_stage);
2881
2882 const DeviceInterface& vk = m_context.getDeviceInterface();
2883 const VkDevice device = m_context.getDevice();
2884 Allocator& allocator = m_context.getAllocator();
2885
2886 m_hostBuffer = de::MovePtr<Buffer>(new Buffer(
2887 vk, device, allocator, makeBufferCreateInfo(m_hostBufferSizeBytes, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT), MemoryRequirement::HostVisible));
2888
2889 // Init host buffer data
2890 {
2891 const Allocation& alloc = m_hostBuffer->getAllocation();
2892 deMemset(alloc.getHostPtr(), 0, static_cast<size_t>(m_hostBufferSizeBytes));
2893 flushMappedMemoryRange(vk, device, alloc.getMemory(), alloc.getOffset(), static_cast<size_t>(m_hostBufferSizeBytes));
2894 }
2895
2896 // Prepare descriptors
2897 {
2898 m_descriptorSetLayout = DescriptorSetLayoutBuilder()
2899 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, m_stage)
2900 .build(vk, device);
2901
2902 m_descriptorPool = DescriptorPoolBuilder()
2903 .addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER)
2904 .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
2905
2906 m_descriptorSet = makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout);
2907
2908 const VkDescriptorBufferInfo hostBufferInfo = makeDescriptorBufferInfo(**m_hostBuffer, 0u, m_hostBufferSizeBytes);
2909
2910 DescriptorSetUpdateBuilder()
2911 .writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &hostBufferInfo)
2912 .update(vk, device);
2913 }
2914
2915 // Create pipeline
2916 m_pipeline = (m_resource.getType() == RESOURCE_TYPE_INDIRECT_BUFFER_DISPATCH
2917 ? de::MovePtr<Pipeline>(new ComputePipeline(context, m_resource.getBuffer().handle, "read_ib_", *m_descriptorSetLayout))
2918 : de::MovePtr<Pipeline>(new GraphicsPipeline(context, m_resource.getType(), m_resource.getBuffer().handle, "read_ib_", *m_descriptorSetLayout)));
2919 }
2920
2921 void recordCommands (const VkCommandBuffer cmdBuffer)
2922 {
2923 const DeviceInterface& vk = m_context.getDeviceInterface();
2924
2925 m_pipeline->recordCommands(m_context, cmdBuffer, *m_descriptorSet);
2926
2927 // Insert a barrier so data written by the shader is available to the host
2928 const VkBufferMemoryBarrier barrier = makeBufferMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT, **m_hostBuffer, 0u, m_hostBufferSizeBytes);
2929 vk.cmdPipelineBarrier(cmdBuffer, m_pipelineStage, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0, 0u, DE_NULL, 1u, &barrier, 0u, DE_NULL);
2930 }
2931
2932 SyncInfo getSyncInfo (void) const
2933 {
2934 const SyncInfo syncInfo =
2935 {
2936 VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT, // VkPipelineStageFlags stageMask;
2937 VK_ACCESS_INDIRECT_COMMAND_READ_BIT, // VkAccessFlags accessMask;
2938 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout imageLayout;
2939 };
2940 return syncInfo;
2941 }
2942
2943 Data getData (void) const
2944 {
2945 return getHostBufferData(m_context, *m_hostBuffer, m_hostBufferSizeBytes);
2946 }
2947
2948 private:
2949 OperationContext& m_context;
2950 Resource& m_resource;
2951 const VkShaderStageFlagBits m_stage;
2952 const VkPipelineStageFlags m_pipelineStage;
2953 const VkDeviceSize m_hostBufferSizeBytes;
2954 de::MovePtr<Buffer> m_hostBuffer;
2955 Move<VkDescriptorPool> m_descriptorPool;
2956 Move<VkDescriptorSetLayout> m_descriptorSetLayout;
2957 Move<VkDescriptorSet> m_descriptorSet;
2958 de::MovePtr<Pipeline> m_pipeline;
2959 };
2960
2961 //! Prepare indirect buffer for a draw/dispatch call.
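//! The command parameters are uploaded with vkCmdUpdateBuffer; m_expectedValue records the invocation
//! count that the matching read operation is expected to observe.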
2962 class WriteImplementation : public Operation
2963 {
2964 public:
2965 WriteImplementation (OperationContext& context, Resource& resource)
2966 : m_context (context)
2967 , m_resource (resource)
2968 {
2969 switch (m_resource.getType())
2970 {
2971 case RESOURCE_TYPE_INDIRECT_BUFFER_DRAW:
2972 {
2973 m_drawIndirect.vertexCount = 6u;
2974 m_drawIndirect.instanceCount = 1u;
2975 m_drawIndirect.firstVertex = 0u;
2976 m_drawIndirect.firstInstance = 0u;
2977
2978 m_indirectData = reinterpret_cast<deUint32*>(&m_drawIndirect);
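// Each of the 6 drawn vertices performs one atomicAdd() in the read shader.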
2979 m_expectedValue = 6u;
2980 }
2981 break;
2982
2983 case RESOURCE_TYPE_INDIRECT_BUFFER_DRAW_INDEXED:
2984 {
2985 m_drawIndexedIndirect.indexCount = 6u;
2986 m_drawIndexedIndirect.instanceCount = 1u;
2987 m_drawIndexedIndirect.firstIndex = 0u;
2988 m_drawIndexedIndirect.vertexOffset = 0u;
2989 m_drawIndexedIndirect.firstInstance = 0u;
2990
2991 m_indirectData = reinterpret_cast<deUint32*>(&m_drawIndexedIndirect);
2992 m_expectedValue = 6u;
2993 }
2994 break;
2995
2996 case RESOURCE_TYPE_INDIRECT_BUFFER_DISPATCH:
2997 {
2998 m_dispatchIndirect.x = 7u;
2999 m_dispatchIndirect.y = 2u;
3000 m_dispatchIndirect.z = 1u;
3001
3002 m_indirectData = reinterpret_cast<deUint32*>(&m_dispatchIndirect);
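// 7 x 2 x 1 workgroups with a single invocation each (local_size_x = 1) give 14 atomicAdd()s.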
3003 m_expectedValue = 14u;
3004 }
3005 break;
3006
3007 default:
3008 DE_ASSERT(0);
3009 break;
3010 }
3011 }
3012
3013 void recordCommands (const VkCommandBuffer cmdBuffer)
3014 {
3015 const DeviceInterface& vk = m_context.getDeviceInterface();
3016
3017 vk.cmdUpdateBuffer(cmdBuffer, m_resource.getBuffer().handle, m_resource.getBuffer().offset, m_resource.getBuffer().size, m_indirectData);
3018 }
3019
3020 SyncInfo getSyncInfo (void) const
3021 {
3022 const SyncInfo syncInfo =
3023 {
3024 VK_PIPELINE_STAGE_TRANSFER_BIT, // VkPipelineStageFlags stageMask;
3025 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags accessMask;
3026 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout imageLayout;
3027 };
3028 return syncInfo;
3029 }
3030
3031 Data getData (void) const
3032 {
3033 const Data data =
3034 {
3035 sizeof(deUint32), // std::size_t size;
3036 reinterpret_cast<const deUint8*>(&m_expectedValue), // const deUint8* data;
3037 };
3038 return data;
3039 }
3040
3041 private:
3042 OperationContext& m_context;
3043 Resource& m_resource;
3044 VkDrawIndirectCommand m_drawIndirect;
3045 VkDrawIndexedIndirectCommand m_drawIndexedIndirect;
3046 VkDispatchIndirectCommand m_dispatchIndirect;
3047 deUint32* m_indirectData;
3048 deUint32 m_expectedValue; //! Side-effect value expected to be computed by a read (draw/dispatch) command.
3049 };
3050
3051 class ReadSupport : public OperationSupport
3052 {
3053 public:
3054 ReadSupport (const ResourceDescription& resourceDesc)
3055 : m_resourceDesc (resourceDesc)
3056 {
3057 DE_ASSERT(isIndirectBuffer(m_resourceDesc.type));
3058 }
3059
3060 void initPrograms (SourceCollections& programCollection) const
3061 {
3062 std::ostringstream decl;
3063 decl << "layout(set = 0, binding = 0, std140) coherent buffer Data {\n"
3064 << " uint value;\n"
3065 << "} sb_out;\n";
3066
3067 std::ostringstream main;
3068 main << " atomicAdd(sb_out.value, 1u);\n";
3069
3070 // Vertex
3071 {
3072 std::ostringstream src;
3073 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
3074 << "\n"
3075 << "layout(location = 0) in vec4 v_in_position;\n"
3076 << "\n"
3077 << "out " << s_perVertexBlock << ";\n"
3078 << "\n"
3079 << decl.str()
3080 << "\n"
3081 << "void main (void)\n"
3082 << "{\n"
3083 << " gl_Position = v_in_position;\n"
3084 << main.str()
3085 << "}\n";
3086
3087 programCollection.glslSources.add("read_ib_vert") << glu::VertexSource(src.str());
3088 }
3089
3090 // Fragment
3091 {
3092 std::ostringstream src;
3093 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
3094 << "\n"
3095 << "layout(location = 0) out vec4 o_color;\n"
3096 << "\n"
3097 << "void main (void)\n"
3098 << "{\n"
3099 << " o_color = vec4(1.0);\n"
3100 << "}\n";
3101
3102 programCollection.glslSources.add("read_ib_frag") << glu::FragmentSource(src.str());
3103 }
3104
3105 // Compute
3106 {
3107 std::ostringstream src;
3108 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
3109 << "\n"
3110 << "layout(local_size_x = 1) in;\n"
3111 << "\n"
3112 << decl.str()
3113 << "\n"
3114 << "void main (void)\n"
3115 << "{\n"
3116 << main.str()
3117 << "}\n";
3118
3119 programCollection.glslSources.add("read_ib_comp") << glu::ComputeSource(src.str());
3120 }
3121 }
3122
3123 deUint32 getResourceUsageFlags (void) const
3124 {
3125 return VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT;
3126 }
3127
3128 VkQueueFlags getQueueFlags (const OperationContext& context) const
3129 {
3130 DE_UNREF(context);
3131 return (m_resourceDesc.type == RESOURCE_TYPE_INDIRECT_BUFFER_DISPATCH ? VK_QUEUE_COMPUTE_BIT : VK_QUEUE_GRAPHICS_BIT);
3132 }
3133
3134 de::MovePtr<Operation> build (OperationContext& context, Resource& resource) const
3135 {
3136 return de::MovePtr<Operation>(new ReadImplementation(context, resource));
3137 }
3138
3139 private:
3140 const ResourceDescription m_resourceDesc;
3141 };
3142
3143
3144 class WriteSupport : public OperationSupport
3145 {
3146 public:
3147 WriteSupport (const ResourceDescription& resourceDesc)
3148 {
3149 DE_ASSERT(isIndirectBuffer(resourceDesc.type));
3150 DE_UNREF(resourceDesc);
3151 }
3152
3153 deUint32 getResourceUsageFlags (void) const
3154 {
3155 return VK_BUFFER_USAGE_TRANSFER_DST_BIT;
3156 }
3157
3158 VkQueueFlags getQueueFlags (const OperationContext& context) const
3159 {
3160 DE_UNREF(context);
3161 return VK_QUEUE_TRANSFER_BIT;
3162 }
3163
3164 de::MovePtr<Operation> build (OperationContext& context, Resource& resource) const
3165 {
3166 return de::MovePtr<Operation>(new WriteImplementation(context, resource));
3167 }
3168 };
3169
3170 } // IndirectBuffer ns
3171
3172 namespace VertexInput
3173 {
3174
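//! Read the tested buffer by binding it as a vertex buffer: the vertex shader copies each uvec4
//! attribute into a host-visible storage buffer at index gl_VertexIndex.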
3175 class Implementation : public Operation
3176 {
3177 public:
3178 Implementation (OperationContext& context, Resource& resource)
3179 : m_context (context)
3180 , m_resource (resource)
3181 {
3182 requireFeaturesForSSBOAccess (m_context, VK_SHADER_STAGE_VERTEX_BIT);
3183
3184 const DeviceInterface& vk = context.getDeviceInterface();
3185 const VkDevice device = context.getDevice();
3186 Allocator& allocator = context.getAllocator();
3187 const VkDeviceSize dataSizeBytes = m_resource.getBuffer().size;
3188
3189 m_outputBuffer = de::MovePtr<Buffer>(new Buffer(vk, device, allocator,
3190 makeBufferCreateInfo(dataSizeBytes, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT), MemoryRequirement::HostVisible));
3191
3192 {
3193 const Allocation& alloc = m_outputBuffer->getAllocation();
3194 deMemset(alloc.getHostPtr(), 0, static_cast<size_t>(dataSizeBytes));
3195 flushMappedMemoryRange(vk, device, alloc.getMemory(), alloc.getOffset(), dataSizeBytes);
3196 }
3197
3198 m_descriptorSetLayout = DescriptorSetLayoutBuilder()
3199 .addSingleBinding (VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_VERTEX_BIT)
3200 .build (vk, device);
3201
3202 m_descriptorPool = DescriptorPoolBuilder()
3203 .addType (VK_DESCRIPTOR_TYPE_STORAGE_BUFFER)
3204 .build (vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
3205
3206 m_descriptorSet = makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout);
3207
3208 const VkDescriptorBufferInfo outputBufferDescriptorInfo = makeDescriptorBufferInfo(m_outputBuffer->get(), 0ull, dataSizeBytes);
3209 DescriptorSetUpdateBuilder()
3210 .writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &outputBufferDescriptorInfo)
3211 .update (vk, device);
3212
3213 // Color attachment
3214 m_colorFormat = VK_FORMAT_R8G8B8A8_UNORM;
3215 m_colorImageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u);
3216 m_colorImageExtent = makeExtent3D(16u, 16u, 1u);
3217 m_colorAttachmentImage = de::MovePtr<Image>(new Image(vk, device, allocator,
3218 makeImageCreateInfo(VK_IMAGE_TYPE_2D, m_colorImageExtent, m_colorFormat, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT),
3219 MemoryRequirement::Any));
3220
3221 // Pipeline
3222 m_colorAttachmentView = makeImageView (vk, device, **m_colorAttachmentImage, VK_IMAGE_VIEW_TYPE_2D, m_colorFormat, m_colorImageSubresourceRange);
3223 m_renderPass = makeRenderPass (vk, device, m_colorFormat);
3224 m_framebuffer = makeFramebuffer (vk, device, *m_renderPass, *m_colorAttachmentView, m_colorImageExtent.width, m_colorImageExtent.height, 1u);
3225 m_pipelineLayout = makePipelineLayout(vk, device, *m_descriptorSetLayout);
3226
3227 m_pipeline = GraphicsPipelineBuilder()
3228 .setPrimitiveTopology (VK_PRIMITIVE_TOPOLOGY_POINT_LIST)
3229 .setRenderSize (tcu::IVec2(static_cast<int>(m_colorImageExtent.width), static_cast<int>(m_colorImageExtent.height)))
3230 .setVertexInputSingleAttribute (VK_FORMAT_R32G32B32A32_UINT, tcu::getPixelSize(mapVkFormat(VK_FORMAT_R32G32B32A32_UINT)))
3231 .setShader (vk, device, VK_SHADER_STAGE_VERTEX_BIT, context.getBinaryCollection().get("input_vert"), DE_NULL)
3232 .setShader (vk, device, VK_SHADER_STAGE_FRAGMENT_BIT, context.getBinaryCollection().get("input_frag"), DE_NULL)
3233 .build (vk, device, *m_pipelineLayout, *m_renderPass, context.getPipelineCacheData());
3234 }
3235
3236 void recordCommands (const VkCommandBuffer cmdBuffer)
3237 {
3238 const DeviceInterface& vk = m_context.getDeviceInterface();
3239 const VkDeviceSize dataSizeBytes = m_resource.getBuffer().size;
3240
3241 // Change color attachment image layout
3242 {
3243 const VkImageMemoryBarrier colorAttachmentLayoutBarrier = makeImageMemoryBarrier(
3244 (VkAccessFlags)0, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
3245 VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
3246 **m_colorAttachmentImage, m_colorImageSubresourceRange);
3247
3248 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, (VkDependencyFlags)0,
3249 0u, DE_NULL, 0u, DE_NULL, 1u, &colorAttachmentLayoutBarrier);
3250 }
3251
3252 {
3253 const VkRect2D renderArea = {
3254 makeOffset2D(0, 0),
3255 makeExtent2D(m_colorImageExtent.width, m_colorImageExtent.height),
3256 };
3257 const tcu::Vec4 clearColor = tcu::Vec4(0.0f, 0.0f, 0.0f, 1.0f);
3258
3259 beginRenderPass(vk, cmdBuffer, *m_renderPass, *m_framebuffer, renderArea, clearColor);
3260 }
3261
3262 vk.cmdBindPipeline(cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *m_pipeline);
3263 vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *m_pipelineLayout, 0u, 1u, &m_descriptorSet.get(), 0u, DE_NULL);
3264 {
3265 const VkDeviceSize vertexBufferOffset = 0ull;
3266 vk.cmdBindVertexBuffers(cmdBuffer, 0u, 1u, &m_resource.getBuffer().handle, &vertexBufferOffset);
3267 }
3268
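// Draw one point per uvec4 element in the tested buffer.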
3269 vk.cmdDraw(cmdBuffer, static_cast<deUint32>(dataSizeBytes / sizeof(tcu::UVec4)), 1u, 0u, 0u);
3270
3271 endRenderPass(vk, cmdBuffer);
3272 
3273 // Buffer memory barriers may not be recorded inside a render pass instance, so make the shader writes visible to the host only after the render pass has ended.
3274 const VkBufferMemoryBarrier barrier = makeBufferMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT, **m_outputBuffer, 0u, m_resource.getBuffer().size);
vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0, 0u, DE_NULL, 1u, &barrier, 0u, DE_NULL);
3275 }
3276
3277 SyncInfo getSyncInfo (void) const
3278 {
3279 const SyncInfo syncInfo =
3280 {
3281 VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, // VkPipelineStageFlags stageMask;
3282 VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT, // VkAccessFlags accessMask;
3283 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout imageLayout;
3284 };
3285 return syncInfo;
3286 }
3287
3288 Data getData (void) const
3289 {
3290 return getHostBufferData(m_context, *m_outputBuffer, m_resource.getBuffer().size);
3291 }
3292
3293 private:
3294 OperationContext& m_context;
3295 Resource& m_resource;
3296 de::MovePtr<Buffer> m_outputBuffer;
3297 de::MovePtr<Buffer> m_indexBuffer;
3298 de::MovePtr<Buffer> m_indirectBuffer;
3299 Move<VkRenderPass> m_renderPass;
3300 Move<VkFramebuffer> m_framebuffer;
3301 Move<VkPipelineLayout> m_pipelineLayout;
3302 Move<VkPipeline> m_pipeline;
3303 VkFormat m_colorFormat;
3304 de::MovePtr<Image> m_colorAttachmentImage;
3305 Move<VkImageView> m_colorAttachmentView;
3306 VkExtent3D m_colorImageExtent;
3307 VkImageSubresourceRange m_colorImageSubresourceRange;
3308 Move<VkDescriptorPool> m_descriptorPool;
3309 Move<VkDescriptorSetLayout> m_descriptorSetLayout;
3310 Move<VkDescriptorSet> m_descriptorSet;
3311 };
3312
3313 class Support : public OperationSupport
3314 {
3315 public:
3316 Support (const ResourceDescription& resourceDesc)
3317 : m_resourceDesc (resourceDesc)
3318 {
3319 DE_ASSERT(m_resourceDesc.type == RESOURCE_TYPE_BUFFER);
3320 }
3321
3322 void initPrograms (SourceCollections& programCollection) const
3323 {
3324 // Vertex
3325 {
3326 int vertexStride = sizeof(tcu::UVec4);
3327 std::ostringstream src;
3328 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
3329 << "\n"
3330 << "layout(location = 0) in uvec4 v_in_data;\n"
3331 << "layout(set = 0, binding = 0, std140) writeonly buffer Output {\n"
3332 << " uvec4 data[" << m_resourceDesc.size.x()/vertexStride << "];\n"
3333 << "} b_out;\n"
3334 << "\n"
3335 << "void main (void)\n"
3336 << "{\n"
3337 << " b_out.data[gl_VertexIndex] = v_in_data;\n"
3338 << "}\n";
3339 programCollection.glslSources.add("input_vert") << glu::VertexSource(src.str());
3340 }
3341
3342 // Fragment
3343 {
3344 std::ostringstream src;
3345 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
3346 << "\n"
3347 << "layout(location = 0) out vec4 o_color;\n"
3348 << "\n"
3349 << "void main (void)\n"
3350 << "{\n"
3351 << " o_color = vec4(1.0);\n"
3352 << "}\n";
3353 programCollection.glslSources.add("input_frag") << glu::FragmentSource(src.str());
3354 }
3355 }
3356
3357 deUint32 getResourceUsageFlags (void) const
3358 {
3359 return VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
3360 }
3361
3362 VkQueueFlags getQueueFlags (const OperationContext& context) const
3363 {
3364 DE_UNREF(context);
3365 return VK_QUEUE_GRAPHICS_BIT;
3366 }
3367
3368 de::MovePtr<Operation> build (OperationContext& context, Resource& resource) const
3369 {
3370 return de::MovePtr<Operation>(new Implementation(context, resource));
3371 }
3372
3373 private:
3374 const ResourceDescription m_resourceDesc;
3375 };
3376
3377 } // VertexInput
3378
3379 } // anonymous ns
3380
3381 OperationContext::OperationContext (Context& context, PipelineCacheData& pipelineCacheData)
3382 : m_vki (context.getInstanceInterface())
3383 , m_vk (context.getDeviceInterface())
3384 , m_physicalDevice (context.getPhysicalDevice())
3385 , m_device (context.getDevice())
3386 , m_allocator (context.getDefaultAllocator())
3387 , m_progCollection (context.getBinaryCollection())
3388 , m_pipelineCacheData (pipelineCacheData)
3389 , m_deviceExtensions (context.getDeviceExtensions())
3390 {
3391 }
3392
3393 OperationContext::OperationContext (Context& context, PipelineCacheData& pipelineCacheData, const DeviceInterface& vk, const VkDevice device, vk::Allocator& allocator)
3394 : m_vki (context.getInstanceInterface())
3395 , m_vk (vk)
3396 , m_physicalDevice (context.getPhysicalDevice())
3397 , m_device (device)
3398 , m_allocator (allocator)
3399 , m_progCollection (context.getBinaryCollection())
3400 , m_pipelineCacheData (pipelineCacheData)
3401 , m_deviceExtensions (context.getDeviceExtensions())
3402 {
3403 }
3404
3405 Resource::Resource (OperationContext& context, const ResourceDescription& desc, const deUint32 usage, const vk::VkSharingMode sharingMode, const std::vector<deUint32>& queueFamilyIndex)
3406 : m_type (desc.type)
3407 {
3408 const DeviceInterface& vk = context.getDeviceInterface();
3409 const InstanceInterface& vki = context.getInstanceInterface();
3410 const VkDevice device = context.getDevice();
3411 const VkPhysicalDevice physDevice = context.getPhysicalDevice();
3412 Allocator& allocator = context.getAllocator();
3413
3414 if (m_type == RESOURCE_TYPE_BUFFER || isIndirectBuffer(m_type))
3415 {
3416 m_bufferData.offset = 0u;
3417 m_bufferData.size = static_cast<VkDeviceSize>(desc.size.x());
3418 VkBufferCreateInfo bufferCreateInfo = makeBufferCreateInfo(m_bufferData.size, usage);
3419 bufferCreateInfo.sharingMode = sharingMode;
3420 if (queueFamilyIndex.size() > 0)
3421 {
3422 bufferCreateInfo.queueFamilyIndexCount = static_cast<deUint32>(queueFamilyIndex.size());
3423 bufferCreateInfo.pQueueFamilyIndices = &queueFamilyIndex[0];
3424 }
3425 m_buffer = de::MovePtr<Buffer>(new Buffer(vk, device, allocator, bufferCreateInfo, MemoryRequirement::Any));
3426 m_bufferData.handle = **m_buffer;
3427 }
3428 else if (m_type == RESOURCE_TYPE_IMAGE)
3429 {
3430 m_imageData.extent = makeExtent3D(desc.size.x(), std::max(1, desc.size.y()), std::max(1, desc.size.z()));
3431 m_imageData.imageType = desc.imageType;
3432 m_imageData.format = desc.imageFormat;
3433 m_imageData.subresourceRange = makeImageSubresourceRange(desc.imageAspect, 0u, 1u, 0u, 1u);
3434 m_imageData.subresourceLayers = makeImageSubresourceLayers(desc.imageAspect, 0u, 0u, 1u);
3435 VkImageCreateInfo imageInfo = makeImageCreateInfo(m_imageData.imageType, m_imageData.extent, m_imageData.format, usage);
3436 imageInfo.sharingMode = sharingMode;
3437 if (queueFamilyIndex.size() > 0)
3438 {
3439 imageInfo.queueFamilyIndexCount = static_cast<deUint32>(queueFamilyIndex.size());
3440 imageInfo.pQueueFamilyIndices = &queueFamilyIndex[0];
3441 }
3442
3443 VkImageFormatProperties imageFormatProperties;
3444 const VkResult formatResult = vki.getPhysicalDeviceImageFormatProperties(physDevice, imageInfo.format, imageInfo.imageType, imageInfo.tiling, imageInfo.usage, imageInfo.flags, &imageFormatProperties);
3445
3446 if (formatResult != VK_SUCCESS)
3447 TCU_THROW(NotSupportedError, "Image format is not supported");
3448
3449 m_image = de::MovePtr<Image>(new Image(vk, device, allocator, imageInfo, MemoryRequirement::Any));
3450 m_imageData.handle = **m_image;
3451 }
3452 else
3453 DE_ASSERT(0);
3454 }
3455
3456 //! \note This function exists for performance reasons. We're creating a lot of tests and checking requirements here
3457 //! before creating an OperationSupport object is faster.
3458 bool isResourceSupported (const OperationName opName, const ResourceDescription& resourceDesc)
3459 {
3460 switch (opName)
3461 {
3462 case OPERATION_NAME_WRITE_FILL_BUFFER:
3463 case OPERATION_NAME_WRITE_COPY_BUFFER:
3464 case OPERATION_NAME_WRITE_COPY_IMAGE_TO_BUFFER:
3465 case OPERATION_NAME_WRITE_SSBO_VERTEX:
3466 case OPERATION_NAME_WRITE_SSBO_TESSELLATION_CONTROL:
3467 case OPERATION_NAME_WRITE_SSBO_TESSELLATION_EVALUATION:
3468 case OPERATION_NAME_WRITE_SSBO_GEOMETRY:
3469 case OPERATION_NAME_WRITE_SSBO_FRAGMENT:
3470 case OPERATION_NAME_WRITE_SSBO_COMPUTE:
3471 case OPERATION_NAME_WRITE_SSBO_COMPUTE_INDIRECT:
3472 case OPERATION_NAME_READ_COPY_BUFFER:
3473 case OPERATION_NAME_READ_COPY_BUFFER_TO_IMAGE:
3474 case OPERATION_NAME_READ_SSBO_VERTEX:
3475 case OPERATION_NAME_READ_SSBO_TESSELLATION_CONTROL:
3476 case OPERATION_NAME_READ_SSBO_TESSELLATION_EVALUATION:
3477 case OPERATION_NAME_READ_SSBO_GEOMETRY:
3478 case OPERATION_NAME_READ_SSBO_FRAGMENT:
3479 case OPERATION_NAME_READ_SSBO_COMPUTE:
3480 case OPERATION_NAME_READ_SSBO_COMPUTE_INDIRECT:
3481 case OPERATION_NAME_READ_VERTEX_INPUT:
3482 return resourceDesc.type == RESOURCE_TYPE_BUFFER;
3483
3484 case OPERATION_NAME_WRITE_INDIRECT_BUFFER_DRAW:
3485 case OPERATION_NAME_READ_INDIRECT_BUFFER_DRAW:
3486 return resourceDesc.type == RESOURCE_TYPE_INDIRECT_BUFFER_DRAW;
3487
3488 case OPERATION_NAME_WRITE_INDIRECT_BUFFER_DRAW_INDEXED:
3489 case OPERATION_NAME_READ_INDIRECT_BUFFER_DRAW_INDEXED:
3490 return resourceDesc.type == RESOURCE_TYPE_INDIRECT_BUFFER_DRAW_INDEXED;
3491
3492 case OPERATION_NAME_WRITE_INDIRECT_BUFFER_DISPATCH:
3493 case OPERATION_NAME_READ_INDIRECT_BUFFER_DISPATCH:
3494 return resourceDesc.type == RESOURCE_TYPE_INDIRECT_BUFFER_DISPATCH;
3495
3496 case OPERATION_NAME_WRITE_UPDATE_BUFFER:
3497 return resourceDesc.type == RESOURCE_TYPE_BUFFER && resourceDesc.size.x() <= MAX_UPDATE_BUFFER_SIZE;
3498
3499 case OPERATION_NAME_WRITE_COPY_IMAGE:
3500 case OPERATION_NAME_WRITE_COPY_BUFFER_TO_IMAGE:
3501 case OPERATION_NAME_READ_COPY_IMAGE:
3502 case OPERATION_NAME_READ_COPY_IMAGE_TO_BUFFER:
3503 return resourceDesc.type == RESOURCE_TYPE_IMAGE;
3504
3505 case OPERATION_NAME_WRITE_CLEAR_ATTACHMENTS:
3506 return resourceDesc.type == RESOURCE_TYPE_IMAGE && resourceDesc.imageType != VK_IMAGE_TYPE_3D;
3507
3508 case OPERATION_NAME_WRITE_BLIT_IMAGE:
3509 case OPERATION_NAME_READ_BLIT_IMAGE:
3510 case OPERATION_NAME_WRITE_IMAGE_VERTEX:
3511 case OPERATION_NAME_WRITE_IMAGE_TESSELLATION_CONTROL:
3512 case OPERATION_NAME_WRITE_IMAGE_TESSELLATION_EVALUATION:
3513 case OPERATION_NAME_WRITE_IMAGE_GEOMETRY:
3514 case OPERATION_NAME_WRITE_IMAGE_FRAGMENT:
3515 case OPERATION_NAME_WRITE_IMAGE_COMPUTE:
3516 case OPERATION_NAME_WRITE_IMAGE_COMPUTE_INDIRECT:
3517 case OPERATION_NAME_READ_IMAGE_VERTEX:
3518 case OPERATION_NAME_READ_IMAGE_TESSELLATION_CONTROL:
3519 case OPERATION_NAME_READ_IMAGE_TESSELLATION_EVALUATION:
3520 case OPERATION_NAME_READ_IMAGE_GEOMETRY:
3521 case OPERATION_NAME_READ_IMAGE_FRAGMENT:
3522 case OPERATION_NAME_READ_IMAGE_COMPUTE:
3523 case OPERATION_NAME_READ_IMAGE_COMPUTE_INDIRECT:
3524 return resourceDesc.type == RESOURCE_TYPE_IMAGE && resourceDesc.imageAspect == VK_IMAGE_ASPECT_COLOR_BIT;
3525
3526 case OPERATION_NAME_READ_UBO_VERTEX:
3527 case OPERATION_NAME_READ_UBO_TESSELLATION_CONTROL:
3528 case OPERATION_NAME_READ_UBO_TESSELLATION_EVALUATION:
3529 case OPERATION_NAME_READ_UBO_GEOMETRY:
3530 case OPERATION_NAME_READ_UBO_FRAGMENT:
3531 case OPERATION_NAME_READ_UBO_COMPUTE:
3532 case OPERATION_NAME_READ_UBO_COMPUTE_INDIRECT:
3533 return resourceDesc.type == RESOURCE_TYPE_BUFFER && resourceDesc.size.x() <= MAX_UBO_RANGE;
3534
3535 case OPERATION_NAME_WRITE_CLEAR_COLOR_IMAGE:
3536 return resourceDesc.type == RESOURCE_TYPE_IMAGE && resourceDesc.imageAspect == VK_IMAGE_ASPECT_COLOR_BIT;
3537
3538 case OPERATION_NAME_WRITE_CLEAR_DEPTH_STENCIL_IMAGE:
3539 return resourceDesc.type == RESOURCE_TYPE_IMAGE && (resourceDesc.imageAspect & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT));
3540
3541 case OPERATION_NAME_WRITE_DRAW:
3542 case OPERATION_NAME_WRITE_DRAW_INDEXED:
3543 case OPERATION_NAME_WRITE_DRAW_INDIRECT:
3544 case OPERATION_NAME_WRITE_DRAW_INDEXED_INDIRECT:
3545 return resourceDesc.type == RESOURCE_TYPE_IMAGE && resourceDesc.imageType == VK_IMAGE_TYPE_2D
3546 && (resourceDesc.imageAspect & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) == 0;
3547
3548 default:
3549 DE_ASSERT(0);
3550 return false;
3551 }
3552 }
3553
3554 std::string getOperationName (const OperationName opName)
3555 {
3556 switch (opName)
3557 {
3558 case OPERATION_NAME_WRITE_FILL_BUFFER: return "write_fill_buffer";
3559 case OPERATION_NAME_WRITE_UPDATE_BUFFER: return "write_update_buffer";
3560 case OPERATION_NAME_WRITE_COPY_BUFFER: return "write_copy_buffer";
3561 case OPERATION_NAME_WRITE_COPY_BUFFER_TO_IMAGE: return "write_copy_buffer_to_image";
3562 case OPERATION_NAME_WRITE_COPY_IMAGE_TO_BUFFER: return "write_copy_image_to_buffer";
3563 case OPERATION_NAME_WRITE_COPY_IMAGE: return "write_copy_image";
3564 case OPERATION_NAME_WRITE_BLIT_IMAGE: return "write_blit_image";
3565 case OPERATION_NAME_WRITE_SSBO_VERTEX: return "write_ssbo_vertex";
3566 case OPERATION_NAME_WRITE_SSBO_TESSELLATION_CONTROL: return "write_ssbo_tess_control";
3567 case OPERATION_NAME_WRITE_SSBO_TESSELLATION_EVALUATION: return "write_ssbo_tess_eval";
3568 case OPERATION_NAME_WRITE_SSBO_GEOMETRY: return "write_ssbo_geometry";
3569 case OPERATION_NAME_WRITE_SSBO_FRAGMENT: return "write_ssbo_fragment";
3570 case OPERATION_NAME_WRITE_SSBO_COMPUTE: return "write_ssbo_compute";
3571 case OPERATION_NAME_WRITE_SSBO_COMPUTE_INDIRECT: return "write_ssbo_compute_indirect";
3572 case OPERATION_NAME_WRITE_IMAGE_VERTEX: return "write_image_vertex";
3573 case OPERATION_NAME_WRITE_IMAGE_TESSELLATION_CONTROL: return "write_image_tess_control";
3574 case OPERATION_NAME_WRITE_IMAGE_TESSELLATION_EVALUATION: return "write_image_tess_eval";
3575 case OPERATION_NAME_WRITE_IMAGE_GEOMETRY: return "write_image_geometry";
3576 case OPERATION_NAME_WRITE_IMAGE_FRAGMENT: return "write_image_fragment";
3577 case OPERATION_NAME_WRITE_IMAGE_COMPUTE: return "write_image_compute";
3578 case OPERATION_NAME_WRITE_IMAGE_COMPUTE_INDIRECT: return "write_image_compute_indirect";
3579 case OPERATION_NAME_WRITE_CLEAR_COLOR_IMAGE: return "write_clear_color_image";
3580 case OPERATION_NAME_WRITE_CLEAR_DEPTH_STENCIL_IMAGE: return "write_clear_depth_stencil_image";
3581 case OPERATION_NAME_WRITE_DRAW: return "write_draw";
3582 case OPERATION_NAME_WRITE_DRAW_INDEXED: return "write_draw_indexed";
3583 case OPERATION_NAME_WRITE_DRAW_INDIRECT: return "write_draw_indirect";
3584 case OPERATION_NAME_WRITE_DRAW_INDEXED_INDIRECT: return "write_draw_indexed_indirect";
3585 case OPERATION_NAME_WRITE_CLEAR_ATTACHMENTS: return "write_clear_attachments";
3586 case OPERATION_NAME_WRITE_INDIRECT_BUFFER_DRAW: return "write_indirect_buffer_draw";
3587 case OPERATION_NAME_WRITE_INDIRECT_BUFFER_DRAW_INDEXED: return "write_indirect_buffer_draw_indexed";
3588 case OPERATION_NAME_WRITE_INDIRECT_BUFFER_DISPATCH: return "write_indirect_buffer_dispatch";
3589
3590 case OPERATION_NAME_READ_COPY_BUFFER: return "read_copy_buffer";
3591 case OPERATION_NAME_READ_COPY_BUFFER_TO_IMAGE: return "read_copy_buffer_to_image";
3592 case OPERATION_NAME_READ_COPY_IMAGE_TO_BUFFER: return "read_copy_image_to_buffer";
3593 case OPERATION_NAME_READ_COPY_IMAGE: return "read_copy_image";
3594 case OPERATION_NAME_READ_BLIT_IMAGE: return "read_blit_image";
3595 case OPERATION_NAME_READ_UBO_VERTEX: return "read_ubo_vertex";
3596 case OPERATION_NAME_READ_UBO_TESSELLATION_CONTROL: return "read_ubo_tess_control";
3597 case OPERATION_NAME_READ_UBO_TESSELLATION_EVALUATION: return "read_ubo_tess_eval";
3598 case OPERATION_NAME_READ_UBO_GEOMETRY: return "read_ubo_geometry";
3599 case OPERATION_NAME_READ_UBO_FRAGMENT: return "read_ubo_fragment";
3600 case OPERATION_NAME_READ_UBO_COMPUTE: return "read_ubo_compute";
3601 case OPERATION_NAME_READ_UBO_COMPUTE_INDIRECT: return "read_ubo_compute_indirect";
3602 case OPERATION_NAME_READ_SSBO_VERTEX: return "read_ssbo_vertex";
3603 case OPERATION_NAME_READ_SSBO_TESSELLATION_CONTROL: return "read_ssbo_tess_control";
3604 case OPERATION_NAME_READ_SSBO_TESSELLATION_EVALUATION: return "read_ssbo_tess_eval";
3605 case OPERATION_NAME_READ_SSBO_GEOMETRY: return "read_ssbo_geometry";
3606 case OPERATION_NAME_READ_SSBO_FRAGMENT: return "read_ssbo_fragment";
3607 case OPERATION_NAME_READ_SSBO_COMPUTE: return "read_ssbo_compute";
3608 case OPERATION_NAME_READ_SSBO_COMPUTE_INDIRECT: return "read_ssbo_compute_indirect";
3609 case OPERATION_NAME_READ_IMAGE_VERTEX: return "read_image_vertex";
3610 case OPERATION_NAME_READ_IMAGE_TESSELLATION_CONTROL: return "read_image_tess_control";
3611 case OPERATION_NAME_READ_IMAGE_TESSELLATION_EVALUATION: return "read_image_tess_eval";
3612 case OPERATION_NAME_READ_IMAGE_GEOMETRY: return "read_image_geometry";
3613 case OPERATION_NAME_READ_IMAGE_FRAGMENT: return "read_image_fragment";
3614 case OPERATION_NAME_READ_IMAGE_COMPUTE: return "read_image_compute";
3615 case OPERATION_NAME_READ_IMAGE_COMPUTE_INDIRECT: return "read_image_compute_indirect";
3616 case OPERATION_NAME_READ_INDIRECT_BUFFER_DRAW: return "read_indirect_buffer_draw";
3617 case OPERATION_NAME_READ_INDIRECT_BUFFER_DRAW_INDEXED: return "read_indirect_buffer_draw_indexed";
3618 case OPERATION_NAME_READ_INDIRECT_BUFFER_DISPATCH: return "read_indirect_buffer_dispatch";
3619 case OPERATION_NAME_READ_VERTEX_INPUT: return "read_vertex_input";
3620
3621 default:
3622 DE_ASSERT(0);
3623 return "";
3624 }
3625 }
3626
3627 de::MovePtr<OperationSupport> makeOperationSupport (const OperationName opName, const ResourceDescription& resourceDesc)
3628 {
3629 switch (opName)
3630 {
3631 case OPERATION_NAME_WRITE_FILL_BUFFER: return de::MovePtr<OperationSupport>(new FillUpdateBuffer ::Support (resourceDesc, FillUpdateBuffer::BUFFER_OP_FILL));
3632 case OPERATION_NAME_WRITE_UPDATE_BUFFER: return de::MovePtr<OperationSupport>(new FillUpdateBuffer ::Support (resourceDesc, FillUpdateBuffer::BUFFER_OP_UPDATE));
3633 case OPERATION_NAME_WRITE_COPY_BUFFER: return de::MovePtr<OperationSupport>(new CopyBuffer ::Support (resourceDesc, ACCESS_MODE_WRITE));
3634 case OPERATION_NAME_WRITE_COPY_BUFFER_TO_IMAGE: return de::MovePtr<OperationSupport>(new CopyBufferToImage ::Support (resourceDesc, ACCESS_MODE_WRITE));
3635 case OPERATION_NAME_WRITE_COPY_IMAGE_TO_BUFFER: return de::MovePtr<OperationSupport>(new CopyImageToBuffer ::Support (resourceDesc, ACCESS_MODE_WRITE));
3636 case OPERATION_NAME_WRITE_COPY_IMAGE: return de::MovePtr<OperationSupport>(new CopyBlitImage ::Support (resourceDesc, CopyBlitImage::TYPE_COPY, ACCESS_MODE_WRITE));
3637 case OPERATION_NAME_WRITE_BLIT_IMAGE: return de::MovePtr<OperationSupport>(new CopyBlitImage ::Support (resourceDesc, CopyBlitImage::TYPE_BLIT, ACCESS_MODE_WRITE));
3638 case OPERATION_NAME_WRITE_SSBO_VERTEX: return de::MovePtr<OperationSupport>(new ShaderAccess ::BufferSupport (resourceDesc, BUFFER_TYPE_STORAGE, ACCESS_MODE_WRITE, VK_SHADER_STAGE_VERTEX_BIT));
3639 case OPERATION_NAME_WRITE_SSBO_TESSELLATION_CONTROL: return de::MovePtr<OperationSupport>(new ShaderAccess ::BufferSupport (resourceDesc, BUFFER_TYPE_STORAGE, ACCESS_MODE_WRITE, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT));
3640 case OPERATION_NAME_WRITE_SSBO_TESSELLATION_EVALUATION: return de::MovePtr<OperationSupport>(new ShaderAccess ::BufferSupport (resourceDesc, BUFFER_TYPE_STORAGE, ACCESS_MODE_WRITE, VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT));
3641 case OPERATION_NAME_WRITE_SSBO_GEOMETRY: return de::MovePtr<OperationSupport>(new ShaderAccess ::BufferSupport (resourceDesc, BUFFER_TYPE_STORAGE, ACCESS_MODE_WRITE, VK_SHADER_STAGE_GEOMETRY_BIT));
3642 case OPERATION_NAME_WRITE_SSBO_FRAGMENT: return de::MovePtr<OperationSupport>(new ShaderAccess ::BufferSupport (resourceDesc, BUFFER_TYPE_STORAGE, ACCESS_MODE_WRITE, VK_SHADER_STAGE_FRAGMENT_BIT));
3643 case OPERATION_NAME_WRITE_SSBO_COMPUTE: return de::MovePtr<OperationSupport>(new ShaderAccess ::BufferSupport (resourceDesc, BUFFER_TYPE_STORAGE, ACCESS_MODE_WRITE, VK_SHADER_STAGE_COMPUTE_BIT));
3644 case OPERATION_NAME_WRITE_SSBO_COMPUTE_INDIRECT: return de::MovePtr<OperationSupport>(new ShaderAccess ::BufferSupport (resourceDesc, BUFFER_TYPE_STORAGE, ACCESS_MODE_WRITE, VK_SHADER_STAGE_COMPUTE_BIT, ShaderAccess::DISPATCH_CALL_DISPATCH_INDIRECT));
3645 case OPERATION_NAME_WRITE_IMAGE_VERTEX: return de::MovePtr<OperationSupport>(new ShaderAccess ::ImageSupport (resourceDesc, ACCESS_MODE_WRITE, VK_SHADER_STAGE_VERTEX_BIT));
3646 case OPERATION_NAME_WRITE_IMAGE_TESSELLATION_CONTROL: return de::MovePtr<OperationSupport>(new ShaderAccess ::ImageSupport (resourceDesc, ACCESS_MODE_WRITE, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT));
3647 case OPERATION_NAME_WRITE_IMAGE_TESSELLATION_EVALUATION: return de::MovePtr<OperationSupport>(new ShaderAccess ::ImageSupport (resourceDesc, ACCESS_MODE_WRITE, VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT));
3648 case OPERATION_NAME_WRITE_IMAGE_GEOMETRY: return de::MovePtr<OperationSupport>(new ShaderAccess ::ImageSupport (resourceDesc, ACCESS_MODE_WRITE, VK_SHADER_STAGE_GEOMETRY_BIT));
3649 case OPERATION_NAME_WRITE_IMAGE_FRAGMENT: return de::MovePtr<OperationSupport>(new ShaderAccess ::ImageSupport (resourceDesc, ACCESS_MODE_WRITE, VK_SHADER_STAGE_FRAGMENT_BIT));
3650 case OPERATION_NAME_WRITE_IMAGE_COMPUTE: return de::MovePtr<OperationSupport>(new ShaderAccess ::ImageSupport (resourceDesc, ACCESS_MODE_WRITE, VK_SHADER_STAGE_COMPUTE_BIT));
3651 case OPERATION_NAME_WRITE_IMAGE_COMPUTE_INDIRECT: return de::MovePtr<OperationSupport>(new ShaderAccess ::ImageSupport (resourceDesc, ACCESS_MODE_WRITE, VK_SHADER_STAGE_COMPUTE_BIT, ShaderAccess::DISPATCH_CALL_DISPATCH_INDIRECT));
3652 case OPERATION_NAME_WRITE_CLEAR_COLOR_IMAGE: return de::MovePtr<OperationSupport>(new ClearImage ::Support (resourceDesc, ClearImage::CLEAR_MODE_COLOR));
3653 case OPERATION_NAME_WRITE_CLEAR_DEPTH_STENCIL_IMAGE: return de::MovePtr<OperationSupport>(new ClearImage ::Support (resourceDesc, ClearImage::CLEAR_MODE_DEPTH_STENCIL));
3654 case OPERATION_NAME_WRITE_DRAW: return de::MovePtr<OperationSupport>(new Draw ::Support (resourceDesc, Draw::DRAW_CALL_DRAW));
3655 case OPERATION_NAME_WRITE_DRAW_INDEXED: return de::MovePtr<OperationSupport>(new Draw ::Support (resourceDesc, Draw::DRAW_CALL_DRAW_INDEXED));
3656 case OPERATION_NAME_WRITE_DRAW_INDIRECT: return de::MovePtr<OperationSupport>(new Draw ::Support (resourceDesc, Draw::DRAW_CALL_DRAW_INDIRECT));
3657 case OPERATION_NAME_WRITE_DRAW_INDEXED_INDIRECT: return de::MovePtr<OperationSupport>(new Draw ::Support (resourceDesc, Draw::DRAW_CALL_DRAW_INDEXED_INDIRECT));
3658 case OPERATION_NAME_WRITE_CLEAR_ATTACHMENTS: return de::MovePtr<OperationSupport>(new ClearAttachments ::Support (resourceDesc));
3659 case OPERATION_NAME_WRITE_INDIRECT_BUFFER_DRAW: return de::MovePtr<OperationSupport>(new IndirectBuffer ::WriteSupport (resourceDesc));
3660 case OPERATION_NAME_WRITE_INDIRECT_BUFFER_DRAW_INDEXED: return de::MovePtr<OperationSupport>(new IndirectBuffer ::WriteSupport (resourceDesc));
3661 case OPERATION_NAME_WRITE_INDIRECT_BUFFER_DISPATCH: return de::MovePtr<OperationSupport>(new IndirectBuffer ::WriteSupport (resourceDesc));
3662
3663 case OPERATION_NAME_READ_COPY_BUFFER: return de::MovePtr<OperationSupport>(new CopyBuffer ::Support (resourceDesc, ACCESS_MODE_READ));
3664 case OPERATION_NAME_READ_COPY_BUFFER_TO_IMAGE: return de::MovePtr<OperationSupport>(new CopyBufferToImage ::Support (resourceDesc, ACCESS_MODE_READ));
3665 case OPERATION_NAME_READ_COPY_IMAGE_TO_BUFFER: return de::MovePtr<OperationSupport>(new CopyImageToBuffer ::Support (resourceDesc, ACCESS_MODE_READ));
3666 case OPERATION_NAME_READ_COPY_IMAGE: return de::MovePtr<OperationSupport>(new CopyBlitImage ::Support (resourceDesc, CopyBlitImage::TYPE_COPY, ACCESS_MODE_READ));
3667 case OPERATION_NAME_READ_BLIT_IMAGE: return de::MovePtr<OperationSupport>(new CopyBlitImage ::Support (resourceDesc, CopyBlitImage::TYPE_BLIT, ACCESS_MODE_READ));
3668 case OPERATION_NAME_READ_UBO_VERTEX: return de::MovePtr<OperationSupport>(new ShaderAccess ::BufferSupport (resourceDesc, BUFFER_TYPE_UNIFORM, ACCESS_MODE_READ, VK_SHADER_STAGE_VERTEX_BIT));
3669 case OPERATION_NAME_READ_UBO_TESSELLATION_CONTROL: return de::MovePtr<OperationSupport>(new ShaderAccess ::BufferSupport (resourceDesc, BUFFER_TYPE_UNIFORM, ACCESS_MODE_READ, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT));
3670 case OPERATION_NAME_READ_UBO_TESSELLATION_EVALUATION: return de::MovePtr<OperationSupport>(new ShaderAccess ::BufferSupport (resourceDesc, BUFFER_TYPE_UNIFORM, ACCESS_MODE_READ, VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT));
3671 case OPERATION_NAME_READ_UBO_GEOMETRY: return de::MovePtr<OperationSupport>(new ShaderAccess ::BufferSupport (resourceDesc, BUFFER_TYPE_UNIFORM, ACCESS_MODE_READ, VK_SHADER_STAGE_GEOMETRY_BIT));
3672 case OPERATION_NAME_READ_UBO_FRAGMENT: return de::MovePtr<OperationSupport>(new ShaderAccess ::BufferSupport (resourceDesc, BUFFER_TYPE_UNIFORM, ACCESS_MODE_READ, VK_SHADER_STAGE_FRAGMENT_BIT));
3673 case OPERATION_NAME_READ_UBO_COMPUTE: return de::MovePtr<OperationSupport>(new ShaderAccess ::BufferSupport (resourceDesc, BUFFER_TYPE_UNIFORM, ACCESS_MODE_READ, VK_SHADER_STAGE_COMPUTE_BIT));
3674 case OPERATION_NAME_READ_UBO_COMPUTE_INDIRECT: return de::MovePtr<OperationSupport>(new ShaderAccess ::BufferSupport (resourceDesc, BUFFER_TYPE_UNIFORM, ACCESS_MODE_READ, VK_SHADER_STAGE_COMPUTE_BIT, ShaderAccess::DISPATCH_CALL_DISPATCH_INDIRECT));
3675 case OPERATION_NAME_READ_SSBO_VERTEX: return de::MovePtr<OperationSupport>(new ShaderAccess ::BufferSupport (resourceDesc, BUFFER_TYPE_STORAGE, ACCESS_MODE_READ, VK_SHADER_STAGE_VERTEX_BIT));
3676 case OPERATION_NAME_READ_SSBO_TESSELLATION_CONTROL: return de::MovePtr<OperationSupport>(new ShaderAccess ::BufferSupport (resourceDesc, BUFFER_TYPE_STORAGE, ACCESS_MODE_READ, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT));
3677 case OPERATION_NAME_READ_SSBO_TESSELLATION_EVALUATION: return de::MovePtr<OperationSupport>(new ShaderAccess ::BufferSupport (resourceDesc, BUFFER_TYPE_STORAGE, ACCESS_MODE_READ, VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT));
3678 case OPERATION_NAME_READ_SSBO_GEOMETRY: return de::MovePtr<OperationSupport>(new ShaderAccess ::BufferSupport (resourceDesc, BUFFER_TYPE_STORAGE, ACCESS_MODE_READ, VK_SHADER_STAGE_GEOMETRY_BIT));
3679 case OPERATION_NAME_READ_SSBO_FRAGMENT: return de::MovePtr<OperationSupport>(new ShaderAccess ::BufferSupport (resourceDesc, BUFFER_TYPE_STORAGE, ACCESS_MODE_READ, VK_SHADER_STAGE_FRAGMENT_BIT));
3680 case OPERATION_NAME_READ_SSBO_COMPUTE: return de::MovePtr<OperationSupport>(new ShaderAccess ::BufferSupport (resourceDesc, BUFFER_TYPE_STORAGE, ACCESS_MODE_READ, VK_SHADER_STAGE_COMPUTE_BIT));
3681 case OPERATION_NAME_READ_SSBO_COMPUTE_INDIRECT: return de::MovePtr<OperationSupport>(new ShaderAccess ::BufferSupport (resourceDesc, BUFFER_TYPE_STORAGE, ACCESS_MODE_READ, VK_SHADER_STAGE_COMPUTE_BIT, ShaderAccess::DISPATCH_CALL_DISPATCH_INDIRECT));
3682 case OPERATION_NAME_READ_IMAGE_VERTEX: return de::MovePtr<OperationSupport>(new ShaderAccess ::ImageSupport (resourceDesc, ACCESS_MODE_READ, VK_SHADER_STAGE_VERTEX_BIT));
3683 case OPERATION_NAME_READ_IMAGE_TESSELLATION_CONTROL: return de::MovePtr<OperationSupport>(new ShaderAccess ::ImageSupport (resourceDesc, ACCESS_MODE_READ, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT));
3684 case OPERATION_NAME_READ_IMAGE_TESSELLATION_EVALUATION: return de::MovePtr<OperationSupport>(new ShaderAccess ::ImageSupport (resourceDesc, ACCESS_MODE_READ, VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT));
3685 case OPERATION_NAME_READ_IMAGE_GEOMETRY: return de::MovePtr<OperationSupport>(new ShaderAccess ::ImageSupport (resourceDesc, ACCESS_MODE_READ, VK_SHADER_STAGE_GEOMETRY_BIT));
3686 case OPERATION_NAME_READ_IMAGE_FRAGMENT: return de::MovePtr<OperationSupport>(new ShaderAccess ::ImageSupport (resourceDesc, ACCESS_MODE_READ, VK_SHADER_STAGE_FRAGMENT_BIT));
3687 case OPERATION_NAME_READ_IMAGE_COMPUTE: return de::MovePtr<OperationSupport>(new ShaderAccess ::ImageSupport (resourceDesc, ACCESS_MODE_READ, VK_SHADER_STAGE_COMPUTE_BIT));
3688 case OPERATION_NAME_READ_IMAGE_COMPUTE_INDIRECT: return de::MovePtr<OperationSupport>(new ShaderAccess ::ImageSupport (resourceDesc, ACCESS_MODE_READ, VK_SHADER_STAGE_COMPUTE_BIT, ShaderAccess::DISPATCH_CALL_DISPATCH_INDIRECT));
3689 case OPERATION_NAME_READ_INDIRECT_BUFFER_DRAW: return de::MovePtr<OperationSupport>(new IndirectBuffer ::ReadSupport (resourceDesc));
3690 case OPERATION_NAME_READ_INDIRECT_BUFFER_DRAW_INDEXED: return de::MovePtr<OperationSupport>(new IndirectBuffer ::ReadSupport (resourceDesc));
3691 case OPERATION_NAME_READ_INDIRECT_BUFFER_DISPATCH: return de::MovePtr<OperationSupport>(new IndirectBuffer ::ReadSupport (resourceDesc));
3692 case OPERATION_NAME_READ_VERTEX_INPUT: return de::MovePtr<OperationSupport>(new VertexInput ::Support (resourceDesc));
3693
3694 default:
3695 DE_ASSERT(0);
3696 return de::MovePtr<OperationSupport>();
3697 }
3698 }
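
//! \note Illustrative sketch only (not part of the test code; the calling test case, 'testContext' and
//! 'pipelineCacheData' are assumed). It shows how the factory functions above are typically combined,
//! assuming shader programs were registered earlier via OperationSupport::initPrograms():
//!
//!     if (!isResourceSupported(writeOpName, resourceDesc) || !isResourceSupported(readOpName, resourceDesc))
//!         TCU_THROW(NotSupportedError, "Resource not supported by the operation");
//!
//!     const de::UniquePtr<OperationSupport> writeSupport (makeOperationSupport(writeOpName, resourceDesc).release());
//!     const de::UniquePtr<OperationSupport> readSupport  (makeOperationSupport(readOpName,  resourceDesc).release());
//!
//!     OperationContext context  (testContext, pipelineCacheData);
//!     Resource         resource (context, resourceDesc,
//!                                writeSupport->getResourceUsageFlags() | readSupport->getResourceUsageFlags(),
//!                                VK_SHARING_MODE_EXCLUSIVE, std::vector<deUint32>());
//!
//!     const de::UniquePtr<Operation> writeOp (writeSupport->build(context, resource).release());
//!     const de::UniquePtr<Operation> readOp  (readSupport->build(context, resource).release());
//!
//!     // Record writeOp->recordCommands(), synchronize using the operations' getSyncInfo() masks,
//!     // record readOp->recordCommands(), then compare writeOp->getData() with readOp->getData().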
3699
3700 } // synchronization
3701 } // vkt
3702