/*------------------------------------------------------------------------
 * Vulkan Conformance Tests
 * ------------------------
 *
 * Copyright (c) 2015 The Khronos Group Inc.
 * Copyright (c) 2015 ARM Ltd.
 * Copyright (c) 2023 LunarG, Inc.
 * Copyright (c) 2023 Nintendo
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*!
 * \file
 * \brief Pipeline Cache and Pipeline Binary Tests
 *//*--------------------------------------------------------------------*/

#include "vktPipelineCacheTests.hpp"
#include "vktPipelineClearUtil.hpp"
#include "vktPipelineImageUtil.hpp"
#include "vktPipelineVertexUtil.hpp"
#include "vktTestCase.hpp"
#include "vktTestCaseUtil.hpp"
#include "vkPipelineBinaryUtil.hpp"
#include "vkImageUtil.hpp"
#include "vkMemUtil.hpp"
#include "vkPrograms.hpp"
#include "vkBuilderUtil.hpp"
#include "vkQueryUtil.hpp"
#include "vkRef.hpp"
#include "vkRefUtil.hpp"
#include "vkTypeUtil.hpp"
#include "vkCmdUtil.hpp"
#include "vkObjUtil.hpp"
#include "tcuImageCompare.hpp"
#include "deUniquePtr.hpp"
#include "deMemory.h"
#include "tcuTestLog.hpp"

#include <sstream>
#include <vector>
#include <memory>

namespace vkt
{
namespace pipeline
{

using namespace vk;

namespace
{

// helper functions

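// Build a test-name (or human readable description) fragment from the set of shader stages used by a test variant.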
std::string getShaderFlagStr(const VkShaderStageFlags shader, bool isDescription)
{
    std::ostringstream desc;
    if (shader & VK_SHADER_STAGE_COMPUTE_BIT)
    {
        desc << ((isDescription) ? "compute stage" : "compute_stage");
    }
    else
    {
        desc << ((isDescription) ? "vertex stage" : "vertex_stage");
        if (shader & VK_SHADER_STAGE_GEOMETRY_BIT)
            desc << ((isDescription) ? " geometry stage" : "_geometry_stage");
        if (shader & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT)
            desc << ((isDescription) ? " tessellation control stage" : "_tessellation_control_stage");
        if (shader & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)
            desc << ((isDescription) ? " tessellation evaluation stage" : "_tessellation_evaluation_stage");
        desc << ((isDescription) ? " fragment stage" : "_fragment_stage");
    }

    return desc.str();
}

enum class TestMode
{
    CACHE = 0,
    BINARY
};

// helper classes
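// TestParam describes a single test variant: whether it exercises VkPipelineCache or VK_KHR_pipeline_binary,
// which pipeline construction type and shader stages are used, whether deliberately mismatching ("miss")
// shaders are also compiled, and whether pipeline binaries are recreated from raw binary data.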
class TestParam
{
public:
    TestParam(TestMode mode, PipelineConstructionType pipelineConstructionType, const VkShaderStageFlags shaders,
              bool compileMissShaders = false, VkPipelineCacheCreateFlags pipelineCacheCreateFlags = 0u,
              bool useBinariesFromBinaryData = false);
    virtual ~TestParam(void) = default;
    virtual const std::string generateTestName(void) const;
    TestMode getMode(void) const
    {
        return m_mode;
    }
    PipelineConstructionType getPipelineConstructionType(void) const
    {
        return m_pipelineConstructionType;
    };
    VkShaderStageFlags getShaderFlags(void) const
    {
        return m_shaders;
    }
    VkPipelineCacheCreateFlags getPipelineCacheCreateFlags(void) const
    {
        return m_pipelineCacheCreateFlags;
    }
    bool getCompileMissShaders(void) const
    {
        return m_compileMissShaders;
    }
    bool getUseBinariesFromBinaryData(void) const
    {
        return m_useBinariesFromBinaryData;
    }

protected:
    TestMode m_mode;
    PipelineConstructionType m_pipelineConstructionType;
    VkShaderStageFlags m_shaders;
    bool m_compileMissShaders;
    VkPipelineCacheCreateFlags m_pipelineCacheCreateFlags;
    bool m_useBinariesFromBinaryData;
};

TestParam::TestParam(TestMode mode, PipelineConstructionType pipelineConstructionType, const VkShaderStageFlags shaders,
                     bool compileMissShaders, VkPipelineCacheCreateFlags pipelineCacheCreateFlags,
                     bool useBinariesFromBinaryData)
    : m_mode(mode)
    , m_pipelineConstructionType(pipelineConstructionType)
    , m_shaders(shaders)
    , m_compileMissShaders(compileMissShaders)
    , m_pipelineCacheCreateFlags(pipelineCacheCreateFlags)
    , m_useBinariesFromBinaryData(useBinariesFromBinaryData)
{
}

const std::string TestParam::generateTestName(void) const
{
    std::string name = getShaderFlagStr(m_shaders, false);
    if (m_pipelineCacheCreateFlags == VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT)
        name += "_externally_synchronized";
    if (m_useBinariesFromBinaryData)
        name += "_use_binary_data";
    return name;
}

template <class Test>
vkt::TestCase *newTestCase(tcu::TestContext &testContext, const TestParam *testParam)
{
    return new Test(testContext, testParam->generateTestName().c_str(), testParam);
}

Move<VkBuffer> createBufferAndBindMemory(Context &context, VkDeviceSize size, VkBufferUsageFlags usage,
                                         de::MovePtr<Allocation> *pAlloc)
{
    const DeviceInterface &vk       = context.getDeviceInterface();
    const VkDevice vkDevice         = context.getDevice();
    const uint32_t queueFamilyIndex = context.getUniversalQueueFamilyIndex();

    const VkBufferCreateInfo vertexBufferParams{
        VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
        nullptr,                              // const void* pNext;
        0u,                                   // VkBufferCreateFlags flags;
        size,                                 // VkDeviceSize size;
        usage,                                // VkBufferUsageFlags usage;
        VK_SHARING_MODE_EXCLUSIVE,            // VkSharingMode sharingMode;
        1u,                                   // uint32_t queueFamilyCount;
        &queueFamilyIndex                     // const uint32_t* pQueueFamilyIndices;
    };

    Move<VkBuffer> vertexBuffer = createBuffer(vk, vkDevice, &vertexBufferParams);

    *pAlloc = context.getDefaultAllocator().allocate(getBufferMemoryRequirements(vk, vkDevice, *vertexBuffer),
                                                     MemoryRequirement::HostVisible);
    VK_CHECK(vk.bindBufferMemory(vkDevice, *vertexBuffer, (*pAlloc)->getMemory(), (*pAlloc)->getOffset()));

    return vertexBuffer;
}

Move<VkImage> createImage2DAndBindMemory(Context &context, VkFormat format, uint32_t width, uint32_t height,
                                         VkImageUsageFlags usage, VkSampleCountFlagBits sampleCount,
                                         de::details::MovePtr<Allocation> *pAlloc)
{
    const DeviceInterface &vk       = context.getDeviceInterface();
    const VkDevice vkDevice         = context.getDevice();
    const uint32_t queueFamilyIndex = context.getUniversalQueueFamilyIndex();

    const VkImageCreateInfo colorImageParams = {
        VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
        nullptr,                             // const void* pNext;
        0u,                                  // VkImageCreateFlags flags;
        VK_IMAGE_TYPE_2D,                    // VkImageType imageType;
        format,                              // VkFormat format;
        {width, height, 1u},                 // VkExtent3D extent;
        1u,                                  // uint32_t mipLevels;
        1u,                                  // uint32_t arrayLayers;
        sampleCount,                         // VkSampleCountFlagBits samples;
        VK_IMAGE_TILING_OPTIMAL,             // VkImageTiling tiling;
        usage,                               // VkImageUsageFlags usage;
        VK_SHARING_MODE_EXCLUSIVE,           // VkSharingMode sharingMode;
        1u,                                  // uint32_t queueFamilyCount;
        &queueFamilyIndex,                   // const uint32_t* pQueueFamilyIndices;
        VK_IMAGE_LAYOUT_UNDEFINED,           // VkImageLayout initialLayout;
    };

    Move<VkImage> image = createImage(vk, vkDevice, &colorImageParams);

    *pAlloc = context.getDefaultAllocator().allocate(getImageMemoryRequirements(vk, vkDevice, *image),
                                                     MemoryRequirement::Any);
    VK_CHECK(vk.bindImageMemory(vkDevice, *image, (*pAlloc)->getMemory(), (*pAlloc)->getOffset()));

    return image;
}

// Test Classes
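// Base test case: holds the TestParam and checks the common support requirement
// (VK_KHR_pipeline_binary when the variant runs in binary mode).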
class BaseTestCase : public vkt::TestCase
{
public:
    BaseTestCase(tcu::TestContext &testContext, const std::string &name, const TestParam *param)
        : vkt::TestCase(testContext, name)
        , m_param(*param)
    {
    }
    virtual ~BaseTestCase(void) = default;
    virtual void checkSupport(Context &context) const;

protected:
    const TestParam m_param;
};

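// Base instance shared by the graphics and compute variants. Each test builds two pipelines: one from scratch
// (PIPELINE_NDX_NO_BLOBS) and one that reuses blobs - either a pipeline cache or pipeline binaries -
// (PIPELINE_NDX_USE_BLOBS), and then compares the results produced by both.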
class BaseTestInstance : public vkt::TestInstance
{
public:
    enum
    {
        PIPELINE_NDX_NO_BLOBS,
        PIPELINE_NDX_USE_BLOBS,
        PIPELINE_NDX_COUNT,
    };
    BaseTestInstance(Context &context, const TestParam *param);
    virtual ~BaseTestInstance(void) = default;
    virtual tcu::TestStatus iterate(void);

protected:
    virtual tcu::TestStatus verifyTestResult(void) = 0;
    virtual void prepareCommandBuffer(void) = 0;

protected:
    const TestParam *m_param;
    Move<VkCommandPool> m_cmdPool;
    Move<VkCommandBuffer> m_cmdBuffer;

    // cache is only used when m_mode is set to TestMode::CACHE
    Move<VkPipelineCache> m_cache;

    // binary related structures are used when m_mode is set to TestMode::BINARY
    PipelineBinaryWrapper m_binaries[4];
};

void BaseTestCase::checkSupport(Context &context) const
{
    if (m_param.getMode() == TestMode::BINARY)
        context.requireDeviceFunctionality("VK_KHR_pipeline_binary");
}

BaseTestInstance::BaseTestInstance(Context &context, const TestParam *param)
    : TestInstance(context)
    , m_param(param)
    , m_binaries{
          {context.getDeviceInterface(), context.getDevice()},
          {context.getDeviceInterface(), context.getDevice()},
          {context.getDeviceInterface(), context.getDevice()},
          {context.getDeviceInterface(), context.getDevice()},
      }
{
    const DeviceInterface &vk       = m_context.getDeviceInterface();
    const VkDevice vkDevice         = m_context.getDevice();
    const uint32_t queueFamilyIndex = context.getUniversalQueueFamilyIndex();

    // Create command pool
    m_cmdPool = createCommandPool(vk, vkDevice, VK_COMMAND_POOL_CREATE_TRANSIENT_BIT, queueFamilyIndex);

    // Create command buffer
    m_cmdBuffer = allocateCommandBuffer(vk, vkDevice, *m_cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY);

    // Create the Pipeline Cache
    if (m_param->getMode() == TestMode::CACHE)
    {
        const VkPipelineCacheCreateInfo pipelineCacheCreateInfo{
            VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO, // VkStructureType sType;
            nullptr,                                      // const void* pNext;
            m_param->getPipelineCacheCreateFlags(),       // VkPipelineCacheCreateFlags flags;
            0u,                                           // uintptr_t initialDataSize;
            nullptr,                                      // const void* pInitialData;
        };

        m_cache = createPipelineCache(vk, vkDevice, &pipelineCacheCreateInfo);
    }
}

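// Record the work for both pipelines, submit it to the universal queue, and let the subclass compare the outputs.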
tcu::TestStatus BaseTestInstance::iterate(void)
{
    const DeviceInterface &vk = m_context.getDeviceInterface();
    const VkDevice vkDevice   = m_context.getDevice();
    const VkQueue queue       = m_context.getUniversalQueue();

    prepareCommandBuffer();

    submitCommandsAndWait(vk, vkDevice, queue, m_cmdBuffer.get());

    return verifyTestResult();
}

class GraphicsTest : public BaseTestCase
{
public:
    GraphicsTest(tcu::TestContext &testContext, const std::string &name, const TestParam *param)
        : BaseTestCase(testContext, name, param)
    {
    }
    virtual ~GraphicsTest(void)
    {
    }
    virtual void initPrograms(SourceCollections &programCollection) const;
    virtual void checkSupport(Context &context) const;
    virtual TestInstance *createInstance(Context &context) const;
};

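// Graphics variant: renders overlapping quads into a small color/depth framebuffer once with each pipeline
// and verifies that both resulting images match.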
class GraphicsTestInstance : public BaseTestInstance
{
public:
    GraphicsTestInstance(Context &context, const TestParam *param);
    virtual ~GraphicsTestInstance(void) = default;

protected:
    void preparePipelineWrapper(GraphicsPipelineWrapper &gpw, VkPipelineCache cache, bool useMissShaders,
                                bool useShaderModules, VkPipelineBinaryInfoKHR *monolithicBinaryInfo,
                                VkPipelineBinaryInfoKHR *vertexPartBinaryInfo,
                                VkPipelineBinaryInfoKHR *preRasterizationPartBinaryInfo,
                                VkPipelineBinaryInfoKHR *fragmentShaderPartBinaryInfo,
                                VkPipelineBinaryInfoKHR *fragmentOutputPartBinaryInfo);
    virtual void preparePipelines(void);
    void preparePipelinesForBinaries(bool createFromBlobs);
    void prepareRenderPass(const RenderPassWrapper &renderPassFramebuffer, GraphicsPipelineWrapper &pipeline);
    virtual void prepareCommandBuffer(void);
    virtual tcu::TestStatus verifyTestResult(void);

    using GraphicsPipelinePtr = std::unique_ptr<GraphicsPipelineWrapper>;

protected:
    const tcu::UVec2 m_renderSize;
    const VkFormat m_colorFormat;
    const VkFormat m_depthFormat;
    PipelineLayoutWrapper m_pipelineLayout;

    Move<VkImage> m_depthImage;
    de::MovePtr<Allocation> m_depthImageAlloc;
    de::MovePtr<Allocation> m_colorImageAlloc[PIPELINE_NDX_COUNT];
    Move<VkImageView> m_depthAttachmentView;
    VkImageMemoryBarrier m_imageLayoutBarriers[3];

    GraphicsPipelinePtr m_pipeline[PIPELINE_NDX_COUNT];
    Move<VkBuffer> m_vertexBuffer;
    de::MovePtr<Allocation> m_vertexBufferMemory;
    std::vector<Vertex4RGBA> m_vertices;

    Move<VkImage> m_colorImage[PIPELINE_NDX_COUNT];
    Move<VkImageView> m_colorAttachmentView[PIPELINE_NDX_COUNT];
    RenderPassWrapper m_renderPassFramebuffer[PIPELINE_NDX_COUNT];
};

void GraphicsTest::initPrograms(SourceCollections &programCollection) const
{
    enum ShaderCacheOpType
    {
        SHADERS_CACHE_OP_HIT = 0,
        SHADERS_CACHE_OP_MISS,

        SHADERS_CACHE_OP_LAST
    };

    for (uint32_t shaderOpNdx = 0u; shaderOpNdx < SHADERS_CACHE_OP_LAST; shaderOpNdx++)
    {
        const ShaderCacheOpType shaderOp = (ShaderCacheOpType)shaderOpNdx;

        if (shaderOp == SHADERS_CACHE_OP_MISS && !m_param.getCompileMissShaders())
            continue;

        const std::string missHitDiff = (shaderOp == SHADERS_CACHE_OP_HIT ? "" : " + 0.1");
        const std::string missSuffix  = (shaderOp == SHADERS_CACHE_OP_HIT ? "" : "_miss");

        programCollection.glslSources.add("color_vert" + missSuffix)
            << glu::VertexSource("#version 450\n"
                                 "layout(location = 0) in vec4 position;\n"
                                 "layout(location = 1) in vec4 color;\n"
                                 "layout(location = 0) out highp vec4 vtxColor;\n"
                                 "out gl_PerVertex { vec4 gl_Position; };\n"
                                 "void main (void)\n"
                                 "{\n"
                                 "  gl_Position = position;\n"
                                 "  vtxColor = color" +
                                 missHitDiff +
                                 ";\n"
                                 "}\n");

        programCollection.glslSources.add("color_frag" + missSuffix)
            << glu::FragmentSource("#version 310 es\n"
                                   "layout(location = 0) in highp vec4 vtxColor;\n"
                                   "layout(location = 0) out highp vec4 fragColor;\n"
                                   "void main (void)\n"
                                   "{\n"
                                   "  fragColor = vtxColor" +
                                   missHitDiff +
                                   ";\n"
                                   "}\n");

        VkShaderStageFlags shaderFlag = m_param.getShaderFlags();
        if (shaderFlag & VK_SHADER_STAGE_GEOMETRY_BIT)
        {
            programCollection.glslSources.add("unused_geo" + missSuffix)
                << glu::GeometrySource("#version 450 \n"
                                       "layout(triangles) in;\n"
                                       "layout(triangle_strip, max_vertices = 3) out;\n"
                                       "layout(location = 0) in highp vec4 in_vtxColor[];\n"
                                       "layout(location = 0) out highp vec4 vtxColor;\n"
                                       "out gl_PerVertex { vec4 gl_Position; };\n"
                                       "in gl_PerVertex { vec4 gl_Position; } gl_in[];\n"
                                       "void main (void)\n"
                                       "{\n"
                                       "  for(int ndx=0; ndx<3; ndx++)\n"
                                       "  {\n"
                                       "    gl_Position = gl_in[ndx].gl_Position;\n"
                                       "    vtxColor = in_vtxColor[ndx]" +
                                       missHitDiff +
                                       ";\n"
                                       "    EmitVertex();\n"
                                       "  }\n"
                                       "  EndPrimitive();\n"
                                       "}\n");
        }
        if (shaderFlag & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT)
        {
            programCollection.glslSources.add("basic_tcs" + missSuffix) << glu::TessellationControlSource(
                "#version 450 \n"
                "layout(vertices = 3) out;\n"
                "layout(location = 0) in highp vec4 color[];\n"
                "layout(location = 0) out highp vec4 vtxColor[];\n"
                "out gl_PerVertex { vec4 gl_Position; } gl_out[3];\n"
                "in gl_PerVertex { vec4 gl_Position; } gl_in[gl_MaxPatchVertices];\n"
                "void main()\n"
                "{\n"
                "  gl_TessLevelOuter[0] = 4.0;\n"
                "  gl_TessLevelOuter[1] = 4.0;\n"
                "  gl_TessLevelOuter[2] = 4.0;\n"
                "  gl_TessLevelInner[0] = 4.0;\n"
                "  gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;\n"
                "  vtxColor[gl_InvocationID] = color[gl_InvocationID]" +
                missHitDiff +
                ";\n"
                "}\n");
        }
        if (shaderFlag & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)
        {
            programCollection.glslSources.add("basic_tes" + missSuffix) << glu::TessellationEvaluationSource(
                "#version 450 \n"
                "layout(triangles, fractional_even_spacing, ccw) in;\n"
                "layout(location = 0) in highp vec4 colors[];\n"
                "layout(location = 0) out highp vec4 vtxColor;\n"
                "out gl_PerVertex { vec4 gl_Position; };\n"
                "in gl_PerVertex { vec4 gl_Position; } gl_in[gl_MaxPatchVertices];\n"
                "void main() \n"
                "{\n"
                "  float u = gl_TessCoord.x;\n"
                "  float v = gl_TessCoord.y;\n"
                "  float w = gl_TessCoord.z;\n"
                "  vec4 pos = vec4(0);\n"
                "  vec4 color = vec4(0)" +
                missHitDiff +
                ";\n"
                "  pos.xyz += u * gl_in[0].gl_Position.xyz;\n"
                "  color.xyz += u * colors[0].xyz;\n"
                "  pos.xyz += v * gl_in[1].gl_Position.xyz;\n"
                "  color.xyz += v * colors[1].xyz;\n"
                "  pos.xyz += w * gl_in[2].gl_Position.xyz;\n"
                "  color.xyz += w * colors[2].xyz;\n"
                "  pos.w = 1.0;\n"
                "  color.w = 1.0;\n"
                "  gl_Position = pos;\n"
                "  vtxColor = color;\n"
                "}\n");
        }
    }
}

void GraphicsTest::checkSupport(Context &context) const
{
    if (m_param.getMode() == TestMode::BINARY)
        context.requireDeviceFunctionality("VK_KHR_pipeline_binary");

    if (m_param.getShaderFlags() & VK_SHADER_STAGE_GEOMETRY_BIT)
        context.requireDeviceCoreFeature(DEVICE_CORE_FEATURE_GEOMETRY_SHADER);
    if ((m_param.getShaderFlags() & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) ||
        (m_param.getShaderFlags() & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT))
        context.requireDeviceCoreFeature(DEVICE_CORE_FEATURE_TESSELLATION_SHADER);

    checkPipelineConstructionRequirements(context.getInstanceInterface(), context.getPhysicalDevice(),
                                          m_param.getPipelineConstructionType());
}

TestInstance *GraphicsTest::createInstance(Context &context) const
{
    return new GraphicsTestInstance(context, &m_param);
}

GraphicsTestInstance::GraphicsTestInstance(Context &context, const TestParam *param)
    : BaseTestInstance(context, param)
    , m_renderSize(32u, 32u)
    , m_colorFormat(VK_FORMAT_R8G8B8A8_UNORM)
    , m_depthFormat(VK_FORMAT_D16_UNORM)
{
    const DeviceInterface &vk = m_context.getDeviceInterface();
    const VkDevice vkDevice   = m_context.getDevice();

    // pipeline reconstructed from binaries should not use RETAIN_LINK_TIME_OPTIMIZATION/LINK_TIME_OPTIMIZATION
    PipelineConstructionType pipelineConstructionTypeForUseBlobs = param->getPipelineConstructionType();
    if ((param->getMode() == TestMode::BINARY) &&
        (pipelineConstructionTypeForUseBlobs == PIPELINE_CONSTRUCTION_TYPE_LINK_TIME_OPTIMIZED_LIBRARY))
        pipelineConstructionTypeForUseBlobs = PIPELINE_CONSTRUCTION_TYPE_FAST_LINKED_LIBRARY;

    m_pipeline[PIPELINE_NDX_NO_BLOBS] = GraphicsPipelinePtr(new GraphicsPipelineWrapper(
        context.getInstanceInterface(), context.getDeviceInterface(), context.getPhysicalDevice(), context.getDevice(),
        context.getDeviceExtensions(), param->getPipelineConstructionType()));
    m_pipeline[PIPELINE_NDX_USE_BLOBS] = GraphicsPipelinePtr(new GraphicsPipelineWrapper(
        context.getInstanceInterface(), context.getDeviceInterface(), context.getPhysicalDevice(), context.getDevice(),
        context.getDeviceExtensions(), pipelineConstructionTypeForUseBlobs));

    if (param->getMode() == TestMode::BINARY)
    {
        m_pipeline[PIPELINE_NDX_NO_BLOBS]->setPipelineCreateFlags2(VK_PIPELINE_CREATE_2_CAPTURE_DATA_BIT_KHR);
        m_pipeline[PIPELINE_NDX_USE_BLOBS]->setPipelineCreateFlags2(VK_PIPELINE_CREATE_2_CAPTURE_DATA_BIT_KHR);
    }

    // Create vertex buffer
    {
        m_vertexBuffer =
            createBufferAndBindMemory(m_context, 1024u, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, &m_vertexBufferMemory);

        m_vertices = createOverlappingQuads();
        // Load vertices into vertex buffer
        deMemcpy(m_vertexBufferMemory->getHostPtr(), m_vertices.data(), m_vertices.size() * sizeof(Vertex4RGBA));
        flushAlloc(vk, vkDevice, *m_vertexBufferMemory);
    }

    // Create render pass
    m_renderPassFramebuffer[PIPELINE_NDX_NO_BLOBS] =
        RenderPassWrapper(m_param->getPipelineConstructionType(), vk, vkDevice, m_colorFormat, m_depthFormat);
    m_renderPassFramebuffer[PIPELINE_NDX_USE_BLOBS] =
        RenderPassWrapper(m_param->getPipelineConstructionType(), vk, vkDevice, m_colorFormat, m_depthFormat);

    const VkComponentMapping ComponentMappingRGBA = {VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G,
                                                     VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_A};
    // Create color image
    {
        m_colorImage[PIPELINE_NDX_NO_BLOBS] =
            createImage2DAndBindMemory(m_context, m_colorFormat, m_renderSize.x(), m_renderSize.y(),
                                       VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
                                       VK_SAMPLE_COUNT_1_BIT, &m_colorImageAlloc[PIPELINE_NDX_NO_BLOBS]);
        m_colorImage[PIPELINE_NDX_USE_BLOBS] =
            createImage2DAndBindMemory(m_context, m_colorFormat, m_renderSize.x(), m_renderSize.y(),
                                       VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
                                       VK_SAMPLE_COUNT_1_BIT, &m_colorImageAlloc[PIPELINE_NDX_USE_BLOBS]);
    }

    // Create depth image
    {
        m_depthImage = createImage2DAndBindMemory(m_context, m_depthFormat, m_renderSize.x(), m_renderSize.y(),
                                                  VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VK_SAMPLE_COUNT_1_BIT,
                                                  &m_depthImageAlloc);
    }

    // Set up image layout transition barriers
    {
        VkImageMemoryBarrier colorImageBarrier{
            VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,      // VkStructureType sType;
            nullptr,                                     // const void* pNext;
            0u,                                          // VkAccessFlags srcAccessMask;
            VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,        // VkAccessFlags dstAccessMask;
            VK_IMAGE_LAYOUT_UNDEFINED,                   // VkImageLayout oldLayout;
            VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,    // VkImageLayout newLayout;
            VK_QUEUE_FAMILY_IGNORED,                     // uint32_t srcQueueFamilyIndex;
            VK_QUEUE_FAMILY_IGNORED,                     // uint32_t dstQueueFamilyIndex;
            *m_colorImage[PIPELINE_NDX_NO_BLOBS],        // VkImage image;
            {VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u}, // VkImageSubresourceRange subresourceRange;
        };

        m_imageLayoutBarriers[0] = colorImageBarrier;

        colorImageBarrier.image = *m_colorImage[PIPELINE_NDX_USE_BLOBS];
        m_imageLayoutBarriers[1] = colorImageBarrier;

        const VkImageMemoryBarrier depthImageBarrier{
            VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,           // VkStructureType sType;
            nullptr,                                          // const void* pNext;
            0u,                                               // VkAccessFlags srcAccessMask;
            VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,     // VkAccessFlags dstAccessMask;
            VK_IMAGE_LAYOUT_UNDEFINED,                        // VkImageLayout oldLayout;
            VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, // VkImageLayout newLayout;
            VK_QUEUE_FAMILY_IGNORED,                          // uint32_t srcQueueFamilyIndex;
            VK_QUEUE_FAMILY_IGNORED,                          // uint32_t dstQueueFamilyIndex;
            *m_depthImage,                                    // VkImage image;
            {VK_IMAGE_ASPECT_DEPTH_BIT, 0u, 1u, 0u, 1u},      // VkImageSubresourceRange subresourceRange;
        };

        m_imageLayoutBarriers[2] = depthImageBarrier;
    }
    // Create color attachment view
    {
        VkImageViewCreateInfo colorAttachmentViewParams{
            VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,    // VkStructureType sType;
            nullptr,                                     // const void* pNext;
            0u,                                          // VkImageViewCreateFlags flags;
            *m_colorImage[PIPELINE_NDX_NO_BLOBS],        // VkImage image;
            VK_IMAGE_VIEW_TYPE_2D,                       // VkImageViewType viewType;
            m_colorFormat,                               // VkFormat format;
            ComponentMappingRGBA,                        // VkComponentMapping components;
            {VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u}, // VkImageSubresourceRange subresourceRange;
        };

        m_colorAttachmentView[PIPELINE_NDX_NO_BLOBS] = createImageView(vk, vkDevice, &colorAttachmentViewParams);

        colorAttachmentViewParams.image = *m_colorImage[PIPELINE_NDX_USE_BLOBS];
        m_colorAttachmentView[PIPELINE_NDX_USE_BLOBS] = createImageView(vk, vkDevice, &colorAttachmentViewParams);
    }

    // Create depth attachment view
    {
        const VkImageViewCreateInfo depthAttachmentViewParams{
            VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,    // VkStructureType sType;
            nullptr,                                     // const void* pNext;
            0u,                                          // VkImageViewCreateFlags flags;
            *m_depthImage,                               // VkImage image;
            VK_IMAGE_VIEW_TYPE_2D,                       // VkImageViewType viewType;
            m_depthFormat,                               // VkFormat format;
            ComponentMappingRGBA,                        // VkComponentMapping components;
            {VK_IMAGE_ASPECT_DEPTH_BIT, 0u, 1u, 0u, 1u}, // VkImageSubresourceRange subresourceRange;
        };

        m_depthAttachmentView = createImageView(vk, vkDevice, &depthAttachmentViewParams);
    }

    // Create framebuffer
    {
        std::vector<VkImage> images = {
            *m_colorImage[PIPELINE_NDX_NO_BLOBS],
            *m_depthImage,
        };
        VkImageView attachmentBindInfos[2] = {
            *m_colorAttachmentView[PIPELINE_NDX_NO_BLOBS],
            *m_depthAttachmentView,
        };

        VkFramebufferCreateInfo framebufferParams = {
            VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,        // VkStructureType sType;
            nullptr,                                          // const void* pNext;
            0u,                                               // VkFramebufferCreateFlags flags;
            *m_renderPassFramebuffer[PIPELINE_NDX_USE_BLOBS], // VkRenderPass renderPass;
            2u,                                               // uint32_t attachmentCount;
            attachmentBindInfos,                              // const VkImageView* pAttachments;
            (uint32_t)m_renderSize.x(),                       // uint32_t width;
            (uint32_t)m_renderSize.y(),                       // uint32_t height;
            1u,                                               // uint32_t layers;
        };

        m_renderPassFramebuffer[PIPELINE_NDX_NO_BLOBS].createFramebuffer(vk, vkDevice, &framebufferParams, images);

        framebufferParams.renderPass = *m_renderPassFramebuffer[PIPELINE_NDX_USE_BLOBS];
        images[0] = *m_colorImage[PIPELINE_NDX_USE_BLOBS];
        attachmentBindInfos[0] = *m_colorAttachmentView[PIPELINE_NDX_USE_BLOBS];
        m_renderPassFramebuffer[PIPELINE_NDX_USE_BLOBS].createFramebuffer(vk, vkDevice, &framebufferParams, images);
    }

    // Create pipeline layout
    {
        const VkPipelineLayoutCreateInfo pipelineLayoutParams = {
            VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, // VkStructureType sType;
            nullptr,                                       // const void* pNext;
            0u,                                            // VkPipelineLayoutCreateFlags flags;
            0u,                                            // uint32_t setLayoutCount;
            nullptr,                                       // const VkDescriptorSetLayout* pSetLayouts;
            0u,                                            // uint32_t pushConstantRangeCount;
            nullptr                                        // const VkPushConstantRange* pPushConstantRanges;
        };

        m_pipelineLayout =
            PipelineLayoutWrapper(m_param->getPipelineConstructionType(), vk, vkDevice, &pipelineLayoutParams);
    }
}

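// Build a graphics pipeline through GraphicsPipelineWrapper. 'cache' is used in cache mode; the
// VkPipelineBinaryInfoKHR pointers are used in binary mode, either a single monolithic info or one info per
// pipeline library part. In binary mode it also verifies that vkGetPipelineKeyKHR returns a non-empty key.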
void GraphicsTestInstance::preparePipelineWrapper(GraphicsPipelineWrapper &gpw, VkPipelineCache cache = VK_NULL_HANDLE,
                                                  bool useMissShaders = false, bool useShaderModules = true,
                                                  VkPipelineBinaryInfoKHR *monolithicBinaryInfo = nullptr,
                                                  VkPipelineBinaryInfoKHR *vertexPartBinaryInfo = nullptr,
                                                  VkPipelineBinaryInfoKHR *preRasterizationPartBinaryInfo = nullptr,
                                                  VkPipelineBinaryInfoKHR *fragmentShaderPartBinaryInfo = nullptr,
                                                  VkPipelineBinaryInfoKHR *fragmentOutputPartBinaryInfo = nullptr)
{
    VkStencilOpState frontAndBack;
    deMemset(&frontAndBack, 0x00, sizeof(VkStencilOpState));

    static const VkPipelineDepthStencilStateCreateInfo defaultDepthStencilState{
        VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO, // VkStructureType sType;
        nullptr,                                                    // const void* pNext;
        0u,                                                         // VkPipelineDepthStencilStateCreateFlags flags;
        VK_TRUE,                                                    // VkBool32 depthTestEnable;
        VK_TRUE,                                                    // VkBool32 depthWriteEnable;
        VK_COMPARE_OP_LESS_OR_EQUAL,                                // VkCompareOp depthCompareOp;
        VK_FALSE,                                                   // VkBool32 depthBoundsTestEnable;
        VK_FALSE,                                                   // VkBool32 stencilTestEnable;
        frontAndBack,                                               // VkStencilOpState front;
        frontAndBack,                                               // VkStencilOpState back;
        0.0f,                                                       // float minDepthBounds;
        1.0f,                                                       // float maxDepthBounds;
    };

    static const VkVertexInputBindingDescription defaultVertexInputBindingDescription{
        0u,                          // uint32_t binding;
        sizeof(Vertex4RGBA),         // uint32_t strideInBytes;
        VK_VERTEX_INPUT_RATE_VERTEX, // VkVertexInputRate inputRate;
    };

    static const VkVertexInputAttributeDescription defaultVertexInputAttributeDescriptions[]{
        {
            0u,                            // uint32_t location;
            0u,                            // uint32_t binding;
            VK_FORMAT_R32G32B32A32_SFLOAT, // VkFormat format;
            0u                             // uint32_t offsetInBytes;
        },
        {
            1u,                            // uint32_t location;
            0u,                            // uint32_t binding;
            VK_FORMAT_R32G32B32A32_SFLOAT, // VkFormat format;
            offsetof(Vertex4RGBA, color),  // uint32_t offsetInBytes;
        }};

    static const VkPipelineVertexInputStateCreateInfo defaultVertexInputStateParams{
        VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, // VkStructureType sType;
        nullptr,                                                   // const void* pNext;
        0u,                                                        // VkPipelineVertexInputStateCreateFlags flags;
        1u,                                                        // uint32_t vertexBindingDescriptionCount;
        &defaultVertexInputBindingDescription,   // const VkVertexInputBindingDescription* pVertexBindingDescriptions;
        2u,                                      // uint32_t vertexAttributeDescriptionCount;
        defaultVertexInputAttributeDescriptions, // const VkVertexInputAttributeDescription* pVertexAttributeDescriptions;
    };

    const DeviceInterface &vk = m_context.getDeviceInterface();
    const VkDevice vkDevice   = m_context.getDevice();
    const std::string postfix = useMissShaders ? "_miss" : "";

    auto createModule = [&vk, vkDevice, &postfix](Context &context, std::string shaderName)
    { return ShaderWrapper(vk, vkDevice, context.getBinaryCollection().get(shaderName + postfix), 0); };

    // Bind shader stages
    ShaderWrapper vertShaderModule = createModule(m_context, "color_vert");
    ShaderWrapper fragShaderModule = createModule(m_context, "color_frag");
    ShaderWrapper tescShaderModule;
    ShaderWrapper teseShaderModule;
    ShaderWrapper geomShaderModule;

    if (m_param->getShaderFlags() & VK_SHADER_STAGE_GEOMETRY_BIT)
        geomShaderModule = createModule(m_context, "unused_geo");
    if (m_param->getShaderFlags() & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT)
        tescShaderModule = createModule(m_context, "basic_tcs");
    if (m_param->getShaderFlags() & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)
        teseShaderModule = createModule(m_context, "basic_tes");

    const std::vector<VkViewport> viewport{makeViewport(m_renderSize)};
    const std::vector<VkRect2D> scissor{makeRect2D(m_renderSize)};

    gpw.setDefaultTopology((m_param->getShaderFlags() & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) ?
                               VK_PRIMITIVE_TOPOLOGY_PATCH_LIST :
                               VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST)
        .setDefaultRasterizationState()
        .setDefaultColorBlendState()
        .setDefaultMultisampleState()
        .setMonolithicPipelineLayout(m_pipelineLayout)
        .disableShaderModules(!useShaderModules)
        .setupVertexInputState(&defaultVertexInputStateParams, 0, VK_NULL_HANDLE, 0, vertexPartBinaryInfo)
        .setupPreRasterizationShaderState3(viewport, scissor, m_pipelineLayout, *m_renderPassFramebuffer[0], 0u,
                                           vertShaderModule, 0, nullptr, tescShaderModule, 0, teseShaderModule, 0,
                                           geomShaderModule, 0, 0, 0, 0, 0, 0, 0, VK_NULL_HANDLE, 0,
                                           preRasterizationPartBinaryInfo)
        .setupFragmentShaderState2(m_pipelineLayout, *m_renderPassFramebuffer[0], 0u, fragShaderModule, 0,
                                   &defaultDepthStencilState, 0, 0, VK_NULL_HANDLE, 0, {}, fragmentShaderPartBinaryInfo)
        .setupFragmentOutputState(*m_renderPassFramebuffer[0], 0, 0, 0, VK_NULL_HANDLE, 0, {},
                                  fragmentOutputPartBinaryInfo)
        .buildPipeline(cache, VK_NULL_HANDLE, 0, {}, monolithicBinaryInfo);

    // reuse graphics tests to also check if pipeline key is valid when pipeline binaries are tested
    if ((m_param->getMode() == TestMode::BINARY) && useShaderModules)
    {
        if (m_param->getPipelineConstructionType() == PIPELINE_CONSTRUCTION_TYPE_MONOLITHIC)
        {
            auto &pipelineCreateInfo = gpw.getPipelineCreateInfo();
            auto pipelineKey         = m_binaries[0].getPipelineKey(&pipelineCreateInfo);
            if (pipelineKey.keySize == 0)
                TCU_FAIL("vkGetPipelineKeyKHR returned keySize == 0");
        }
        else
        {
            for (uint32_t i = 0; i < 4; ++i)
            {
                auto &pipelineCreateInfo = gpw.getPartialPipelineCreateInfo(i);
                auto pipelineKey         = m_binaries[i].getPipelineKey(&pipelineCreateInfo);
                if (pipelineKey.keySize == 0)
                    TCU_FAIL("vkGetPipelineKeyKHR returned keySize == 0");
            }
        }
    }
}

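// Binary-mode flow: build the first pipeline with VK_PIPELINE_CREATE_2_CAPTURE_DATA_BIT_KHR, capture its
// binaries (optionally round-tripping them through raw blob data), release the captured data when it is no
// longer needed, and then recreate the second pipeline from those binaries without shader modules.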
void GraphicsTestInstance::preparePipelinesForBinaries(bool createFromBlobs = false)
{
    DE_ASSERT(m_param->getMode() == TestMode::BINARY);

    const DeviceInterface &vk = m_context.getDeviceInterface();
    const VkDevice vkDevice   = m_context.getDevice();

    preparePipelineWrapper(*m_pipeline[PIPELINE_NDX_NO_BLOBS], VK_NULL_HANDLE, false, true);

    if (m_param->getPipelineConstructionType() == PIPELINE_CONSTRUCTION_TYPE_MONOLITHIC)
    {
        VkPipeline pipeline = m_pipeline[PIPELINE_NDX_NO_BLOBS]->getPipeline();
        m_binaries[0].createPipelineBinariesFromPipeline(pipeline);

        if (createFromBlobs)
        {
            // read binaries data out of the device
            std::vector<VkPipelineBinaryDataKHR> pipelineDataInfo;
            std::vector<std::vector<uint8_t>> pipelineDataBlob;
            m_binaries[0].getPipelineBinaryData(pipelineDataInfo, pipelineDataBlob);

            // clear pipeline binaries objects
            m_binaries[0].deletePipelineBinariesKeepKeys();

            // recreate binaries from data blobs
            m_binaries[0].createPipelineBinariesFromBinaryData(pipelineDataInfo);
        }
        else
        {
            VkReleaseCapturedPipelineDataInfoKHR releaseCapturedPipelineDataInfo = initVulkanStructure();
            releaseCapturedPipelineDataInfo.pipeline = pipeline;
            vk.releaseCapturedPipelineDataKHR(vkDevice, &releaseCapturedPipelineDataInfo, nullptr);
        }

        VkPipelineBinaryInfoKHR pipelineBinaryInfo = m_binaries[0].preparePipelineBinaryInfo();
        preparePipelineWrapper(*m_pipeline[PIPELINE_NDX_USE_BLOBS], VK_NULL_HANDLE, false, false, &pipelineBinaryInfo);
    }
    else
    {
        for (uint32_t i = 0; i < 4; ++i)
        {
            VkPipeline partialPipeline = m_pipeline[PIPELINE_NDX_NO_BLOBS]->getPartialPipeline(i);
            m_binaries[i].createPipelineBinariesFromPipeline(partialPipeline);

            if (createFromBlobs)
            {
                // read binaries data out of the device
                std::vector<VkPipelineBinaryDataKHR> pipelineDataInfo;
                std::vector<std::vector<uint8_t>> pipelineDataBlob;
                m_binaries[i].getPipelineBinaryData(pipelineDataInfo, pipelineDataBlob);

                // clear pipeline binaries objects
                m_binaries[i].deletePipelineBinariesKeepKeys();

                // recreate binaries from data blobs
                m_binaries[i].createPipelineBinariesFromBinaryData(pipelineDataInfo);
            }
            else
            {
                VkReleaseCapturedPipelineDataInfoKHR releaseCapturedPipelineDataInfo = initVulkanStructure();
                releaseCapturedPipelineDataInfo.pipeline = partialPipeline;
                vk.releaseCapturedPipelineDataKHR(vkDevice, &releaseCapturedPipelineDataInfo, nullptr);
            }
        }

        VkPipelineBinaryInfoKHR pipelinePartsBinaryInfo[4];
        VkPipelineBinaryInfoKHR *binaryInfoPtr[4];
        deMemset(binaryInfoPtr, 0, 4 * sizeof(nullptr));

        for (uint32_t i = 0; i < 4; ++i)
        {
            if (m_binaries[i].getBinariesCount() == 0)
                continue;
            pipelinePartsBinaryInfo[i] = m_binaries[i].preparePipelineBinaryInfo();
            binaryInfoPtr[i]           = &pipelinePartsBinaryInfo[i];
        };

        preparePipelineWrapper(*m_pipeline[PIPELINE_NDX_USE_BLOBS], VK_NULL_HANDLE, false, false, DE_NULL,
                               binaryInfoPtr[0], binaryInfoPtr[1], binaryInfoPtr[2], binaryInfoPtr[3]);
    }
}

void GraphicsTestInstance::preparePipelines(void)
{
    if (m_param->getMode() == TestMode::CACHE)
    {
        preparePipelineWrapper(*m_pipeline[PIPELINE_NDX_NO_BLOBS], *m_cache);
        preparePipelineWrapper(*m_pipeline[PIPELINE_NDX_USE_BLOBS], *m_cache);
    }
    else
        preparePipelinesForBinaries();
}

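// Record one render pass: clear the attachments, bind the given pipeline and the vertex buffer, and draw the quads.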
void GraphicsTestInstance::prepareRenderPass(const RenderPassWrapper &renderPassFramebuffer,
                                             GraphicsPipelineWrapper &pipeline)
{
    const DeviceInterface &vk = m_context.getDeviceInterface();

    const VkClearValue attachmentClearValues[2]{
        defaultClearValue(m_colorFormat),
        defaultClearValue(m_depthFormat),
    };

    renderPassFramebuffer.begin(vk, *m_cmdBuffer, makeRect2D(0, 0, m_renderSize.x(), m_renderSize.y()), 2u,
                                attachmentClearValues);

    pipeline.bind(*m_cmdBuffer);
    VkDeviceSize offsets = 0u;
    vk.cmdBindVertexBuffers(*m_cmdBuffer, 0u, 1u, &m_vertexBuffer.get(), &offsets);
    vk.cmdDraw(*m_cmdBuffer, (uint32_t)m_vertices.size(), 1u, 0u, 0u);

    renderPassFramebuffer.end(vk, *m_cmdBuffer);
}

void GraphicsTestInstance::prepareCommandBuffer(void)
{
    const DeviceInterface &vk = m_context.getDeviceInterface();

    preparePipelines();

    beginCommandBuffer(vk, *m_cmdBuffer, 0u);

    vk.cmdPipelineBarrier(*m_cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
                          VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
                          (VkDependencyFlags)0, 0u, nullptr, 0u, nullptr, DE_LENGTH_OF_ARRAY(m_imageLayoutBarriers),
                          m_imageLayoutBarriers);

    prepareRenderPass(m_renderPassFramebuffer[PIPELINE_NDX_NO_BLOBS], *m_pipeline[PIPELINE_NDX_NO_BLOBS]);

    // After the first render pass, the images are in correct layouts

    prepareRenderPass(m_renderPassFramebuffer[PIPELINE_NDX_USE_BLOBS], *m_pipeline[PIPELINE_NDX_USE_BLOBS]);

    endCommandBuffer(vk, *m_cmdBuffer);
}

tcu::TestStatus GraphicsTestInstance::verifyTestResult(void)
{
    const DeviceInterface &vk       = m_context.getDeviceInterface();
    const VkDevice vkDevice         = m_context.getDevice();
    const uint32_t queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();

    const VkQueue queue = m_context.getUniversalQueue();
    de::MovePtr<tcu::TextureLevel> resultNoCache =
        readColorAttachment(vk, vkDevice, queue, queueFamilyIndex, m_context.getDefaultAllocator(),
                            *m_colorImage[PIPELINE_NDX_NO_BLOBS], m_colorFormat, m_renderSize);
    de::MovePtr<tcu::TextureLevel> resultCache =
        readColorAttachment(vk, vkDevice, queue, queueFamilyIndex, m_context.getDefaultAllocator(),
                            *m_colorImage[PIPELINE_NDX_USE_BLOBS], m_colorFormat, m_renderSize);

    bool compareOk = tcu::intThresholdCompare(m_context.getTestContext().getLog(), "IntImageCompare",
                                              "Image comparison", resultNoCache->getAccess(), resultCache->getAccess(),
                                              tcu::UVec4(1, 1, 1, 1), tcu::COMPARE_LOG_RESULT);

    if (compareOk)
        return tcu::TestStatus::pass("Render images w/o cached pipeline match.");
    else
        return tcu::TestStatus::fail("Render Images mismatch.");
}

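// Compute variant: squares 128 vec4 values with both pipelines and compares the output buffers byte by byte.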
class ComputeTest : public BaseTestCase
{
public:
    ComputeTest(tcu::TestContext &testContext, const std::string &name, const TestParam *param)
        : BaseTestCase(testContext, name, param)
    {
    }
    virtual ~ComputeTest(void) = default;
    virtual void initPrograms(SourceCollections &programCollection) const;
    virtual TestInstance *createInstance(Context &context) const;
};

class ComputeTestInstance : public BaseTestInstance
{
public:
    ComputeTestInstance(Context &context, const TestParam *param);
    virtual ~ComputeTestInstance(void) = default;
    virtual void prepareCommandBuffer(void);

protected:
    virtual tcu::TestStatus verifyTestResult(void);
    void buildBuffers(void);
    void buildDescriptorSets(uint32_t ndx);
    void buildShader(void);
    void buildPipeline(uint32_t ndx);

protected:
    Move<VkBuffer> m_inputBuf;
    de::MovePtr<Allocation> m_inputBufferAlloc;
    Move<VkShaderModule> m_computeShaderModule;

    Move<VkBuffer> m_outputBuf[PIPELINE_NDX_COUNT];
    de::MovePtr<Allocation> m_outputBufferAlloc[PIPELINE_NDX_COUNT];

    Move<VkDescriptorPool> m_descriptorPool[PIPELINE_NDX_COUNT];
    Move<VkDescriptorSetLayout> m_descriptorSetLayout[PIPELINE_NDX_COUNT];
    Move<VkDescriptorSet> m_descriptorSet[PIPELINE_NDX_COUNT];

    Move<VkPipelineLayout> m_pipelineLayout[PIPELINE_NDX_COUNT];
    Move<VkPipeline> m_pipeline[PIPELINE_NDX_COUNT];
};

void ComputeTest::initPrograms(SourceCollections &programCollection) const
{
    programCollection.glslSources.add("basic_compute") << glu::ComputeSource(
        "#version 310 es\n"
        "layout(local_size_x = 1) in;\n"
        "layout(std430) buffer;\n"
        "layout(binding = 0) readonly buffer Input0\n"
        "{\n"
        "  vec4 elements[];\n"
        "} input_data0;\n"
        "layout(binding = 1) writeonly buffer Output\n"
        "{\n"
        "  vec4 elements[];\n"
        "} output_data;\n"
        "void main()\n"
        "{\n"
        "  uint ident = gl_GlobalInvocationID.x;\n"
        "  output_data.elements[ident] = input_data0.elements[ident] * input_data0.elements[ident];\n"
        "}");
}

TestInstance *ComputeTest::createInstance(Context &context) const
{
    return new ComputeTestInstance(context, &m_param);
}

void ComputeTestInstance::buildBuffers(void)
{
    const DeviceInterface &vk = m_context.getDeviceInterface();
    const VkDevice vkDevice   = m_context.getDevice();

    // Create buffer object, allocate storage, and generate input data
    const VkDeviceSize size = sizeof(tcu::Vec4) * 128u;
    m_inputBuf = createBufferAndBindMemory(m_context, size, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, &m_inputBufferAlloc);

    // Initialize input buffer
    tcu::Vec4 *pVec = reinterpret_cast<tcu::Vec4 *>(m_inputBufferAlloc->getHostPtr());
    for (uint32_t ndx = 0u; ndx < 128u; ndx++)
    {
        for (uint32_t component = 0u; component < 4u; component++)
            pVec[ndx][component] = (float)(ndx * (component + 1u));
    }
    flushAlloc(vk, vkDevice, *m_inputBufferAlloc);

    // Clear the output buffer
    for (uint32_t ndx = 0; ndx < PIPELINE_NDX_COUNT; ndx++)
    {
        m_outputBuf[ndx] =
            createBufferAndBindMemory(m_context, size, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, &m_outputBufferAlloc[ndx]);

        pVec = reinterpret_cast<tcu::Vec4 *>(m_outputBufferAlloc[ndx]->getHostPtr());

        for (uint32_t i = 0; i < (size / sizeof(tcu::Vec4)); i++)
            pVec[i] = tcu::Vec4(0.0f);

        flushAlloc(vk, vkDevice, *m_outputBufferAlloc[ndx]);
    }
}

void ComputeTestInstance::buildDescriptorSets(uint32_t ndx)
{
    const DeviceInterface &vk = m_context.getDeviceInterface();
    const VkDevice vkDevice   = m_context.getDevice();

    // Create descriptor set layout
    DescriptorSetLayoutBuilder descLayoutBuilder;

    for (uint32_t bindingNdx = 0u; bindingNdx < 2u; bindingNdx++)
        descLayoutBuilder.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT);

    m_descriptorSetLayout[ndx] = descLayoutBuilder.build(vk, vkDevice);

    std::vector<VkDescriptorBufferInfo> descriptorInfos;
    descriptorInfos.push_back(makeDescriptorBufferInfo(*m_inputBuf, 0u, sizeof(tcu::Vec4) * 128u));
    descriptorInfos.push_back(makeDescriptorBufferInfo(*m_outputBuf[ndx], 0u, sizeof(tcu::Vec4) * 128u));

    // Create descriptor pool
    m_descriptorPool[ndx] = DescriptorPoolBuilder()
                                .addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 2u)
                                .build(vk, vkDevice, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);

    // Create descriptor set
    const VkDescriptorSetAllocateInfo descriptorSetAllocInfo = {
        VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO, // VkStructureType sType;
        nullptr,                                        // const void* pNext;
        *m_descriptorPool[ndx],                         // VkDescriptorPool descriptorPool;
        1u,                                             // uint32_t setLayoutCount;
        &m_descriptorSetLayout[ndx].get(),              // const VkDescriptorSetLayout* pSetLayouts;
    };
    m_descriptorSet[ndx] = allocateDescriptorSet(vk, vkDevice, &descriptorSetAllocInfo);

    DescriptorSetUpdateBuilder builder;
    for (uint32_t descriptorNdx = 0u; descriptorNdx < 2u; descriptorNdx++)
    {
        builder.writeSingle(*m_descriptorSet[ndx], DescriptorSetUpdateBuilder::Location::binding(descriptorNdx),
                            VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &descriptorInfos[descriptorNdx]);
    }
    builder.update(vk, vkDevice);
}

void ComputeTestInstance::buildShader(void)
{
    const DeviceInterface &vk = m_context.getDeviceInterface();
    const VkDevice vkDevice   = m_context.getDevice();

    // Create compute shader
    VkShaderModuleCreateInfo shaderModuleCreateInfo = {
        VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,                    // VkStructureType sType;
        nullptr,                                                        // const void* pNext;
        0u,                                                             // VkShaderModuleCreateFlags flags;
        m_context.getBinaryCollection().get("basic_compute").getSize(), // uintptr_t codeSize;
        (uint32_t *)m_context.getBinaryCollection()
            .get("basic_compute")
            .getBinary(), // const uint32_t* pCode;
    };
    m_computeShaderModule = createShaderModule(vk, vkDevice, &shaderModuleCreateInfo);
}

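// Create the compute pipeline for the given index. In cache mode both pipelines go through m_cache; in binary
// mode the first pipeline captures binaries and the second one is recreated from them with a null shader module.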
void ComputeTestInstance::buildPipeline(uint32_t ndx)
{
    const DeviceInterface &vk = m_context.getDeviceInterface();
    const VkDevice vkDevice   = m_context.getDevice();

    // Create compute pipeline layout
    const VkPipelineLayoutCreateInfo pipelineLayoutCreateInfo = {
        VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, // VkStructureType sType;
        nullptr,                                       // const void* pNext;
        0u,                                            // VkPipelineLayoutCreateFlags flags;
        1u,                                            // uint32_t setLayoutCount;
        &m_descriptorSetLayout[ndx].get(),             // const VkDescriptorSetLayout* pSetLayouts;
        0u,                                            // uint32_t pushConstantRangeCount;
        nullptr,                                       // const VkPushConstantRange* pPushConstantRanges;
    };

    m_pipelineLayout[ndx] = createPipelineLayout(vk, vkDevice, &pipelineLayoutCreateInfo);

    const VkPipelineShaderStageCreateInfo stageCreateInfo = {
        VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // VkStructureType sType;
        nullptr,                                             // const void* pNext;
        0u,                                                  // VkPipelineShaderStageCreateFlags flags;
        VK_SHADER_STAGE_COMPUTE_BIT,                         // VkShaderStageFlagBits stage;
        *m_computeShaderModule,                              // VkShaderModule module;
        "main",                                              // const char* pName;
        nullptr,                                             // const VkSpecializationInfo* pSpecializationInfo;
    };

    VkPipelineCreateFlags2CreateInfoKHR pipelineFlags2CreateInfo = initVulkanStructure();
    pipelineFlags2CreateInfo.flags = VK_PIPELINE_CREATE_2_CAPTURE_DATA_BIT_KHR;
    const void *pNext = (m_param->getMode() == TestMode::BINARY) ? &pipelineFlags2CreateInfo : nullptr;
    VkComputePipelineCreateInfo pipelineCreateInfo{
        VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO, // VkStructureType sType;
        pNext,                                          // const void* pNext;
        0,                                              // VkPipelineCreateFlags flags;
        stageCreateInfo,                                // VkPipelineShaderStageCreateInfo stage;
        *m_pipelineLayout[ndx],                         // VkPipelineLayout layout;
        VK_NULL_HANDLE,                                 // VkPipeline basePipelineHandle;
        0u,                                             // int32_t basePipelineIndex;
    };

    if (m_param->getMode() == TestMode::CACHE)
        m_pipeline[ndx] = createComputePipeline(vk, vkDevice, *m_cache, &pipelineCreateInfo);
    else
    {
        if (ndx == PIPELINE_NDX_NO_BLOBS)
        {
            auto pipelineKey = m_binaries[0].getPipelineKey(&pipelineCreateInfo);
            if (pipelineKey.keySize == 0)
                TCU_FAIL("vkGetPipelineKeyKHR returned keySize == 0");

            // create pipeline
            m_pipeline[ndx] = createComputePipeline(vk, vkDevice, VK_NULL_HANDLE, &pipelineCreateInfo);

            // prepare pipeline binaries
            m_binaries[0].createPipelineBinariesFromPipeline(*m_pipeline[ndx]);

            if (m_param->getUseBinariesFromBinaryData())
            {
                // read binaries data out of the device
                std::vector<VkPipelineBinaryDataKHR> pipelineDataInfo;
                std::vector<std::vector<uint8_t>> pipelineDataBlob;
                m_binaries[0].getPipelineBinaryData(pipelineDataInfo, pipelineDataBlob);

                // clear pipeline binaries objects
                m_binaries[0].deletePipelineBinariesKeepKeys();

                // recreate binaries from data blobs
                m_binaries[0].createPipelineBinariesFromBinaryData(pipelineDataInfo);
            }
        }
        else
        {
            // create pipeline using binary data and use pipelineCreateInfo with no shader stage
            VkPipelineBinaryInfoKHR pipelineBinaryInfo = m_binaries[0].preparePipelineBinaryInfo();
            pipelineCreateInfo.pNext                   = &pipelineBinaryInfo;
            pipelineCreateInfo.stage.module            = VK_NULL_HANDLE;
            m_pipeline[ndx] = createComputePipeline(vk, vkDevice, VK_NULL_HANDLE, &pipelineCreateInfo);
        }
    }
}

ComputeTestInstance::ComputeTestInstance(Context &context, const TestParam *param) : BaseTestInstance(context, param)
{
    buildBuffers();

    buildDescriptorSets(PIPELINE_NDX_NO_BLOBS);

    buildDescriptorSets(PIPELINE_NDX_USE_BLOBS);

    buildShader();

    buildPipeline(PIPELINE_NDX_NO_BLOBS);

    buildPipeline(PIPELINE_NDX_USE_BLOBS);
}

void ComputeTestInstance::prepareCommandBuffer(void)
{
    const DeviceInterface &vk = m_context.getDeviceInterface();

    beginCommandBuffer(vk, *m_cmdBuffer, 0u);

    for (uint32_t ndx = 0; ndx < PIPELINE_NDX_COUNT; ndx++)
    {
        vk.cmdBindPipeline(*m_cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *m_pipeline[ndx]);
        vk.cmdBindDescriptorSets(*m_cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *m_pipelineLayout[ndx], 0u, 1u,
                                 &m_descriptorSet[ndx].get(), 0u, nullptr);
        vk.cmdDispatch(*m_cmdBuffer, 128u, 1u, 1u);
    }

    endCommandBuffer(vk, *m_cmdBuffer);
}

tcu::TestStatus ComputeTestInstance::verifyTestResult(void)
{
    const DeviceInterface &vk = m_context.getDeviceInterface();
    const VkDevice vkDevice   = m_context.getDevice();

    // Read the content of output buffers
    invalidateAlloc(vk, vkDevice, *m_outputBufferAlloc[PIPELINE_NDX_NO_BLOBS]);

    invalidateAlloc(vk, vkDevice, *m_outputBufferAlloc[PIPELINE_NDX_USE_BLOBS]);
    // Compare the content
    uint8_t *bufNoCache = reinterpret_cast<uint8_t *>(m_outputBufferAlloc[PIPELINE_NDX_NO_BLOBS]->getHostPtr());
    uint8_t *bufCached  = reinterpret_cast<uint8_t *>(m_outputBufferAlloc[PIPELINE_NDX_USE_BLOBS]->getHostPtr());
    for (uint32_t ndx = 0u; ndx < sizeof(tcu::Vec4) * 128u; ndx++)
    {
        if (bufNoCache[ndx] != bufCached[ndx])
        {
            return tcu::TestStatus::fail("Output buffers w/o pipeline blobs mismatch.");
        }
    }

    return tcu::TestStatus::pass("Output buffers w/o pipeline blobs match.");
}

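// Feed the second pipeline from serialized blobs: in cache mode, a new VkPipelineCache initialized with the
// data retrieved from the first cache; in binary mode, pipeline binaries recreated from their raw data.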
class PipelineFromBlobsTest : public GraphicsTest
{
public:
    PipelineFromBlobsTest(tcu::TestContext &testContext, const std::string &name, const TestParam *param);
    virtual ~PipelineFromBlobsTest(void) = default;
    virtual TestInstance *createInstance(Context &context) const;
};

PipelineFromBlobsTest::PipelineFromBlobsTest(tcu::TestContext &testContext, const std::string &name,
                                             const TestParam *param)
    : GraphicsTest(testContext, name, param)
{
}

class PipelineFromBlobsTestInstance : public GraphicsTestInstance
{
public:
    PipelineFromBlobsTestInstance(Context &context, const TestParam *param);
    virtual ~PipelineFromBlobsTestInstance(void);

protected:
    void preparePipelines(void);

protected:
    Move<VkPipelineCache> m_newCache;
    uint8_t *m_data;
};

TestInstance *PipelineFromBlobsTest::createInstance(Context &context) const
{
    return new PipelineFromBlobsTestInstance(context, &m_param);
}

PipelineFromBlobsTestInstance::PipelineFromBlobsTestInstance(Context &context, const TestParam *param)
    : GraphicsTestInstance(context, param)
    , m_data(DE_NULL)
{
    const DeviceInterface &vk = m_context.getDeviceInterface();
    const VkDevice vkDevice   = m_context.getDevice();

    // Create more pipeline caches
    if (m_param->getMode() == TestMode::CACHE)
    {
        size_t dataSize = 0u;

        VK_CHECK(vk.getPipelineCacheData(vkDevice, *m_cache, (uintptr_t *)&dataSize, nullptr));

        m_data = new uint8_t[dataSize];
        DE_ASSERT(m_data);
        VK_CHECK(vk.getPipelineCacheData(vkDevice, *m_cache, (uintptr_t *)&dataSize, (void *)m_data));

        const VkPipelineCacheCreateInfo pipelineCacheCreateInfo = {
            VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO, // VkStructureType sType;
            nullptr,                                      // const void* pNext;
            0u,                                           // VkPipelineCacheCreateFlags flags;
            dataSize,                                     // uintptr_t initialDataSize;
            m_data,                                       // const void* pInitialData;
        };
        m_newCache = createPipelineCache(vk, vkDevice, &pipelineCacheCreateInfo);
    }
}

PipelineFromBlobsTestInstance::~PipelineFromBlobsTestInstance(void)
{
    delete[] m_data;
}

void PipelineFromBlobsTestInstance::preparePipelines(void)
{
    if (m_param->getMode() == TestMode::CACHE)
    {
        preparePipelineWrapper(*m_pipeline[PIPELINE_NDX_NO_BLOBS], *m_cache);
        preparePipelineWrapper(*m_pipeline[PIPELINE_NDX_USE_BLOBS], *m_newCache);
    }
    else
        preparePipelinesForBinaries(true);
}

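// Retrieve the pipeline cache data with a buffer one byte too small, expect VK_INCOMPLETE, and then build
// the second pipeline with a cache created from that truncated data.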
class PipelineFromIncompleteBlobsTest : public GraphicsTest
{
public:
    PipelineFromIncompleteBlobsTest(tcu::TestContext &testContext, const std::string &name, const TestParam *param);
    virtual ~PipelineFromIncompleteBlobsTest(void) = default;
    virtual TestInstance *createInstance(Context &context) const;
};

PipelineFromIncompleteBlobsTest::PipelineFromIncompleteBlobsTest(tcu::TestContext &testContext, const std::string &name,
                                                                 const TestParam *param)
    : GraphicsTest(testContext, name, param)
{
}

class PipelineFromIncompleteBlobsTestInstance : public GraphicsTestInstance
{
public:
    PipelineFromIncompleteBlobsTestInstance(Context &context, const TestParam *param);
    virtual ~PipelineFromIncompleteBlobsTestInstance(void);

protected:
    void preparePipelines(void);

protected:
    Move<VkPipelineCache> m_newCache;
    uint8_t *m_data;
};

TestInstance *PipelineFromIncompleteBlobsTest::createInstance(Context &context) const
{
    return new PipelineFromIncompleteBlobsTestInstance(context, &m_param);
}

PipelineFromIncompleteBlobsTestInstance::PipelineFromIncompleteBlobsTestInstance(Context &context,
                                                                                 const TestParam *param)
    : GraphicsTestInstance(context, param)
    , m_data(nullptr)
{
    const DeviceInterface &vk = m_context.getDeviceInterface();
    const VkDevice vkDevice   = m_context.getDevice();

    // Retrieve the cache data with a buffer that is one byte too small and use it to initialize a second cache
    size_t dataSize = 0u;
    VK_CHECK(vk.getPipelineCacheData(vkDevice, *m_cache, (uintptr_t *)&dataSize, nullptr));

    if (dataSize == 0)
        TCU_THROW(NotSupportedError, "Empty pipeline cache - unable to test");

    dataSize--;

    m_data = new uint8_t[dataSize];
    DE_ASSERT(m_data);
    if (vk.getPipelineCacheData(vkDevice, *m_cache, (uintptr_t *)&dataSize, (void *)m_data) != VK_INCOMPLETE)
        TCU_THROW(TestError, "GetPipelineCacheData should return VK_INCOMPLETE state!");

    const VkPipelineCacheCreateInfo pipelineCacheCreateInfo{
        VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO, // VkStructureType sType;
        nullptr,                                      // const void* pNext;
        0u,                                           // VkPipelineCacheCreateFlags flags;
        dataSize,                                     // uintptr_t initialDataSize;
        m_data,                                       // const void* pInitialData;
    };
    m_newCache = createPipelineCache(vk, vkDevice, &pipelineCacheCreateInfo);
}

PipelineFromIncompleteBlobsTestInstance::~PipelineFromIncompleteBlobsTestInstance(void)
{
    delete[] m_data;
}

void PipelineFromIncompleteBlobsTestInstance::preparePipelines(void)
{
    preparePipelineWrapper(*m_pipeline[PIPELINE_NDX_NO_BLOBS], *m_cache);
    preparePipelineWrapper(*m_pipeline[PIPELINE_NDX_USE_BLOBS], *m_newCache);
}

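// Kinds of pipeline caches used as merge sources and destination: empty, initialized from retrieved data,
// warmed up with pipelines that hit or miss the main test pipelines, or itself the result of a merge.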
enum class MergeBlobsType
{
    EMPTY = 0,
    FROM_DATA,
    HIT,
    MISS,
    MISS_AND_HIT,
    MERGED,

    LAST = MERGED
};

std::string getMergeBlobsTypeStr(MergeBlobsType type)
{
    switch (type)
    {
    case MergeBlobsType::EMPTY:
        return "empty";
    case MergeBlobsType::FROM_DATA:
        return "from_data";
    case MergeBlobsType::HIT:
        return "hit";
    case MergeBlobsType::MISS_AND_HIT:
        return "misshit";
    case MergeBlobsType::MISS:
        return "miss";
    case MergeBlobsType::MERGED:
        return "merged";
    }
    TCU_FAIL("unhandled merge cache type");
}

std::string getMergeBlobsTypesStr(const std::vector<MergeBlobsType> &types)
{
    std::string ret;
    for (size_t idx = 0; idx < types.size(); ++idx)
    {
        if (ret.size())
            ret += '_';
        ret += getMergeBlobsTypeStr(types[idx]);
    }
    return ret;
}

class MergeBlobsTestParam
{
public:
    MergeBlobsType destBlobsType;
    std::vector<MergeBlobsType> srcBlobTypes;
};

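// Merges several source caches of the configured types into a destination cache of the configured type
// and creates the second test pipeline from the merged cache.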
class MergeBlobsTest : public GraphicsTest
{
public:
    MergeBlobsTest(tcu::TestContext &testContext, const std::string &name, const TestParam *param,
                   const MergeBlobsTestParam *mergeBlobsParam)
        : GraphicsTest(testContext, name, param)
        , m_mergeBlobsParam(*mergeBlobsParam)
    {
    }
    virtual ~MergeBlobsTest(void)
    {
    }
    virtual TestInstance *createInstance(Context &context) const;

private:
    const MergeBlobsTestParam m_mergeBlobsParam;
};

class MergeBlobsTestInstance : public GraphicsTestInstance
{
public:
    MergeBlobsTestInstance(Context &context, const TestParam *param, const MergeBlobsTestParam *mergeBlobsParam);

private:
    Move<VkPipelineCache> createPipelineCache(const InstanceInterface &vki, const DeviceInterface &vk,
                                              VkPhysicalDevice physicalDevice, VkDevice device, MergeBlobsType type);

protected:
    void preparePipelines(void);

protected:
    const MergeBlobsTestParam m_mergeBlobsParam;
    Move<VkPipelineCache> m_cacheMerged;
    de::MovePtr<PipelineBinaryWrapper> m_secondBinaries;
};

TestInstance *MergeBlobsTest::createInstance(Context &context) const
{
    return new MergeBlobsTestInstance(context, &m_param, &m_mergeBlobsParam);
}

MergeBlobsTestInstance::MergeBlobsTestInstance(Context &context, const TestParam *param,
                                               const MergeBlobsTestParam *mergeBlobsParam)
    : GraphicsTestInstance(context, param)
    , m_mergeBlobsParam(*mergeBlobsParam)
{
    const InstanceInterface &vki          = context.getInstanceInterface();
    const DeviceInterface &vk             = m_context.getDeviceInterface();
    const VkPhysicalDevice physicalDevice = context.getPhysicalDevice();
    const VkDevice vkDevice               = m_context.getDevice();

    // This test can't be executed for pipeline binaries due to VUID-VkPipelineBinaryInfoKHR-binaryCount-09603
    DE_ASSERT(m_param->getMode() == TestMode::CACHE);

    // Create the merge destination cache
    m_cacheMerged = createPipelineCache(vki, vk, physicalDevice, vkDevice, mergeBlobsParam->destBlobsType);

    // Create the source caches
    std::vector<VkPipelineCache> sourceCaches(mergeBlobsParam->srcBlobTypes.size());
    typedef de::SharedPtr<Move<VkPipelineCache>> PipelineCachePtr;
    std::vector<PipelineCachePtr> sourceCachePtrs(sourceCaches.size());
    {
        for (size_t sourceIdx = 0; sourceIdx < mergeBlobsParam->srcBlobTypes.size(); sourceIdx++)
        {
            // vk::Move is not copyable, so create it on the heap and wrap it in a de::SharedPtr
            PipelineCachePtr pipelineCachePtr(new Move<VkPipelineCache>());
            *pipelineCachePtr =
                createPipelineCache(vki, vk, physicalDevice, vkDevice, mergeBlobsParam->srcBlobTypes[sourceIdx]);

            sourceCachePtrs[sourceIdx] = pipelineCachePtr;
            sourceCaches[sourceIdx]    = **pipelineCachePtr;
        }
    }

    // Merge the caches
    VK_CHECK(
        vk.mergePipelineCaches(vkDevice, *m_cacheMerged, static_cast<uint32_t>(sourceCaches.size()), &sourceCaches[0]));
}

Move<VkPipelineCache> MergeBlobsTestInstance::createPipelineCache(const InstanceInterface &vki,
                                                                  const DeviceInterface &vk,
                                                                  VkPhysicalDevice physicalDevice, VkDevice device,
                                                                  MergeBlobsType type)
{
    VkPipelineCacheCreateInfo pipelineCacheCreateInfo{
        VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO, // VkStructureType sType;
        nullptr,                                      // const void* pNext;
        0u,                                           // VkPipelineCacheCreateFlags flags;
        0u,                                           // uintptr_t initialDataSize;
        nullptr,                                      // const void* pInitialData;
    };

    GraphicsPipelineWrapper localPipeline(vki, vk, physicalDevice, device, m_context.getDeviceExtensions(),
                                          m_param->getPipelineConstructionType());
    GraphicsPipelineWrapper localMissPipeline(vki, vk, physicalDevice, device, m_context.getDeviceExtensions(),
                                              m_param->getPipelineConstructionType());

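    // Build a cache of the requested kind: HIT and MISS warm the new cache by compiling pipelines that use the
    // test's shaders or the dedicated "miss" shaders, and MERGED combines the other variants via vkMergePipelineCaches.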
    switch (type)
    {
    case MergeBlobsType::EMPTY:
    {
        return vk::createPipelineCache(vk, device, &pipelineCacheCreateInfo);
    }
    case MergeBlobsType::FROM_DATA:
    {
        // Create a cache with init data from m_cache
        size_t dataSize = 0u;
        VK_CHECK(vk.getPipelineCacheData(device, *m_cache, (uintptr_t *)&dataSize, nullptr));

        std::vector<uint8_t> data(dataSize);
        VK_CHECK(vk.getPipelineCacheData(device, *m_cache, (uintptr_t *)&dataSize, &data[0]));

        pipelineCacheCreateInfo.initialDataSize = data.size();
        pipelineCacheCreateInfo.pInitialData    = &data[0];
        return vk::createPipelineCache(vk, device, &pipelineCacheCreateInfo);
    }
    case MergeBlobsType::HIT:
    {
        Move<VkPipelineCache> ret = createPipelineCache(vki, vk, physicalDevice, device, MergeBlobsType::EMPTY);

        preparePipelineWrapper(localPipeline, *ret);

        return ret;
    }
    case MergeBlobsType::MISS:
    {
        Move<VkPipelineCache> ret = createPipelineCache(vki, vk, physicalDevice, device, MergeBlobsType::EMPTY);

        preparePipelineWrapper(localMissPipeline, *ret, true);

        return ret;
    }
    case MergeBlobsType::MISS_AND_HIT:
    {
        Move<VkPipelineCache> ret = createPipelineCache(vki, vk, physicalDevice, device, MergeBlobsType::EMPTY);

        preparePipelineWrapper(localPipeline, *ret);
        preparePipelineWrapper(localMissPipeline, *ret, true);

        return ret;
    }
    case MergeBlobsType::MERGED:
    {
        Move<VkPipelineCache> cache1 = createPipelineCache(vki, vk, physicalDevice, device, MergeBlobsType::FROM_DATA);
        Move<VkPipelineCache> cache2 = createPipelineCache(vki, vk, physicalDevice, device, MergeBlobsType::HIT);
        Move<VkPipelineCache> cache3 = createPipelineCache(vki, vk, physicalDevice, device, MergeBlobsType::MISS);

        const VkPipelineCache sourceCaches[] = {*cache1, *cache2, *cache3};

        Move<VkPipelineCache> ret = createPipelineCache(vki, vk, physicalDevice, device, MergeBlobsType::EMPTY);

        // Merge the caches
        VK_CHECK(vk.mergePipelineCaches(device, *ret, DE_LENGTH_OF_ARRAY(sourceCaches), sourceCaches));

        return ret;
    }
    }
    TCU_FAIL("unhandled merge cache type");
}

void MergeBlobsTestInstance::preparePipelines(void)
{
    preparePipelineWrapper(*m_pipeline[PIPELINE_NDX_NO_BLOBS], *m_cache);

    // Create pipeline from merged cache
    preparePipelineWrapper(*m_pipeline[PIPELINE_NDX_USE_BLOBS], *m_cacheMerged);
}

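// Retrieves the pipeline cache data and validates the header fields (length, version, vendor ID,
// device ID and pipeline cache UUID) against the device properties.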
class CacheHeaderTest : public GraphicsTest
{
public:
    CacheHeaderTest(tcu::TestContext &testContext, const std::string &name, const TestParam *param)
        : GraphicsTest(testContext, name, param)
    {
    }
    virtual ~CacheHeaderTest(void)
    {
    }
    virtual TestInstance *createInstance(Context &context) const;
};

class CacheHeaderTestInstance : public GraphicsTestInstance
{
public:
    CacheHeaderTestInstance(Context &context, const TestParam *param);
    virtual ~CacheHeaderTestInstance(void);

protected:
    uint8_t *m_data;

    struct CacheHeader
    {
        uint32_t HeaderLength;
        uint32_t HeaderVersion;
        uint32_t VendorID;
        uint32_t DeviceID;
        uint8_t PipelineCacheUUID[VK_UUID_SIZE];
    } m_header;
};

TestInstance *CacheHeaderTest::createInstance(Context &context) const
{
    return new CacheHeaderTestInstance(context, &m_param);
}

CacheHeaderTestInstance::CacheHeaderTestInstance(Context &context, const TestParam *param)
    : GraphicsTestInstance(context, param)
    , m_data(nullptr)
{
    const DeviceInterface &vk = m_context.getDeviceInterface();
    const VkDevice vkDevice   = m_context.getDevice();

    // Retrieve the cache data and validate its header
    {
        size_t dataSize = 0u;
        VK_CHECK(vk.getPipelineCacheData(vkDevice, *m_cache, (uintptr_t *)&dataSize, nullptr));

        if (dataSize < sizeof(m_header))
            TCU_THROW(TestError, "Pipeline cache size is smaller than header size");

        m_data = new uint8_t[dataSize];
        DE_ASSERT(m_data);
        VK_CHECK(vk.getPipelineCacheData(vkDevice, *m_cache, (uintptr_t *)&dataSize, (void *)m_data));

        deMemcpy(&m_header, m_data, sizeof(m_header));

        if (m_header.HeaderLength - VK_UUID_SIZE != 16)
            TCU_THROW(TestError, "Invalid header size!");

        if (m_header.HeaderVersion != 1)
            TCU_THROW(TestError, "Invalid header version!");

        if (m_header.VendorID != m_context.getDeviceProperties().vendorID)
            TCU_THROW(TestError, "Invalid header vendor ID!");

        if (m_header.DeviceID != m_context.getDeviceProperties().deviceID)
            TCU_THROW(TestError, "Invalid header device ID!");

        if (deMemCmp(&m_header.PipelineCacheUUID, &m_context.getDeviceProperties().pipelineCacheUUID, VK_UUID_SIZE) !=
            0)
            TCU_THROW(TestError, "Invalid header pipeline cache UUID!");
    }
}

CacheHeaderTestInstance::~CacheHeaderTestInstance(void)
{
    delete[] m_data;
}

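// Calls vkGetPipelineCacheData with buffer sizes that are too small and checks that VK_INCOMPLETE is returned;
// when the size cannot even hold the header, no data may be written and the returned size must be zero.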
class InvalidSizeTest : public GraphicsTest
{
public:
    InvalidSizeTest(tcu::TestContext &testContext, const std::string &name, const TestParam *param);
    virtual ~InvalidSizeTest(void)
    {
    }
    virtual TestInstance *createInstance(Context &context) const;
};

InvalidSizeTest::InvalidSizeTest(tcu::TestContext &testContext, const std::string &name, const TestParam *param)
    : GraphicsTest(testContext, name, param)
{
}

class InvalidSizeTestInstance : public GraphicsTestInstance
{
public:
    InvalidSizeTestInstance(Context &context, const TestParam *param);
    virtual ~InvalidSizeTestInstance(void);

protected:
    uint8_t *m_data;
    uint8_t *m_zeroBlock;
};

TestInstance *InvalidSizeTest::createInstance(Context &context) const
{
    return new InvalidSizeTestInstance(context, &m_param);
}

InvalidSizeTestInstance::InvalidSizeTestInstance(Context &context, const TestParam *param)
    : GraphicsTestInstance(context, param)
    , m_data(nullptr)
    , m_zeroBlock(nullptr)
{
    const DeviceInterface &vk = m_context.getDeviceInterface();
    const VkDevice vkDevice   = m_context.getDevice();

    // Query the cache data with invalid sizes and verify the reported results
    try
    {
        size_t dataSize      = 0u;
        size_t savedDataSize = 0u;
        VK_CHECK(vk.getPipelineCacheData(vkDevice, *m_cache, (uintptr_t *)&dataSize, nullptr));
        savedDataSize = dataSize;

        // If the value of dataSize is less than the maximum size that can be retrieved by the pipeline cache,
        // at most pDataSize bytes will be written to pData, and vkGetPipelineCacheData will return VK_INCOMPLETE.
        dataSize--;

        m_data = new uint8_t[savedDataSize];
        deMemset(m_data, 0, savedDataSize);
        DE_ASSERT(m_data);
        if (vk.getPipelineCacheData(vkDevice, *m_cache, (uintptr_t *)&dataSize, (void *)m_data) != VK_INCOMPLETE)
            TCU_THROW(TestError, "GetPipelineCacheData should return VK_INCOMPLETE state!");

        delete[] m_data;
        m_data = nullptr;

        // If the value of dataSize is less than what is necessary to store the header,
        // nothing will be written to pData and zero will be written to dataSize.
        dataSize = 16 + VK_UUID_SIZE - 1;

        m_data = new uint8_t[savedDataSize];
        deMemset(m_data, 0, savedDataSize);
        DE_ASSERT(m_data);
        if (vk.getPipelineCacheData(vkDevice, *m_cache, (uintptr_t *)&dataSize, (void *)m_data) != VK_INCOMPLETE)
            TCU_THROW(TestError, "GetPipelineCacheData should return VK_INCOMPLETE state!");

        m_zeroBlock = new uint8_t[savedDataSize];
        deMemset(m_zeroBlock, 0, savedDataSize);
        if (deMemCmp(m_data, m_zeroBlock, savedDataSize) != 0 || dataSize != 0)
            TCU_THROW(TestError, "Data needs to be empty and data size should be 0 when invalid size is passed to "
                                 "GetPipelineCacheData!");
    }
    catch (...)
    {
        delete[] m_data;
        delete[] m_zeroBlock;
        throw;
    }
}

InvalidSizeTestInstance::~InvalidSizeTestInstance(void)
{
    delete[] m_data;
    delete[] m_zeroBlock;
}

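// Creates a pipeline cache with initialDataSize == 0 while pInitialData points at valid cache data;
// cache creation must succeed and the cache starts out empty.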
class ZeroSizeTest : public GraphicsTest
{
public:
    ZeroSizeTest(tcu::TestContext &testContext, const std::string &name, const TestParam *param);
    virtual ~ZeroSizeTest(void)
    {
    }
    virtual TestInstance *createInstance(Context &context) const;
};

ZeroSizeTest::ZeroSizeTest(tcu::TestContext &testContext, const std::string &name, const TestParam *param)
    : GraphicsTest(testContext, name, param)
{
}

class ZeroSizeTestInstance : public GraphicsTestInstance
{
public:
    ZeroSizeTestInstance(Context &context, const TestParam *param);
    virtual ~ZeroSizeTestInstance(void);

protected:
    uint8_t *m_data;
    uint8_t *m_zeroBlock;
};

TestInstance *ZeroSizeTest::createInstance(Context &context) const
{
    return new ZeroSizeTestInstance(context, &m_param);
}

ZeroSizeTestInstance::ZeroSizeTestInstance(Context &context, const TestParam *param)
    : GraphicsTestInstance(context, param)
    , m_data(nullptr)
    , m_zeroBlock(nullptr)
{
    const DeviceInterface &vk = m_context.getDeviceInterface();
    const VkDevice vkDevice   = m_context.getDevice();

    // Retrieve the cache data and create a new cache that references it with a zero initialDataSize
    try
    {
        size_t dataSize = 0u;

        VK_CHECK(vk.getPipelineCacheData(vkDevice, *m_cache, (uintptr_t *)&dataSize, nullptr));

        m_data = new uint8_t[dataSize];
        deMemset(m_data, 0, dataSize);
        DE_ASSERT(m_data);

        VK_CHECK(vk.getPipelineCacheData(vkDevice, *m_cache, (uintptr_t *)&dataSize, (void *)m_data));

        {
            // Create a cache with initialDataSize = 0 & pInitialData != NULL
            const VkPipelineCacheCreateInfo pipelineCacheCreateInfo = {
                VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO, // VkStructureType sType;
                nullptr,                                      // const void* pNext;
                0u,                                           // VkPipelineCacheCreateFlags flags;
                0u,                                           // uintptr_t initialDataSize;
                m_data,                                       // const void* pInitialData;
            };

            const Unique<VkPipelineCache> pipelineCache(createPipelineCache(vk, vkDevice, &pipelineCacheCreateInfo));
        }
    }
    catch (...)
    {
        delete[] m_data;
        delete[] m_zeroBlock;
        throw;
    }
}

ZeroSizeTestInstance::~ZeroSizeTestInstance(void)
{
    delete[] m_data;
    delete[] m_zeroBlock;
}

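// Corrupts individual header fields (version, vendor ID, device ID, cache UUID) of previously retrieved cache
// data and checks that a pipeline cache can still be created from the invalid blob.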
class InvalidBlobTest : public GraphicsTest
{
public:
    InvalidBlobTest(tcu::TestContext &testContext, const std::string &name, const TestParam *param);
    virtual ~InvalidBlobTest(void)
    {
    }
    virtual TestInstance *createInstance(Context &context) const;
};

InvalidBlobTest::InvalidBlobTest(tcu::TestContext &testContext, const std::string &name, const TestParam *param)
    : GraphicsTest(testContext, name, param)
{
}

class InvalidBlobTestInstance : public GraphicsTestInstance
{
public:
    InvalidBlobTestInstance(Context &context, const TestParam *param);
    virtual ~InvalidBlobTestInstance(void);

protected:
    uint8_t *m_data;
    uint8_t *m_zeroBlock;
};

TestInstance *InvalidBlobTest::createInstance(Context &context) const
{
    return new InvalidBlobTestInstance(context, &m_param);
}

InvalidBlobTestInstance::InvalidBlobTestInstance(Context &context, const TestParam *param)
    : GraphicsTestInstance(context, param)
    , m_data(nullptr)
    , m_zeroBlock(nullptr)
{
    const DeviceInterface &vk = m_context.getDeviceInterface();
    const VkDevice vkDevice   = m_context.getDevice();

    // Create pipeline caches from retrieved data with corrupted header fields
    try
    {
        size_t dataSize = 0u;

        VK_CHECK(vk.getPipelineCacheData(vkDevice, *m_cache, (uintptr_t *)&dataSize, nullptr));

        m_data = new uint8_t[dataSize];
        deMemset(m_data, 0, dataSize);
        DE_ASSERT(m_data);

        VK_CHECK(vk.getPipelineCacheData(vkDevice, *m_cache, (uintptr_t *)&dataSize, (void *)m_data));

        const struct
        {
            uint32_t offset;
            std::string name;
        } headerLayout[] = {
            {4u, "pipeline cache header version"}, {8u, "vendor ID"}, {12u, "device ID"}, {16u, "pipeline cache ID"}};

        for (uint32_t i = 0u; i < DE_LENGTH_OF_ARRAY(headerLayout); i++)
        {
            m_context.getTestContext().getLog()
                << tcu::TestLog::Message << "Creating pipeline cache using previously retrieved data with invalid "
                << headerLayout[i].name << tcu::TestLog::EndMessage;

            m_data[headerLayout[i].offset] =
                (uint8_t)(m_data[headerLayout[i].offset] + 13u); // Add arbitrary number to create an invalid value

            const VkPipelineCacheCreateInfo pipelineCacheCreateInfo = {
                VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO, // VkStructureType sType;
                nullptr,                                      // const void* pNext;
                0u,                                           // VkPipelineCacheCreateFlags flags;
                dataSize,                                     // uintptr_t initialDataSize;
                m_data,                                       // const void* pInitialData;
            };

            const Unique<VkPipelineCache> pipelineCache(createPipelineCache(vk, vkDevice, &pipelineCacheCreateInfo));

            m_data[headerLayout[i].offset] =
                (uint8_t)(m_data[headerLayout[i].offset] - 13u); // Return to original value
        }
    }
    catch (...)
    {
        delete[] m_data;
        delete[] m_zeroBlock;
        throw;
    }
}

InvalidBlobTestInstance::~InvalidBlobTestInstance(void)
{
    delete[] m_data;
    delete[] m_zeroBlock;
}
} // namespace

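// Builds the test hierarchy shared by the pipeline cache and pipeline binary variants: graphics and compute groups,
// pipeline-from-retrieved-data groups, merge tests (cache only) and miscellaneous cache robustness tests.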
de::MovePtr<tcu::TestCaseGroup> createPipelineBlobTestsInternal(tcu::TestContext &testCtx, TestMode testMode,
                                                                PipelineConstructionType pipelineConstructionType,
                                                                de::MovePtr<tcu::TestCaseGroup> blobTests)
{
    const VkShaderStageFlags vertFragStages     = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT;
    const VkShaderStageFlags vertGeomFragStages = vertFragStages | VK_SHADER_STAGE_GEOMETRY_BIT;
    const VkShaderStageFlags vertTesFragStages =
        vertFragStages | VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT;

    // Graphics Pipeline Tests
    {
        de::MovePtr<tcu::TestCaseGroup> graphicsTests(new tcu::TestCaseGroup(testCtx, "graphics_tests"));

        const TestParam testParams[]{
            {testMode, pipelineConstructionType, vertFragStages, false},
            {testMode, pipelineConstructionType, vertGeomFragStages, false},
            {testMode, pipelineConstructionType, vertTesFragStages, false},
            {testMode, pipelineConstructionType, vertFragStages, false,
             VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT},
            {testMode, pipelineConstructionType, vertGeomFragStages, false,
             VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT},
            {testMode, pipelineConstructionType, vertTesFragStages, false,
             VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT},
        };

        for (const auto &testParam : testParams)
        {
            // cache create flags are tested only for cache cases
            if ((testMode == TestMode::BINARY) && testParam.getPipelineCacheCreateFlags())
                continue;

            graphicsTests->addChild(newTestCase<GraphicsTest>(testCtx, &testParam));
        }

        blobTests->addChild(graphicsTests.release());
    }

    // Tests creating a pipeline from previously retrieved blob data
    {
        de::MovePtr<tcu::TestCaseGroup> graphicsTests(new tcu::TestCaseGroup(testCtx, "pipeline_from_get_data"));

        const TestParam testParams[]{
            {testMode, pipelineConstructionType, vertFragStages, false},
            {testMode, pipelineConstructionType, vertGeomFragStages, false},
            {testMode, pipelineConstructionType, vertTesFragStages, false},
        };

        for (uint32_t i = 0; i < DE_LENGTH_OF_ARRAY(testParams); i++)
            graphicsTests->addChild(newTestCase<PipelineFromBlobsTest>(testCtx, &testParams[i]));

        blobTests->addChild(graphicsTests.release());
    }

    // Tests creating a pipeline from incomplete blob data (for pipeline binaries there is a dedicated not_enough_space test)
    if (testMode == TestMode::CACHE)
    {
        de::MovePtr<tcu::TestCaseGroup> graphicsTests(
            new tcu::TestCaseGroup(testCtx, "pipeline_from_incomplete_get_data"));

        const TestParam testParams[]{
            {testMode, pipelineConstructionType, vertFragStages, false},
            {testMode, pipelineConstructionType, vertGeomFragStages, false},
            {testMode, pipelineConstructionType, vertTesFragStages, false},
        };

        for (uint32_t i = 0; i < DE_LENGTH_OF_ARRAY(testParams); i++)
            graphicsTests->addChild(newTestCase<PipelineFromIncompleteBlobsTest>(testCtx, &testParams[i]));

        blobTests->addChild(graphicsTests.release());
    }

    // Compute Pipeline Tests - don't repeat those tests for graphics pipeline library
    if (pipelineConstructionType == PIPELINE_CONSTRUCTION_TYPE_MONOLITHIC)
    {
        de::MovePtr<tcu::TestCaseGroup> computeTests(new tcu::TestCaseGroup(testCtx, "compute_tests"));

        const TestParam testParams[]{
            {testMode, pipelineConstructionType, VK_SHADER_STAGE_COMPUTE_BIT, false, 0u, false},
            {testMode, pipelineConstructionType, VK_SHADER_STAGE_COMPUTE_BIT, false, 0u, true},
        };

        computeTests->addChild(newTestCase<ComputeTest>(testCtx, &testParams[0]));
        if (testMode == TestMode::BINARY)
            computeTests->addChild(newTestCase<ComputeTest>(testCtx, &testParams[1]));

        blobTests->addChild(computeTests.release());
    }

    // Merge blobs tests
    if (testMode == TestMode::CACHE)
    {
        de::MovePtr<tcu::TestCaseGroup> mergeTests(new tcu::TestCaseGroup(testCtx, "merge"));

        const TestParam testParams[]{
            {testMode, pipelineConstructionType, vertFragStages, true},
            {testMode, pipelineConstructionType, vertGeomFragStages, true},
            {testMode, pipelineConstructionType, vertTesFragStages, true},
        };

        const uint32_t firstTypeIdx = 0u;
        const uint32_t lastTypeIdx  = static_cast<uint32_t>(MergeBlobsType::LAST);

        for (uint32_t i = 0; i < DE_LENGTH_OF_ARRAY(testParams); i++)
        {
            de::MovePtr<tcu::TestCaseGroup> mergeStagesTests(
                new tcu::TestCaseGroup(testCtx, testParams[i].generateTestName().c_str()));

            for (uint32_t destTypeIdx = firstTypeIdx; destTypeIdx <= lastTypeIdx; destTypeIdx++)
                for (uint32_t srcType1Idx = firstTypeIdx; srcType1Idx <= lastTypeIdx; srcType1Idx++)
                {
                    MergeBlobsTestParam mergeTestParam;
                    mergeTestParam.destBlobsType = MergeBlobsType(destTypeIdx);
                    mergeTestParam.srcBlobTypes.push_back(MergeBlobsType(srcType1Idx));

                    // merge with one cache / binaries
                    {
                        std::string testName = "src_" + getMergeBlobsTypesStr(mergeTestParam.srcBlobTypes) + "_dst_" +
                                               getMergeBlobsTypeStr(mergeTestParam.destBlobsType);
                        mergeStagesTests->addChild(
                            new MergeBlobsTest(testCtx, testName.c_str(), &testParams[i], &mergeTestParam));
                    }

                    // merge with two caches
                    for (uint32_t srcType2Idx = 0u; srcType2Idx <= static_cast<uint32_t>(MergeBlobsType::LAST);
                         srcType2Idx++)
                    {
                        MergeBlobsTestParam cacheTestParamTwoCaches = mergeTestParam;

                        cacheTestParamTwoCaches.srcBlobTypes.push_back(MergeBlobsType(srcType2Idx));

                        std::string testName = "src_" + getMergeBlobsTypesStr(cacheTestParamTwoCaches.srcBlobTypes) +
                                               "_dst_" + getMergeBlobsTypeStr(cacheTestParamTwoCaches.destBlobsType);
                        mergeStagesTests->addChild(
                            new MergeBlobsTest(testCtx, testName.c_str(), &testParams[i], &cacheTestParamTwoCaches));
                    }
                }
            mergeTests->addChild(mergeStagesTests.release());
        }
        blobTests->addChild(mergeTests.release());
    }

    // Misc Tests
    if (testMode == TestMode::CACHE)
    {
        de::MovePtr<tcu::TestCaseGroup> miscTests(new tcu::TestCaseGroup(testCtx, "misc_tests"));

        const TestParam testParam(testMode, pipelineConstructionType, vertFragStages, false);

        miscTests->addChild(new CacheHeaderTest(testCtx, "cache_header_test", &testParam));

        miscTests->addChild(new InvalidSizeTest(testCtx, "invalid_size_test", &testParam));

        miscTests->addChild(new ZeroSizeTest(testCtx, "zero_size_test", &testParam));

        miscTests->addChild(new InvalidBlobTest(testCtx, "invalid_blob_test", &testParam));

        blobTests->addChild(miscTests.release());
    }

    return blobTests;
}

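// Creates the root "cache" test group; the same hierarchy is reused for pipeline binaries via addPipelineBinaryBasicTests.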
tcu::TestCaseGroup *createCacheTests(tcu::TestContext &testCtx, PipelineConstructionType pipelineConstructionType)
{
    de::MovePtr<tcu::TestCaseGroup> cacheTests(new tcu::TestCaseGroup(testCtx, "cache"));
    return createPipelineBlobTestsInternal(testCtx, TestMode::CACHE, pipelineConstructionType, cacheTests).release();
}

de::MovePtr<tcu::TestCaseGroup> addPipelineBinaryBasicTests(tcu::TestContext &testCtx,
                                                            PipelineConstructionType pipelineConstructionType,
                                                            de::MovePtr<tcu::TestCaseGroup> binaryTests)
{
    return createPipelineBlobTestsInternal(testCtx, TestMode::BINARY, pipelineConstructionType, binaryTests);
}

} // namespace pipeline

} // namespace vkt