• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*------------------------------------------------------------------------
2  * Vulkan Conformance Tests
3  * ------------------------
4  *
5  * Copyright (c) 2015 The Khronos Group Inc.
6  * Copyright (c) 2015 Intel Corporation
7  *
8  * Licensed under the Apache License, Version 2.0 (the "License");
9  * you may not use this file except in compliance with the License.
10  * You may obtain a copy of the License at
11  *
12  *      http://www.apache.org/licenses/LICENSE-2.0
13  *
14  * Unless required by applicable law or agreed to in writing, software
15  * distributed under the License is distributed on an "AS IS" BASIS,
16  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17  * See the License for the specific language governing permissions and
18  * limitations under the License.
19  *
20  *//*!
21  * \file
22  * \brief Vulkan Occlusion Query Tests
23  *//*--------------------------------------------------------------------*/
24 
25 #include "vktQueryPoolOcclusionTests.hpp"
26 
27 #include "vktTestCase.hpp"
28 
29 #include "vktDrawImageObjectUtil.hpp"
30 #include "vktDrawBufferObjectUtil.hpp"
31 #include "vktDrawCreateInfoUtil.hpp"
32 #include "vkBuilderUtil.hpp"
33 #include "vkRefUtil.hpp"
34 #include "vkPrograms.hpp"
35 #include "vkTypeUtil.hpp"
36 #include "vkCmdUtil.hpp"
37 
38 #include "tcuTestLog.hpp"
39 #include "tcuResource.hpp"
40 #include "tcuImageCompare.hpp"
41 #include "tcuCommandLine.hpp"
42 
43 namespace vkt
44 {
45 
46 namespace QueryPool
47 {
48 
49 using namespace Draw;
50 
51 namespace
52 {
53 
// Aggregates the static Vulkan state (pipeline, render pass, framebuffer,
// attachments and vertex buffer) shared by the occlusion query test instances.
struct StateObjects
{
			// Creates all device objects, sized for numVertices vertices drawn with the given topology.
			StateObjects	(const vk::DeviceInterface&vk, vkt::Context &context, const int numVertices, vk::VkPrimitiveTopology primitive);
	// Uploads vertex data into m_vertexBuffer and flushes it so the device can see it.
	void	setVertices		(const vk::DeviceInterface&vk, std::vector<tcu::Vec4> vertices);

	// Framebuffer dimensions shared by the color and depth attachments.
	enum
	{
		WIDTH	= 128,
		HEIGHT	= 128
	};

	vkt::Context &m_context;

	vk::Move<vk::VkPipeline>		m_pipeline;
	vk::Move<vk::VkPipelineLayout>	m_pipelineLayout;

	de::SharedPtr<Image>			m_colorAttachmentImage, m_DepthImage;
	vk::Move<vk::VkImageView>		m_attachmentView;
	vk::Move<vk::VkImageView>		m_depthiew;		// NOTE(review): name is a typo of "m_depthView"; kept as-is for compatibility

	vk::Move<vk::VkRenderPass>		m_renderPass;
	vk::Move<vk::VkFramebuffer>		m_framebuffer;

	de::SharedPtr<Buffer>			m_vertexBuffer;	// host-visible vertex storage filled by setVertices()

	vk::VkFormat					m_colorAttachmentFormat;
};
81 
/*!
 * Creates every static Vulkan object the occlusion query tests need:
 * color (R8G8B8A8_UNORM) and depth (D16_UNORM) attachments with views,
 * a render pass and framebuffer, a graphics pipeline with depth test
 * enabled, and a host-visible vertex buffer for numVertices vertices.
 */
StateObjects::StateObjects (const vk::DeviceInterface&vk, vkt::Context &context, const int numVertices, vk::VkPrimitiveTopology primitive)
	: m_context(context)
	, m_colorAttachmentFormat(vk::VK_FORMAT_R8G8B8A8_UNORM)

{
	vk::VkFormat		depthFormat = vk::VK_FORMAT_D16_UNORM;
	const vk::VkDevice	device		= m_context.getDevice();

	//attachment images and views
	{
		vk::VkExtent3D imageExtent =
		{
			WIDTH,	// width;
			HEIGHT,	// height;
			1		// depth;
		};

		// Color attachment is also a transfer source so results could be read back.
		const ImageCreateInfo colorImageCreateInfo(vk::VK_IMAGE_TYPE_2D, m_colorAttachmentFormat, imageExtent, 1, 1, vk::VK_SAMPLE_COUNT_1_BIT, vk::VK_IMAGE_TILING_OPTIMAL,
												   vk::VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT);

		m_colorAttachmentImage	= Image::createAndAlloc(vk, device, colorImageCreateInfo, m_context.getDefaultAllocator(), m_context.getUniversalQueueFamilyIndex());

		const ImageViewCreateInfo attachmentViewInfo(m_colorAttachmentImage->object(), vk::VK_IMAGE_VIEW_TYPE_2D, m_colorAttachmentFormat);
		m_attachmentView		= vk::createImageView(vk, device, &attachmentViewInfo);

		ImageCreateInfo depthImageCreateInfo(vk::VK_IMAGE_TYPE_2D, depthFormat, imageExtent, 1, 1, vk::VK_SAMPLE_COUNT_1_BIT, vk::VK_IMAGE_TILING_OPTIMAL,
			vk::VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT);

		m_DepthImage			= Image::createAndAlloc(vk, device, depthImageCreateInfo, m_context.getDefaultAllocator(), m_context.getUniversalQueueFamilyIndex());

		// Construct a depth view from depth image
		const ImageViewCreateInfo depthViewInfo(m_DepthImage->object(), vk::VK_IMAGE_VIEW_TYPE_2D, depthFormat);
		m_depthiew				= vk::createImageView(vk, device, &depthViewInfo);
	}

	{
		// Renderpass and Framebuffer

		RenderPassCreateInfo renderPassCreateInfo;
		renderPassCreateInfo.addAttachment(AttachmentDescription(m_colorAttachmentFormat,									// format
																	vk::VK_SAMPLE_COUNT_1_BIT,								// samples
																	vk::VK_ATTACHMENT_LOAD_OP_CLEAR,						// loadOp
																	vk::VK_ATTACHMENT_STORE_OP_DONT_CARE,					// storeOp
																	vk::VK_ATTACHMENT_LOAD_OP_DONT_CARE,					// stencilLoadOp
																	vk::VK_ATTACHMENT_STORE_OP_DONT_CARE,					// stencilStoreOp
																	vk::VK_IMAGE_LAYOUT_GENERAL,							// initialLayout
																	vk::VK_IMAGE_LAYOUT_GENERAL));							// finalLayout

		renderPassCreateInfo.addAttachment(AttachmentDescription(depthFormat,												// format
																 vk::VK_SAMPLE_COUNT_1_BIT,									// samples
																 vk::VK_ATTACHMENT_LOAD_OP_CLEAR,							// loadOp
																 vk::VK_ATTACHMENT_STORE_OP_DONT_CARE,						// storeOp
																 vk::VK_ATTACHMENT_LOAD_OP_DONT_CARE,						// stencilLoadOp
																 vk::VK_ATTACHMENT_STORE_OP_DONT_CARE,						// stencilStoreOp
																 vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,		// initialLayout
																 vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL));	// finalLayout

		const vk::VkAttachmentReference colorAttachmentReference =
		{
			0,															// attachment
			vk::VK_IMAGE_LAYOUT_GENERAL									// layout
		};

		const vk::VkAttachmentReference depthAttachmentReference =
		{
			1,															// attachment
			vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL		// layout
		};

		renderPassCreateInfo.addSubpass(SubpassDescription(vk::VK_PIPELINE_BIND_POINT_GRAPHICS,					// pipelineBindPoint
														   0,													// flags
														   0,													// inputCount
														   DE_NULL,												// pInputAttachments
														   1,													// colorCount
														   &colorAttachmentReference,							// pColorAttachments
														   DE_NULL,												// pResolveAttachments
														   depthAttachmentReference,							// depthStencilAttachment
														   0,													// preserveCount
														   DE_NULL));											// preserveAttachments

		m_renderPass = vk::createRenderPass(vk, device, &renderPassCreateInfo);

		// Attachment order must match the render pass attachment indices above.
		std::vector<vk::VkImageView> attachments(2);
		attachments[0] = *m_attachmentView;
		attachments[1] = *m_depthiew;

		FramebufferCreateInfo framebufferCreateInfo(*m_renderPass, attachments, WIDTH, HEIGHT, 1);
		m_framebuffer = vk::createFramebuffer(vk, device, &framebufferCreateInfo);
	}

	{
		// Pipeline

		vk::Unique<vk::VkShaderModule> vs(vk::createShaderModule(vk, device, m_context.getBinaryCollection().get("vert"), 0));
		vk::Unique<vk::VkShaderModule> fs(vk::createShaderModule(vk, device, m_context.getBinaryCollection().get("frag"), 0));

		const PipelineCreateInfo::ColorBlendState::Attachment attachmentState;

		// No descriptors are used, so an empty pipeline layout suffices.
		const PipelineLayoutCreateInfo pipelineLayoutCreateInfo;
		m_pipelineLayout = vk::createPipelineLayout(vk, device, &pipelineLayoutCreateInfo);

		// One binding: tightly packed vec4 positions.
		const vk::VkVertexInputBindingDescription vf_binding_desc		=
		{
			0,																// binding;
			4 * (deUint32)sizeof(float),									// stride;
			vk::VK_VERTEX_INPUT_RATE_VERTEX									// inputRate
		};

		const vk::VkVertexInputAttributeDescription vf_attribute_desc	=
		{
			0,																// location;
			0,																// binding;
			vk::VK_FORMAT_R32G32B32A32_SFLOAT,								// format;
			0																// offset;
		};

		const vk::VkPipelineVertexInputStateCreateInfo vf_info			=
		{
			vk::VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,	// sType;
			NULL,															// pNext;
			0u,																// flags;
			1,																// vertexBindingDescriptionCount;
			&vf_binding_desc,												// pVertexBindingDescriptions;
			1,																// vertexAttributeDescriptionCount;
			&vf_attribute_desc												// pVertexAttributeDescriptions;
		};

		PipelineCreateInfo pipelineCreateInfo(*m_pipelineLayout, *m_renderPass, 0, 0);
		pipelineCreateInfo.addShader(PipelineCreateInfo::PipelineShaderStage(*vs, "main", vk::VK_SHADER_STAGE_VERTEX_BIT));
		pipelineCreateInfo.addShader(PipelineCreateInfo::PipelineShaderStage(*fs, "main", vk::VK_SHADER_STAGE_FRAGMENT_BIT));
		pipelineCreateInfo.addState(PipelineCreateInfo::InputAssemblerState(primitive));
		pipelineCreateInfo.addState(PipelineCreateInfo::ColorBlendState(1, &attachmentState));
		const vk::VkViewport viewport	= vk::makeViewport(WIDTH, HEIGHT);
		const vk::VkRect2D scissor		= vk::makeRect2D(WIDTH, HEIGHT);
		pipelineCreateInfo.addState(PipelineCreateInfo::ViewportState(1, std::vector<vk::VkViewport>(1, viewport), std::vector<vk::VkRect2D>(1, scissor)));
		// Depth test and write enabled with GREATER_OR_EQUAL compare.
		pipelineCreateInfo.addState(PipelineCreateInfo::DepthStencilState(true, true, vk::VK_COMPARE_OP_GREATER_OR_EQUAL));
		pipelineCreateInfo.addState(PipelineCreateInfo::RasterizerState());
		pipelineCreateInfo.addState(PipelineCreateInfo::MultiSampleState());
		pipelineCreateInfo.addState(vf_info);
		m_pipeline = vk::createGraphicsPipeline(vk, device, DE_NULL, &pipelineCreateInfo);
	}

	{
		// Vertex buffer
		const size_t kBufferSize = numVertices * sizeof(tcu::Vec4);
		m_vertexBuffer = Buffer::createAndAlloc(vk, device, BufferCreateInfo(kBufferSize, vk::VK_BUFFER_USAGE_VERTEX_BUFFER_BIT), m_context.getDefaultAllocator(), vk::MemoryRequirement::HostVisible);
	}
}
230 
setVertices(const vk::DeviceInterface & vk,std::vector<tcu::Vec4> vertices)231 void StateObjects::setVertices (const vk::DeviceInterface&vk, std::vector<tcu::Vec4> vertices)
232 {
233 	const vk::VkDevice device			= m_context.getDevice();
234 
235 	tcu::Vec4 *ptr = reinterpret_cast<tcu::Vec4*>(m_vertexBuffer->getBoundMemory().getHostPtr());
236 	std::copy(vertices.begin(), vertices.end(), ptr);
237 
238 	vk::flushAlloc(vk, device,	m_vertexBuffer->getBoundMemory());
239 }
240 
// Bit width of each value read back from the query pool.
enum OcclusionQueryResultSize
{
	RESULT_SIZE_64_BIT,
	RESULT_SIZE_32_BIT,
};

// How the test synchronizes before reading query results.
enum OcclusionQueryWait
{
	WAIT_QUEUE,	// vkQueueWaitIdle before reading results
	WAIT_QUERY,	// request VK_QUERY_RESULT_WAIT_BIT when fetching results
	WAIT_NONE	// read immediately; results may legitimately be unavailable
};

// How query results are retrieved, and whether the pool is reset from the host.
enum OcclusionQueryResultsMode
{
	RESULTS_MODE_GET,			// vkGetQueryPoolResults
	RESULTS_MODE_GET_RESET,		// vkGetQueryPoolResults + host reset (VK_EXT_host_query_reset)
	RESULTS_MODE_COPY,			// copy results into a buffer on the device
	RESULTS_MODE_COPY_RESET		// device copy + reset variant
};
261 
// Parameterization of a single occlusion query test case.
struct OcclusionQueryTestVector
{
	vk::VkQueryControlFlags		queryControlFlags;			// e.g. VK_QUERY_CONTROL_PRECISE_BIT
	OcclusionQueryResultSize	queryResultSize;			// 32- or 64-bit results
	OcclusionQueryWait			queryWait;					// synchronization strategy before readback
	OcclusionQueryResultsMode	queryResultsMode;			// get vs. copy, optionally with reset
	vk::VkDeviceSize			queryResultsStride;			// stride between result entries (0 => tightly packed)
	bool						queryResultsAvailability;	// also fetch per-query availability values
	vk::VkPrimitiveTopology		primitiveTopology;			// topology used for the draw calls
	bool						discardHalf;				// presumably discards half the fragments in the shader -- not exercised in this chunk; confirm against shader setup
	deBool						queryResultsDstOffset;		// copy results at an offset into the destination buffer (adds one extra slot)
};
274 
// Smoke test: records one empty query and one query around a single
// point-list draw call, then validates the counts via vkGetQueryPoolResults.
class BasicOcclusionQueryTestInstance : public vkt::TestInstance
{
public:
					BasicOcclusionQueryTestInstance		(vkt::Context &context, const OcclusionQueryTestVector&  testVector);
					~BasicOcclusionQueryTestInstance	(void);
private:
	tcu::TestStatus	iterate								(void);

	enum
	{
		NUM_QUERIES_IN_POOL				= 2,	// one empty query + one around the draw call
		QUERY_INDEX_CAPTURE_EMPTY		= 0,	// begun/ended with no draw inside; expected result 0
		QUERY_INDEX_CAPTURE_DRAWCALL	= 1,	// wraps the point-list draw call
		NUM_VERTICES_IN_DRAWCALL		= 3
	};

	OcclusionQueryTestVector	m_testVector;
	StateObjects*				m_stateObjects;	// owned raw pointer; released in the destructor
	vk::VkQueryPool				m_queryPool;	// owned raw handle; destroyed in the destructor
};
295 
BasicOcclusionQueryTestInstance(vkt::Context & context,const OcclusionQueryTestVector & testVector)296 BasicOcclusionQueryTestInstance::BasicOcclusionQueryTestInstance (vkt::Context &context, const OcclusionQueryTestVector&  testVector)
297 	: TestInstance		(context)
298 	, m_testVector		(testVector)
299 {
300 	DE_ASSERT(testVector.queryResultSize			== RESULT_SIZE_64_BIT
301 			&& testVector.queryWait					== WAIT_QUEUE
302 			&& (testVector.queryResultsMode			== RESULTS_MODE_GET || testVector.queryResultsMode == RESULTS_MODE_GET_RESET)
303 			&& testVector.queryResultsStride		== sizeof(deUint64)
304 			&& testVector.queryResultsAvailability	== false
305 			&& testVector.primitiveTopology			== vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST);
306 
307 	if ((m_testVector.queryControlFlags & vk::VK_QUERY_CONTROL_PRECISE_BIT) && !m_context.getDeviceFeatures().occlusionQueryPrecise)
308 		throw tcu::NotSupportedError("Precise occlusion queries are not supported");
309 
310 	m_stateObjects = new StateObjects(m_context.getDeviceInterface(), m_context, NUM_VERTICES_IN_DRAWCALL, m_testVector.primitiveTopology);
311 
312 	const vk::VkDevice			device	= m_context.getDevice();
313 	const vk::DeviceInterface&	vk		= m_context.getDeviceInterface();
314 
315 	const vk::VkQueryPoolCreateInfo queryPoolCreateInfo =
316 	{
317 		vk::VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO,
318 		DE_NULL,
319 		0u,
320 		vk::VK_QUERY_TYPE_OCCLUSION,
321 		NUM_QUERIES_IN_POOL,
322 		0
323 	};
324 	VK_CHECK(vk.createQueryPool(device, &queryPoolCreateInfo, /*pAllocator*/ DE_NULL, &m_queryPool));
325 
326 	std::vector<tcu::Vec4> vertices(NUM_VERTICES_IN_DRAWCALL);
327 	vertices[0] = tcu::Vec4(0.5, 0.5, 0.0, 1.0);
328 	vertices[1] = tcu::Vec4(0.5, 0.0, 0.0, 1.0);
329 	vertices[2] = tcu::Vec4(0.0, 0.5, 0.0, 1.0);
330 	m_stateObjects->setVertices(vk, vertices);
331 }
332 
~BasicOcclusionQueryTestInstance(void)333 BasicOcclusionQueryTestInstance::~BasicOcclusionQueryTestInstance (void)
334 {
335 	if (m_stateObjects)
336 		delete m_stateObjects;
337 
338 	if (m_queryPool != DE_NULL)
339 	{
340 		const vk::VkDevice device		= m_context.getDevice();
341 		const vk::DeviceInterface& vk	= m_context.getDeviceInterface();
342 
343 		vk.destroyQueryPool(device, m_queryPool, /*pAllocator*/ DE_NULL);
344 	}
345 }
346 
iterate(void)347 tcu::TestStatus	BasicOcclusionQueryTestInstance::iterate (void)
348 {
349 	tcu::TestLog &log				= m_context.getTestContext().getLog();
350 	const vk::VkDevice device		= m_context.getDevice();
351 	const vk::VkQueue queue			= m_context.getUniversalQueue();
352 	const vk::DeviceInterface& vk	= m_context.getDeviceInterface();
353 
354 	if (m_testVector.queryResultsMode == RESULTS_MODE_GET_RESET)
355 	{
356 		// Check VK_EXT_host_query_reset is supported
357 		m_context.requireDeviceFunctionality("VK_EXT_host_query_reset");
358 		if(m_context.getHostQueryResetFeatures().hostQueryReset == VK_FALSE)
359 			throw tcu::NotSupportedError(std::string("Implementation doesn't support resetting queries from the host").c_str());
360 	}
361 
362 	const CmdPoolCreateInfo			cmdPoolCreateInfo	(m_context.getUniversalQueueFamilyIndex());
363 	vk::Move<vk::VkCommandPool>		cmdPool				= vk::createCommandPool(vk, device, &cmdPoolCreateInfo);
364 
365 	vk::Unique<vk::VkCommandBuffer> cmdBuffer			(vk::allocateCommandBuffer(vk, device, *cmdPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
366 
367 	beginCommandBuffer(vk, *cmdBuffer);
368 
369 	initialTransitionColor2DImage(vk, *cmdBuffer, m_stateObjects->m_colorAttachmentImage->object(), vk::VK_IMAGE_LAYOUT_GENERAL,
370 								  vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT);
371 	initialTransitionDepth2DImage(vk, *cmdBuffer, m_stateObjects->m_DepthImage->object(), vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
372 								  vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT, vk::VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | vk::VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT);
373 
374 	std::vector<vk::VkClearValue> renderPassClearValues(2);
375 	deMemset(&renderPassClearValues[0], 0, static_cast<int>(renderPassClearValues.size()) * sizeof(vk::VkClearValue));
376 
377 	if (m_testVector.queryResultsMode != RESULTS_MODE_GET_RESET)
378 		vk.cmdResetQueryPool(*cmdBuffer, m_queryPool, 0, NUM_QUERIES_IN_POOL);
379 
380 	beginRenderPass(vk, *cmdBuffer, *m_stateObjects->m_renderPass, *m_stateObjects->m_framebuffer, vk::makeRect2D(0, 0, StateObjects::WIDTH, StateObjects::HEIGHT), (deUint32)renderPassClearValues.size(), &renderPassClearValues[0]);
381 
382 	vk.cmdBindPipeline(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_stateObjects->m_pipeline);
383 
384 	vk::VkBuffer vertexBuffer = m_stateObjects->m_vertexBuffer->object();
385 	const vk::VkDeviceSize vertexBufferOffset = 0;
386 	vk.cmdBindVertexBuffers(*cmdBuffer, 0, 1, &vertexBuffer, &vertexBufferOffset);
387 
388 	vk.cmdBeginQuery(*cmdBuffer, m_queryPool, QUERY_INDEX_CAPTURE_EMPTY, m_testVector.queryControlFlags);
389 	vk.cmdEndQuery(*cmdBuffer, m_queryPool,	QUERY_INDEX_CAPTURE_EMPTY);
390 
391 	vk.cmdBeginQuery(*cmdBuffer, m_queryPool, QUERY_INDEX_CAPTURE_DRAWCALL, m_testVector.queryControlFlags);
392 	vk.cmdDraw(*cmdBuffer, NUM_VERTICES_IN_DRAWCALL, 1, 0, 0);
393 	vk.cmdEndQuery(*cmdBuffer, m_queryPool,	QUERY_INDEX_CAPTURE_DRAWCALL);
394 
395 	endRenderPass(vk, *cmdBuffer);
396 
397 	transition2DImage(vk, *cmdBuffer, m_stateObjects->m_colorAttachmentImage->object(), vk::VK_IMAGE_ASPECT_COLOR_BIT,
398 					  vk::VK_IMAGE_LAYOUT_GENERAL, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
399 					  vk::VK_ACCESS_TRANSFER_READ_BIT, vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT);
400 
401 	endCommandBuffer(vk, *cmdBuffer);
402 
403 	if (m_testVector.queryResultsMode == RESULTS_MODE_GET_RESET)
404 		vk.resetQueryPool(device, m_queryPool, 0, NUM_QUERIES_IN_POOL);
405 
406 	submitCommandsAndWait(vk, device, queue, cmdBuffer.get());
407 
408 	deUint64 queryResults[NUM_QUERIES_IN_POOL] = { 0 };
409 	size_t queryResultsSize		= sizeof(queryResults);
410 
411 	vk::VkResult queryResult	= vk.getQueryPoolResults(device, m_queryPool, 0, NUM_QUERIES_IN_POOL, queryResultsSize, queryResults, sizeof(queryResults[0]), vk::VK_QUERY_RESULT_64_BIT);
412 
413 	if (queryResult == vk::VK_NOT_READY)
414 	{
415 		TCU_FAIL("Query result not avaliable, but vkWaitIdle() was called.");
416 	}
417 
418 	VK_CHECK(queryResult);
419 
420 	log << tcu::TestLog::Section("OcclusionQueryResults",
421 		"Occlusion query results");
422 	for (int ndx = 0; ndx < DE_LENGTH_OF_ARRAY(queryResults); ++ndx)
423 	{
424 		log << tcu::TestLog::Message << "query[slot == " << ndx
425 			<< "] result == " << queryResults[ndx] << tcu::TestLog::EndMessage;
426 	}
427 
428 	bool passed = true;
429 
430 	for (int queryNdx = 0; queryNdx < DE_LENGTH_OF_ARRAY(queryResults); ++queryNdx)
431 	{
432 
433 		deUint64 expectedValue;
434 
435 		switch (queryNdx)
436 		{
437 			case QUERY_INDEX_CAPTURE_EMPTY:
438 				expectedValue = 0;
439 				break;
440 			case QUERY_INDEX_CAPTURE_DRAWCALL:
441 				expectedValue = NUM_VERTICES_IN_DRAWCALL;
442 				break;
443 		}
444 
445 		if ((m_testVector.queryControlFlags & vk::VK_QUERY_CONTROL_PRECISE_BIT) || expectedValue == 0)
446 		{
447 			// require precise value
448 			if (queryResults[queryNdx] != expectedValue)
449 			{
450 				log << tcu::TestLog::Message << "vkGetQueryPoolResults returned "
451 					"wrong value of query for index "
452 					<< queryNdx << ", expected " << expectedValue << ", got "
453 					<< queryResults[0] << "." << tcu::TestLog::EndMessage;
454 				passed = false;
455 			}
456 		}
457 		else
458 		{
459 			// require imprecize value > 0
460 			if (queryResults[queryNdx] == 0)
461 			{
462 				log << tcu::TestLog::Message << "vkGetQueryPoolResults returned "
463 					"wrong value of query for index "
464 					<< queryNdx << ", expected any non-zero value, got "
465 					<< queryResults[0] << "." << tcu::TestLog::EndMessage;
466 				passed = false;
467 			}
468 		}
469 	}
470 	log << tcu::TestLog::EndSection;
471 
472 	if (passed)
473 	{
474 		return tcu::TestStatus(QP_TEST_RESULT_PASS, "Query result verification passed");
475 	}
476 	return tcu::TestStatus(QP_TEST_RESULT_FAIL, "Query result verification failed");
477 }
478 
// Full occlusion query test: draws a fully visible, a partially occluded and
// a fully occluded triangle, each wrapped in its own query, and validates the
// results using the wait/copy/reset strategy selected by the test vector.
class OcclusionQueryTestInstance : public vkt::TestInstance
{
public:
	OcclusionQueryTestInstance		(vkt::Context &context, const OcclusionQueryTestVector& testVector);
	~OcclusionQueryTestInstance		(void);
private:
	tcu::TestStatus					iterate							(void);

	// True when the query pool reset must live in its own command buffer to
	// avoid a host/device race on query results.
	bool							hasSeparateResetCmdBuf			(void) const;
	// True when the device-side results copy must live in its own command
	// buffer (copy modes combined with WAIT_QUEUE).
	bool							hasSeparateCopyCmdBuf			(void) const;

	vk::Move<vk::VkCommandBuffer>	recordQueryPoolReset			(vk::VkCommandPool commandPool);
	vk::Move<vk::VkCommandBuffer>	recordRender					(vk::VkCommandPool commandPool);
	vk::Move<vk::VkCommandBuffer>	recordCopyResults				(vk::VkCommandPool commandPool);

	// Reads back results (and availability values, when requested) for all queries.
	void							captureResults					(deUint64*			retResults,	deUint64*		retAvailability,	bool	allowNotReady);
	void							logResults						(const deUint64*	results,	const deUint64* availability);
	bool							validateResults					(const deUint64*	results,	const deUint64* availability,		bool	allowUnavailable,	vk::VkPrimitiveTopology primitiveTopology);

	enum
	{
		NUM_QUERIES_IN_POOL							= 3,
		QUERY_INDEX_CAPTURE_ALL						= 0,	// wraps the fully visible draw
		QUERY_INDEX_CAPTURE_PARTIALLY_OCCLUDED		= 1,	// wraps the partially occluded draw
		QUERY_INDEX_CAPTURE_OCCLUDED				= 2		// wraps the fully occluded draw
	};
	enum
	{
		NUM_VERTICES_IN_DRAWCALL					= 3,
		NUM_VERTICES_IN_PARTIALLY_OCCLUDED_DRAWCALL	= 3,
		NUM_VERTICES_IN_OCCLUDER_DRAWCALL			= 3,
		NUM_VERTICES								= NUM_VERTICES_IN_DRAWCALL + NUM_VERTICES_IN_PARTIALLY_OCCLUDED_DRAWCALL + NUM_VERTICES_IN_OCCLUDER_DRAWCALL
	};
	enum
	{
		// First vertex of each draw call within the shared vertex buffer.
		START_VERTEX								= 0,
		START_VERTEX_PARTIALLY_OCCLUDED				= START_VERTEX + NUM_VERTICES_IN_DRAWCALL,
		START_VERTEX_OCCLUDER						= START_VERTEX_PARTIALLY_OCCLUDED + NUM_VERTICES_IN_PARTIALLY_OCCLUDED_DRAWCALL
	};

	OcclusionQueryTestVector		m_testVector;

	// VkQueryResultFlags derived once from the test vector (wait / 64-bit / availability).
	const vk::VkQueryResultFlags	m_queryResultFlags;

	StateObjects*					m_stateObjects;				// owned raw pointer; released in the destructor
	vk::VkQueryPool					m_queryPool;				// owned raw handle; destroyed in the destructor
	de::SharedPtr<Buffer>			m_queryPoolResultsBuffer;	// destination buffer, created only for the copy modes

	vk::Move<vk::VkCommandPool>		m_commandPool;
	vk::Move<vk::VkCommandBuffer>	m_queryPoolResetCommandBuffer;	// recorded only when hasSeparateResetCmdBuf()
	vk::Move<vk::VkCommandBuffer>	m_renderCommandBuffer;
	vk::Move<vk::VkCommandBuffer>	m_copyResultsCommandBuffer;		// recorded only when hasSeparateCopyCmdBuf()
};
532 
/*!
 * Sets up the query pool, state objects, an optional results buffer (copy
 * modes) and the command buffers required by the selected test vector.
 *
 * \throws tcu::NotSupportedError if precise queries are requested but the
 *         occlusionQueryPrecise feature is absent.
 */
OcclusionQueryTestInstance::OcclusionQueryTestInstance (vkt::Context &context, const OcclusionQueryTestVector& testVector)
	: vkt::TestInstance		(context)
	, m_testVector			(testVector)
	// Derive the result flags once: WAIT (unless WAIT_QUERY is combined with
	// copy+reset), 64-bit width, and availability, per the test vector.
	, m_queryResultFlags	(((m_testVector.queryWait == WAIT_QUERY && m_testVector.queryResultsMode != RESULTS_MODE_COPY_RESET)? vk::VK_QUERY_RESULT_WAIT_BIT				: 0)
							| (m_testVector.queryResultSize == RESULT_SIZE_64_BIT												? vk::VK_QUERY_RESULT_64_BIT				: 0)
							| (m_testVector.queryResultsAvailability															? vk::VK_QUERY_RESULT_WITH_AVAILABILITY_BIT	: 0))
{
	const vk::VkDevice			device				= m_context.getDevice();
	const vk::DeviceInterface&	vk					= m_context.getDeviceInterface();

	// Precise occlusion queries are an optional device feature.
	if ((m_testVector.queryControlFlags & vk::VK_QUERY_CONTROL_PRECISE_BIT) && !m_context.getDeviceFeatures().occlusionQueryPrecise)
		throw tcu::NotSupportedError("Precise occlusion queries are not supported");

	m_stateObjects  = new StateObjects(m_context.getDeviceInterface(), m_context, NUM_VERTICES_IN_DRAWCALL + NUM_VERTICES_IN_PARTIALLY_OCCLUDED_DRAWCALL + NUM_VERTICES_IN_OCCLUDER_DRAWCALL, m_testVector.primitiveTopology);

	const vk::VkQueryPoolCreateInfo queryPoolCreateInfo	=
	{
		vk::VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO,	// sType
		DE_NULL,										// pNext
		0u,												// flags
		vk::VK_QUERY_TYPE_OCCLUSION,					// queryType
		NUM_QUERIES_IN_POOL,							// queryCount
		0												// pipelineStatistics (unused for occlusion queries)
	};

	VK_CHECK(vk.createQueryPool(device, &queryPoolCreateInfo, /*pAllocator*/ DE_NULL, &m_queryPool));

	if (m_testVector.queryResultsMode == RESULTS_MODE_COPY || m_testVector.queryResultsMode == RESULTS_MODE_COPY_RESET)
	{
		// One extra slot is reserved when results are written at a destination offset.
		deUint32				numQueriesinPool			= NUM_QUERIES_IN_POOL + (m_testVector.queryResultsDstOffset ? 1 : 0);
		const vk::VkDeviceSize	elementSize					= m_testVector.queryResultSize == RESULT_SIZE_32_BIT ? sizeof(deUint32) : sizeof(deUint64);
		// A zero stride means tightly packed entries (value + optional availability).
		const vk::VkDeviceSize	resultsBufferSize			= m_testVector.queryResultsStride == 0
															? (elementSize + elementSize * m_testVector.queryResultsAvailability) * numQueriesinPool
															: m_testVector.queryResultsStride * numQueriesinPool;
								m_queryPoolResultsBuffer	= Buffer::createAndAlloc(vk, device, BufferCreateInfo(resultsBufferSize, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT), m_context.getDefaultAllocator(), vk::MemoryRequirement::HostVisible);
	}

	const CmdPoolCreateInfo	cmdPoolCreateInfo		(m_context.getUniversalQueueFamilyIndex());
							m_commandPool			= vk::createCommandPool(vk, device, &cmdPoolCreateInfo);
							m_renderCommandBuffer	= recordRender(*m_commandPool);

	if (hasSeparateResetCmdBuf())
	{
		m_queryPoolResetCommandBuffer	= recordQueryPoolReset(*m_commandPool);
	}

	if (hasSeparateCopyCmdBuf())
	{
		m_copyResultsCommandBuffer = recordCopyResults(*m_commandPool);
	}
}
584 
~OcclusionQueryTestInstance(void)585 OcclusionQueryTestInstance::~OcclusionQueryTestInstance (void)
586 {
587 	const vk::VkDevice device = m_context.getDevice();
588 
589 	if (m_stateObjects)
590 		delete m_stateObjects;
591 
592 	if (m_queryPool != DE_NULL)
593 	{
594 		const vk::DeviceInterface& vk = m_context.getDeviceInterface();
595 		vk.destroyQueryPool(device, m_queryPool, /*pAllocator*/ DE_NULL);
596 	}
597 }
598 
iterate(void)599 tcu::TestStatus OcclusionQueryTestInstance::iterate (void)
600 {
601 	const vk::VkQueue			queue		= m_context.getUniversalQueue();
602 	const vk::DeviceInterface&	vk			= m_context.getDeviceInterface();
603 	tcu::TestLog&				log			= m_context.getTestContext().getLog();
604 	std::vector<tcu::Vec4>		vertices	(NUM_VERTICES);
605 
606 	if (m_testVector.queryResultsMode == RESULTS_MODE_GET_RESET)
607 	{
608 		// Check VK_EXT_host_query_reset is supported
609 		m_context.requireDeviceFunctionality("VK_EXT_host_query_reset");
610 		if(m_context.getHostQueryResetFeatures().hostQueryReset == VK_FALSE)
611 			throw tcu::NotSupportedError(std::string("Implementation doesn't support resetting queries from the host").c_str());
612 	}
613 
614 	// 1st triangle
615 	vertices[START_VERTEX + 0] = tcu::Vec4( 0.5,  0.5, 0.5, 1.0);
616 	vertices[START_VERTEX + 1] = tcu::Vec4( 0.5, -0.5, 0.5, 1.0);
617 	vertices[START_VERTEX + 2] = tcu::Vec4(-0.5,  0.5, 0.5, 1.0);
618 	// 2nd triangle - partially occluding the scene
619 	vertices[START_VERTEX_PARTIALLY_OCCLUDED + 0] = tcu::Vec4(-0.5, -0.5, 1.0, 1.0);
620 	vertices[START_VERTEX_PARTIALLY_OCCLUDED + 1] = tcu::Vec4( 0.5, -0.5, 1.0, 1.0);
621 	vertices[START_VERTEX_PARTIALLY_OCCLUDED + 2] = tcu::Vec4(-0.5,  0.5, 1.0, 1.0);
622 	// 3nd triangle - fully occluding the scene
623 	vertices[START_VERTEX_OCCLUDER + 0] = tcu::Vec4( 0.5,  0.5, 1.0, 1.0);
624 	vertices[START_VERTEX_OCCLUDER + 1] = tcu::Vec4( 0.5, -0.5, 1.0, 1.0);
625 	vertices[START_VERTEX_OCCLUDER + 2] = tcu::Vec4(-0.5,  0.5, 1.0, 1.0);
626 
627 	m_stateObjects->setVertices(vk, vertices);
628 
629 	if (hasSeparateResetCmdBuf())
630 	{
631 		const vk::VkSubmitInfo		submitInfoReset =
632 		{
633 			vk::VK_STRUCTURE_TYPE_SUBMIT_INFO,			// VkStructureType			sType;
634 			DE_NULL,									// const void*				pNext;
635 			0u,											// deUint32					waitSemaphoreCount;
636 			DE_NULL,									// const VkSemaphore*		pWaitSemaphores;
637 			(const vk::VkPipelineStageFlags*)DE_NULL,
638 			1u,											// deUint32					commandBufferCount;
639 			&m_queryPoolResetCommandBuffer.get(),		// const VkCommandBuffer*	pCommandBuffers;
640 			0u,											// deUint32					signalSemaphoreCount;
641 			DE_NULL										// const VkSemaphore*		pSignalSemaphores;
642 		};
643 
644 		vk.queueSubmit(queue, 1, &submitInfoReset, DE_NULL);
645 
646 		// Trivially wait for reset to complete. This is to ensure the query pool is in reset state before
647 		// host accesses, so as to not insert any synchronization before capturing the results needed for WAIT_NONE
648 		// variant of test.
649 		VK_CHECK(vk.queueWaitIdle(queue));
650 	}
651 
652 	{
653 		const vk::VkSubmitInfo submitInfoRender =
654 		{
655 			vk::VK_STRUCTURE_TYPE_SUBMIT_INFO,	// VkStructureType			sType;
656 			DE_NULL,							// const void*				pNext;
657 			0,									// deUint32					waitSemaphoreCount;
658 			DE_NULL,							// const VkSemaphore*		pWaitSemaphores;
659 			(const vk::VkPipelineStageFlags*)DE_NULL,
660 			1,									// deUint32					commandBufferCount;
661 			&m_renderCommandBuffer.get(),		// const VkCommandBuffer*	pCommandBuffers;
662 			0,									// deUint32					signalSemaphoreCount;
663 			DE_NULL								// const VkSemaphore*		pSignalSemaphores;
664 		};
665 
666 		if (!hasSeparateResetCmdBuf() && m_testVector.queryResultsMode == RESULTS_MODE_GET_RESET)
667 			vk.resetQueryPool(m_context.getDevice(), m_queryPool, 0, NUM_QUERIES_IN_POOL);
668 		vk.queueSubmit(queue, 1, &submitInfoRender, DE_NULL);
669 	}
670 
671 	if (m_testVector.queryWait == WAIT_QUEUE)
672 	{
673 		VK_CHECK(vk.queueWaitIdle(queue));
674 	}
675 
676 	if (hasSeparateCopyCmdBuf())
677 	{
678 		// In case of WAIT_QUEUE test variant, the previously submitted m_renderCommandBuffer did not
679 		// contain vkCmdCopyQueryResults, so additional cmd buffer is needed.
680 
681 		// In the case of WAIT_NONE or WAIT_QUERY, vkCmdCopyQueryResults is stored in m_renderCommandBuffer.
682 
683 		const vk::VkSubmitInfo submitInfo =
684 		{
685 			vk::VK_STRUCTURE_TYPE_SUBMIT_INFO,	// VkStructureType			sType;
686 			DE_NULL,							// const void*				pNext;
687 			0,									// deUint32					waitSemaphoreCount;
688 			DE_NULL,							// const VkSemaphore*		pWaitSemaphores;
689 			(const vk::VkPipelineStageFlags*)DE_NULL,
690 			1,									// deUint32					commandBufferCount;
691 			&m_copyResultsCommandBuffer.get(),	// const VkCommandBuffer*	pCommandBuffers;
692 			0,									// deUint32					signalSemaphoreCount;
693 			DE_NULL								// const VkSemaphore*		pSignalSemaphores;
694 		};
695 		vk.queueSubmit(queue, 1, &submitInfo, DE_NULL);
696 	}
697 
698 	if (m_testVector.queryResultsMode == RESULTS_MODE_COPY || m_testVector.queryResultsMode == RESULTS_MODE_COPY_RESET)
699 	{
700 		// In case of vkCmdCopyQueryResults is used, test must always wait for it
701 		// to complete before we can read the result buffer.
702 
703 		VK_CHECK(vk.queueWaitIdle(queue));
704 	}
705 
706 	deUint64	queryResults		[NUM_QUERIES_IN_POOL];
707 	deUint64	queryAvailability	[NUM_QUERIES_IN_POOL];
708 
709 	// Allow not ready results only if nobody waited before getting the query results
710 	const bool	allowNotReady		= (m_testVector.queryWait == WAIT_NONE);
711 
712 	captureResults(queryResults, queryAvailability, allowNotReady);
713 
714 	log << tcu::TestLog::Section("OcclusionQueryResults", "Occlusion query results");
715 
716 	logResults(queryResults, queryAvailability);
717 	bool passed = validateResults(queryResults, queryAvailability, allowNotReady, m_testVector.primitiveTopology);
718 
719 	log << tcu::TestLog::EndSection;
720 
721 	if (m_testVector.queryResultsMode != RESULTS_MODE_COPY && m_testVector.queryResultsMode != RESULTS_MODE_COPY_RESET)
722 	{
723 		VK_CHECK(vk.queueWaitIdle(queue));
724 	}
725 
726 		if (passed)
727 	{
728 		return tcu::TestStatus(QP_TEST_RESULT_PASS, "Query result verification passed");
729 	}
730 	return tcu::TestStatus(QP_TEST_RESULT_FAIL, "Query result verification failed");
731 }
732 
hasSeparateResetCmdBuf(void) const733 bool OcclusionQueryTestInstance::hasSeparateResetCmdBuf (void) const
734 {
735 	// Determine if resetting query pool should be performed in separate command buffer
736 	// to avoid race condition between host query access and device query reset.
737 
738 	if (m_testVector.queryResultsMode == RESULTS_MODE_COPY || m_testVector.queryResultsMode == RESULTS_MODE_COPY_RESET)
739 	{
740 		// We copy query results on device, so there is no race condition between
741 		// host and device
742 		return false;
743 	}
744 	if (m_testVector.queryWait == WAIT_QUEUE)
745 	{
746 		// We wait for queue to be complete before accessing query results
747 		return false;
748 	}
749 
750 	// Separate command buffer with reset must be submitted & completed before
751 	// host accesses the query results
752 	return true;
753 }
754 
hasSeparateCopyCmdBuf(void) const755 bool OcclusionQueryTestInstance::hasSeparateCopyCmdBuf (void) const
756 {
757 	// Copy query results must go into separate command buffer, if we want to wait on queue before that
758 	return ((m_testVector.queryResultsMode == RESULTS_MODE_COPY || m_testVector.queryResultsMode == RESULTS_MODE_COPY_RESET)
759 			&& m_testVector.queryWait == WAIT_QUEUE);
760 }
761 
recordQueryPoolReset(vk::VkCommandPool cmdPool)762 vk::Move<vk::VkCommandBuffer> OcclusionQueryTestInstance::recordQueryPoolReset (vk::VkCommandPool cmdPool)
763 {
764 	const vk::VkDevice				device		= m_context.getDevice();
765 	const vk::DeviceInterface&		vk			= m_context.getDeviceInterface();
766 
767 	DE_ASSERT(hasSeparateResetCmdBuf());
768 
769 	vk::Move<vk::VkCommandBuffer>	cmdBuffer	(vk::allocateCommandBuffer(vk, device, cmdPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
770 
771 	beginCommandBuffer(vk, *cmdBuffer);
772 	vk.cmdResetQueryPool(*cmdBuffer, m_queryPool, 0, NUM_QUERIES_IN_POOL);
773 	endCommandBuffer(vk, *cmdBuffer);
774 
775 	return cmdBuffer;
776 }
777 
// Records the main render command buffer: three render passes that capture an
// un-occluded, a partially-occluded and a fully-occluded draw into query slots
// QUERY_INDEX_CAPTURE_ALL / _PARTIALLY_OCCLUDED / _OCCLUDED, plus (depending on
// the test vector) the query pool reset and the device-side result copy.
vk::Move<vk::VkCommandBuffer> OcclusionQueryTestInstance::recordRender (vk::VkCommandPool cmdPool)
{
	const vk::VkDevice				device		= m_context.getDevice();
	const vk::DeviceInterface&		vk			= m_context.getDeviceInterface();

	vk::Move<vk::VkCommandBuffer>	cmdBuffer	(vk::allocateCommandBuffer(vk, device, cmdPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));

	beginCommandBuffer(vk, *cmdBuffer);

	// Move the attachments from UNDEFINED into their working layouts before rendering.
	initialTransitionColor2DImage(vk, *cmdBuffer, m_stateObjects->m_colorAttachmentImage->object(), vk::VK_IMAGE_LAYOUT_GENERAL,
								  vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT);
	initialTransitionDepth2DImage(vk, *cmdBuffer, m_stateObjects->m_DepthImage->object(), vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
								  vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT, vk::VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | vk::VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT);

	// Zero-initialized clear values for the color and depth attachments.
	std::vector<vk::VkClearValue>	renderPassClearValues(2);
	deMemset(&renderPassClearValues[0], 0, static_cast<int>(renderPassClearValues.size()) * sizeof(vk::VkClearValue));

	// Reset the pool inline unless a separate reset command buffer is used, or
	// the host-side vkResetQueryPool path (RESULTS_MODE_GET_RESET) handles it.
	if (!hasSeparateResetCmdBuf() && m_testVector.queryResultsMode != RESULTS_MODE_GET_RESET)
	{
		vk.cmdResetQueryPool(*cmdBuffer, m_queryPool, 0, NUM_QUERIES_IN_POOL);
	}

	// Pass 1: only the un-occluded geometry, captured in QUERY_INDEX_CAPTURE_ALL.
	beginRenderPass(vk, *cmdBuffer, *m_stateObjects->m_renderPass, *m_stateObjects->m_framebuffer, vk::makeRect2D(0, 0, StateObjects::WIDTH, StateObjects::HEIGHT), (deUint32)renderPassClearValues.size(), &renderPassClearValues[0]);

	vk.cmdBindPipeline(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS,	*m_stateObjects->m_pipeline);

	vk::VkBuffer vertexBuffer = m_stateObjects->m_vertexBuffer->object();
	const vk::VkDeviceSize vertexBufferOffset = 0;
	vk.cmdBindVertexBuffers(*cmdBuffer, 0, 1, &vertexBuffer, &vertexBufferOffset);

	// Draw un-occluded geometry
	vk.cmdBeginQuery(*cmdBuffer, m_queryPool, QUERY_INDEX_CAPTURE_ALL, m_testVector.queryControlFlags);
	vk.cmdDraw(*cmdBuffer, NUM_VERTICES_IN_DRAWCALL, 1, START_VERTEX, 0);
	vk.cmdEndQuery(*cmdBuffer, m_queryPool,	QUERY_INDEX_CAPTURE_ALL);

	endRenderPass(vk, *cmdBuffer);

	// Pass 2: a partial occluder is drawn first (outside any query), then the
	// measured draw is captured in QUERY_INDEX_CAPTURE_PARTIALLY_OCCLUDED.
	beginRenderPass(vk, *cmdBuffer, *m_stateObjects->m_renderPass, *m_stateObjects->m_framebuffer, vk::makeRect2D(0, 0, StateObjects::WIDTH, StateObjects::HEIGHT), (deUint32)renderPassClearValues.size(), &renderPassClearValues[0]);

	vk.cmdBindPipeline(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_stateObjects->m_pipeline);

	// Draw un-occluded geometry
	vk.cmdDraw(*cmdBuffer, NUM_VERTICES_IN_DRAWCALL, 1, START_VERTEX, 0);

	// Partially occlude geometry
	vk.cmdDraw(*cmdBuffer, NUM_VERTICES_IN_PARTIALLY_OCCLUDED_DRAWCALL, 1, START_VERTEX_PARTIALLY_OCCLUDED, 0);

	// Draw partially-occluded geometry
	vk.cmdBeginQuery(*cmdBuffer, m_queryPool, QUERY_INDEX_CAPTURE_PARTIALLY_OCCLUDED, m_testVector.queryControlFlags);
	vk.cmdDraw(*cmdBuffer, NUM_VERTICES_IN_DRAWCALL, 1, START_VERTEX, 0);
	vk.cmdEndQuery(*cmdBuffer, m_queryPool, QUERY_INDEX_CAPTURE_PARTIALLY_OCCLUDED);

	endRenderPass(vk, *cmdBuffer);

	// Pass 3: partial and full occluders are drawn first, then the measured
	// draw is captured in QUERY_INDEX_CAPTURE_OCCLUDED (expected to be ~0).
	beginRenderPass(vk, *cmdBuffer, *m_stateObjects->m_renderPass, *m_stateObjects->m_framebuffer, vk::makeRect2D(0, 0, StateObjects::WIDTH, StateObjects::HEIGHT), (deUint32)renderPassClearValues.size(), &renderPassClearValues[0]);

	vk.cmdBindPipeline(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_stateObjects->m_pipeline);

	// Draw un-occluded geometry
	vk.cmdDraw(*cmdBuffer, NUM_VERTICES_IN_DRAWCALL, 1, START_VERTEX, 0);

	// Partially occlude geometry
	vk.cmdDraw(*cmdBuffer, NUM_VERTICES_IN_PARTIALLY_OCCLUDED_DRAWCALL, 1, START_VERTEX_PARTIALLY_OCCLUDED, 0);

	// Occlude geometry
	vk.cmdDraw(*cmdBuffer, NUM_VERTICES_IN_OCCLUDER_DRAWCALL, 1, START_VERTEX_OCCLUDER, 0);

	// Draw occluded geometry
	vk.cmdBeginQuery(*cmdBuffer, m_queryPool, QUERY_INDEX_CAPTURE_OCCLUDED, m_testVector.queryControlFlags);
	vk.cmdDraw(*cmdBuffer, NUM_VERTICES_IN_DRAWCALL, 1, START_VERTEX, 0);
	vk.cmdEndQuery(*cmdBuffer, m_queryPool,	QUERY_INDEX_CAPTURE_OCCLUDED);

	endRenderPass(vk, *cmdBuffer);

	// In copy-reset mode the pool is reset on the device right before copying,
	// so the copied availability values must come out as 0.
	if (m_testVector.queryResultsMode == RESULTS_MODE_COPY_RESET)
	{
		vk.cmdResetQueryPool(*cmdBuffer, m_queryPool, 0, NUM_QUERIES_IN_POOL);
	}

	// Device-side copy of the query results, unless it lives in a separate
	// command buffer (see hasSeparateCopyCmdBuf / recordCopyResults).
	if ((m_testVector.queryResultsMode == RESULTS_MODE_COPY || m_testVector.queryResultsMode == RESULTS_MODE_COPY_RESET)
		&& !hasSeparateCopyCmdBuf())
	{
		vk::VkDeviceSize dstOffset = m_testVector.queryResultsDstOffset ? m_testVector.queryResultsStride : 0u;

		if (m_testVector.queryResultsStride != 0u)
		{
			vk.cmdCopyQueryPoolResults(*cmdBuffer, m_queryPool, 0, NUM_QUERIES_IN_POOL, m_queryPoolResultsBuffer->object(), dstOffset, m_testVector.queryResultsStride, m_queryResultFlags);
		}
		else
		{
			// Stride 0 is exercised by copying each query individually to a
			// manually computed offset (stride 0 would otherwise overwrite).
			const vk::VkDeviceSize	elementSize	= m_testVector.queryResultSize == RESULT_SIZE_32_BIT ? sizeof(deUint32) : sizeof(deUint64);
			const vk::VkDeviceSize	strideSize	= elementSize + elementSize * m_testVector.queryResultsAvailability;

			for (int queryNdx = 0; queryNdx < NUM_QUERIES_IN_POOL; queryNdx++)
			{
				vk.cmdCopyQueryPoolResults(*cmdBuffer, m_queryPool, queryNdx, 1, m_queryPoolResultsBuffer->object(), strideSize * queryNdx, 0, m_queryResultFlags);
			}
		}

		// Make the copied results visible to host reads in captureResults().
		bufferBarrier(vk, *cmdBuffer, m_queryPoolResultsBuffer->object(), vk::VK_ACCESS_TRANSFER_WRITE_BIT, vk::VK_ACCESS_HOST_READ_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT);
	}

	// Prepare the color attachment for readback/transfer after rendering.
	transition2DImage(vk, *cmdBuffer, m_stateObjects->m_colorAttachmentImage->object(), vk::VK_IMAGE_ASPECT_COLOR_BIT, vk::VK_IMAGE_LAYOUT_GENERAL,
					  vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, vk::VK_ACCESS_TRANSFER_READ_BIT,
					  vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT);

	endCommandBuffer(vk, *cmdBuffer);

	return cmdBuffer;
}
888 
// Records the standalone command buffer that copies query results into the
// host-visible results buffer. Used only when hasSeparateCopyCmdBuf() is true,
// i.e. the test waits for the render queue before issuing the copy.
// NOTE(review): this duplicates the inline copy logic in recordRender();
// keep the two paths in sync when changing either.
vk::Move<vk::VkCommandBuffer> OcclusionQueryTestInstance::recordCopyResults (vk::VkCommandPool cmdPool)
{
	const vk::VkDevice				device		= m_context.getDevice();
	const vk::DeviceInterface&		vk			= m_context.getDeviceInterface();

	vk::Move<vk::VkCommandBuffer>	cmdBuffer	(vk::allocateCommandBuffer(vk, device, cmdPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));

	beginCommandBuffer(vk, *cmdBuffer);

	// Optional destination offset of one stride, exercised by *_dstoffset cases.
	vk::VkDeviceSize dstOffset = m_testVector.queryResultsDstOffset ? m_testVector.queryResultsStride : 0u;

	if (m_testVector.queryResultsStride != 0u)
	{
		vk.cmdCopyQueryPoolResults(*cmdBuffer, m_queryPool, 0, NUM_QUERIES_IN_POOL, m_queryPoolResultsBuffer->object(), dstOffset, m_testVector.queryResultsStride, m_queryResultFlags);
	}
	else
	{
		// Stride 0: copy one query at a time to manually spaced offsets, since
		// a single stride-0 copy of all queries would overwrite the same slot.
		const vk::VkDeviceSize	elementSize	= m_testVector.queryResultSize == RESULT_SIZE_32_BIT ? sizeof(deUint32) : sizeof(deUint64);
		const vk::VkDeviceSize	strideSize	= elementSize + elementSize * m_testVector.queryResultsAvailability;

		for (int queryNdx = 0; queryNdx < NUM_QUERIES_IN_POOL; queryNdx++)
		{
			vk.cmdCopyQueryPoolResults(*cmdBuffer, m_queryPool, queryNdx, 1, m_queryPoolResultsBuffer->object(), strideSize * queryNdx, 0, m_queryResultFlags);
		}
	}

	// Make the transfer writes visible to host reads in captureResults().
	bufferBarrier(vk, *cmdBuffer, m_queryPoolResultsBuffer->object(), vk::VK_ACCESS_TRANSFER_WRITE_BIT, vk::VK_ACCESS_HOST_READ_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT);

	endCommandBuffer(vk, *cmdBuffer);

	return cmdBuffer;
}
921 
// Reads back the query results (and, optionally, availability values) for all
// NUM_QUERIES_IN_POOL queries into retResults / retAvailAbility, either via
// vkGetQueryPoolResults (GET modes) or from the host-visible buffer filled by
// vkCmdCopyQueryPoolResults (COPY modes). In GET_RESET mode it additionally
// resets the pool on the host and verifies the post-reset behavior required
// by the spec. allowNotReady permits VK_NOT_READY when nothing waited before.
void OcclusionQueryTestInstance::captureResults (deUint64* retResults, deUint64* retAvailAbility, bool allowNotReady)
{
	const vk::VkDevice			device			= m_context.getDevice();
	const vk::DeviceInterface&	vk				= m_context.getDeviceInterface();
	const vk::VkDeviceSize		elementSize		= m_testVector.queryResultSize == RESULT_SIZE_32_BIT ? sizeof(deUint32) : sizeof(deUint64);
	// With stride 0 the results were packed manually at elementSize (doubled
	// when availability is requested); otherwise the configured stride applies.
	const vk::VkDeviceSize		resultsSize		= m_testVector.queryResultsStride == 0
												? elementSize + elementSize * m_testVector.queryResultsAvailability
												: m_testVector.queryResultsStride;
	std::vector<deUint8>		resultsBuffer	(static_cast<size_t>(resultsSize * NUM_QUERIES_IN_POOL));

	if (m_testVector.queryResultsMode == RESULTS_MODE_GET || m_testVector.queryResultsMode == RESULTS_MODE_GET_RESET)
	{
		vk::VkResult queryResult = vk.getQueryPoolResults(device, m_queryPool, 0, NUM_QUERIES_IN_POOL, resultsBuffer.size(), &resultsBuffer[0], m_testVector.queryResultsStride, m_queryResultFlags);
		if (queryResult == vk::VK_NOT_READY && !allowNotReady)
		{
			TCU_FAIL("getQueryPoolResults returned VK_NOT_READY, but results should be already available.");
		}
		else
		{
			VK_CHECK(queryResult);
		}
	}
	else if (m_testVector.queryResultsMode == RESULTS_MODE_COPY || m_testVector.queryResultsMode == RESULTS_MODE_COPY_RESET)
	{
		// Results were copied on the device; read them from the mapped buffer,
		// skipping the one-stride dstOffset when the test vector used one.
		const vk::Allocation& allocation = m_queryPoolResultsBuffer->getBoundMemory();
		const deUint8* allocationData = static_cast<deUint8*>(allocation.getHostPtr());
		const deInt32 indexData = m_testVector.queryResultsDstOffset ? (deInt32)m_testVector.queryResultsStride : 0u;

		// Ensure device writes are visible to the host before reading.
		vk::invalidateAlloc(vk, device, allocation);

		deMemcpy(&resultsBuffer[0], &allocationData[indexData], resultsBuffer.size());
	}

	// Decode the raw buffer: per slot, the result element optionally followed
	// by an availability element of the same width.
	for (int queryNdx = 0; queryNdx < NUM_QUERIES_IN_POOL; queryNdx++)
	{
		const void* srcPtr = &resultsBuffer[queryNdx * static_cast<size_t>(resultsSize)];

		if (m_testVector.queryResultSize == RESULT_SIZE_32_BIT)
		{
			const deUint32* srcPtrTyped = static_cast<const deUint32*>(srcPtr);
			retResults[queryNdx]		= *srcPtrTyped;
			if (m_testVector.queryResultsAvailability)
			{
				retAvailAbility[queryNdx] = *(srcPtrTyped + 1);
			}
		}
		else if (m_testVector.queryResultSize == RESULT_SIZE_64_BIT)
		{
			const deUint64* srcPtrTyped = static_cast<const deUint64*>(srcPtr);
			retResults[queryNdx]		= *srcPtrTyped;

			if (m_testVector.queryResultsAvailability)
			{
				retAvailAbility[queryNdx] = *(srcPtrTyped + 1);
			}
		}
		else
		{
			TCU_FAIL("Wrong m_testVector.queryResultSize");
		}
	}

	if (m_testVector.queryResultsMode == RESULTS_MODE_GET_RESET)
	{
		// Reset on the host, then query again: results must be untouched and
		// availability must read as 0 for the now-unavailable queries.
		vk.resetQueryPool(device, m_queryPool, 0, NUM_QUERIES_IN_POOL);

		vk::VkResult queryResult = vk.getQueryPoolResults(device, m_queryPool, 0, NUM_QUERIES_IN_POOL, resultsBuffer.size(), &resultsBuffer[0], m_testVector.queryResultsStride, m_queryResultFlags);

		if (queryResult != vk::VK_NOT_READY)
		{
			TCU_FAIL("getQueryPoolResults did not return VK_NOT_READY");
		}

		/* From Vulkan spec:
		 *
		 * If VK_QUERY_RESULT_WAIT_BIT and VK_QUERY_RESULT_PARTIAL_BIT are both not set then no result values are written to pData
		 * for queries that are in the unavailable state at the time of the call, and vkGetQueryPoolResults returns VK_NOT_READY.
		 * However, availability state is still written to pData for those queries if VK_QUERY_RESULT_WITH_AVAILABILITY_BIT is set.
		 */
		for (int queryNdx = 0; queryNdx < NUM_QUERIES_IN_POOL; queryNdx++)
		{
			const void* srcPtr = &resultsBuffer[queryNdx * static_cast<size_t>(resultsSize)];
			if (m_testVector.queryResultSize == RESULT_SIZE_32_BIT)
			{
				const deUint32* srcPtrTyped = static_cast<const deUint32*>(srcPtr);
				if (*srcPtrTyped != retResults[queryNdx])
				{
					TCU_FAIL("getQueryPoolResults returned modified values");
				}

				if (m_testVector.queryResultsAvailability && *(srcPtrTyped + 1) != 0)
				{
					TCU_FAIL("resetQueryPool did not disable availability bit");
				}
			}
			else if (m_testVector.queryResultSize == RESULT_SIZE_64_BIT)
			{
				const deUint64* srcPtrTyped = static_cast<const deUint64*>(srcPtr);
				if (*srcPtrTyped != retResults[queryNdx])
				{
					TCU_FAIL("getQueryPoolResults returned modified values");
				}

				if (m_testVector.queryResultsAvailability && *(srcPtrTyped + 1) != 0)
				{
					TCU_FAIL("resetQueryPool did not disable availability bit");
				}
			}
			else
			{
				TCU_FAIL("Wrong m_testVector.queryResultSize");
			}
		}
	}
}
1037 
logResults(const deUint64 * results,const deUint64 * availability)1038 void OcclusionQueryTestInstance::logResults (const deUint64* results, const deUint64* availability)
1039 {
1040 	tcu::TestLog& log = m_context.getTestContext().getLog();
1041 
1042 	for (int ndx = 0; ndx < NUM_QUERIES_IN_POOL; ++ndx)
1043 	{
1044 		if (!m_testVector.queryResultsAvailability)
1045 		{
1046 			log << tcu::TestLog::Message << "query[slot == " << ndx << "] result == " << results[ndx] << tcu::TestLog::EndMessage;
1047 		}
1048 		else
1049 		{
1050 			log << tcu::TestLog::Message << "query[slot == " << ndx << "] result == " << results[ndx] << ", availability == " << availability[ndx] << tcu::TestLog::EndMessage;
1051 		}
1052 	}
1053 }
1054 
// Validates the captured query results against per-slot expectations derived
// from the primitive topology and test vector. Returns true when all slots
// pass; failures are logged and accumulated rather than aborting early.
bool OcclusionQueryTestInstance::validateResults (const deUint64* results , const deUint64* availability, bool allowUnavailable, vk::VkPrimitiveTopology primitiveTopology)
{
	bool passed			= true;
	tcu::TestLog& log	= m_context.getTestContext().getLog();

	for (int queryNdx = 0; queryNdx < NUM_QUERIES_IN_POOL; ++queryNdx)
	{
		deUint64 expectedValueMin = 0;
		deUint64 expectedValueMax = 0;

		if (m_testVector.queryResultsMode == RESULTS_MODE_COPY_RESET)
		{
			DE_ASSERT(m_testVector.queryResultsAvailability);
			if (availability[queryNdx] != 0)
			{
				// In copy-reset mode results should always be unavailable due to the reset command issued before copying results.
				log << tcu::TestLog::Message << "query results availability was nonzero for index "
					<< queryNdx << " when resetting the query before copying results"
					<< tcu::TestLog::EndMessage;
				passed = false;
			}

			// Not interested in the actual results.
			continue;
		}
		else if (m_testVector.queryResultsAvailability && availability[queryNdx] == 0)
		{
			// query result was not available
			if (!allowUnavailable)
			{
				log << tcu::TestLog::Message << "query results availability was 0 for index "
					<< queryNdx << ", expected any value greater than 0." << tcu::TestLog::EndMessage;
				passed = false;
				continue;
			}
		}
		else
		{
			// query is available, so expect proper result values
			if (primitiveTopology == vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST)
			{
				// Points are 1 pixel each, so expected sample counts are exact.
				switch (queryNdx)
				{
					case QUERY_INDEX_CAPTURE_OCCLUDED:
						expectedValueMin = 0;
						expectedValueMax = 0;
						break;
					case QUERY_INDEX_CAPTURE_PARTIALLY_OCCLUDED:
						expectedValueMin = 1;
						expectedValueMax = 1;
						break;
					case QUERY_INDEX_CAPTURE_ALL:
						expectedValueMin = NUM_VERTICES_IN_DRAWCALL;
						expectedValueMax = NUM_VERTICES_IN_DRAWCALL;
						break;
				}
			}
			else if (primitiveTopology == vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST)
			{
				switch (queryNdx)
				{
					case QUERY_INDEX_CAPTURE_OCCLUDED:
						expectedValueMin = 0;
						expectedValueMax = 0;
						break;
					case QUERY_INDEX_CAPTURE_PARTIALLY_OCCLUDED:
					case QUERY_INDEX_CAPTURE_ALL:
						{
							// Triangle covers half of a quarter-screen quad; allow a
							// small rasterization tolerance around the exact area
							// (halved again when the shader discards every other pixel).
							const int primWidth		= StateObjects::WIDTH  / 2;
							const int primHeight	= StateObjects::HEIGHT / 2;
							const int primArea		= primWidth * primHeight / 2;

							if (m_testVector.discardHalf)
							{
								expectedValueMin	= (int)(0.95f * primArea * 0.5f);
								expectedValueMax	= (int)(1.05f * primArea * 0.5f);
							}
							else
							{
								expectedValueMin	= (int)(0.97f * primArea);
								expectedValueMax	= (int)(1.03f * primArea);
							}
						}
				}
			}
			else
			{
				TCU_FAIL("Unsupported primitive topology");
			}
		}

		// Precise queries (and the expected-zero case) must land in [min, max];
		// conservative queries only need to report a non-zero count.
		if ((m_testVector.queryControlFlags & vk::VK_QUERY_CONTROL_PRECISE_BIT) || (expectedValueMin == 0 && expectedValueMax == 0))
		{
			// require precise value
			if (results[queryNdx] < expectedValueMin || results[queryNdx] > expectedValueMax)
			{
				log << tcu::TestLog::Message << "wrong value of query for index "
					<< queryNdx << ", expected the value minimum of " << expectedValueMin << ", maximum of " << expectedValueMax << " got "
					<< results[queryNdx] << "." << tcu::TestLog::EndMessage;
				passed = false;
			}
		}
		else
		{
			// require imprecise value greater than 0
			if (results[queryNdx] == 0)
			{
				log << tcu::TestLog::Message << "wrong value of query for index "
					<< queryNdx << ", expected any non-zero value, got "
					<< results[queryNdx] << "." << tcu::TestLog::EndMessage;
				passed = false;
			}
		}
	}
	return passed;
}
1171 
1172 template<class Instance>
1173 class QueryPoolOcclusionTest : public vkt::TestCase
1174 {
1175 public:
QueryPoolOcclusionTest(tcu::TestContext & context,const char * name,const char * description,const OcclusionQueryTestVector & testVector)1176 	QueryPoolOcclusionTest (tcu::TestContext &context, const char *name, const char *description, const OcclusionQueryTestVector& testVector)
1177 		: TestCase			(context, name, description)
1178 		, m_testVector		(testVector)
1179 	{
1180 	}
1181 private:
createInstance(vkt::Context & context) const1182 	vkt::TestInstance* createInstance (vkt::Context& context) const
1183 	{
1184 		return new Instance(context, m_testVector);
1185 	}
1186 
initPrograms(vk::SourceCollections & programCollection) const1187 	void initPrograms(vk::SourceCollections& programCollection) const
1188 	{
1189 		const char* const discard =
1190 			"	if ((int(gl_FragCoord.x) % 2) == (int(gl_FragCoord.y) % 2))\n"
1191 			"		discard;\n";
1192 
1193 		const std::string fragSrc = std::string(
1194 			"#version 400\n"
1195 			"layout(location = 0) out vec4 out_FragColor;\n"
1196 			"void main()\n"
1197 			"{\n"
1198 			"	out_FragColor = vec4(0.07, 0.48, 0.75, 1.0);\n")
1199 			+ std::string(m_testVector.discardHalf ? discard : "")
1200 			+ "}\n";
1201 
1202 		programCollection.glslSources.add("frag") << glu::FragmentSource(fragSrc.c_str());
1203 
1204 		programCollection.glslSources.add("vert") << glu::VertexSource("#version 430\n"
1205 																		 "layout(location = 0) in vec4 in_Position;\n"
1206 																		 "out gl_PerVertex { vec4 gl_Position; float gl_PointSize; };\n"
1207 																		 "void main() {\n"
1208 																		 "	gl_Position  = in_Position;\n"
1209 																		 "	gl_PointSize = 1.0;\n"
1210 																		 "}\n");
1211 	}
1212 
1213 	OcclusionQueryTestVector m_testVector;
1214 };
1215 
1216 } //anonymous
1217 
// Group constructor; child test cases are added later in init().
QueryPoolOcclusionTests::QueryPoolOcclusionTests (tcu::TestContext &testCtx)
	: TestCaseGroup(testCtx, "occlusion_query", "Tests for occlusion queries")
{
	/* Left blank on purpose */
}
1223 
// Nothing to clean up explicitly here.
QueryPoolOcclusionTests::~QueryPoolOcclusionTests (void)
{
	/* Left blank on purpose */
}
1228 
init(void)1229 void QueryPoolOcclusionTests::init (void)
1230 {
1231 	OcclusionQueryTestVector baseTestVector;
1232 	baseTestVector.queryControlFlags		= 0;
1233 	baseTestVector.queryResultSize			= RESULT_SIZE_64_BIT;
1234 	baseTestVector.queryWait				= WAIT_QUEUE;
1235 	baseTestVector.queryResultsMode			= RESULTS_MODE_GET;
1236 	baseTestVector.queryResultsStride		= sizeof(deUint64);
1237 	baseTestVector.queryResultsAvailability = false;
1238 	baseTestVector.primitiveTopology		= vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST;
1239 	baseTestVector.discardHalf				= false;
1240 
1241 	//Basic tests
1242 	{
1243 		OcclusionQueryTestVector testVector = baseTestVector;
1244 		testVector.queryControlFlags = 0;
1245 		addChild(new QueryPoolOcclusionTest<BasicOcclusionQueryTestInstance>(m_testCtx,	"basic_conservative",	"draw with conservative occlusion query",	testVector));
1246 		testVector.queryControlFlags = vk::VK_QUERY_CONTROL_PRECISE_BIT;
1247 		addChild(new QueryPoolOcclusionTest<BasicOcclusionQueryTestInstance>(m_testCtx,	"basic_precise",		"draw with precise occlusion query",		testVector));
1248 	}
1249 
1250 	// Functional test
1251 	{
1252 		const vk::VkQueryControlFlags	controlFlags[]		= { 0,					vk::VK_QUERY_CONTROL_PRECISE_BIT	};
1253 		const char* const				controlFlagsStr[]	= { "conservative",		"precise"							};
1254 
1255 		for (int controlFlagIdx = 0; controlFlagIdx < DE_LENGTH_OF_ARRAY(controlFlags); ++controlFlagIdx)
1256 		{
1257 
1258 			const vk::VkPrimitiveTopology	primitiveTopology[]		= { vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST };
1259 			const char* const				primitiveTopologyStr[]	= { "points", "triangles" };
1260 			for (int primitiveTopologyIdx = 0; primitiveTopologyIdx < DE_LENGTH_OF_ARRAY(primitiveTopology); ++primitiveTopologyIdx)
1261 			{
1262 
1263 				const OcclusionQueryResultSize	resultSize[]	= { RESULT_SIZE_32_BIT, RESULT_SIZE_64_BIT };
1264 				const char* const				resultSizeStr[] = { "32",				"64" };
1265 
1266 				for (int resultSizeIdx = 0; resultSizeIdx < DE_LENGTH_OF_ARRAY(resultSize); ++resultSizeIdx)
1267 				{
1268 
1269 					const OcclusionQueryWait	wait[]		= { WAIT_QUEUE, WAIT_QUERY };
1270 					const char* const			waitStr[]	= { "queue",	"query" };
1271 
1272 					for (int waitIdx = 0; waitIdx < DE_LENGTH_OF_ARRAY(wait); ++waitIdx)
1273 					{
1274 						const OcclusionQueryResultsMode	resultsMode[]		= { RESULTS_MODE_GET,	RESULTS_MODE_GET_RESET,	RESULTS_MODE_COPY,	RESULTS_MODE_COPY_RESET };
1275 						const char* const				resultsModeStr[]	= { "get",				"get_reset",			"copy",				"copy_reset" };
1276 
1277 						for (int resultsModeIdx = 0; resultsModeIdx < DE_LENGTH_OF_ARRAY(resultsMode); ++resultsModeIdx)
1278 						{
1279 							if (wait[waitIdx] == WAIT_QUERY && resultsMode[resultsModeIdx] == RESULTS_MODE_GET_RESET)
1280 							{
1281 								/* In RESULTS_MODE_GET_RESET we are going to reset the queries and get the query pool results again
1282 								 * without issueing them, in order to check the availability field. In Vulkan spec it mentions that
1283 								 * vkGetQueryPoolResults may not return in finite time. Because of that, we skip those tests.
1284 								 */
1285 								continue;
1286 							}
1287 
1288 							const bool			testAvailability[]		= { false, true };
1289 							const char* const	testAvailabilityStr[]	= { "without", "with"};
1290 
1291 							for (int testAvailabilityIdx = 0; testAvailabilityIdx < DE_LENGTH_OF_ARRAY(testAvailability); ++testAvailabilityIdx)
1292 							{
1293 								if (resultsMode[resultsModeIdx] == RESULTS_MODE_COPY_RESET && (! testAvailability[testAvailabilityIdx]))
1294 								{
1295 									/* In RESULTS_MODE_COPY_RESET mode we will reset queries and make sure the availability flag is
1296 									 * set to zero. It does not make sense to run in this mode without obtaining the availability
1297 									 * flag.
1298 									 */
1299 									continue;
1300 								}
1301 
1302 								const bool			discardHalf[]		= { false, true };
1303 								const char* const	discardHalfStr[]	= { "", "_discard" };
1304 
1305 								for (int discardHalfIdx = 0; discardHalfIdx < DE_LENGTH_OF_ARRAY(discardHalf); ++discardHalfIdx)
1306 								{
1307 									OcclusionQueryTestVector testVector			= baseTestVector;
1308 									testVector.queryControlFlags				= controlFlags[controlFlagIdx];
1309 									testVector.queryResultSize					= resultSize[resultSizeIdx];
1310 									testVector.queryWait						= wait[waitIdx];
1311 									testVector.queryResultsMode					= resultsMode[resultsModeIdx];
1312 									testVector.queryResultsStride				= testVector.queryResultSize == RESULT_SIZE_32_BIT ? sizeof(deUint32) : sizeof(deUint64);
1313 									testVector.queryResultsAvailability			= testAvailability[testAvailabilityIdx];
1314 									testVector.primitiveTopology				= primitiveTopology[primitiveTopologyIdx];
1315 									testVector.discardHalf						= discardHalf[discardHalfIdx];
1316 
1317 									if (testVector.discardHalf && testVector.primitiveTopology == vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST)
1318 										continue; // Discarding half of the pixels in fragment shader doesn't make sense with one-pixel-sized points.
1319 
1320 									if (testVector.queryResultsAvailability)
1321 									{
1322 										testVector.queryResultsStride *= 2;
1323 									}
1324 
1325 									std::ostringstream testName;
1326 									std::ostringstream testDescr;
1327 
1328 									testName << resultsModeStr[resultsModeIdx] << "_results"
1329 											 << "_" << controlFlagsStr[controlFlagIdx]
1330 											 << "_size_" << resultSizeStr[resultSizeIdx]
1331 											 << "_wait_" << waitStr[waitIdx]
1332 											 << "_" << testAvailabilityStr[testAvailabilityIdx] << "_availability"
1333 											 << "_draw_" <<  primitiveTopologyStr[primitiveTopologyIdx]
1334 											 << discardHalfStr[discardHalfIdx];
1335 
1336 									testDescr << "draw occluded " << primitiveTopologyStr[primitiveTopologyIdx]
1337 											  << "with " << controlFlagsStr[controlFlagIdx] << ", "
1338 											  << resultsModeStr[resultsModeIdx] << " results "
1339 											  << testAvailabilityStr[testAvailabilityIdx] << " availability bit as "
1340 											  << resultSizeStr[resultSizeIdx] << "bit variables,"
1341 											  << (testVector.discardHalf ? " discarding half of the fragments," : "")
1342 											  << "wait for results on" << waitStr[waitIdx];
1343 
1344 									addChild(new QueryPoolOcclusionTest<OcclusionQueryTestInstance>(m_testCtx, testName.str().c_str(), testDescr.str().c_str(), testVector));
1345 								}
1346 							}
1347 						}
1348 					}
1349 				}
1350 			}
1351 		}
1352 	}
1353 	// Test different strides
1354 	{
1355 		const OcclusionQueryResultsMode	resultsMode[]		= { RESULTS_MODE_GET,	RESULTS_MODE_GET_RESET,	RESULTS_MODE_COPY,	RESULTS_MODE_COPY_RESET	};
1356 		const char* const				resultsModeStr[]	= { "get",				"get_reset",			"copy",				"copy_reset"			};
1357 
1358 		for (int resultsModeIdx = 0; resultsModeIdx < DE_LENGTH_OF_ARRAY(resultsMode); ++resultsModeIdx)
1359 		{
1360 			const OcclusionQueryResultSize	resultSizes[]	= { RESULT_SIZE_32_BIT, RESULT_SIZE_64_BIT };
1361 			const char* const				resultSizeStr[] = { "32", "64" };
1362 
1363 			const deBool		copyQueryDstOffset[] =		{ DE_TRUE,		DE_FALSE };
1364 			const char *const	copyQueryDstOffsetStr[] =	{ "_dstoffset",	""};
1365 
1366 			const bool			testAvailability[]		= { false,		true	};
1367 			const char* const	testAvailabilityStr[]	= { "without",	"with"	};
1368 
1369 			for (int testAvailabilityIdx = 0; testAvailabilityIdx < DE_LENGTH_OF_ARRAY(testAvailability); ++testAvailabilityIdx)
1370 			{
1371 				if (resultsMode[resultsModeIdx] == RESULTS_MODE_COPY_RESET && (! testAvailability[testAvailabilityIdx]))
1372 				{
1373 					/* In RESULTS_MODE_COPY_RESET mode we will reset queries and make sure the availability flag is set to zero. It
1374 					 * does not make sense to run in this mode without obtaining the availability flag.
1375 					 */
1376 					continue;
1377 				}
1378 
1379 				for (int resultSizeIdx = 0; resultSizeIdx < DE_LENGTH_OF_ARRAY(resultSizes); ++resultSizeIdx)
1380 				{
1381 					const vk::VkDeviceSize resultSize	= (resultSizes[resultSizeIdx] == RESULT_SIZE_32_BIT ? sizeof(deUint32) : sizeof(deUint64));
1382 
1383 					// \todo [2015-12-18 scygan] Ensure only stride values aligned to resultSize are allowed. Otherwise test should be extended.
1384 					const vk::VkDeviceSize strides[]	=
1385 					{
1386 						0u,
1387 						1 * resultSize,
1388 						2 * resultSize,
1389 						3 * resultSize,
1390 						4 * resultSize,
1391 						5 * resultSize,
1392 						13 * resultSize,
1393 						1024 * resultSize
1394 					};
1395 
1396 					for (int dstOffsetIdx = 0; dstOffsetIdx < DE_LENGTH_OF_ARRAY(copyQueryDstOffset); dstOffsetIdx++)
1397 					{
1398 						for (int strideIdx = 0; strideIdx < DE_LENGTH_OF_ARRAY(strides); strideIdx++)
1399 						{
1400 							OcclusionQueryTestVector testVector		= baseTestVector;
1401 							testVector.queryResultsMode				= resultsMode[resultsModeIdx];
1402 							testVector.queryResultSize				= resultSizes[resultSizeIdx];
1403 							testVector.queryResultsAvailability		= testAvailability[testAvailabilityIdx];
1404 							testVector.queryResultsStride			= strides[strideIdx];
1405 							testVector.queryResultsDstOffset		= copyQueryDstOffset[dstOffsetIdx];
1406 
1407 							const vk::VkDeviceSize elementSize		= (testVector.queryResultsAvailability ? resultSize * 2 : resultSize);
1408 
1409 							if (elementSize > testVector.queryResultsStride && strides[strideIdx] != 0)
1410 							{
1411 								continue;
1412 							}
1413 
1414 							if (strides[strideIdx] == 0)
1415 							{
1416 								// Due to the nature of the test, the dstOffset is tested automatically when stride size is 0.
1417 								if (testVector.queryResultsDstOffset)
1418 								{
1419 									continue;
1420 								}
1421 
1422 								// We are testing only VkCmdCopyQueryPoolResults with stride 0.
1423 								if (testVector.queryResultsMode != RESULTS_MODE_COPY)
1424 								{
1425 									continue;
1426 								}
1427 							}
1428 
1429 							std::ostringstream testName;
1430 							std::ostringstream testDescr;
1431 
1432 							testName << resultsModeStr[resultsModeIdx]
1433 									 << "_results_size_" << resultSizeStr[resultSizeIdx]
1434 									 << "_stride_" << strides[strideIdx]
1435 									 << "_" << testAvailabilityStr[testAvailabilityIdx] << "_availability"
1436 									 << copyQueryDstOffsetStr[dstOffsetIdx];
1437 
1438 							testDescr << resultsModeStr[resultsModeIdx] << " results "
1439 									  << testAvailabilityStr[testAvailabilityIdx] << " availability bit as "
1440 									  << resultSizeStr[resultSizeIdx] << "bit variables, with stride" << strides[strideIdx];
1441 
1442 							addChild(new QueryPoolOcclusionTest<OcclusionQueryTestInstance>(m_testCtx, testName.str().c_str(), testDescr.str().c_str(), testVector));
1443 						}
1444 					}
1445 				}
1446 			}
1447 		}
1448 
1449 	}
1450 }
1451 
1452 } //QueryPool
1453 } //vkt
1454