• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*-------------------------------------------------------------------------
2  * Vulkan Conformance Tests
3  * ------------------------
4  *
5  * Copyright (c) 2015 Google Inc.
6  *
7  * Licensed under the Apache License, Version 2.0 (the "License");
8  * you may not use this file except in compliance with the License.
9  * You may obtain a copy of the License at
10  *
11  *      http://www.apache.org/licenses/LICENSE-2.0
12  *
13  * Unless required by applicable law or agreed to in writing, software
14  * distributed under the License is distributed on an "AS IS" BASIS,
15  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16  * See the License for the specific language governing permissions and
17  * limitations under the License.
18  *
19  *//*!
20  * \file
21  * \brief Pipeline barrier tests
22  *//*--------------------------------------------------------------------*/
23 
24 #include "vktMemoryPipelineBarrierTests.hpp"
25 
26 #include "vktTestCaseUtil.hpp"
27 
28 #include "vkDefs.hpp"
29 #include "vkPlatform.hpp"
30 #include "vkRefUtil.hpp"
31 #include "vkQueryUtil.hpp"
32 #include "vkMemUtil.hpp"
33 #include "vkTypeUtil.hpp"
34 #include "vkPrograms.hpp"
35 #include "vkCmdUtil.hpp"
36 #include "vkObjUtil.hpp"
37 
38 #include "tcuMaybe.hpp"
39 #include "tcuTextureUtil.hpp"
40 #include "tcuTestLog.hpp"
41 #include "tcuResultCollector.hpp"
42 #include "tcuTexture.hpp"
43 #include "tcuImageCompare.hpp"
44 
45 #include "deUniquePtr.hpp"
46 #include "deStringUtil.hpp"
47 #include "deRandom.hpp"
48 
49 #include "deInt32.h"
50 #include "deMath.h"
51 #include "deMemory.h"
52 
53 #include <map>
54 #include <set>
55 #include <sstream>
56 #include <string>
57 #include <vector>
58 
59 using tcu::TestLog;
60 using tcu::Maybe;
61 
62 using de::MovePtr;
63 
64 using std::string;
65 using std::vector;
66 using std::map;
67 using std::set;
68 using std::pair;
69 
70 using tcu::IVec2;
71 using tcu::UVec2;
72 using tcu::UVec4;
73 using tcu::Vec4;
74 using tcu::ConstPixelBufferAccess;
75 using tcu::PixelBufferAccess;
76 using tcu::TextureFormat;
77 using tcu::TextureLevel;
78 
79 namespace vkt
80 {
81 namespace memory
82 {
83 namespace
84 {
85 
// Size constants used by the pipeline barrier tests.
// Note: macro bodies are parenthesized so they stay correct inside larger
// expressions (the previous 1024*1024 broke e.g. "x / ONE_MEGABYTE").
#define ONE_MEGABYTE						(1024*1024)
#define DEFAULT_VERTEX_BUFFER_STRIDE		2
#define ALTERNATIVE_VERTEX_BUFFER_STRIDE	4

enum
{
	MAX_UNIFORM_BUFFER_SIZE = 1024,
	MAX_STORAGE_BUFFER_SIZE = (1<<28),	// 256 MiB
	MAX_SIZE = (128 * 1024)				// 128 KiB
};
96 
97 // \todo [mika] Add to utilities
// \todo [mika] Add to utilities
// Integer division rounding towards positive infinity.
// Assumes a >= 0 and b > 0.
template<typename T>
T divRoundUp (const T& a, const T& b)
{
	const T quotient	= a / b;
	const T remainder	= a % b;

	return (remainder == T(0)) ? quotient : quotient + T(1);
}
103 
// Bitmask describing how the tested memory object may be used. All values
// are single bits, so they can be combined with the operator|/operator&
// overloads defined later in this file.
enum Usage
{
	// Mapped host read and write
	USAGE_HOST_READ = (0x1u<<0),
	USAGE_HOST_WRITE = (0x1u<<1),

	// Copy and other transfer operations
	USAGE_TRANSFER_SRC = (0x1u<<2),
	USAGE_TRANSFER_DST = (0x1u<<3),

	// Buffer usage flags
	USAGE_INDEX_BUFFER = (0x1u<<4),
	USAGE_VERTEX_BUFFER = (0x1u<<5),

	USAGE_UNIFORM_BUFFER = (0x1u<<6),
	USAGE_STORAGE_BUFFER = (0x1u<<7),

	USAGE_UNIFORM_TEXEL_BUFFER = (0x1u<<8),
	USAGE_STORAGE_TEXEL_BUFFER = (0x1u<<9),

	// \todo [2016-03-09 mika] This is probably almost impossible to do
	USAGE_INDIRECT_BUFFER = (0x1u<<10),

	// Texture usage flags
	USAGE_SAMPLED_IMAGE = (0x1u<<11),
	USAGE_STORAGE_IMAGE = (0x1u<<12),
	USAGE_COLOR_ATTACHMENT = (0x1u<<13),
	USAGE_INPUT_ATTACHMENT = (0x1u<<14),
	USAGE_DEPTH_STENCIL_ATTACHMENT = (0x1u<<15),
};
134 
supportsDeviceBufferWrites(Usage usage)135 bool supportsDeviceBufferWrites (Usage usage)
136 {
137 	if (usage & USAGE_TRANSFER_DST)
138 		return true;
139 
140 	if (usage & USAGE_STORAGE_BUFFER)
141 		return true;
142 
143 	if (usage & USAGE_STORAGE_TEXEL_BUFFER)
144 		return true;
145 
146 	return false;
147 }
148 
supportsDeviceImageWrites(Usage usage)149 bool supportsDeviceImageWrites (Usage usage)
150 {
151 	if (usage & USAGE_TRANSFER_DST)
152 		return true;
153 
154 	if (usage & USAGE_STORAGE_IMAGE)
155 		return true;
156 
157 	if (usage & USAGE_COLOR_ATTACHMENT)
158 		return true;
159 
160 	return false;
161 }
162 
// Sequential access enums
// Same set as vk::VkAccessFlagBits, but numbered 0..N-1 instead of being bit
// masks, so the values can be used e.g. as array indices.
enum Access
{
	ACCESS_INDIRECT_COMMAND_READ_BIT = 0,
	ACCESS_INDEX_READ_BIT,
	ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
	ACCESS_UNIFORM_READ_BIT,
	ACCESS_INPUT_ATTACHMENT_READ_BIT,
	ACCESS_SHADER_READ_BIT,
	ACCESS_SHADER_WRITE_BIT,
	ACCESS_COLOR_ATTACHMENT_READ_BIT,
	ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
	ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT,
	ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
	ACCESS_TRANSFER_READ_BIT,
	ACCESS_TRANSFER_WRITE_BIT,
	ACCESS_HOST_READ_BIT,
	ACCESS_HOST_WRITE_BIT,
	ACCESS_MEMORY_READ_BIT,
	ACCESS_MEMORY_WRITE_BIT,

	ACCESS_LAST	// Number of values; also returned for unknown flags
};
186 
accessFlagToAccess(vk::VkAccessFlagBits flag)187 Access accessFlagToAccess (vk::VkAccessFlagBits flag)
188 {
189 	switch (flag)
190 	{
191 	case vk::VK_ACCESS_INDIRECT_COMMAND_READ_BIT:			return ACCESS_INDIRECT_COMMAND_READ_BIT;
192 	case vk::VK_ACCESS_INDEX_READ_BIT:						return ACCESS_INDEX_READ_BIT;
193 	case vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT:			return ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
194 	case vk::VK_ACCESS_UNIFORM_READ_BIT:					return ACCESS_UNIFORM_READ_BIT;
195 	case vk::VK_ACCESS_INPUT_ATTACHMENT_READ_BIT:			return ACCESS_INPUT_ATTACHMENT_READ_BIT;
196 	case vk::VK_ACCESS_SHADER_READ_BIT:						return ACCESS_SHADER_READ_BIT;
197 	case vk::VK_ACCESS_SHADER_WRITE_BIT:					return ACCESS_SHADER_WRITE_BIT;
198 	case vk::VK_ACCESS_COLOR_ATTACHMENT_READ_BIT:			return ACCESS_COLOR_ATTACHMENT_READ_BIT;
199 	case vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT:			return ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
200 	case vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT:	return ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;
201 	case vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT:	return ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
202 	case vk::VK_ACCESS_TRANSFER_READ_BIT:					return ACCESS_TRANSFER_READ_BIT;
203 	case vk::VK_ACCESS_TRANSFER_WRITE_BIT:					return ACCESS_TRANSFER_WRITE_BIT;
204 	case vk::VK_ACCESS_HOST_READ_BIT:						return ACCESS_HOST_READ_BIT;
205 	case vk::VK_ACCESS_HOST_WRITE_BIT:						return ACCESS_HOST_WRITE_BIT;
206 	case vk::VK_ACCESS_MEMORY_READ_BIT:						return ACCESS_MEMORY_READ_BIT;
207 	case vk::VK_ACCESS_MEMORY_WRITE_BIT:					return ACCESS_MEMORY_WRITE_BIT;
208 
209 	default:
210 		DE_FATAL("Unknown access flags");
211 		return ACCESS_LAST;
212 	}
213 }
214 
// Sequential stage enums
// Same set as vk::VkPipelineStageFlagBits, but numbered 0..N-1 instead of
// being bit masks, so the values can be used e.g. as array indices.
enum PipelineStage
{
	PIPELINESTAGE_TOP_OF_PIPE_BIT = 0,
	PIPELINESTAGE_BOTTOM_OF_PIPE_BIT,
	PIPELINESTAGE_DRAW_INDIRECT_BIT,
	PIPELINESTAGE_VERTEX_INPUT_BIT,
	PIPELINESTAGE_VERTEX_SHADER_BIT,
	PIPELINESTAGE_TESSELLATION_CONTROL_SHADER_BIT,
	PIPELINESTAGE_TESSELLATION_EVALUATION_SHADER_BIT,
	PIPELINESTAGE_GEOMETRY_SHADER_BIT,
	PIPELINESTAGE_FRAGMENT_SHADER_BIT,
	PIPELINESTAGE_EARLY_FRAGMENT_TESTS_BIT,
	PIPELINESTAGE_LATE_FRAGMENT_TESTS_BIT,
	PIPELINESTAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
	PIPELINESTAGE_COMPUTE_SHADER_BIT,
	PIPELINESTAGE_TRANSFER_BIT,
	PIPELINESTAGE_HOST_BIT,

	PIPELINESTAGE_LAST	// Number of values; also returned for unknown flags
};
236 
pipelineStageFlagToPipelineStage(vk::VkPipelineStageFlagBits flag)237 PipelineStage pipelineStageFlagToPipelineStage (vk::VkPipelineStageFlagBits flag)
238 {
239 	switch (flag)
240 	{
241 		case vk::VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT:						return PIPELINESTAGE_TOP_OF_PIPE_BIT;
242 		case vk::VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT:					return PIPELINESTAGE_BOTTOM_OF_PIPE_BIT;
243 		case vk::VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT:					return PIPELINESTAGE_DRAW_INDIRECT_BIT;
244 		case vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT:					return PIPELINESTAGE_VERTEX_INPUT_BIT;
245 		case vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT:					return PIPELINESTAGE_VERTEX_SHADER_BIT;
246 		case vk::VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT:		return PIPELINESTAGE_TESSELLATION_CONTROL_SHADER_BIT;
247 		case vk::VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT:	return PIPELINESTAGE_TESSELLATION_EVALUATION_SHADER_BIT;
248 		case vk::VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT:					return PIPELINESTAGE_GEOMETRY_SHADER_BIT;
249 		case vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT:					return PIPELINESTAGE_FRAGMENT_SHADER_BIT;
250 		case vk::VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT:			return PIPELINESTAGE_EARLY_FRAGMENT_TESTS_BIT;
251 		case vk::VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT:				return PIPELINESTAGE_LATE_FRAGMENT_TESTS_BIT;
252 		case vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT:			return PIPELINESTAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
253 		case vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT:					return PIPELINESTAGE_COMPUTE_SHADER_BIT;
254 		case vk::VK_PIPELINE_STAGE_TRANSFER_BIT:						return PIPELINESTAGE_TRANSFER_BIT;
255 		case vk::VK_PIPELINE_STAGE_HOST_BIT:							return PIPELINESTAGE_HOST_BIT;
256 
257 		default:
258 			DE_FATAL("Unknown pipeline stage flags");
259 			return PIPELINESTAGE_LAST;
260 	}
261 }
262 
operator |(Usage a,Usage b)263 Usage operator| (Usage a, Usage b)
264 {
265 	return (Usage)((deUint32)a | (deUint32)b);
266 }
267 
operator &(Usage a,Usage b)268 Usage operator& (Usage a, Usage b)
269 {
270 	return (Usage)((deUint32)a & (deUint32)b);
271 }
272 
usageToName(Usage usage)273 string usageToName (Usage usage)
274 {
275 	const struct
276 	{
277 		Usage				usage;
278 		const char* const	name;
279 	} usageNames[] =
280 	{
281 		{ USAGE_HOST_READ,					"host_read" },
282 		{ USAGE_HOST_WRITE,					"host_write" },
283 
284 		{ USAGE_TRANSFER_SRC,				"transfer_src" },
285 		{ USAGE_TRANSFER_DST,				"transfer_dst" },
286 
287 		{ USAGE_INDEX_BUFFER,				"index_buffer" },
288 		{ USAGE_VERTEX_BUFFER,				"vertex_buffer" },
289 		{ USAGE_UNIFORM_BUFFER,				"uniform_buffer" },
290 		{ USAGE_STORAGE_BUFFER,				"storage_buffer" },
291 		{ USAGE_UNIFORM_TEXEL_BUFFER,		"uniform_texel_buffer" },
292 		{ USAGE_STORAGE_TEXEL_BUFFER,		"storage_texel_buffer" },
293 		{ USAGE_INDIRECT_BUFFER,			"indirect_buffer" },
294 		{ USAGE_SAMPLED_IMAGE,				"image_sampled" },
295 		{ USAGE_STORAGE_IMAGE,				"storage_image" },
296 		{ USAGE_COLOR_ATTACHMENT,			"color_attachment" },
297 		{ USAGE_INPUT_ATTACHMENT,			"input_attachment" },
298 		{ USAGE_DEPTH_STENCIL_ATTACHMENT,	"depth_stencil_attachment" },
299 	};
300 
301 	std::ostringstream	stream;
302 	bool				first = true;
303 
304 	for (size_t usageNdx = 0; usageNdx < DE_LENGTH_OF_ARRAY(usageNames); usageNdx++)
305 	{
306 		if (usage & usageNames[usageNdx].usage)
307 		{
308 			if (!first)
309 				stream << "_";
310 			else
311 				first = false;
312 
313 			stream << usageNames[usageNdx].name;
314 		}
315 	}
316 
317 	return stream.str();
318 }
319 
usageToBufferUsageFlags(Usage usage)320 vk::VkBufferUsageFlags usageToBufferUsageFlags (Usage usage)
321 {
322 	vk::VkBufferUsageFlags flags = 0;
323 
324 	if (usage & USAGE_TRANSFER_SRC)
325 		flags |= vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
326 
327 	if (usage & USAGE_TRANSFER_DST)
328 		flags |= vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT;
329 
330 	if (usage & USAGE_INDEX_BUFFER)
331 		flags |= vk::VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
332 
333 	if (usage & USAGE_VERTEX_BUFFER)
334 		flags |= vk::VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
335 
336 	if (usage & USAGE_INDIRECT_BUFFER)
337 		flags |= vk::VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT;
338 
339 	if (usage & USAGE_UNIFORM_BUFFER)
340 		flags |= vk::VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
341 
342 	if (usage & USAGE_STORAGE_BUFFER)
343 		flags |= vk::VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
344 
345 	if (usage & USAGE_UNIFORM_TEXEL_BUFFER)
346 		flags |= vk::VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT;
347 
348 	if (usage & USAGE_STORAGE_TEXEL_BUFFER)
349 		flags |= vk::VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT;
350 
351 	return flags;
352 }
353 
usageToImageUsageFlags(Usage usage)354 vk::VkImageUsageFlags usageToImageUsageFlags (Usage usage)
355 {
356 	vk::VkImageUsageFlags flags = 0;
357 
358 	if (usage & USAGE_TRANSFER_SRC)
359 		flags |= vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
360 
361 	if (usage & USAGE_TRANSFER_DST)
362 		flags |= vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT;
363 
364 	if (usage & USAGE_SAMPLED_IMAGE)
365 		flags |= vk::VK_IMAGE_USAGE_SAMPLED_BIT;
366 
367 	if (usage & USAGE_STORAGE_IMAGE)
368 		flags |= vk::VK_IMAGE_USAGE_STORAGE_BIT;
369 
370 	if (usage & USAGE_COLOR_ATTACHMENT)
371 		flags |= vk::VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
372 
373 	if (usage & USAGE_INPUT_ATTACHMENT)
374 		flags |= vk::VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
375 
376 	if (usage & USAGE_DEPTH_STENCIL_ATTACHMENT)
377 		flags |= vk::VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
378 
379 	return flags;
380 }
381 
usageToStageFlags(Usage usage)382 vk::VkPipelineStageFlags usageToStageFlags (Usage usage)
383 {
384 	vk::VkPipelineStageFlags flags = 0;
385 
386 	if (usage & (USAGE_HOST_READ|USAGE_HOST_WRITE))
387 		flags |= vk::VK_PIPELINE_STAGE_HOST_BIT;
388 
389 	if (usage & (USAGE_TRANSFER_SRC|USAGE_TRANSFER_DST))
390 		flags |= vk::VK_PIPELINE_STAGE_TRANSFER_BIT;
391 
392 	if (usage & (USAGE_VERTEX_BUFFER|USAGE_INDEX_BUFFER))
393 		flags |= vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
394 
395 	if (usage & USAGE_INDIRECT_BUFFER)
396 		flags |= vk::VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
397 
398 	if (usage &
399 			(USAGE_UNIFORM_BUFFER
400 			| USAGE_STORAGE_BUFFER
401 			| USAGE_UNIFORM_TEXEL_BUFFER
402 			| USAGE_STORAGE_TEXEL_BUFFER
403 			| USAGE_SAMPLED_IMAGE
404 			| USAGE_STORAGE_IMAGE))
405 	{
406 		flags |= (vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT
407 				| vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT
408 				| vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT);
409 	}
410 
411 	if (usage & USAGE_INPUT_ATTACHMENT)
412 		flags |= vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
413 
414 	if (usage & USAGE_COLOR_ATTACHMENT)
415 		flags |= vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
416 
417 	if (usage & USAGE_DEPTH_STENCIL_ATTACHMENT)
418 	{
419 		flags |= vk::VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT
420 				| vk::VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
421 	}
422 
423 	return flags;
424 }
425 
usageToAccessFlags(Usage usage)426 vk::VkAccessFlags usageToAccessFlags (Usage usage)
427 {
428 	vk::VkAccessFlags flags = 0;
429 
430 	if (usage & USAGE_HOST_READ)
431 		flags |= vk::VK_ACCESS_HOST_READ_BIT;
432 
433 	if (usage & USAGE_HOST_WRITE)
434 		flags |= vk::VK_ACCESS_HOST_WRITE_BIT;
435 
436 	if (usage & USAGE_TRANSFER_SRC)
437 		flags |= vk::VK_ACCESS_TRANSFER_READ_BIT;
438 
439 	if (usage & USAGE_TRANSFER_DST)
440 		flags |= vk::VK_ACCESS_TRANSFER_WRITE_BIT;
441 
442 	if (usage & USAGE_INDEX_BUFFER)
443 		flags |= vk::VK_ACCESS_INDEX_READ_BIT;
444 
445 	if (usage & USAGE_VERTEX_BUFFER)
446 		flags |= vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
447 
448 	if (usage & (USAGE_UNIFORM_BUFFER | USAGE_UNIFORM_TEXEL_BUFFER))
449 		flags |= vk::VK_ACCESS_UNIFORM_READ_BIT;
450 
451 	if (usage & USAGE_SAMPLED_IMAGE)
452 		flags |= vk::VK_ACCESS_SHADER_READ_BIT;
453 
454 	if (usage & (USAGE_STORAGE_BUFFER
455 				| USAGE_STORAGE_TEXEL_BUFFER
456 				| USAGE_STORAGE_IMAGE))
457 		flags |= vk::VK_ACCESS_SHADER_READ_BIT | vk::VK_ACCESS_SHADER_WRITE_BIT;
458 
459 	if (usage & USAGE_INDIRECT_BUFFER)
460 		flags |= vk::VK_ACCESS_INDIRECT_COMMAND_READ_BIT;
461 
462 	if (usage & USAGE_COLOR_ATTACHMENT)
463 		flags |= vk::VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
464 
465 	if (usage & USAGE_INPUT_ATTACHMENT)
466 		flags |= vk::VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;
467 
468 	if (usage & USAGE_DEPTH_STENCIL_ATTACHMENT)
469 		flags |= vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT
470 			| vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
471 
472 	return flags;
473 }
474 
// Parameters for a single pipeline barrier test case.
struct TestConfig
{
	Usage				usage;				// Combination of Usage bits the case exercises
	deUint32			vertexBufferStride;	// Stride used for vertex buffer access (see *_VERTEX_BUFFER_STRIDE)
	vk::VkDeviceSize	size;				// Memory size in bytes
	vk::VkSharingMode	sharing;			// Sharing mode for created resources
};
482 
// Allocates a command buffer of the given level from the pool and puts it
// into the recording state. Secondary command buffers get a default (all
// zero / disabled) inheritance info.
vk::Move<vk::VkCommandBuffer> createBeginCommandBuffer (const vk::DeviceInterface&	vkd,
														vk::VkDevice				device,
														vk::VkCommandPool			pool,
														vk::VkCommandBufferLevel	level)
{
	const vk::VkCommandBufferInheritanceInfo		inheritanceInfo	=
	{
		vk::VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO,
		DE_NULL,
		0,			// renderPass
		0,			// subpass
		0,			// framebuffer
		VK_FALSE,	// occlusionQueryEnable
		0u,			// queryFlags
		0u			// pipelineStatistics
	};
	// Inheritance info is only meaningful (and only passed) for secondary
	// command buffers.
	const vk::VkCommandBufferInheritanceInfo* const	pInheritanceInfo	= (level == vk::VK_COMMAND_BUFFER_LEVEL_SECONDARY)
																		? &inheritanceInfo
																		: (const vk::VkCommandBufferInheritanceInfo*)DE_NULL;
	const vk::VkCommandBufferBeginInfo				beginInfo			=
	{
		vk::VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
		DE_NULL,
		0u,
		pInheritanceInfo
	};
	vk::Move<vk::VkCommandBuffer>					commandBuffer		(allocateCommandBuffer(vkd, device, pool, level));

	VK_CHECK(vkd.beginCommandBuffer(*commandBuffer, &beginInfo));

	return commandBuffer;
}
513 
// Creates a buffer of the given size and usage. The queue family list is
// only consumed by the implementation for VK_SHARING_MODE_CONCURRENT but is
// always passed through.
vk::Move<vk::VkBuffer> createBuffer (const vk::DeviceInterface&	vkd,
									 vk::VkDevice				device,
									 vk::VkDeviceSize			size,
									 vk::VkBufferUsageFlags		usage,
									 vk::VkSharingMode			sharingMode,
									 const vector<deUint32>&	queueFamilies)
{
	const vk::VkBufferCreateInfo	createInfo =
	{
		vk::VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
		DE_NULL,

		0,	// flags
		size,
		usage,
		sharingMode,
		(deUint32)queueFamilies.size(),
		// Indexing an empty vector is undefined behavior; pass null instead.
		(queueFamilies.empty() ? (const deUint32*)DE_NULL : &queueFamilies[0])
	};

	return vk::createBuffer(vkd, device, &createInfo);
}
536 
// Allocates 'size' bytes of device memory of the given memory type.
vk::Move<vk::VkDeviceMemory> allocMemory (const vk::DeviceInterface&	vkd,
										  vk::VkDevice					device,
										  vk::VkDeviceSize				size,
										  deUint32						memoryTypeIndex)
{
	const vk::VkMemoryAllocateInfo allocateInfo =
	{
		vk::VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,	// sType
		DE_NULL,									// pNext
		size,										// allocationSize
		memoryTypeIndex								// memoryTypeIndex
	};

	return vk::allocateMemory(vkd, device, &allocateInfo);
}
553 
bindBufferMemory(const vk::InstanceInterface & vki,const vk::DeviceInterface & vkd,vk::VkPhysicalDevice physicalDevice,vk::VkDevice device,vk::VkBuffer buffer,vk::VkMemoryPropertyFlags properties)554 vk::Move<vk::VkDeviceMemory> bindBufferMemory (const vk::InstanceInterface&	vki,
555 											   const vk::DeviceInterface&	vkd,
556 											   vk::VkPhysicalDevice			physicalDevice,
557 											   vk::VkDevice					device,
558 											   vk::VkBuffer					buffer,
559 											   vk::VkMemoryPropertyFlags	properties)
560 {
561 	const vk::VkMemoryRequirements				memoryRequirements	= vk::getBufferMemoryRequirements(vkd, device, buffer);
562 	const vk::VkPhysicalDeviceMemoryProperties	memoryProperties	= vk::getPhysicalDeviceMemoryProperties(vki, physicalDevice);
563 	deUint32									memoryTypeIndex;
564 
565 	for (memoryTypeIndex = 0; memoryTypeIndex < memoryProperties.memoryTypeCount; memoryTypeIndex++)
566 	{
567 		if ((memoryRequirements.memoryTypeBits & (0x1u << memoryTypeIndex))
568 			&& (memoryProperties.memoryTypes[memoryTypeIndex].propertyFlags & properties) == properties)
569 		{
570 			try
571 			{
572 				const vk::VkMemoryAllocateInfo	allocationInfo	=
573 				{
574 					vk::VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
575 					DE_NULL,
576 					memoryRequirements.size,
577 					memoryTypeIndex
578 				};
579 				vk::Move<vk::VkDeviceMemory>	memory			(vk::allocateMemory(vkd, device, &allocationInfo));
580 
581 				VK_CHECK(vkd.bindBufferMemory(device, buffer, *memory, 0));
582 
583 				return memory;
584 			}
585 			catch (const vk::Error& error)
586 			{
587 				if (error.getError() == vk::VK_ERROR_OUT_OF_DEVICE_MEMORY
588 					|| error.getError() == vk::VK_ERROR_OUT_OF_HOST_MEMORY)
589 				{
590 					// Try next memory type/heap if out of memory
591 				}
592 				else
593 				{
594 					// Throw all other errors forward
595 					throw;
596 				}
597 			}
598 		}
599 	}
600 
601 	TCU_FAIL("Failed to allocate memory for buffer");
602 }
603 
bindImageMemory(const vk::InstanceInterface & vki,const vk::DeviceInterface & vkd,vk::VkPhysicalDevice physicalDevice,vk::VkDevice device,vk::VkImage image,vk::VkMemoryPropertyFlags properties)604 vk::Move<vk::VkDeviceMemory> bindImageMemory (const vk::InstanceInterface&	vki,
605 											   const vk::DeviceInterface&	vkd,
606 											   vk::VkPhysicalDevice			physicalDevice,
607 											   vk::VkDevice					device,
608 											   vk::VkImage					image,
609 											   vk::VkMemoryPropertyFlags	properties)
610 {
611 	const vk::VkMemoryRequirements				memoryRequirements	= vk::getImageMemoryRequirements(vkd, device, image);
612 	const vk::VkPhysicalDeviceMemoryProperties	memoryProperties	= vk::getPhysicalDeviceMemoryProperties(vki, physicalDevice);
613 	deUint32									memoryTypeIndex;
614 
615 	for (memoryTypeIndex = 0; memoryTypeIndex < memoryProperties.memoryTypeCount; memoryTypeIndex++)
616 	{
617 		if ((memoryRequirements.memoryTypeBits & (0x1u << memoryTypeIndex))
618 			&& (memoryProperties.memoryTypes[memoryTypeIndex].propertyFlags & properties) == properties)
619 		{
620 			try
621 			{
622 				const vk::VkMemoryAllocateInfo	allocationInfo	=
623 				{
624 					vk::VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
625 					DE_NULL,
626 					memoryRequirements.size,
627 					memoryTypeIndex
628 				};
629 				vk::Move<vk::VkDeviceMemory>	memory			(vk::allocateMemory(vkd, device, &allocationInfo));
630 
631 				VK_CHECK(vkd.bindImageMemory(device, image, *memory, 0));
632 
633 				return memory;
634 			}
635 			catch (const vk::Error& error)
636 			{
637 				if (error.getError() == vk::VK_ERROR_OUT_OF_DEVICE_MEMORY
638 					|| error.getError() == vk::VK_ERROR_OUT_OF_HOST_MEMORY)
639 				{
640 					// Try next memory type/heap if out of memory
641 				}
642 				else
643 				{
644 					// Throw all other errors forward
645 					throw;
646 				}
647 			}
648 		}
649 	}
650 
651 	TCU_FAIL("Failed to allocate memory for image");
652 }
653 
mapMemory(const vk::DeviceInterface & vkd,vk::VkDevice device,vk::VkDeviceMemory memory,vk::VkDeviceSize size)654 void* mapMemory (const vk::DeviceInterface&	vkd,
655 				 vk::VkDevice				device,
656 				 vk::VkDeviceMemory			memory,
657 				 vk::VkDeviceSize			size)
658 {
659 	void* ptr;
660 
661 	VK_CHECK(vkd.mapMemory(device, memory, 0, size, 0, &ptr));
662 
663 	return ptr;
664 }
665 
// Byte-level reference model of a memory allocation. Tracks the expected
// contents (m_data) together with a per-byte "defined" bit (m_defined,
// packed 64 bits per element) telling which bytes hold values that may be
// read back and verified.
class ReferenceMemory
{
public:
			ReferenceMemory	(size_t size);

	// Writes one byte and marks it defined.
	void	set				(size_t pos, deUint8 val);
	// Reads one byte; asserts that it has been marked defined.
	deUint8	get				(size_t pos) const;
	bool	isDefined		(size_t pos) const;

	void	setDefined		(size_t offset, size_t size, const void* data);
	void	setUndefined	(size_t offset, size_t size);
	// Copies 'size' bytes from 'data' into [offset, offset+size) and marks
	// them defined.
	void	setData			(size_t offset, size_t size, const void* data);

	size_t	getSize			(void) const { return m_data.size(); }

private:
	vector<deUint8>		m_data;		// Expected value for each byte
	vector<deUint64>	m_defined;	// Defined-bit per byte, 64 bytes per word
};
685 
ReferenceMemory(size_t size)686 ReferenceMemory::ReferenceMemory (size_t size)
687 	: m_data	(size, 0)
688 	, m_defined	(size / 64 + (size % 64 == 0 ? 0 : 1), 0ull)
689 {
690 }
691 
set(size_t pos,deUint8 val)692 void ReferenceMemory::set (size_t pos, deUint8 val)
693 {
694 	DE_ASSERT(pos < m_data.size());
695 
696 	m_data[pos] = val;
697 	m_defined[pos / 64] |= 0x1ull << (pos % 64);
698 }
699 
setData(size_t offset,size_t size,const void * data_)700 void ReferenceMemory::setData (size_t offset, size_t size, const void* data_)
701 {
702 	const deUint8* data = (const deUint8*)data_;
703 
704 	DE_ASSERT(offset < m_data.size());
705 	DE_ASSERT(offset + size <= m_data.size());
706 
707 	// \todo [2016-03-09 mika] Optimize
708 	for (size_t pos = 0; pos < size; pos++)
709 	{
710 		m_data[offset + pos] = data[pos];
711 		m_defined[(offset + pos) / 64] |= 0x1ull << ((offset + pos) % 64);
712 	}
713 }
714 
void ReferenceMemory::setUndefined	(size_t offset, size_t size)
{
	// NOTE(review): despite the name this *sets* the defined bits (|=), just
	// like setData(), rather than clearing them (&= ~mask). The visible effect
	// is that "undefined" bytes still pass isDefined()/get(). Confirm this is
	// intentional before changing it.
	// \todo [2016-03-09 mika] Optimize
	for (size_t pos = 0; pos < size; pos++)
		m_defined[(offset + pos) / 64] |= 0x1ull << ((offset + pos) % 64);
}
721 
get(size_t pos) const722 deUint8 ReferenceMemory::get (size_t pos) const
723 {
724 	DE_ASSERT(pos < m_data.size());
725 	DE_ASSERT(isDefined(pos));
726 	return m_data[pos];
727 }
728 
isDefined(size_t pos) const729 bool ReferenceMemory::isDefined (size_t pos) const
730 {
731 	DE_ASSERT(pos < m_data.size());
732 
733 	return (m_defined[pos / 64] & (0x1ull << (pos % 64))) != 0;
734 }
735 
// Wrapper owning one device memory allocation of a specific memory type.
// The max-buffer / max-image limits describing what fits into the allocation
// are computed by the caller and simply stored here.
class Memory
{
public:
							Memory				(const vk::InstanceInterface&	vki,
												 const vk::DeviceInterface&		vkd,
												 vk::VkPhysicalDevice			physicalDevice,
												 vk::VkDevice					device,
												 vk::VkDeviceSize				size,
												 deUint32						memoryTypeIndex,
												 vk::VkDeviceSize				maxBufferSize,
												 deInt32						maxImageWidth,
												 deInt32						maxImageHeight);

	// Size of the allocation in bytes.
	vk::VkDeviceSize		getSize				(void) const { return m_size; }
	vk::VkDeviceSize		getMaxBufferSize	(void) const { return m_maxBufferSize; }
	// Buffers are supported iff a non-zero max buffer size was supplied.
	bool					getSupportBuffers	(void) const { return m_maxBufferSize > 0; }

	deInt32					getMaxImageWidth	(void) const { return m_maxImageWidth; }
	deInt32					getMaxImageHeight	(void) const { return m_maxImageHeight; }
	// Images are supported iff a non-zero max image width was supplied.
	bool					getSupportImages	(void) const { return m_maxImageWidth > 0; }

	const vk::VkMemoryType&	getMemoryType		(void) const { return m_memoryType; }
	deUint32				getMemoryTypeIndex	(void) const { return m_memoryTypeIndex; }
	vk::VkDeviceMemory		getMemory			(void) const { return *m_memory; }

private:
	const vk::VkDeviceSize					m_size;
	const deUint32							m_memoryTypeIndex;
	const vk::VkMemoryType					m_memoryType;
	const vk::Unique<vk::VkDeviceMemory>	m_memory;
	const vk::VkDeviceSize					m_maxBufferSize;
	const deInt32							m_maxImageWidth;
	const deInt32							m_maxImageHeight;
};
770 
// Returns the vk::VkMemoryType description for the given memory type index.
vk::VkMemoryType getMemoryTypeInfo (const vk::InstanceInterface&	vki,
									vk::VkPhysicalDevice			device,
									deUint32						memoryTypeIndex)
{
	const vk::VkPhysicalDeviceMemoryProperties properties = vk::getPhysicalDeviceMemoryProperties(vki, device);

	DE_ASSERT(memoryTypeIndex < properties.memoryTypeCount);

	return properties.memoryTypes[memoryTypeIndex];
}
781 
// Searches for the largest buffer size (<= memorySize) whose memory
// requirements still fit into 'memorySize' bytes of the given memory type.
// First probes memorySize directly; otherwise performs a step-halving search,
// remembering the last size that fit. Each probe creates a temporary buffer
// only to query its memory requirements.
vk::VkDeviceSize findMaxBufferSize (const vk::DeviceInterface&		vkd,
									vk::VkDevice					device,

									vk::VkBufferUsageFlags			usage,
									vk::VkSharingMode				sharingMode,
									const vector<deUint32>&			queueFamilies,

									vk::VkDeviceSize				memorySize,
									deUint32						memoryTypeIndex)
{
	vk::VkDeviceSize	lastSuccess	= 0;
	vk::VkDeviceSize	currentSize	= memorySize / 2;

	{
		// Fast path: a buffer of exactly memorySize bytes may already fit.
		const vk::Unique<vk::VkBuffer>  buffer			(createBuffer(vkd, device, memorySize, usage, sharingMode, queueFamilies));
		const vk::VkMemoryRequirements  requirements	(vk::getBufferMemoryRequirements(vkd, device, *buffer));

		if (requirements.size == memorySize && requirements.memoryTypeBits & (0x1u << memoryTypeIndex))
			return memorySize;
	}

	// Step-halving search: grow on success, shrink on failure, until the
	// step reaches zero.
	for (vk::VkDeviceSize stepSize = memorySize / 4; currentSize > 0; stepSize /= 2)
	{
		const vk::Unique<vk::VkBuffer>	buffer			(createBuffer(vkd, device, currentSize, usage, sharingMode, queueFamilies));
		const vk::VkMemoryRequirements	requirements	(vk::getBufferMemoryRequirements(vkd, device, *buffer));

		if (requirements.size <= memorySize && requirements.memoryTypeBits & (0x1u << memoryTypeIndex))
		{
			lastSuccess = currentSize;
			currentSize += stepSize;
		}
		else
			currentSize -= stepSize;

		if (stepSize == 0)
			break;
	}

	return lastSuccess;
}
822 
// Round size down maximum W * H * 4, where W and H < 4096
vk::VkDeviceSize roundBufferSizeToWxHx4 (vk::VkDeviceSize size)
{
	const vk::VkDeviceSize	maxTextureSize	= 4096;
	vk::VkDeviceSize		maxTexelCount	= size / 4;
	// NOTE(review): de::max() looks suspicious here. Whenever maxTexelCount >
	// maxTextureSize (i.e. size > 16KiB) this initializes bestW to the full
	// texel count, which contradicts the "W and H < 4096" contract stated
	// above (de::min() would match it). Confirm intent before changing.
	vk::VkDeviceSize		bestW			= de::max(maxTexelCount, maxTextureSize);
	vk::VkDeviceSize		bestH			= maxTexelCount / bestW;

	// \todo [2016-03-09 mika] Could probably be faster?
	// Searches for the W (and derived H = maxTexelCount / W) maximizing the
	// covered texel count W*H; stops early once W*H*4 reaches size.
	for (vk::VkDeviceSize w = 1; w * w < maxTexelCount && w < maxTextureSize && bestW * bestH * 4 < size; w++)
	{
		const vk::VkDeviceSize h = maxTexelCount / w;

		if (bestW * bestH < w * h)
		{
			bestW = w;
			bestH = h;
		}
	}

	return bestW * bestH * 4;
}
845 
846 // Find RGBA8 image size that has exactly "size" of number of bytes.
847 // "size" must be W * H * 4 where W and H < 4096
findImageSizeWxHx4(vk::VkDeviceSize size)848 IVec2 findImageSizeWxHx4 (vk::VkDeviceSize size)
849 {
850 	const vk::VkDeviceSize	maxTextureSize	= 4096;
851 	vk::VkDeviceSize		texelCount		= size / 4;
852 
853 	DE_ASSERT((size % 4) == 0);
854 
855 	// \todo [2016-03-09 mika] Could probably be faster?
856 	for (vk::VkDeviceSize w = 1; w < maxTextureSize && w < texelCount; w++)
857 	{
858 		const vk::VkDeviceSize	h	= texelCount / w;
859 
860 		if ((texelCount  % w) == 0 && h < maxTextureSize)
861 			return IVec2((int)w, (int)h);
862 	}
863 
864 	DE_FATAL("Invalid size");
865 	return IVec2(-1, -1);
866 }
867 
// Searches for the largest RGBA8 optimal-tiling 2D image whose memory
// requirements fit into 'memorySize' bytes of the given memory type. Starts
// from a roughly square estimate covering the whole memory and refines it
// with a step-halving search, remembering the last size that fit.
IVec2 findMaxRGBA8ImageSize (const vk::DeviceInterface&	vkd,
							 vk::VkDevice				device,

							 vk::VkImageUsageFlags		usage,
							 vk::VkSharingMode			sharingMode,
							 const vector<deUint32>&	queueFamilies,

							 vk::VkDeviceSize			memorySize,
							 deUint32					memoryTypeIndex)
{
	IVec2		lastSuccess		(0);
	IVec2		currentSize;

	{
		// Initial guess: as close to square as possible, 4 bytes per texel.
		const deUint32	texelCount	= (deUint32)(memorySize / 4);
		const deUint32	width		= (deUint32)deFloatSqrt((float)texelCount);
		const deUint32	height		= texelCount / width;

		currentSize[0] = deMaxu32(width, height);
		currentSize[1] = deMinu32(width, height);
	}

	// Step-halving search over both dimensions together: grow on success,
	// shrink on failure, until the step reaches zero.
	for (deInt32 stepSize = currentSize[0] / 2; currentSize[0] > 0; stepSize /= 2)
	{
		// Temporary image created only to query its memory requirements.
		const vk::VkImageCreateInfo	createInfo		=
		{
			vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
			DE_NULL,

			0u,									// flags
			vk::VK_IMAGE_TYPE_2D,
			vk::VK_FORMAT_R8G8B8A8_UNORM,
			{
				(deUint32)currentSize[0],
				(deUint32)currentSize[1],
				1u,
			},
			1u, 1u,								// mipLevels, arrayLayers
			vk::VK_SAMPLE_COUNT_1_BIT,
			vk::VK_IMAGE_TILING_OPTIMAL,
			usage,
			sharingMode,
			(deUint32)queueFamilies.size(),
			&queueFamilies[0],
			vk::VK_IMAGE_LAYOUT_UNDEFINED
		};
		const vk::Unique<vk::VkImage>	image			(vk::createImage(vkd, device, &createInfo));
		const vk::VkMemoryRequirements	requirements	(vk::getImageMemoryRequirements(vkd, device, *image));

		if (requirements.size <= memorySize && requirements.memoryTypeBits & (0x1u << memoryTypeIndex))
		{
			lastSuccess = currentSize;
			currentSize[0] += stepSize;
			currentSize[1] += stepSize;
		}
		else
		{
			currentSize[0] -= stepSize;
			currentSize[1] -= stepSize;
		}

		if (stepSize == 0)
			break;
	}

	return lastSuccess;
}
935 
// Allocate 'size' bytes from memory type 'memoryTypeIndex' and record the
// caller-supplied buffer/image size limits for later resource creation.
Memory::Memory (const vk::InstanceInterface&	vki,
				const vk::DeviceInterface&		vkd,
				vk::VkPhysicalDevice			physicalDevice,
				vk::VkDevice					device,
				vk::VkDeviceSize				size,
				deUint32						memoryTypeIndex,
				vk::VkDeviceSize				maxBufferSize,
				deInt32							maxImageWidth,
				deInt32							maxImageHeight)
	: m_size			(size)
	, m_memoryTypeIndex	(memoryTypeIndex)
	, m_memoryType		(getMemoryTypeInfo(vki, physicalDevice, memoryTypeIndex))
	, m_memory			(allocMemory(vkd, device, size, memoryTypeIndex))
	, m_maxBufferSize	(maxBufferSize)
	, m_maxImageWidth	(maxImageWidth)
	, m_maxImageHeight	(maxImageHeight)
{
}
954 
955 class Context
956 {
957 public:
Context(const vk::InstanceInterface & vki,const vk::DeviceInterface & vkd,vk::VkPhysicalDevice physicalDevice,vk::VkDevice device,vk::VkQueue queue,deUint32 queueFamilyIndex,const vector<pair<deUint32,vk::VkQueue>> & queues,const vk::BinaryCollection & binaryCollection)958 													Context					(const vk::InstanceInterface&					vki,
959 																			 const vk::DeviceInterface&						vkd,
960 																			 vk::VkPhysicalDevice							physicalDevice,
961 																			 vk::VkDevice									device,
962 																			 vk::VkQueue									queue,
963 																			 deUint32										queueFamilyIndex,
964 																			 const vector<pair<deUint32, vk::VkQueue> >&	queues,
965 																			 const vk::BinaryCollection&					binaryCollection)
966 		: m_vki					(vki)
967 		, m_vkd					(vkd)
968 		, m_physicalDevice		(physicalDevice)
969 		, m_device				(device)
970 		, m_queue				(queue)
971 		, m_queueFamilyIndex	(queueFamilyIndex)
972 		, m_queues				(queues)
973 		, m_commandPool			(createCommandPool(vkd, device, vk::VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex))
974 		, m_binaryCollection	(binaryCollection)
975 	{
976 		for (size_t queueNdx = 0; queueNdx < m_queues.size(); queueNdx++)
977 			m_queueFamilies.push_back(m_queues[queueNdx].first);
978 	}
979 
getInstanceInterface(void) const980 	const vk::InstanceInterface&					getInstanceInterface	(void) const { return m_vki; }
getPhysicalDevice(void) const981 	vk::VkPhysicalDevice							getPhysicalDevice		(void) const { return m_physicalDevice; }
getDevice(void) const982 	vk::VkDevice									getDevice				(void) const { return m_device; }
getDeviceInterface(void) const983 	const vk::DeviceInterface&						getDeviceInterface		(void) const { return m_vkd; }
getQueue(void) const984 	vk::VkQueue										getQueue				(void) const { return m_queue; }
getQueueFamily(void) const985 	deUint32										getQueueFamily			(void) const { return m_queueFamilyIndex; }
getQueues(void) const986 	const vector<pair<deUint32, vk::VkQueue> >&		getQueues				(void) const { return m_queues; }
getQueueFamilies(void) const987 	const vector<deUint32>							getQueueFamilies		(void) const { return m_queueFamilies; }
getCommandPool(void) const988 	vk::VkCommandPool								getCommandPool			(void) const { return *m_commandPool; }
getBinaryCollection(void) const989 	const vk::BinaryCollection&						getBinaryCollection		(void) const { return m_binaryCollection; }
990 
991 private:
992 	const vk::InstanceInterface&					m_vki;
993 	const vk::DeviceInterface&						m_vkd;
994 	const vk::VkPhysicalDevice						m_physicalDevice;
995 	const vk::VkDevice								m_device;
996 	const vk::VkQueue								m_queue;
997 	const deUint32									m_queueFamilyIndex;
998 	const vector<pair<deUint32, vk::VkQueue> >		m_queues;
999 	const vk::Unique<vk::VkCommandPool>				m_commandPool;
1000 	const vk::BinaryCollection&						m_binaryCollection;
1001 	vector<deUint32>								m_queueFamilies;
1002 };
1003 
1004 class PrepareContext
1005 {
1006 public:
PrepareContext(const Context & context,const Memory & memory)1007 													PrepareContext			(const Context&	context,
1008 																			 const Memory&	memory)
1009 		: m_context	(context)
1010 		, m_memory	(memory)
1011 	{
1012 	}
1013 
getMemory(void) const1014 	const Memory&									getMemory				(void) const { return m_memory; }
getContext(void) const1015 	const Context&									getContext				(void) const { return m_context; }
getBinaryCollection(void) const1016 	const vk::BinaryCollection&						getBinaryCollection		(void) const { return m_context.getBinaryCollection(); }
1017 
setBuffer(vk::Move<vk::VkBuffer> buffer,vk::VkDeviceSize size)1018 	void				setBuffer		(vk::Move<vk::VkBuffer>	buffer,
1019 										 vk::VkDeviceSize		size)
1020 	{
1021 		DE_ASSERT(!m_currentImage);
1022 		DE_ASSERT(!m_currentBuffer);
1023 
1024 		m_currentBuffer		= buffer;
1025 		m_currentBufferSize	= size;
1026 	}
1027 
getBuffer(void) const1028 	vk::VkBuffer		getBuffer		(void) const { return *m_currentBuffer; }
getBufferSize(void) const1029 	vk::VkDeviceSize	getBufferSize	(void) const
1030 	{
1031 		DE_ASSERT(m_currentBuffer);
1032 		return m_currentBufferSize;
1033 	}
1034 
releaseBuffer(void)1035 	void				releaseBuffer	(void) { m_currentBuffer.disown(); }
1036 
setImage(vk::Move<vk::VkImage> image,vk::VkImageLayout layout,vk::VkDeviceSize memorySize,deInt32 width,deInt32 height)1037 	void				setImage		(vk::Move<vk::VkImage>	image,
1038 										 vk::VkImageLayout		layout,
1039 										 vk::VkDeviceSize		memorySize,
1040 										 deInt32				width,
1041 										 deInt32				height)
1042 	{
1043 		DE_ASSERT(!m_currentImage);
1044 		DE_ASSERT(!m_currentBuffer);
1045 
1046 		m_currentImage				= image;
1047 		m_currentImageMemorySize	= memorySize;
1048 		m_currentImageLayout		= layout;
1049 		m_currentImageWidth			= width;
1050 		m_currentImageHeight		= height;
1051 	}
1052 
setImageLayout(vk::VkImageLayout layout)1053 	void				setImageLayout	(vk::VkImageLayout layout)
1054 	{
1055 		DE_ASSERT(m_currentImage);
1056 		m_currentImageLayout = layout;
1057 	}
1058 
getImage(void) const1059 	vk::VkImage			getImage		(void) const { return *m_currentImage; }
getImageWidth(void) const1060 	deInt32				getImageWidth	(void) const
1061 	{
1062 		DE_ASSERT(m_currentImage);
1063 		return m_currentImageWidth;
1064 	}
getImageHeight(void) const1065 	deInt32				getImageHeight	(void) const
1066 	{
1067 		DE_ASSERT(m_currentImage);
1068 		return m_currentImageHeight;
1069 	}
getImageMemorySize(void) const1070 	vk::VkDeviceSize	getImageMemorySize	(void) const
1071 	{
1072 		DE_ASSERT(m_currentImage);
1073 		return m_currentImageMemorySize;
1074 	}
1075 
releaseImage(void)1076 	void				releaseImage	(void) { m_currentImage.disown(); }
1077 
getImageLayout(void) const1078 	vk::VkImageLayout	getImageLayout	(void) const
1079 	{
1080 		DE_ASSERT(m_currentImage);
1081 		return m_currentImageLayout;
1082 	}
1083 
1084 private:
1085 	const Context&			m_context;
1086 	const Memory&			m_memory;
1087 
1088 	vk::Move<vk::VkBuffer>	m_currentBuffer;
1089 	vk::VkDeviceSize		m_currentBufferSize;
1090 
1091 	vk::Move<vk::VkImage>	m_currentImage;
1092 	vk::VkDeviceSize		m_currentImageMemorySize;
1093 	vk::VkImageLayout		m_currentImageLayout;
1094 	deInt32					m_currentImageWidth;
1095 	deInt32					m_currentImageHeight;
1096 };
1097 
1098 class ExecuteContext
1099 {
1100 public:
ExecuteContext(const Context & context)1101 					ExecuteContext	(const Context&	context)
1102 		: m_context	(context)
1103 	{
1104 	}
1105 
getContext(void) const1106 	const Context&	getContext		(void) const { return m_context; }
setMapping(void * ptr)1107 	void			setMapping		(void* ptr) { m_mapping = ptr; }
getMapping(void) const1108 	void*			getMapping		(void) const { return m_mapping; }
1109 
1110 private:
1111 	const Context&	m_context;
1112 	void*			m_mapping;
1113 };
1114 
// State shared between Command::verify() calls: the test log and result
// collector, plus reference (host-side) copies of the memory contents and
// of the image being rendered/copied to.
class VerifyContext
{
public:
							VerifyContext		(TestLog&				log,
												 tcu::ResultCollector&	resultCollector,
												 const Context&			context,
												 vk::VkDeviceSize		size)
		: m_log				(log)
		, m_resultCollector	(resultCollector)
		, m_context			(context)
		, m_reference		((size_t)size)	// one reference byte per memory byte
	{
	}

	const Context&			getContext			(void) const { return m_context; }
	TestLog&				getLog				(void) const { return m_log; }
	tcu::ResultCollector&	getResultCollector	(void) const { return m_resultCollector; }

	// Mutable: commands update the reference state as they verify.
	ReferenceMemory&		getReference		(void) { return m_reference; }
	TextureLevel&			getReferenceImage	(void) { return m_referenceImage; }

private:
	TestLog&				m_log;
	tcu::ResultCollector&	m_resultCollector;
	const Context&			m_context;
	ReferenceMemory			m_reference;
	TextureLevel			m_referenceImage;
};
1143 
1144 class Command
1145 {
1146 public:
1147 	// Constructor should allocate all non-vulkan resources.
~Command(void)1148 	virtual				~Command	(void) {}
1149 
1150 	// Get name of the command
1151 	virtual const char*	getName		(void) const = 0;
1152 
1153 	// Log prepare operations
logPrepare(TestLog &,size_t) const1154 	virtual void		logPrepare	(TestLog&, size_t) const {}
1155 	// Log executed operations
logExecute(TestLog &,size_t) const1156 	virtual void		logExecute	(TestLog&, size_t) const {}
1157 
1158 	// Prepare should allocate all vulkan resources and resources that require
1159 	// that buffer or memory has been already allocated. This should build all
1160 	// command buffers etc.
prepare(PrepareContext &)1161 	virtual void		prepare		(PrepareContext&) {}
1162 
1163 	// Execute command. Write or read mapped memory, submit commands to queue
1164 	// etc.
execute(ExecuteContext &)1165 	virtual void		execute		(ExecuteContext&) {}
1166 
1167 	// Verify that results are correct.
verify(VerifyContext &,size_t)1168 	virtual void		verify		(VerifyContext&, size_t) {}
1169 
1170 protected:
1171 	// Allow only inheritance
Command(void)1172 						Command		(void) {}
1173 
1174 private:
1175 	// Disallow copying
1176 						Command		(const Command&);
1177 	Command&			operator&	(const Command&);
1178 };
1179 
1180 class Map : public Command
1181 {
1182 public:
Map(void)1183 						Map			(void) {}
~Map(void)1184 						~Map		(void) {}
getName(void) const1185 	const char*			getName		(void) const { return "Map"; }
1186 
1187 
logExecute(TestLog & log,size_t commandIndex) const1188 	void				logExecute	(TestLog& log, size_t commandIndex) const
1189 	{
1190 		log << TestLog::Message << commandIndex << ":" << getName() << " Map memory" << TestLog::EndMessage;
1191 	}
1192 
prepare(PrepareContext & context)1193 	void				prepare		(PrepareContext& context)
1194 	{
1195 		m_memory	= context.getMemory().getMemory();
1196 		m_size		= context.getMemory().getSize();
1197 	}
1198 
execute(ExecuteContext & context)1199 	void				execute		(ExecuteContext& context)
1200 	{
1201 		const vk::DeviceInterface&	vkd		= context.getContext().getDeviceInterface();
1202 		const vk::VkDevice			device	= context.getContext().getDevice();
1203 
1204 		context.setMapping(mapMemory(vkd, device, m_memory, m_size));
1205 	}
1206 
1207 private:
1208 	vk::VkDeviceMemory	m_memory;
1209 	vk::VkDeviceSize	m_size;
1210 };
1211 
1212 class UnMap : public Command
1213 {
1214 public:
UnMap(void)1215 						UnMap		(void) {}
~UnMap(void)1216 						~UnMap		(void) {}
getName(void) const1217 	const char*			getName		(void) const { return "UnMap"; }
1218 
logExecute(TestLog & log,size_t commandIndex) const1219 	void				logExecute	(TestLog& log, size_t commandIndex) const
1220 	{
1221 		log << TestLog::Message << commandIndex << ": Unmap memory" << TestLog::EndMessage;
1222 	}
1223 
prepare(PrepareContext & context)1224 	void				prepare		(PrepareContext& context)
1225 	{
1226 		m_memory	= context.getMemory().getMemory();
1227 	}
1228 
execute(ExecuteContext & context)1229 	void				execute		(ExecuteContext& context)
1230 	{
1231 		const vk::DeviceInterface&	vkd		= context.getContext().getDeviceInterface();
1232 		const vk::VkDevice			device	= context.getContext().getDevice();
1233 
1234 		vkd.unmapMemory(device, m_memory);
1235 		context.setMapping(DE_NULL);
1236 	}
1237 
1238 private:
1239 	vk::VkDeviceMemory	m_memory;
1240 };
1241 
1242 class Invalidate : public Command
1243 {
1244 public:
Invalidate(void)1245 						Invalidate	(void) {}
~Invalidate(void)1246 						~Invalidate	(void) {}
getName(void) const1247 	const char*			getName		(void) const { return "Invalidate"; }
1248 
logExecute(TestLog & log,size_t commandIndex) const1249 	void				logExecute	(TestLog& log, size_t commandIndex) const
1250 	{
1251 		log << TestLog::Message << commandIndex << ": Invalidate mapped memory" << TestLog::EndMessage;
1252 	}
1253 
prepare(PrepareContext & context)1254 	void				prepare		(PrepareContext& context)
1255 	{
1256 		m_memory	= context.getMemory().getMemory();
1257 		m_size		= context.getMemory().getSize();
1258 	}
1259 
execute(ExecuteContext & context)1260 	void				execute		(ExecuteContext& context)
1261 	{
1262 		const vk::DeviceInterface&	vkd		= context.getContext().getDeviceInterface();
1263 		const vk::VkDevice			device	= context.getContext().getDevice();
1264 
1265 		vk::invalidateMappedMemoryRange(vkd, device, m_memory, 0, VK_WHOLE_SIZE);
1266 	}
1267 
1268 private:
1269 	vk::VkDeviceMemory	m_memory;
1270 	vk::VkDeviceSize	m_size;
1271 };
1272 
1273 class Flush : public Command
1274 {
1275 public:
Flush(void)1276 						Flush		(void) {}
~Flush(void)1277 						~Flush		(void) {}
getName(void) const1278 	const char*			getName		(void) const { return "Flush"; }
1279 
logExecute(TestLog & log,size_t commandIndex) const1280 	void				logExecute	(TestLog& log, size_t commandIndex) const
1281 	{
1282 		log << TestLog::Message << commandIndex << ": Flush mapped memory" << TestLog::EndMessage;
1283 	}
1284 
prepare(PrepareContext & context)1285 	void				prepare		(PrepareContext& context)
1286 	{
1287 		m_memory	= context.getMemory().getMemory();
1288 		m_size		= context.getMemory().getSize();
1289 	}
1290 
execute(ExecuteContext & context)1291 	void				execute		(ExecuteContext& context)
1292 	{
1293 		const vk::DeviceInterface&	vkd		= context.getContext().getDeviceInterface();
1294 		const vk::VkDevice			device	= context.getContext().getDevice();
1295 
1296 		vk::flushMappedMemoryRange(vkd, device, m_memory, 0, VK_WHOLE_SIZE);
1297 	}
1298 
1299 private:
1300 	vk::VkDeviceMemory	m_memory;
1301 	vk::VkDeviceSize	m_size;
1302 };
1303 
1304 // Host memory reads and writes
// Reads and/or writes the mapped memory through the host pointer.
// Writes are pseudo-random bytes derived from 'seed'; when both read and
// write are enabled, each byte is read and XOR'ed with a seeded mask.
class HostMemoryAccess : public Command
{
public:
					HostMemoryAccess	(bool read, bool write, deUint32 seed);
					~HostMemoryAccess	(void) {}
	const char*		getName				(void) const { return "HostMemoryAccess"; }

	void			logExecute			(TestLog& log, size_t commandIndex) const;
	void			prepare				(PrepareContext& context);
	void			execute				(ExecuteContext& context);
	void			verify				(VerifyContext& context, size_t commandIndex);

private:
	const bool		m_read;		// record bytes read from the mapping
	const bool		m_write;	// write seeded pseudo-random bytes
	const deUint32	m_seed;		// seed for de::Random, replayed in verify()

	size_t			m_size;		// memory size, set in prepare()
	vector<deUint8>	m_readData;	// bytes observed during execute(), checked in verify()
};
1325 
// See class comment; m_size stays 0 until prepare() queries the memory.
HostMemoryAccess::HostMemoryAccess (bool read, bool write, deUint32 seed)
	: m_read	(read)
	, m_write	(write)
	, m_seed	(seed)
	, m_size	(0)
{
}
1333 
// Log which access modes are active and the seed used for the write data.
void HostMemoryAccess::logExecute (TestLog& log, size_t commandIndex) const
{
	log << TestLog::Message << commandIndex << ": Host memory access:" << (m_read ? " read" : "") << (m_write ? " write" : "")  << ", seed: " << m_seed << TestLog::EndMessage;
}
1338 
prepare(PrepareContext & context)1339 void HostMemoryAccess::prepare (PrepareContext& context)
1340 {
1341 	m_size = (size_t)context.getMemory().getSize();
1342 
1343 	if (m_read)
1344 		m_readData.resize(m_size, 0);
1345 }
1346 
// Perform the host access through the pointer published by Map.
// For allocations of at least ONE_MEGABYTE a bulk memcpy is used for the
// read half; otherwise everything is done byte-by-byte. Both branches
// consume the RNG in the same order, so verify() can replay it exactly.
void HostMemoryAccess::execute (ExecuteContext& context)
{
	if (m_read && m_write)
	{
		de::Random		rng	(m_seed);
		deUint8* const	ptr	= (deUint8*)context.getMapping();
		if (m_size >= ONE_MEGABYTE)
		{
			// Capture the whole range first, then write back value ^ mask.
			deMemcpy(&m_readData[0], ptr, m_size);
			for (size_t pos = 0; pos < m_size; ++pos)
			{
				ptr[pos] = m_readData[pos] ^ rng.getUint8();
			}
		}
		else
		{
			// Interleaved per-byte read-modify-write; same net effect.
			for (size_t pos = 0; pos < m_size; ++pos)
			{
				const deUint8	mask	= rng.getUint8();
				const deUint8	value	= ptr[pos];

				m_readData[pos] = value;
				ptr[pos] = value ^ mask;
			}
		}
	}
	else if (m_read)
	{
		// Read-only: just capture the current contents.
		const deUint8* const	ptr = (deUint8*)context.getMapping();
		if (m_size >= ONE_MEGABYTE)
		{
			deMemcpy(&m_readData[0], ptr, m_size);
		}
		else
		{
			for (size_t pos = 0; pos < m_size; ++pos)
			{
				m_readData[pos] = ptr[pos];
			}
		}
	}
	else if (m_write)
	{
		// Write-only: fill with the seeded pseudo-random byte stream.
		de::Random		rng	(m_seed);
		deUint8* const	ptr	= (deUint8*)context.getMapping();
		for (size_t pos = 0; pos < m_size; ++pos)
		{
			ptr[pos] = rng.getUint8();
		}
	}
	else
		DE_FATAL("Host memory access without read or write.");
}
1400 
// Check the bytes captured in execute() against the reference memory and
// update the reference to mirror any writes. Replays the same RNG stream
// (same seed, same consumption order) that execute() used. Bytes whose
// reference value is undefined are skipped: their content is unspecified.
void HostMemoryAccess::verify (VerifyContext& context, size_t commandIndex)
{
	tcu::ResultCollector&	resultCollector	= context.getResultCollector();
	ReferenceMemory&		reference		= context.getReference();
	de::Random				rng				(m_seed);

	if (m_read && m_write)
	{
		for (size_t pos = 0; pos < m_size; pos++)
		{
			// Mask must be drawn even for undefined bytes to keep the RNG
			// stream aligned with execute().
			const deUint8	mask	= rng.getUint8();
			const deUint8	value	= m_readData[pos];

			if (reference.isDefined(pos))
			{
				if (value != reference.get(pos))
				{
					// Report only the first mismatch to avoid log spam.
					resultCollector.fail(
							de::toString(commandIndex) + ":" + getName()
							+ " Result differs from reference, Expected: "
							+ de::toString(tcu::toHex<8>(reference.get(pos)))
							+ ", Got: "
							+ de::toString(tcu::toHex<8>(value))
							+ ", At offset: "
							+ de::toString(pos));
					break;
				}

				// Mirror the XOR write performed by execute().
				reference.set(pos, reference.get(pos) ^ mask);
			}
		}
	}
	else if (m_read)
	{
		for (size_t pos = 0; pos < m_size; pos++)
		{
			const deUint8	value	= m_readData[pos];

			if (reference.isDefined(pos))
			{
				if (value != reference.get(pos))
				{
					resultCollector.fail(
							de::toString(commandIndex) + ":" + getName()
							+ " Result differs from reference, Expected: "
							+ de::toString(tcu::toHex<8>(reference.get(pos)))
							+ ", Got: "
							+ de::toString(tcu::toHex<8>(value))
							+ ", At offset: "
							+ de::toString(pos));
					break;
				}
			}
		}
	}
	else if (m_write)
	{
		// Write-only: the whole range becomes defined with the seeded stream.
		for (size_t pos = 0; pos < m_size; pos++)
		{
			const deUint8	value	= rng.getUint8();

			reference.set(pos, value);
		}
	}
	else
		DE_FATAL("Host memory access without read or write.");
}
1468 
// Creates a buffer of the memory object's maximum buffer size and hands
// ownership to the prepare context.
class CreateBuffer : public Command
{
public:
									CreateBuffer	(vk::VkBufferUsageFlags	usage,
													 vk::VkSharingMode		sharing);
									~CreateBuffer	(void) {}
	const char*						getName			(void) const { return "CreateBuffer"; }

	void							logPrepare		(TestLog& log, size_t commandIndex) const;
	void							prepare			(PrepareContext& context);

private:
	const vk::VkBufferUsageFlags	m_usage;
	const vk::VkSharingMode			m_sharing;
};
1484 
// Store the creation parameters; the buffer itself is created in prepare().
CreateBuffer::CreateBuffer (vk::VkBufferUsageFlags	usage,
							vk::VkSharingMode		sharing)
	: m_usage	(usage)
	, m_sharing	(sharing)
{
}
1491 
// Log the sharing mode and usage flags the buffer will be created with.
void CreateBuffer::logPrepare (TestLog& log, size_t commandIndex) const
{
	log << TestLog::Message << commandIndex << ":" << getName() << " Create buffer, Sharing mode: " << m_sharing << ", Usage: " << vk::getBufferUsageFlagsStr(m_usage) << TestLog::EndMessage;
}
1496 
prepare(PrepareContext & context)1497 void CreateBuffer::prepare (PrepareContext& context)
1498 {
1499 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
1500 	const vk::VkDevice			device			= context.getContext().getDevice();
1501 	const vk::VkDeviceSize		bufferSize		= context.getMemory().getMaxBufferSize();
1502 	const vector<deUint32>&		queueFamilies	= context.getContext().getQueueFamilies();
1503 
1504 	context.setBuffer(createBuffer(vkd, device, bufferSize, m_usage, m_sharing, queueFamilies), bufferSize);
1505 }
1506 
// Takes ownership of the current buffer from the prepare context and
// destroys it during execute().
class DestroyBuffer : public Command
{
public:
							DestroyBuffer	(void);
							~DestroyBuffer	(void) {}
	const char*				getName			(void) const { return "DestroyBuffer"; }

	void					logExecute		(TestLog& log, size_t commandIndex) const;
	void					prepare			(PrepareContext& context);
	void					execute			(ExecuteContext& context);

private:
	vk::Move<vk::VkBuffer>	m_buffer;	// owned between prepare() and execute()
};
1521 
// Nothing to do until prepare() claims the buffer.
DestroyBuffer::DestroyBuffer (void)
{
}
1525 
// Wrap the context's buffer handle in an owning Move and tell the context
// to give up its ownership, so exactly one owner remains.
void DestroyBuffer::prepare (PrepareContext& context)
{
	m_buffer = vk::Move<vk::VkBuffer>(vk::check(context.getBuffer()), vk::Deleter<vk::VkBuffer>(context.getContext().getDeviceInterface(), context.getContext().getDevice(), DE_NULL));
	context.releaseBuffer();
}
1531 
// Log the deferred destruction performed in execute().
void DestroyBuffer::logExecute (TestLog& log, size_t commandIndex) const
{
	log << TestLog::Message << commandIndex << ":" << getName() << " Destroy buffer" << TestLog::EndMessage;
}
1536 
execute(ExecuteContext & context)1537 void DestroyBuffer::execute (ExecuteContext& context)
1538 {
1539 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
1540 	const vk::VkDevice			device			= context.getContext().getDevice();
1541 
1542 	vkd.destroyBuffer(device, m_buffer.disown(), DE_NULL);
1543 }
1544 
// Binds the test memory (offset 0) to the current buffer.
class BindBufferMemory : public Command
{
public:
				BindBufferMemory	(void) {}
				~BindBufferMemory	(void) {}
	const char*	getName				(void) const { return "BindBufferMemory"; }

	void		logPrepare			(TestLog& log, size_t commandIndex) const;
	void		prepare				(PrepareContext& context);
};
1555 
// Log the bind performed in prepare().
void BindBufferMemory::logPrepare (TestLog& log, size_t commandIndex) const
{
	log << TestLog::Message << commandIndex << ":" << getName() << " Bind memory to buffer" << TestLog::EndMessage;
}
1560 
prepare(PrepareContext & context)1561 void BindBufferMemory::prepare (PrepareContext& context)
1562 {
1563 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
1564 	const vk::VkDevice			device			= context.getContext().getDevice();
1565 
1566 	VK_CHECK(vkd.bindBufferMemory(device, context.getBuffer(), context.getMemory().getMemory(), 0));
1567 }
1568 
// Creates an RGBA8 optimal-tiling 2D image at the memory object's maximum
// image dimensions and hands ownership to the prepare context. Also resets
// the reference image in verify().
class CreateImage : public Command
{
public:
									CreateImage		(vk::VkImageUsageFlags	usage,
													 vk::VkSharingMode		sharing);
									~CreateImage	(void) {}
	const char*						getName			(void) const { return "CreateImage"; }

	void							logPrepare		(TestLog& log, size_t commandIndex) const;
	void							prepare			(PrepareContext& context);
	void							verify			(VerifyContext& context, size_t commandIndex);

private:
	const vk::VkImageUsageFlags	m_usage;
	const vk::VkSharingMode		m_sharing;
	deInt32						m_imageWidth;	// set in prepare(), reused in verify()
	deInt32						m_imageHeight;
};
1587 
// Store creation parameters; dimensions are resolved in prepare().
CreateImage::CreateImage (vk::VkImageUsageFlags	usage,
						  vk::VkSharingMode		sharing)
	: m_usage		(usage)
	, m_sharing		(sharing)
	, m_imageWidth	(0)
	, m_imageHeight	(0)
{
}
1596 
// Log the sharing mode and usage flags the image will be created with.
void CreateImage::logPrepare (TestLog& log, size_t commandIndex) const
{
	log << TestLog::Message << commandIndex << ":" << getName() << " Create image, sharing: " << m_sharing << ", usage: " << vk::getImageUsageFlagsStr(m_usage)  << TestLog::EndMessage;
}
1601 
// Create the image at the precomputed maximum dimensions and pass ownership
// (together with its layout and real memory requirement) to the context.
void CreateImage::prepare (PrepareContext& context)
{
	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
	const vk::VkDevice			device			= context.getContext().getDevice();
	const vector<deUint32>&		queueFamilies	= context.getContext().getQueueFamilies();

	m_imageWidth	= context.getMemory().getMaxImageWidth();
	m_imageHeight	= context.getMemory().getMaxImageHeight();

	{
		const vk::VkImageCreateInfo	createInfo		=
		{
			vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
			DE_NULL,

			0u,							// flags
			vk::VK_IMAGE_TYPE_2D,
			vk::VK_FORMAT_R8G8B8A8_UNORM,
			{
				(deUint32)m_imageWidth,
				(deUint32)m_imageHeight,
				1u,
			},
			1u, 1u,						// mipLevels, arrayLayers
			vk::VK_SAMPLE_COUNT_1_BIT,
			vk::VK_IMAGE_TILING_OPTIMAL,
			m_usage,
			m_sharing,
			(deUint32)queueFamilies.size(),
			&queueFamilies[0],
			vk::VK_IMAGE_LAYOUT_UNDEFINED
		};
		vk::Move<vk::VkImage>			image			(createImage(vkd, device, &createInfo));
		const vk::VkMemoryRequirements	requirements	= vk::getImageMemoryRequirements(vkd, device, *image);

		// Pass the implementation-reported size, not width*height*4.
		context.setImage(image, vk::VK_IMAGE_LAYOUT_UNDEFINED, requirements.size, m_imageWidth, m_imageHeight);
	}
}
1640 
// Reset the reference image to a fresh RGBA8 level matching the new image's
// dimensions; contents are rebuilt by subsequent commands.
void CreateImage::verify (VerifyContext& context, size_t)
{
	context.getReferenceImage() = TextureLevel(TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_imageWidth, m_imageHeight);
}
1645 
// Takes ownership of the current image from the prepare context and
// destroys it during execute().
class DestroyImage : public Command
{
public:
							DestroyImage	(void);
							~DestroyImage	(void) {}
	const char*				getName			(void) const { return "DestroyImage"; }

	void					logExecute		(TestLog& log, size_t commandIndex) const;
	void					prepare			(PrepareContext& context);
	void					execute			(ExecuteContext& context);

private:
	vk::Move<vk::VkImage>	m_image;	// owned between prepare() and execute()
};
1660 
// Nothing to do until prepare() claims the image.
DestroyImage::DestroyImage (void)
{
}
1664 
// Wrap the context's image handle in an owning Move and tell the context
// to give up its ownership, so exactly one owner remains.
void DestroyImage::prepare (PrepareContext& context)
{
	m_image = vk::Move<vk::VkImage>(vk::check(context.getImage()), vk::Deleter<vk::VkImage>(context.getContext().getDeviceInterface(), context.getContext().getDevice(), DE_NULL));
	context.releaseImage();
}
1670 
1671 
// Log the deferred destruction performed in execute().
void DestroyImage::logExecute (TestLog& log, size_t commandIndex) const
{
	log << TestLog::Message << commandIndex << ":" << getName() << " Destroy image" << TestLog::EndMessage;
}
1676 
execute(ExecuteContext & context)1677 void DestroyImage::execute (ExecuteContext& context)
1678 {
1679 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
1680 	const vk::VkDevice			device			= context.getContext().getDevice();
1681 
1682 	vkd.destroyImage(device, m_image.disown(), DE_NULL);
1683 }
1684 
// Binds the test memory (offset 0) to the current image.
class BindImageMemory : public Command
{
public:
				BindImageMemory		(void) {}
				~BindImageMemory	(void) {}
	const char*	getName				(void) const { return "BindImageMemory"; }

	void		logPrepare			(TestLog& log, size_t commandIndex) const;
	void		prepare				(PrepareContext& context);
};
1695 
// Log the bind performed in prepare().
void BindImageMemory::logPrepare (TestLog& log, size_t commandIndex) const
{
	log << TestLog::Message << commandIndex << ":" << getName() << " Bind memory to image" << TestLog::EndMessage;
}
1700 
prepare(PrepareContext & context)1701 void BindImageMemory::prepare (PrepareContext& context)
1702 {
1703 	const vk::DeviceInterface&		vkd				= context.getContext().getDeviceInterface();
1704 	const vk::VkDevice				device			= context.getContext().getDevice();
1705 
1706 	VK_CHECK(vkd.bindImageMemory(device, context.getImage(), context.getMemory().getMemory(), 0));
1707 }
1708 
1709 class QueueWaitIdle : public Command
1710 {
1711 public:
QueueWaitIdle(void)1712 				QueueWaitIdle	(void) {}
~QueueWaitIdle(void)1713 				~QueueWaitIdle	(void) {}
getName(void) const1714 	const char*	getName			(void) const { return "QueuetWaitIdle"; }
1715 
1716 	void		logExecute		(TestLog& log, size_t commandIndex) const;
1717 	void		execute			(ExecuteContext& context);
1718 };
1719 
// Log the queue-level synchronization point.
void QueueWaitIdle::logExecute (TestLog& log, size_t commandIndex) const
{
	log << TestLog::Message << commandIndex << ":" << getName() << " Queue wait idle" << TestLog::EndMessage;
}
1724 
execute(ExecuteContext & context)1725 void QueueWaitIdle::execute (ExecuteContext& context)
1726 {
1727 	const vk::DeviceInterface&	vkd		= context.getContext().getDeviceInterface();
1728 	const vk::VkQueue			queue	= context.getContext().getQueue();
1729 
1730 	VK_CHECK(vkd.queueWaitIdle(queue));
1731 }
1732 
// Waits until the whole device has finished all submitted work.
class DeviceWaitIdle : public Command
{
public:
				DeviceWaitIdle	(void) {}
				~DeviceWaitIdle	(void) {}
	const char*	getName			(void) const { return "DeviceWaitIdle"; }

	void		logExecute		(TestLog& log, size_t commandIndex) const;
	void		execute			(ExecuteContext& context);
};
1743 
// Log the device-level synchronization point.
void DeviceWaitIdle::logExecute (TestLog& log, size_t commandIndex) const
{
	log << TestLog::Message << commandIndex << ":" << getName() << " Device wait idle" << TestLog::EndMessage;
}
1748 
execute(ExecuteContext & context)1749 void DeviceWaitIdle::execute (ExecuteContext& context)
1750 {
1751 	const vk::DeviceInterface&	vkd		= context.getContext().getDeviceInterface();
1752 	const vk::VkDevice			device	= context.getContext().getDevice();
1753 
1754 	VK_CHECK(vkd.deviceWaitIdle(device));
1755 }
1756 
// Read-only view of the test state passed to CmdCommand::submit(). Wraps the
// PrepareContext and the primary command buffer the commands record into.
// \note Holds references only; the PrepareContext must outlive this object.
class SubmitContext
{
public:
								SubmitContext		(const PrepareContext&		context,
													 const vk::VkCommandBuffer	commandBuffer)
		: m_context			(context)
		, m_commandBuffer	(commandBuffer)
	{
	}

	const Memory&				getMemory			(void) const { return m_context.getMemory(); }
	const Context&				getContext			(void) const { return m_context.getContext(); }
	vk::VkCommandBuffer			getCommandBuffer	(void) const { return m_commandBuffer; }

	vk::VkBuffer				getBuffer			(void) const { return m_context.getBuffer(); }
	vk::VkDeviceSize			getBufferSize		(void) const { return m_context.getBufferSize(); }

	vk::VkImage					getImage			(void) const { return m_context.getImage(); }
	deInt32						getImageWidth		(void) const { return m_context.getImageWidth(); }
	deInt32						getImageHeight		(void) const { return m_context.getImageHeight(); }

private:
	const PrepareContext&		m_context;
	const vk::VkCommandBuffer	m_commandBuffer;
};
1782 
// Base class for operations that are recorded into a command buffer (as opposed
// to Command, which models host-side operations). All hooks default to no-ops so
// subclasses override only the phases they participate in.
class CmdCommand
{
public:
	virtual				~CmdCommand	(void) {}
	virtual const char*	getName		(void) const = 0;

	// Log things that are done during prepare
	virtual void		logPrepare	(TestLog&, size_t) const {}
	// Log submitted calls etc.
	virtual void		logSubmit	(TestLog&, size_t) const {}

	// Allocate vulkan resources and prepare for submit.
	virtual void		prepare		(PrepareContext&) {}

	// Submit commands to command buffer.
	virtual void		submit		(SubmitContext&) {}

	// Verify results
	virtual void		verify		(VerifyContext&, size_t) {}
};
1803 
// Host-side command that records a list of CmdCommands into a primary command
// buffer (during prepare) and submits that buffer to the queue (during execute).
// Takes ownership of the CmdCommand pointers and deletes them in its destructor.
class SubmitCommandBuffer : public Command
{
public:
					SubmitCommandBuffer		(const vector<CmdCommand*>& commands);
					~SubmitCommandBuffer	(void);

	const char*		getName					(void) const { return "SubmitCommandBuffer"; }
	void			logExecute				(TestLog& log, size_t commandIndex) const;
	void			logPrepare				(TestLog& log, size_t commandIndex) const;

	// Allocate command buffer and submit commands to command buffer
	void			prepare					(PrepareContext& context);
	void			execute					(ExecuteContext& context);

	// Verify that results are correct.
	void			verify					(VerifyContext& context, size_t commandIndex);

private:
	vector<CmdCommand*>				m_commands;			// Owned; deleted in destructor.
	vk::Move<vk::VkCommandBuffer>	m_commandBuffer;	// Recorded in prepare(), submitted in execute().
};
1825 
// Takes ownership of the CmdCommand pointers in 'commands'.
SubmitCommandBuffer::SubmitCommandBuffer (const vector<CmdCommand*>& commands)
	: m_commands	(commands)
{
}
1830 
// Frees the owned CmdCommand objects; the command buffer is released by its Move<> wrapper.
SubmitCommandBuffer::~SubmitCommandBuffer (void)
{
	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
		delete m_commands[cmdNdx];
}
1836 
// Allocate the command buffer, let each sub-command allocate its resources, then
// record all sub-commands into the command buffer and end recording.
// \note The two loops are deliberately separate: every command's prepare() runs
//       before any command's submit(), since prepare() may change shared state
//       (e.g. the tracked image layout) that later submits depend on.
void SubmitCommandBuffer::prepare (PrepareContext& context)
{
	const vk::DeviceInterface&	vkd			= context.getContext().getDeviceInterface();
	const vk::VkDevice			device		= context.getContext().getDevice();
	const vk::VkCommandPool		commandPool	= context.getContext().getCommandPool();

	m_commandBuffer = createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY);

	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
	{
		CmdCommand& command = *m_commands[cmdNdx];

		command.prepare(context);
	}

	{
		SubmitContext submitContext (context, *m_commandBuffer);

		for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
		{
			CmdCommand& command = *m_commands[cmdNdx];

			command.submit(submitContext);
		}

		endCommandBuffer(vkd, *m_commandBuffer);
	}
}
1865 
execute(ExecuteContext & context)1866 void SubmitCommandBuffer::execute (ExecuteContext& context)
1867 {
1868 	const vk::DeviceInterface&	vkd		= context.getContext().getDeviceInterface();
1869 	const vk::VkCommandBuffer	cmd		= *m_commandBuffer;
1870 	const vk::VkQueue			queue	= context.getContext().getQueue();
1871 	const vk::VkSubmitInfo		submit	=
1872 	{
1873 		vk::VK_STRUCTURE_TYPE_SUBMIT_INFO,
1874 		DE_NULL,
1875 
1876 		0,
1877 		DE_NULL,
1878 		(const vk::VkPipelineStageFlags*)DE_NULL,
1879 
1880 		1,
1881 		&cmd,
1882 
1883 		0,
1884 		DE_NULL
1885 	};
1886 
1887 	vkd.queueSubmit(queue, 1, &submit, 0);
1888 }
1889 
verify(VerifyContext & context,size_t commandIndex)1890 void SubmitCommandBuffer::verify (VerifyContext& context, size_t commandIndex)
1891 {
1892 	const string				sectionName	(de::toString(commandIndex) + ":" + getName());
1893 	const tcu::ScopedLogSection	section		(context.getLog(), sectionName, sectionName);
1894 
1895 	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
1896 		m_commands[cmdNdx]->verify(context, cmdNdx);
1897 }
1898 
// Open a log section for this submit and log the prepare phase of each sub-command.
void SubmitCommandBuffer::logPrepare (TestLog& log, size_t commandIndex) const
{
	const string				sectionName	(de::toString(commandIndex) + ":" + getName());
	const tcu::ScopedLogSection	section		(log, sectionName, sectionName);

	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
		m_commands[cmdNdx]->logPrepare(log, cmdNdx);
}
1907 
// Log what executing this command does. Executing replays the recorded commands,
// so this intentionally forwards to each sub-command's logSubmit().
void SubmitCommandBuffer::logExecute (TestLog& log, size_t commandIndex) const
{
	const string				sectionName	(de::toString(commandIndex) + ":" + getName());
	const tcu::ScopedLogSection	section		(log, sectionName, sectionName);

	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
		m_commands[cmdNdx]->logSubmit(log, cmdNdx);
}
1916 
// Records a vkCmdPipelineBarrier with a single global, buffer or image memory
// barrier covering the whole test resource.
class PipelineBarrier : public CmdCommand
{
public:
	// Which barrier struct is passed to vkCmdPipelineBarrier.
	enum Type
	{
		TYPE_GLOBAL = 0,	// VkMemoryBarrier
		TYPE_BUFFER,		// VkBufferMemoryBarrier over the whole buffer
		TYPE_IMAGE,			// VkImageMemoryBarrier (layout preserved)
		TYPE_LAST
	};
									// imageLayout must be set when type is TYPE_IMAGE; it is used as
									// both old and new layout (no transition is performed).
									PipelineBarrier		(const vk::VkPipelineStageFlags			srcStages,
														 const vk::VkAccessFlags				srcAccesses,
														 const vk::VkPipelineStageFlags			dstStages,
														 const vk::VkAccessFlags				dstAccesses,
														 Type									type,
														 const tcu::Maybe<vk::VkImageLayout>	imageLayout);
									~PipelineBarrier	(void) {}
	const char*						getName				(void) const { return "PipelineBarrier"; }

	void							logSubmit			(TestLog& log, size_t commandIndex) const;
	void							submit				(SubmitContext& context);

private:
	const vk::VkPipelineStageFlags		m_srcStages;
	const vk::VkAccessFlags				m_srcAccesses;
	const vk::VkPipelineStageFlags		m_dstStages;
	const vk::VkAccessFlags				m_dstAccesses;
	const Type							m_type;
	const tcu::Maybe<vk::VkImageLayout>	m_imageLayout;	// Only meaningful for TYPE_IMAGE.
};
1947 
// Store the barrier parameters; nothing is recorded until submit().
PipelineBarrier::PipelineBarrier (const vk::VkPipelineStageFlags		srcStages,
								  const vk::VkAccessFlags				srcAccesses,
								  const vk::VkPipelineStageFlags		dstStages,
								  const vk::VkAccessFlags				dstAccesses,
								  Type									type,
								  const tcu::Maybe<vk::VkImageLayout>	imageLayout)
	: m_srcStages	(srcStages)
	, m_srcAccesses	(srcAccesses)
	, m_dstStages	(dstStages)
	, m_dstAccesses	(dstAccesses)
	, m_type		(type)
	, m_imageLayout	(imageLayout)
{
}
1962 
// Log the barrier type and its stage/access masks.
void PipelineBarrier::logSubmit (TestLog& log, size_t commandIndex) const
{
	log << TestLog::Message << commandIndex << ":" << getName()
		<< " " << (m_type == TYPE_GLOBAL ? "Global pipeline barrier"
					: m_type == TYPE_BUFFER ? "Buffer pipeline barrier"
					: "Image pipeline barrier")
		<< ", srcStages: " << vk::getPipelineStageFlagsStr(m_srcStages) << ", srcAccesses: " << vk::getAccessFlagsStr(m_srcAccesses)
		<< ", dstStages: " << vk::getPipelineStageFlagsStr(m_dstStages) << ", dstAccesses: " << vk::getAccessFlagsStr(m_dstAccesses) << TestLog::EndMessage;
}
1972 
// Record a vkCmdPipelineBarrier with exactly one barrier struct, chosen by m_type.
void PipelineBarrier::submit (SubmitContext& context)
{
	const vk::DeviceInterface&	vkd	= context.getContext().getDeviceInterface();
	const vk::VkCommandBuffer	cmd	= context.getCommandBuffer();

	switch (m_type)
	{
		case TYPE_GLOBAL:
		{
			// Global memory barrier: applies to all memory, no resource handle.
			const vk::VkMemoryBarrier	barrier		=
			{
				vk::VK_STRUCTURE_TYPE_MEMORY_BARRIER,
				DE_NULL,

				m_srcAccesses,
				m_dstAccesses
			};

			vkd.cmdPipelineBarrier(cmd, m_srcStages, m_dstStages, (vk::VkDependencyFlags)0, 1, &barrier, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
			break;
		}

		case TYPE_BUFFER:
		{
			// Buffer barrier over the whole test buffer; no queue family transfer.
			const vk::VkBufferMemoryBarrier	barrier		=
			{
				vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
				DE_NULL,

				m_srcAccesses,
				m_dstAccesses,

				VK_QUEUE_FAMILY_IGNORED,
				VK_QUEUE_FAMILY_IGNORED,

				context.getBuffer(),
				0,
				VK_WHOLE_SIZE
			};

			vkd.cmdPipelineBarrier(cmd, m_srcStages, m_dstStages, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &barrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
			break;
		}

		case TYPE_IMAGE:
		{
			// Image barrier with identical old/new layout: no transition, memory
			// dependency only. Dereferencing m_imageLayout requires the caller to
			// have supplied a layout for TYPE_IMAGE barriers.
			const vk::VkImageMemoryBarrier	barrier		=
			{
				vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
				DE_NULL,

				m_srcAccesses,
				m_dstAccesses,

				*m_imageLayout,
				*m_imageLayout,

				VK_QUEUE_FAMILY_IGNORED,
				VK_QUEUE_FAMILY_IGNORED,

				context.getImage(),
				{
					vk::VK_IMAGE_ASPECT_COLOR_BIT,
					0, 1,	// Base mip level, level count
					0, 1	// Base array layer, layer count
				}
			};

			vkd.cmdPipelineBarrier(cmd, m_srcStages, m_dstStages, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &barrier);
			break;
		}

		default:
			DE_FATAL("Unknown pipeline barrier type");
	}
}
2049 
// Records an image memory barrier that transitions the test image from
// srcLayout to dstLayout. The transition invalidates the reference memory
// contents (verify() marks the image memory range undefined).
class ImageTransition : public CmdCommand
{
public:
						ImageTransition		(vk::VkPipelineStageFlags	srcStages,
											 vk::VkAccessFlags			srcAccesses,

											 vk::VkPipelineStageFlags	dstStages,
											 vk::VkAccessFlags			dstAccesses,

											 vk::VkImageLayout			srcLayout,
											 vk::VkImageLayout			dstLayout);

						~ImageTransition	(void) {}
	const char*			getName				(void) const { return "ImageTransition"; }

	void				prepare				(PrepareContext& context);
	void				logSubmit			(TestLog& log, size_t commandIndex) const;
	void				submit				(SubmitContext& context);
	void				verify				(VerifyContext& context, size_t);

private:
	const vk::VkPipelineStageFlags	m_srcStages;
	const vk::VkAccessFlags			m_srcAccesses;
	const vk::VkPipelineStageFlags	m_dstStages;
	const vk::VkAccessFlags			m_dstAccesses;
	const vk::VkImageLayout			m_srcLayout;
	const vk::VkImageLayout			m_dstLayout;

	vk::VkDeviceSize				m_imageMemorySize;	// Captured in prepare(); used by verify().
};
2080 
// Store the transition parameters; the barrier itself is recorded in submit().
ImageTransition::ImageTransition (vk::VkPipelineStageFlags	srcStages,
								  vk::VkAccessFlags			srcAccesses,

								  vk::VkPipelineStageFlags	dstStages,
								  vk::VkAccessFlags			dstAccesses,

								  vk::VkImageLayout			srcLayout,
								  vk::VkImageLayout			dstLayout)
	: m_srcStages		(srcStages)
	, m_srcAccesses		(srcAccesses)
	, m_dstStages		(dstStages)
	, m_dstAccesses		(dstAccesses)
	, m_srcLayout		(srcLayout)
	, m_dstLayout		(dstLayout)
	, m_imageMemorySize	(0)
{
}
2098 
// Log the layout transition with its stage/access masks and layouts.
void ImageTransition::logSubmit (TestLog& log, size_t commandIndex) const
{
	log << TestLog::Message << commandIndex << ":" << getName()
		<< " Image transition pipeline barrier"
		<< ", srcStages: " << vk::getPipelineStageFlagsStr(m_srcStages) << ", srcAccesses: " << vk::getAccessFlagsStr(m_srcAccesses)
		<< ", dstStages: " << vk::getPipelineStageFlagsStr(m_dstStages) << ", dstAccesses: " << vk::getAccessFlagsStr(m_dstAccesses)
		<< ", srcLayout: " << m_srcLayout << ", dstLayout: " << m_dstLayout << TestLog::EndMessage;
}
2107 
// Validate that the transition's source layout matches the tracked layout
// (UNDEFINED acts as a wildcard on either side), then record the new layout and
// the image memory size for verify().
void ImageTransition::prepare (PrepareContext& context)
{
	DE_ASSERT(context.getImageLayout() == vk::VK_IMAGE_LAYOUT_UNDEFINED || m_srcLayout == vk::VK_IMAGE_LAYOUT_UNDEFINED || context.getImageLayout() == m_srcLayout);

	context.setImageLayout(m_dstLayout);
	m_imageMemorySize = context.getImageMemorySize();
}
2115 
// Record the layout-transition barrier for the whole image (single mip, single layer).
void ImageTransition::submit (SubmitContext& context)
{
	const vk::DeviceInterface&		vkd			= context.getContext().getDeviceInterface();
	const vk::VkCommandBuffer		cmd			= context.getCommandBuffer();
	const vk::VkImageMemoryBarrier	barrier		=
	{
		vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
		DE_NULL,

		m_srcAccesses,
		m_dstAccesses,

		m_srcLayout,
		m_dstLayout,

		VK_QUEUE_FAMILY_IGNORED,
		VK_QUEUE_FAMILY_IGNORED,

		context.getImage(),
		{
			vk::VK_IMAGE_ASPECT_COLOR_BIT,
			0u, 1u,	// Base mip level, level count
			0u, 1u	// Base array layer, layer count
		}
	};

	vkd.cmdPipelineBarrier(cmd, m_srcStages, m_dstStages, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &barrier);
}
2144 
// A layout transition may rearrange the image's memory contents, so mark the
// whole image memory range as undefined in the reference.
void ImageTransition::verify (VerifyContext& context, size_t)
{
	context.getReference().setUndefined(0, (size_t)m_imageMemorySize);
}
2149 
// Records a vkCmdFillBuffer that fills the test buffer with a repeated 32-bit value.
class FillBuffer : public CmdCommand
{
public:
						FillBuffer	(deUint32 value) : m_value(value), m_bufferSize(0) {}
						~FillBuffer	(void) {}
	const char*			getName		(void) const { return "FillBuffer"; }

	void				logSubmit	(TestLog& log, size_t commandIndex) const;
	void				submit		(SubmitContext& context);
	void				verify		(VerifyContext& context, size_t commandIndex);

private:
	const deUint32		m_value;		// 32-bit fill pattern.
	vk::VkDeviceSize	m_bufferSize;	// Filled size (rounded down to a multiple of 4 in submit()).
};
2165 
// Log the fill value used by this command.
void FillBuffer::logSubmit (TestLog& log, size_t commandIndex) const
{
	log << TestLog::Message << commandIndex << ":" << getName() << " Fill value: " << m_value << TestLog::EndMessage;
}
2170 
submit(SubmitContext & context)2171 void FillBuffer::submit (SubmitContext& context)
2172 {
2173 	const vk::DeviceInterface&	vkd			= context.getContext().getDeviceInterface();
2174 	const vk::VkCommandBuffer	cmd			= context.getCommandBuffer();
2175 	const vk::VkBuffer			buffer		= context.getBuffer();
2176 	const vk::VkDeviceSize		sizeMask	= ~(0x3ull); // \note Round down to multiple of 4
2177 
2178 	m_bufferSize = sizeMask & context.getBufferSize();
2179 	vkd.cmdFillBuffer(cmd, buffer, 0, m_bufferSize, m_value);
2180 }
2181 
// Update the reference memory with the expected byte pattern of the fill.
// vkCmdFillBuffer writes the 32-bit value in host-visible memory order, so the
// per-byte expansion depends on host endianness.
void FillBuffer::verify (VerifyContext& context, size_t)
{
	ReferenceMemory&	reference	= context.getReference();

	for (size_t ndx = 0; ndx < m_bufferSize; ndx++)
	{
#if (DE_ENDIANNESS == DE_LITTLE_ENDIAN)
		reference.set(ndx, (deUint8)(0xffu & (m_value >> (8*(ndx % 4)))));
#else
		reference.set(ndx, (deUint8)(0xffu & (m_value >> (8*(3 - (ndx % 4))))));
#endif
	}
}
2195 
// Records vkCmdUpdateBuffer calls that fill the test buffer with pseudo-random
// data derived from a seed. verify() replays the same RNG stream on the reference.
class UpdateBuffer : public CmdCommand
{
public:
						UpdateBuffer	(deUint32 seed) : m_seed(seed), m_bufferSize(0) {}
						~UpdateBuffer	(void) {}
	const char*			getName			(void) const { return "UpdateBuffer"; }

	void				logSubmit		(TestLog& log, size_t commandIndex) const;
	void				submit			(SubmitContext& context);
	void				verify			(VerifyContext& context, size_t commandIndex);

private:
	const deUint32		m_seed;			// RNG seed shared by submit() and verify().
	vk::VkDeviceSize	m_bufferSize;	// Captured in submit(); used by verify().
};
2211 
// Log the RNG seed used to generate the update data.
void UpdateBuffer::logSubmit (TestLog& log, size_t commandIndex) const
{
	log << TestLog::Message << commandIndex << ":" << getName() << " Update buffer, seed: " << m_seed << TestLog::EndMessage;
}
2216 
submit(SubmitContext & context)2217 void UpdateBuffer::submit (SubmitContext& context)
2218 {
2219 	const vk::DeviceInterface&	vkd			= context.getContext().getDeviceInterface();
2220 	const vk::VkCommandBuffer	cmd			= context.getCommandBuffer();
2221 	const vk::VkBuffer			buffer		= context.getBuffer();
2222 	const size_t				blockSize	= 65536;
2223 	std::vector<deUint8>		data		(blockSize, 0);
2224 	de::Random					rng			(m_seed);
2225 
2226 	m_bufferSize = context.getBufferSize();
2227 
2228 	for (size_t updated = 0; updated < m_bufferSize; updated += blockSize)
2229 	{
2230 		for (size_t ndx = 0; ndx < data.size(); ndx++)
2231 			data[ndx] = rng.getUint8();
2232 
2233 		if (m_bufferSize - updated > blockSize)
2234 			vkd.cmdUpdateBuffer(cmd, buffer, updated, blockSize, (const deUint32*)(&data[0]));
2235 		else
2236 			vkd.cmdUpdateBuffer(cmd, buffer, updated, m_bufferSize - updated, (const deUint32*)(&data[0]));
2237 	}
2238 }
2239 
verify(VerifyContext & context,size_t)2240 void UpdateBuffer::verify (VerifyContext& context, size_t)
2241 {
2242 	ReferenceMemory&	reference	= context.getReference();
2243 	const size_t		blockSize	= 65536;
2244 	vector<deUint8>		data		(blockSize, 0);
2245 	de::Random			rng			(m_seed);
2246 
2247 	for (size_t updated = 0; updated < m_bufferSize; updated += blockSize)
2248 	{
2249 		for (size_t ndx = 0; ndx < data.size(); ndx++)
2250 			data[ndx] = rng.getUint8();
2251 
2252 		if (m_bufferSize - updated > blockSize)
2253 			reference.setData(updated, blockSize, &data[0]);
2254 		else
2255 			reference.setData(updated, (size_t)(m_bufferSize - updated), &data[0]);
2256 	}
2257 }
2258 
// Copies the test buffer into a newly allocated host-visible destination buffer
// and verifies the destination against the reference memory.
class BufferCopyToBuffer : public CmdCommand
{
public:
									BufferCopyToBuffer	(void) {}
									~BufferCopyToBuffer	(void) {}
	const char*						getName				(void) const { return "BufferCopyToBuffer"; }

	void							logPrepare			(TestLog& log, size_t commandIndex) const;
	void							prepare				(PrepareContext& context);
	void							logSubmit			(TestLog& log, size_t commandIndex) const;
	void							submit				(SubmitContext& context);
	void							verify				(VerifyContext& context, size_t commandIndex);

private:
	vk::VkDeviceSize				m_bufferSize;	// Size of the copy (whole test buffer).
	vk::Move<vk::VkBuffer>			m_dstBuffer;	// Host-visible destination, read back in verify().
	vk::Move<vk::VkDeviceMemory>	m_memory;		// Backing memory of m_dstBuffer.
};
2277 
// Log the resource allocation done in prepare().
void BufferCopyToBuffer::logPrepare (TestLog& log, size_t commandIndex) const
{
	log << TestLog::Message << commandIndex << ":" << getName() << " Allocate destination buffer for buffer to buffer copy." << TestLog::EndMessage;
}
2282 
// Create a host-visible destination buffer of the same size as the test buffer
// and bind memory to it. Host visibility is required so verify() can map and read it.
void BufferCopyToBuffer::prepare (PrepareContext& context)
{
	const vk::InstanceInterface&	vki				= context.getContext().getInstanceInterface();
	const vk::DeviceInterface&		vkd				= context.getContext().getDeviceInterface();
	const vk::VkPhysicalDevice		physicalDevice	= context.getContext().getPhysicalDevice();
	const vk::VkDevice				device			= context.getContext().getDevice();
	const vector<deUint32>&			queueFamilies	= context.getContext().getQueueFamilies();

	m_bufferSize = context.getBufferSize();

	m_dstBuffer	= createBuffer(vkd, device, m_bufferSize, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies);
	m_memory	= bindBufferMemory(vki, vkd, physicalDevice, device, *m_dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
}
2296 
// Log the copy recorded in submit().
void BufferCopyToBuffer::logSubmit (TestLog& log, size_t commandIndex) const
{
	log << TestLog::Message << commandIndex << ":" << getName() << " Copy buffer to another buffer" << TestLog::EndMessage;
}
2301 
submit(SubmitContext & context)2302 void BufferCopyToBuffer::submit (SubmitContext& context)
2303 {
2304 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
2305 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
2306 	const vk::VkBufferCopy		range			=
2307 	{
2308 		0, 0, // Offsets
2309 		m_bufferSize
2310 	};
2311 
2312 	vkd.cmdCopyBuffer(commandBuffer, context.getBuffer(), *m_dstBuffer, 1, &range);
2313 }
2314 
verify(VerifyContext & context,size_t commandIndex)2315 void BufferCopyToBuffer::verify (VerifyContext& context, size_t commandIndex)
2316 {
2317 	tcu::ResultCollector&					resultCollector	(context.getResultCollector());
2318 	ReferenceMemory&						reference		(context.getReference());
2319 	const vk::DeviceInterface&				vkd				= context.getContext().getDeviceInterface();
2320 	const vk::VkDevice						device			= context.getContext().getDevice();
2321 	const vk::VkQueue						queue			= context.getContext().getQueue();
2322 	const vk::VkCommandPool					commandPool		= context.getContext().getCommandPool();
2323 	const vk::Unique<vk::VkCommandBuffer>	commandBuffer	(createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
2324 	const vk::VkBufferMemoryBarrier			barrier			=
2325 	{
2326 		vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
2327 		DE_NULL,
2328 
2329 		vk::VK_ACCESS_TRANSFER_WRITE_BIT,
2330 		vk::VK_ACCESS_HOST_READ_BIT,
2331 
2332 		VK_QUEUE_FAMILY_IGNORED,
2333 		VK_QUEUE_FAMILY_IGNORED,
2334 		*m_dstBuffer,
2335 		0,
2336 		VK_WHOLE_SIZE
2337 	};
2338 
2339 	vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &barrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
2340 
2341 	endCommandBuffer(vkd, *commandBuffer);
2342 	submitCommandsAndWait(vkd, device, queue, *commandBuffer);
2343 
2344 	{
2345 		void* const	ptr		= mapMemory(vkd, device, *m_memory, m_bufferSize);
2346 		bool		isOk	= true;
2347 
2348 		vk::invalidateMappedMemoryRange(vkd, device, *m_memory, 0, VK_WHOLE_SIZE);
2349 
2350 		{
2351 			const deUint8* const data = (const deUint8*)ptr;
2352 
2353 			for (size_t pos = 0; pos < (size_t)m_bufferSize; pos++)
2354 			{
2355 				if (reference.isDefined(pos))
2356 				{
2357 					if (data[pos] != reference.get(pos))
2358 					{
2359 						resultCollector.fail(
2360 								de::toString(commandIndex) + ":" + getName()
2361 								+ " Result differs from reference, Expected: "
2362 								+ de::toString(tcu::toHex<8>(reference.get(pos)))
2363 								+ ", Got: "
2364 								+ de::toString(tcu::toHex<8>(data[pos]))
2365 								+ ", At offset: "
2366 								+ de::toString(pos));
2367 						break;
2368 					}
2369 				}
2370 			}
2371 		}
2372 
2373 		vkd.unmapMemory(device, *m_memory);
2374 
2375 		if (!isOk)
2376 			context.getLog() << TestLog::Message << commandIndex << ": Buffer copy to buffer verification failed" << TestLog::EndMessage;
2377 	}
2378 }
2379 
// Fills a newly allocated host-visible source buffer with seeded pseudo-random
// data and copies it into the test buffer. verify() replays the same RNG stream
// on the reference memory.
class BufferCopyFromBuffer : public CmdCommand
{
public:
									BufferCopyFromBuffer	(deUint32 seed) : m_seed(seed), m_bufferSize(0) {}
									~BufferCopyFromBuffer	(void) {}
	const char*						getName					(void) const { return "BufferCopyFromBuffer"; }

	void							logPrepare				(TestLog& log, size_t commandIndex) const;
	void							prepare					(PrepareContext& context);
	void							logSubmit				(TestLog& log, size_t commandIndex) const;
	void							submit					(SubmitContext& context);
	void							verify					(VerifyContext& context, size_t commandIndex);

private:
	const deUint32					m_seed;			// RNG seed shared by prepare() and verify().
	vk::VkDeviceSize				m_bufferSize;	// Size of the copy (whole test buffer).
	vk::Move<vk::VkBuffer>			m_srcBuffer;	// Host-visible source, filled in prepare().
	vk::Move<vk::VkDeviceMemory>	m_memory;		// Backing memory of m_srcBuffer.
};
2399 
// Log the resource allocation done in prepare(), including the data seed.
void BufferCopyFromBuffer::logPrepare (TestLog& log, size_t commandIndex) const
{
	log << TestLog::Message << commandIndex << ":" << getName() << " Allocate source buffer for buffer to buffer copy. Seed: " << m_seed << TestLog::EndMessage;
}
2404 
// Create the host-visible source buffer, map it, fill it with seeded
// pseudo-random bytes, then flush and unmap so the device sees the data.
void BufferCopyFromBuffer::prepare (PrepareContext& context)
{
	const vk::InstanceInterface&	vki				= context.getContext().getInstanceInterface();
	const vk::DeviceInterface&		vkd				= context.getContext().getDeviceInterface();
	const vk::VkPhysicalDevice		physicalDevice	= context.getContext().getPhysicalDevice();
	const vk::VkDevice				device			= context.getContext().getDevice();
	const vector<deUint32>&			queueFamilies	= context.getContext().getQueueFamilies();

	m_bufferSize	= context.getBufferSize();
	m_srcBuffer		= createBuffer(vkd, device, m_bufferSize, vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies);
	m_memory		= bindBufferMemory(vki, vkd, physicalDevice, device, *m_srcBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);

	{
		void* const	ptr	= mapMemory(vkd, device, *m_memory, m_bufferSize);
		de::Random	rng	(m_seed);

		{
			deUint8* const	data = (deUint8*)ptr;

			for (size_t ndx = 0; ndx < (size_t)m_bufferSize; ndx++)
				data[ndx] = rng.getUint8();
		}

		// Flush before unmapping; the memory is not guaranteed to be coherent.
		vk::flushMappedMemoryRange(vkd, device, *m_memory, 0, VK_WHOLE_SIZE);
		vkd.unmapMemory(device, *m_memory);
	}
}
2432 
// Log the copy recorded in submit().
void BufferCopyFromBuffer::logSubmit (TestLog& log, size_t commandIndex) const
{
	log << TestLog::Message << commandIndex << ":" << getName() << " Copy buffer data from another buffer" << TestLog::EndMessage;
}
2437 
// Record a full-size copy from the prepared source buffer into the test buffer.
void BufferCopyFromBuffer::submit (SubmitContext& context)
{
	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
	const vk::VkBufferCopy		range			=
	{
		0, 0, // Offsets
		m_bufferSize
	};

	vkd.cmdCopyBuffer(commandBuffer, *m_srcBuffer, context.getBuffer(), 1, &range);
}
2450 
verify(VerifyContext & context,size_t)2451 void BufferCopyFromBuffer::verify (VerifyContext& context, size_t)
2452 {
2453 	ReferenceMemory&	reference	(context.getReference());
2454 	de::Random			rng			(m_seed);
2455 
2456 	for (size_t ndx = 0; ndx < (size_t)m_bufferSize; ndx++)
2457 		reference.set(ndx, rng.getUint8());
2458 }
2459 
// Copies the test buffer into a newly allocated RGBA8 image sized so that
// width * height * 4 covers the buffer; the image is later read back in verify().
class BufferCopyToImage : public CmdCommand
{
public:
									BufferCopyToImage	(void) {}
									~BufferCopyToImage	(void) {}
	const char*						getName				(void) const { return "BufferCopyToImage"; }

	void							logPrepare			(TestLog& log, size_t commandIndex) const;
	void							prepare				(PrepareContext& context);
	void							logSubmit			(TestLog& log, size_t commandIndex) const;
	void							submit				(SubmitContext& context);
	void							verify				(VerifyContext& context, size_t commandIndex);

private:
	deInt32							m_imageWidth;	// Chosen in prepare() from the buffer size.
	deInt32							m_imageHeight;
	vk::Move<vk::VkImage>			m_dstImage;		// Destination image (RGBA8, single mip/layer).
	vk::Move<vk::VkDeviceMemory>	m_memory;		// Backing memory of m_dstImage.
};
2479 
// Log the resource allocation done in prepare().
void BufferCopyToImage::logPrepare (TestLog& log, size_t commandIndex) const
{
	log << TestLog::Message << commandIndex << ":" << getName() << " Allocate destination image for buffer to image copy." << TestLog::EndMessage;
}
2484 
// Create an RGBA8 destination image whose dimensions cover the buffer size
// (width * height * 4 bytes), bind device memory, and transition it from
// UNDEFINED to TRANSFER_DST_OPTIMAL in a separate, immediately submitted
// command buffer so that the copy recorded in submit() finds it ready.
void BufferCopyToImage::prepare (PrepareContext& context)
{
	const vk::InstanceInterface&	vki				= context.getContext().getInstanceInterface();
	const vk::DeviceInterface&		vkd				= context.getContext().getDeviceInterface();
	const vk::VkPhysicalDevice		physicalDevice	= context.getContext().getPhysicalDevice();
	const vk::VkDevice				device			= context.getContext().getDevice();
	const vk::VkQueue				queue			= context.getContext().getQueue();
	const vk::VkCommandPool			commandPool		= context.getContext().getCommandPool();
	const vector<deUint32>&			queueFamilies	= context.getContext().getQueueFamilies();
	const IVec2						imageSize		= findImageSizeWxHx4(context.getBufferSize());

	m_imageWidth	= imageSize[0];
	m_imageHeight	= imageSize[1];

	{
		const vk::VkImageCreateInfo	createInfo =
		{
			vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
			DE_NULL,

			0,
			vk::VK_IMAGE_TYPE_2D,
			vk::VK_FORMAT_R8G8B8A8_UNORM,
			{
				(deUint32)m_imageWidth,
				(deUint32)m_imageHeight,
				1u,
			},
			1, 1, // mipLevels, arrayLayers
			vk::VK_SAMPLE_COUNT_1_BIT,

			vk::VK_IMAGE_TILING_OPTIMAL,
			// SRC usage is needed so verify() can copy the image back out.
			vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
			vk::VK_SHARING_MODE_EXCLUSIVE,

			(deUint32)queueFamilies.size(),
			&queueFamilies[0],
			vk::VK_IMAGE_LAYOUT_UNDEFINED
		};

		m_dstImage = vk::createImage(vkd, device, &createInfo);
	}

	m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_dstImage, 0);

	{
		// Transition UNDEFINED -> TRANSFER_DST_OPTIMAL and wait for it to complete.
		const vk::Unique<vk::VkCommandBuffer>	commandBuffer	(createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
		const vk::VkImageMemoryBarrier			barrier			=
		{
			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
			DE_NULL,

			0,
			vk::VK_ACCESS_TRANSFER_WRITE_BIT,

			vk::VK_IMAGE_LAYOUT_UNDEFINED,
			vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,

			VK_QUEUE_FAMILY_IGNORED,
			VK_QUEUE_FAMILY_IGNORED,

			*m_dstImage,
			{
				vk::VK_IMAGE_ASPECT_COLOR_BIT,
				0,	// Mip level
				1,	// Mip level count
				0,	// Layer
				1	// Layer count
			}
		};

		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &barrier);

		endCommandBuffer(vkd, *commandBuffer);
		submitCommandsAndWait(vkd, device, queue, *commandBuffer);
	}
}
2562 
logSubmit(TestLog & log,size_t commandIndex) const2563 void BufferCopyToImage::logSubmit (TestLog& log, size_t commandIndex) const
2564 {
2565 	log << TestLog::Message << commandIndex << ":" << getName() << " Copy buffer to image" << TestLog::EndMessage;
2566 }
2567 
submit(SubmitContext & context)2568 void BufferCopyToImage::submit (SubmitContext& context)
2569 {
2570 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
2571 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
2572 	const vk::VkBufferImageCopy	region			=
2573 	{
2574 		0,
2575 		0, 0,
2576 		{
2577 			vk::VK_IMAGE_ASPECT_COLOR_BIT,
2578 			0,	// mipLevel
2579 			0,	// arrayLayer
2580 			1	// layerCount
2581 		},
2582 		{ 0, 0, 0 },
2583 		{
2584 			(deUint32)m_imageWidth,
2585 			(deUint32)m_imageHeight,
2586 			1u
2587 		}
2588 	};
2589 
2590 	vkd.cmdCopyBufferToImage(commandBuffer, context.getBuffer(), *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
2591 }
2592 
// Verify the copy by reading the destination image back into a host visible
// buffer and comparing every defined byte against the reference memory.
void BufferCopyToImage::verify (VerifyContext& context, size_t commandIndex)
{
	tcu::ResultCollector&					resultCollector	(context.getResultCollector());
	ReferenceMemory&						reference		(context.getReference());
	const vk::InstanceInterface&			vki				= context.getContext().getInstanceInterface();
	const vk::DeviceInterface&				vkd				= context.getContext().getDeviceInterface();
	const vk::VkPhysicalDevice				physicalDevice	= context.getContext().getPhysicalDevice();
	const vk::VkDevice						device			= context.getContext().getDevice();
	const vk::VkQueue						queue			= context.getContext().getQueue();
	const vk::VkCommandPool					commandPool		= context.getContext().getCommandPool();
	const vk::Unique<vk::VkCommandBuffer>	commandBuffer	(createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
	const vector<deUint32>&					queueFamilies	= context.getContext().getQueueFamilies();
	// Readback buffer: 4 bytes (RGBA8) per pixel.
	const vk::Unique<vk::VkBuffer>			dstBuffer		(createBuffer(vkd, device, 4 * m_imageWidth * m_imageHeight, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
	const vk::Unique<vk::VkDeviceMemory>	memory			(bindBufferMemory(vki, vkd, physicalDevice, device, *dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
	{
		// Transition the image TRANSFER_DST -> TRANSFER_SRC and make the
		// earlier transfer write visible to the readback transfer read.
		const vk::VkImageMemoryBarrier		imageBarrier	=
		{
			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
			DE_NULL,

			vk::VK_ACCESS_TRANSFER_WRITE_BIT,
			vk::VK_ACCESS_TRANSFER_READ_BIT,

			vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
			vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,

			VK_QUEUE_FAMILY_IGNORED,
			VK_QUEUE_FAMILY_IGNORED,

			*m_dstImage,
			{
				vk::VK_IMAGE_ASPECT_COLOR_BIT,
				0,	// Mip level
				1,	// Mip level count
				0,	// Layer
				1	// Layer count
			}
		};
		// Make the transfer write to the readback buffer visible to host reads.
		const vk::VkBufferMemoryBarrier bufferBarrier =
		{
			vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
			DE_NULL,

			vk::VK_ACCESS_TRANSFER_WRITE_BIT,
			vk::VK_ACCESS_HOST_READ_BIT,

			VK_QUEUE_FAMILY_IGNORED,
			VK_QUEUE_FAMILY_IGNORED,
			*dstBuffer,
			0,
			VK_WHOLE_SIZE
		};

		// Whole-image copy region; bufferRowLength/bufferImageHeight of 0
		// means tightly packed rows.
		const vk::VkBufferImageCopy	region =
		{
			0,
			0, 0,
			{
				vk::VK_IMAGE_ASPECT_COLOR_BIT,
				0,	// mipLevel
				0,	// arrayLayer
				1	// layerCount
			},
			{ 0, 0, 0 },
			{
				(deUint32)m_imageWidth,
				(deUint32)m_imageHeight,
				1u
			}
		};

		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &imageBarrier);
		vkd.cmdCopyImageToBuffer(*commandBuffer, *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *dstBuffer, 1, &region);
		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &bufferBarrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
	}

	endCommandBuffer(vkd, *commandBuffer);
	submitCommandsAndWait(vkd, device, queue, *commandBuffer);

	{
		void* const	ptr		= mapMemory(vkd, device, *memory, 4 * m_imageWidth * m_imageHeight);

		invalidateMappedMemoryRange(vkd, device, *memory, 0, VK_WHOLE_SIZE);

		{
			const deUint8* const	data = (const deUint8*)ptr;

			// Only bytes the reference tracks as defined are compared; the
			// first mismatch fails the result collector and stops the scan.
			for (size_t pos = 0; pos < (size_t)( 4 * m_imageWidth * m_imageHeight); pos++)
			{
				if (reference.isDefined(pos))
				{
					if (data[pos] != reference.get(pos))
					{
						resultCollector.fail(
								de::toString(commandIndex) + ":" + getName()
								+ " Result differs from reference, Expected: "
								+ de::toString(tcu::toHex<8>(reference.get(pos)))
								+ ", Got: "
								+ de::toString(tcu::toHex<8>(data[pos]))
								+ ", At offset: "
								+ de::toString(pos));
						break;
					}
				}
			}
		}

		vkd.unmapMemory(device, *memory);
	}
}
2703 
// Command that creates a source image, fills it with seeded pseudo random
// data during prepare(), and copies that image into the test buffer in
// submit(). verify() replays the same random stream into reference memory.
class BufferCopyFromImage : public CmdCommand
{
public:
									BufferCopyFromImage		(deUint32 seed) : m_seed(seed) {}
									~BufferCopyFromImage	(void) {}
	const char*						getName					(void) const { return "BufferCopyFromImage"; }

	void							logPrepare				(TestLog& log, size_t commandIndex) const;
	void							prepare					(PrepareContext& context);
	void							logSubmit				(TestLog& log, size_t commandIndex) const;
	void							submit					(SubmitContext& context);
	void							verify					(VerifyContext& context, size_t commandIndex);

private:
	const deUint32					m_seed;			// Seed for the pseudo random source data
	deInt32							m_imageWidth;	// Derived from the buffer size in prepare()
	deInt32							m_imageHeight;
	vk::Move<vk::VkImage>			m_srcImage;
	vk::Move<vk::VkDeviceMemory>	m_memory;		// Backing memory of m_srcImage
};
2724 
logPrepare(TestLog & log,size_t commandIndex) const2725 void BufferCopyFromImage::logPrepare (TestLog& log, size_t commandIndex) const
2726 {
2727 	log << TestLog::Message << commandIndex << ":" << getName() << " Allocate source image for image to buffer copy." << TestLog::EndMessage;
2728 }
2729 
// Create the source image sized to hold the whole test buffer (4 bytes per
// pixel) and upload seeded pseudo random data into it via a staging buffer,
// leaving the image in TRANSFER_SRC_OPTIMAL layout for submit().
void BufferCopyFromImage::prepare (PrepareContext& context)
{
	const vk::InstanceInterface&	vki				= context.getContext().getInstanceInterface();
	const vk::DeviceInterface&		vkd				= context.getContext().getDeviceInterface();
	const vk::VkPhysicalDevice		physicalDevice	= context.getContext().getPhysicalDevice();
	const vk::VkDevice				device			= context.getContext().getDevice();
	const vk::VkQueue				queue			= context.getContext().getQueue();
	const vk::VkCommandPool			commandPool		= context.getContext().getCommandPool();
	const vector<deUint32>&			queueFamilies	= context.getContext().getQueueFamilies();
	// Pick W x H such that W * H * 4 == buffer size.
	const IVec2						imageSize		= findImageSizeWxHx4(context.getBufferSize());

	m_imageWidth	= imageSize[0];
	m_imageHeight	= imageSize[1];

	{
		const vk::VkImageCreateInfo	createInfo =
		{
			vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
			DE_NULL,

			0,
			vk::VK_IMAGE_TYPE_2D,
			vk::VK_FORMAT_R8G8B8A8_UNORM,
			{
				(deUint32)m_imageWidth,
				(deUint32)m_imageHeight,
				1u,
			},
			1, 1, // mipLevels, arrayLayers
			vk::VK_SAMPLE_COUNT_1_BIT,

			vk::VK_IMAGE_TILING_OPTIMAL,
			vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
			vk::VK_SHARING_MODE_EXCLUSIVE,

			(deUint32)queueFamilies.size(),
			&queueFamilies[0],
			vk::VK_IMAGE_LAYOUT_UNDEFINED
		};

		m_srcImage = vk::createImage(vkd, device, &createInfo);
	}

	m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_srcImage, 0);

	{
		// Host visible staging buffer used to upload the random data.
		const vk::Unique<vk::VkBuffer>			srcBuffer		(createBuffer(vkd, device, 4 * m_imageWidth * m_imageHeight, vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
		const vk::Unique<vk::VkDeviceMemory>	memory			(bindBufferMemory(vki, vkd, physicalDevice, device, *srcBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
		const vk::Unique<vk::VkCommandBuffer>	commandBuffer	(createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
		// UNDEFINED -> TRANSFER_DST before the upload copy.
		const vk::VkImageMemoryBarrier			preImageBarrier	=
		{
			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
			DE_NULL,

			0,
			vk::VK_ACCESS_TRANSFER_WRITE_BIT,

			vk::VK_IMAGE_LAYOUT_UNDEFINED,
			vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,

			VK_QUEUE_FAMILY_IGNORED,
			VK_QUEUE_FAMILY_IGNORED,

			*m_srcImage,
			{
				vk::VK_IMAGE_ASPECT_COLOR_BIT,
				0,	// Mip level
				1,	// Mip level count
				0,	// Layer
				1	// Layer count
			}
		};
		// TRANSFER_DST -> TRANSFER_SRC after the upload so submit() can read.
		const vk::VkImageMemoryBarrier			postImageBarrier =
		{
			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
			DE_NULL,

			vk::VK_ACCESS_TRANSFER_WRITE_BIT,
			0,

			vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
			vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,

			VK_QUEUE_FAMILY_IGNORED,
			VK_QUEUE_FAMILY_IGNORED,

			*m_srcImage,
			{
				vk::VK_IMAGE_ASPECT_COLOR_BIT,
				0,	// Mip level
				1,	// Mip level count
				0,	// Layer
				1	// Layer count
			}
		};
		const vk::VkBufferImageCopy				region				=
		{
			0,
			0, 0,
			{
				vk::VK_IMAGE_ASPECT_COLOR_BIT,
				0,	// mipLevel
				0,	// arrayLayer
				1	// layerCount
			},
			{ 0, 0, 0 },
			{
				(deUint32)m_imageWidth,
				(deUint32)m_imageHeight,
				1u
			}
		};

		{
			// Fill the staging buffer with a deterministic byte stream;
			// verify() replays the same stream from the same seed.
			void* const	ptr	= mapMemory(vkd, device, *memory, 4 * m_imageWidth * m_imageHeight);
			de::Random	rng	(m_seed);

			{
				deUint8* const	data = (deUint8*)ptr;

				for (size_t ndx = 0; ndx < (size_t)(4 * m_imageWidth * m_imageHeight); ndx++)
					data[ndx] = rng.getUint8();
			}

			// Make the host writes available to the device.
			vk::flushMappedMemoryRange(vkd, device, *memory, 0, VK_WHOLE_SIZE);
			vkd.unmapMemory(device, *memory);
		}

		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &preImageBarrier);
		vkd.cmdCopyBufferToImage(*commandBuffer, *srcBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &postImageBarrier);

		endCommandBuffer(vkd, *commandBuffer);
		submitCommandsAndWait(vkd, device, queue, *commandBuffer);
	}
}
2866 
logSubmit(TestLog & log,size_t commandIndex) const2867 void BufferCopyFromImage::logSubmit (TestLog& log, size_t commandIndex) const
2868 {
2869 	log << TestLog::Message << commandIndex << ":" << getName() << " Copy buffer data from image" << TestLog::EndMessage;
2870 }
2871 
submit(SubmitContext & context)2872 void BufferCopyFromImage::submit (SubmitContext& context)
2873 {
2874 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
2875 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
2876 	const vk::VkBufferImageCopy	region			=
2877 	{
2878 		0,
2879 		0, 0,
2880 		{
2881 			vk::VK_IMAGE_ASPECT_COLOR_BIT,
2882 			0,	// mipLevel
2883 			0,	// arrayLayer
2884 			1	// layerCount
2885 		},
2886 		{ 0, 0, 0 },
2887 		{
2888 			(deUint32)m_imageWidth,
2889 			(deUint32)m_imageHeight,
2890 			1u
2891 		}
2892 	};
2893 
2894 	vkd.cmdCopyImageToBuffer(commandBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, context.getBuffer(), 1, &region);
2895 }
2896 
verify(VerifyContext & context,size_t)2897 void BufferCopyFromImage::verify (VerifyContext& context, size_t)
2898 {
2899 	ReferenceMemory&	reference		(context.getReference());
2900 	de::Random			rng	(m_seed);
2901 
2902 	for (size_t ndx = 0; ndx < (size_t)(4 * m_imageWidth * m_imageHeight); ndx++)
2903 		reference.set(ndx, rng.getUint8());
2904 }
2905 
2906 class ImageCopyToBuffer : public CmdCommand
2907 {
2908 public:
ImageCopyToBuffer(vk::VkImageLayout imageLayout)2909 									ImageCopyToBuffer	(vk::VkImageLayout imageLayout) : m_imageLayout (imageLayout) {}
~ImageCopyToBuffer(void)2910 									~ImageCopyToBuffer	(void) {}
getName(void) const2911 	const char*						getName				(void) const { return "BufferCopyToImage"; }
2912 
2913 	void							logPrepare			(TestLog& log, size_t commandIndex) const;
2914 	void							prepare				(PrepareContext& context);
2915 	void							logSubmit			(TestLog& log, size_t commandIndex) const;
2916 	void							submit				(SubmitContext& context);
2917 	void							verify				(VerifyContext& context, size_t commandIndex);
2918 
2919 private:
2920 	vk::VkImageLayout				m_imageLayout;
2921 	vk::VkDeviceSize				m_bufferSize;
2922 	vk::Move<vk::VkBuffer>			m_dstBuffer;
2923 	vk::Move<vk::VkDeviceMemory>	m_memory;
2924 	vk::VkDeviceSize				m_imageMemorySize;
2925 	deInt32							m_imageWidth;
2926 	deInt32							m_imageHeight;
2927 };
2928 
logPrepare(TestLog & log,size_t commandIndex) const2929 void ImageCopyToBuffer::logPrepare (TestLog& log, size_t commandIndex) const
2930 {
2931 	log << TestLog::Message << commandIndex << ":" << getName() << " Allocate destination buffer for image to buffer copy." << TestLog::EndMessage;
2932 }
2933 
prepare(PrepareContext & context)2934 void ImageCopyToBuffer::prepare (PrepareContext& context)
2935 {
2936 	const vk::InstanceInterface&	vki				= context.getContext().getInstanceInterface();
2937 	const vk::DeviceInterface&		vkd				= context.getContext().getDeviceInterface();
2938 	const vk::VkPhysicalDevice		physicalDevice	= context.getContext().getPhysicalDevice();
2939 	const vk::VkDevice				device			= context.getContext().getDevice();
2940 	const vector<deUint32>&			queueFamilies	= context.getContext().getQueueFamilies();
2941 
2942 	m_imageWidth		= context.getImageWidth();
2943 	m_imageHeight		= context.getImageHeight();
2944 	m_bufferSize		= 4 * m_imageWidth * m_imageHeight;
2945 	m_imageMemorySize	= context.getImageMemorySize();
2946 	m_dstBuffer			= createBuffer(vkd, device, m_bufferSize, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies);
2947 	m_memory			= bindBufferMemory(vki, vkd, physicalDevice, device, *m_dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
2948 }
2949 
logSubmit(TestLog & log,size_t commandIndex) const2950 void ImageCopyToBuffer::logSubmit (TestLog& log, size_t commandIndex) const
2951 {
2952 	log << TestLog::Message << commandIndex << ":" << getName() << " Copy image to buffer" << TestLog::EndMessage;
2953 }
2954 
submit(SubmitContext & context)2955 void ImageCopyToBuffer::submit (SubmitContext& context)
2956 {
2957 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
2958 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
2959 	const vk::VkBufferImageCopy	region			=
2960 	{
2961 		0,
2962 		0, 0,
2963 		{
2964 			vk::VK_IMAGE_ASPECT_COLOR_BIT,
2965 			0,	// mipLevel
2966 			0,	// arrayLayer
2967 			1	// layerCount
2968 		},
2969 		{ 0, 0, 0 },
2970 		{
2971 			(deUint32)m_imageWidth,
2972 			(deUint32)m_imageHeight,
2973 			1u
2974 		}
2975 	};
2976 
2977 	vkd.cmdCopyImageToBuffer(commandBuffer, context.getImage(), m_imageLayout, *m_dstBuffer, 1, &region);
2978 }
2979 
// Map the readback buffer and compare it as an RGBA8 image against the
// reference image.
void ImageCopyToBuffer::verify (VerifyContext& context, size_t commandIndex)
{
	tcu::ResultCollector&					resultCollector	(context.getResultCollector());
	ReferenceMemory&						reference		(context.getReference());
	const vk::DeviceInterface&				vkd				= context.getContext().getDeviceInterface();
	const vk::VkDevice						device			= context.getContext().getDevice();
	const vk::VkQueue						queue			= context.getContext().getQueue();
	const vk::VkCommandPool					commandPool		= context.getContext().getCommandPool();
	const vk::Unique<vk::VkCommandBuffer>	commandBuffer	(createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
	// Make the transfer write to the readback buffer visible to host reads.
	const vk::VkBufferMemoryBarrier			barrier			=
	{
		vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
		DE_NULL,

		vk::VK_ACCESS_TRANSFER_WRITE_BIT,
		vk::VK_ACCESS_HOST_READ_BIT,

		VK_QUEUE_FAMILY_IGNORED,
		VK_QUEUE_FAMILY_IGNORED,
		*m_dstBuffer,
		0,
		VK_WHOLE_SIZE
	};

	vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &barrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);

	endCommandBuffer(vkd, *commandBuffer);
	submitCommandsAndWait(vkd, device, queue, *commandBuffer);

	// Mark the raw image backing memory undefined in the reference —
	// presumably because byte-wise contents of optimally tiled image memory
	// are not specified; pixel contents are checked via the image compare
	// below instead. (NOTE(review): confirm intent against other commands.)
	reference.setUndefined(0, (size_t)m_imageMemorySize);
	{
		void* const						ptr				= mapMemory(vkd, device, *m_memory, m_bufferSize);
		const ConstPixelBufferAccess	referenceImage	(context.getReferenceImage().getAccess());
		// Interpret the mapped buffer as a tightly packed RGBA8 image.
		const ConstPixelBufferAccess	resultImage		(TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_imageWidth, m_imageHeight, 1, ptr);

		vk::invalidateMappedMemoryRange(vkd, device, *m_memory, 0, VK_WHOLE_SIZE);

		// Exact (zero-threshold) comparison; logs images on mismatch.
		if (!tcu::intThresholdCompare(context.getLog(), (de::toString(commandIndex) + ":" + getName()).c_str(), (de::toString(commandIndex) + ":" + getName()).c_str(), referenceImage, resultImage, UVec4(0), tcu::COMPARE_LOG_ON_ERROR))
			resultCollector.fail(de::toString(commandIndex) + ":" + getName() + " Image comparison failed");

		vkd.unmapMemory(device, *m_memory);
	}
}
3023 
// Command that allocates a host visible source buffer, fills it with seeded
// pseudo random bytes in prepare(), and copies it into the test image in
// submit(). verify() replays the same stream into the reference image.
class ImageCopyFromBuffer : public CmdCommand
{
public:
									ImageCopyFromBuffer		(deUint32 seed, vk::VkImageLayout imageLayout) : m_seed(seed), m_imageLayout(imageLayout) {}
									~ImageCopyFromBuffer	(void) {}
	const char*						getName					(void) const { return "ImageCopyFromBuffer"; }

	void							logPrepare				(TestLog& log, size_t commandIndex) const;
	void							prepare					(PrepareContext& context);
	void							logSubmit				(TestLog& log, size_t commandIndex) const;
	void							submit					(SubmitContext& context);
	void							verify					(VerifyContext& context, size_t commandIndex);

private:
	const deUint32					m_seed;			// Seed for the pseudo random source data
	const vk::VkImageLayout			m_imageLayout;	// Layout of the target image during the copy
	deInt32							m_imageWidth;
	deInt32							m_imageHeight;
	vk::VkDeviceSize				m_imageMemorySize;
	vk::VkDeviceSize				m_bufferSize;	// 4 bytes per pixel
	vk::Move<vk::VkBuffer>			m_srcBuffer;	// Host visible staging buffer
	vk::Move<vk::VkDeviceMemory>	m_memory;		// Backing memory of m_srcBuffer
};
3047 
logPrepare(TestLog & log,size_t commandIndex) const3048 void ImageCopyFromBuffer::logPrepare (TestLog& log, size_t commandIndex) const
3049 {
3050 	log << TestLog::Message << commandIndex << ":" << getName() << " Allocate source buffer for buffer to image copy. Seed: " << m_seed << TestLog::EndMessage;
3051 }
3052 
prepare(PrepareContext & context)3053 void ImageCopyFromBuffer::prepare (PrepareContext& context)
3054 {
3055 	const vk::InstanceInterface&	vki				= context.getContext().getInstanceInterface();
3056 	const vk::DeviceInterface&		vkd				= context.getContext().getDeviceInterface();
3057 	const vk::VkPhysicalDevice		physicalDevice	= context.getContext().getPhysicalDevice();
3058 	const vk::VkDevice				device			= context.getContext().getDevice();
3059 	const vector<deUint32>&			queueFamilies	= context.getContext().getQueueFamilies();
3060 
3061 	m_imageWidth		= context.getImageHeight();
3062 	m_imageHeight		= context.getImageWidth();
3063 	m_imageMemorySize	= context.getImageMemorySize();
3064 	m_bufferSize		= m_imageWidth * m_imageHeight * 4;
3065 	m_srcBuffer			= createBuffer(vkd, device, m_bufferSize, vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies);
3066 	m_memory			= bindBufferMemory(vki, vkd, physicalDevice, device, *m_srcBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
3067 
3068 	{
3069 		void* const	ptr	= mapMemory(vkd, device, *m_memory, m_bufferSize);
3070 		de::Random	rng	(m_seed);
3071 
3072 		{
3073 			deUint8* const	data = (deUint8*)ptr;
3074 
3075 			for (size_t ndx = 0; ndx < (size_t)m_bufferSize; ndx++)
3076 				data[ndx] = rng.getUint8();
3077 		}
3078 
3079 		vk::flushMappedMemoryRange(vkd, device, *m_memory, 0, VK_WHOLE_SIZE);
3080 		vkd.unmapMemory(device, *m_memory);
3081 	}
3082 }
3083 
logSubmit(TestLog & log,size_t commandIndex) const3084 void ImageCopyFromBuffer::logSubmit (TestLog& log, size_t commandIndex) const
3085 {
3086 	log << TestLog::Message << commandIndex << ":" << getName() << " Copy image data from buffer" << TestLog::EndMessage;
3087 }
3088 
submit(SubmitContext & context)3089 void ImageCopyFromBuffer::submit (SubmitContext& context)
3090 {
3091 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
3092 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
3093 	const vk::VkBufferImageCopy	region			=
3094 	{
3095 		0,
3096 		0, 0,
3097 		{
3098 			vk::VK_IMAGE_ASPECT_COLOR_BIT,
3099 			0,	// mipLevel
3100 			0,	// arrayLayer
3101 			1	// layerCount
3102 		},
3103 		{ 0, 0, 0 },
3104 		{
3105 			(deUint32)m_imageWidth,
3106 			(deUint32)m_imageHeight,
3107 			1u
3108 		}
3109 	};
3110 
3111 	vkd.cmdCopyBufferToImage(commandBuffer, *m_srcBuffer, context.getImage(), m_imageLayout, 1, &region);
3112 }
3113 
verify(VerifyContext & context,size_t)3114 void ImageCopyFromBuffer::verify (VerifyContext& context, size_t)
3115 {
3116 	ReferenceMemory&	reference	(context.getReference());
3117 	de::Random			rng			(m_seed);
3118 
3119 	reference.setUndefined(0, (size_t)m_imageMemorySize);
3120 
3121 	{
3122 		const PixelBufferAccess&	refAccess	(context.getReferenceImage().getAccess());
3123 
3124 		for (deInt32 y = 0; y < m_imageHeight; y++)
3125 		for (deInt32 x = 0; x < m_imageWidth; x++)
3126 		{
3127 			const deUint8 r8 = rng.getUint8();
3128 			const deUint8 g8 = rng.getUint8();
3129 			const deUint8 b8 = rng.getUint8();
3130 			const deUint8 a8 = rng.getUint8();
3131 
3132 			refAccess.setPixel(UVec4(r8, g8, b8, a8), x, y);
3133 		}
3134 	}
3135 }
3136 
// Command that creates a second image, fills it with seeded pseudo random
// data in prepare(), and copies it into the test image in submit().
// verify() replays the same stream into the reference image.
class ImageCopyFromImage : public CmdCommand
{
public:
									ImageCopyFromImage	(deUint32 seed, vk::VkImageLayout imageLayout) : m_seed(seed), m_imageLayout(imageLayout) {}
									~ImageCopyFromImage	(void) {}
	const char*						getName				(void) const { return "ImageCopyFromImage"; }

	void							logPrepare			(TestLog& log, size_t commandIndex) const;
	void							prepare				(PrepareContext& context);
	void							logSubmit			(TestLog& log, size_t commandIndex) const;
	void							submit				(SubmitContext& context);
	void							verify				(VerifyContext& context, size_t commandIndex);

private:
	const deUint32					m_seed;			// Seed for the pseudo random source data
	const vk::VkImageLayout			m_imageLayout;	// Layout of the target image during the copy
	deInt32							m_imageWidth;
	deInt32							m_imageHeight;
	vk::VkDeviceSize				m_imageMemorySize;
	vk::Move<vk::VkImage>			m_srcImage;
	vk::Move<vk::VkDeviceMemory>	m_memory;		// Backing memory of m_srcImage
};
3159 
logPrepare(TestLog & log,size_t commandIndex) const3160 void ImageCopyFromImage::logPrepare (TestLog& log, size_t commandIndex) const
3161 {
3162 	log << TestLog::Message << commandIndex << ":" << getName() << " Allocate source image for image to image copy." << TestLog::EndMessage;
3163 }
3164 
// Create a source image matching the test image dimensions and upload
// seeded pseudo random data into it via a staging buffer, leaving it in
// TRANSFER_SRC_OPTIMAL layout for submit().
void ImageCopyFromImage::prepare (PrepareContext& context)
{
	const vk::InstanceInterface&	vki				= context.getContext().getInstanceInterface();
	const vk::DeviceInterface&		vkd				= context.getContext().getDeviceInterface();
	const vk::VkPhysicalDevice		physicalDevice	= context.getContext().getPhysicalDevice();
	const vk::VkDevice				device			= context.getContext().getDevice();
	const vk::VkQueue				queue			= context.getContext().getQueue();
	const vk::VkCommandPool			commandPool		= context.getContext().getCommandPool();
	const vector<deUint32>&			queueFamilies	= context.getContext().getQueueFamilies();

	m_imageWidth		= context.getImageWidth();
	m_imageHeight		= context.getImageHeight();
	m_imageMemorySize	= context.getImageMemorySize();

	{
		const vk::VkImageCreateInfo	createInfo =
		{
			vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
			DE_NULL,

			0,
			vk::VK_IMAGE_TYPE_2D,
			vk::VK_FORMAT_R8G8B8A8_UNORM,
			{
				(deUint32)m_imageWidth,
				(deUint32)m_imageHeight,
				1u,
			},
			1, 1, // mipLevels, arrayLayers
			vk::VK_SAMPLE_COUNT_1_BIT,

			vk::VK_IMAGE_TILING_OPTIMAL,
			vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
			vk::VK_SHARING_MODE_EXCLUSIVE,

			(deUint32)queueFamilies.size(),
			&queueFamilies[0],
			vk::VK_IMAGE_LAYOUT_UNDEFINED
		};

		m_srcImage = vk::createImage(vkd, device, &createInfo);
	}

	m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_srcImage, 0);

	{
		// Host visible staging buffer used to upload the random data.
		const vk::Unique<vk::VkBuffer>			srcBuffer		(createBuffer(vkd, device, 4 * m_imageWidth * m_imageHeight, vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
		const vk::Unique<vk::VkDeviceMemory>	memory			(bindBufferMemory(vki, vkd, physicalDevice, device, *srcBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
		const vk::Unique<vk::VkCommandBuffer>	commandBuffer	(createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
		// UNDEFINED -> TRANSFER_DST before the upload copy.
		const vk::VkImageMemoryBarrier			preImageBarrier	=
		{
			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
			DE_NULL,

			0,
			vk::VK_ACCESS_TRANSFER_WRITE_BIT,

			vk::VK_IMAGE_LAYOUT_UNDEFINED,
			vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,

			VK_QUEUE_FAMILY_IGNORED,
			VK_QUEUE_FAMILY_IGNORED,

			*m_srcImage,
			{
				vk::VK_IMAGE_ASPECT_COLOR_BIT,
				0,	// Mip level
				1,	// Mip level count
				0,	// Layer
				1	// Layer count
			}
		};
		// TRANSFER_DST -> TRANSFER_SRC after the upload so submit() can read.
		const vk::VkImageMemoryBarrier			postImageBarrier =
		{
			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
			DE_NULL,

			vk::VK_ACCESS_TRANSFER_WRITE_BIT,
			0,

			vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
			vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,

			VK_QUEUE_FAMILY_IGNORED,
			VK_QUEUE_FAMILY_IGNORED,

			*m_srcImage,
			{
				vk::VK_IMAGE_ASPECT_COLOR_BIT,
				0,	// Mip level
				1,	// Mip level count
				0,	// Layer
				1	// Layer count
			}
		};
		const vk::VkBufferImageCopy				region				=
		{
			0,
			0, 0,
			{
				vk::VK_IMAGE_ASPECT_COLOR_BIT,
				0,	// mipLevel
				0,	// arrayLayer
				1	// layerCount
			},
			{ 0, 0, 0 },
			{
				(deUint32)m_imageWidth,
				(deUint32)m_imageHeight,
				1u
			}
		};

		{
			// Fill the staging buffer with a deterministic byte stream;
			// verify() replays the same stream from the same seed.
			void* const	ptr	= mapMemory(vkd, device, *memory, 4 * m_imageWidth * m_imageHeight);
			de::Random	rng	(m_seed);

			{
				deUint8* const	data = (deUint8*)ptr;

				for (size_t ndx = 0; ndx < (size_t)(4 * m_imageWidth * m_imageHeight); ndx++)
					data[ndx] = rng.getUint8();
			}

			// Make the host writes available to the device.
			vk::flushMappedMemoryRange(vkd, device, *memory, 0, VK_WHOLE_SIZE);
			vkd.unmapMemory(device, *memory);
		}

		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &preImageBarrier);
		vkd.cmdCopyBufferToImage(*commandBuffer, *srcBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &postImageBarrier);

		endCommandBuffer(vkd, *commandBuffer);
		submitCommandsAndWait(vkd, device, queue, *commandBuffer);
	}
}
3301 
logSubmit(TestLog & log,size_t commandIndex) const3302 void ImageCopyFromImage::logSubmit (TestLog& log, size_t commandIndex) const
3303 {
3304 	log << TestLog::Message << commandIndex << ":" << getName() << " Copy image data from another image" << TestLog::EndMessage;
3305 }
3306 
submit(SubmitContext & context)3307 void ImageCopyFromImage::submit (SubmitContext& context)
3308 {
3309 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
3310 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
3311 	const vk::VkImageCopy		region			=
3312 	{
3313 		{
3314 			vk::VK_IMAGE_ASPECT_COLOR_BIT,
3315 			0,	// mipLevel
3316 			0,	// arrayLayer
3317 			1	// layerCount
3318 		},
3319 		{ 0, 0, 0 },
3320 
3321 		{
3322 			vk::VK_IMAGE_ASPECT_COLOR_BIT,
3323 			0,	// mipLevel
3324 			0,	// arrayLayer
3325 			1	// layerCount
3326 		},
3327 		{ 0, 0, 0 },
3328 		{
3329 			(deUint32)m_imageWidth,
3330 			(deUint32)m_imageHeight,
3331 			1u
3332 		}
3333 	};
3334 
3335 	vkd.cmdCopyImage(commandBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, context.getImage(), m_imageLayout, 1, &region);
3336 }
3337 
verify(VerifyContext & context,size_t)3338 void ImageCopyFromImage::verify (VerifyContext& context, size_t)
3339 {
3340 	ReferenceMemory&	reference	(context.getReference());
3341 	de::Random			rng			(m_seed);
3342 
3343 	reference.setUndefined(0, (size_t)m_imageMemorySize);
3344 
3345 	{
3346 		const PixelBufferAccess&	refAccess	(context.getReferenceImage().getAccess());
3347 
3348 		for (deInt32 y = 0; y < m_imageHeight; y++)
3349 		for (deInt32 x = 0; x < m_imageWidth; x++)
3350 		{
3351 			const deUint8 r8 = rng.getUint8();
3352 			const deUint8 g8 = rng.getUint8();
3353 			const deUint8 b8 = rng.getUint8();
3354 			const deUint8 a8 = rng.getUint8();
3355 
3356 			refAccess.setPixel(UVec4(r8, g8, b8, a8), x, y);
3357 		}
3358 	}
3359 }
3360 
// Command that copies the context image into a freshly allocated destination
// image (vkCmdCopyImage) and verifies the copied contents by reading the
// destination back through a host-visible buffer.
class ImageCopyToImage : public CmdCommand
{
public:
									ImageCopyToImage	(vk::VkImageLayout imageLayout) : m_imageLayout(imageLayout) {}
									~ImageCopyToImage	(void) {}
	const char*						getName				(void) const { return "ImageCopyToImage"; }

	void							logPrepare			(TestLog& log, size_t commandIndex) const;
	void							prepare				(PrepareContext& context);
	void							logSubmit			(TestLog& log, size_t commandIndex) const;
	void							submit				(SubmitContext& context);
	void							verify				(VerifyContext& context, size_t commandIndex);

private:
	const vk::VkImageLayout			m_imageLayout;		// Layout the source (context) image is in during the copy
	deInt32							m_imageWidth;		// Context image dimensions, captured in prepare()
	deInt32							m_imageHeight;
	vk::VkDeviceSize				m_imageMemorySize;
	vk::Move<vk::VkImage>			m_dstImage;			// Destination image owned by this command
	vk::Move<vk::VkDeviceMemory>	m_memory;			// Backing memory for m_dstImage
};
3382 
// Log the destination-image allocation done in prepare().
void ImageCopyToImage::logPrepare (TestLog& log, size_t commandIndex) const
{
	log << TestLog::Message << commandIndex << ":" << getName() << " Allocate destination image for image to image copy." << TestLog::EndMessage;
}
3387 
// Allocate the destination image for the image-to-image copy and transition
// it to TRANSFER_DST_OPTIMAL so submit() can copy into it.
void ImageCopyToImage::prepare (PrepareContext& context)
{
	const vk::InstanceInterface&	vki				= context.getContext().getInstanceInterface();
	const vk::DeviceInterface&		vkd				= context.getContext().getDeviceInterface();
	const vk::VkPhysicalDevice		physicalDevice	= context.getContext().getPhysicalDevice();
	const vk::VkDevice				device			= context.getContext().getDevice();
	const vk::VkQueue				queue			= context.getContext().getQueue();
	const vk::VkCommandPool			commandPool		= context.getContext().getCommandPool();
	const vector<deUint32>&			queueFamilies	= context.getContext().getQueueFamilies();

	// Cache the context image parameters for use in submit() and verify().
	m_imageWidth		= context.getImageWidth();
	m_imageHeight		= context.getImageHeight();
	m_imageMemorySize	= context.getImageMemorySize();

	{
		// Same size/format as the context image; written by the copy in
		// submit() (TRANSFER_DST) and read back in verify() (TRANSFER_SRC).
		const vk::VkImageCreateInfo	createInfo =
		{
			vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
			DE_NULL,

			0,
			vk::VK_IMAGE_TYPE_2D,
			vk::VK_FORMAT_R8G8B8A8_UNORM,
			{
				(deUint32)m_imageWidth,
				(deUint32)m_imageHeight,
				1u,
			},
			1, 1, // mipLevels, arrayLayers
			vk::VK_SAMPLE_COUNT_1_BIT,

			vk::VK_IMAGE_TILING_OPTIMAL,
			vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
			vk::VK_SHARING_MODE_EXCLUSIVE,

			(deUint32)queueFamilies.size(),
			&queueFamilies[0],
			vk::VK_IMAGE_LAYOUT_UNDEFINED
		};

		m_dstImage = vk::createImage(vkd, device, &createInfo);
	}

	m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_dstImage, 0);

	{
		// One-time layout transition UNDEFINED -> TRANSFER_DST_OPTIMAL,
		// submitted and waited on before the test command runs.
		const vk::Unique<vk::VkCommandBuffer>	commandBuffer	(createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
		const vk::VkImageMemoryBarrier			barrier			=
		{
			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
			DE_NULL,

			0,
			vk::VK_ACCESS_TRANSFER_WRITE_BIT,

			vk::VK_IMAGE_LAYOUT_UNDEFINED,
			vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,

			VK_QUEUE_FAMILY_IGNORED,
			VK_QUEUE_FAMILY_IGNORED,

			*m_dstImage,
			{
				vk::VK_IMAGE_ASPECT_COLOR_BIT,
				0,	// Mip level
				1,	// Mip level count
				0,	// Layer
				1	// Layer count
			}
		};

		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &barrier);

		endCommandBuffer(vkd, *commandBuffer);
		submitCommandsAndWait(vkd, device, queue, *commandBuffer);
	}
}
3465 
// Log the image-to-image copy that submit() records for this command.
void ImageCopyToImage::logSubmit (TestLog& log, size_t commandIndex) const
{
	log << TestLog::Message << commandIndex << ":" << getName() << " Copy image to another image" << TestLog::EndMessage;
}
3470 
submit(SubmitContext & context)3471 void ImageCopyToImage::submit (SubmitContext& context)
3472 {
3473 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
3474 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
3475 	const vk::VkImageCopy		region			=
3476 	{
3477 		{
3478 			vk::VK_IMAGE_ASPECT_COLOR_BIT,
3479 			0,	// mipLevel
3480 			0,	// arrayLayer
3481 			1	// layerCount
3482 		},
3483 		{ 0, 0, 0 },
3484 
3485 		{
3486 			vk::VK_IMAGE_ASPECT_COLOR_BIT,
3487 			0,	// mipLevel
3488 			0,	// arrayLayer
3489 			1	// layerCount
3490 		},
3491 		{ 0, 0, 0 },
3492 		{
3493 			(deUint32)m_imageWidth,
3494 			(deUint32)m_imageHeight,
3495 			1u
3496 		}
3497 	};
3498 
3499 	vkd.cmdCopyImage(commandBuffer, context.getImage(), m_imageLayout, *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
3500 }
3501 
// Read the destination image back into a host-visible buffer and compare it
// against the reference image; any mismatch is reported to the result
// collector (the test continues rather than aborting).
void ImageCopyToImage::verify (VerifyContext& context, size_t commandIndex)
{
	tcu::ResultCollector&					resultCollector	(context.getResultCollector());
	const vk::InstanceInterface&			vki				= context.getContext().getInstanceInterface();
	const vk::DeviceInterface&				vkd				= context.getContext().getDeviceInterface();
	const vk::VkPhysicalDevice				physicalDevice	= context.getContext().getPhysicalDevice();
	const vk::VkDevice						device			= context.getContext().getDevice();
	const vk::VkQueue						queue			= context.getContext().getQueue();
	const vk::VkCommandPool					commandPool		= context.getContext().getCommandPool();
	const vk::Unique<vk::VkCommandBuffer>	commandBuffer	(createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
	const vector<deUint32>&					queueFamilies	= context.getContext().getQueueFamilies();
	// 4 bytes per pixel (RGBA8) readback buffer.
	const vk::Unique<vk::VkBuffer>			dstBuffer		(createBuffer(vkd, device, 4 * m_imageWidth * m_imageHeight, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
	const vk::Unique<vk::VkDeviceMemory>	memory			(bindBufferMemory(vki, vkd, physicalDevice, device, *dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
	{
		// Make the copy from submit() visible and move the destination image
		// to TRANSFER_SRC_OPTIMAL for the readback copy.
		const vk::VkImageMemoryBarrier		imageBarrier	=
		{
			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
			DE_NULL,

			vk::VK_ACCESS_TRANSFER_WRITE_BIT,
			vk::VK_ACCESS_TRANSFER_READ_BIT,

			vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
			vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,

			VK_QUEUE_FAMILY_IGNORED,
			VK_QUEUE_FAMILY_IGNORED,

			*m_dstImage,
			{
				vk::VK_IMAGE_ASPECT_COLOR_BIT,
				0,	// Mip level
				1,	// Mip level count
				0,	// Layer
				1	// Layer count
			}
		};
		// Make the buffer write visible to host reads after the copy.
		const vk::VkBufferMemoryBarrier bufferBarrier =
		{
			vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
			DE_NULL,

			vk::VK_ACCESS_TRANSFER_WRITE_BIT,
			vk::VK_ACCESS_HOST_READ_BIT,

			VK_QUEUE_FAMILY_IGNORED,
			VK_QUEUE_FAMILY_IGNORED,
			*dstBuffer,
			0,
			VK_WHOLE_SIZE
		};
		// Whole-image copy; bufferRowLength/bufferImageHeight of 0 mean
		// tightly packed rows.
		const vk::VkBufferImageCopy	region =
		{
			0,
			0, 0,
			{
				vk::VK_IMAGE_ASPECT_COLOR_BIT,
				0,	// mipLevel
				0,	// arrayLayer
				1	// layerCount
			},
			{ 0, 0, 0 },
			{
				(deUint32)m_imageWidth,
				(deUint32)m_imageHeight,
				1u
			}
		};

		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &imageBarrier);
		vkd.cmdCopyImageToBuffer(*commandBuffer, *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *dstBuffer, 1, &region);
		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &bufferBarrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
	}

	endCommandBuffer(vkd, *commandBuffer);
	submitCommandsAndWait(vkd, device, queue, *commandBuffer);

	{
		// Map the readback buffer and compare against the reference image.
		void* const	ptr		= mapMemory(vkd, device, *memory, 4 * m_imageWidth * m_imageHeight);

		vk::invalidateMappedMemoryRange(vkd, device, *memory, 0, VK_WHOLE_SIZE);

		{
			const deUint8* const			data		= (const deUint8*)ptr;
			const ConstPixelBufferAccess	resAccess	(TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_imageWidth, m_imageHeight, 1, data);
			const ConstPixelBufferAccess&	refAccess	(context.getReferenceImage().getAccess());

			// Exact (zero-threshold) comparison; failures are logged with images.
			if (!tcu::intThresholdCompare(context.getLog(), (de::toString(commandIndex) + ":" + getName()).c_str(), (de::toString(commandIndex) + ":" + getName()).c_str(), refAccess, resAccess, UVec4(0), tcu::COMPARE_LOG_ON_ERROR))
				resultCollector.fail(de::toString(commandIndex) + ":" + getName() + " Image comparison failed");
		}

		vkd.unmapMemory(device, *memory);
	}
}
3596 
// Scale factor applied by the image blit commands.
enum BlitScale
{
	BLIT_SCALE_20,	// Blit scales by 2x in both dimensions
	BLIT_SCALE_10,	// Blit copies 1:1 (no scaling)
};
3602 
// Command that blits (vkCmdBlitImage, nearest filter) a seeded random source
// image onto the context image, either 1:1 or upscaling 2x depending on
// m_scale, and verifies the result against a software re-blit.
class ImageBlitFromImage : public CmdCommand
{
public:
									ImageBlitFromImage	(deUint32 seed, BlitScale scale, vk::VkImageLayout imageLayout) : m_seed(seed), m_scale(scale), m_imageLayout(imageLayout) {}
									~ImageBlitFromImage	(void) {}
	const char*						getName				(void) const { return "ImageBlitFromImage"; }

	void							logPrepare			(TestLog& log, size_t commandIndex) const;
	void							prepare				(PrepareContext& context);
	void							logSubmit			(TestLog& log, size_t commandIndex) const;
	void							submit				(SubmitContext& context);
	void							verify				(VerifyContext& context, size_t commandIndex);

private:
	const deUint32					m_seed;				// Seed for the RNG that fills the source image
	const BlitScale					m_scale;			// 1:1 or 2x upscale
	const vk::VkImageLayout			m_imageLayout;		// Layout of the context (destination) image during the blit
	deInt32							m_imageWidth;		// Context image dimensions, captured in prepare()
	deInt32							m_imageHeight;
	vk::VkDeviceSize				m_imageMemorySize;
	deInt32							m_srcImageWidth;	// Source dimensions (half size for BLIT_SCALE_20)
	deInt32							m_srcImageHeight;
	vk::Move<vk::VkImage>			m_srcImage;			// Source image owned by this command
	vk::Move<vk::VkDeviceMemory>	m_memory;			// Backing memory for m_srcImage
};
3628 
// Log the source-image allocation done in prepare().
void ImageBlitFromImage::logPrepare (TestLog& log, size_t commandIndex) const
{
	log << TestLog::Message << commandIndex << ":" << getName() << " Allocate source image for image to image blit." << TestLog::EndMessage;
}
3633 
// Create and fill the blit source image: allocate it (same size as the
// context image for 1:1, half size for 2x upscale), upload seeded
// pseudo-random pixel data through a host-visible staging buffer, and leave
// the image in TRANSFER_SRC_OPTIMAL ready for submit().
void ImageBlitFromImage::prepare (PrepareContext& context)
{
	const vk::InstanceInterface&	vki				= context.getContext().getInstanceInterface();
	const vk::DeviceInterface&		vkd				= context.getContext().getDeviceInterface();
	const vk::VkPhysicalDevice		physicalDevice	= context.getContext().getPhysicalDevice();
	const vk::VkDevice				device			= context.getContext().getDevice();
	const vk::VkQueue				queue			= context.getContext().getQueue();
	const vk::VkCommandPool			commandPool		= context.getContext().getCommandPool();
	const vector<deUint32>&			queueFamilies	= context.getContext().getQueueFamilies();

	m_imageWidth		= context.getImageWidth();
	m_imageHeight		= context.getImageHeight();
	m_imageMemorySize	= context.getImageMemorySize();

	if (m_scale == BLIT_SCALE_10)
	{
		// 1:1 blit: source matches the target size.
		m_srcImageWidth			= m_imageWidth;
		m_srcImageHeight		= m_imageHeight;
	}
	else if (m_scale == BLIT_SCALE_20)
	{
		// 2x upscale: source is half size, clamped to at least one texel.
		m_srcImageWidth			= m_imageWidth == 1 ? 1 : m_imageWidth / 2;
		m_srcImageHeight		= m_imageHeight == 1 ? 1 : m_imageHeight / 2;
	}
	else
		DE_FATAL("Unsupported scale");

	{
		// Source image: DST for the staging upload, SRC for the blit.
		const vk::VkImageCreateInfo	createInfo =
		{
			vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
			DE_NULL,

			0,
			vk::VK_IMAGE_TYPE_2D,
			vk::VK_FORMAT_R8G8B8A8_UNORM,
			{
				(deUint32)m_srcImageWidth,
				(deUint32)m_srcImageHeight,
				1u,
			},
			1, 1, // mipLevels, arrayLayers
			vk::VK_SAMPLE_COUNT_1_BIT,

			vk::VK_IMAGE_TILING_OPTIMAL,
			vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
			vk::VK_SHARING_MODE_EXCLUSIVE,

			(deUint32)queueFamilies.size(),
			&queueFamilies[0],
			vk::VK_IMAGE_LAYOUT_UNDEFINED
		};

		m_srcImage = vk::createImage(vkd, device, &createInfo);
	}

	m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_srcImage, 0);

	{
		// Staging buffer (4 bytes/pixel, RGBA8) used to upload the random data.
		const vk::Unique<vk::VkBuffer>			srcBuffer		(createBuffer(vkd, device, 4 * m_srcImageWidth * m_srcImageHeight, vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
		const vk::Unique<vk::VkDeviceMemory>	memory			(bindBufferMemory(vki, vkd, physicalDevice, device, *srcBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
		const vk::Unique<vk::VkCommandBuffer>	commandBuffer	(createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
		// UNDEFINED -> TRANSFER_DST_OPTIMAL before the buffer-to-image copy.
		const vk::VkImageMemoryBarrier			preImageBarrier	=
		{
			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
			DE_NULL,

			0,
			vk::VK_ACCESS_TRANSFER_WRITE_BIT,

			vk::VK_IMAGE_LAYOUT_UNDEFINED,
			vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,

			VK_QUEUE_FAMILY_IGNORED,
			VK_QUEUE_FAMILY_IGNORED,

			*m_srcImage,
			{
				vk::VK_IMAGE_ASPECT_COLOR_BIT,
				0,	// Mip level
				1,	// Mip level count
				0,	// Layer
				1	// Layer count
			}
		};
		// TRANSFER_DST_OPTIMAL -> TRANSFER_SRC_OPTIMAL after the upload, so
		// the image is ready to be the blit source in submit().
		const vk::VkImageMemoryBarrier			postImageBarrier =
		{
			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
			DE_NULL,

			vk::VK_ACCESS_TRANSFER_WRITE_BIT,
			0,

			vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
			vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,

			VK_QUEUE_FAMILY_IGNORED,
			VK_QUEUE_FAMILY_IGNORED,

			*m_srcImage,
			{
				vk::VK_IMAGE_ASPECT_COLOR_BIT,
				0,	// Mip level
				1,	// Mip level count
				0,	// Layer
				1	// Layer count
			}
		};
		// Whole-image, tightly packed copy region.
		const vk::VkBufferImageCopy				region				=
		{
			0,
			0, 0,
			{
				vk::VK_IMAGE_ASPECT_COLOR_BIT,
				0,	// mipLevel
				0,	// arrayLayer
				1	// layerCount
			},
			{ 0, 0, 0 },
			{
				(deUint32)m_srcImageWidth,
				(deUint32)m_srcImageHeight,
				1u
			}
		};

		{
			// Fill the staging buffer with seeded random bytes; verify()
			// replays the same RNG stream to build the reference.
			void* const	ptr	= mapMemory(vkd, device, *memory, 4 * m_srcImageWidth * m_srcImageHeight);
			de::Random	rng	(m_seed);

			{
				deUint8* const	data = (deUint8*)ptr;

				for (size_t ndx = 0; ndx < (size_t)(4 * m_srcImageWidth * m_srcImageHeight); ndx++)
					data[ndx] = rng.getUint8();
			}

			vk::flushMappedMemoryRange(vkd, device, *memory, 0, VK_WHOLE_SIZE);
			vkd.unmapMemory(device, *memory);
		}

		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &preImageBarrier);
		vkd.cmdCopyBufferToImage(*commandBuffer, *srcBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &postImageBarrier);

		endCommandBuffer(vkd, *commandBuffer);
		submitCommandsAndWait(vkd, device, queue, *commandBuffer);
	}
}
3783 
// Log the blit that submit() records, noting the 2x scale when applicable.
void ImageBlitFromImage::logSubmit (TestLog& log, size_t commandIndex) const
{
	log << TestLog::Message << commandIndex << ":" << getName() << " Blit from another image" << (m_scale == BLIT_SCALE_20 ? " scale 2x" : "")  << TestLog::EndMessage;
}
3788 
submit(SubmitContext & context)3789 void ImageBlitFromImage::submit (SubmitContext& context)
3790 {
3791 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
3792 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
3793 	const vk::VkImageBlit		region			=
3794 	{
3795 		// Src
3796 		{
3797 			vk::VK_IMAGE_ASPECT_COLOR_BIT,
3798 			0,	// mipLevel
3799 			0,	// arrayLayer
3800 			1	// layerCount
3801 		},
3802 		{
3803 			{ 0, 0, 0 },
3804 			{
3805 				m_srcImageWidth,
3806 				m_srcImageHeight,
3807 				1
3808 			},
3809 		},
3810 
3811 		// Dst
3812 		{
3813 			vk::VK_IMAGE_ASPECT_COLOR_BIT,
3814 			0,	// mipLevel
3815 			0,	// arrayLayer
3816 			1	// layerCount
3817 		},
3818 		{
3819 			{ 0, 0, 0 },
3820 			{
3821 				m_imageWidth,
3822 				m_imageHeight,
3823 				1u
3824 			}
3825 		}
3826 	};
3827 	vkd.cmdBlitImage(commandBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, context.getImage(), m_imageLayout, 1, &region, vk::VK_FILTER_NEAREST);
3828 }
3829 
// Update the reference after the blit: the raw memory of the target image is
// undefined, and the reference image is rebuilt by replaying the seeded RNG
// and, for the 2x case, emulating the nearest-filter upscale in software.
void ImageBlitFromImage::verify (VerifyContext& context, size_t)
{
	ReferenceMemory&	reference	(context.getReference());
	de::Random			rng			(m_seed);

	reference.setUndefined(0, (size_t)m_imageMemorySize);

	{
		const PixelBufferAccess&	refAccess	(context.getReferenceImage().getAccess());

		if (m_scale == BLIT_SCALE_10)
		{
			// 1:1 blit: reference pixels are exactly the RNG stream used to
			// fill the source image in prepare() (r,g,b,a per pixel).
			for (deInt32 y = 0; y < m_imageHeight; y++)
			for (deInt32 x = 0; x < m_imageWidth; x++)
			{
				const deUint8 r8 = rng.getUint8();
				const deUint8 g8 = rng.getUint8();
				const deUint8 b8 = rng.getUint8();
				const deUint8 a8 = rng.getUint8();

				refAccess.setPixel(UVec4(r8, g8, b8, a8), x, y);
			}
		}
		else if (m_scale == BLIT_SCALE_20)
		{
			// 2x blit: regenerate the half-size source, then sample it with
			// nearest filtering at destination-pixel centers ((x+0.5)*scale),
			// matching VK_FILTER_NEAREST semantics.
			tcu::TextureLevel	source	(TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_srcImageWidth, m_srcImageHeight);
			const float			xscale	= ((float)m_srcImageWidth)  / (float)m_imageWidth;
			const float			yscale	= ((float)m_srcImageHeight) / (float)m_imageHeight;

			for (deInt32 y = 0; y < m_srcImageHeight; y++)
			for (deInt32 x = 0; x < m_srcImageWidth; x++)
			{
				const deUint8 r8 = rng.getUint8();
				const deUint8 g8 = rng.getUint8();
				const deUint8 b8 = rng.getUint8();
				const deUint8 a8 = rng.getUint8();

				source.getAccess().setPixel(UVec4(r8, g8, b8, a8), x, y);
			}

			for (deInt32 y = 0; y < m_imageHeight; y++)
			for (deInt32 x = 0; x < m_imageWidth; x++)
				refAccess.setPixel(source.getAccess().getPixelUint(int((float(x) + 0.5f) * xscale), int((float(y) + 0.5f) * yscale)), x, y);
		}
		else
			DE_FATAL("Unsupported scale");
	}
}
3878 
// Command that blits (vkCmdBlitImage, nearest filter) the context image onto
// a freshly allocated destination image, either 1:1 or upscaling 2x, then
// verifies the destination contents against the reference image.
class ImageBlitToImage : public CmdCommand
{
public:
									ImageBlitToImage	(BlitScale scale, vk::VkImageLayout imageLayout) : m_scale(scale), m_imageLayout(imageLayout) {}
									~ImageBlitToImage	(void) {}
	const char*						getName				(void) const { return "ImageBlitToImage"; }

	void							logPrepare			(TestLog& log, size_t commandIndex) const;
	void							prepare				(PrepareContext& context);
	void							logSubmit			(TestLog& log, size_t commandIndex) const;
	void							submit				(SubmitContext& context);
	void							verify				(VerifyContext& context, size_t commandIndex);

private:
	const BlitScale					m_scale;			// 1:1 or 2x upscale
	const vk::VkImageLayout			m_imageLayout;		// Layout of the context (source) image during the blit
	deInt32							m_imageWidth;		// Context image dimensions, captured in prepare()
	deInt32							m_imageHeight;
	vk::VkDeviceSize				m_imageMemorySize;
	deInt32							m_dstImageWidth;	// Destination dimensions (doubled for BLIT_SCALE_20)
	deInt32							m_dstImageHeight;
	vk::Move<vk::VkImage>			m_dstImage;			// Destination image owned by this command
	vk::Move<vk::VkDeviceMemory>	m_memory;			// Backing memory for m_dstImage
};
3903 
// Log the destination-image allocation done in prepare().
void ImageBlitToImage::logPrepare (TestLog& log, size_t commandIndex) const
{
	log << TestLog::Message << commandIndex << ":" << getName() << " Allocate destination image for image to image blit." << TestLog::EndMessage;
}
3908 
prepare(PrepareContext & context)3909 void ImageBlitToImage::prepare (PrepareContext& context)
3910 {
3911 	const vk::InstanceInterface&	vki				= context.getContext().getInstanceInterface();
3912 	const vk::DeviceInterface&		vkd				= context.getContext().getDeviceInterface();
3913 	const vk::VkPhysicalDevice		physicalDevice	= context.getContext().getPhysicalDevice();
3914 	const vk::VkDevice				device			= context.getContext().getDevice();
3915 	const vk::VkQueue				queue			= context.getContext().getQueue();
3916 	const vk::VkCommandPool			commandPool		= context.getContext().getCommandPool();
3917 	const vector<deUint32>&			queueFamilies	= context.getContext().getQueueFamilies();
3918 
3919 	m_imageWidth		= context.getImageWidth();
3920 	m_imageHeight		= context.getImageHeight();
3921 	m_imageMemorySize	= context.getImageMemorySize();
3922 
3923 	if (m_scale == BLIT_SCALE_10)
3924 	{
3925 		m_dstImageWidth		= context.getImageWidth();
3926 		m_dstImageHeight	= context.getImageHeight();
3927 	}
3928 	else if (m_scale == BLIT_SCALE_20)
3929 	{
3930 		m_dstImageWidth		= context.getImageWidth() * 2;
3931 		m_dstImageHeight	= context.getImageHeight() * 2;
3932 	}
3933 	else
3934 		DE_FATAL("Unsupportd blit scale");
3935 
3936 	{
3937 		const vk::VkImageCreateInfo	createInfo =
3938 		{
3939 			vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
3940 			DE_NULL,
3941 
3942 			0,
3943 			vk::VK_IMAGE_TYPE_2D,
3944 			vk::VK_FORMAT_R8G8B8A8_UNORM,
3945 			{
3946 				(deUint32)m_dstImageWidth,
3947 				(deUint32)m_dstImageHeight,
3948 				1u,
3949 			},
3950 			1, 1, // mipLevels, arrayLayers
3951 			vk::VK_SAMPLE_COUNT_1_BIT,
3952 
3953 			vk::VK_IMAGE_TILING_OPTIMAL,
3954 			vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
3955 			vk::VK_SHARING_MODE_EXCLUSIVE,
3956 
3957 			(deUint32)queueFamilies.size(),
3958 			&queueFamilies[0],
3959 			vk::VK_IMAGE_LAYOUT_UNDEFINED
3960 		};
3961 
3962 		m_dstImage = vk::createImage(vkd, device, &createInfo);
3963 	}
3964 
3965 	m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_dstImage, 0);
3966 
3967 	{
3968 		const vk::Unique<vk::VkCommandBuffer>	commandBuffer	(createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
3969 		const vk::VkImageMemoryBarrier			barrier			=
3970 		{
3971 			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
3972 			DE_NULL,
3973 
3974 			0,
3975 			vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3976 
3977 			vk::VK_IMAGE_LAYOUT_UNDEFINED,
3978 			vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
3979 
3980 			VK_QUEUE_FAMILY_IGNORED,
3981 			VK_QUEUE_FAMILY_IGNORED,
3982 
3983 			*m_dstImage,
3984 			{
3985 				vk::VK_IMAGE_ASPECT_COLOR_BIT,
3986 				0,	// Mip level
3987 				1,	// Mip level count
3988 				0,	// Layer
3989 				1	// Layer count
3990 			}
3991 		};
3992 
3993 		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &barrier);
3994 
3995 		endCommandBuffer(vkd, *commandBuffer);
3996 		submitCommandsAndWait(vkd, device, queue, *commandBuffer);
3997 	}
3998 }
3999 
// Log the blit that submit() records, noting the 2x scale when applicable.
void ImageBlitToImage::logSubmit (TestLog& log, size_t commandIndex) const
{
	log << TestLog::Message << commandIndex << ":" << getName() << " Blit image to another image" << (m_scale == BLIT_SCALE_20 ? " scale 2x" : "")  << TestLog::EndMessage;
}
4004 
submit(SubmitContext & context)4005 void ImageBlitToImage::submit (SubmitContext& context)
4006 {
4007 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
4008 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
4009 	const vk::VkImageBlit		region			=
4010 	{
4011 		// Src
4012 		{
4013 			vk::VK_IMAGE_ASPECT_COLOR_BIT,
4014 			0,	// mipLevel
4015 			0,	// arrayLayer
4016 			1	// layerCount
4017 		},
4018 		{
4019 			{ 0, 0, 0 },
4020 			{
4021 				m_imageWidth,
4022 				m_imageHeight,
4023 				1
4024 			},
4025 		},
4026 
4027 		// Dst
4028 		{
4029 			vk::VK_IMAGE_ASPECT_COLOR_BIT,
4030 			0,	// mipLevel
4031 			0,	// arrayLayer
4032 			1	// layerCount
4033 		},
4034 		{
4035 			{ 0, 0, 0 },
4036 			{
4037 				m_dstImageWidth,
4038 				m_dstImageHeight,
4039 				1u
4040 			}
4041 		}
4042 	};
4043 	vkd.cmdBlitImage(commandBuffer, context.getImage(), m_imageLayout, *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region, vk::VK_FILTER_NEAREST);
4044 }
4045 
// Read the destination image back into a host-visible buffer and compare it
// against the reference image: directly for a 1:1 blit, or against a
// software 2x nearest-neighbor upscale of the reference for BLIT_SCALE_20.
void ImageBlitToImage::verify (VerifyContext& context, size_t commandIndex)
{
	tcu::ResultCollector&					resultCollector	(context.getResultCollector());
	const vk::InstanceInterface&			vki				= context.getContext().getInstanceInterface();
	const vk::DeviceInterface&				vkd				= context.getContext().getDeviceInterface();
	const vk::VkPhysicalDevice				physicalDevice	= context.getContext().getPhysicalDevice();
	const vk::VkDevice						device			= context.getContext().getDevice();
	const vk::VkQueue						queue			= context.getContext().getQueue();
	const vk::VkCommandPool					commandPool		= context.getContext().getCommandPool();
	const vk::Unique<vk::VkCommandBuffer>	commandBuffer	(createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
	const vector<deUint32>&					queueFamilies	= context.getContext().getQueueFamilies();
	// 4 bytes per pixel (RGBA8) readback buffer, sized for the destination.
	const vk::Unique<vk::VkBuffer>			dstBuffer		(createBuffer(vkd, device, 4 * m_dstImageWidth * m_dstImageHeight, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
	const vk::Unique<vk::VkDeviceMemory>	memory			(bindBufferMemory(vki, vkd, physicalDevice, device, *dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
	{
		// Make the blit write visible and move the destination image to
		// TRANSFER_SRC_OPTIMAL for the readback copy.
		const vk::VkImageMemoryBarrier		imageBarrier	=
		{
			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
			DE_NULL,

			vk::VK_ACCESS_TRANSFER_WRITE_BIT,
			vk::VK_ACCESS_TRANSFER_READ_BIT,

			vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
			vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,

			VK_QUEUE_FAMILY_IGNORED,
			VK_QUEUE_FAMILY_IGNORED,

			*m_dstImage,
			{
				vk::VK_IMAGE_ASPECT_COLOR_BIT,
				0,	// Mip level
				1,	// Mip level count
				0,	// Layer
				1	// Layer count
			}
		};
		// Make the buffer write visible to host reads after the copy.
		const vk::VkBufferMemoryBarrier bufferBarrier =
		{
			vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
			DE_NULL,

			vk::VK_ACCESS_TRANSFER_WRITE_BIT,
			vk::VK_ACCESS_HOST_READ_BIT,

			VK_QUEUE_FAMILY_IGNORED,
			VK_QUEUE_FAMILY_IGNORED,
			*dstBuffer,
			0,
			VK_WHOLE_SIZE
		};
		// Whole-image, tightly packed copy region.
		const vk::VkBufferImageCopy	region =
		{
			0,
			0, 0,
			{
				vk::VK_IMAGE_ASPECT_COLOR_BIT,
				0,	// mipLevel
				0,	// arrayLayer
				1	// layerCount
			},
			{ 0, 0, 0 },
			{
				(deUint32)m_dstImageWidth,
				(deUint32)m_dstImageHeight,
				1
			}
		};

		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &imageBarrier);
		vkd.cmdCopyImageToBuffer(*commandBuffer, *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *dstBuffer, 1, &region);
		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &bufferBarrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
	}

	endCommandBuffer(vkd, *commandBuffer);
	submitCommandsAndWait(vkd, device, queue, *commandBuffer);

	{
		void* const	ptr		= mapMemory(vkd, device, *memory, 4 * m_dstImageWidth * m_dstImageHeight);

		vk::invalidateMappedMemoryRange(vkd, device, *memory, 0, VK_WHOLE_SIZE);

		if (m_scale == BLIT_SCALE_10)
		{
			// 1:1 blit: result must match the reference image exactly.
			const deUint8* const			data		= (const deUint8*)ptr;
			const ConstPixelBufferAccess	resAccess	(TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_dstImageWidth, m_dstImageHeight, 1, data);
			const ConstPixelBufferAccess&	refAccess	(context.getReferenceImage().getAccess());

			if (!tcu::intThresholdCompare(context.getLog(), (de::toString(commandIndex) + ":" + getName()).c_str(), (de::toString(commandIndex) + ":" + getName()).c_str(), refAccess, resAccess, UVec4(0), tcu::COMPARE_LOG_ON_ERROR))
				resultCollector.fail(de::toString(commandIndex) + ":" + getName() + " Image comparison failed");
		}
		else if (m_scale == BLIT_SCALE_20)
		{
			// 2x blit: build the expected image by replicating each reference
			// pixel into a 2x2 block (nearest-neighbor upscale), then compare.
			const deUint8* const			data		= (const deUint8*)ptr;
			const ConstPixelBufferAccess	resAccess	(TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_dstImageWidth, m_dstImageHeight, 1, data);
			tcu::TextureLevel				reference	(TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_dstImageWidth, m_dstImageHeight, 1);

			{
				const ConstPixelBufferAccess&	refAccess	(context.getReferenceImage().getAccess());

				for (deInt32 y = 0; y < m_dstImageHeight; y++)
				for (deInt32 x = 0; x < m_dstImageWidth; x++)
				{
					reference.getAccess().setPixel(refAccess.getPixel(x/2, y/2), x, y);
				}
			}

			if (!tcu::intThresholdCompare(context.getLog(), (de::toString(commandIndex) + ":" + getName()).c_str(), (de::toString(commandIndex) + ":" + getName()).c_str(), reference.getAccess(), resAccess, UVec4(0), tcu::COMPARE_LOG_ON_ERROR))
				resultCollector.fail(de::toString(commandIndex) + ":" + getName() + " Image comparison failed");
		}
		else
			DE_FATAL("Unknown scale");

		vkd.unmapMemory(device, *memory);
	}
}
4162 
// Wraps a PrepareContext for commands that execute inside a render pass,
// additionally carrying the render pass, framebuffer and render-target size.
// Most getters simply forward to the wrapped PrepareContext.
class PrepareRenderPassContext
{
public:
								PrepareRenderPassContext	(PrepareContext&	context,
															 vk::VkRenderPass	renderPass,
															 vk::VkFramebuffer	framebuffer,
															 deInt32			targetWidth,
															 deInt32			targetHeight)
		: m_context			(context)
		, m_renderPass		(renderPass)
		, m_framebuffer		(framebuffer)
		, m_targetWidth		(targetWidth)
		, m_targetHeight	(targetHeight)
	{
	}

	// Forwarders to the wrapped PrepareContext.
	const Memory&				getMemory					(void) const { return m_context.getMemory(); }
	const Context&				getContext					(void) const { return m_context.getContext(); }
	const vk::BinaryCollection&	getBinaryCollection			(void) const { return m_context.getBinaryCollection(); }

	vk::VkBuffer				getBuffer					(void) const { return m_context.getBuffer(); }
	vk::VkDeviceSize			getBufferSize				(void) const { return m_context.getBufferSize(); }

	vk::VkImage					getImage					(void) const { return m_context.getImage(); }
	deInt32						getImageWidth				(void) const { return m_context.getImageWidth(); }
	deInt32						getImageHeight				(void) const { return m_context.getImageHeight(); }
	vk::VkImageLayout			getImageLayout				(void) const { return m_context.getImageLayout(); }

	// Render-target state carried by this wrapper.
	deInt32						getTargetWidth				(void) const { return m_targetWidth; }
	deInt32						getTargetHeight				(void) const { return m_targetHeight; }

	vk::VkRenderPass			getRenderPass				(void) const { return m_renderPass; }

private:
	PrepareContext&				m_context;
	const vk::VkRenderPass		m_renderPass;
	// NOTE(review): m_framebuffer is stored but has no accessor in this class
	// as visible here — confirm whether it is intentionally unused.
	const vk::VkFramebuffer		m_framebuffer;
	const deInt32				m_targetWidth;
	const deInt32				m_targetHeight;
};
4203 
// Wraps a VerifyContext and adds a reference render target (RGBA8 texture
// level) into which RenderPassCommands paint their expected output; the
// caller later compares this reference against the actual rendered image.
class VerifyRenderPassContext
{
public:
							VerifyRenderPassContext		(VerifyContext&			context,
														 deInt32				targetWidth,
														 deInt32				targetHeight)
		: m_context			(context)
		, m_referenceTarget	(TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), targetWidth, targetHeight)
	{
	}

	// Accessors forwarding to the wrapped VerifyContext.
	const Context&			getContext			(void) const { return m_context.getContext(); }
	TestLog&				getLog				(void) const { return m_context.getLog(); }
	tcu::ResultCollector&	getResultCollector	(void) const { return m_context.getResultCollector(); }

	// Reference image for the render target of the current render pass.
	TextureLevel&			getReferenceTarget	(void) { return m_referenceTarget; }

	ReferenceMemory&		getReference		(void) { return m_context.getReference(); }
	TextureLevel&			getReferenceImage	(void) { return m_context.getReferenceImage();}

private:
	VerifyContext&	m_context;
	TextureLevel	m_referenceTarget;
};
4228 
// Abstract base class for commands recorded inside a render pass instance
// (between begin/end render pass). Mirrors the CmdCommand interface but uses
// the render-pass-specific prepare/verify contexts. All hooks default to
// no-ops so subclasses override only what they need.
class RenderPassCommand
{
public:
	virtual				~RenderPassCommand	(void) {}
	virtual const char*	getName				(void) const = 0;

	// Log things that are done during prepare
	virtual void		logPrepare			(TestLog&, size_t) const {}
	// Log submitted calls etc.
	virtual void		logSubmit			(TestLog&, size_t) const {}

	// Allocate vulkan resources and prepare for submit.
	virtual void		prepare				(PrepareRenderPassContext&) {}

	// Submit commands to command buffer.
	virtual void		submit				(SubmitContext&) {}

	// Verify results
	virtual void		verify				(VerifyRenderPassContext&, size_t) {}
};
4249 
// CmdCommand that runs a list of RenderPassCommands inside one render pass
// instance targeting a freshly created 256x256 RGBA8 color attachment, and in
// verify() reads the attachment back and compares it to a reference image.
class SubmitRenderPass : public CmdCommand
{
public:
				SubmitRenderPass	(const vector<RenderPassCommand*>& commands);
				~SubmitRenderPass	(void);
	const char*	getName				(void) const { return "SubmitRenderPass"; }

	void		logPrepare			(TestLog&, size_t) const;
	void		logSubmit			(TestLog&, size_t) const;

	void		prepare				(PrepareContext&);
	void		submit				(SubmitContext&);

	void		verify				(VerifyContext&, size_t);

private:
	const deInt32					m_targetWidth;
	const deInt32					m_targetHeight;
	vk::Move<vk::VkRenderPass>		m_renderPass;
	vk::Move<vk::VkDeviceMemory>	m_colorTargetMemory;
	// NOTE(review): m_colorTargetMemory2 is never assigned or read by any of
	// this class' methods - looks like a leftover; confirm before removing.
	de::MovePtr<vk::Allocation>		m_colorTargetMemory2;
	vk::Move<vk::VkImage>			m_colorTarget;
	vk::Move<vk::VkImageView>		m_colorTargetView;
	vk::Move<vk::VkFramebuffer>		m_framebuffer;
	vector<RenderPassCommand*>		m_commands;	// Owned; deleted in the destructor.
};
4276 
// Takes ownership of the given commands. Render target size is fixed 256x256.
SubmitRenderPass::SubmitRenderPass (const vector<RenderPassCommand*>& commands)
	: m_targetWidth		(256)
	, m_targetHeight	(256)
	, m_commands		(commands)
{
}
4283 
~SubmitRenderPass()4284 SubmitRenderPass::~SubmitRenderPass()
4285 {
4286 	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4287 		delete m_commands[cmdNdx];
4288 }
4289 
logPrepare(TestLog & log,size_t commandIndex) const4290 void SubmitRenderPass::logPrepare (TestLog& log, size_t commandIndex) const
4291 {
4292 	const string				sectionName	(de::toString(commandIndex) + ":" + getName());
4293 	const tcu::ScopedLogSection	section		(log, sectionName, sectionName);
4294 
4295 	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4296 	{
4297 		RenderPassCommand& command = *m_commands[cmdNdx];
4298 		command.logPrepare(log, cmdNdx);
4299 	}
4300 }
4301 
logSubmit(TestLog & log,size_t commandIndex) const4302 void SubmitRenderPass::logSubmit (TestLog& log, size_t commandIndex) const
4303 {
4304 	const string				sectionName	(de::toString(commandIndex) + ":" + getName());
4305 	const tcu::ScopedLogSection	section		(log, sectionName, sectionName);
4306 
4307 	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4308 	{
4309 		RenderPassCommand& command = *m_commands[cmdNdx];
4310 		command.logSubmit(log, cmdNdx);
4311 	}
4312 }
4313 
// Creates the color target image, its memory, view, render pass and
// framebuffer, then lets every sub-command allocate its own resources.
void SubmitRenderPass::prepare (PrepareContext& context)
{
	const vk::InstanceInterface&			vki				= context.getContext().getInstanceInterface();
	const vk::DeviceInterface&				vkd				= context.getContext().getDeviceInterface();
	const vk::VkPhysicalDevice				physicalDevice	= context.getContext().getPhysicalDevice();
	const vk::VkDevice						device			= context.getContext().getDevice();
	const vector<deUint32>&					queueFamilies	= context.getContext().getQueueFamilies();

	// 2D RGBA8 color target. TRANSFER_SRC usage is required so verify() can
	// later copy the rendered result into a host-visible buffer.
	{
		const vk::VkImageCreateInfo createInfo =
		{
			vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
			DE_NULL,
			0u,

			vk::VK_IMAGE_TYPE_2D,
			vk::VK_FORMAT_R8G8B8A8_UNORM,
			{ (deUint32)m_targetWidth, (deUint32)m_targetHeight, 1u },
			1u,
			1u,
			vk::VK_SAMPLE_COUNT_1_BIT,
			vk::VK_IMAGE_TILING_OPTIMAL,
			vk::VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
			vk::VK_SHARING_MODE_EXCLUSIVE,
			(deUint32)queueFamilies.size(),
			&queueFamilies[0],
			vk::VK_IMAGE_LAYOUT_UNDEFINED
		};

		m_colorTarget = vk::createImage(vkd, device, &createInfo);
	}

	// Allocate and bind device memory for the color target.
	m_colorTargetMemory = bindImageMemory(vki, vkd, physicalDevice, device, *m_colorTarget, 0);

	// Plain 2D view covering the whole image; used as framebuffer attachment.
	{
		const vk::VkImageViewCreateInfo createInfo =
		{
			vk::VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
			DE_NULL,

			0u,
			*m_colorTarget,
			vk::VK_IMAGE_VIEW_TYPE_2D,
			vk::VK_FORMAT_R8G8B8A8_UNORM,
			{
				vk::VK_COMPONENT_SWIZZLE_R,
				vk::VK_COMPONENT_SWIZZLE_G,
				vk::VK_COMPONENT_SWIZZLE_B,
				vk::VK_COMPONENT_SWIZZLE_A
			},
			{
				vk::VK_IMAGE_ASPECT_COLOR_BIT,
				0u,	// baseMipLevel
				1u,	// levelCount
				0u,	// baseArrayLayer
				1u	// layerCount
			}
		};

		m_colorTargetView = vk::createImageView(vkd, device, &createInfo);
	}

	// Single color attachment render pass: clear on load, finish in
	// TRANSFER_SRC_OPTIMAL so the readback in verify() needs no extra
	// layout transition.
	m_renderPass = vk::makeRenderPass(vkd, device, vk::VK_FORMAT_R8G8B8A8_UNORM, vk::VK_FORMAT_UNDEFINED, vk::VK_ATTACHMENT_LOAD_OP_CLEAR, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);

	{
		const vk::VkImageView				imageViews[]	=
		{
			*m_colorTargetView
		};
		const vk::VkFramebufferCreateInfo	createInfo		=
		{
			vk::VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
			DE_NULL,
			0u,

			*m_renderPass,
			DE_LENGTH_OF_ARRAY(imageViews),
			imageViews,
			(deUint32)m_targetWidth,
			(deUint32)m_targetHeight,
			1u
		};

		m_framebuffer = vk::createFramebuffer(vkd, device, &createInfo);
	}

	// Let the sub-commands allocate their resources (pipelines, descriptor
	// sets, ...) against the just-created render pass and framebuffer.
	{
		PrepareRenderPassContext renderpassContext (context, *m_renderPass, *m_framebuffer, m_targetWidth, m_targetHeight);

		for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
		{
			RenderPassCommand& command = *m_commands[cmdNdx];
			command.prepare(renderpassContext);
		}
	}
}
4410 
submit(SubmitContext & context)4411 void SubmitRenderPass::submit (SubmitContext& context)
4412 {
4413 	const vk::DeviceInterface&		vkd				= context.getContext().getDeviceInterface();
4414 	const vk::VkCommandBuffer		commandBuffer	= context.getCommandBuffer();
4415 
4416 	beginRenderPass(vkd, commandBuffer, *m_renderPass, *m_framebuffer, vk::makeRect2D(0, 0, m_targetWidth, m_targetHeight), tcu::Vec4(0.0f, 0.0f, 0.0f, 1.0f));
4417 
4418 	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4419 	{
4420 		RenderPassCommand& command = *m_commands[cmdNdx];
4421 
4422 		command.submit(context);
4423 	}
4424 
4425 	endRenderPass(vkd, commandBuffer);
4426 }
4427 
// Builds the expected image from the sub-commands' verify() hooks, then reads
// the actual color target back through a transfer copy and compares the two.
void SubmitRenderPass::verify (VerifyContext& context, size_t commandIndex)
{
	TestLog&					log				(context.getLog());
	tcu::ResultCollector&		resultCollector	(context.getResultCollector());
	const string				sectionName		(de::toString(commandIndex) + ":" + getName());
	const tcu::ScopedLogSection	section			(log, sectionName, sectionName);
	VerifyRenderPassContext		verifyContext	(context, m_targetWidth, m_targetHeight);

	// Start the reference from the render pass clear color; each sub-command
	// paints its expected output on top.
	tcu::clear(verifyContext.getReferenceTarget().getAccess(), Vec4(0.0f, 0.0f, 0.0f, 1.0f));

	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
	{
		RenderPassCommand& command = *m_commands[cmdNdx];
		command.verify(verifyContext, cmdNdx);
	}

	// Copy the rendered image into a host-visible buffer and compare.
	{
		const vk::InstanceInterface&			vki				= context.getContext().getInstanceInterface();
		const vk::DeviceInterface&				vkd				= context.getContext().getDeviceInterface();
		const vk::VkPhysicalDevice				physicalDevice	= context.getContext().getPhysicalDevice();
		const vk::VkDevice						device			= context.getContext().getDevice();
		const vk::VkQueue						queue			= context.getContext().getQueue();
		const vk::VkCommandPool					commandPool		= context.getContext().getCommandPool();
		const vk::Unique<vk::VkCommandBuffer>	commandBuffer	(createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
		const vector<deUint32>&					queueFamilies	= context.getContext().getQueueFamilies();
		// 4 bytes per RGBA8 pixel.
		const vk::Unique<vk::VkBuffer>			dstBuffer		(createBuffer(vkd, device, 4 * m_targetWidth * m_targetHeight, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
		const vk::Unique<vk::VkDeviceMemory>	memory			(bindBufferMemory(vki, vkd, physicalDevice, device, *dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
		{
			// Make the color attachment writes available to the transfer read.
			// No layout transition needed: the render pass already left the
			// image in TRANSFER_SRC_OPTIMAL.
			const vk::VkImageMemoryBarrier		imageBarrier	=
			{
				vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
				DE_NULL,

				vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
				vk::VK_ACCESS_TRANSFER_READ_BIT,

				vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
				vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,

				VK_QUEUE_FAMILY_IGNORED,
				VK_QUEUE_FAMILY_IGNORED,

				*m_colorTarget,
				{
					vk::VK_IMAGE_ASPECT_COLOR_BIT,
					0,	// Mip level
					1,	// Mip level count
					0,	// Layer
					1	// Layer count
				}
			};
			// Make the transfer write visible to host reads after the wait.
			const vk::VkBufferMemoryBarrier bufferBarrier =
			{
				vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
				DE_NULL,

				vk::VK_ACCESS_TRANSFER_WRITE_BIT,
				vk::VK_ACCESS_HOST_READ_BIT,

				VK_QUEUE_FAMILY_IGNORED,
				VK_QUEUE_FAMILY_IGNORED,
				*dstBuffer,
				0,
				VK_WHOLE_SIZE
			};
			// Copy the whole color target (mip 0, layer 0) to offset 0.
			const vk::VkBufferImageCopy	region =
			{
				0,
				0, 0,
				{
					vk::VK_IMAGE_ASPECT_COLOR_BIT,
					0,	// mipLevel
					0,	// arrayLayer
					1	// layerCount
				},
				{ 0, 0, 0 },
				{
					(deUint32)m_targetWidth,
					(deUint32)m_targetHeight,
					1u
				}
			};

			vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &imageBarrier);
			vkd.cmdCopyImageToBuffer(*commandBuffer, *m_colorTarget, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *dstBuffer, 1, &region);
			vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &bufferBarrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
		}

		endCommandBuffer(vkd, *commandBuffer);
		submitCommandsAndWait(vkd, device, queue, *commandBuffer);

		// Map the readback buffer and compare pixel-exactly (zero threshold)
		// against the reference image built above.
		{
			void* const	ptr		= mapMemory(vkd, device, *memory, 4 * m_targetWidth * m_targetHeight);

			vk::invalidateMappedMemoryRange(vkd, device, *memory, 0, VK_WHOLE_SIZE);

			{
				const deUint8* const			data		= (const deUint8*)ptr;
				const ConstPixelBufferAccess	resAccess	(TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_targetWidth, m_targetHeight, 1, data);
				const ConstPixelBufferAccess&	refAccess	(verifyContext.getReferenceTarget().getAccess());

				if (!tcu::intThresholdCompare(context.getLog(), (de::toString(commandIndex) + ":" + getName()).c_str(), (de::toString(commandIndex) + ":" + getName()).c_str(), refAccess, resAccess, UVec4(0), tcu::COMPARE_LOG_ON_ERROR))
					resultCollector.fail(de::toString(commandIndex) + ":" + getName() + " Image comparison failed");
			}

			vkd.unmapMemory(device, *memory);
		}
	}
}
4537 
// CmdCommand that records its sub-commands into a secondary command buffer
// during prepare() and replays it with vkCmdExecuteCommands during submit().
class ExecuteSecondaryCommandBuffer : public CmdCommand
{
public:
				ExecuteSecondaryCommandBuffer	(const vector<CmdCommand*>& commands);
				~ExecuteSecondaryCommandBuffer	(void);
	const char*	getName							(void) const { return "ExecuteSecondaryCommandBuffer"; }

	void		logPrepare						(TestLog&, size_t) const;
	void		logSubmit						(TestLog&, size_t) const;

	void		prepare							(PrepareContext&);
	void		submit							(SubmitContext&);

	void		verify							(VerifyContext&, size_t);

private:
	vk::Move<vk::VkCommandBuffer>				m_commandBuffer;
	// NOTE(review): the color-target members below are never touched by this
	// class' methods - presumably copy-pasted from SubmitRenderPass; confirm
	// before removing.
	vk::Move<vk::VkDeviceMemory>				m_colorTargetMemory;
	de::MovePtr<vk::Allocation>					m_colorTargetMemory2;
	vk::Move<vk::VkImage>						m_colorTarget;
	vk::Move<vk::VkImageView>					m_colorTargetView;
	vk::Move<vk::VkFramebuffer>					m_framebuffer;
	vector<CmdCommand*>							m_commands;	// Owned; deleted in the destructor.
};
4562 
// Takes ownership of the given commands.
ExecuteSecondaryCommandBuffer::ExecuteSecondaryCommandBuffer(const vector<CmdCommand*>& commands)
	: m_commands		(commands)
{
}
4567 
~ExecuteSecondaryCommandBuffer(void)4568 ExecuteSecondaryCommandBuffer::~ExecuteSecondaryCommandBuffer (void)
4569 {
4570 	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4571 		delete m_commands[cmdNdx];
4572 }
4573 
logPrepare(TestLog & log,size_t commandIndex) const4574 void ExecuteSecondaryCommandBuffer::logPrepare (TestLog& log, size_t commandIndex) const
4575 {
4576 	const string				sectionName	(de::toString(commandIndex) + ":" + getName());
4577 	const tcu::ScopedLogSection	section		(log, sectionName, sectionName);
4578 
4579 	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4580 	{
4581 		CmdCommand& command = *m_commands[cmdNdx];
4582 		command.logPrepare(log, cmdNdx);
4583 	}
4584 }
4585 
logSubmit(TestLog & log,size_t commandIndex) const4586 void ExecuteSecondaryCommandBuffer::logSubmit (TestLog& log, size_t commandIndex) const
4587 {
4588 	const string				sectionName	(de::toString(commandIndex) + ":" + getName());
4589 	const tcu::ScopedLogSection	section		(log, sectionName, sectionName);
4590 
4591 	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4592 	{
4593 		CmdCommand& command = *m_commands[cmdNdx];
4594 		command.logSubmit(log, cmdNdx);
4595 	}
4596 }
4597 
prepare(PrepareContext & context)4598 void ExecuteSecondaryCommandBuffer::prepare (PrepareContext& context)
4599 {
4600 	const vk::DeviceInterface&		vkd				= context.getContext().getDeviceInterface();
4601 	const vk::VkDevice				device			= context.getContext().getDevice();
4602 	const vk::VkCommandPool			commandPool		= context.getContext().getCommandPool();
4603 
4604 	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4605 	{
4606 		CmdCommand& command = *m_commands[cmdNdx];
4607 
4608 		command.prepare(context);
4609 	}
4610 
4611 	m_commandBuffer = createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_SECONDARY);
4612 	{
4613 		SubmitContext submitContext (context, *m_commandBuffer);
4614 
4615 		for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4616 		{
4617 			CmdCommand& command = *m_commands[cmdNdx];
4618 
4619 			command.submit(submitContext);
4620 		}
4621 
4622 		endCommandBuffer(vkd, *m_commandBuffer);
4623 	}
4624 }
4625 
submit(SubmitContext & context)4626 void ExecuteSecondaryCommandBuffer::submit (SubmitContext& context)
4627 {
4628 	const vk::DeviceInterface&		vkd				= context.getContext().getDeviceInterface();
4629 	const vk::VkCommandBuffer		commandBuffer	= context.getCommandBuffer();
4630 
4631 
4632 	{
4633 		vkd.cmdExecuteCommands(commandBuffer, 1, &m_commandBuffer.get());
4634 	}
4635 }
4636 
verify(VerifyContext & context,size_t commandIndex)4637 void ExecuteSecondaryCommandBuffer::verify (VerifyContext& context, size_t commandIndex)
4638 {
4639 	const string				sectionName	(de::toString(commandIndex) + ":" + getName());
4640 	const tcu::ScopedLogSection	section		(context.getLog(), sectionName, sectionName);
4641 
4642 	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4643 		m_commands[cmdNdx]->verify(context, cmdNdx);
4644 }
4645 
// Bundle of the Vulkan objects produced by createPipelineWithResources().
// All members are RAII handles, so destruction order handles cleanup.
struct PipelineResources
{
	vk::Move<vk::VkPipeline>			pipeline;
	vk::Move<vk::VkDescriptorSetLayout>	descriptorSetLayout;	// Null handle when no bindings were given.
	vk::Move<vk::VkPipelineLayout>		pipelineLayout;
};
4652 
// Creates a graphics pipeline plus its layout objects and stores them in
// 'resources'. The descriptor set layout is only created when 'bindings' is
// non-empty; otherwise the pipeline layout has no descriptor sets. The
// pipeline uses a fixed viewport/scissor of viewPortWidth x viewPortHeight
// and only vertex + fragment stages (no tessellation or geometry).
void createPipelineWithResources (const vk::DeviceInterface&							vkd,
								  const vk::VkDevice									device,
								  const vk::VkRenderPass								renderPass,
								  const deUint32										subpass,
								  const vk::VkShaderModule&								vertexShaderModule,
								  const vk::VkShaderModule&								fragmentShaderModule,
								  const deUint32										viewPortWidth,
								  const deUint32										viewPortHeight,
								  const vector<vk::VkVertexInputBindingDescription>&	vertexBindingDescriptions,
								  const vector<vk::VkVertexInputAttributeDescription>&	vertexAttributeDescriptions,
								  const vector<vk::VkDescriptorSetLayoutBinding>&		bindings,
								  const vk::VkPrimitiveTopology							topology,
								  deUint32												pushConstantRangeCount,
								  const vk::VkPushConstantRange*						pushConstantRanges,
								  PipelineResources&									resources)
{
	if (!bindings.empty())
	{
		const vk::VkDescriptorSetLayoutCreateInfo createInfo =
		{
			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
			DE_NULL,

			0u,
			(deUint32)bindings.size(),
			// NOTE(review): the empty() check is redundant inside this branch.
			bindings.empty() ? DE_NULL : &bindings[0]
		};

		resources.descriptorSetLayout = vk::createDescriptorSetLayout(vkd, device, &createInfo);
	}

	{
		// When no bindings were given, descriptorSetLayout_ is a null handle
		// and the ternaries below produce a layout with zero set layouts.
		const vk::VkDescriptorSetLayout			descriptorSetLayout_	= *resources.descriptorSetLayout;
		const vk::VkPipelineLayoutCreateInfo	createInfo				=
		{
			vk::VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
			DE_NULL,
			0,

			resources.descriptorSetLayout ? 1u : 0u,
			resources.descriptorSetLayout ? &descriptorSetLayout_ : DE_NULL,

			pushConstantRangeCount,
			pushConstantRanges
		};

		resources.pipelineLayout = vk::createPipelineLayout(vkd, device, &createInfo);
	}

	{
		const std::vector<vk::VkViewport>				viewports			(1, vk::makeViewport(0.0f, 0.0f, (float)viewPortWidth, (float)viewPortHeight, 0.0f, 1.0f));
		const std::vector<vk::VkRect2D>					scissors			(1, vk::makeRect2D(0, 0, viewPortWidth, viewPortHeight));

		const vk::VkPipelineVertexInputStateCreateInfo	vertexInputState	=
		{
			vk::VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
			DE_NULL,
			0u,

			(deUint32)vertexBindingDescriptions.size(),
			vertexBindingDescriptions.empty() ? DE_NULL : &vertexBindingDescriptions[0],

			(deUint32)vertexAttributeDescriptions.size(),
			vertexAttributeDescriptions.empty() ? DE_NULL : &vertexAttributeDescriptions[0]
		};

		resources.pipeline = vk::makeGraphicsPipeline(vkd,							// const DeviceInterface&                        vk
													  device,						// const VkDevice                                device
													  *resources.pipelineLayout,	// const VkPipelineLayout                        pipelineLayout
													  vertexShaderModule,			// const VkShaderModule                          vertexShaderModule
													  DE_NULL,						// const VkShaderModule                          tessellationControlModule
													  DE_NULL,						// const VkShaderModule                          tessellationEvalModule
													  DE_NULL,						// const VkShaderModule                          geometryShaderModule
													  fragmentShaderModule,			// const VkShaderModule                          fragmentShaderModule
													  renderPass,					// const VkRenderPass                            renderPass
													  viewports,					// const std::vector<VkViewport>&                viewports
													  scissors,						// const std::vector<VkRect2D>&                  scissors
													  topology,						// const VkPrimitiveTopology                     topology
													  subpass,						// const deUint32                                subpass
													  0u,							// const deUint32                                patchControlPoints
													  &vertexInputState);			// const VkPipelineVertexInputStateCreateInfo*   vertexInputStateCreateInfo
	}
}
4736 
4737 class RenderIndexBuffer : public RenderPassCommand
4738 {
4739 public:
RenderIndexBuffer(void)4740 				RenderIndexBuffer	(void) {}
~RenderIndexBuffer(void)4741 				~RenderIndexBuffer	(void) {}
4742 
getName(void) const4743 	const char*	getName				(void) const { return "RenderIndexBuffer"; }
4744 	void		logPrepare			(TestLog&, size_t) const;
4745 	void		logSubmit			(TestLog&, size_t) const;
4746 	void		prepare				(PrepareRenderPassContext&);
4747 	void		submit				(SubmitContext& context);
4748 	void		verify				(VerifyRenderPassContext&, size_t);
4749 
4750 private:
4751 	PipelineResources				m_resources;
4752 	vk::VkDeviceSize				m_bufferSize;
4753 };
4754 
logPrepare(TestLog & log,size_t commandIndex) const4755 void RenderIndexBuffer::logPrepare (TestLog& log, size_t commandIndex) const
4756 {
4757 	log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as index buffer." << TestLog::EndMessage;
4758 }
4759 
logSubmit(TestLog & log,size_t commandIndex) const4760 void RenderIndexBuffer::logSubmit (TestLog& log, size_t commandIndex) const
4761 {
4762 	log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as index buffer." << TestLog::EndMessage;
4763 }
4764 
// Builds a point-list pipeline with no vertex input and no descriptors, and
// records the tested buffer's size for verify().
void RenderIndexBuffer::prepare (PrepareRenderPassContext& context)
{
	const vk::DeviceInterface&				vkd						= context.getContext().getDeviceInterface();
	const vk::VkDevice						device					= context.getContext().getDevice();
	const vk::VkRenderPass					renderPass				= context.getRenderPass();
	const deUint32							subpass					= 0;
	// "index-buffer.vert" presumably derives the point position from the
	// index value itself (the pipeline has no vertex input bindings).
	const vk::Unique<vk::VkShaderModule>	vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("index-buffer.vert"), 0));
	const vk::Unique<vk::VkShaderModule>	fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));

	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
								vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), vector<vk::VkDescriptorSetLayoutBinding>(), vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
	m_bufferSize = context.getBufferSize();
}
4778 
submit(SubmitContext & context)4779 void RenderIndexBuffer::submit (SubmitContext& context)
4780 {
4781 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
4782 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
4783 
4784 	vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
4785 	vkd.cmdBindIndexBuffer(commandBuffer, context.getBuffer(), 0, vk::VK_INDEX_TYPE_UINT16);
4786 	vkd.cmdDrawIndexed(commandBuffer, (deUint32)(context.getBufferSize() / 2), 1, 0, 0, 0);
4787 }
4788 
verify(VerifyRenderPassContext & context,size_t)4789 void RenderIndexBuffer::verify (VerifyRenderPassContext& context, size_t)
4790 {
4791 	for (size_t pos = 0; pos < (size_t)m_bufferSize / 2; pos++)
4792 	{
4793 		const deUint8 x  = context.getReference().get(pos * 2);
4794 		const deUint8 y  = context.getReference().get((pos * 2) + 1);
4795 
4796 		context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
4797 	}
4798 }
4799 
// RenderPassCommand that binds the tested buffer as a vertex buffer with the
// given stride and draws one point per stride-sized slot.
class RenderVertexBuffer : public RenderPassCommand
{
public:
				RenderVertexBuffer	(deUint32 stride)
					: m_stride(stride)
					, m_name("RenderVertexBuffer" + de::toString(stride))
					, m_bufferSize(0)
					{}
				~RenderVertexBuffer	(void) {}

	// Name includes the stride so different configurations are telling apart
	// in the logs.
	const char*	getName				(void) const { return m_name.c_str(); }
	void		logPrepare			(TestLog&, size_t) const;
	void		logSubmit			(TestLog&, size_t) const;
	void		prepare				(PrepareRenderPassContext&);
	void		submit				(SubmitContext& context);
	void		verify				(VerifyRenderPassContext&, size_t);

private:
	const deUint32		m_stride;
	const std::string	m_name;
	PipelineResources	m_resources;
	vk::VkDeviceSize	m_bufferSize;	// Set from the context in prepare().
};
4823 
logPrepare(TestLog & log,size_t commandIndex) const4824 void RenderVertexBuffer::logPrepare (TestLog& log, size_t commandIndex) const
4825 {
4826 	log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as vertex buffer." << TestLog::EndMessage;
4827 }
4828 
logSubmit(TestLog & log,size_t commandIndex) const4829 void RenderVertexBuffer::logSubmit (TestLog& log, size_t commandIndex) const
4830 {
4831 	log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as vertex buffer." << TestLog::EndMessage;
4832 }
4833 
// Builds a point-list pipeline with one vertex binding (stride m_stride) and
// one R8G8_UNORM attribute at offset 0, and records the buffer size.
void RenderVertexBuffer::prepare (PrepareRenderPassContext& context)
{
	const vk::DeviceInterface&						vkd						= context.getContext().getDeviceInterface();
	const vk::VkDevice								device					= context.getContext().getDevice();
	const vk::VkRenderPass							renderPass				= context.getRenderPass();
	const deUint32									subpass					= 0;
	const vk::Unique<vk::VkShaderModule>			vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("vertex-buffer.vert"), 0));
	const vk::Unique<vk::VkShaderModule>			fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));

	vector<vk::VkVertexInputAttributeDescription>	vertexAttributeDescriptions;
	vector<vk::VkVertexInputBindingDescription>		vertexBindingDescriptions;

	// Single per-vertex binding with the configured stride.
	{
		const vk::VkVertexInputBindingDescription vertexBindingDescription =
			{
				0,
				m_stride,
				vk::VK_VERTEX_INPUT_RATE_VERTEX
			};

		vertexBindingDescriptions.push_back(vertexBindingDescription);
	}
	// Single attribute: two unorm bytes at the start of each vertex slot.
	{
		const vk::VkVertexInputAttributeDescription vertexAttributeDescription =
		{
			0,
			0,
			vk::VK_FORMAT_R8G8_UNORM,
			0
		};

		vertexAttributeDescriptions.push_back(vertexAttributeDescription);
	}
	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
								vertexBindingDescriptions, vertexAttributeDescriptions, vector<vk::VkDescriptorSetLayoutBinding>(), vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);

	m_bufferSize = context.getBufferSize();
}
4872 
submit(SubmitContext & context)4873 void RenderVertexBuffer::submit (SubmitContext& context)
4874 {
4875 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
4876 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
4877 	const vk::VkDeviceSize		offset			= 0;
4878 	const vk::VkBuffer			buffer			= context.getBuffer();
4879 
4880 	vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
4881 	vkd.cmdBindVertexBuffers(commandBuffer, 0, 1, &buffer, &offset);
4882 	vkd.cmdDraw(commandBuffer, (deUint32)(context.getBufferSize() / m_stride), 1, 0, 0);
4883 }
4884 
verify(VerifyRenderPassContext & context,size_t)4885 void RenderVertexBuffer::verify (VerifyRenderPassContext& context, size_t)
4886 {
4887 	for (size_t pos = 0; pos < (size_t)m_bufferSize / m_stride; pos++)
4888 	{
4889 		const deUint8 x  = context.getReference().get(pos * m_stride);
4890 		const deUint8 y  = context.getReference().get((pos * m_stride) + 1);
4891 
4892 		context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
4893 	}
4894 }
4895 
// Renders points whose (x, y) coordinates are read from the test buffer bound
// as a uniform buffer. The buffer is split into MAX_UNIFORM_BUFFER_SIZE sized
// parts with one descriptor set per part (see prepare()).
class RenderVertexUniformBuffer : public RenderPassCommand
{
public:
									RenderVertexUniformBuffer	(void) {}
									~RenderVertexUniformBuffer	(void);

	const char*						getName						(void) const { return "RenderVertexUniformBuffer"; }
	void							logPrepare					(TestLog&, size_t) const;
	void							logSubmit					(TestLog&, size_t) const;
	void							prepare						(PrepareRenderPassContext&);
	void							submit						(SubmitContext& context);
	void							verify						(VerifyRenderPassContext&, size_t);

protected:

	// Size in bytes of the buffer slice referenced by descriptor set descriptorSetNdx
	// (MAX_UNIFORM_BUFFER_SIZE for all but possibly the last part).
	deUint32						calculateBufferPartSize		(size_t descriptorSetNdx) const;

private:
	PipelineResources				m_resources;
	vk::Move<vk::VkDescriptorPool>	m_descriptorPool;
	vector<vk::VkDescriptorSet>		m_descriptorSets;	// One set per buffer part; freed with the pool.

	vk::VkDeviceSize				m_bufferSize;		// Set in prepare(); rounded down to a multiple of 16.
};
4920 
// Nothing to release by hand: the descriptor pool and pipeline resources are
// RAII-managed (vk::Move), and the descriptor sets are freed with the pool.
RenderVertexUniformBuffer::~RenderVertexUniformBuffer (void)
{
}
4924 
logPrepare(TestLog & log,size_t commandIndex) const4925 void RenderVertexUniformBuffer::logPrepare (TestLog& log, size_t commandIndex) const
4926 {
4927 	log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as uniform buffer." << TestLog::EndMessage;
4928 }
4929 
logSubmit(TestLog & log,size_t commandIndex) const4930 void RenderVertexUniformBuffer::logSubmit (TestLog& log, size_t commandIndex) const
4931 {
4932 	log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as uniform buffer." << TestLog::EndMessage;
4933 }
4934 
// Build the pipeline, descriptor pool and one descriptor set per
// MAX_UNIFORM_BUFFER_SIZE sized slice of the test buffer.
void RenderVertexUniformBuffer::prepare (PrepareRenderPassContext& context)
{
	const vk::DeviceInterface&					vkd						= context.getContext().getDeviceInterface();
	const vk::VkDevice							device					= context.getContext().getDevice();
	const vk::VkRenderPass						renderPass				= context.getRenderPass();
	const deUint32								subpass					= 0;
	const vk::Unique<vk::VkShaderModule>		vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("uniform-buffer.vert"), 0));
	const vk::Unique<vk::VkShaderModule>		fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
	vector<vk::VkDescriptorSetLayoutBinding>	bindings;

	// make sure buffer size is multiple of 16 (in glsl we use uvec4 to store 16 values)
	m_bufferSize = context.getBufferSize();
	m_bufferSize = static_cast<vk::VkDeviceSize>(m_bufferSize / 16u) * 16u;

	{
		// Single uniform-buffer binding, visible to the vertex stage only.
		const vk::VkDescriptorSetLayoutBinding binding =
		{
			0u,
			vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
			1,
			vk::VK_SHADER_STAGE_VERTEX_BIT,
			DE_NULL
		};

		bindings.push_back(binding);
	}

	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
								vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);

	{
		// One descriptor per MAX_UNIFORM_BUFFER_SIZE sized buffer part (rounded up).
		const deUint32							descriptorCount	= (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE));
		const vk::VkDescriptorPoolSize			poolSizes		=
		{
			vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
			descriptorCount
		};
		const vk::VkDescriptorPoolCreateInfo	createInfo		=
		{
			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
			DE_NULL,
			vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,

			descriptorCount,
			1u,
			&poolSizes,
		};

		m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
		m_descriptorSets.resize(descriptorCount);
	}

	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
	{
		const vk::VkDescriptorSetLayout			layout			= *m_resources.descriptorSetLayout;
		const vk::VkDescriptorSetAllocateInfo	allocateInfo	=
		{
			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
			DE_NULL,

			*m_descriptorPool,
			1,
			&layout
		};

		// disown(): raw handle is kept in m_descriptorSets; the pool frees it on destruction.
		m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();

		{
			// Point each set at its own slice; the last slice may be shorter
			// (see calculateBufferPartSize()).
			const vk::VkDescriptorBufferInfo		bufferInfo	=
			{
				context.getBuffer(),
				(vk::VkDeviceSize)(descriptorSetNdx * (size_t)MAX_UNIFORM_BUFFER_SIZE),
				calculateBufferPartSize(descriptorSetNdx)
			};
			const vk::VkWriteDescriptorSet			write		=
			{
				vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
				DE_NULL,
				m_descriptorSets[descriptorSetNdx],
				0u,
				0u,
				1u,
				vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
				DE_NULL,
				&bufferInfo,
				DE_NULL,
			};

			vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
		}
	}
}
5027 
submit(SubmitContext & context)5028 void RenderVertexUniformBuffer::submit (SubmitContext& context)
5029 {
5030 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
5031 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
5032 
5033 	vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
5034 
5035 	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5036 	{
5037 		const size_t	size	= calculateBufferPartSize(descriptorSetNdx);
5038 		const deUint32	count	= (deUint32)(size / 2);
5039 
5040 		vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
5041 		vkd.cmdDraw(commandBuffer, count, 1, 0, 0);
5042 	}
5043 }
5044 
verify(VerifyRenderPassContext & context,size_t)5045 void RenderVertexUniformBuffer::verify (VerifyRenderPassContext& context, size_t)
5046 {
5047 	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5048 	{
5049 		const size_t	offset	= descriptorSetNdx * MAX_UNIFORM_BUFFER_SIZE;
5050 		const size_t	size	= calculateBufferPartSize(descriptorSetNdx);
5051 		const size_t	count	= size / 2;
5052 
5053 		for (size_t pos = 0; pos < count; pos++)
5054 		{
5055 			const deUint8 x  = context.getReference().get(offset + pos * 2);
5056 			const deUint8 y  = context.getReference().get(offset + (pos * 2) + 1);
5057 
5058 			context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
5059 		}
5060 	}
5061 }
5062 
calculateBufferPartSize(size_t descriptorSetNdx) const5063 deUint32 RenderVertexUniformBuffer::calculateBufferPartSize(size_t descriptorSetNdx) const
5064 {
5065 	deUint32 size = static_cast<deUint32>(m_bufferSize) - static_cast<deUint32>(descriptorSetNdx) * MAX_UNIFORM_BUFFER_SIZE;
5066 	if (size < MAX_UNIFORM_BUFFER_SIZE)
5067 		return size;
5068 	return MAX_UNIFORM_BUFFER_SIZE;
5069 }
5070 
// Renders points whose (x, y) coordinates are fetched from the test buffer
// through R16_UINT uniform texel buffer views, one view + descriptor set per
// maxTexelBufferElements sized part.
class RenderVertexUniformTexelBuffer : public RenderPassCommand
{
public:
				RenderVertexUniformTexelBuffer	(void) {}
				~RenderVertexUniformTexelBuffer	(void);

	const char*	getName							(void) const { return "RenderVertexUniformTexelBuffer"; }
	void		logPrepare						(TestLog&, size_t) const;
	void		logSubmit						(TestLog&, size_t) const;
	void		prepare							(PrepareRenderPassContext&);
	void		submit							(SubmitContext& context);
	void		verify							(VerifyRenderPassContext&, size_t);

private:
	PipelineResources				m_resources;
	vk::Move<vk::VkDescriptorPool>	m_descriptorPool;
	vector<vk::VkDescriptorSet>		m_descriptorSets;	// Freed with the pool.
	vector<vk::VkBufferView>		m_bufferViews;		// Raw handles, destroyed in the destructor.

	// Cached in prepare() so the destructor can destroy the buffer views.
	const vk::DeviceInterface*		m_vkd;
	vk::VkDevice					m_device;
	vk::VkDeviceSize				m_bufferSize;
	deUint32						m_maxUniformTexelCount;	// limits.maxTexelBufferElements.
};
5095 
~RenderVertexUniformTexelBuffer(void)5096 RenderVertexUniformTexelBuffer::~RenderVertexUniformTexelBuffer (void)
5097 {
5098 	for (size_t bufferViewNdx = 0; bufferViewNdx < m_bufferViews.size(); bufferViewNdx++)
5099 	{
5100 		if (!!m_bufferViews[bufferViewNdx])
5101 		{
5102 			m_vkd->destroyBufferView(m_device, m_bufferViews[bufferViewNdx], DE_NULL);
5103 			m_bufferViews[bufferViewNdx] = (vk::VkBufferView)0;
5104 		}
5105 	}
5106 }
5107 
logPrepare(TestLog & log,size_t commandIndex) const5108 void RenderVertexUniformTexelBuffer::logPrepare (TestLog& log, size_t commandIndex) const
5109 {
5110 	log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as uniform buffer." << TestLog::EndMessage;
5111 }
5112 
logSubmit(TestLog & log,size_t commandIndex) const5113 void RenderVertexUniformTexelBuffer::logSubmit (TestLog& log, size_t commandIndex) const
5114 {
5115 	log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as uniform buffer." << TestLog::EndMessage;
5116 }
5117 
// Build the pipeline and, per maxTexelBufferElements sized part of the buffer,
// an R16_UINT buffer view plus a descriptor set referencing it.
void RenderVertexUniformTexelBuffer::prepare (PrepareRenderPassContext& context)
{
	const vk::InstanceInterface&				vki						= context.getContext().getInstanceInterface();
	const vk::VkPhysicalDevice					physicalDevice			= context.getContext().getPhysicalDevice();
	const vk::DeviceInterface&					vkd						= context.getContext().getDeviceInterface();
	const vk::VkDevice							device					= context.getContext().getDevice();
	const vk::VkRenderPass						renderPass				= context.getRenderPass();
	const deUint32								subpass					= 0;
	const vk::Unique<vk::VkShaderModule>		vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("uniform-texel-buffer.vert"), 0));
	const vk::Unique<vk::VkShaderModule>		fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
	vector<vk::VkDescriptorSetLayoutBinding>	bindings;

	// Cache device and interface for the destructor (buffer views are destroyed there).
	m_device				= device;
	m_vkd					= &vkd;
	m_bufferSize			= context.getBufferSize();
	m_maxUniformTexelCount	= vk::getPhysicalDeviceProperties(vki, physicalDevice).limits.maxTexelBufferElements;

	{
		// Single uniform texel buffer binding, vertex stage only.
		const vk::VkDescriptorSetLayoutBinding binding =
		{
			0u,
			vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
			1,
			vk::VK_SHADER_STAGE_VERTEX_BIT,
			DE_NULL
		};

		bindings.push_back(binding);
	}

	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
								vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);

	{
		// Each view holds up to m_maxUniformTexelCount R16 texels, i.e. 2 bytes per texel.
		const deUint32							descriptorCount	= (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)m_maxUniformTexelCount * 2));
		const vk::VkDescriptorPoolSize			poolSizes		=
		{
			vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
			descriptorCount
		};
		const vk::VkDescriptorPoolCreateInfo	createInfo		=
		{
			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
			DE_NULL,
			vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,

			descriptorCount,
			1u,
			&poolSizes,
		};

		m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
		m_descriptorSets.resize(descriptorCount, (vk::VkDescriptorSet)0);
		m_bufferViews.resize(descriptorCount, (vk::VkBufferView)0);
	}

	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
	{
		// Number of R16 texels in this part; the last part covers only what remains.
		const deUint32							count			= (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxUniformTexelCount * 2
																? m_bufferSize - descriptorSetNdx * m_maxUniformTexelCount * 2
																: m_maxUniformTexelCount * 2) / 2;
		const vk::VkDescriptorSetLayout			layout			= *m_resources.descriptorSetLayout;
		const vk::VkDescriptorSetAllocateInfo	allocateInfo	=
		{
			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
			DE_NULL,

			*m_descriptorPool,
			1,
			&layout
		};

		// disown(): raw handle stored in m_descriptorSets, freed with the pool.
		m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();

		{
			// View over this part's byte range (offset and range are in bytes).
			const vk::VkBufferViewCreateInfo createInfo =
			{
				vk::VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
				DE_NULL,
				0u,

				context.getBuffer(),
				vk::VK_FORMAT_R16_UINT,
				descriptorSetNdx * m_maxUniformTexelCount * 2,
				count * 2
			};

			VK_CHECK(vkd.createBufferView(device, &createInfo, DE_NULL, &m_bufferViews[descriptorSetNdx]));
		}

		{
			const vk::VkWriteDescriptorSet			write		=
			{
				vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
				DE_NULL,
				m_descriptorSets[descriptorSetNdx],
				0u,
				0u,
				1u,
				vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
				DE_NULL,
				DE_NULL,
				&m_bufferViews[descriptorSetNdx]
			};

			vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
		}
	}
}
5227 
submit(SubmitContext & context)5228 void RenderVertexUniformTexelBuffer::submit (SubmitContext& context)
5229 {
5230 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
5231 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
5232 
5233 	vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
5234 
5235 	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5236 	{
5237 		const deUint32 count	= (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxUniformTexelCount * 2
5238 								? m_bufferSize - descriptorSetNdx * m_maxUniformTexelCount * 2
5239 								: m_maxUniformTexelCount * 2) / 2;
5240 
5241 		vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
5242 		vkd.cmdDraw(commandBuffer, count, 1, 0, 0);
5243 	}
5244 }
5245 
verify(VerifyRenderPassContext & context,size_t)5246 void RenderVertexUniformTexelBuffer::verify (VerifyRenderPassContext& context, size_t)
5247 {
5248 	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5249 	{
5250 		const size_t	offset	= descriptorSetNdx * m_maxUniformTexelCount * 2;
5251 		const deUint32	count	= (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxUniformTexelCount * 2
5252 								? m_bufferSize - descriptorSetNdx * m_maxUniformTexelCount * 2
5253 								: m_maxUniformTexelCount * 2) / 2;
5254 
5255 		for (size_t pos = 0; pos < (size_t)count; pos++)
5256 		{
5257 			const deUint8 x  = context.getReference().get(offset + pos * 2);
5258 			const deUint8 y  = context.getReference().get(offset + (pos * 2) + 1);
5259 
5260 			context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
5261 		}
5262 	}
5263 }
5264 
// Renders points whose (x, y) coordinates are read from the test buffer bound
// as a storage buffer, split into MAX_STORAGE_BUFFER_SIZE sized parts with one
// descriptor set per part.
class RenderVertexStorageBuffer : public RenderPassCommand
{
public:
				RenderVertexStorageBuffer	(void) {}
				~RenderVertexStorageBuffer	(void);

	const char*	getName						(void) const { return "RenderVertexStorageBuffer"; }
	void		logPrepare					(TestLog&, size_t) const;
	void		logSubmit					(TestLog&, size_t) const;
	void		prepare						(PrepareRenderPassContext&);
	void		submit						(SubmitContext& context);
	void		verify						(VerifyRenderPassContext&, size_t);

private:
	PipelineResources				m_resources;
	vk::Move<vk::VkDescriptorPool>	m_descriptorPool;
	vector<vk::VkDescriptorSet>		m_descriptorSets;	// One set per buffer part; freed with the pool.

	vk::VkDeviceSize				m_bufferSize;		// Full test buffer size, cached in prepare().
};
5285 
// Nothing to release by hand: pool and pipeline resources are RAII-managed.
RenderVertexStorageBuffer::~RenderVertexStorageBuffer (void)
{
}
5289 
logPrepare(TestLog & log,size_t commandIndex) const5290 void RenderVertexStorageBuffer::logPrepare (TestLog& log, size_t commandIndex) const
5291 {
5292 	log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as storage buffer." << TestLog::EndMessage;
5293 }
5294 
logSubmit(TestLog & log,size_t commandIndex) const5295 void RenderVertexStorageBuffer::logSubmit (TestLog& log, size_t commandIndex) const
5296 {
5297 	log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as storage buffer." << TestLog::EndMessage;
5298 }
5299 
// Build the pipeline, descriptor pool and one descriptor set per
// MAX_STORAGE_BUFFER_SIZE sized slice of the test buffer.
void RenderVertexStorageBuffer::prepare (PrepareRenderPassContext& context)
{
	const vk::DeviceInterface&					vkd						= context.getContext().getDeviceInterface();
	const vk::VkDevice							device					= context.getContext().getDevice();
	const vk::VkRenderPass						renderPass				= context.getRenderPass();
	const deUint32								subpass					= 0;
	const vk::Unique<vk::VkShaderModule>		vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("storage-buffer.vert"), 0));
	const vk::Unique<vk::VkShaderModule>		fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
	vector<vk::VkDescriptorSetLayoutBinding>	bindings;

	m_bufferSize = context.getBufferSize();

	{
		// Single storage-buffer binding, vertex stage only.
		const vk::VkDescriptorSetLayoutBinding binding =
		{
			0u,
			vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
			1,
			vk::VK_SHADER_STAGE_VERTEX_BIT,
			DE_NULL
		};

		bindings.push_back(binding);
	}

	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
								vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);

	{
		// One descriptor per MAX_STORAGE_BUFFER_SIZE sized buffer part (rounded up).
		const deUint32							descriptorCount	= (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)MAX_STORAGE_BUFFER_SIZE));
		const vk::VkDescriptorPoolSize			poolSizes		=
		{
			vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
			descriptorCount
		};
		const vk::VkDescriptorPoolCreateInfo	createInfo		=
		{
			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
			DE_NULL,
			vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,

			descriptorCount,
			1u,
			&poolSizes,
		};

		m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
		m_descriptorSets.resize(descriptorCount);
	}

	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
	{
		const vk::VkDescriptorSetLayout			layout			= *m_resources.descriptorSetLayout;
		const vk::VkDescriptorSetAllocateInfo	allocateInfo	=
		{
			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
			DE_NULL,

			*m_descriptorPool,
			1,
			&layout
		};

		// disown(): raw handle stored in m_descriptorSets, freed with the pool.
		m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();

		{
			// Each set references its own slice; the last slice is clamped to
			// whatever remains of the buffer.
			const vk::VkDescriptorBufferInfo		bufferInfo	=
			{
				context.getBuffer(),
				descriptorSetNdx * MAX_STORAGE_BUFFER_SIZE,
				de::min(m_bufferSize - descriptorSetNdx * MAX_STORAGE_BUFFER_SIZE,  (vk::VkDeviceSize)MAX_STORAGE_BUFFER_SIZE)
			};
			const vk::VkWriteDescriptorSet			write		=
			{
				vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
				DE_NULL,
				m_descriptorSets[descriptorSetNdx],
				0u,
				0u,
				1u,
				vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
				DE_NULL,
				&bufferInfo,
				DE_NULL,
			};

			vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
		}
	}
}
5390 
submit(SubmitContext & context)5391 void RenderVertexStorageBuffer::submit (SubmitContext& context)
5392 {
5393 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
5394 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
5395 
5396 	vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
5397 
5398 	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5399 	{
5400 		const size_t size	= m_bufferSize < (descriptorSetNdx + 1) * MAX_STORAGE_BUFFER_SIZE
5401 							? (size_t)(m_bufferSize - descriptorSetNdx * MAX_STORAGE_BUFFER_SIZE)
5402 							: (size_t)(MAX_STORAGE_BUFFER_SIZE);
5403 
5404 		vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
5405 		vkd.cmdDraw(commandBuffer, (deUint32)(size / 2), 1, 0, 0);
5406 	}
5407 }
5408 
verify(VerifyRenderPassContext & context,size_t)5409 void RenderVertexStorageBuffer::verify (VerifyRenderPassContext& context, size_t)
5410 {
5411 	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5412 	{
5413 		const size_t offset	= descriptorSetNdx * MAX_STORAGE_BUFFER_SIZE;
5414 		const size_t size	= m_bufferSize < (descriptorSetNdx + 1) * MAX_STORAGE_BUFFER_SIZE
5415 							? (size_t)(m_bufferSize - descriptorSetNdx * MAX_STORAGE_BUFFER_SIZE)
5416 							: (size_t)(MAX_STORAGE_BUFFER_SIZE);
5417 
5418 		for (size_t pos = 0; pos < size / 2; pos++)
5419 		{
5420 			const deUint8 x  = context.getReference().get(offset + pos * 2);
5421 			const deUint8 y  = context.getReference().get(offset + (pos * 2) + 1);
5422 
5423 			context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
5424 		}
5425 	}
5426 }
5427 
// Renders points whose (x, y) coordinates are fetched from the test buffer
// through R32_UINT storage texel buffer views, one view + descriptor set per
// maxTexelBufferElements sized part.
class RenderVertexStorageTexelBuffer : public RenderPassCommand
{
public:
				RenderVertexStorageTexelBuffer	(void) {}
				~RenderVertexStorageTexelBuffer	(void);

	const char*	getName							(void) const { return "RenderVertexStorageTexelBuffer"; }
	void		logPrepare						(TestLog&, size_t) const;
	void		logSubmit						(TestLog&, size_t) const;
	void		prepare							(PrepareRenderPassContext&);
	void		submit							(SubmitContext& context);
	void		verify							(VerifyRenderPassContext&, size_t);

private:
	PipelineResources				m_resources;
	vk::Move<vk::VkDescriptorPool>	m_descriptorPool;
	vector<vk::VkDescriptorSet>		m_descriptorSets;	// Freed with the pool.
	vector<vk::VkBufferView>		m_bufferViews;		// Raw handles, destroyed in the destructor.

	// Cached in prepare() so the destructor can destroy the buffer views.
	const vk::DeviceInterface*		m_vkd;
	vk::VkDevice					m_device;
	vk::VkDeviceSize				m_bufferSize;
	deUint32						m_maxStorageTexelCount;	// limits.maxTexelBufferElements.
};
5452 
~RenderVertexStorageTexelBuffer(void)5453 RenderVertexStorageTexelBuffer::~RenderVertexStorageTexelBuffer (void)
5454 {
5455 	for (size_t bufferViewNdx = 0; bufferViewNdx < m_bufferViews.size(); bufferViewNdx++)
5456 	{
5457 		if (!!m_bufferViews[bufferViewNdx])
5458 		{
5459 			m_vkd->destroyBufferView(m_device, m_bufferViews[bufferViewNdx], DE_NULL);
5460 			m_bufferViews[bufferViewNdx] = (vk::VkBufferView)0;
5461 		}
5462 	}
5463 }
5464 
logPrepare(TestLog & log,size_t commandIndex) const5465 void RenderVertexStorageTexelBuffer::logPrepare (TestLog& log, size_t commandIndex) const
5466 {
5467 	log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as storage buffer." << TestLog::EndMessage;
5468 }
5469 
logSubmit(TestLog & log,size_t commandIndex) const5470 void RenderVertexStorageTexelBuffer::logSubmit (TestLog& log, size_t commandIndex) const
5471 {
5472 	log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as storage buffer." << TestLog::EndMessage;
5473 }
5474 
// Build the pipeline and, per maxTexelBufferElements sized part of the buffer,
// an R32_UINT buffer view plus a descriptor set referencing it.
void RenderVertexStorageTexelBuffer::prepare (PrepareRenderPassContext& context)
{
	const vk::InstanceInterface&				vki						= context.getContext().getInstanceInterface();
	const vk::VkPhysicalDevice					physicalDevice			= context.getContext().getPhysicalDevice();
	const vk::DeviceInterface&					vkd						= context.getContext().getDeviceInterface();
	const vk::VkDevice							device					= context.getContext().getDevice();
	const vk::VkRenderPass						renderPass				= context.getRenderPass();
	const deUint32								subpass					= 0;
	const vk::Unique<vk::VkShaderModule>		vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("storage-texel-buffer.vert"), 0));
	const vk::Unique<vk::VkShaderModule>		fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
	vector<vk::VkDescriptorSetLayoutBinding>	bindings;

	// Cache device and interface for the destructor (buffer views are destroyed there).
	m_device				= device;
	m_vkd					= &vkd;
	m_bufferSize			= context.getBufferSize();
	m_maxStorageTexelCount	= vk::getPhysicalDeviceProperties(vki, physicalDevice).limits.maxTexelBufferElements;

	{
		// Single storage texel buffer binding, vertex stage only.
		const vk::VkDescriptorSetLayoutBinding binding =
		{
			0u,
			vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
			1,
			vk::VK_SHADER_STAGE_VERTEX_BIT,
			DE_NULL
		};

		bindings.push_back(binding);
	}

	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
								vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);

	{
		// Each view holds up to m_maxStorageTexelCount R32 texels, i.e. 4 bytes per texel.
		const deUint32							descriptorCount	= (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)m_maxStorageTexelCount * 4));
		const vk::VkDescriptorPoolSize			poolSizes		=
		{
			vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
			descriptorCount
		};
		const vk::VkDescriptorPoolCreateInfo	createInfo		=
		{
			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
			DE_NULL,
			vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,

			descriptorCount,
			1u,
			&poolSizes,
		};

		m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
		m_descriptorSets.resize(descriptorCount, (vk::VkDescriptorSet)0);
		m_bufferViews.resize(descriptorCount, (vk::VkBufferView)0);
	}

	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
	{
		const vk::VkDescriptorSetLayout			layout			= *m_resources.descriptorSetLayout;
		const vk::VkDescriptorSetAllocateInfo	allocateInfo	=
		{
			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
			DE_NULL,

			*m_descriptorPool,
			1,
			&layout
		};

		// disown(): raw handle stored in m_descriptorSets, freed with the pool.
		m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();

		{
			// View over this part's byte range (offset and range are in bytes);
			// the last view is clamped to the remaining buffer size.
			const vk::VkBufferViewCreateInfo createInfo =
			{
				vk::VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
				DE_NULL,
				0u,

				context.getBuffer(),
				vk::VK_FORMAT_R32_UINT,
				descriptorSetNdx * m_maxStorageTexelCount * 4,
				(deUint32)de::min<vk::VkDeviceSize>(m_maxStorageTexelCount * 4, m_bufferSize - descriptorSetNdx * m_maxStorageTexelCount * 4)
			};

			VK_CHECK(vkd.createBufferView(device, &createInfo, DE_NULL, &m_bufferViews[descriptorSetNdx]));
		}

		{
			const vk::VkWriteDescriptorSet			write		=
			{
				vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
				DE_NULL,
				m_descriptorSets[descriptorSetNdx],
				0u,
				0u,
				1u,
				vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
				DE_NULL,
				DE_NULL,
				&m_bufferViews[descriptorSetNdx]
			};

			vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
		}
	}
}
5581 
submit(SubmitContext & context)5582 void RenderVertexStorageTexelBuffer::submit (SubmitContext& context)
5583 {
5584 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
5585 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
5586 
5587 	vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
5588 
5589 	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5590 	{
5591 		const deUint32 count	= (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxStorageTexelCount * 4
5592 								? m_bufferSize - descriptorSetNdx * m_maxStorageTexelCount * 4
5593 								: m_maxStorageTexelCount * 4) / 2;
5594 
5595 		vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
5596 		vkd.cmdDraw(commandBuffer, count, 1, 0, 0);
5597 	}
5598 }
5599 
verify(VerifyRenderPassContext & context,size_t)5600 void RenderVertexStorageTexelBuffer::verify (VerifyRenderPassContext& context, size_t)
5601 {
5602 	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5603 	{
5604 		const size_t	offset	= descriptorSetNdx * m_maxStorageTexelCount * 4;
5605 		const deUint32	count	= (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxStorageTexelCount * 4
5606 								? m_bufferSize - descriptorSetNdx * m_maxStorageTexelCount * 4
5607 								: m_maxStorageTexelCount * 4) / 2;
5608 
5609 		DE_ASSERT(context.getReference().getSize() <= 4 * m_maxStorageTexelCount * m_descriptorSets.size());
5610 		DE_ASSERT(context.getReference().getSize() > offset);
5611 		DE_ASSERT(offset + count * 2 <= context.getReference().getSize());
5612 
5613 		for (size_t pos = 0; pos < (size_t)count; pos++)
5614 		{
5615 			const deUint8 x = context.getReference().get(offset + pos * 2);
5616 			const deUint8 y = context.getReference().get(offset + (pos * 2) + 1);
5617 
5618 			context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
5619 		}
5620 	}
5621 }
5622 
// Renders points whose coordinates are read from a storage image bound through
// a single descriptor set (image view created in prepare()).
class RenderVertexStorageImage : public RenderPassCommand
{
public:
				RenderVertexStorageImage	(void) {}
				~RenderVertexStorageImage	(void);

	const char*	getName						(void) const { return "RenderVertexStorageImage"; }
	void		logPrepare					(TestLog&, size_t) const;
	void		logSubmit					(TestLog&, size_t) const;
	void		prepare						(PrepareRenderPassContext&);
	void		submit						(SubmitContext& context);
	void		verify						(VerifyRenderPassContext&, size_t);

private:
	PipelineResources				m_resources;
	vk::Move<vk::VkDescriptorPool>	m_descriptorPool;
	vk::Move<vk::VkDescriptorSet>	m_descriptorSet;	// Single set; RAII-managed.
	vk::Move<vk::VkImageView>		m_imageView;
};
5642 
// Nothing to release by hand: all members are vk::Move wrappers.
RenderVertexStorageImage::~RenderVertexStorageImage (void)
{
}
5646 
logPrepare(TestLog & log,size_t commandIndex) const5647 void RenderVertexStorageImage::logPrepare (TestLog& log, size_t commandIndex) const
5648 {
5649 	log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render storage image." << TestLog::EndMessage;
5650 }
5651 
logSubmit(TestLog & log,size_t commandIndex) const5652 void RenderVertexStorageImage::logSubmit (TestLog& log, size_t commandIndex) const
5653 {
5654 	log << TestLog::Message << commandIndex << ":" << getName() << " Render using storage image." << TestLog::EndMessage;
5655 }
5656 
void RenderVertexStorageImage::prepare (PrepareRenderPassContext& context)
{
	// Build everything needed to read the test image as a storage image from
	// the vertex shader: pipeline, descriptor pool/set and an image view,
	// then write the view into the descriptor set.
	const vk::DeviceInterface&					vkd						= context.getContext().getDeviceInterface();
	const vk::VkDevice							device					= context.getContext().getDevice();
	const vk::VkRenderPass						renderPass				= context.getRenderPass();
	const deUint32								subpass					= 0;
	const vk::Unique<vk::VkShaderModule>		vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("storage-image.vert"), 0));
	const vk::Unique<vk::VkShaderModule>		fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
	vector<vk::VkDescriptorSetLayoutBinding>	bindings;

	// Single storage-image binding (binding 0) visible to the vertex stage only.
	{
		const vk::VkDescriptorSetLayoutBinding binding =
		{
			0u,										// binding
			vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,	// descriptorType
			1,										// descriptorCount
			vk::VK_SHADER_STAGE_VERTEX_BIT,			// stageFlags
			DE_NULL									// pImmutableSamplers
		};

		bindings.push_back(binding);
	}

	// No vertex input bindings/attributes: the shader fetches data from the image.
	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
								vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);

	{
		const vk::VkDescriptorPoolSize			poolSizes		=
		{
			vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
			1
		};
		const vk::VkDescriptorPoolCreateInfo	createInfo		=
		{
			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
			DE_NULL,
			vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,

			1u,				// maxSets
			1u,				// poolSizeCount
			&poolSizes,
		};

		m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
	}

	{
		const vk::VkDescriptorSetLayout			layout			= *m_resources.descriptorSetLayout;
		const vk::VkDescriptorSetAllocateInfo	allocateInfo	=
		{
			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
			DE_NULL,

			*m_descriptorPool,
			1,				// descriptorSetCount
			&layout
		};

		m_descriptorSet = vk::allocateDescriptorSet(vkd, device, &allocateInfo);

		// 2D color view of the whole image (one mip level, one array layer).
		{
			const vk::VkImageViewCreateInfo createInfo =
			{
				vk::VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
				DE_NULL,
				0u,			// flags

				context.getImage(),
				vk::VK_IMAGE_VIEW_TYPE_2D,
				vk::VK_FORMAT_R8G8B8A8_UNORM,
				vk::makeComponentMappingRGBA(),
				{
					vk::VK_IMAGE_ASPECT_COLOR_BIT,
					0u,		// baseMipLevel
					1u,		// levelCount
					0u,		// baseArrayLayer
					1u		// layerCount
				}
			};

			m_imageView = vk::createImageView(vkd, device, &createInfo);
		}

		// Point the descriptor at the view; storage images do not use a sampler.
		{
			const vk::VkDescriptorImageInfo			imageInfo	=
			{
				0,			// sampler (ignored for storage images)
				*m_imageView,
				context.getImageLayout()
			};
			const vk::VkWriteDescriptorSet			write		=
			{
				vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
				DE_NULL,
				*m_descriptorSet,
				0u,			// dstBinding
				0u,			// dstArrayElement
				1u,			// descriptorCount
				vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
				&imageInfo,
				DE_NULL,	// pBufferInfo
				DE_NULL,	// pTexelBufferView
			};

			vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
		}
	}
}
5765 
submit(SubmitContext & context)5766 void RenderVertexStorageImage::submit (SubmitContext& context)
5767 {
5768 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
5769 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
5770 
5771 	vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
5772 
5773 	vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &(*m_descriptorSet), 0u, DE_NULL);
5774 	vkd.cmdDraw(commandBuffer, context.getImageWidth() * context.getImageHeight() * 2, 1, 0, 0);
5775 }
5776 
void RenderVertexStorageImage::verify (VerifyRenderPassContext& context, size_t)
{
	// Build the reference: each pixel of the reference image contributes two
	// points -- even invocations use its (r, g) channels as coordinates, odd
	// ones (b, a) -- and every point is plotted white into the reference target.
	for (int pos = 0; pos < (int)(context.getReferenceImage().getWidth() * context.getReferenceImage().getHeight() * 2); pos++)
	{
		const tcu::IVec3		size	= context.getReferenceImage().getAccess().getSize();
		// NOTE(review): arguments mirror the shader's indexing; (pos / 2) is
		// decomposed by image width into the sampled texel coordinates.
		const tcu::UVec4		pixel	= context.getReferenceImage().getAccess().getPixelUint((pos / 2) / size.x(), (pos / 2) % size.x());

		if (pos % 2 == 0)
			context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), pixel.x(), pixel.y());
		else
			context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), pixel.z(), pixel.w());
	}
}
5790 
// Render-pass command that samples the test image (combined image sampler) in
// the vertex stage and draws white point primitives
// (shaders "sampled-image.vert" / "render-white.frag").
class RenderVertexSampledImage : public RenderPassCommand
{
public:
				RenderVertexSampledImage	(void) {}
				~RenderVertexSampledImage	(void);

	const char*	getName						(void) const { return "RenderVertexSampledImage"; }
	void		logPrepare					(TestLog&, size_t) const;
	void		logSubmit					(TestLog&, size_t) const;
	void		prepare						(PrepareRenderPassContext&);
	void		submit						(SubmitContext& context);
	void		verify						(VerifyRenderPassContext&, size_t);

private:
	PipelineResources				m_resources;		// Pipeline, pipeline layout and descriptor set layout.
	vk::Move<vk::VkDescriptorPool>	m_descriptorPool;
	vk::Move<vk::VkDescriptorSet>	m_descriptorSet;	// Single combined image sampler descriptor, binding 0.
	vk::Move<vk::VkImageView>		m_imageView;		// 2D view of the test image.
	vk::Move<vk::VkSampler>			m_sampler;			// Nearest-filtering sampler used with the view.
};
5811 
RenderVertexSampledImage::~RenderVertexSampledImage (void)
{
	// Nothing to do: all Vulkan handles are owned by vk::Move wrappers.
}
5815 
logPrepare(TestLog & log,size_t commandIndex) const5816 void RenderVertexSampledImage::logPrepare (TestLog& log, size_t commandIndex) const
5817 {
5818 	log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render sampled image." << TestLog::EndMessage;
5819 }
5820 
logSubmit(TestLog & log,size_t commandIndex) const5821 void RenderVertexSampledImage::logSubmit (TestLog& log, size_t commandIndex) const
5822 {
5823 	log << TestLog::Message << commandIndex << ":" << getName() << " Render using sampled image." << TestLog::EndMessage;
5824 }
5825 
void RenderVertexSampledImage::prepare (PrepareRenderPassContext& context)
{
	// Build pipeline, descriptor pool/set, image view and sampler so the
	// vertex shader can sample the test image as a combined image sampler.
	const vk::DeviceInterface&					vkd						= context.getContext().getDeviceInterface();
	const vk::VkDevice							device					= context.getContext().getDevice();
	const vk::VkRenderPass						renderPass				= context.getRenderPass();
	const deUint32								subpass					= 0;
	const vk::Unique<vk::VkShaderModule>		vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("sampled-image.vert"), 0));
	const vk::Unique<vk::VkShaderModule>		fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
	vector<vk::VkDescriptorSetLayoutBinding>	bindings;

	// Single combined-image-sampler binding (binding 0) for the vertex stage.
	{
		const vk::VkDescriptorSetLayoutBinding binding =
		{
			0u,												// binding
			vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,	// descriptorType
			1,												// descriptorCount
			vk::VK_SHADER_STAGE_VERTEX_BIT,					// stageFlags
			DE_NULL											// pImmutableSamplers
		};

		bindings.push_back(binding);
	}

	// No vertex input bindings/attributes: the shader fetches data from the image.
	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
								vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);

	{
		const vk::VkDescriptorPoolSize			poolSizes		=
		{
			vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
			1
		};
		const vk::VkDescriptorPoolCreateInfo	createInfo		=
		{
			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
			DE_NULL,
			vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,

			1u,				// maxSets
			1u,				// poolSizeCount
			&poolSizes,
		};

		m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
	}

	{
		const vk::VkDescriptorSetLayout			layout			= *m_resources.descriptorSetLayout;
		const vk::VkDescriptorSetAllocateInfo	allocateInfo	=
		{
			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
			DE_NULL,

			*m_descriptorPool,
			1,				// descriptorSetCount
			&layout
		};

		m_descriptorSet = vk::allocateDescriptorSet(vkd, device, &allocateInfo);

		// 2D color view of the whole image (one mip level, one array layer).
		{
			const vk::VkImageViewCreateInfo createInfo =
			{
				vk::VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
				DE_NULL,
				0u,			// flags

				context.getImage(),
				vk::VK_IMAGE_VIEW_TYPE_2D,
				vk::VK_FORMAT_R8G8B8A8_UNORM,
				vk::makeComponentMappingRGBA(),
				{
					vk::VK_IMAGE_ASPECT_COLOR_BIT,
					0u,		// baseMipLevel
					1u,		// levelCount
					0u,		// baseArrayLayer
					1u		// layerCount
				}
			};

			m_imageView = vk::createImageView(vkd, device, &createInfo);
		}

		// Nearest-filtering, clamp-to-edge sampler with normalized coordinates.
		{
			const vk::VkSamplerCreateInfo createInfo =
			{
				vk::VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
				DE_NULL,
				0u,			// flags

				vk::VK_FILTER_NEAREST,							// magFilter
				vk::VK_FILTER_NEAREST,							// minFilter

				vk::VK_SAMPLER_MIPMAP_MODE_LINEAR,				// mipmapMode
				vk::VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,		// addressModeU
				vk::VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,		// addressModeV
				vk::VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,		// addressModeW
				0.0f,											// mipLodBias
				VK_FALSE,										// anisotropyEnable
				1.0f,											// maxAnisotropy
				VK_FALSE,										// compareEnable
				vk::VK_COMPARE_OP_ALWAYS,						// compareOp
				0.0f,											// minLod
				0.0f,											// maxLod
				vk::VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK,	// borderColor
				VK_FALSE										// unnormalizedCoordinates
			};

			m_sampler = vk::createSampler(vkd, device, &createInfo);
		}

		// Write sampler + view into the combined image sampler descriptor.
		{
			const vk::VkDescriptorImageInfo			imageInfo	=
			{
				*m_sampler,
				*m_imageView,
				context.getImageLayout()
			};
			const vk::VkWriteDescriptorSet			write		=
			{
				vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
				DE_NULL,
				*m_descriptorSet,
				0u,			// dstBinding
				0u,			// dstArrayElement
				1u,			// descriptorCount
				vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
				&imageInfo,
				DE_NULL,	// pBufferInfo
				DE_NULL,	// pTexelBufferView
			};

			vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
		}
	}
}
5962 
submit(SubmitContext & context)5963 void RenderVertexSampledImage::submit (SubmitContext& context)
5964 {
5965 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
5966 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
5967 
5968 	vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
5969 
5970 	vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &(*m_descriptorSet), 0u, DE_NULL);
5971 	vkd.cmdDraw(commandBuffer, context.getImageWidth() * context.getImageHeight() * 2, 1, 0, 0);
5972 }
5973 
void RenderVertexSampledImage::verify (VerifyRenderPassContext& context, size_t)
{
	// Build the reference: each pixel of the reference image contributes two
	// points -- even invocations use its (r, g) channels as coordinates, odd
	// ones (b, a) -- and every point is plotted white into the reference target.
	for (int pos = 0; pos < (int)(context.getReferenceImage().getWidth() * context.getReferenceImage().getHeight() * 2); pos++)
	{
		const tcu::IVec3	size	= context.getReferenceImage().getAccess().getSize();
		// NOTE(review): arguments mirror the shader's indexing; (pos / 2) is
		// decomposed by image width into the sampled texel coordinates.
		const tcu::UVec4	pixel	= context.getReferenceImage().getAccess().getPixelUint((pos / 2) / size.x(), (pos / 2) % size.x());

		if (pos % 2 == 0)
			context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), pixel.x(), pixel.y());
		else
			context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), pixel.z(), pixel.w());
	}
}
5987 
// Render-pass command that draws a full-screen quad whose fragment shader
// chases values through the test buffer bound as one or more uniform buffer
// descriptors (shaders "render-quad.vert" / "uniform-buffer.frag").
class RenderFragmentUniformBuffer : public RenderPassCommand
{
public:
									RenderFragmentUniformBuffer		(void) {}
									~RenderFragmentUniformBuffer	(void);

	const char*						getName							(void) const { return "RenderFragmentUniformBuffer"; }
	void							logPrepare						(TestLog&, size_t) const;
	void							logSubmit						(TestLog&, size_t) const;
	void							prepare							(PrepareRenderPassContext&);
	void							submit							(SubmitContext& context);
	void							verify							(VerifyRenderPassContext&, size_t);

protected:

	// Size in bytes of the buffer range covered by the given descriptor set:
	// MAX_UNIFORM_BUFFER_SIZE for all sets except possibly the last one.
	deUint32						calculateBufferPartSize			(size_t descriptorSetNdx) const;

private:
	PipelineResources				m_resources;
	vk::Move<vk::VkDescriptorPool>	m_descriptorPool;
	vector<vk::VkDescriptorSet>		m_descriptorSets;	// Owned by m_descriptorPool; released with the pool.

	vk::VkDeviceSize				m_bufferSize;		// Clamped to MAX_SIZE and rounded down to a multiple of 16.
	size_t							m_targetWidth;
	size_t							m_targetHeight;
	deUint32						m_valuesPerPixel;	// Dependent buffer reads per pixel in the shader.
};
6015 
RenderFragmentUniformBuffer::~RenderFragmentUniformBuffer (void)
{
	// Descriptor sets are freed when m_descriptorPool is destroyed.
}
6019 
logPrepare(TestLog & log,size_t commandIndex) const6020 void RenderFragmentUniformBuffer::logPrepare (TestLog& log, size_t commandIndex) const
6021 {
6022 	log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as uniform buffer." << TestLog::EndMessage;
6023 }
6024 
logSubmit(TestLog & log,size_t commandIndex) const6025 void RenderFragmentUniformBuffer::logSubmit (TestLog& log, size_t commandIndex) const
6026 {
6027 	log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as uniform buffer." << TestLog::EndMessage;
6028 }
6029 
void RenderFragmentUniformBuffer::prepare (PrepareRenderPassContext& context)
{
	// Split the test buffer into MAX_UNIFORM_BUFFER_SIZE sized parts, bind
	// each part through its own uniform buffer descriptor set, and build the
	// pipeline (with a push constant range) used to render them.
	const vk::DeviceInterface&					vkd						= context.getContext().getDeviceInterface();
	const vk::VkDevice							device					= context.getContext().getDevice();
	const vk::VkRenderPass						renderPass				= context.getRenderPass();
	const deUint32								subpass					= 0;
	const vk::Unique<vk::VkShaderModule>		vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-quad.vert"), 0));
	const vk::Unique<vk::VkShaderModule>		fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("uniform-buffer.frag"), 0));
	vector<vk::VkDescriptorSetLayoutBinding>	bindings;

	// make sure buffer is smaller then MAX_SIZE and is multiple of 16 (in glsl we use uvec4 to store 16 values)
	m_bufferSize	= de::min(context.getBufferSize(), (vk::VkDeviceSize)MAX_SIZE);
	m_bufferSize	= static_cast<vk::VkDeviceSize>(m_bufferSize / 16u) * 16u;
	m_targetWidth	= context.getTargetWidth();
	m_targetHeight	= context.getTargetHeight();

	// Single uniform-buffer binding (binding 0) for the fragment stage.
	{
		const vk::VkDescriptorSetLayoutBinding binding =
		{
			0u,										// binding
			vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,	// descriptorType
			1,										// descriptorCount
			vk::VK_SHADER_STAGE_FRAGMENT_BIT,		// stageFlags
			DE_NULL									// pImmutableSamplers
		};

		bindings.push_back(binding);
	}
	// 12 bytes of push constants: callId, valuesPerPixel and bufferSize (see submit()).
	const vk::VkPushConstantRange pushConstantRange =
	{
		vk::VK_SHADER_STAGE_FRAGMENT_BIT,
		0u,
		12u
	};

	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
								vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 1u, &pushConstantRange, m_resources);

	{
		// One descriptor set per MAX_UNIFORM_BUFFER_SIZE chunk of the buffer.
		const deUint32							descriptorCount	= (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE));
		const vk::VkDescriptorPoolSize			poolSizes		=
		{
			vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
			descriptorCount
		};
		const vk::VkDescriptorPoolCreateInfo	createInfo		=
		{
			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
			DE_NULL,
			vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,

			descriptorCount,	// maxSets
			1u,					// poolSizeCount
			&poolSizes,
		};

		m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
		m_descriptorSets.resize(descriptorCount);

		// How many chained reads each pixel must perform so the whole buffer is covered.
		m_valuesPerPixel = (deUint32)divRoundUp<size_t>(descriptorCount * de::min<size_t>((size_t)m_bufferSize / 4, MAX_UNIFORM_BUFFER_SIZE / 4), m_targetWidth * m_targetHeight);
	}

	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
	{
		const vk::VkDescriptorSetLayout			layout			= *m_resources.descriptorSetLayout;
		const vk::VkDescriptorSetAllocateInfo	allocateInfo	=
		{
			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
			DE_NULL,

			*m_descriptorPool,
			1,				// descriptorSetCount
			&layout
		};

		// disown(): the raw handles live in m_descriptorSets and are released with the pool.
		m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();

		// Bind this set's slice of the buffer.
		{
			const vk::VkDescriptorBufferInfo		bufferInfo	=
			{
				context.getBuffer(),
				(vk::VkDeviceSize)(descriptorSetNdx * (size_t)MAX_UNIFORM_BUFFER_SIZE),		// offset
				calculateBufferPartSize(descriptorSetNdx)									// range
			};
			const vk::VkWriteDescriptorSet			write		=
			{
				vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
				DE_NULL,
				m_descriptorSets[descriptorSetNdx],
				0u,			// dstBinding
				0u,			// dstArrayElement
				1u,			// descriptorCount
				vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
				DE_NULL,	// pImageInfo
				&bufferInfo,
				DE_NULL,	// pTexelBufferView
			};

			vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
		}
	}
}
6132 
submit(SubmitContext & context)6133 void RenderFragmentUniformBuffer::submit (SubmitContext& context)
6134 {
6135 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
6136 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
6137 
6138 	vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
6139 
6140 	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
6141 	{
6142 		const struct
6143 		{
6144 			const deUint32	callId;
6145 			const deUint32	valuesPerPixel;
6146 			const deUint32	bufferSize;
6147 		} callParams =
6148 		{
6149 			(deUint32)descriptorSetNdx,
6150 			m_valuesPerPixel,
6151 			calculateBufferPartSize(descriptorSetNdx) / 16u
6152 		};
6153 
6154 		vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
6155 		vkd.cmdPushConstants(commandBuffer, *m_resources.pipelineLayout, vk::VK_SHADER_STAGE_FRAGMENT_BIT, 0u, (deUint32)sizeof(callParams), &callParams);
6156 		vkd.cmdDraw(commandBuffer, 6, 1, 0, 0);
6157 	}
6158 }
6159 
void RenderFragmentUniformBuffer::verify (VerifyRenderPassContext& context, size_t)
{
	// Number of 32-bit values one MAX_UNIFORM_BUFFER_SIZE descriptor holds.
	const size_t	arrayIntSize	= MAX_UNIFORM_BUFFER_SIZE / sizeof(deUint32);

	for (int y = 0; y < context.getReferenceTarget().getSize().y(); y++)
	for (int x = 0; x < context.getReferenceTarget().getSize().x(); x++)
	{
		// Pixel id as computed by the fragment shader (256 pixels per row).
		const deUint32	id						= (deUint32)y * 256u + (deUint32)x;
		// First draw call whose buffer part this pixel can reference; later
		// calls overwrite the pixel, matching the per-set draws in submit().
		const size_t	firstDescriptorSetNdx	= de::min<size_t>(id / (arrayIntSize / m_valuesPerPixel), m_descriptorSets.size() - 1);

		for (size_t descriptorSetNdx = firstDescriptorSetNdx; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
		{
			const size_t	offset	= descriptorSetNdx * MAX_UNIFORM_BUFFER_SIZE;
			const deUint32	callId	= (deUint32)descriptorSetNdx;
			const deUint32	count	= calculateBufferPartSize(descriptorSetNdx) / 16u;	// Part size in uvec4 units.

			if (id < callId * (arrayIntSize / m_valuesPerPixel))
				continue;
			else
			{
				deUint32 value = id;

				// Chase m_valuesPerPixel dependent reads through the buffer
				// part: each step loads a little-endian 32-bit value at an
				// index derived from the previous value.
				for (deUint32 i = 0; i < m_valuesPerPixel; i++)
				{
					// in shader UBO has up to 64 items of uvec4, each uvec4 contains 16 values
					size_t index = offset + size_t((value % count) * 16u) + size_t((value % 4u) * 4u);
					value	= (((deUint32)context.getReference().get(index + 0)))
							| (((deUint32)context.getReference().get(index + 1)) << 8u)
							| (((deUint32)context.getReference().get(index + 2)) << 16u)
							| (((deUint32)context.getReference().get(index + 3)) << 24u);
				}
				// Unpack the final value into RGBA bytes and normalize to [0, 1].
				const UVec4	vec	((value >>  0u) & 0xFFu,
								 (value >>  8u) & 0xFFu,
								 (value >> 16u) & 0xFFu,
								 (value >> 24u) & 0xFFu);

				context.getReferenceTarget().getAccess().setPixel(vec.asFloat() / Vec4(255.0f), x, y);
			}
		}
	}
}
6201 
calculateBufferPartSize(size_t descriptorSetNdx) const6202 deUint32 RenderFragmentUniformBuffer::calculateBufferPartSize(size_t descriptorSetNdx) const
6203 {
6204 	deUint32 size = static_cast<deUint32>(m_bufferSize) - static_cast<deUint32>(descriptorSetNdx) * MAX_UNIFORM_BUFFER_SIZE;
6205 	if (size < MAX_UNIFORM_BUFFER_SIZE)
6206 		return size;
6207 	return MAX_UNIFORM_BUFFER_SIZE;
6208 }
6209 
// Render-pass command that draws a full-screen quad whose fragment shader
// reads the whole test buffer through a single storage buffer descriptor
// (shaders "render-quad.vert" / "storage-buffer.frag").
class RenderFragmentStorageBuffer : public RenderPassCommand
{
public:
									RenderFragmentStorageBuffer		(void) {}
									~RenderFragmentStorageBuffer	(void);

	const char*						getName							(void) const { return "RenderFragmentStorageBuffer"; }
	void							logPrepare						(TestLog&, size_t) const;
	void							logSubmit						(TestLog&, size_t) const;
	void							prepare							(PrepareRenderPassContext&);
	void							submit							(SubmitContext& context);
	void							verify							(VerifyRenderPassContext&, size_t);

private:
	PipelineResources				m_resources;
	vk::Move<vk::VkDescriptorPool>	m_descriptorPool;
	vk::Move<vk::VkDescriptorSet>	m_descriptorSet;	// Single storage buffer descriptor, binding 0.

	vk::VkDeviceSize				m_bufferSize;		// Rounded down to a multiple of 16.
	size_t							m_targetWidth;
	size_t							m_targetHeight;
};
6232 
RenderFragmentStorageBuffer::~RenderFragmentStorageBuffer (void)
{
	// Nothing to do: all Vulkan handles are owned by vk::Move wrappers.
}
6236 
logPrepare(TestLog & log,size_t commandIndex) const6237 void RenderFragmentStorageBuffer::logPrepare (TestLog& log, size_t commandIndex) const
6238 {
6239 	log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline to render buffer as storage buffer." << TestLog::EndMessage;
6240 }
6241 
logSubmit(TestLog & log,size_t commandIndex) const6242 void RenderFragmentStorageBuffer::logSubmit (TestLog& log, size_t commandIndex) const
6243 {
6244 	log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as storage buffer." << TestLog::EndMessage;
6245 }
6246 
void RenderFragmentStorageBuffer::prepare (PrepareRenderPassContext& context)
{
	// Build the pipeline (with a push constant range) and a single storage
	// buffer descriptor covering the whole test buffer.
	const vk::DeviceInterface&					vkd						= context.getContext().getDeviceInterface();
	const vk::VkDevice							device					= context.getContext().getDevice();
	const vk::VkRenderPass						renderPass				= context.getRenderPass();
	const deUint32								subpass					= 0;
	const vk::Unique<vk::VkShaderModule>		vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-quad.vert"), 0));
	const vk::Unique<vk::VkShaderModule>		fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("storage-buffer.frag"), 0));
	vector<vk::VkDescriptorSetLayoutBinding>	bindings;

	// make sure buffer size is multiple of 16 (in glsl we use uvec4 to store 16 values)
	m_bufferSize	= context.getBufferSize();
	m_bufferSize	= static_cast<vk::VkDeviceSize>(m_bufferSize / 16u) * 16u;
	m_targetWidth	= context.getTargetWidth();
	m_targetHeight	= context.getTargetHeight();

	// Single storage-buffer binding (binding 0) for the fragment stage.
	{
		const vk::VkDescriptorSetLayoutBinding binding =
		{
			0u,										// binding
			vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,	// descriptorType
			1,										// descriptorCount
			vk::VK_SHADER_STAGE_FRAGMENT_BIT,		// stageFlags
			DE_NULL									// pImmutableSamplers
		};

		bindings.push_back(binding);
	}
	// 12 bytes of push constants: valuesPerPixel and bufferSize (see submit()).
	const vk::VkPushConstantRange pushConstantRange =
	{
		vk::VK_SHADER_STAGE_FRAGMENT_BIT,
		0u,
		12u
	};

	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
								vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 1u, &pushConstantRange, m_resources);

	{
		const deUint32							descriptorCount	= 1;
		const vk::VkDescriptorPoolSize			poolSizes		=
		{
			vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
			descriptorCount
		};
		const vk::VkDescriptorPoolCreateInfo	createInfo		=
		{
			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
			DE_NULL,
			vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,

			descriptorCount,	// maxSets
			1u,					// poolSizeCount
			&poolSizes,
		};

		m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
	}

	{
		const vk::VkDescriptorSetLayout			layout			= *m_resources.descriptorSetLayout;
		const vk::VkDescriptorSetAllocateInfo	allocateInfo	=
		{
			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
			DE_NULL,

			*m_descriptorPool,
			1,				// descriptorSetCount
			&layout
		};

		m_descriptorSet = vk::allocateDescriptorSet(vkd, device, &allocateInfo);

		// Bind the whole (rounded-down) buffer to the descriptor.
		{
			const vk::VkDescriptorBufferInfo	bufferInfo	=
			{
				context.getBuffer(),
				0u,				// offset
				m_bufferSize	// range
			};
			const vk::VkWriteDescriptorSet		write		=
			{
				vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
				DE_NULL,
				m_descriptorSet.get(),
				0u,			// dstBinding
				0u,			// dstArrayElement
				1u,			// descriptorCount
				vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
				DE_NULL,	// pImageInfo
				&bufferInfo,
				DE_NULL,	// pTexelBufferView
			};

			vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
		}
	}
}
6345 
submit(SubmitContext & context)6346 void RenderFragmentStorageBuffer::submit (SubmitContext& context)
6347 {
6348 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
6349 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
6350 
6351 	vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
6352 
6353 	const struct
6354 	{
6355 		const deUint32	valuesPerPixel;
6356 		const deUint32	bufferSize;
6357 	} callParams =
6358 	{
6359 		(deUint32)divRoundUp<vk::VkDeviceSize>(m_bufferSize / 4, m_targetWidth * m_targetHeight),
6360 		(deUint32)m_bufferSize
6361 	};
6362 
6363 	vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSet.get(), 0u, DE_NULL);
6364 	vkd.cmdPushConstants(commandBuffer, *m_resources.pipelineLayout, vk::VK_SHADER_STAGE_FRAGMENT_BIT, 0u, (deUint32)sizeof(callParams), &callParams);
6365 	vkd.cmdDraw(commandBuffer, 6, 1, 0, 0);
6366 }
6367 
void RenderFragmentStorageBuffer::verify (VerifyRenderPassContext& context, size_t)
{
	// Same formula as submit(): chained reads per pixel needed to cover the buffer.
	const deUint32	valuesPerPixel	= (deUint32)divRoundUp<vk::VkDeviceSize>(m_bufferSize / 4, m_targetWidth * m_targetHeight);

	for (int y = 0; y < context.getReferenceTarget().getSize().y(); y++)
	for (int x = 0; x < context.getReferenceTarget().getSize().x(); x++)
	{
		// Pixel id as computed by the fragment shader (256 pixels per row).
		const deUint32	id		= (deUint32)y * 256u + (deUint32)x;

		deUint32 value = id;

		// Chase valuesPerPixel dependent reads: each step loads the
		// little-endian 32-bit word the previous value points at (modulo the
		// number of words in the buffer).
		for (deUint32 i = 0; i < valuesPerPixel; i++)
		{
			value	= (((deUint32)context.getReference().get((size_t)(value % (m_bufferSize / sizeof(deUint32))) * 4 + 0)) << 0u)
					| (((deUint32)context.getReference().get((size_t)(value % (m_bufferSize / sizeof(deUint32))) * 4 + 1)) << 8u)
					| (((deUint32)context.getReference().get((size_t)(value % (m_bufferSize / sizeof(deUint32))) * 4 + 2)) << 16u)
					| (((deUint32)context.getReference().get((size_t)(value % (m_bufferSize / sizeof(deUint32))) * 4 + 3)) << 24u);

		}
		// Unpack the final value into RGBA bytes and normalize to [0, 1].
		const UVec4	vec	((value >>  0u) & 0xFFu,
						 (value >>  8u) & 0xFFu,
						 (value >> 16u) & 0xFFu,
						 (value >> 24u) & 0xFFu);

		context.getReferenceTarget().getAccess().setPixel(vec.asFloat() / Vec4(255.0f), x, y);
	}
}
6395 
// Render-pass command that draws a full-screen quad whose fragment shader
// reads the test buffer through uniform texel buffer descriptors
// (shaders "render-quad.vert" / "uniform-texel-buffer.frag").
class RenderFragmentUniformTexelBuffer : public RenderPassCommand
{
public:
									RenderFragmentUniformTexelBuffer	(void) {}
									~RenderFragmentUniformTexelBuffer	(void);

	const char*						getName								(void) const { return "RenderFragmentUniformTexelBuffer"; }
	void							logPrepare							(TestLog&, size_t) const;
	void							logSubmit							(TestLog&, size_t) const;
	void							prepare								(PrepareRenderPassContext&);
	void							submit								(SubmitContext& context);
	void							verify								(VerifyRenderPassContext&, size_t);

private:
	PipelineResources				m_resources;
	vk::Move<vk::VkDescriptorPool>	m_descriptorPool;
	vector<vk::VkDescriptorSet>		m_descriptorSets;		// Owned by m_descriptorPool.
	vector<vk::VkBufferView>		m_bufferViews;			// Raw handles; destroyed explicitly in the destructor.

	const vk::DeviceInterface*		m_vkd;					// Saved in prepare() so the destructor can destroy the views.
	vk::VkDevice					m_device;				// Saved in prepare() for the same reason.
	vk::VkDeviceSize				m_bufferSize;
	deUint32						m_maxUniformTexelCount;	// Device limit maxTexelBufferElements.
	size_t							m_targetWidth;
	size_t							m_targetHeight;
};
6422 
~RenderFragmentUniformTexelBuffer(void)6423 RenderFragmentUniformTexelBuffer::~RenderFragmentUniformTexelBuffer (void)
6424 {
6425 	for (size_t bufferViewNdx = 0; bufferViewNdx < m_bufferViews.size(); bufferViewNdx++)
6426 	{
6427 		if (!!m_bufferViews[bufferViewNdx])
6428 		{
6429 			m_vkd->destroyBufferView(m_device, m_bufferViews[bufferViewNdx], DE_NULL);
6430 			m_bufferViews[bufferViewNdx] = (vk::VkBufferView)0;
6431 		}
6432 	}
6433 }
6434 
logPrepare(TestLog & log,size_t commandIndex) const6435 void RenderFragmentUniformTexelBuffer::logPrepare (TestLog& log, size_t commandIndex) const
6436 {
6437 	log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as uniform buffer." << TestLog::EndMessage;
6438 }
6439 
logSubmit(TestLog & log,size_t commandIndex) const6440 void RenderFragmentUniformTexelBuffer::logSubmit (TestLog& log, size_t commandIndex) const
6441 {
6442 	log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as uniform buffer." << TestLog::EndMessage;
6443 }
6444 
// Build the pipeline and descriptors needed to read the tested buffer as
// uniform texel buffers from the fragment shader. Since the buffer may hold
// more texels than limits.maxTexelBufferElements allows in one view, it is
// split into chunks; each chunk gets its own buffer view and descriptor set.
void RenderFragmentUniformTexelBuffer::prepare (PrepareRenderPassContext& context)
{
	const vk::InstanceInterface&				vki						= context.getContext().getInstanceInterface();
	const vk::VkPhysicalDevice					physicalDevice			= context.getContext().getPhysicalDevice();
	const vk::DeviceInterface&					vkd						= context.getContext().getDeviceInterface();
	const vk::VkDevice							device					= context.getContext().getDevice();
	const vk::VkRenderPass						renderPass				= context.getRenderPass();
	const deUint32								subpass					= 0;
	const vk::Unique<vk::VkShaderModule>		vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-quad.vert"), 0));
	const vk::Unique<vk::VkShaderModule>		fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("uniform-texel-buffer.frag"), 0));
	vector<vk::VkDescriptorSetLayoutBinding>	bindings;

	// Cache device handles and the texel-buffer limit; the destructor needs
	// m_vkd/m_device to destroy the raw buffer views created below.
	m_device				= device;
	m_vkd					= &vkd;
	m_bufferSize			= context.getBufferSize();
	m_maxUniformTexelCount	= vk::getPhysicalDeviceProperties(vki, physicalDevice).limits.maxTexelBufferElements;
	m_targetWidth			= context.getTargetWidth();
	m_targetHeight			= context.getTargetHeight();

	// Single fragment-stage uniform texel buffer at binding 0.
	{
		const vk::VkDescriptorSetLayoutBinding binding =
		{
			0u,
			vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
			1,
			vk::VK_SHADER_STAGE_FRAGMENT_BIT,
			DE_NULL
		};

		bindings.push_back(binding);
	}
	// 12 bytes = three deUint32 values (see callParams in submit()).
	const vk::VkPushConstantRange pushConstantRange =
	{
		vk::VK_SHADER_STAGE_FRAGMENT_BIT,
		0u,
		12u
	};

	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
								vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 1u, &pushConstantRange, m_resources);

	{
		// One descriptor per chunk of at most m_maxUniformTexelCount 32-bit
		// (4-byte) texels: ceil(bufferSize / (maxTexels * 4)).
		const deUint32							descriptorCount	= (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)m_maxUniformTexelCount * 4));
		const vk::VkDescriptorPoolSize			poolSizes		=
		{
			vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
			descriptorCount
		};
		const vk::VkDescriptorPoolCreateInfo	createInfo		=
		{
			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
			DE_NULL,
			vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,

			descriptorCount,
			1u,
			&poolSizes,
		};

		m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
		m_descriptorSets.resize(descriptorCount, (vk::VkDescriptorSet)0);
		m_bufferViews.resize(descriptorCount, (vk::VkBufferView)0);
	}

	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
	{
		// Number of whole 32-bit texels in this chunk; the last chunk may
		// cover only the remaining tail of the buffer.
		const deUint32							count			= (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxUniformTexelCount * 4
																? m_bufferSize - descriptorSetNdx * m_maxUniformTexelCount * 4
																: m_maxUniformTexelCount * 4) / 4;
		const vk::VkDescriptorSetLayout			layout			= *m_resources.descriptorSetLayout;
		const vk::VkDescriptorSetAllocateInfo	allocateInfo	=
		{
			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
			DE_NULL,

			*m_descriptorPool,
			1,
			&layout
		};

		// disown(): the raw handle is kept in m_descriptorSets; the sets go
		// away with the pool.
		m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();

		{
			// R32_UINT view over this chunk's byte range of the tested
			// buffer. Raw handle; destroyed in the destructor.
			const vk::VkBufferViewCreateInfo createInfo =
			{
				vk::VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
				DE_NULL,
				0u,

				context.getBuffer(),
				vk::VK_FORMAT_R32_UINT,
				descriptorSetNdx * m_maxUniformTexelCount * 4,
				count * 4
			};

			VK_CHECK(vkd.createBufferView(device, &createInfo, DE_NULL, &m_bufferViews[descriptorSetNdx]));
		}

		{
			// Point binding 0 of this chunk's descriptor set at its view.
			const vk::VkWriteDescriptorSet			write		=
			{
				vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
				DE_NULL,
				m_descriptorSets[descriptorSetNdx],
				0u,
				0u,
				1u,
				vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
				DE_NULL,
				DE_NULL,
				&m_bufferViews[descriptorSetNdx]
			};

			vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
		}
	}
}
6562 
submit(SubmitContext & context)6563 void RenderFragmentUniformTexelBuffer::submit (SubmitContext& context)
6564 {
6565 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
6566 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
6567 
6568 	vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
6569 
6570 	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
6571 	{
6572 		const struct
6573 		{
6574 			const deUint32	callId;
6575 			const deUint32	valuesPerPixel;
6576 			const deUint32	maxUniformTexelCount;
6577 		} callParams =
6578 		{
6579 			(deUint32)descriptorSetNdx,
6580 			(deUint32)divRoundUp<size_t>(m_descriptorSets.size() * de::min<size_t>((size_t)m_bufferSize / 4, m_maxUniformTexelCount), m_targetWidth * m_targetHeight),
6581 			m_maxUniformTexelCount
6582 		};
6583 
6584 		vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
6585 		vkd.cmdPushConstants(commandBuffer, *m_resources.pipelineLayout, vk::VK_SHADER_STAGE_FRAGMENT_BIT, 0u, (deUint32)sizeof(callParams), &callParams);
6586 		vkd.cmdDraw(commandBuffer, 6, 1, 0, 0);
6587 	}
6588 }
6589 
// Build the reference image by replaying, on the CPU, the chained texel-buffer
// lookups the fragment shader performs, using the reference copy of the
// buffer contents. Later draw calls overwrite earlier ones, so each pixel is
// written once per covering descriptor set, last write wins.
void RenderFragmentUniformTexelBuffer::verify (VerifyRenderPassContext& context, size_t)
{
	// Same formula as in submit(): lookups per pixel.
	const deUint32	valuesPerPixel	= (deUint32)divRoundUp<size_t>(m_descriptorSets.size() * de::min<size_t>((size_t)m_bufferSize / 4, m_maxUniformTexelCount), m_targetWidth * m_targetHeight);

	for (int y = 0; y < context.getReferenceTarget().getSize().y(); y++)
	for (int x = 0; x < context.getReferenceTarget().getSize().x(); x++)
	{
		// First draw call (chunk) whose pixel range includes this pixel;
		// earlier calls are skipped by the guard below anyway.
		const size_t firstDescriptorSetNdx = de::min<size_t>((y * 256u + x) / (m_maxUniformTexelCount / valuesPerPixel), m_descriptorSets.size() - 1);

		for (size_t descriptorSetNdx = firstDescriptorSetNdx; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
		{
			// Byte offset of this chunk within the buffer.
			const size_t	offset	= descriptorSetNdx * m_maxUniformTexelCount * 4;
			const deUint32	callId	= (deUint32)descriptorSetNdx;

			// Pixel id; NOTE(review): assumes a 256-pixel-wide pixel-id
			// mapping matching the shader.
			const deUint32	id		= (deUint32)y * 256u + (deUint32)x;
			// Whole 32-bit texels in this chunk (the last chunk may be short).
			const deUint32	count	= (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxUniformTexelCount * 4
									? m_bufferSize - descriptorSetNdx * m_maxUniformTexelCount * 4
									: m_maxUniformTexelCount * 4) / 4;

			// Pixels with an id below this threshold were not touched by
			// draw call callId.
			if (y * 256u + x < callId * (m_maxUniformTexelCount / valuesPerPixel))
				continue;
			else
			{
				deUint32 value = id;

				// Chase valuesPerPixel chained lookups: each iteration reads
				// the little-endian 32-bit texel at index (value % count).
				for (deUint32 i = 0; i < valuesPerPixel; i++)
				{
					value	=  ((deUint32)context.getReference().get(offset + (value % count) * 4 + 0))
							| (((deUint32)context.getReference().get(offset + (value % count) * 4 + 1)) << 8u)
							| (((deUint32)context.getReference().get(offset + (value % count) * 4 + 2)) << 16u)
							| (((deUint32)context.getReference().get(offset + (value % count) * 4 + 3)) << 24u);

				}
				// Unpack the final 32-bit value into RGBA8 and store it as
				// the expected pixel color.
				const UVec4	vec	((value >>  0u) & 0xFFu,
								 (value >>  8u) & 0xFFu,
								 (value >> 16u) & 0xFFu,
								 (value >> 24u) & 0xFFu);

				context.getReferenceTarget().getAccess().setPixel(vec.asFloat() / Vec4(255.0f), x, y);
			}
		}
	}
}
6633 
// Render pass command that draws a full-screen quad reading the tested
// buffer through storage texel buffer descriptors in the fragment shader.
class RenderFragmentStorageTexelBuffer : public RenderPassCommand
{
public:
									RenderFragmentStorageTexelBuffer	(void) {}
									~RenderFragmentStorageTexelBuffer	(void);

	const char*						getName								(void) const { return "RenderFragmentStorageTexelBuffer"; }
	void							logPrepare							(TestLog&, size_t) const;
	void							logSubmit							(TestLog&, size_t) const;
	void							prepare								(PrepareRenderPassContext&);
	void							submit								(SubmitContext& context);
	void							verify								(VerifyRenderPassContext&, size_t);

private:
	PipelineResources				m_resources;
	vk::Move<vk::VkDescriptorPool>	m_descriptorPool;
	vector<vk::VkDescriptorSet>		m_descriptorSets;		// One per buffer chunk, allocated in prepare().
	vector<vk::VkBufferView>		m_bufferViews;			// Raw handles; destroyed in the destructor.

	const vk::DeviceInterface*		m_vkd;					// Saved by prepare() for cleanup in the destructor.
	vk::VkDevice					m_device;
	vk::VkDeviceSize				m_bufferSize;
	deUint32						m_maxStorageTexelCount;	// limits.maxTexelBufferElements (set in prepare()).
	size_t							m_targetWidth;
	size_t							m_targetHeight;
};
6660 
~RenderFragmentStorageTexelBuffer(void)6661 RenderFragmentStorageTexelBuffer::~RenderFragmentStorageTexelBuffer (void)
6662 {
6663 	for (size_t bufferViewNdx = 0; bufferViewNdx < m_bufferViews.size(); bufferViewNdx++)
6664 	{
6665 		if (!!m_bufferViews[bufferViewNdx])
6666 		{
6667 			m_vkd->destroyBufferView(m_device, m_bufferViews[bufferViewNdx], DE_NULL);
6668 			m_bufferViews[bufferViewNdx] = (vk::VkBufferView)0;
6669 		}
6670 	}
6671 }
6672 
logPrepare(TestLog & log,size_t commandIndex) const6673 void RenderFragmentStorageTexelBuffer::logPrepare (TestLog& log, size_t commandIndex) const
6674 {
6675 	log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as storage buffer." << TestLog::EndMessage;
6676 }
6677 
logSubmit(TestLog & log,size_t commandIndex) const6678 void RenderFragmentStorageTexelBuffer::logSubmit (TestLog& log, size_t commandIndex) const
6679 {
6680 	log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as storage buffer." << TestLog::EndMessage;
6681 }
6682 
// Build the pipeline and descriptors needed to read the tested buffer as
// storage texel buffers from the fragment shader. The buffer is split into
// chunks of at most limits.maxTexelBufferElements 32-bit texels; each chunk
// gets its own buffer view and descriptor set.
void RenderFragmentStorageTexelBuffer::prepare (PrepareRenderPassContext& context)
{
	const vk::InstanceInterface&				vki						= context.getContext().getInstanceInterface();
	const vk::VkPhysicalDevice					physicalDevice			= context.getContext().getPhysicalDevice();
	const vk::DeviceInterface&					vkd						= context.getContext().getDeviceInterface();
	const vk::VkDevice							device					= context.getContext().getDevice();
	const vk::VkRenderPass						renderPass				= context.getRenderPass();
	const deUint32								subpass					= 0;
	const vk::Unique<vk::VkShaderModule>		vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-quad.vert"), 0));
	const vk::Unique<vk::VkShaderModule>		fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("storage-texel-buffer.frag"), 0));
	vector<vk::VkDescriptorSetLayoutBinding>	bindings;

	// Cache device handles and the texel-buffer limit; the destructor needs
	// m_vkd/m_device to destroy the raw buffer views created below.
	m_device				= device;
	m_vkd					= &vkd;
	m_bufferSize			= context.getBufferSize();
	m_maxStorageTexelCount	= vk::getPhysicalDeviceProperties(vki, physicalDevice).limits.maxTexelBufferElements;
	m_targetWidth			= context.getTargetWidth();
	m_targetHeight			= context.getTargetHeight();

	// Single fragment-stage storage texel buffer at binding 0.
	{
		const vk::VkDescriptorSetLayoutBinding binding =
		{
			0u,
			vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
			1,
			vk::VK_SHADER_STAGE_FRAGMENT_BIT,
			DE_NULL
		};

		bindings.push_back(binding);
	}
	// 16 bytes = four deUint32 values (see callParams in submit()).
	const vk::VkPushConstantRange pushConstantRange =
	{
		vk::VK_SHADER_STAGE_FRAGMENT_BIT,
		0u,
		16u
	};

	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
								vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 1u, &pushConstantRange, m_resources);

	{
		// One descriptor per chunk: ceil(bufferSize / (maxTexels * 4)).
		const deUint32							descriptorCount	= (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)m_maxStorageTexelCount * 4));
		const vk::VkDescriptorPoolSize			poolSizes		=
		{
			vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
			descriptorCount
		};
		const vk::VkDescriptorPoolCreateInfo	createInfo		=
		{
			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
			DE_NULL,
			vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,

			descriptorCount,
			1u,
			&poolSizes,
		};

		m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
		m_descriptorSets.resize(descriptorCount, (vk::VkDescriptorSet)0);
		m_bufferViews.resize(descriptorCount, (vk::VkBufferView)0);
	}

	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
	{
		// Number of whole 32-bit texels in this chunk; the last chunk may
		// cover only the remaining tail of the buffer.
		const deUint32							count			= (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxStorageTexelCount * 4
																? m_bufferSize - descriptorSetNdx * m_maxStorageTexelCount * 4
																: m_maxStorageTexelCount * 4) / 4;
		const vk::VkDescriptorSetLayout			layout			= *m_resources.descriptorSetLayout;
		const vk::VkDescriptorSetAllocateInfo	allocateInfo	=
		{
			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
			DE_NULL,

			*m_descriptorPool,
			1,
			&layout
		};

		// disown(): the raw handle is kept in m_descriptorSets; the sets go
		// away with the pool.
		m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();

		{
			// R32_UINT view over this chunk's byte range of the tested
			// buffer. Raw handle; destroyed in the destructor.
			const vk::VkBufferViewCreateInfo createInfo =
			{
				vk::VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
				DE_NULL,
				0u,

				context.getBuffer(),
				vk::VK_FORMAT_R32_UINT,
				descriptorSetNdx * m_maxStorageTexelCount * 4,
				count * 4
			};

			VK_CHECK(vkd.createBufferView(device, &createInfo, DE_NULL, &m_bufferViews[descriptorSetNdx]));
		}

		{
			// Point binding 0 of this chunk's descriptor set at its view.
			const vk::VkWriteDescriptorSet			write		=
			{
				vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
				DE_NULL,
				m_descriptorSets[descriptorSetNdx],
				0u,
				0u,
				1u,
				vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
				DE_NULL,
				DE_NULL,
				&m_bufferViews[descriptorSetNdx]
			};

			vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
		}
	}
}
6800 
submit(SubmitContext & context)6801 void RenderFragmentStorageTexelBuffer::submit (SubmitContext& context)
6802 {
6803 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
6804 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
6805 
6806 	vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
6807 
6808 	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
6809 	{
6810 		const struct
6811 		{
6812 			const deUint32	callId;
6813 			const deUint32	valuesPerPixel;
6814 			const deUint32	maxStorageTexelCount;
6815 			const deUint32	width;
6816 		} callParams =
6817 		{
6818 			(deUint32)descriptorSetNdx,
6819 			(deUint32)divRoundUp<size_t>(m_descriptorSets.size() * de::min<size_t>(m_maxStorageTexelCount, (size_t)m_bufferSize / 4), m_targetWidth * m_targetHeight),
6820 			m_maxStorageTexelCount,
6821 			(deUint32)(m_bufferSize < (descriptorSetNdx + 1u) * m_maxStorageTexelCount * 4u
6822 								? m_bufferSize - descriptorSetNdx * m_maxStorageTexelCount * 4u
6823 								: m_maxStorageTexelCount * 4u) / 4u
6824 		};
6825 
6826 		vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
6827 		vkd.cmdPushConstants(commandBuffer, *m_resources.pipelineLayout, vk::VK_SHADER_STAGE_FRAGMENT_BIT, 0u, (deUint32)sizeof(callParams), &callParams);
6828 		vkd.cmdDraw(commandBuffer, 6, 1, 0, 0);
6829 	}
6830 }
6831 
// Build the reference image by replaying, on the CPU, the chained texel-buffer
// lookups the fragment shader performs, using the reference copy of the
// buffer contents. Later draw calls overwrite earlier ones, so each pixel is
// written once per covering descriptor set, last write wins.
void RenderFragmentStorageTexelBuffer::verify (VerifyRenderPassContext& context, size_t)
{
	// Same formula as in submit(): lookups per pixel.
	const deUint32	valuesPerPixel	= (deUint32)divRoundUp<size_t>(m_descriptorSets.size() * de::min<size_t>(m_maxStorageTexelCount, (size_t)m_bufferSize / 4), m_targetWidth * m_targetHeight);

	for (int y = 0; y < context.getReferenceTarget().getSize().y(); y++)
	for (int x = 0; x < context.getReferenceTarget().getSize().x(); x++)
	{
		// First draw call (chunk) whose pixel range includes this pixel;
		// earlier calls are skipped by the guard below anyway.
		const size_t firstDescriptorSetNdx = de::min<size_t>((y * 256u + x) / (m_maxStorageTexelCount / valuesPerPixel), m_descriptorSets.size() - 1);

		for (size_t descriptorSetNdx = firstDescriptorSetNdx; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
		{
			// Byte offset of this chunk within the buffer.
			const size_t	offset	= descriptorSetNdx * m_maxStorageTexelCount * 4;
			const deUint32	callId	= (deUint32)descriptorSetNdx;

			// Pixel id; NOTE(review): assumes a 256-pixel-wide pixel-id
			// mapping matching the shader.
			const deUint32	id		= (deUint32)y * 256u + (deUint32)x;
			// Whole 32-bit texels in this chunk (the last chunk may be short).
			const deUint32	count	= (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxStorageTexelCount * 4
									? m_bufferSize - descriptorSetNdx * m_maxStorageTexelCount * 4
									: m_maxStorageTexelCount * 4) / 4;

			// Pixels with an id below this threshold were not touched by
			// draw call callId.
			if (y * 256u + x < callId * (m_maxStorageTexelCount / valuesPerPixel))
				continue;
			else
			{
				deUint32 value = id;

				// Chase valuesPerPixel chained lookups: each iteration reads
				// the little-endian 32-bit texel at index (value % count).
				for (deUint32 i = 0; i < valuesPerPixel; i++)
				{
					value	= ((deUint32)context.getReference().get( offset + (value % count) * 4 + 0))
							| (((deUint32)context.getReference().get(offset + (value % count) * 4 + 1)) << 8u)
							| (((deUint32)context.getReference().get(offset + (value % count) * 4 + 2)) << 16u)
							| (((deUint32)context.getReference().get(offset + (value % count) * 4 + 3)) << 24u);

				}
				// Unpack the final 32-bit value into RGBA8 and store it as
				// the expected pixel color.
				const UVec4	vec	((value >>  0u) & 0xFFu,
								 (value >>  8u) & 0xFFu,
								 (value >> 16u) & 0xFFu,
								 (value >> 24u) & 0xFFu);

				context.getReferenceTarget().getAccess().setPixel(vec.asFloat() / Vec4(255.0f), x, y);
			}
		}
	}
}
6875 
// Render pass command that draws a full-screen quad reading the tested image
// through a fragment-shader storage image descriptor.
class RenderFragmentStorageImage : public RenderPassCommand
{
public:
									RenderFragmentStorageImage	(void) {}
									~RenderFragmentStorageImage	(void);

	const char*						getName						(void) const { return "RenderFragmentStorageImage"; }
	void							logPrepare					(TestLog&, size_t) const;
	void							logSubmit					(TestLog&, size_t) const;
	void							prepare						(PrepareRenderPassContext&);
	void							submit						(SubmitContext& context);
	void							verify						(VerifyRenderPassContext&, size_t);

private:
	PipelineResources				m_resources;
	vk::Move<vk::VkDescriptorPool>	m_descriptorPool;
	vk::Move<vk::VkDescriptorSet>	m_descriptorSet;	// Single set; RAII-managed (pool has FREE_DESCRIPTOR_SET flag).
	vk::Move<vk::VkImageView>		m_imageView;		// View of the image under test, created in prepare().
};
6895 
// Empty out-of-line destructor: all members are vk::Move wrappers that
// release their handles automatically.
RenderFragmentStorageImage::~RenderFragmentStorageImage (void)
{
}
6899 
logPrepare(TestLog & log,size_t commandIndex) const6900 void RenderFragmentStorageImage::logPrepare (TestLog& log, size_t commandIndex) const
6901 {
6902 	log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render storage image." << TestLog::EndMessage;
6903 }
6904 
logSubmit(TestLog & log,size_t commandIndex) const6905 void RenderFragmentStorageImage::logSubmit (TestLog& log, size_t commandIndex) const
6906 {
6907 	log << TestLog::Message << commandIndex << ":" << getName() << " Render using storage image." << TestLog::EndMessage;
6908 }
6909 
// Build the pipeline, descriptor pool/set and image view needed to read the
// tested image as a storage image from the fragment shader.
void RenderFragmentStorageImage::prepare (PrepareRenderPassContext& context)
{
	const vk::DeviceInterface&					vkd						= context.getContext().getDeviceInterface();
	const vk::VkDevice							device					= context.getContext().getDevice();
	const vk::VkRenderPass						renderPass				= context.getRenderPass();
	const deUint32								subpass					= 0;
	const vk::Unique<vk::VkShaderModule>		vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-quad.vert"), 0));
	const vk::Unique<vk::VkShaderModule>		fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("storage-image.frag"), 0));
	vector<vk::VkDescriptorSetLayoutBinding>	bindings;

	// Single fragment-stage storage image at binding 0.
	{
		const vk::VkDescriptorSetLayoutBinding binding =
		{
			0u,
			vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
			1,
			vk::VK_SHADER_STAGE_FRAGMENT_BIT,
			DE_NULL
		};

		bindings.push_back(binding);
	}

	// No push constants for this pipeline (count 0, range DE_NULL).
	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
								vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 0u, DE_NULL, m_resources);

	// Pool sized for exactly one storage image descriptor.
	{
		const vk::VkDescriptorPoolSize			poolSizes		=
		{
			vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
			1
		};
		const vk::VkDescriptorPoolCreateInfo	createInfo		=
		{
			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
			DE_NULL,
			vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,

			1u,
			1u,
			&poolSizes,
		};

		m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
	}

	{
		const vk::VkDescriptorSetLayout			layout			= *m_resources.descriptorSetLayout;
		const vk::VkDescriptorSetAllocateInfo	allocateInfo	=
		{
			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
			DE_NULL,

			*m_descriptorPool,
			1,
			&layout
		};

		m_descriptorSet = vk::allocateDescriptorSet(vkd, device, &allocateInfo);

		{
			// 2D RGBA8 view over the whole image under test (single mip
			// level, single array layer).
			const vk::VkImageViewCreateInfo createInfo =
			{
				vk::VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
				DE_NULL,
				0u,

				context.getImage(),
				vk::VK_IMAGE_VIEW_TYPE_2D,
				vk::VK_FORMAT_R8G8B8A8_UNORM,
				vk::makeComponentMappingRGBA(),
				{
					vk::VK_IMAGE_ASPECT_COLOR_BIT,
					0u,
					1u,
					0u,
					1u
				}
			};

			m_imageView = vk::createImageView(vkd, device, &createInfo);
		}

		{
			// Sampler handle is 0: a storage image is accessed without a
			// sampler.
			const vk::VkDescriptorImageInfo			imageInfo	=
			{
				0,
				*m_imageView,
				context.getImageLayout()
			};
			const vk::VkWriteDescriptorSet			write		=
			{
				vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
				DE_NULL,
				*m_descriptorSet,
				0u,
				0u,
				1u,
				vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
				&imageInfo,
				DE_NULL,
				DE_NULL,
			};

			vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
		}
	}
}
7018 
submit(SubmitContext & context)7019 void RenderFragmentStorageImage::submit (SubmitContext& context)
7020 {
7021 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
7022 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
7023 
7024 	vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
7025 
7026 	vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &(*m_descriptorSet), 0u, DE_NULL);
7027 	vkd.cmdDraw(commandBuffer, 6, 1, 0, 0);
7028 }
7029 
verify(VerifyRenderPassContext & context,size_t)7030 void RenderFragmentStorageImage::verify (VerifyRenderPassContext& context, size_t)
7031 {
7032 	const UVec2		size			= UVec2(context.getReferenceImage().getWidth(), context.getReferenceImage().getHeight());
7033 	const deUint32	valuesPerPixel	= de::max<deUint32>(1u, (size.x() * size.y()) / (256u * 256u));
7034 
7035 	for (int y = 0; y < context.getReferenceTarget().getSize().y(); y++)
7036 	for (int x = 0; x < context.getReferenceTarget().getSize().x(); x++)
7037 	{
7038 		UVec4	value	= UVec4(x, y, 0u, 0u);
7039 
7040 		for (deUint32 i = 0; i < valuesPerPixel; i++)
7041 		{
7042 			const UVec2	pos			= UVec2(value.z() * 256u + (value.x() ^ value.z()), value.w() * 256u + (value.y() ^ value.w()));
7043 			const Vec4	floatValue	= context.getReferenceImage().getAccess().getPixel(pos.x() % size.x(), pos.y() % size.y());
7044 
7045 			value = UVec4((deUint32)round(floatValue.x() * 255.0f),
7046 						  (deUint32)round(floatValue.y() * 255.0f),
7047 						  (deUint32)round(floatValue.z() * 255.0f),
7048 						  (deUint32)round(floatValue.w() * 255.0f));
7049 
7050 		}
7051 		context.getReferenceTarget().getAccess().setPixel(value.asFloat() / Vec4(255.0f), x, y);
7052 	}
7053 }
7054 
// Render pass command that draws a full-screen quad reading the tested image
// through a combined image sampler in the fragment shader.
class RenderFragmentSampledImage : public RenderPassCommand
{
public:
				RenderFragmentSampledImage	(void) {}
				~RenderFragmentSampledImage	(void);

	const char*	getName						(void) const { return "RenderFragmentSampledImage"; }
	void		logPrepare					(TestLog&, size_t) const;
	void		logSubmit					(TestLog&, size_t) const;
	void		prepare						(PrepareRenderPassContext&);
	void		submit						(SubmitContext& context);
	void		verify						(VerifyRenderPassContext&, size_t);

private:
	PipelineResources				m_resources;
	vk::Move<vk::VkDescriptorPool>	m_descriptorPool;
	vk::Move<vk::VkDescriptorSet>	m_descriptorSet;	// Single set; RAII-managed (pool has FREE_DESCRIPTOR_SET flag).
	vk::Move<vk::VkImageView>		m_imageView;		// View of the image under test, created in prepare().
	vk::Move<vk::VkSampler>			m_sampler;			// Nearest-filter sampler, created in prepare().
};
7075 
// Empty out-of-line destructor: all members are vk::Move wrappers that
// release their handles automatically.
RenderFragmentSampledImage::~RenderFragmentSampledImage (void)
{
}
7079 
logPrepare(TestLog & log,size_t commandIndex) const7080 void RenderFragmentSampledImage::logPrepare (TestLog& log, size_t commandIndex) const
7081 {
7082 	log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render storage image." << TestLog::EndMessage;
7083 }
7084 
logSubmit(TestLog & log,size_t commandIndex) const7085 void RenderFragmentSampledImage::logSubmit (TestLog& log, size_t commandIndex) const
7086 {
7087 	log << TestLog::Message << commandIndex << ":" << getName() << " Render using storage image." << TestLog::EndMessage;
7088 }
7089 
// Build the pipeline, descriptor pool/set, image view and sampler needed to
// read the tested image as a combined image sampler from the fragment shader.
void RenderFragmentSampledImage::prepare (PrepareRenderPassContext& context)
{
	const vk::DeviceInterface&					vkd						= context.getContext().getDeviceInterface();
	const vk::VkDevice							device					= context.getContext().getDevice();
	const vk::VkRenderPass						renderPass				= context.getRenderPass();
	const deUint32								subpass					= 0;
	const vk::Unique<vk::VkShaderModule>		vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-quad.vert"), 0));
	const vk::Unique<vk::VkShaderModule>		fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("sampled-image.frag"), 0));
	vector<vk::VkDescriptorSetLayoutBinding>	bindings;

	// Single fragment-stage combined image sampler at binding 0.
	{
		const vk::VkDescriptorSetLayoutBinding binding =
		{
			0u,
			vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
			1,
			vk::VK_SHADER_STAGE_FRAGMENT_BIT,
			DE_NULL
		};

		bindings.push_back(binding);
	}

	// No push constants for this pipeline (count 0, range DE_NULL).
	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
								vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 0u, DE_NULL, m_resources);

	// Pool sized for exactly one combined image sampler descriptor.
	{
		const vk::VkDescriptorPoolSize			poolSizes		=
		{
			vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
			1
		};
		const vk::VkDescriptorPoolCreateInfo	createInfo		=
		{
			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
			DE_NULL,
			vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,

			1u,
			1u,
			&poolSizes,
		};

		m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
	}

	{
		const vk::VkDescriptorSetLayout			layout			= *m_resources.descriptorSetLayout;
		const vk::VkDescriptorSetAllocateInfo	allocateInfo	=
		{
			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
			DE_NULL,

			*m_descriptorPool,
			1,
			&layout
		};

		m_descriptorSet = vk::allocateDescriptorSet(vkd, device, &allocateInfo);

		{
			// 2D RGBA8 view over the whole image under test (single mip
			// level, single array layer).
			const vk::VkImageViewCreateInfo createInfo =
			{
				vk::VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
				DE_NULL,
				0u,

				context.getImage(),
				vk::VK_IMAGE_VIEW_TYPE_2D,
				vk::VK_FORMAT_R8G8B8A8_UNORM,
				vk::makeComponentMappingRGBA(),
				{
					vk::VK_IMAGE_ASPECT_COLOR_BIT,
					0u,
					1u,
					0u,
					1u
				}
			};

			m_imageView = vk::createImageView(vkd, device, &createInfo);
		}

		{
			// Nearest min/mag filtering, clamp-to-edge addressing, no
			// anisotropy and no compare — texels are fetched unfiltered.
			const vk::VkSamplerCreateInfo createInfo =
			{
				vk::VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
				DE_NULL,
				0u,

				vk::VK_FILTER_NEAREST,
				vk::VK_FILTER_NEAREST,

				vk::VK_SAMPLER_MIPMAP_MODE_LINEAR,
				vk::VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
				vk::VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
				vk::VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
				0.0f,
				VK_FALSE,
				1.0f,
				VK_FALSE,
				vk::VK_COMPARE_OP_ALWAYS,
				0.0f,
				0.0f,
				vk::VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK,
				VK_FALSE
			};

			m_sampler = vk::createSampler(vkd, device, &createInfo);
		}

		{
			// Bind sampler + view to binding 0 of the descriptor set.
			const vk::VkDescriptorImageInfo			imageInfo	=
			{
				*m_sampler,
				*m_imageView,
				context.getImageLayout()
			};
			const vk::VkWriteDescriptorSet			write		=
			{
				vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
				DE_NULL,
				*m_descriptorSet,
				0u,
				0u,
				1u,
				vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
				&imageInfo,
				DE_NULL,
				DE_NULL,
			};

			vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
		}
	}
}
7226 
submit(SubmitContext & context)7227 void RenderFragmentSampledImage::submit (SubmitContext& context)
7228 {
7229 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
7230 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
7231 
7232 	vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
7233 
7234 	vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &(*m_descriptorSet), 0u, DE_NULL);
7235 	vkd.cmdDraw(commandBuffer, 6u, 1u, 0u, 0u);
7236 }
7237 
verify(VerifyRenderPassContext & context,size_t)7238 void RenderFragmentSampledImage::verify (VerifyRenderPassContext& context, size_t)
7239 {
7240 	const UVec2		size			= UVec2(context.getReferenceImage().getWidth(), context.getReferenceImage().getHeight());
7241 	const deUint32	valuesPerPixel	= de::max<deUint32>(1u, (size.x() * size.y()) / (256u * 256u));
7242 
7243 	for (int y = 0; y < context.getReferenceTarget().getSize().y(); y++)
7244 	for (int x = 0; x < context.getReferenceTarget().getSize().x(); x++)
7245 	{
7246 		UVec4	value	= UVec4(x, y, 0u, 0u);
7247 
7248 		for (deUint32 i = 0; i < valuesPerPixel; i++)
7249 		{
7250 			const UVec2	pos			= UVec2(value.z() * 256u + (value.x() ^ value.z()), value.w() * 256u + (value.y() ^ value.w()));
7251 			const Vec4	floatValue	= context.getReferenceImage().getAccess().getPixel(pos.x() % size.x(), pos.y() % size.y());
7252 
7253 			value = UVec4((deUint32)round(floatValue.x() * 255.0f),
7254 						  (deUint32)round(floatValue.y() * 255.0f),
7255 						  (deUint32)round(floatValue.z() * 255.0f),
7256 						  (deUint32)round(floatValue.w() * 255.0f));
7257 
7258 		}
7259 
7260 		context.getReferenceTarget().getAccess().setPixel(value.asFloat() / Vec4(255.0f), x, y);
7261 	}
7262 }
7263 
// All operations the random test-case generator can emit. The valid subset at
// any point depends on the current Stage and tracked State (see getAvailableOps).
enum Op
{
	// Host-side memory mapping operations
	OP_MAP,
	OP_UNMAP,

	OP_MAP_FLUSH,
	OP_MAP_INVALIDATE,

	// Host-side accesses through a mapped pointer
	OP_MAP_READ,
	OP_MAP_WRITE,
	OP_MAP_MODIFY,

	// Buffer object lifetime
	OP_BUFFER_CREATE,
	OP_BUFFER_DESTROY,
	OP_BUFFER_BINDMEMORY,

	// Host synchronization with the device
	OP_QUEUE_WAIT_FOR_IDLE,
	OP_DEVICE_WAIT_FOR_IDLE,

	OP_COMMAND_BUFFER_BEGIN,
	OP_COMMAND_BUFFER_END,

	// Secondary, non render pass command buffers
	// Render pass secondary command buffers are not currently covered
	OP_SECONDARY_COMMAND_BUFFER_BEGIN,
	OP_SECONDARY_COMMAND_BUFFER_END,

	// Buffer transfer operations
	OP_BUFFER_FILL,
	OP_BUFFER_UPDATE,

	OP_BUFFER_COPY_TO_BUFFER,
	OP_BUFFER_COPY_FROM_BUFFER,

	OP_BUFFER_COPY_TO_IMAGE,
	OP_BUFFER_COPY_FROM_IMAGE,

	// Image object lifetime
	OP_IMAGE_CREATE,
	OP_IMAGE_DESTROY,
	OP_IMAGE_BINDMEMORY,

	OP_IMAGE_TRANSITION_LAYOUT,

	// Image transfer operations
	OP_IMAGE_COPY_TO_BUFFER,
	OP_IMAGE_COPY_FROM_BUFFER,

	OP_IMAGE_COPY_TO_IMAGE,
	OP_IMAGE_COPY_FROM_IMAGE,

	OP_IMAGE_BLIT_TO_IMAGE,
	OP_IMAGE_BLIT_FROM_IMAGE,

	OP_IMAGE_RESOLVE,

	// Pipeline barriers at different granularities (global memory barrier,
	// buffer memory barrier, image memory barrier)
	OP_PIPELINE_BARRIER_GLOBAL,
	OP_PIPELINE_BARRIER_BUFFER,
	OP_PIPELINE_BARRIER_IMAGE,

	// Renderpass operations
	OP_RENDERPASS_BEGIN,
	OP_RENDERPASS_END,

	// Commands inside render pass
	OP_RENDER_VERTEX_BUFFER,
	OP_RENDER_INDEX_BUFFER,

	OP_RENDER_VERTEX_UNIFORM_BUFFER,
	OP_RENDER_FRAGMENT_UNIFORM_BUFFER,

	OP_RENDER_VERTEX_UNIFORM_TEXEL_BUFFER,
	OP_RENDER_FRAGMENT_UNIFORM_TEXEL_BUFFER,

	OP_RENDER_VERTEX_STORAGE_BUFFER,
	OP_RENDER_FRAGMENT_STORAGE_BUFFER,

	OP_RENDER_VERTEX_STORAGE_TEXEL_BUFFER,
	OP_RENDER_FRAGMENT_STORAGE_TEXEL_BUFFER,

	OP_RENDER_VERTEX_STORAGE_IMAGE,
	OP_RENDER_FRAGMENT_STORAGE_IMAGE,

	OP_RENDER_VERTEX_SAMPLED_IMAGE,
	OP_RENDER_FRAGMENT_SAMPLED_IMAGE,
};
7348 
// Current recording context for the operation sequence; constrains which Ops
// are valid next (e.g. render commands are only valid in STAGE_RENDER_PASS).
enum Stage
{
	STAGE_HOST,							// No command buffer open; host-side ops only
	STAGE_COMMAND_BUFFER,				// Recording into a primary command buffer
	STAGE_SECONDARY_COMMAND_BUFFER,		// Recording into a secondary command buffer

	STAGE_RENDER_PASS					// Inside a render pass instance
};
7357 
getWriteAccessFlags(void)7358 vk::VkAccessFlags getWriteAccessFlags (void)
7359 {
7360 	return vk::VK_ACCESS_SHADER_WRITE_BIT
7361 		| vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT
7362 		| vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT
7363 		| vk::VK_ACCESS_TRANSFER_WRITE_BIT
7364 		| vk::VK_ACCESS_HOST_WRITE_BIT
7365 		| vk::VK_ACCESS_MEMORY_WRITE_BIT;
7366 }
7367 
isWriteAccess(vk::VkAccessFlagBits access)7368 bool isWriteAccess (vk::VkAccessFlagBits access)
7369 {
7370 	return (getWriteAccessFlags() & access) != 0;
7371 }
7372 
// Model of the availability/visibility state of memory operations, used to
// decide when accesses are valid and which barriers are required. Tracks, per
// pipeline stage and access type, which writes are unflushed (unavailable),
// which operations are invisible, and which stages have incomplete work.
class CacheState
{
public:
									CacheState				(vk::VkPipelineStageFlags allowedStages, vk::VkAccessFlags allowedAccesses);

	// True if an access of the given type at the given stage would observe
	// well-defined data (everything relevant is available, visible and complete).
	bool							isValid					(vk::VkPipelineStageFlagBits	stage,
															 vk::VkAccessFlagBits			access) const;

	// Record that an access of the given type was performed at the given stage.
	void							perform					(vk::VkPipelineStageFlagBits	stage,
															 vk::VkAccessFlagBits			access);

	// Model the implicit host-write flush performed by queue submission.
	void							submitCommandBuffer		(void);
	// Model vkQueueWaitIdle / vkDeviceWaitIdle: all device work completes.
	void							waitForIdle				(void);

	// Compute the src/dst stage and access masks of a barrier that would make
	// everything available and visible (i.e. return the state to clean).
	void							getFullBarrier			(vk::VkPipelineStageFlags&	srcStages,
															 vk::VkAccessFlags&			srcAccesses,
															 vk::VkPipelineStageFlags&	dstStages,
															 vk::VkAccessFlags&			dstAccesses) const;

	// Apply a pipeline barrier (memory dependency) to the tracked state.
	void							barrier					(vk::VkPipelineStageFlags	srcStages,
															 vk::VkAccessFlags			srcAccesses,
															 vk::VkPipelineStageFlags	dstStages,
															 vk::VkAccessFlags			dstAccesses);

	// Apply a barrier that also performs an image layout transition; requires
	// the barrier to satisfy checkImageLayoutBarrier().
	void							imageLayoutBarrier		(vk::VkPipelineStageFlags	srcStages,
															 vk::VkAccessFlags			srcAccesses,
															 vk::VkPipelineStageFlags	dstStages,
															 vk::VkAccessFlags			dstAccesses);

	// Debug-only validation that a layout-transition barrier is legal given
	// the current state (all stages complete, writes flushed).
	void							checkImageLayoutBarrier	(vk::VkPipelineStageFlags	srcStages,
															 vk::VkAccessFlags			srcAccesses,
															 vk::VkPipelineStageFlags	dstStages,
															 vk::VkAccessFlags			dstAccesses);

	// Everything is clean and there is no need for barriers
	bool							isClean					(void) const;

	vk::VkPipelineStageFlags		getAllowedStages		(void) const { return m_allowedStages; }
	vk::VkAccessFlags				getAllowedAcceses		(void) const { return m_allowedAccesses; }
private:
	// Limit which stages and accesses are used by the CacheState tracker
	const vk::VkPipelineStageFlags	m_allowedStages;
	const vk::VkAccessFlags			m_allowedAccesses;

	// [dstStage][srcStage][dstAccess] = srcAccesses
	// In stage dstStage write srcAccesses from srcStage are not yet available for dstAccess
	vk::VkAccessFlags				m_unavailableWriteOperations[PIPELINESTAGE_LAST][PIPELINESTAGE_LAST][ACCESS_LAST];
	// Latest pipeline transition is not available in stage
	bool							m_unavailableLayoutTransition[PIPELINESTAGE_LAST];
	// [dstStage] = dstAccesses
	// In stage dstStage ops with dstAccesses are not yet visible
	vk::VkAccessFlags				m_invisibleOperations[PIPELINESTAGE_LAST];

	// [dstStage] = srcStage
	// Memory operation in srcStage have not completed before dstStage
	vk::VkPipelineStageFlags		m_incompleteOperations[PIPELINESTAGE_LAST];
};
7430 
// Initialize the tracker to a fully clean state: all operations visible and
// complete, no pending layout transitions, no unavailable writes for any
// allowed (dstStage, srcStage, dstAccess) combination.
CacheState::CacheState (vk::VkPipelineStageFlags allowedStages, vk::VkAccessFlags allowedAccesses)
	: m_allowedStages	(allowedStages)
	, m_allowedAccesses	(allowedAccesses)
{
	// Iterate over each single-bit stage flag contained in m_allowedStages
	for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
	{
		if ((dstStage_ & m_allowedStages) == 0)
			continue;

		const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);


		// All operations are initially visible
		m_invisibleOperations[dstStage] = 0;

		// There are no incomplete read operations initially
		m_incompleteOperations[dstStage] = 0;

		// There are no incomplete layout transitions
		m_unavailableLayoutTransition[dstStage] = false;

		for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
		{
			if ((srcStage_ & m_allowedStages) == 0)
				continue;

			const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);

			// There are no write operations that are not yet available
			// initially.
			for (vk::VkAccessFlags dstAccess_ = 1; dstAccess_ <= m_allowedAccesses; dstAccess_ <<= 1)
			{
				if ((dstAccess_ & m_allowedAccesses) == 0)
					continue;

				const Access dstAccess = accessFlagToAccess((vk::VkAccessFlagBits)dstAccess_);

				m_unavailableWriteOperations[dstStage][srcStage][dstAccess] = 0;
			}
		}
	}
}
7473 
isValid(vk::VkPipelineStageFlagBits stage,vk::VkAccessFlagBits access) const7474 bool CacheState::isValid (vk::VkPipelineStageFlagBits	stage,
7475 						  vk::VkAccessFlagBits			access) const
7476 {
7477 	DE_ASSERT((access & (~m_allowedAccesses)) == 0);
7478 	DE_ASSERT((stage & (~m_allowedStages)) == 0);
7479 
7480 	const PipelineStage	dstStage	= pipelineStageFlagToPipelineStage(stage);
7481 
7482 	// Previous operations are not visible to access on stage
7483 	if (m_unavailableLayoutTransition[dstStage] || (m_invisibleOperations[dstStage] & access) != 0)
7484 		return false;
7485 
7486 	if (isWriteAccess(access))
7487 	{
7488 		// Memory operations from other stages have not completed before
7489 		// dstStage
7490 		if (m_incompleteOperations[dstStage] != 0)
7491 			return false;
7492 	}
7493 
7494 	return true;
7495 }
7496 
// Record a memory access performed at the given stage. Any access marks the
// stage incomplete with respect to all other stages; a write additionally
// makes all operations invisible everywhere and records itself as an
// unavailable write for every (dstStage, dstAccess) pair until flushed.
void CacheState::perform (vk::VkPipelineStageFlagBits	stage,
						  vk::VkAccessFlagBits			access)
{
	DE_ASSERT((access & (~m_allowedAccesses)) == 0);
	DE_ASSERT((stage & (~m_allowedStages)) == 0);

	const PipelineStage srcStage = pipelineStageFlagToPipelineStage(stage);

	for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
	{
		if ((dstStage_ & m_allowedStages) == 0)
			continue;

		const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);

		// Mark stage as incomplete for all stages
		m_incompleteOperations[dstStage] |= stage;

		if (isWriteAccess(access))
		{
			// Mark all accesses from all stages invisible
			m_invisibleOperations[dstStage] |= m_allowedAccesses;

			// Mark write access from srcStage unavailable to all stages for all accesses
			for (vk::VkAccessFlags dstAccess_ = 1; dstAccess_ <= m_allowedAccesses; dstAccess_ <<= 1)
			{
				if ((dstAccess_ & m_allowedAccesses) == 0)
					continue;

				const Access dstAccess = accessFlagToAccess((vk::VkAccessFlagBits)dstAccess_);

				m_unavailableWriteOperations[dstStage][srcStage][dstAccess] |= access;
			}
		}
	}
}
7533 
submitCommandBuffer(void)7534 void CacheState::submitCommandBuffer (void)
7535 {
7536 	// Flush all host writes and reads
7537 	barrier(m_allowedStages & vk::VK_PIPELINE_STAGE_HOST_BIT,
7538 			m_allowedAccesses & (vk::VK_ACCESS_HOST_READ_BIT | vk::VK_ACCESS_HOST_WRITE_BIT),
7539 			m_allowedStages,
7540 			m_allowedAccesses);
7541 }
7542 
waitForIdle(void)7543 void CacheState::waitForIdle (void)
7544 {
7545 	// Make all writes available
7546 	barrier(m_allowedStages,
7547 			m_allowedAccesses & getWriteAccessFlags(),
7548 			m_allowedStages,
7549 			0);
7550 
7551 	// Make all writes visible on device side
7552 	barrier(m_allowedStages,
7553 			0,
7554 			m_allowedStages & (~vk::VK_PIPELINE_STAGE_HOST_BIT),
7555 			m_allowedAccesses);
7556 }
7557 
// Compute the stage/access masks of a single barrier that would return the
// tracker to a clean state: completing all incomplete operations, making all
// unavailable writes available and all invisible operations visible.
void CacheState::getFullBarrier (vk::VkPipelineStageFlags&	srcStages,
								 vk::VkAccessFlags&			srcAccesses,
								 vk::VkPipelineStageFlags&	dstStages,
								 vk::VkAccessFlags&			dstAccesses) const
{
	srcStages	= 0;
	srcAccesses	= 0;
	dstStages	= 0;
	dstAccesses	= 0;

	for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
	{
		if ((dstStage_ & m_allowedStages) == 0)
			continue;

		const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);

		// Make sure all previous operation are complete in all stages
		if (m_incompleteOperations[dstStage])
		{
			dstStages |= dstStage_;
			srcStages |= m_incompleteOperations[dstStage];
		}

		// Make sure all read operations are visible in dstStage
		if (m_invisibleOperations[dstStage])
		{
			dstStages |= dstStage_;
			dstAccesses |= m_invisibleOperations[dstStage];
		}

		// Make sure all write operations from all stages are available
		for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
		{
			if ((srcStage_ & m_allowedStages) == 0)
				continue;

			const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);

			for (vk::VkAccessFlags dstAccess_ = 1; dstAccess_ <= m_allowedAccesses; dstAccess_ <<= 1)
			{
				if ((dstAccess_ & m_allowedAccesses) == 0)
					continue;

				const Access dstAccess = accessFlagToAccess((vk::VkAccessFlagBits)dstAccess_);

				if (m_unavailableWriteOperations[dstStage][srcStage][dstAccess])
				{
					dstStages |= dstStage_;
					// NOTE(review): 'dstStage_' is OR'd into srcStages here even
					// though the unavailable write originates from srcStage_;
					// one might expect 'srcStages |= srcStage_'. Verify against
					// how barrier() indexes m_unavailableWriteOperations before
					// changing anything.
					srcStages |= dstStage_;
					srcAccesses |= m_unavailableWriteOperations[dstStage][srcStage][dstAccess];
				}
			}

			if (m_unavailableLayoutTransition[dstStage] && !m_unavailableLayoutTransition[srcStage])
			{
				// Add dependency between srcStage and dstStage if layout transition has not completed in dstStage,
				// but has completed in srcStage.
				dstStages |= dstStage_;
				srcStages |= dstStage_;
			}
		}
	}

	// The computed masks must stay within the tracked subset.
	DE_ASSERT((srcStages & (~m_allowedStages)) == 0);
	DE_ASSERT((srcAccesses & (~m_allowedAccesses)) == 0);
	DE_ASSERT((dstStages & (~m_allowedStages)) == 0);
	DE_ASSERT((dstAccesses & (~m_allowedAccesses)) == 0);
}
7627 
// Debug-only validation that a layout-transition barrier is legal in the
// current state: every stage must have completed before (or be included in)
// srcStages, and if no write flush is part of the barrier, all writes must
// already be available somewhere. Compiles to assertions only under DE_DEBUG.
void CacheState::checkImageLayoutBarrier (vk::VkPipelineStageFlags	srcStages,
										  vk::VkAccessFlags			srcAccesses,
										  vk::VkPipelineStageFlags	dstStages,
										  vk::VkAccessFlags			dstAccesses)
{
	DE_ASSERT((srcStages & (~m_allowedStages)) == 0);
	DE_ASSERT((srcAccesses & (~m_allowedAccesses)) == 0);
	DE_ASSERT((dstStages & (~m_allowedStages)) == 0);
	DE_ASSERT((dstAccesses & (~m_allowedAccesses)) == 0);

	// Parameters are only inspected in debug builds below.
	DE_UNREF(srcStages);
	DE_UNREF(srcAccesses);

	DE_UNREF(dstStages);
	DE_UNREF(dstAccesses);

#if defined(DE_DEBUG)
	// Check that all stages have completed before srcStages or are in srcStages.
	{
		vk::VkPipelineStageFlags completedStages = srcStages;

		for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= srcStages; srcStage_ <<= 1)
		{
			if ((srcStage_ & srcStages) == 0)
				continue;

			const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);

			// A stage counts as completed if it is not pending before srcStage.
			completedStages |= (~m_incompleteOperations[srcStage]);
		}

		DE_ASSERT((completedStages & m_allowedStages) == m_allowedStages);
	}

	// Check that any write is available at least in one stage. Since all stages are complete even single flush is enough.
	if ((getWriteAccessFlags() & m_allowedAccesses) != 0 && (srcAccesses & getWriteAccessFlags()) == 0)
	{
		bool anyWriteAvailable = false;

		for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
		{
			if ((dstStage_ & m_allowedStages) == 0)
				continue;

			const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);

			for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
			{
				if ((srcStage_ & m_allowedStages) == 0)
					continue;

				const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);

				for (vk::VkAccessFlags dstAccess_ = 1; dstAccess_ <= m_allowedAccesses; dstAccess_ <<= 1)
				{
					if ((dstAccess_ & m_allowedAccesses) == 0)
						continue;

					const Access dstAccess = accessFlagToAccess((vk::VkAccessFlagBits)dstAccess_);

					if (m_unavailableWriteOperations[dstStage][srcStage][dstAccess] != (getWriteAccessFlags() & m_allowedAccesses))
					{
						anyWriteAvailable = true;
						// NOTE(review): this break only exits the innermost
						// (access) loop; the outer stage loops keep scanning.
						// Harmless for correctness of the assertion, just
						// extra work in this debug-only check.
						break;
					}
				}
			}
		}

		DE_ASSERT(anyWriteAvailable);
	}
#endif
}
7701 
// Apply a barrier that performs an image layout transition. The transition
// acts as a full write to the image: afterwards all previously pending writes
// are considered available, but the transition itself is only visible in the
// stages/accesses named by dstStages/dstAccesses.
void CacheState::imageLayoutBarrier (vk::VkPipelineStageFlags	srcStages,
									 vk::VkAccessFlags			srcAccesses,
									 vk::VkPipelineStageFlags	dstStages,
									 vk::VkAccessFlags			dstAccesses)
{
	// Debug-only legality check (all stages complete, writes flushed).
	checkImageLayoutBarrier(srcStages, srcAccesses, dstStages, dstAccesses);

	for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
	{
		if ((dstStage_ & m_allowedStages) == 0)
			continue;

		const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);

		// All stages are incomplete after the barrier except each dstStage in it self.
		m_incompleteOperations[dstStage] = m_allowedStages & (~dstStage_);

		// All memory operations are invisible unless they are listed in dstAccess
		m_invisibleOperations[dstStage] = m_allowedAccesses & (~dstAccesses);

		// Layout transition is unavailable in stage unless it was listed in dstStages
		m_unavailableLayoutTransition[dstStage]= (dstStage_ & dstStages) == 0;

		for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
		{
			if ((srcStage_ & m_allowedStages) == 0)
				continue;

			const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);

			// All write operations are available after layout transition
			for (vk::VkAccessFlags dstAccess_ = 1; dstAccess_ <= m_allowedAccesses; dstAccess_ <<= 1)
			{
				if ((dstAccess_ & m_allowedAccesses) == 0)
					continue;

				const Access dstAccess = accessFlagToAccess((vk::VkAccessFlagBits)dstAccess_);

				m_unavailableWriteOperations[dstStage][srcStage][dstAccess] = 0;
			}
		}
	}
}
7745 
// Apply a pipeline barrier (execution + memory dependency) to the tracked
// state. First propagates state transitively (what srcStages had already
// observed is now observed by dstStages), then applies the barrier proper:
// completes srcStages before dstStages, makes srcAccesses writes available,
// and — once all writes are available in a dst stage — makes dstAccesses
// visible there.
void CacheState::barrier (vk::VkPipelineStageFlags	srcStages,
						  vk::VkAccessFlags			srcAccesses,
						  vk::VkPipelineStageFlags	dstStages,
						  vk::VkAccessFlags			dstAccesses)
{
	DE_ASSERT((srcStages & (~m_allowedStages)) == 0);
	DE_ASSERT((srcAccesses & (~m_allowedAccesses)) == 0);
	DE_ASSERT((dstStages & (~m_allowedStages)) == 0);
	DE_ASSERT((dstAccesses & (~m_allowedAccesses)) == 0);

	// Transitivity
	{
		// Snapshot the pre-barrier state so the propagation below reads
		// consistent values instead of partially-updated ones.
		vk::VkPipelineStageFlags		oldIncompleteOperations[PIPELINESTAGE_LAST];
		vk::VkAccessFlags				oldUnavailableWriteOperations[PIPELINESTAGE_LAST][PIPELINESTAGE_LAST][ACCESS_LAST];
		bool							oldUnavailableLayoutTransition[PIPELINESTAGE_LAST];

		deMemcpy(oldIncompleteOperations, m_incompleteOperations, sizeof(oldIncompleteOperations));
		deMemcpy(oldUnavailableWriteOperations, m_unavailableWriteOperations, sizeof(oldUnavailableWriteOperations));
		deMemcpy(oldUnavailableLayoutTransition, m_unavailableLayoutTransition, sizeof(oldUnavailableLayoutTransition));

		for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= srcStages; srcStage_ <<= 1)
		{
			if ((srcStage_ & srcStages) == 0)
				continue;

			const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);

			for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= dstStages; dstStage_ <<= 1)
			{
				if ((dstStage_ & dstStages) == 0)
					continue;

				const PipelineStage	dstStage			= pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);

				// Stages that have completed before srcStage have also completed before dstStage
				m_incompleteOperations[dstStage] &= oldIncompleteOperations[srcStage];

				// Image layout transition in srcStage are now available in dstStage
				m_unavailableLayoutTransition[dstStage] &= oldUnavailableLayoutTransition[srcStage];

				for (vk::VkPipelineStageFlags sharedStage_ = 1; sharedStage_ <= m_allowedStages; sharedStage_ <<= 1)
				{
					if ((sharedStage_ & m_allowedStages) == 0)
						continue;

					const PipelineStage	sharedStage			= pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)sharedStage_);

					// Writes that are available in srcStage are also available in dstStage
					for (vk::VkAccessFlags sharedAccess_ = 1; sharedAccess_ <= m_allowedAccesses; sharedAccess_ <<= 1)
					{
						if ((sharedAccess_ & m_allowedAccesses) == 0)
							continue;

						const Access sharedAccess = accessFlagToAccess((vk::VkAccessFlagBits)sharedAccess_);

						m_unavailableWriteOperations[dstStage][sharedStage][sharedAccess] &= oldUnavailableWriteOperations[srcStage][sharedStage][sharedAccess];
					}
				}
			}
		}
	}

	// Barrier
	for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= dstStages; dstStage_ <<= 1)
	{
		const PipelineStage	dstStage			= pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
		bool				allWritesAvailable	= true;

		if ((dstStage_ & dstStages) == 0)
			continue;

		// Operations in srcStages have completed before any stage in dstStages
		m_incompleteOperations[dstStage] &= ~srcStages;

		for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
		{
			if ((srcStage_ & m_allowedStages) == 0)
				continue;

			const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);

			// Make srcAccesses from srcStage available in dstStage for dstAccess
			for (vk::VkAccessFlags dstAccess_ = 1; dstAccess_ <= m_allowedAccesses; dstAccess_ <<= 1)
			{
				if ((dstAccess_ & m_allowedAccesses) == 0)
					continue;

				const Access dstAccess = accessFlagToAccess((vk::VkAccessFlagBits)dstAccess_);

				if (((srcStage_ & srcStages) != 0) && ((dstAccess_ & dstAccesses) != 0))
					m_unavailableWriteOperations[dstStage][srcStage][dstAccess] &= ~srcAccesses;

				// Track whether any write, from any stage, remains unavailable here.
				if (m_unavailableWriteOperations[dstStage][srcStage][dstAccess] != 0)
					allWritesAvailable = false;
			}
		}

		// If all writes are available in dstStage make dstAccesses also visible
		if (allWritesAvailable)
			m_invisibleOperations[dstStage] &= ~dstAccesses;
	}
}
7848 
isClean(void) const7849 bool CacheState::isClean (void) const
7850 {
7851 	for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
7852 	{
7853 		if ((dstStage_ & m_allowedStages) == 0)
7854 			continue;
7855 
7856 		const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
7857 
7858 		// Some operations are not visible to some stages
7859 		if (m_invisibleOperations[dstStage] != 0)
7860 			return false;
7861 
7862 		// There are operation that have not completed yet
7863 		if (m_incompleteOperations[dstStage] != 0)
7864 			return false;
7865 
7866 		// Layout transition has not completed yet
7867 		if (m_unavailableLayoutTransition[dstStage])
7868 			return false;
7869 
7870 		for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
7871 		{
7872 			if ((srcStage_ & m_allowedStages) == 0)
7873 				continue;
7874 
7875 			const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7876 
7877 			for (vk::VkAccessFlags dstAccess_ = 1; dstAccess_ <= m_allowedAccesses; dstAccess_ <<= 1)
7878 			{
7879 				if ((dstAccess_ & m_allowedAccesses) == 0)
7880 					continue;
7881 
7882 				const Access dstAccess = accessFlagToAccess((vk::VkAccessFlagBits)dstAccess_);
7883 
7884 				// Some write operations are not available yet
7885 				if (m_unavailableWriteOperations[dstStage][srcStage][dstAccess] != 0)
7886 					return false;
7887 			}
7888 		}
7889 	}
7890 
7891 	return true;
7892 }
7893 
layoutSupportedByUsage(Usage usage,vk::VkImageLayout layout)7894 bool layoutSupportedByUsage (Usage usage, vk::VkImageLayout layout)
7895 {
7896 	switch (layout)
7897 	{
7898 		case vk::VK_IMAGE_LAYOUT_GENERAL:
7899 			return true;
7900 
7901 		case vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
7902 			return (usage & USAGE_COLOR_ATTACHMENT) != 0;
7903 
7904 		case vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
7905 			return (usage & USAGE_DEPTH_STENCIL_ATTACHMENT) != 0;
7906 
7907 		case vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
7908 			return (usage & USAGE_DEPTH_STENCIL_ATTACHMENT) != 0;
7909 
7910 		case vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
7911 			// \todo [2016-03-09 mika] Should include input attachment
7912 			return (usage & USAGE_SAMPLED_IMAGE) != 0;
7913 
7914 		case vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
7915 			return (usage & USAGE_TRANSFER_SRC) != 0;
7916 
7917 		case vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
7918 			return (usage & USAGE_TRANSFER_DST) != 0;
7919 
7920 		case vk::VK_IMAGE_LAYOUT_PREINITIALIZED:
7921 			return true;
7922 
7923 		default:
7924 			DE_FATAL("Unknown layout");
7925 			return false;
7926 	}
7927 }
7928 
getNumberOfSupportedLayouts(Usage usage)7929 size_t getNumberOfSupportedLayouts (Usage usage)
7930 {
7931 	const vk::VkImageLayout layouts[] =
7932 	{
7933 		vk::VK_IMAGE_LAYOUT_GENERAL,
7934 		vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
7935 		vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
7936 		vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL,
7937 		vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
7938 		vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
7939 		vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
7940 	};
7941 	size_t supportedLayoutCount = 0;
7942 
7943 	for (size_t layoutNdx = 0; layoutNdx < DE_LENGTH_OF_ARRAY(layouts); layoutNdx++)
7944 	{
7945 		const vk::VkImageLayout layout = layouts[layoutNdx];
7946 
7947 		if (layoutSupportedByUsage(usage, layout))
7948 			supportedLayoutCount++;
7949 	}
7950 
7951 	return supportedLayoutCount;
7952 }
7953 
getRandomNextLayout(de::Random & rng,Usage usage,vk::VkImageLayout previousLayout)7954 vk::VkImageLayout getRandomNextLayout (de::Random&			rng,
7955 									   Usage				usage,
7956 									   vk::VkImageLayout	previousLayout)
7957 {
7958 	const vk::VkImageLayout	layouts[] =
7959 	{
7960 		vk::VK_IMAGE_LAYOUT_GENERAL,
7961 		vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
7962 		vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
7963 		vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL,
7964 		vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
7965 		vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
7966 		vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
7967 	};
7968 	const size_t			supportedLayoutCount = getNumberOfSupportedLayouts(usage);
7969 
7970 	DE_ASSERT(supportedLayoutCount > 0);
7971 
7972 	size_t nextLayoutNdx = ((size_t)rng.getUint32()) % (previousLayout == vk::VK_IMAGE_LAYOUT_UNDEFINED
7973 														? supportedLayoutCount
7974 														: supportedLayoutCount - 1);
7975 
7976 	for (size_t layoutNdx = 0; layoutNdx < DE_LENGTH_OF_ARRAY(layouts); layoutNdx++)
7977 	{
7978 		const vk::VkImageLayout layout = layouts[layoutNdx];
7979 
7980 		if (layoutSupportedByUsage(usage, layout) && layout != previousLayout)
7981 		{
7982 			if (nextLayoutNdx == 0)
7983 				return layout;
7984 			else
7985 				nextLayoutNdx--;
7986 		}
7987 	}
7988 
7989 	DE_FATAL("Unreachable");
7990 	return vk::VK_IMAGE_LAYOUT_UNDEFINED;
7991 }
7992 
// Complete tracked state of the memory object under test while the random
// operation sequence is generated; getAvailableOps() consults it to decide
// which operations are valid next.
struct State
{
	State (Usage usage, deUint32 seed)
		: stage							(STAGE_HOST)
		, cache							(usageToStageFlags(usage), usageToAccessFlags(usage))
		, rng							(seed)
		, mapped						(false)
		, hostInvalidated				(true)
		, hostFlushed					(true)
		, memoryDefined					(false)
		, hasBuffer						(false)
		, hasBoundBufferMemory			(false)
		, hasImage						(false)
		, hasBoundImageMemory			(false)
		, imageLayout					(vk::VK_IMAGE_LAYOUT_UNDEFINED)
		, imageDefined					(false)
		, queueIdle						(true)
		, deviceIdle					(true)
		, commandBufferIsEmpty			(true)
		, primaryCommandBufferIsEmpty	(true)
		, renderPassIsEmpty				(true)
	{
	}

	Stage				stage;					// Where the next operation will be recorded (host, cmd buffer, render pass)
	CacheState			cache;					// Availability/visibility tracker for memory operations
	de::Random			rng;					// Random stream driving operation selection

	bool				mapped;					// Memory is currently host-mapped
	bool				hostInvalidated;		// Host caches invalidated since last device write
	bool				hostFlushed;			// Host writes have been flushed
	bool				memoryDefined;			// Memory contents are defined (written at least once)

	bool				hasBuffer;				// A buffer object currently exists
	bool				hasBoundBufferMemory;	// ...and has memory bound to it

	bool				hasImage;				// An image object currently exists
	bool				hasBoundImageMemory;	// ...and has memory bound to it
	vk::VkImageLayout	imageLayout;			// Current image layout (UNDEFINED until first transition)
	bool				imageDefined;			// Image contents are defined

	bool				queueIdle;				// No work pending on the queue
	bool				deviceIdle;				// No work pending on the device

	bool				commandBufferIsEmpty;	// Nothing recorded in the open command buffer yet

	// a copy of commandBufferIsEmpty value, when secondary command buffer is in use
	bool				primaryCommandBufferIsEmpty;

	bool				renderPassIsEmpty;		// Nothing recorded in the open render pass yet
};
8044 
getAvailableOps(const State & state,bool supportsBuffers,bool supportsImages,Usage usage,vector<Op> & ops)8045 void getAvailableOps (const State& state, bool supportsBuffers, bool supportsImages, Usage usage, vector<Op>& ops)
8046 {
8047 	if (state.stage == STAGE_HOST)
8048 	{
8049 		if (usage & (USAGE_HOST_READ | USAGE_HOST_WRITE))
8050 		{
8051 			// Host memory operations
8052 			if (state.mapped)
8053 			{
8054 				ops.push_back(OP_UNMAP);
8055 
8056 				// Avoid flush and finish if they are not needed
8057 				if (!state.hostFlushed)
8058 					ops.push_back(OP_MAP_FLUSH);
8059 
8060 				if (!state.hostInvalidated
8061 					&& state.queueIdle
8062 					&& ((usage & USAGE_HOST_READ) == 0
8063 						|| state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_READ_BIT))
8064 					&& ((usage & USAGE_HOST_WRITE) == 0
8065 						|| state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_WRITE_BIT)))
8066 				{
8067 					ops.push_back(OP_MAP_INVALIDATE);
8068 				}
8069 
8070 				if (usage & USAGE_HOST_READ
8071 					&& usage & USAGE_HOST_WRITE
8072 					&& state.memoryDefined
8073 					&& state.hostInvalidated
8074 					&& state.queueIdle
8075 					&& state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_WRITE_BIT)
8076 					&& state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_READ_BIT))
8077 				{
8078 					ops.push_back(OP_MAP_MODIFY);
8079 				}
8080 
8081 				if (usage & USAGE_HOST_READ
8082 					&& state.memoryDefined
8083 					&& state.hostInvalidated
8084 					&& state.queueIdle
8085 					&& state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_READ_BIT))
8086 				{
8087 					ops.push_back(OP_MAP_READ);
8088 				}
8089 
8090 				if (usage & USAGE_HOST_WRITE
8091 					&& state.hostInvalidated
8092 					&& state.queueIdle
8093 					&& state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_WRITE_BIT))
8094 				{
8095 					ops.push_back(OP_MAP_WRITE);
8096 				}
8097 			}
8098 			else
8099 				ops.push_back(OP_MAP);
8100 		}
8101 
8102 		if (state.hasBoundBufferMemory && state.queueIdle)
8103 		{
8104 			// \note Destroy only buffers after they have been bound
8105 			ops.push_back(OP_BUFFER_DESTROY);
8106 		}
8107 		else
8108 		{
8109 			if (state.hasBuffer)
8110 			{
8111 				if (!state.hasBoundBufferMemory)
8112 					ops.push_back(OP_BUFFER_BINDMEMORY);
8113 			}
8114 			else if (!state.hasImage && supportsBuffers)	// Avoid creating buffer if there is already image
8115 				ops.push_back(OP_BUFFER_CREATE);
8116 		}
8117 
8118 		if (state.hasBoundImageMemory && state.queueIdle)
8119 		{
8120 			// \note Destroy only image after they have been bound
8121 			ops.push_back(OP_IMAGE_DESTROY);
8122 		}
8123 		else
8124 		{
8125 			if (state.hasImage)
8126 			{
8127 				if (!state.hasBoundImageMemory)
8128 					ops.push_back(OP_IMAGE_BINDMEMORY);
8129 			}
8130 			else if (!state.hasBuffer && supportsImages)	// Avoid creating image if there is already buffer
8131 				ops.push_back(OP_IMAGE_CREATE);
8132 		}
8133 
8134 		// Host writes must be flushed before GPU commands and there must be
8135 		// buffer or image for GPU commands
8136 		if (state.hostFlushed
8137 			&& (state.memoryDefined || supportsDeviceBufferWrites(usage) || state.imageDefined || supportsDeviceImageWrites(usage))
8138 			&& (state.hasBoundBufferMemory || state.hasBoundImageMemory) // Avoid command buffers if there is no object to use
8139 			&& (usageToStageFlags(usage) & (~vk::VK_PIPELINE_STAGE_HOST_BIT)) != 0) // Don't start command buffer if there are no ways to use memory from gpu
8140 		{
8141 			ops.push_back(OP_COMMAND_BUFFER_BEGIN);
8142 		}
8143 
8144 		if (!state.deviceIdle)
8145 			ops.push_back(OP_DEVICE_WAIT_FOR_IDLE);
8146 
8147 		if (!state.queueIdle)
8148 			ops.push_back(OP_QUEUE_WAIT_FOR_IDLE);
8149 	}
8150 	else if (state.stage == STAGE_COMMAND_BUFFER)
8151 	{
8152 		if (!state.cache.isClean())
8153 		{
8154 			ops.push_back(OP_PIPELINE_BARRIER_GLOBAL);
8155 
8156 			if (state.hasImage && (state.imageLayout != vk::VK_IMAGE_LAYOUT_UNDEFINED))
8157 				ops.push_back(OP_PIPELINE_BARRIER_IMAGE);
8158 
8159 			if (state.hasBuffer)
8160 				ops.push_back(OP_PIPELINE_BARRIER_BUFFER);
8161 		}
8162 
8163 		if (state.hasBoundBufferMemory)
8164 		{
8165 			if (usage & USAGE_TRANSFER_DST
8166 				&& state.cache.isValid(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_WRITE_BIT))
8167 			{
8168 				ops.push_back(OP_BUFFER_FILL);
8169 				ops.push_back(OP_BUFFER_UPDATE);
8170 				ops.push_back(OP_BUFFER_COPY_FROM_BUFFER);
8171 				ops.push_back(OP_BUFFER_COPY_FROM_IMAGE);
8172 			}
8173 
8174 			if (usage & USAGE_TRANSFER_SRC
8175 				&& state.memoryDefined
8176 				&& state.cache.isValid(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_READ_BIT))
8177 			{
8178 				ops.push_back(OP_BUFFER_COPY_TO_BUFFER);
8179 				ops.push_back(OP_BUFFER_COPY_TO_IMAGE);
8180 			}
8181 		}
8182 
8183 		if (state.hasBoundImageMemory
8184 			&& (state.imageLayout == vk::VK_IMAGE_LAYOUT_UNDEFINED
8185 				|| getNumberOfSupportedLayouts(usage) > 1))
8186 		{
8187 			ops.push_back(OP_IMAGE_TRANSITION_LAYOUT);
8188 
8189 			{
8190 				if (usage & USAGE_TRANSFER_DST
8191 					&& (state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL
8192 						|| state.imageLayout == vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL)
8193 					&& state.cache.isValid(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_WRITE_BIT))
8194 				{
8195 					ops.push_back(OP_IMAGE_COPY_FROM_BUFFER);
8196 					ops.push_back(OP_IMAGE_COPY_FROM_IMAGE);
8197 					ops.push_back(OP_IMAGE_BLIT_FROM_IMAGE);
8198 				}
8199 
8200 				if (usage & USAGE_TRANSFER_SRC
8201 					&& (state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL
8202 						|| state.imageLayout == vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL)
8203 					&& state.imageDefined
8204 					&& state.cache.isValid(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_READ_BIT))
8205 				{
8206 					ops.push_back(OP_IMAGE_COPY_TO_BUFFER);
8207 					ops.push_back(OP_IMAGE_COPY_TO_IMAGE);
8208 					ops.push_back(OP_IMAGE_BLIT_TO_IMAGE);
8209 				}
8210 			}
8211 		}
8212 
8213 		// \todo [2016-03-09 mika] Add other usages?
8214 		if ((state.memoryDefined
8215 				&& state.hasBoundBufferMemory
8216 				&& (((usage & USAGE_VERTEX_BUFFER)
8217 					&& state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT))
8218 				|| ((usage & USAGE_INDEX_BUFFER)
8219 					&& state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_INDEX_READ_BIT))
8220 				|| ((usage & USAGE_UNIFORM_BUFFER)
8221 					&& (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT)
8222 						|| state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT)))
8223 				|| ((usage & USAGE_UNIFORM_TEXEL_BUFFER)
8224 					&& (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT)
8225 						|| state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT)))
8226 				|| ((usage & USAGE_STORAGE_BUFFER)
8227 					&& (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT)
8228 						|| state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT)))
8229 				|| ((usage & USAGE_STORAGE_TEXEL_BUFFER)
8230 					&& state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))))
8231 			|| (state.imageDefined
8232 				&& state.hasBoundImageMemory
8233 				&& (((usage & USAGE_STORAGE_IMAGE)
8234 						&& state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL
8235 						&& (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT)
8236 							|| state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT)))
8237 					|| ((usage & USAGE_SAMPLED_IMAGE)
8238 						&& (state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL
8239 							|| state.imageLayout == vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL)
8240 						&& (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT)
8241 							|| state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))))))
8242 		{
8243 			ops.push_back(OP_RENDERPASS_BEGIN);
8244 		}
8245 
8246 		ops.push_back(OP_SECONDARY_COMMAND_BUFFER_BEGIN);
8247 
8248 		// \note This depends on previous operations and has to be always the
8249 		// last command buffer operation check
8250 		if (ops.empty() || !state.commandBufferIsEmpty)
8251 			ops.push_back(OP_COMMAND_BUFFER_END);
8252 	}
8253 	else if (state.stage == STAGE_SECONDARY_COMMAND_BUFFER)
8254 	{
8255 		if (!state.cache.isClean())
8256 		{
8257 			ops.push_back(OP_PIPELINE_BARRIER_GLOBAL);
8258 
8259 			if (state.hasImage && (state.imageLayout != vk::VK_IMAGE_LAYOUT_UNDEFINED))
8260 				ops.push_back(OP_PIPELINE_BARRIER_IMAGE);
8261 
8262 			if (state.hasBuffer)
8263 				ops.push_back(OP_PIPELINE_BARRIER_BUFFER);
8264 		}
8265 
8266 		if (state.hasBoundBufferMemory)
8267 		{
8268 			if (usage & USAGE_TRANSFER_DST
8269 				&& state.cache.isValid(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_WRITE_BIT))
8270 			{
8271 				ops.push_back(OP_BUFFER_FILL);
8272 				ops.push_back(OP_BUFFER_UPDATE);
8273 				ops.push_back(OP_BUFFER_COPY_FROM_BUFFER);
8274 				ops.push_back(OP_BUFFER_COPY_FROM_IMAGE);
8275 			}
8276 
8277 			if (usage & USAGE_TRANSFER_SRC
8278 				&& state.memoryDefined
8279 				&& state.cache.isValid(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_READ_BIT))
8280 			{
8281 				ops.push_back(OP_BUFFER_COPY_TO_BUFFER);
8282 				ops.push_back(OP_BUFFER_COPY_TO_IMAGE);
8283 			}
8284 		}
8285 
8286 		if (state.hasBoundImageMemory
8287 			&& (state.imageLayout == vk::VK_IMAGE_LAYOUT_UNDEFINED
8288 				|| getNumberOfSupportedLayouts(usage) > 1))
8289 		{
8290 			ops.push_back(OP_IMAGE_TRANSITION_LAYOUT);
8291 
8292 			{
8293 				if (usage & USAGE_TRANSFER_DST
8294 					&& (state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL
8295 						|| state.imageLayout == vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL)
8296 					&& state.cache.isValid(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_WRITE_BIT))
8297 				{
8298 					ops.push_back(OP_IMAGE_COPY_FROM_BUFFER);
8299 					ops.push_back(OP_IMAGE_COPY_FROM_IMAGE);
8300 					ops.push_back(OP_IMAGE_BLIT_FROM_IMAGE);
8301 				}
8302 
8303 				if (usage & USAGE_TRANSFER_SRC
8304 					&& (state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL
8305 						|| state.imageLayout == vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL)
8306 					&& state.imageDefined
8307 					&& state.cache.isValid(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_READ_BIT))
8308 				{
8309 					ops.push_back(OP_IMAGE_COPY_TO_BUFFER);
8310 					ops.push_back(OP_IMAGE_COPY_TO_IMAGE);
8311 					ops.push_back(OP_IMAGE_BLIT_TO_IMAGE);
8312 				}
8313 			}
8314 		}
8315 
8316 		// \note This depends on previous operations and has to be always the
8317 		// last command buffer operation check
8318 		if (ops.empty() || !state.commandBufferIsEmpty)
8319 			ops.push_back(OP_SECONDARY_COMMAND_BUFFER_END);
8320 	}
8321 	else if (state.stage == STAGE_RENDER_PASS)
8322 	{
8323 		if ((usage & USAGE_VERTEX_BUFFER) != 0
8324 			&& state.memoryDefined
8325 			&& state.hasBoundBufferMemory
8326 			&& state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT))
8327 		{
8328 			ops.push_back(OP_RENDER_VERTEX_BUFFER);
8329 		}
8330 
8331 		if ((usage & USAGE_INDEX_BUFFER) != 0
8332 			&& state.memoryDefined
8333 			&& state.hasBoundBufferMemory
8334 			&& state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_INDEX_READ_BIT))
8335 		{
8336 			ops.push_back(OP_RENDER_INDEX_BUFFER);
8337 		}
8338 
8339 		if ((usage & USAGE_UNIFORM_BUFFER) != 0
8340 			&& state.memoryDefined
8341 			&& state.hasBoundBufferMemory)
8342 		{
8343 			if (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT))
8344 				ops.push_back(OP_RENDER_VERTEX_UNIFORM_BUFFER);
8345 
8346 			if (state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT))
8347 				ops.push_back(OP_RENDER_FRAGMENT_UNIFORM_BUFFER);
8348 		}
8349 
8350 		if ((usage & USAGE_UNIFORM_TEXEL_BUFFER) != 0
8351 			&& state.memoryDefined
8352 			&& state.hasBoundBufferMemory)
8353 		{
8354 			if (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT))
8355 				ops.push_back(OP_RENDER_VERTEX_UNIFORM_TEXEL_BUFFER);
8356 
8357 			if (state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT))
8358 				ops.push_back(OP_RENDER_FRAGMENT_UNIFORM_TEXEL_BUFFER);
8359 		}
8360 
8361 		if ((usage & USAGE_STORAGE_BUFFER) != 0
8362 			&& state.memoryDefined
8363 			&& state.hasBoundBufferMemory)
8364 		{
8365 			if (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
8366 				ops.push_back(OP_RENDER_VERTEX_STORAGE_BUFFER);
8367 
8368 			if (state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
8369 				ops.push_back(OP_RENDER_FRAGMENT_STORAGE_BUFFER);
8370 		}
8371 
8372 		if ((usage & USAGE_STORAGE_TEXEL_BUFFER) != 0
8373 			&& state.memoryDefined
8374 			&& state.hasBoundBufferMemory)
8375 		{
8376 			if (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
8377 				ops.push_back(OP_RENDER_VERTEX_STORAGE_TEXEL_BUFFER);
8378 
8379 			if (state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
8380 				ops.push_back(OP_RENDER_FRAGMENT_STORAGE_TEXEL_BUFFER);
8381 		}
8382 
8383 		if ((usage & USAGE_STORAGE_IMAGE) != 0
8384 			&& state.imageDefined
8385 			&& state.hasBoundImageMemory
8386 			&& (state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL))
8387 		{
8388 			if (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
8389 				ops.push_back(OP_RENDER_VERTEX_STORAGE_IMAGE);
8390 
8391 			if (state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
8392 				ops.push_back(OP_RENDER_FRAGMENT_STORAGE_IMAGE);
8393 		}
8394 
8395 		if ((usage & USAGE_SAMPLED_IMAGE) != 0
8396 			&& state.imageDefined
8397 			&& state.hasBoundImageMemory
8398 			&& (state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL
8399 				|| state.imageLayout == vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL))
8400 		{
8401 			if (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
8402 				ops.push_back(OP_RENDER_VERTEX_SAMPLED_IMAGE);
8403 
8404 			if (state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
8405 				ops.push_back(OP_RENDER_FRAGMENT_SAMPLED_IMAGE);
8406 		}
8407 
8408 		if (!state.renderPassIsEmpty)
8409 			ops.push_back(OP_RENDERPASS_END);
8410 	}
8411 	else
8412 		DE_FATAL("Unknown stage");
8413 }
8414 
removeIllegalAccessFlags(vk::VkAccessFlags & accessflags,vk::VkPipelineStageFlags stageflags)8415 void removeIllegalAccessFlags (vk::VkAccessFlags& accessflags, vk::VkPipelineStageFlags stageflags)
8416 {
8417 	if (!(stageflags & vk::VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT))
8418 		accessflags &= ~vk::VK_ACCESS_INDIRECT_COMMAND_READ_BIT;
8419 
8420 	if (!(stageflags & vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT))
8421 		accessflags &= ~vk::VK_ACCESS_INDEX_READ_BIT;
8422 
8423 	if (!(stageflags & vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT))
8424 		accessflags &= ~vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
8425 
8426 	if (!(stageflags & (vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
8427 						vk::VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
8428 						vk::VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT |
8429 						vk::VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
8430 						vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
8431 						vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT)))
8432 		accessflags &= ~vk::VK_ACCESS_UNIFORM_READ_BIT;
8433 
8434 	if (!(stageflags & vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT))
8435 		accessflags &= ~vk::VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;
8436 
8437 	if (!(stageflags & (vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
8438 						vk::VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
8439 						vk::VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT |
8440 						vk::VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
8441 						vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
8442 						vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT)))
8443 		accessflags &= ~vk::VK_ACCESS_SHADER_READ_BIT;
8444 
8445 	if (!(stageflags & (vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
8446 						vk::VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
8447 						vk::VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT |
8448 						vk::VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
8449 						vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
8450 						vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT)))
8451 		accessflags &= ~vk::VK_ACCESS_SHADER_WRITE_BIT;
8452 
8453 	if (!(stageflags & vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT))
8454 		accessflags &= ~vk::VK_ACCESS_COLOR_ATTACHMENT_READ_BIT;
8455 
8456 	if (!(stageflags & vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT))
8457 		accessflags &= ~vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
8458 
8459 	if (!(stageflags & (vk::VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
8460 						vk::VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT)))
8461 		accessflags &= ~vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;
8462 
8463 	if (!(stageflags & (vk::VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
8464 						vk::VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT)))
8465 		accessflags &= ~vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
8466 
8467 	if (!(stageflags & vk::VK_PIPELINE_STAGE_TRANSFER_BIT))
8468 		accessflags &= ~vk::VK_ACCESS_TRANSFER_READ_BIT;
8469 
8470 	if (!(stageflags & vk::VK_PIPELINE_STAGE_TRANSFER_BIT))
8471 		accessflags &= ~vk::VK_ACCESS_TRANSFER_WRITE_BIT;
8472 
8473 	if (!(stageflags & vk::VK_PIPELINE_STAGE_HOST_BIT))
8474 		accessflags &= ~vk::VK_ACCESS_HOST_READ_BIT;
8475 
8476 	if (!(stageflags & vk::VK_PIPELINE_STAGE_HOST_BIT))
8477 		accessflags &= ~vk::VK_ACCESS_HOST_WRITE_BIT;
8478 }
8479 
// Applies the state transition caused by executing operation 'op' to the
// simulated memory/pipeline state. The DE_ASSERTs encode in which stage each
// op is legal; the real Vulkan work is done elsewhere by the corresponding
// Command objects. Several cases draw values from state.rng — presumably to
// keep the RNG stream in step with the values drawn when the matching
// command object is created (see createHostCommand/createCmdCommand);
// TODO confirm against the callers.
void applyOp (State& state, const Memory& memory, Op op, Usage usage)
{
	switch (op)
	{
		case OP_MAP:
			DE_ASSERT(state.stage == STAGE_HOST);
			DE_ASSERT(!state.mapped);
			state.mapped = true;
			break;

		case OP_UNMAP:
			DE_ASSERT(state.stage == STAGE_HOST);
			DE_ASSERT(state.mapped);
			state.mapped = false;
			break;

		case OP_MAP_FLUSH:
			DE_ASSERT(state.stage == STAGE_HOST);
			DE_ASSERT(!state.hostFlushed);
			state.hostFlushed = true;
			break;

		case OP_MAP_INVALIDATE:
			DE_ASSERT(state.stage == STAGE_HOST);
			DE_ASSERT(!state.hostInvalidated);
			state.hostInvalidated = true;
			break;

		case OP_MAP_READ:
			DE_ASSERT(state.stage == STAGE_HOST);
			DE_ASSERT(state.hostInvalidated);
			// Consume one random value (no state flags change on a pure read)
			state.rng.getUint32();
			break;

		case OP_MAP_WRITE:
			DE_ASSERT(state.stage == STAGE_HOST);
			// Non-coherent memory needs an explicit flush after a host write
			if ((memory.getMemoryType().propertyFlags & vk::VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) == 0)
				state.hostFlushed = false;

			// Host write defines the raw memory contents but invalidates any
			// image interpretation of them
			state.memoryDefined = true;
			state.imageDefined = false;
			state.imageLayout = vk::VK_IMAGE_LAYOUT_UNDEFINED;
			state.rng.getUint32();
			break;

		case OP_MAP_MODIFY:
			DE_ASSERT(state.stage == STAGE_HOST);
			DE_ASSERT(state.hostInvalidated);

			// Read-modify-write: contents were already defined, only the
			// flush status changes for non-coherent memory
			if ((memory.getMemoryType().propertyFlags & vk::VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) == 0)
				state.hostFlushed = false;

			state.rng.getUint32();
			break;

		case OP_BUFFER_CREATE:
			DE_ASSERT(state.stage == STAGE_HOST);
			DE_ASSERT(!state.hasBuffer);

			state.hasBuffer = true;
			break;

		case OP_BUFFER_DESTROY:
			DE_ASSERT(state.stage == STAGE_HOST);
			DE_ASSERT(state.hasBuffer);
			DE_ASSERT(state.hasBoundBufferMemory);

			state.hasBuffer = false;
			state.hasBoundBufferMemory = false;
			break;

		case OP_BUFFER_BINDMEMORY:
			DE_ASSERT(state.stage == STAGE_HOST);
			DE_ASSERT(state.hasBuffer);
			DE_ASSERT(!state.hasBoundBufferMemory);

			state.hasBoundBufferMemory = true;
			break;

		case OP_IMAGE_CREATE:
			DE_ASSERT(state.stage == STAGE_HOST);
			DE_ASSERT(!state.hasImage);
			DE_ASSERT(!state.hasBuffer);

			state.hasImage = true;
			break;

		case OP_IMAGE_DESTROY:
			DE_ASSERT(state.stage == STAGE_HOST);
			DE_ASSERT(state.hasImage);
			DE_ASSERT(state.hasBoundImageMemory);

			state.hasImage = false;
			state.hasBoundImageMemory = false;
			state.imageLayout = vk::VK_IMAGE_LAYOUT_UNDEFINED;
			state.imageDefined = false;
			break;

		case OP_IMAGE_BINDMEMORY:
			DE_ASSERT(state.stage == STAGE_HOST);
			DE_ASSERT(state.hasImage);
			DE_ASSERT(!state.hasBoundImageMemory);

			state.hasBoundImageMemory = true;
			break;

		case OP_IMAGE_TRANSITION_LAYOUT:
		{
			DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER || state.stage == STAGE_SECONDARY_COMMAND_BUFFER);
			DE_ASSERT(state.hasImage);
			DE_ASSERT(state.hasBoundImageMemory);

			// \todo [2016-03-09 mika] Support linear tiling and predefined data
			// Occasionally (10%) transition from UNDEFINED to also exercise
			// content-discarding transitions
			const vk::VkImageLayout		srcLayout	= state.rng.getFloat() < 0.9f ? state.imageLayout : vk::VK_IMAGE_LAYOUT_UNDEFINED;
			const vk::VkImageLayout		dstLayout	= getRandomNextLayout(state.rng, usage, srcLayout);

			vk::VkPipelineStageFlags	dirtySrcStages;
			vk::VkAccessFlags			dirtySrcAccesses;
			vk::VkPipelineStageFlags	dirtyDstStages;
			vk::VkAccessFlags			dirtyDstAccesses;

			vk::VkPipelineStageFlags	srcStages;
			vk::VkAccessFlags			srcAccesses;
			vk::VkPipelineStageFlags	dstStages;
			vk::VkAccessFlags			dstAccesses;

			state.cache.getFullBarrier(dirtySrcStages, dirtySrcAccesses, dirtyDstStages, dirtyDstAccesses);

			// Try masking some random bits
			srcStages	= dirtySrcStages;
			srcAccesses	= dirtySrcAccesses;

			dstStages	= state.cache.getAllowedStages() & state.rng.getUint32();
			dstAccesses	= state.cache.getAllowedAcceses() & state.rng.getUint32();

			// If there are no bits in dst stage mask use all stages
			dstStages	= dstStages ? dstStages : state.cache.getAllowedStages();

			if (!srcStages)
				srcStages = dstStages;

			removeIllegalAccessFlags(dstAccesses, dstStages);
			removeIllegalAccessFlags(srcAccesses, srcStages);

			// Transition away from UNDEFINED discards the image contents
			if (srcLayout == vk::VK_IMAGE_LAYOUT_UNDEFINED)
				state.imageDefined = false;

			state.commandBufferIsEmpty = false;
			state.imageLayout = dstLayout;
			// Layout transition makes the raw (buffer-view) contents undefined
			state.memoryDefined = false;
			state.cache.imageLayoutBarrier(srcStages, srcAccesses, dstStages, dstAccesses);
			break;
		}

		case OP_QUEUE_WAIT_FOR_IDLE:
			DE_ASSERT(state.stage == STAGE_HOST);
			DE_ASSERT(!state.queueIdle);

			state.queueIdle = true;

			state.cache.waitForIdle();
			break;

		case OP_DEVICE_WAIT_FOR_IDLE:
			DE_ASSERT(state.stage == STAGE_HOST);
			DE_ASSERT(!state.deviceIdle);

			// Device idle implies queue idle as well
			state.queueIdle = true;
			state.deviceIdle = true;

			state.cache.waitForIdle();
			break;

		case OP_COMMAND_BUFFER_BEGIN:
			DE_ASSERT(state.stage == STAGE_HOST);
			state.stage = STAGE_COMMAND_BUFFER;
			state.commandBufferIsEmpty = true;
			// Makes host writes visible to command buffer
			state.cache.submitCommandBuffer();
			break;

		case OP_COMMAND_BUFFER_END:
			DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER || state.stage == STAGE_SECONDARY_COMMAND_BUFFER);
			state.stage = STAGE_HOST;
			// Submission puts work in flight, so queue/device are no longer idle
			state.queueIdle = false;
			state.deviceIdle = false;
			break;

		case OP_SECONDARY_COMMAND_BUFFER_BEGIN:
			DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER || state.stage == STAGE_SECONDARY_COMMAND_BUFFER);
			state.stage = STAGE_SECONDARY_COMMAND_BUFFER;
			// Remember the primary buffer's emptiness so it can be restored
			// when the secondary buffer ends
			state.primaryCommandBufferIsEmpty = state.commandBufferIsEmpty;
			state.commandBufferIsEmpty = true;
			break;

		case OP_SECONDARY_COMMAND_BUFFER_END:
			DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER || state.stage == STAGE_SECONDARY_COMMAND_BUFFER);
			state.stage = STAGE_COMMAND_BUFFER;
			state.commandBufferIsEmpty = state.primaryCommandBufferIsEmpty;
			break;

		case OP_BUFFER_COPY_FROM_BUFFER:
		case OP_BUFFER_COPY_FROM_IMAGE:
		case OP_BUFFER_UPDATE:
		case OP_BUFFER_FILL:
			state.rng.getUint32();
			DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER || state.stage == STAGE_SECONDARY_COMMAND_BUFFER);

			// Device write to non-coherent memory requires a host invalidate
			// before the host may read it again
			if ((memory.getMemoryType().propertyFlags & vk::VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) == 0)
				state.hostInvalidated = false;

			state.commandBufferIsEmpty = false;
			// Buffer write defines raw contents, invalidates image contents
			state.memoryDefined = true;
			state.imageDefined = false;
			state.imageLayout = vk::VK_IMAGE_LAYOUT_UNDEFINED;
			state.cache.perform(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_WRITE_BIT);
			break;

		case OP_BUFFER_COPY_TO_BUFFER:
		case OP_BUFFER_COPY_TO_IMAGE:
			DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER || state.stage == STAGE_SECONDARY_COMMAND_BUFFER);

			state.commandBufferIsEmpty = false;
			state.cache.perform(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_READ_BIT);
			break;

		case OP_IMAGE_BLIT_FROM_IMAGE:
			// Blit consumes an extra random value (blit scaling choice —
			// presumably mirrors CreateImage/blit command setup; TODO confirm)
			state.rng.getBool();
			// Fall through
		case OP_IMAGE_COPY_FROM_BUFFER:
		case OP_IMAGE_COPY_FROM_IMAGE:
			state.rng.getUint32();
			DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER || state.stage == STAGE_SECONDARY_COMMAND_BUFFER);

			if ((memory.getMemoryType().propertyFlags & vk::VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) == 0)
				state.hostInvalidated = false;

			state.commandBufferIsEmpty = false;
			// Image write defines image contents, invalidates raw contents
			state.memoryDefined = false;
			state.imageDefined = true;
			state.cache.perform(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_WRITE_BIT);
			break;

		case OP_IMAGE_BLIT_TO_IMAGE:
			state.rng.getBool();
			// Fall through
		case OP_IMAGE_COPY_TO_BUFFER:
		case OP_IMAGE_COPY_TO_IMAGE:
			DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER || state.stage == STAGE_SECONDARY_COMMAND_BUFFER);

			state.commandBufferIsEmpty = false;
			state.cache.perform(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_READ_BIT);
			break;

		case OP_PIPELINE_BARRIER_GLOBAL:
		case OP_PIPELINE_BARRIER_BUFFER:
		case OP_PIPELINE_BARRIER_IMAGE:
		{
			DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER || state.stage == STAGE_SECONDARY_COMMAND_BUFFER);

			vk::VkPipelineStageFlags	dirtySrcStages;
			vk::VkAccessFlags			dirtySrcAccesses;
			vk::VkPipelineStageFlags	dirtyDstStages;
			vk::VkAccessFlags			dirtyDstAccesses;

			vk::VkPipelineStageFlags	srcStages;
			vk::VkAccessFlags			srcAccesses;
			vk::VkPipelineStageFlags	dstStages;
			vk::VkAccessFlags			dstAccesses;

			state.cache.getFullBarrier(dirtySrcStages, dirtySrcAccesses, dirtyDstStages, dirtyDstAccesses);

			// Try masking some random bits
			srcStages	= dirtySrcStages & state.rng.getUint32();
			srcAccesses	= dirtySrcAccesses & state.rng.getUint32();

			dstStages	= dirtyDstStages & state.rng.getUint32();
			dstAccesses	= dirtyDstAccesses & state.rng.getUint32();

			// If there are no bits in stage mask use the original dirty stages
			srcStages	= srcStages ? srcStages : dirtySrcStages;
			dstStages	= dstStages ? dstStages : dirtyDstStages;

			if (!srcStages)
				srcStages = dstStages;

			removeIllegalAccessFlags(dstAccesses, dstStages);
			removeIllegalAccessFlags(srcAccesses, srcStages);

			state.commandBufferIsEmpty = false;
			state.cache.barrier(srcStages, srcAccesses, dstStages, dstAccesses);
			break;
		}

		case OP_RENDERPASS_BEGIN:
		{
			DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER);

			state.renderPassIsEmpty	= true;
			state.stage				= STAGE_RENDER_PASS;
			break;
		}

		case OP_RENDERPASS_END:
		{
			DE_ASSERT(state.stage == STAGE_RENDER_PASS);

			// Reset the flag for the next render pass
			state.renderPassIsEmpty	= true;
			state.stage				= STAGE_COMMAND_BUFFER;
			break;
		}

		case OP_RENDER_VERTEX_BUFFER:
		{
			DE_ASSERT(state.stage == STAGE_RENDER_PASS);

			state.renderPassIsEmpty = false;
			state.cache.perform(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT);
			break;
		}

		case OP_RENDER_INDEX_BUFFER:
		{
			DE_ASSERT(state.stage == STAGE_RENDER_PASS);

			state.renderPassIsEmpty = false;
			state.cache.perform(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_INDEX_READ_BIT);
			break;
		}

		case OP_RENDER_VERTEX_UNIFORM_BUFFER:
		case OP_RENDER_VERTEX_UNIFORM_TEXEL_BUFFER:
		{
			DE_ASSERT(state.stage == STAGE_RENDER_PASS);

			state.renderPassIsEmpty = false;
			state.cache.perform(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT);
			break;
		}

		case OP_RENDER_FRAGMENT_UNIFORM_BUFFER:
		case OP_RENDER_FRAGMENT_UNIFORM_TEXEL_BUFFER:
		{
			DE_ASSERT(state.stage == STAGE_RENDER_PASS);

			state.renderPassIsEmpty = false;
			state.cache.perform(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT);
			break;
		}

		case OP_RENDER_VERTEX_STORAGE_BUFFER:
		case OP_RENDER_VERTEX_STORAGE_TEXEL_BUFFER:
		{
			DE_ASSERT(state.stage == STAGE_RENDER_PASS);

			state.renderPassIsEmpty = false;
			state.cache.perform(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT);
			break;
		}

		case OP_RENDER_FRAGMENT_STORAGE_BUFFER:
		case OP_RENDER_FRAGMENT_STORAGE_TEXEL_BUFFER:
		{
			DE_ASSERT(state.stage == STAGE_RENDER_PASS);

			state.renderPassIsEmpty = false;
			state.cache.perform(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT);
			break;
		}

		case OP_RENDER_FRAGMENT_STORAGE_IMAGE:
		case OP_RENDER_FRAGMENT_SAMPLED_IMAGE:
		{
			DE_ASSERT(state.stage == STAGE_RENDER_PASS);

			state.renderPassIsEmpty = false;
			state.cache.perform(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT);
			break;
		}

		case OP_RENDER_VERTEX_STORAGE_IMAGE:
		case OP_RENDER_VERTEX_SAMPLED_IMAGE:
		{
			DE_ASSERT(state.stage == STAGE_RENDER_PASS);

			state.renderPassIsEmpty = false;
			state.cache.perform(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT);
			break;
		}

		default:
			DE_FATAL("Unknown op");
	}
}
8874 
createHostCommand(Op op,de::Random & rng,Usage usage,vk::VkSharingMode sharing)8875 de::MovePtr<Command> createHostCommand (Op					op,
8876 										de::Random&			rng,
8877 										Usage				usage,
8878 										vk::VkSharingMode	sharing)
8879 {
8880 	switch (op)
8881 	{
8882 		case OP_MAP:					return de::MovePtr<Command>(new Map());
8883 		case OP_UNMAP:					return de::MovePtr<Command>(new UnMap());
8884 
8885 		case OP_MAP_FLUSH:				return de::MovePtr<Command>(new Flush());
8886 		case OP_MAP_INVALIDATE:			return de::MovePtr<Command>(new Invalidate());
8887 
8888 		case OP_MAP_READ:				return de::MovePtr<Command>(new HostMemoryAccess(true, false, rng.getUint32()));
8889 		case OP_MAP_WRITE:				return de::MovePtr<Command>(new HostMemoryAccess(false, true, rng.getUint32()));
8890 		case OP_MAP_MODIFY:				return de::MovePtr<Command>(new HostMemoryAccess(true, true, rng.getUint32()));
8891 
8892 		case OP_BUFFER_CREATE:			return de::MovePtr<Command>(new CreateBuffer(usageToBufferUsageFlags(usage), sharing));
8893 		case OP_BUFFER_DESTROY:			return de::MovePtr<Command>(new DestroyBuffer());
8894 		case OP_BUFFER_BINDMEMORY:		return de::MovePtr<Command>(new BindBufferMemory());
8895 
8896 		case OP_IMAGE_CREATE:			return de::MovePtr<Command>(new CreateImage(usageToImageUsageFlags(usage), sharing));
8897 		case OP_IMAGE_DESTROY:			return de::MovePtr<Command>(new DestroyImage());
8898 		case OP_IMAGE_BINDMEMORY:		return de::MovePtr<Command>(new BindImageMemory());
8899 
8900 		case OP_QUEUE_WAIT_FOR_IDLE:	return de::MovePtr<Command>(new QueueWaitIdle());
8901 		case OP_DEVICE_WAIT_FOR_IDLE:	return de::MovePtr<Command>(new DeviceWaitIdle());
8902 
8903 		default:
8904 			DE_FATAL("Unknown op");
8905 			return de::MovePtr<Command>(DE_NULL);
8906 	}
8907 }
8908 
createCmdCommand(de::Random & rng,const State & state,Op op,Usage usage)8909 de::MovePtr<CmdCommand> createCmdCommand (de::Random&	rng,
8910 										  const State&	state,
8911 										  Op			op,
8912 										  Usage			usage)
8913 {
	// Build a single command-buffer level command for 'op'. 'rng' supplies any
	// random parameters the command needs (fill seeds, barrier masks, layouts);
	// 'state' supplies the current image layout and the cache of pending memory
	// accesses. NOTE: the number and order of rng calls made here must match
	// applyOp() exactly -- callers assert state.rng stays in sync afterwards.
8914 	switch (op)
8915 	{
8916 		case OP_BUFFER_FILL:					return de::MovePtr<CmdCommand>(new FillBuffer(rng.getUint32()));
8917 		case OP_BUFFER_UPDATE:					return de::MovePtr<CmdCommand>(new UpdateBuffer(rng.getUint32()));
8918 		case OP_BUFFER_COPY_TO_BUFFER:			return de::MovePtr<CmdCommand>(new BufferCopyToBuffer());
8919 		case OP_BUFFER_COPY_FROM_BUFFER:		return de::MovePtr<CmdCommand>(new BufferCopyFromBuffer(rng.getUint32()));
8920 
8921 		case OP_BUFFER_COPY_TO_IMAGE:			return de::MovePtr<CmdCommand>(new BufferCopyToImage());
8922 		case OP_BUFFER_COPY_FROM_IMAGE:			return de::MovePtr<CmdCommand>(new BufferCopyFromImage(rng.getUint32()));
8923 
8924 		case OP_IMAGE_TRANSITION_LAYOUT:
8925 		{
8926 			DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER || state.stage == STAGE_SECONDARY_COMMAND_BUFFER);
8927 			DE_ASSERT(state.hasImage);
8928 			DE_ASSERT(state.hasBoundImageMemory);
8929 
			// ~90% of the time transition from the tracked current layout;
			// otherwise transition from UNDEFINED (contents may be discarded).
8930 			const vk::VkImageLayout		srcLayout	= rng.getFloat() < 0.9f ? state.imageLayout : vk::VK_IMAGE_LAYOUT_UNDEFINED;
8931 			const vk::VkImageLayout		dstLayout	= getRandomNextLayout(rng, usage, srcLayout);
8932 
8933 			vk::VkPipelineStageFlags	dirtySrcStages;
8934 			vk::VkAccessFlags			dirtySrcAccesses;
8935 			vk::VkPipelineStageFlags	dirtyDstStages;
8936 			vk::VkAccessFlags			dirtyDstAccesses;
8937 
8938 			vk::VkPipelineStageFlags	srcStages;
8939 			vk::VkAccessFlags			srcAccesses;
8940 			vk::VkPipelineStageFlags	dstStages;
8941 			vk::VkAccessFlags			dstAccesses;
8942 
			// Query the barrier that would cover everything currently pending
			// in the access cache.
8943 			state.cache.getFullBarrier(dirtySrcStages, dirtySrcAccesses, dirtyDstStages, dirtyDstAccesses);
8944 
8945 			// Try masking some random bits
8946 			srcStages	= dirtySrcStages;
8947 			srcAccesses	= dirtySrcAccesses;
8948 
8949 			dstStages	= state.cache.getAllowedStages() & rng.getUint32();
8950 			dstAccesses	= state.cache.getAllowedAcceses() & rng.getUint32();
8951 
8952 			// If there are no bits in dst stage mask use all stages
8953 			dstStages	= dstStages ? dstStages : state.cache.getAllowedStages();
8954 
			// A stage mask of zero is invalid; fall back to the dst stages.
8955 			if (!srcStages)
8956 				srcStages = dstStages;
8957 
			// Drop access bits that are not valid for the chosen stage masks.
8958 			removeIllegalAccessFlags(dstAccesses, dstStages);
8959 			removeIllegalAccessFlags(srcAccesses, srcStages);
8960 
8961 			return de::MovePtr<CmdCommand>(new ImageTransition(srcStages, srcAccesses, dstStages, dstAccesses, srcLayout, dstLayout));
8962 		}
8963 
8964 		case OP_IMAGE_COPY_TO_BUFFER:			return de::MovePtr<CmdCommand>(new ImageCopyToBuffer(state.imageLayout));
8965 		case OP_IMAGE_COPY_FROM_BUFFER:			return de::MovePtr<CmdCommand>(new ImageCopyFromBuffer(rng.getUint32(), state.imageLayout));
8966 		case OP_IMAGE_COPY_TO_IMAGE:			return de::MovePtr<CmdCommand>(new ImageCopyToImage(state.imageLayout));
8967 		case OP_IMAGE_COPY_FROM_IMAGE:			return de::MovePtr<CmdCommand>(new ImageCopyFromImage(rng.getUint32(), state.imageLayout));
8968 		case OP_IMAGE_BLIT_TO_IMAGE:
8969 		{
			// Randomly blit either 1:1 or with a 2x scale.
8970 			const BlitScale scale = rng.getBool() ? BLIT_SCALE_20 : BLIT_SCALE_10;
8971 			return de::MovePtr<CmdCommand>(new ImageBlitToImage(scale, state.imageLayout));
8972 		}
8973 
8974 		case OP_IMAGE_BLIT_FROM_IMAGE:
8975 		{
8976 			const BlitScale scale = rng.getBool() ? BLIT_SCALE_20 : BLIT_SCALE_10;
8977 			return de::MovePtr<CmdCommand>(new ImageBlitFromImage(rng.getUint32(), scale, state.imageLayout));
8978 		}
8979 
8980 		case OP_PIPELINE_BARRIER_GLOBAL:
8981 		case OP_PIPELINE_BARRIER_BUFFER:
8982 		case OP_PIPELINE_BARRIER_IMAGE:
8983 		{
8984 			vk::VkPipelineStageFlags	dirtySrcStages;
8985 			vk::VkAccessFlags			dirtySrcAccesses;
8986 			vk::VkPipelineStageFlags	dirtyDstStages;
8987 			vk::VkAccessFlags			dirtyDstAccesses;
8988 
8989 			vk::VkPipelineStageFlags	srcStages;
8990 			vk::VkAccessFlags			srcAccesses;
8991 			vk::VkPipelineStageFlags	dstStages;
8992 			vk::VkAccessFlags			dstAccesses;
8993 
			// Start from the barrier covering all pending accesses, then thin
			// it out randomly to exercise partial barriers.
8994 			state.cache.getFullBarrier(dirtySrcStages, dirtySrcAccesses, dirtyDstStages, dirtyDstAccesses);
8995 
8996 			// Try masking some random bits
8997 			srcStages	= dirtySrcStages & rng.getUint32();
8998 			srcAccesses	= dirtySrcAccesses & rng.getUint32();
8999 
9000 			dstStages	= dirtyDstStages & rng.getUint32();
9001 			dstAccesses	= dirtyDstAccesses & rng.getUint32();
9002 
9003 			// If there are no bits in stage mask use the original dirty stages
9004 			srcStages	= srcStages ? srcStages : dirtySrcStages;
9005 			dstStages	= dstStages ? dstStages : dirtyDstStages;
9006 
9007 			if (!srcStages)
9008 				srcStages = dstStages;
9009 
9010 			removeIllegalAccessFlags(dstAccesses, dstStages);
9011 			removeIllegalAccessFlags(srcAccesses, srcStages);
9012 
9013 			PipelineBarrier::Type type;
9014 			switch (op)
9015 			{
9016 			case OP_PIPELINE_BARRIER_IMAGE:
9017 				type = PipelineBarrier::TYPE_IMAGE;
9018 				break;
9019 			case OP_PIPELINE_BARRIER_BUFFER:
9020 				type = PipelineBarrier::TYPE_BUFFER;
9021 				break;
9022 			case OP_PIPELINE_BARRIER_GLOBAL:
9023 				type = PipelineBarrier::TYPE_GLOBAL;
9024 				break;
9025 			default:
9026 				type = PipelineBarrier::TYPE_LAST;
9027 				DE_FATAL("Unknown op");
9028 			}
9029 
			// Image barriers additionally need the current image layout.
9030 			if (type == PipelineBarrier::TYPE_IMAGE)
9031 				return de::MovePtr<CmdCommand>(new PipelineBarrier(srcStages, srcAccesses, dstStages, dstAccesses, type, tcu::just(state.imageLayout)));
9032 			else
9033 				return de::MovePtr<CmdCommand>(new PipelineBarrier(srcStages, srcAccesses, dstStages, dstAccesses, type, tcu::Nothing));
9034 		}
9035 
9036 		default:
9037 			DE_FATAL("Unknown op");
9038 			return de::MovePtr<CmdCommand>(DE_NULL);
9039 	}
9040 }
9041 
createRenderPassCommand(de::Random &,const State &,const TestConfig & testConfig,Op op)9042 de::MovePtr<RenderPassCommand> createRenderPassCommand (de::Random&,
9043 														const State&,
9044 														const TestConfig&	testConfig,
9045 														Op					op)
9046 {
9047 	switch (op)
9048 	{
9049 		case OP_RENDER_VERTEX_BUFFER:					return de::MovePtr<RenderPassCommand>(new RenderVertexBuffer(testConfig.vertexBufferStride));
9050 		case OP_RENDER_INDEX_BUFFER:					return de::MovePtr<RenderPassCommand>(new RenderIndexBuffer());
9051 
9052 		case OP_RENDER_VERTEX_UNIFORM_BUFFER:			return de::MovePtr<RenderPassCommand>(new RenderVertexUniformBuffer());
9053 		case OP_RENDER_FRAGMENT_UNIFORM_BUFFER:			return de::MovePtr<RenderPassCommand>(new RenderFragmentUniformBuffer());
9054 
9055 		case OP_RENDER_VERTEX_UNIFORM_TEXEL_BUFFER:		return de::MovePtr<RenderPassCommand>(new RenderVertexUniformTexelBuffer());
9056 		case OP_RENDER_FRAGMENT_UNIFORM_TEXEL_BUFFER:	return de::MovePtr<RenderPassCommand>(new RenderFragmentUniformTexelBuffer());
9057 
9058 		case OP_RENDER_VERTEX_STORAGE_BUFFER:			return de::MovePtr<RenderPassCommand>(new RenderVertexStorageBuffer());
9059 		case OP_RENDER_FRAGMENT_STORAGE_BUFFER:			return de::MovePtr<RenderPassCommand>(new RenderFragmentStorageBuffer());
9060 
9061 		case OP_RENDER_VERTEX_STORAGE_TEXEL_BUFFER:		return de::MovePtr<RenderPassCommand>(new RenderVertexStorageTexelBuffer());
9062 		case OP_RENDER_FRAGMENT_STORAGE_TEXEL_BUFFER:	return de::MovePtr<RenderPassCommand>(new RenderFragmentStorageTexelBuffer());
9063 
9064 		case OP_RENDER_VERTEX_STORAGE_IMAGE:			return de::MovePtr<RenderPassCommand>(new RenderVertexStorageImage());
9065 		case OP_RENDER_FRAGMENT_STORAGE_IMAGE:			return de::MovePtr<RenderPassCommand>(new RenderFragmentStorageImage());
9066 
9067 		case OP_RENDER_VERTEX_SAMPLED_IMAGE:			return de::MovePtr<RenderPassCommand>(new RenderVertexSampledImage());
9068 		case OP_RENDER_FRAGMENT_SAMPLED_IMAGE:			return de::MovePtr<RenderPassCommand>(new RenderFragmentSampledImage());
9069 
9070 		default:
9071 			DE_FATAL("Unknown op");
9072 			return de::MovePtr<RenderPassCommand>(DE_NULL);
9073 	}
9074 }
9075 
createRenderPassCommands(const Memory & memory,de::Random & nextOpRng,State & state,const TestConfig & testConfig,size_t & opNdx,size_t opCount)9076 de::MovePtr<CmdCommand> createRenderPassCommands (const Memory&		memory,
9077 												  de::Random&		nextOpRng,
9078 												  State&			state,
9079 												  const TestConfig&	testConfig,
9080 												  size_t&			opNdx,
9081 												  size_t			opCount)
9082 {
9083 	vector<RenderPassCommand*>	commands;
9084 
9085 	try
9086 	{
9087 		for (; opNdx < opCount; opNdx++)
9088 		{
9089 			vector<Op>	ops;
9090 
9091 			getAvailableOps(state, memory.getSupportBuffers(), memory.getSupportImages(), testConfig.usage, ops);
9092 
9093 			DE_ASSERT(!ops.empty());
9094 
9095 			{
9096 				const Op op = nextOpRng.choose<Op>(ops.begin(), ops.end());
9097 
9098 				if (op == OP_RENDERPASS_END)
9099 				{
9100 					break;
9101 				}
9102 				else
9103 				{
9104 					de::Random	rng	(state.rng);
9105 
9106 					commands.push_back(createRenderPassCommand(rng, state, testConfig, op).release());
9107 					applyOp(state, memory, op, testConfig.usage);
9108 
9109 					DE_ASSERT(state.rng == rng);
9110 				}
9111 			}
9112 		}
9113 
9114 		applyOp(state, memory, OP_RENDERPASS_END, testConfig.usage);
9115 		return de::MovePtr<CmdCommand>(new SubmitRenderPass(commands));
9116 	}
9117 	catch (...)
9118 	{
9119 		for (size_t commandNdx = 0; commandNdx < commands.size(); commandNdx++)
9120 			delete commands[commandNdx];
9121 
9122 		throw;
9123 	}
9124 }
9125 
createSecondaryCmdCommands(const Memory & memory,de::Random & nextOpRng,State & state,Usage usage,size_t & opNdx,size_t opCount)9126 de::MovePtr<CmdCommand> createSecondaryCmdCommands (const Memory&	memory,
9127 												    de::Random&		nextOpRng,
9128 												    State&			state,
9129 												    Usage			usage,
9130 												    size_t&			opNdx,
9131 												    size_t			opCount)
9132 {
9133 	vector<CmdCommand*>	commands;
9134 
9135 	try
9136 	{
9137 		for (; opNdx < opCount; opNdx++)
9138 		{
9139 			vector<Op>	ops;
9140 
9141 			getAvailableOps(state, memory.getSupportBuffers(), memory.getSupportImages(), usage, ops);
9142 
9143 			DE_ASSERT(!ops.empty());
9144 
9145 			{
9146 				const Op op = nextOpRng.choose<Op>(ops.begin(), ops.end());
9147 
9148 				if (op == OP_SECONDARY_COMMAND_BUFFER_END)
9149 				{
9150 					break;
9151 				}
9152 				else
9153 				{
9154 					de::Random	rng(state.rng);
9155 
9156 					commands.push_back(createCmdCommand(rng, state, op, usage).release());
9157 					applyOp(state, memory, op, usage);
9158 
9159 					DE_ASSERT(state.rng == rng);
9160 				}
9161 			}
9162 		}
9163 
9164 		applyOp(state, memory, OP_SECONDARY_COMMAND_BUFFER_END, usage);
9165 		return de::MovePtr<CmdCommand>(new ExecuteSecondaryCommandBuffer(commands));
9166 	}
9167 	catch (...)
9168 	{
9169 		for (size_t commandNdx = 0; commandNdx < commands.size(); commandNdx++)
9170 			delete commands[commandNdx];
9171 
9172 		throw;
9173 	}
9174 }
9175 
createCmdCommands(const Memory & memory,de::Random & nextOpRng,State & state,const TestConfig & testConfig,size_t & opNdx,size_t opCount)9176 de::MovePtr<Command> createCmdCommands (const Memory&		memory,
9177 										de::Random&			nextOpRng,
9178 										State&				state,
9179 										const TestConfig&	testConfig,
9180 										size_t&				opNdx,
9181 										size_t				opCount)
{
	// Build one complete command buffer: consume ops until
	// OP_COMMAND_BUFFER_END is selected (or the budget runs out), recursing
	// into render pass and secondary command buffer sub-generators as needed,
	// and wrap everything in a SubmitCommandBuffer command. 'opNdx' is shared
	// with the caller so the overall operation budget is honoured.
9183 	vector<CmdCommand*>	commands;
9184 
9185 	try
9186 	{
9187 		// Insert a mostly-full barrier to order this work wrt previous command buffer.
9188 		commands.push_back(new PipelineBarrier(state.cache.getAllowedStages(),
9189 											   state.cache.getAllowedAcceses(),
9190 											   state.cache.getAllowedStages(),
9191 											   state.cache.getAllowedAcceses(),
9192 											   PipelineBarrier::TYPE_GLOBAL,
9193 											   tcu::Nothing));
9194 
9195 		for (; opNdx < opCount; opNdx++)
9196 		{
9197 			vector<Op>	ops;
9198 
9199 			getAvailableOps(state, memory.getSupportBuffers(), memory.getSupportImages(), testConfig.usage, ops);
9200 
9201 			DE_ASSERT(!ops.empty());
9202 
9203 			{
9204 				const Op op = nextOpRng.choose<Op>(ops.begin(), ops.end());
9205 
9206 				if (op == OP_COMMAND_BUFFER_END)
9207 				{
9208 					break;
9209 				}
9210 				else
9211 				{
9212 					// \note Command needs to know the state before the operation
9213 					if (op == OP_RENDERPASS_BEGIN)
9214 					{
9215 						applyOp(state, memory, op, testConfig.usage);
9216 						commands.push_back(createRenderPassCommands(memory, nextOpRng, state, testConfig, opNdx, opCount).release());
9217 					}
9218 					else if (op == OP_SECONDARY_COMMAND_BUFFER_BEGIN)
9219 					{
9220 						applyOp(state, memory, op, testConfig.usage);
9221 						commands.push_back(createSecondaryCmdCommands(memory, nextOpRng, state, testConfig.usage, opNdx, opCount).release());
9222 					}
9223 					else
9224 					{
						// Copy of the state rng: command creation and the
						// state update must consume identical random numbers.
9225 						de::Random	rng	(state.rng);
9226 
9227 						commands.push_back(createCmdCommand(rng, state, op, testConfig.usage).release());
9228 						applyOp(state, memory, op, testConfig.usage);
9229 
9230 						DE_ASSERT(state.rng == rng);
9231 					}
9232 
9233 				}
9234 			}
9235 		}
9236 
9237 		applyOp(state, memory, OP_COMMAND_BUFFER_END, testConfig.usage);
9238 		return de::MovePtr<Command>(new SubmitCommandBuffer(commands));
9239 	}
9240 	catch (...)
9241 	{
		// SubmitCommandBuffer was never constructed; free the raw command
		// pointers before propagating the error.
9242 		for (size_t commandNdx = 0; commandNdx < commands.size(); commandNdx++)
9243 			delete commands[commandNdx];
9244 
9245 		throw;
9246 	}
9247 }
9248 
createCommands(vector<Command * > & commands,deUint32 seed,const Memory & memory,const TestConfig & testConfig,size_t opCount)9249 void createCommands (vector<Command*>&	commands,
9250 					 deUint32			seed,
9251 					 const Memory&		memory,
9252 					 const TestConfig&	testConfig,
9253 					 size_t				opCount)
9254 {
9255 	State			state		(testConfig.usage, seed);
9256 	// Used to select next operation only
9257 	de::Random		nextOpRng	(seed ^ 12930809);
9258 
9259 	commands.reserve(opCount);
9260 
9261 	for (size_t opNdx = 0; opNdx < opCount; opNdx++)
9262 	{
9263 		vector<Op>	ops;
9264 
9265 		getAvailableOps(state, memory.getSupportBuffers(), memory.getSupportImages(), testConfig.usage, ops);
9266 
9267 		DE_ASSERT(!ops.empty());
9268 
9269 		{
9270 			const Op	op	= nextOpRng.choose<Op>(ops.begin(), ops.end());
9271 
9272 			if (op == OP_COMMAND_BUFFER_BEGIN)
9273 			{
9274 				applyOp(state, memory, op, testConfig.usage);
9275 				commands.push_back(createCmdCommands(memory, nextOpRng, state, testConfig, opNdx, opCount).release());
9276 			}
9277 			else
9278 			{
9279 				de::Random	rng	(state.rng);
9280 
9281 				commands.push_back(createHostCommand(op, rng, testConfig.usage, testConfig.sharing).release());
9282 				applyOp(state, memory, op, testConfig.usage);
9283 
9284 				// Make sure that random generator is in sync
9285 				DE_ASSERT(state.rng == rng);
9286 			}
9287 		}
9288 	}
9289 
9290 	// Clean up resources
9291 	if (state.hasBuffer && state.hasImage)
9292 	{
9293 		if (!state.queueIdle)
9294 			commands.push_back(new QueueWaitIdle());
9295 
9296 		if (state.hasBuffer)
9297 			commands.push_back(new DestroyBuffer());
9298 
9299 		if (state.hasImage)
9300 			commands.push_back(new DestroyImage());
9301 	}
9302 }
9303 
9304 class MemoryTestInstance : public TestInstance
9305 {
9306 public:
9307 
	// Member function pointer type for the current test stage; each stage
	// returns true to continue iterating and false when the test is done.
9308 	typedef bool(MemoryTestInstance::*StageFunc)(void);
9309 
9310 												MemoryTestInstance				(::vkt::Context& context, const TestConfig& config);
9311 												~MemoryTestInstance				(void);
9312 
	// Runs the current stage; returns incomplete until all memory types and
	// iterations are processed.
9313 	tcu::TestStatus								iterate							(void);
9314 
9315 private:
9316 	const TestConfig							m_config;
	// Number of iterations to run per memory type.
9317 	const size_t								m_iterationCount;
	// Number of randomly generated operations per iteration.
9318 	const size_t								m_opCount;
9319 	const vk::VkPhysicalDeviceMemoryProperties	m_memoryProperties;
	// Index of the memory type currently being tested.
9320 	deUint32									m_memoryTypeNdx;
	// Current iteration within the current memory type.
9321 	size_t										m_iteration;
	// Next stage to execute from iterate(); DE_NULL when finished.
9322 	StageFunc									m_stage;
9323 	tcu::ResultCollector						m_resultCollector;
9324 
	// Generated commands for the current iteration (owned; freed in resetResources()).
9325 	vector<Command*>							m_commands;
9326 	MovePtr<Memory>								m_memory;
9327 	MovePtr<Context>							m_renderContext;
9328 	MovePtr<PrepareContext>						m_prepareContext;
9329 
	// Stage transition helpers.
9330 	bool										nextIteration					(void);
9331 	bool										nextMemoryType					(void);
9332 
	// The individual test stages, executed in this order per iteration.
9333 	bool										createCommandsAndAllocateMemory	(void);
9334 	bool										prepare							(void);
9335 	bool										execute							(void);
9336 	bool										verify							(void);
9337 	void										resetResources					(void);
9338 };
9339 
resetResources(void)9340 void MemoryTestInstance::resetResources (void)
9341 {
9342 	const vk::DeviceInterface&	vkd		= m_context.getDeviceInterface();
9343 	const vk::VkDevice			device	= m_context.getDevice();
9344 
9345 	VK_CHECK(vkd.deviceWaitIdle(device));
9346 
9347 	for (size_t commandNdx = 0; commandNdx < m_commands.size(); commandNdx++)
9348 	{
9349 		delete m_commands[commandNdx];
9350 		m_commands[commandNdx] = DE_NULL;
9351 	}
9352 
9353 	m_commands.clear();
9354 	m_prepareContext.clear();
9355 	m_memory.clear();
9356 }
9357 
nextIteration(void)9358 bool MemoryTestInstance::nextIteration (void)
9359 {
9360 	m_iteration++;
9361 
9362 	if (m_iteration < m_iterationCount)
9363 	{
9364 		resetResources();
9365 		m_stage = &MemoryTestInstance::createCommandsAndAllocateMemory;
9366 		return true;
9367 	}
9368 	else
9369 		return nextMemoryType();
9370 }
9371 
nextMemoryType(void)9372 bool MemoryTestInstance::nextMemoryType (void)
9373 {
9374 	resetResources();
9375 
9376 	DE_ASSERT(m_commands.empty());
9377 
9378 	m_memoryTypeNdx++;
9379 
9380 	if (m_memoryTypeNdx < m_memoryProperties.memoryTypeCount)
9381 	{
9382 		m_iteration	= 0;
9383 		m_stage		= &MemoryTestInstance::createCommandsAndAllocateMemory;
9384 
9385 		return true;
9386 	}
9387 	else
9388 	{
9389 		m_stage = DE_NULL;
9390 		return false;
9391 	}
9392 }
9393 
MemoryTestInstance(::vkt::Context & context,const TestConfig & config)9394 MemoryTestInstance::MemoryTestInstance (::vkt::Context& context, const TestConfig& config)
9395 	: TestInstance			(context)
9396 	, m_config				(config)
9397 	, m_iterationCount		(5)
9398 	, m_opCount				(50)
9399 	, m_memoryProperties	(vk::getPhysicalDeviceMemoryProperties(context.getInstanceInterface(), context.getPhysicalDevice()))
9400 	, m_memoryTypeNdx		(0)
9401 	, m_iteration			(0)
9402 	, m_stage				(&MemoryTestInstance::createCommandsAndAllocateMemory)
9403 	, m_resultCollector		(context.getTestContext().getLog())
9404 
9405 	, m_memory				(DE_NULL)
{
	// Log the test configuration and the device's memory properties up front,
	// then create the rendering context used by all iterations.
9407 	TestLog&	log	= context.getTestContext().getLog();
9408 	{
9409 		const tcu::ScopedLogSection section (log, "TestCaseInfo", "Test Case Info");
9410 
9411 		log << TestLog::Message << "Buffer size: " << config.size << TestLog::EndMessage;
9412 		log << TestLog::Message << "Sharing: " << config.sharing << TestLog::EndMessage;
9413 		log << TestLog::Message << "Access: " << config.usage << TestLog::EndMessage;
9414 	}
9415 
9416 	{
9417 		const tcu::ScopedLogSection section (log, "MemoryProperties", "Memory Properties");
9418 
		// One sub-section per memory heap...
9419 		for (deUint32 heapNdx = 0; heapNdx < m_memoryProperties.memoryHeapCount; heapNdx++)
9420 		{
9421 			const tcu::ScopedLogSection heapSection (log, "Heap" + de::toString(heapNdx), "Heap " + de::toString(heapNdx));
9422 
9423 			log << TestLog::Message << "Size: " << m_memoryProperties.memoryHeaps[heapNdx].size << TestLog::EndMessage;
9424 			log << TestLog::Message << "Flags: " << m_memoryProperties.memoryHeaps[heapNdx].flags << TestLog::EndMessage;
9425 		}
9426 
		// ...and one per memory type.
9427 		for (deUint32 memoryTypeNdx = 0; memoryTypeNdx < m_memoryProperties.memoryTypeCount; memoryTypeNdx++)
9428 		{
9429 			const tcu::ScopedLogSection memoryTypeSection (log, "MemoryType" + de::toString(memoryTypeNdx), "Memory type " + de::toString(memoryTypeNdx));
9430 
9431 			log << TestLog::Message << "Properties: " << m_memoryProperties.memoryTypes[memoryTypeNdx].propertyFlags << TestLog::EndMessage;
9432 			log << TestLog::Message << "Heap: " << m_memoryProperties.memoryTypes[memoryTypeNdx].heapIndex << TestLog::EndMessage;
9433 		}
9434 	}
9435 
9436 	{
9437 		const vk::InstanceInterface&			vki					= context.getInstanceInterface();
9438 		const vk::VkPhysicalDevice				physicalDevice		= context.getPhysicalDevice();
9439 		const vk::DeviceInterface&				vkd					= context.getDeviceInterface();
9440 		const vk::VkDevice						device				= context.getDevice();
9441 		const vk::VkQueue						queue				= context.getUniversalQueue();
9442 		const deUint32							queueFamilyIndex	= context.getUniversalQueueFamilyIndex();
9443 		vector<pair<deUint32, vk::VkQueue> >	queues;
9444 
		// All work is submitted to the universal queue only.
9445 		queues.push_back(std::make_pair(queueFamilyIndex, queue));
9446 
9447 		m_renderContext = MovePtr<Context>(new Context(vki, vkd, physicalDevice, device, queue, queueFamilyIndex, queues, context.getBinaryCollection()));
9448 	}
9449 }
9450 
~MemoryTestInstance(void)9451 MemoryTestInstance::~MemoryTestInstance (void)
9452 {
	// Commands and memory may still be alive if the test ended early.
9453 	resetResources();
9454 }
9455 
createCommandsAndAllocateMemory(void)9456 bool MemoryTestInstance::createCommandsAndAllocateMemory (void)
9457 {
9458 	const vk::VkDevice							device				= m_context.getDevice();
9459 	TestLog&									log					= m_context.getTestContext().getLog();
9460 	const vk::InstanceInterface&				vki					= m_context.getInstanceInterface();
9461 	const vk::VkPhysicalDevice					physicalDevice		= m_context.getPhysicalDevice();
9462 	const vk::DeviceInterface&					vkd					= m_context.getDeviceInterface();
9463 	const vk::VkPhysicalDeviceMemoryProperties	memoryProperties	= vk::getPhysicalDeviceMemoryProperties(vki, physicalDevice);
9464 	const tcu::ScopedLogSection					section				(log, "MemoryType" + de::toString(m_memoryTypeNdx) + "CreateCommands" + de::toString(m_iteration),
9465 																		  "Memory type " + de::toString(m_memoryTypeNdx) + " create commands iteration " + de::toString(m_iteration));
9466 	const vector<deUint32>&						queues				= m_renderContext->getQueueFamilies();
9467 
9468 	DE_ASSERT(m_commands.empty());
9469 
9470 	if (m_config.usage & (USAGE_HOST_READ | USAGE_HOST_WRITE)
9471 		&& !(memoryProperties.memoryTypes[m_memoryTypeNdx].propertyFlags & vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT))
9472 	{
9473 		log << TestLog::Message << "Memory type not supported" << TestLog::EndMessage;
9474 
9475 		return nextMemoryType();
9476 	}
9477 	else
9478 	{
9479 		try
9480 		{
9481 			const vk::VkBufferUsageFlags	bufferUsage		= usageToBufferUsageFlags(m_config.usage);
9482 			const vk::VkImageUsageFlags		imageUsage		= usageToImageUsageFlags(m_config.usage);
9483 			const vk::VkDeviceSize			maxBufferSize	= bufferUsage != 0
9484 															? roundBufferSizeToWxHx4(findMaxBufferSize(vkd, device, bufferUsage, m_config.sharing, queues, m_config.size, m_memoryTypeNdx))
9485 															: 0;
9486 			const IVec2						maxImageSize	= imageUsage != 0
9487 															? findMaxRGBA8ImageSize(vkd, device, imageUsage, m_config.sharing, queues, m_config.size, m_memoryTypeNdx)
9488 															: IVec2(0, 0);
9489 
9490 			log << TestLog::Message << "Max buffer size: " << maxBufferSize << TestLog::EndMessage;
9491 			log << TestLog::Message << "Max RGBA8 image size: " << maxImageSize << TestLog::EndMessage;
9492 
9493 			// Skip tests if there are no supported operations
9494 			if (maxBufferSize == 0
9495 				&& maxImageSize[0] == 0
9496 				&& (m_config.usage & (USAGE_HOST_READ|USAGE_HOST_WRITE)) == 0)
9497 			{
9498 				log << TestLog::Message << "Skipping memory type. None of the usages are supported." << TestLog::EndMessage;
9499 
9500 				return nextMemoryType();
9501 			}
9502 			else
9503 			{
9504 				const deUint32	seed	= 2830980989u ^ deUint32Hash((deUint32)(m_iteration) * m_memoryProperties.memoryTypeCount +  m_memoryTypeNdx);
9505 
9506 				m_memory	= MovePtr<Memory>(new Memory(vki, vkd, physicalDevice, device, m_config.size, m_memoryTypeNdx, maxBufferSize, maxImageSize[0], maxImageSize[1]));
9507 
9508 				log << TestLog::Message << "Create commands" << TestLog::EndMessage;
9509 				createCommands(m_commands, seed, *m_memory, m_config, m_opCount);
9510 
9511 				m_stage = &MemoryTestInstance::prepare;
9512 				return true;
9513 			}
9514 		}
9515 		catch (const tcu::TestError& e)
9516 		{
9517 			m_resultCollector.fail("Failed, got exception: " + string(e.getMessage()));
9518 			return nextMemoryType();
9519 		}
9520 	}
9521 }
9522 
prepare(void)9523 bool MemoryTestInstance::prepare (void)
9524 {
9525 	TestLog&					log		= m_context.getTestContext().getLog();
9526 	const tcu::ScopedLogSection	section	(log, "MemoryType" + de::toString(m_memoryTypeNdx) + "Prepare" + de::toString(m_iteration),
9527 											  "Memory type " + de::toString(m_memoryTypeNdx) + " prepare iteration " + de::toString(m_iteration));
9528 
9529 	m_prepareContext = MovePtr<PrepareContext>(new PrepareContext(*m_renderContext, *m_memory));
9530 
9531 	DE_ASSERT(!m_commands.empty());
9532 
9533 	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
9534 	{
9535 		Command& command = *m_commands[cmdNdx];
9536 
9537 		try
9538 		{
9539 			command.prepare(*m_prepareContext);
9540 		}
9541 		catch (const tcu::TestError& e)
9542 		{
9543 			m_resultCollector.fail(de::toString(cmdNdx) + ":" + command.getName() + " failed to prepare, got exception: " + string(e.getMessage()));
9544 			return nextMemoryType();
9545 		}
9546 	}
9547 
9548 	m_stage = &MemoryTestInstance::execute;
9549 	return true;
9550 }
9551 
execute(void)9552 bool MemoryTestInstance::execute (void)
9553 {
9554 	TestLog&					log				= m_context.getTestContext().getLog();
9555 	const tcu::ScopedLogSection	section			(log, "MemoryType" + de::toString(m_memoryTypeNdx) + "Execute" + de::toString(m_iteration),
9556 													  "Memory type " + de::toString(m_memoryTypeNdx) + " execute iteration " + de::toString(m_iteration));
9557 	ExecuteContext				executeContext	(*m_renderContext);
9558 	const vk::VkDevice			device			= m_context.getDevice();
9559 	const vk::DeviceInterface&	vkd				= m_context.getDeviceInterface();
9560 
9561 	DE_ASSERT(!m_commands.empty());
9562 
9563 	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
9564 	{
9565 		Command& command = *m_commands[cmdNdx];
9566 
9567 		try
9568 		{
9569 			command.execute(executeContext);
9570 		}
9571 		catch (const tcu::TestError& e)
9572 		{
9573 			m_resultCollector.fail(de::toString(cmdNdx) + ":" + command.getName() + " failed to execute, got exception: " + string(e.getMessage()));
9574 			return nextIteration();
9575 		}
9576 	}
9577 
9578 	VK_CHECK(vkd.deviceWaitIdle(device));
9579 
9580 	m_stage = &MemoryTestInstance::verify;
9581 	return true;
9582 }
9583 
verify(void)9584 bool MemoryTestInstance::verify (void)
9585 {
9586 	DE_ASSERT(!m_commands.empty());
9587 
9588 	TestLog&					log				= m_context.getTestContext().getLog();
9589 	const tcu::ScopedLogSection	section			(log, "MemoryType" + de::toString(m_memoryTypeNdx) + "Verify" + de::toString(m_iteration),
9590 													  "Memory type " + de::toString(m_memoryTypeNdx) + " verify iteration " + de::toString(m_iteration));
9591 	VerifyContext				verifyContext	(log, m_resultCollector, *m_renderContext, m_config.size);
9592 
9593 	log << TestLog::Message << "Begin verify" << TestLog::EndMessage;
9594 
9595 	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
9596 	{
9597 		Command& command = *m_commands[cmdNdx];
9598 
9599 		try
9600 		{
9601 			command.verify(verifyContext, cmdNdx);
9602 		}
9603 		catch (const tcu::TestError& e)
9604 		{
9605 			m_resultCollector.fail(de::toString(cmdNdx) + ":" + command.getName() + " failed to verify, got exception: " + string(e.getMessage()));
9606 			return nextIteration();
9607 		}
9608 	}
9609 
9610 	return nextIteration();
9611 }
9612 
iterate(void)9613 tcu::TestStatus MemoryTestInstance::iterate (void)
9614 {
9615 	if ((this->*m_stage)())
9616 		return tcu::TestStatus::incomplete();
9617 	else
9618 		return tcu::TestStatus(m_resultCollector.getResult(), m_resultCollector.getMessage());
9619 }
9620 
9621 struct AddPrograms
9622 {
initvkt::memory::__anon6635e60a0111::AddPrograms9623 	void init (vk::SourceCollections& sources, TestConfig config) const
9624 	{
9625 		// Vertex buffer rendering
9626 		if (config.usage & USAGE_VERTEX_BUFFER)
9627 		{
9628 			const char* const vertexShader =
9629 				"#version 310 es\n"
9630 				"layout(location = 0) in highp vec2 a_position;\n"
9631 				"void main (void) {\n"
9632 				"\tgl_PointSize = 1.0;\n"
9633 				"\tgl_Position = vec4(1.998 * a_position - vec2(0.999), 0.0, 1.0);\n"
9634 				"}\n";
9635 
9636 			sources.glslSources.add("vertex-buffer.vert")
9637 				<< glu::VertexSource(vertexShader);
9638 		}
9639 
9640 		// Index buffer rendering
9641 		if (config.usage & USAGE_INDEX_BUFFER)
9642 		{
9643 			const char* const vertexShader =
9644 				"#version 310 es\n"
9645 				"precision highp float;\n"
9646 				"void main (void) {\n"
9647 				"\tgl_PointSize = 1.0;\n"
9648 				"\thighp vec2 pos = vec2(gl_VertexIndex % 256, gl_VertexIndex / 256) / vec2(255.0);\n"
9649 				"\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
9650 				"}\n";
9651 
9652 			sources.glslSources.add("index-buffer.vert")
9653 				<< glu::VertexSource(vertexShader);
9654 		}
9655 
9656 		if (config.usage & USAGE_UNIFORM_BUFFER)
9657 		{
9658 			{
9659 				std::ostringstream vertexShader;
9660 
9661 				vertexShader <<
9662 					"#version 310 es\n"
9663 					"precision highp float;\n"
9664 					"layout(set=0, binding=0) uniform Block\n"
9665 					"{\n"
9666 					"\thighp uvec4 values[" << de::toString<size_t>(MAX_UNIFORM_BUFFER_SIZE / (sizeof(deUint32) * 4)) << "];\n"
9667 					"} block;\n"
9668 					"void main (void) {\n"
9669 					"\tgl_PointSize = 1.0;\n"
9670 					"\thighp uvec4 vecVal = block.values[gl_VertexIndex / 8];\n"
9671 					"\thighp uint val;\n"
9672 					"\tif (((gl_VertexIndex / 2) % 4 == 0))\n"
9673 					"\t\tval = vecVal.x;\n"
9674 					"\telse if (((gl_VertexIndex / 2) % 4 == 1))\n"
9675 					"\t\tval = vecVal.y;\n"
9676 					"\telse if (((gl_VertexIndex / 2) % 4 == 2))\n"
9677 					"\t\tval = vecVal.z;\n"
9678 					"\telse if (((gl_VertexIndex / 2) % 4 == 3))\n"
9679 					"\t\tval = vecVal.w;\n"
9680 					"\tif ((gl_VertexIndex % 2) == 0)\n"
9681 					"\t\tval = val & 0xFFFFu;\n"
9682 					"\telse\n"
9683 					"\t\tval = val >> 16u;\n"
9684 					"\thighp vec2 pos = vec2(val & 0xFFu, val >> 8u) / vec2(255.0);\n"
9685 					"\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
9686 					"}\n";
9687 
9688 				sources.glslSources.add("uniform-buffer.vert")
9689 					<< glu::VertexSource(vertexShader.str());
9690 			}
9691 
9692 			{
9693 				const size_t		arraySize		= MAX_UNIFORM_BUFFER_SIZE / (sizeof(deUint32) * 4);
9694 				const size_t		arrayIntSize	= arraySize * 4;
9695 				std::ostringstream	fragmentShader;
9696 
9697 				fragmentShader <<
9698 					"#version 310 es\n"
9699 					"precision highp float;\n"
9700 					"precision highp int;\n"
9701 					"layout(location = 0) out highp vec4 o_color;\n"
9702 					"layout(set=0, binding=0) uniform Block\n"
9703 					"{\n"
9704 					"\thighp uvec4 values[" << arraySize << "];\n"
9705 					"} block;\n"
9706 					"layout(push_constant) uniform PushC\n"
9707 					"{\n"
9708 					"\tuint callId;\n"
9709 					"\tuint valuesPerPixel;\n"
9710 					"\tuint bufferSize;\n"
9711 					"} pushC;\n"
9712 					"void main (void) {\n"
9713 					"\thighp uint id = uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x);\n"
9714 					"\tif (uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x) < pushC.callId * (" << arrayIntSize  << "u / pushC.valuesPerPixel))\n"
9715 					"\t\tdiscard;\n"
9716 					"\thighp uint value = id;\n"
9717 					"\tfor (uint i = 0u; i < pushC.valuesPerPixel; i++)\n"
9718 					"\t{\n"
9719 					"\t\thighp uvec4 vecVal = block.values[value % pushC.bufferSize];\n"
9720 					"\t\tif ((value % 4u) == 0u)\n"
9721 					"\t\t\tvalue = vecVal.x;\n"
9722 					"\t\telse if ((value % 4u) == 1u)\n"
9723 					"\t\t\tvalue = vecVal.y;\n"
9724 					"\t\telse if ((value % 4u) == 2u)\n"
9725 					"\t\t\tvalue = vecVal.z;\n"
9726 					"\t\telse if ((value % 4u) == 3u)\n"
9727 					"\t\t\tvalue = vecVal.w;\n"
9728 					"\t}\n"
9729 					"\tuvec4 valueOut = uvec4(value & 0xFFu, (value >> 8u) & 0xFFu, (value >> 16u) & 0xFFu, (value >> 24u) & 0xFFu);\n"
9730 					"\to_color = vec4(valueOut) / vec4(255.0);\n"
9731 					"}\n";
9732 
9733 				sources.glslSources.add("uniform-buffer.frag")
9734 					<< glu::FragmentSource(fragmentShader.str());
9735 			}
9736 		}
9737 
9738 		if (config.usage & USAGE_STORAGE_BUFFER)
9739 		{
9740 			{
9741 				// Vertex storage buffer rendering
9742 				const char* const vertexShader =
9743 					"#version 310 es\n"
9744 					"precision highp float;\n"
9745 					"readonly layout(set=0, binding=0) buffer Block\n"
9746 					"{\n"
9747 					"\thighp uvec4 values[];\n"
9748 					"} block;\n"
9749 					"void main (void) {\n"
9750 					"\tgl_PointSize = 1.0;\n"
9751 					"\thighp uvec4 vecVal = block.values[gl_VertexIndex / 8];\n"
9752 					"\thighp uint val;\n"
9753 					"\tif (((gl_VertexIndex / 2) % 4 == 0))\n"
9754 					"\t\tval = vecVal.x;\n"
9755 					"\telse if (((gl_VertexIndex / 2) % 4 == 1))\n"
9756 					"\t\tval = vecVal.y;\n"
9757 					"\telse if (((gl_VertexIndex / 2) % 4 == 2))\n"
9758 					"\t\tval = vecVal.z;\n"
9759 					"\telse if (((gl_VertexIndex / 2) % 4 == 3))\n"
9760 					"\t\tval = vecVal.w;\n"
9761 					"\tif ((gl_VertexIndex % 2) == 0)\n"
9762 					"\t\tval = val & 0xFFFFu;\n"
9763 					"\telse\n"
9764 					"\t\tval = val >> 16u;\n"
9765 					"\thighp vec2 pos = vec2(val & 0xFFu, val >> 8u) / vec2(255.0);\n"
9766 					"\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
9767 					"}\n";
9768 
9769 				sources.glslSources.add("storage-buffer.vert")
9770 					<< glu::VertexSource(vertexShader);
9771 			}
9772 
9773 			{
9774 				std::ostringstream	fragmentShader;
9775 
9776 				fragmentShader <<
9777 					"#version 310 es\n"
9778 					"precision highp float;\n"
9779 					"precision highp int;\n"
9780 					"layout(location = 0) out highp vec4 o_color;\n"
9781 					"layout(set=0, binding=0) buffer Block\n"
9782 					"{\n"
9783 					"\thighp uvec4 values[];\n"
9784 					"} block;\n"
9785 					"layout(push_constant) uniform PushC\n"
9786 					"{\n"
9787 					"\tuint valuesPerPixel;\n"
9788 					"\tuint bufferSize;\n"
9789 					"} pushC;\n"
9790 					"void main (void) {\n"
9791 					"\thighp uint arrayIntSize = pushC.bufferSize / 4u;\n"
9792 					"\thighp uint id = uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x);\n"
9793 					"\thighp uint value = id;\n"
9794 					"\tfor (uint i = 0u; i < pushC.valuesPerPixel; i++)\n"
9795 					"\t{\n"
9796 					"\t\thighp uvec4 vecVal = block.values[(value / 4u) % (arrayIntSize / 4u)];\n"
9797 					"\t\tif ((value % 4u) == 0u)\n"
9798 					"\t\t\tvalue = vecVal.x;\n"
9799 					"\t\telse if ((value % 4u) == 1u)\n"
9800 					"\t\t\tvalue = vecVal.y;\n"
9801 					"\t\telse if ((value % 4u) == 2u)\n"
9802 					"\t\t\tvalue = vecVal.z;\n"
9803 					"\t\telse if ((value % 4u) == 3u)\n"
9804 					"\t\t\tvalue = vecVal.w;\n"
9805 					"\t}\n"
9806 					"\tuvec4 valueOut = uvec4(value & 0xFFu, (value >> 8u) & 0xFFu, (value >> 16u) & 0xFFu, (value >> 24u) & 0xFFu);\n"
9807 					"\to_color = vec4(valueOut) / vec4(255.0);\n"
9808 					"}\n";
9809 
9810 				sources.glslSources.add("storage-buffer.frag")
9811 					<< glu::FragmentSource(fragmentShader.str());
9812 			}
9813 		}
9814 
9815 		if (config.usage & USAGE_UNIFORM_TEXEL_BUFFER)
9816 		{
9817 			{
9818 				// Vertex uniform texel buffer rendering
9819 				const char* const vertexShader =
9820 					"#version 310 es\n"
9821 					"#extension GL_EXT_texture_buffer : require\n"
9822 					"precision highp float;\n"
9823 					"layout(set=0, binding=0) uniform highp utextureBuffer u_sampler;\n"
9824 					"void main (void) {\n"
9825 					"\tgl_PointSize = 1.0;\n"
9826 					"\thighp uint val = texelFetch(u_sampler, gl_VertexIndex).x;\n"
9827 					"\thighp vec2 pos = vec2(val & 0xFFu, val >> 8u) / vec2(255.0);\n"
9828 					"\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
9829 					"}\n";
9830 
9831 				sources.glslSources.add("uniform-texel-buffer.vert")
9832 					<< glu::VertexSource(vertexShader);
9833 			}
9834 
9835 			{
9836 				// Fragment uniform texel buffer rendering
9837 				const char* const fragmentShader =
9838 					"#version 310 es\n"
9839 					"#extension GL_EXT_texture_buffer : require\n"
9840 					"#extension GL_EXT_samplerless_texture_functions : require\n"
9841 					"precision highp float;\n"
9842 					"precision highp int;\n"
9843 					"layout(set=0, binding=0) uniform highp utextureBuffer u_sampler;\n"
9844 					"layout(location = 0) out highp vec4 o_color;\n"
9845 					"layout(push_constant) uniform PushC\n"
9846 					"{\n"
9847 					"\tuint callId;\n"
9848 					"\tuint valuesPerPixel;\n"
9849 					"\tuint maxTexelCount;\n"
9850 					"} pushC;\n"
9851 					"void main (void) {\n"
9852 					"\thighp uint id = uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x);\n"
9853 					"\thighp uint value = id;\n"
9854 					"\tif (uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x) < pushC.callId * (pushC.maxTexelCount / pushC.valuesPerPixel))\n"
9855 					"\t\tdiscard;\n"
9856 					"\tfor (uint i = 0u; i < pushC.valuesPerPixel; i++)\n"
9857 					"\t{\n"
9858 					"\t\tvalue = texelFetch(u_sampler, int(value % uint(textureSize(u_sampler)))).x;\n"
9859 					"\t}\n"
9860 					"\tuvec4 valueOut = uvec4(value & 0xFFu, (value >> 8u) & 0xFFu, (value >> 16u) & 0xFFu, (value >> 24u) & 0xFFu);\n"
9861 					"\to_color = vec4(valueOut) / vec4(255.0);\n"
9862 					"}\n";
9863 
9864 				sources.glslSources.add("uniform-texel-buffer.frag")
9865 					<< glu::FragmentSource(fragmentShader);
9866 			}
9867 		}
9868 
9869 		if (config.usage & USAGE_STORAGE_TEXEL_BUFFER)
9870 		{
9871 			{
9872 				// Vertex storage texel buffer rendering
9873 				const char* const vertexShader =
9874 					"#version 450\n"
9875 					"#extension GL_EXT_texture_buffer : require\n"
9876 					"precision highp float;\n"
9877 					"layout(set=0, binding=0, r32ui) uniform readonly highp uimageBuffer u_sampler;\n"
9878 					"out gl_PerVertex {\n"
9879 					"\tvec4 gl_Position;\n"
9880 					"\tfloat gl_PointSize;\n"
9881 					"};\n"
9882 					"void main (void) {\n"
9883 					"\tgl_PointSize = 1.0;\n"
9884 					"\thighp uint val = imageLoad(u_sampler, gl_VertexIndex / 2).x;\n"
9885 					"\tif (gl_VertexIndex % 2 == 0)\n"
9886 					"\t\tval = val & 0xFFFFu;\n"
9887 					"\telse\n"
9888 					"\t\tval = val >> 16;\n"
9889 					"\thighp vec2 pos = vec2(val & 0xFFu, val >> 8u) / vec2(255.0);\n"
9890 					"\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
9891 					"}\n";
9892 
9893 				sources.glslSources.add("storage-texel-buffer.vert")
9894 					<< glu::VertexSource(vertexShader);
9895 			}
9896 			{
9897 				// Fragment storage texel buffer rendering
9898 				const char* const fragmentShader =
9899 					"#version 310 es\n"
9900 					"#extension GL_EXT_texture_buffer : require\n"
9901 					"precision highp float;\n"
9902 					"precision highp int;\n"
9903 					"layout(set=0, binding=0, r32ui) uniform readonly highp uimageBuffer u_sampler;\n"
9904 					"layout(location = 0) out highp vec4 o_color;\n"
9905 					"layout(push_constant) uniform PushC\n"
9906 					"{\n"
9907 					"\tuint callId;\n"
9908 					"\tuint valuesPerPixel;\n"
9909 					"\tuint maxTexelCount;\n"
9910 					"\tuint width;\n"
9911 					"} pushC;\n"
9912 					"void main (void) {\n"
9913 					"\thighp uint id = uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x);\n"
9914 					"\thighp uint value = id;\n"
9915 					"\tif (uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x) < pushC.callId * (pushC.maxTexelCount / pushC.valuesPerPixel))\n"
9916 					"\t\tdiscard;\n"
9917 					"\tfor (uint i = 0u; i < pushC.valuesPerPixel; i++)\n"
9918 					"\t{\n"
9919 					"\t\tvalue = imageLoad(u_sampler, int(value % pushC.width)).x;\n"
9920 					"\t}\n"
9921 					"\tuvec4 valueOut = uvec4(value & 0xFFu, (value >> 8u) & 0xFFu, (value >> 16u) & 0xFFu, (value >> 24u) & 0xFFu);\n"
9922 					"\to_color = vec4(valueOut) / vec4(255.0);\n"
9923 					"}\n";
9924 
9925 				sources.glslSources.add("storage-texel-buffer.frag")
9926 					<< glu::FragmentSource(fragmentShader);
9927 			}
9928 		}
9929 
9930 		if (config.usage & USAGE_STORAGE_IMAGE)
9931 		{
9932 			{
9933 				// Vertex storage image
9934 				const char* const vertexShader =
9935 					"#version 450\n"
9936 					"precision highp float;\n"
9937 					"layout(set=0, binding=0, rgba8) uniform readonly image2D u_image;\n"
9938 					"out gl_PerVertex {\n"
9939 					"\tvec4 gl_Position;\n"
9940 					"\tfloat gl_PointSize;\n"
9941 					"};\n"
9942 					"void main (void) {\n"
9943 					"\tgl_PointSize = 1.0;\n"
9944 					"\thighp vec4 val = imageLoad(u_image, ivec2((gl_VertexIndex / 2) / imageSize(u_image).x, (gl_VertexIndex / 2) % imageSize(u_image).x));\n"
9945 					"\thighp vec2 pos;\n"
9946 					"\tif (gl_VertexIndex % 2 == 0)\n"
9947 					"\t\tpos = val.xy;\n"
9948 					"\telse\n"
9949 					"\t\tpos = val.zw;\n"
9950 					"\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
9951 					"}\n";
9952 
9953 				sources.glslSources.add("storage-image.vert")
9954 					<< glu::VertexSource(vertexShader);
9955 			}
9956 			{
9957 				// Fragment storage image
9958 				const char* const fragmentShader =
9959 					"#version 450\n"
9960 					"#extension GL_EXT_texture_buffer : require\n"
9961 					"precision highp float;\n"
9962 					"layout(set=0, binding=0, rgba8) uniform readonly image2D u_image;\n"
9963 					"layout(location = 0) out highp vec4 o_color;\n"
9964 					"void main (void) {\n"
9965 					"\thighp uvec2 size = uvec2(imageSize(u_image).x, imageSize(u_image).y);\n"
9966 					"\thighp uint valuesPerPixel = max(1u, (size.x * size.y) / (256u * 256u));\n"
9967 					"\thighp uvec4 value = uvec4(uint(gl_FragCoord.x), uint(gl_FragCoord.y), 0u, 0u);\n"
9968 					"\tfor (uint i = 0u; i < valuesPerPixel; i++)\n"
9969 					"\t{\n"
9970 					"\t\thighp vec4 floatValue = imageLoad(u_image, ivec2(int((value.z *  256u + (value.x ^ value.z)) % size.x), int((value.w * 256u + (value.y ^ value.w)) % size.y)));\n"
9971 					"\t\tvalue = uvec4(uint(round(floatValue.x * 255.0)), uint(round(floatValue.y * 255.0)), uint(round(floatValue.z * 255.0)), uint(round(floatValue.w * 255.0)));\n"
9972 					"\t}\n"
9973 					"\to_color = vec4(value) / vec4(255.0);\n"
9974 					"}\n";
9975 
9976 				sources.glslSources.add("storage-image.frag")
9977 					<< glu::FragmentSource(fragmentShader);
9978 			}
9979 		}
9980 
9981 		if (config.usage & USAGE_SAMPLED_IMAGE)
9982 		{
9983 			{
9984 				// Vertex storage image
9985 				const char* const vertexShader =
9986 					"#version 450\n"
9987 					"precision highp float;\n"
9988 					"layout(set=0, binding=0) uniform sampler2D u_sampler;\n"
9989 					"out gl_PerVertex {\n"
9990 					"\tvec4 gl_Position;\n"
9991 					"\tfloat gl_PointSize;\n"
9992 					"};\n"
9993 					"void main (void) {\n"
9994 					"\tgl_PointSize = 1.0;\n"
9995 					"\thighp vec4 val = texelFetch(u_sampler, ivec2((gl_VertexIndex / 2) / textureSize(u_sampler, 0).x, (gl_VertexIndex / 2) % textureSize(u_sampler, 0).x), 0);\n"
9996 					"\thighp vec2 pos;\n"
9997 					"\tif (gl_VertexIndex % 2 == 0)\n"
9998 					"\t\tpos = val.xy;\n"
9999 					"\telse\n"
10000 					"\t\tpos = val.zw;\n"
10001 					"\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
10002 					"}\n";
10003 
10004 				sources.glslSources.add("sampled-image.vert")
10005 					<< glu::VertexSource(vertexShader);
10006 			}
10007 			{
10008 				// Fragment storage image
10009 				const char* const fragmentShader =
10010 					"#version 450\n"
10011 					"#extension GL_EXT_texture_buffer : require\n"
10012 					"precision highp float;\n"
10013 					"layout(set=0, binding=0) uniform sampler2D u_sampler;\n"
10014 					"layout(location = 0) out highp vec4 o_color;\n"
10015 					"void main (void) {\n"
10016 					"\thighp uvec2 size = uvec2(textureSize(u_sampler, 0).x, textureSize(u_sampler, 0).y);\n"
10017 					"\thighp uint valuesPerPixel = max(1u, (size.x * size.y) / (256u * 256u));\n"
10018 					"\thighp uvec4 value = uvec4(uint(gl_FragCoord.x), uint(gl_FragCoord.y), 0u, 0u);\n"
10019 					"\tfor (uint i = 0u; i < valuesPerPixel; i++)\n"
10020 					"\t{\n"
10021 					"\t\thighp vec4 floatValue = texelFetch(u_sampler, ivec2(int((value.z *  256u + (value.x ^ value.z)) % size.x), int((value.w * 256u + (value.y ^ value.w)) % size.y)), 0);\n"
10022 					"\t\tvalue = uvec4(uint(round(floatValue.x * 255.0)), uint(round(floatValue.y * 255.0)), uint(round(floatValue.z * 255.0)), uint(round(floatValue.w * 255.0)));\n"
10023 					"\t}\n"
10024 					"\to_color = vec4(value) / vec4(255.0);\n"
10025 					"}\n";
10026 
10027 				sources.glslSources.add("sampled-image.frag")
10028 					<< glu::FragmentSource(fragmentShader);
10029 			}
10030 		}
10031 
10032 		{
10033 			const char* const vertexShader =
10034 				"#version 450\n"
10035 				"out gl_PerVertex {\n"
10036 				"\tvec4 gl_Position;\n"
10037 				"};\n"
10038 				"precision highp float;\n"
10039 				"void main (void) {\n"
10040 				"\tgl_Position = vec4(((gl_VertexIndex + 2) / 3) % 2 == 0 ? -1.0 : 1.0,\n"
10041 				"\t                   ((gl_VertexIndex + 1) / 3) % 2 == 0 ? -1.0 : 1.0, 0.0, 1.0);\n"
10042 				"}\n";
10043 
10044 			sources.glslSources.add("render-quad.vert")
10045 				<< glu::VertexSource(vertexShader);
10046 		}
10047 
10048 		{
10049 			const char* const fragmentShader =
10050 				"#version 310 es\n"
10051 				"layout(location = 0) out highp vec4 o_color;\n"
10052 				"void main (void) {\n"
10053 				"\to_color = vec4(1.0);\n"
10054 				"}\n";
10055 
10056 			sources.glslSources.add("render-white.frag")
10057 				<< glu::FragmentSource(fragmentShader);
10058 		}
10059 	}
10060 };
10061 
checkSupport(vkt::Context & context,TestConfig config)10062 void checkSupport(vkt::Context& context, TestConfig config)
10063 {
10064 #ifndef CTS_USES_VULKANSC
10065 	if (context.isDeviceFunctionalitySupported("VK_KHR_portability_subset") &&
10066 		((config.vertexBufferStride % context.getPortabilitySubsetProperties().minVertexInputBindingStrideAlignment) != 0u))
10067 	{
10068 		TCU_THROW(NotSupportedError, "VK_KHR_portability_subset: stride is not multiply of minVertexInputBindingStrideAlignment");
10069 	}
10070 #else
10071 	DE_UNREF(context);
10072 	DE_UNREF(config);
10073 #endif // CTS_USES_VULKANSC
10074 }
10075 
10076 } // anonymous
10077 
// Build the "pipeline_barrier" test group.
//
// The group is populated in two passes:
//   1. One sub-group per (write usage, read usage) pair, with one test per
//      buffer size. Vertex-buffer read cases are additionally expanded over
//      every supported vertex buffer stride.
//   2. Two combined sub-groups, "all" and "all_device", that enable every
//      usage flag at once ("all_device" minus the host read/write usages),
//      expanded over size x stride.
//
// \param testCtx  test context the created cases are registered against
// \return heap-allocated group; ownership passes to the caller
tcu::TestCaseGroup* createPipelineBarrierTests (tcu::TestContext& testCtx)
{
	de::MovePtr<tcu::TestCaseGroup>	group			(new tcu::TestCaseGroup(testCtx, "pipeline_barrier", "Pipeline barrier tests."));
	// Resource sizes each usage combination is tested at.
	const vk::VkDeviceSize			sizes[]			=
	{
		1024,			// 1K
		8*1024,			// 8K
		64*1024,		// 64K
		ONE_MEGABYTE,	// 1M
	};
	// Every usage flag; only used to build the combined "all" mask below.
	const Usage						usages[]		=
	{
		USAGE_HOST_READ,
		USAGE_HOST_WRITE,
		USAGE_TRANSFER_SRC,
		USAGE_TRANSFER_DST,
		USAGE_VERTEX_BUFFER,
		USAGE_INDEX_BUFFER,
		USAGE_UNIFORM_BUFFER,
		USAGE_UNIFORM_TEXEL_BUFFER,
		USAGE_STORAGE_BUFFER,
		USAGE_STORAGE_TEXEL_BUFFER,
		USAGE_STORAGE_IMAGE,
		USAGE_SAMPLED_IMAGE
	};
	// Usages the barrier destination (consumer) side is exercised with.
	const Usage						readUsages[]		=
	{
		USAGE_HOST_READ,
		USAGE_TRANSFER_SRC,
		USAGE_VERTEX_BUFFER,
		USAGE_INDEX_BUFFER,
		USAGE_UNIFORM_BUFFER,
		USAGE_UNIFORM_TEXEL_BUFFER,
		USAGE_STORAGE_BUFFER,
		USAGE_STORAGE_TEXEL_BUFFER,
		USAGE_STORAGE_IMAGE,
		USAGE_SAMPLED_IMAGE
	};

	// Usages the barrier source (producer) side is exercised with.
	const Usage						writeUsages[]	=
	{
		USAGE_HOST_WRITE,
		USAGE_TRANSFER_DST
	};

	// Strides tested for vertex-buffer reads (default plus one alternative,
	// the latter gated by checkSupport() on portability-subset devices).
	const deUint32					vertexStrides[]	=
	{
		DEFAULT_VERTEX_BUFFER_STRIDE,
		ALTERNATIVE_VERTEX_BUFFER_STRIDE,
	};

	// Pass 1: one group per write/read usage pair.
	for (size_t writeUsageNdx = 0; writeUsageNdx < DE_LENGTH_OF_ARRAY(writeUsages); writeUsageNdx++)
	{
		const Usage	writeUsage	= writeUsages[writeUsageNdx];

		for (size_t readUsageNdx = 0; readUsageNdx < DE_LENGTH_OF_ARRAY(readUsages); readUsageNdx++)
		{
			const Usage						readUsage		= readUsages[readUsageNdx];
			const Usage						usage			= writeUsage | readUsage;
			const string					usageGroupName	(usageToName(usage));
			de::MovePtr<tcu::TestCaseGroup>	usageGroup		(new tcu::TestCaseGroup(testCtx, usageGroupName.c_str(), usageGroupName.c_str()));

			for (size_t sizeNdx = 0; sizeNdx < DE_LENGTH_OF_ARRAY(sizes); sizeNdx++)
			{
				const vk::VkDeviceSize	size		= sizes[sizeNdx];
				// Non-const: vertexBufferStride is patched per stride below.
				TestConfig				config		=
				{
					usage,
					DEFAULT_VERTEX_BUFFER_STRIDE,
					size,
					vk::VK_SHARING_MODE_EXCLUSIVE
				};
				const string			testName	(de::toString((deUint64)(size)));

				if (readUsage == USAGE_VERTEX_BUFFER)
				{
					// Vertex-buffer reads are stride-sensitive: emit one case per
					// stride and attach checkSupport() to filter unsupported strides.
					for (size_t strideNdx = 0; strideNdx < DE_LENGTH_OF_ARRAY(vertexStrides); ++strideNdx)
					{
						const deUint32	stride			= vertexStrides[strideNdx];
						const string	finalTestName	= testName + "_vertex_buffer_stride_" + de::toString(stride);

						config.vertexBufferStride = stride;
						usageGroup->addChild(new InstanceFactory1WithSupport<MemoryTestInstance, TestConfig, FunctionSupport1<TestConfig>, AddPrograms>(testCtx, tcu::NODETYPE_SELF_VALIDATE, finalTestName, finalTestName, config, typename FunctionSupport1<TestConfig>::Args(checkSupport, config)));
					}
				}
				else
				{
					// Other read usages need no support check; test name is just the size.
					usageGroup->addChild(new InstanceFactory1<MemoryTestInstance, TestConfig, AddPrograms>(testCtx,tcu::NODETYPE_SELF_VALIDATE, testName, testName, AddPrograms(), config));
				}
			}

			// addChild takes ownership; release only after it succeeds so the
			// group is not leaked if addChild throws.
			group->addChild(usageGroup.get());
			usageGroup.release();
		}
	}

	// Pass 2: combined-usage groups.
	{
		Usage all = (Usage)0;

		for (size_t usageNdx = 0; usageNdx < DE_LENGTH_OF_ARRAY(usages); usageNdx++)
			all = all | usages[usageNdx];

		{
			// "all": every usage flag enabled simultaneously.
			const string					usageGroupName	("all");
			de::MovePtr<tcu::TestCaseGroup>	usageGroup		(new tcu::TestCaseGroup(testCtx, usageGroupName.c_str(), usageGroupName.c_str()));

			for (size_t sizeNdx = 0; sizeNdx < DE_LENGTH_OF_ARRAY(sizes); sizeNdx++)
			{
				const vk::VkDeviceSize	size		= sizes[sizeNdx];

				// Vertex-buffer usage is included, so expand over strides and
				// attach checkSupport() as in pass 1.
				for (size_t strideNdx = 0; strideNdx < DE_LENGTH_OF_ARRAY(vertexStrides); ++strideNdx)
				{
					const deUint32			stride		= vertexStrides[strideNdx];
					const string			testName	= de::toString(size) + "_vertex_buffer_stride_" + de::toString(stride);
					const TestConfig		config		=
					{
						all,
						stride,
						size,
						vk::VK_SHARING_MODE_EXCLUSIVE
					};

					usageGroup->addChild(new InstanceFactory1WithSupport<MemoryTestInstance, vkt::memory::TestConfig, FunctionSupport1<TestConfig>, AddPrograms>(testCtx,tcu::NODETYPE_SELF_VALIDATE, testName, testName, config, typename FunctionSupport1<TestConfig>::Args(checkSupport, config)));
				}
			}

			group->addChild(usageGroup.get());
			usageGroup.release();
		}

		{
			// "all_device": like "all" but with the host-side usages masked out,
			// exercising device-only access combinations.
			const string					usageGroupName	("all_device");
			de::MovePtr<tcu::TestCaseGroup>	usageGroup		(new tcu::TestCaseGroup(testCtx, usageGroupName.c_str(), usageGroupName.c_str()));

			for (size_t sizeNdx = 0; sizeNdx < DE_LENGTH_OF_ARRAY(sizes); sizeNdx++)
			{
				const vk::VkDeviceSize	size		= sizes[sizeNdx];

				for (size_t strideNdx = 0; strideNdx < DE_LENGTH_OF_ARRAY(vertexStrides); ++strideNdx)
				{
					const deUint32			stride		= vertexStrides[strideNdx];
					const string			testName	= de::toString(size) + "_vertex_buffer_stride_" + de::toString(stride);
					const TestConfig		config		=
					{
						(Usage)(all & (~(USAGE_HOST_READ|USAGE_HOST_WRITE))),
						stride,
						size,
						vk::VK_SHARING_MODE_EXCLUSIVE
					};

					usageGroup->addChild(new InstanceFactory1WithSupport<MemoryTestInstance, TestConfig, FunctionSupport1<TestConfig>, AddPrograms>(testCtx,tcu::NODETYPE_SELF_VALIDATE, testName, testName, config, typename FunctionSupport1<TestConfig>::Args(checkSupport, config)));
				}
			}

			group->addChild(usageGroup.get());
			usageGroup.release();
		}
	}

	return group.release();
}
10239 
10240 } // memory
10241 } // vkt
10242