1 /*-------------------------------------------------------------------------
2  * Vulkan Conformance Tests
3  * ------------------------
4  *
5  * Copyright (c) 2015 Google Inc.
6  *
7  * Licensed under the Apache License, Version 2.0 (the "License");
8  * you may not use this file except in compliance with the License.
9  * You may obtain a copy of the License at
10  *
11  *      http://www.apache.org/licenses/LICENSE-2.0
12  *
13  * Unless required by applicable law or agreed to in writing, software
14  * distributed under the License is distributed on an "AS IS" BASIS,
15  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16  * See the License for the specific language governing permissions and
17  * limitations under the License.
18  *
19  *//*!
20  * \file
21  * \brief Pipeline barrier tests
22  *//*--------------------------------------------------------------------*/
23 
24 #include "vktMemoryPipelineBarrierTests.hpp"
25 
26 #include "vktTestCaseUtil.hpp"
27 
28 #include "vkDefs.hpp"
29 #include "vkPlatform.hpp"
30 #include "vkRefUtil.hpp"
31 #include "vkQueryUtil.hpp"
32 #include "vkMemUtil.hpp"
33 #include "vkTypeUtil.hpp"
34 #include "vkPrograms.hpp"
35 #include "vkCmdUtil.hpp"
36 #include "vkObjUtil.hpp"
37 
38 #include "tcuMaybe.hpp"
39 #include "tcuTextureUtil.hpp"
40 #include "tcuTestLog.hpp"
41 #include "tcuResultCollector.hpp"
42 #include "tcuTexture.hpp"
43 #include "tcuImageCompare.hpp"
44 
45 #include "deUniquePtr.hpp"
46 #include "deStringUtil.hpp"
47 #include "deRandom.hpp"
48 
49 #include "deInt32.h"
50 #include "deMath.h"
51 #include "deMemory.h"
52 
53 #include <map>
54 #include <set>
55 #include <sstream>
56 #include <string>
57 #include <vector>
58 
59 using tcu::TestLog;
60 using tcu::Maybe;
61 
62 using de::MovePtr;
63 
64 using std::string;
65 using std::vector;
66 using std::map;
67 using std::set;
68 using std::pair;
69 
70 using tcu::IVec2;
71 using tcu::UVec2;
72 using tcu::UVec4;
73 using tcu::Vec4;
74 using tcu::ConstPixelBufferAccess;
75 using tcu::PixelBufferAccess;
76 using tcu::TextureFormat;
77 using tcu::TextureLevel;
78 
79 namespace vkt
80 {
81 namespace memory
82 {
83 namespace
84 {
85 
86 #define ONE_MEGABYTE						1024*1024
87 #define DEFAULT_VERTEX_BUFFER_STRIDE		2
88 #define ALTERNATIVE_VERTEX_BUFFER_STRIDE	4
89 
90 enum
91 {
92 	MAX_UNIFORM_BUFFER_SIZE = 1024,
93 	MAX_STORAGE_BUFFER_SIZE = (1<<28),
94 	MAX_SIZE = (128 * 1024)
95 };
96 
97 // \todo [mika] Add to utilities
98 template<typename T>
99 T divRoundUp (const T& a, const T& b)
100 {
101 	return (a / b) + (a % b == 0 ? 0 : 1);
102 }
103 
104 enum Usage
105 {
106 	// Mapped host read and write
107 	USAGE_HOST_READ = (0x1u<<0),
108 	USAGE_HOST_WRITE = (0x1u<<1),
109 
110 	// Copy and other transfer operations
111 	USAGE_TRANSFER_SRC = (0x1u<<2),
112 	USAGE_TRANSFER_DST = (0x1u<<3),
113 
114 	// Buffer usage flags
115 	USAGE_INDEX_BUFFER = (0x1u<<4),
116 	USAGE_VERTEX_BUFFER = (0x1u<<5),
117 
118 	USAGE_UNIFORM_BUFFER = (0x1u<<6),
119 	USAGE_STORAGE_BUFFER = (0x1u<<7),
120 
121 	USAGE_UNIFORM_TEXEL_BUFFER = (0x1u<<8),
122 	USAGE_STORAGE_TEXEL_BUFFER = (0x1u<<9),
123 
124 	// \todo [2016-03-09 mika] This is probably almost impossible to do
125 	USAGE_INDIRECT_BUFFER = (0x1u<<10),
126 
127 	// Texture usage flags
128 	USAGE_SAMPLED_IMAGE = (0x1u<<11),
129 	USAGE_STORAGE_IMAGE = (0x1u<<12),
130 	USAGE_COLOR_ATTACHMENT = (0x1u<<13),
131 	USAGE_INPUT_ATTACHMENT = (0x1u<<14),
132 	USAGE_DEPTH_STENCIL_ATTACHMENT = (0x1u<<15),
133 };
134 
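// True if the usage flags include at least one way for the device (GPU) to write the
// buffer; the image variant below answers the same question for images.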
135 bool supportsDeviceBufferWrites (Usage usage)
136 {
137 	if (usage & USAGE_TRANSFER_DST)
138 		return true;
139 
140 	if (usage & USAGE_STORAGE_BUFFER)
141 		return true;
142 
143 	if (usage & USAGE_STORAGE_TEXEL_BUFFER)
144 		return true;
145 
146 	return false;
147 }
148 
149 bool supportsDeviceImageWrites (Usage usage)
150 {
151 	if (usage & USAGE_TRANSFER_DST)
152 		return true;
153 
154 	if (usage & USAGE_STORAGE_IMAGE)
155 		return true;
156 
157 	if (usage & USAGE_COLOR_ATTACHMENT)
158 		return true;
159 
160 	return false;
161 }
162 
163 // Sequential access enums
164 enum Access
165 {
166 	ACCESS_INDIRECT_COMMAND_READ_BIT = 0,
167 	ACCESS_INDEX_READ_BIT,
168 	ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
169 	ACCESS_UNIFORM_READ_BIT,
170 	ACCESS_INPUT_ATTACHMENT_READ_BIT,
171 	ACCESS_SHADER_READ_BIT,
172 	ACCESS_SHADER_WRITE_BIT,
173 	ACCESS_COLOR_ATTACHMENT_READ_BIT,
174 	ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
175 	ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT,
176 	ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
177 	ACCESS_TRANSFER_READ_BIT,
178 	ACCESS_TRANSFER_WRITE_BIT,
179 	ACCESS_HOST_READ_BIT,
180 	ACCESS_HOST_WRITE_BIT,
181 	ACCESS_MEMORY_READ_BIT,
182 	ACCESS_MEMORY_WRITE_BIT,
183 
184 	ACCESS_LAST
185 };
186 
187 Access accessFlagToAccess (vk::VkAccessFlagBits flag)
188 {
189 	switch (flag)
190 	{
191 	case vk::VK_ACCESS_INDIRECT_COMMAND_READ_BIT:			return ACCESS_INDIRECT_COMMAND_READ_BIT;
192 	case vk::VK_ACCESS_INDEX_READ_BIT:						return ACCESS_INDEX_READ_BIT;
193 	case vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT:			return ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
194 	case vk::VK_ACCESS_UNIFORM_READ_BIT:					return ACCESS_UNIFORM_READ_BIT;
195 	case vk::VK_ACCESS_INPUT_ATTACHMENT_READ_BIT:			return ACCESS_INPUT_ATTACHMENT_READ_BIT;
196 	case vk::VK_ACCESS_SHADER_READ_BIT:						return ACCESS_SHADER_READ_BIT;
197 	case vk::VK_ACCESS_SHADER_WRITE_BIT:					return ACCESS_SHADER_WRITE_BIT;
198 	case vk::VK_ACCESS_COLOR_ATTACHMENT_READ_BIT:			return ACCESS_COLOR_ATTACHMENT_READ_BIT;
199 	case vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT:			return ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
200 	case vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT:	return ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;
201 	case vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT:	return ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
202 	case vk::VK_ACCESS_TRANSFER_READ_BIT:					return ACCESS_TRANSFER_READ_BIT;
203 	case vk::VK_ACCESS_TRANSFER_WRITE_BIT:					return ACCESS_TRANSFER_WRITE_BIT;
204 	case vk::VK_ACCESS_HOST_READ_BIT:						return ACCESS_HOST_READ_BIT;
205 	case vk::VK_ACCESS_HOST_WRITE_BIT:						return ACCESS_HOST_WRITE_BIT;
206 	case vk::VK_ACCESS_MEMORY_READ_BIT:						return ACCESS_MEMORY_READ_BIT;
207 	case vk::VK_ACCESS_MEMORY_WRITE_BIT:					return ACCESS_MEMORY_WRITE_BIT;
208 
209 	default:
210 		DE_FATAL("Unknown access flags");
211 		return ACCESS_LAST;
212 	}
213 }
214 
215 // Sequential stage enums
216 enum PipelineStage
217 {
218 	PIPELINESTAGE_TOP_OF_PIPE_BIT = 0,
219 	PIPELINESTAGE_BOTTOM_OF_PIPE_BIT,
220 	PIPELINESTAGE_DRAW_INDIRECT_BIT,
221 	PIPELINESTAGE_VERTEX_INPUT_BIT,
222 	PIPELINESTAGE_VERTEX_SHADER_BIT,
223 	PIPELINESTAGE_TESSELLATION_CONTROL_SHADER_BIT,
224 	PIPELINESTAGE_TESSELLATION_EVALUATION_SHADER_BIT,
225 	PIPELINESTAGE_GEOMETRY_SHADER_BIT,
226 	PIPELINESTAGE_FRAGMENT_SHADER_BIT,
227 	PIPELINESTAGE_EARLY_FRAGMENT_TESTS_BIT,
228 	PIPELINESTAGE_LATE_FRAGMENT_TESTS_BIT,
229 	PIPELINESTAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
230 	PIPELINESTAGE_COMPUTE_SHADER_BIT,
231 	PIPELINESTAGE_TRANSFER_BIT,
232 	PIPELINESTAGE_HOST_BIT,
233 
234 	PIPELINESTAGE_LAST
235 };
236 
237 PipelineStage pipelineStageFlagToPipelineStage (vk::VkPipelineStageFlagBits flag)
238 {
239 	switch (flag)
240 	{
241 		case vk::VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT:						return PIPELINESTAGE_TOP_OF_PIPE_BIT;
242 		case vk::VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT:					return PIPELINESTAGE_BOTTOM_OF_PIPE_BIT;
243 		case vk::VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT:					return PIPELINESTAGE_DRAW_INDIRECT_BIT;
244 		case vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT:					return PIPELINESTAGE_VERTEX_INPUT_BIT;
245 		case vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT:					return PIPELINESTAGE_VERTEX_SHADER_BIT;
246 		case vk::VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT:		return PIPELINESTAGE_TESSELLATION_CONTROL_SHADER_BIT;
247 		case vk::VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT:	return PIPELINESTAGE_TESSELLATION_EVALUATION_SHADER_BIT;
248 		case vk::VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT:					return PIPELINESTAGE_GEOMETRY_SHADER_BIT;
249 		case vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT:					return PIPELINESTAGE_FRAGMENT_SHADER_BIT;
250 		case vk::VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT:			return PIPELINESTAGE_EARLY_FRAGMENT_TESTS_BIT;
251 		case vk::VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT:				return PIPELINESTAGE_LATE_FRAGMENT_TESTS_BIT;
252 		case vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT:			return PIPELINESTAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
253 		case vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT:					return PIPELINESTAGE_COMPUTE_SHADER_BIT;
254 		case vk::VK_PIPELINE_STAGE_TRANSFER_BIT:						return PIPELINESTAGE_TRANSFER_BIT;
255 		case vk::VK_PIPELINE_STAGE_HOST_BIT:							return PIPELINESTAGE_HOST_BIT;
256 
257 		default:
258 			DE_FATAL("Unknown pipeline stage flags");
259 			return PIPELINESTAGE_LAST;
260 	}
261 }
262 
263 Usage operator| (Usage a, Usage b)
264 {
265 	return (Usage)((deUint32)a | (deUint32)b);
266 }
267 
268 Usage operator& (Usage a, Usage b)
269 {
270 	return (Usage)((deUint32)a & (deUint32)b);
271 }
272 
273 string usageToName (Usage usage)
274 {
275 	const struct
276 	{
277 		Usage				usage;
278 		const char* const	name;
279 	} usageNames[] =
280 	{
281 		{ USAGE_HOST_READ,					"host_read" },
282 		{ USAGE_HOST_WRITE,					"host_write" },
283 
284 		{ USAGE_TRANSFER_SRC,				"transfer_src" },
285 		{ USAGE_TRANSFER_DST,				"transfer_dst" },
286 
287 		{ USAGE_INDEX_BUFFER,				"index_buffer" },
288 		{ USAGE_VERTEX_BUFFER,				"vertex_buffer" },
289 		{ USAGE_UNIFORM_BUFFER,				"uniform_buffer" },
290 		{ USAGE_STORAGE_BUFFER,				"storage_buffer" },
291 		{ USAGE_UNIFORM_TEXEL_BUFFER,		"uniform_texel_buffer" },
292 		{ USAGE_STORAGE_TEXEL_BUFFER,		"storage_texel_buffer" },
293 		{ USAGE_INDIRECT_BUFFER,			"indirect_buffer" },
294 		{ USAGE_SAMPLED_IMAGE,				"image_sampled" },
295 		{ USAGE_STORAGE_IMAGE,				"storage_image" },
296 		{ USAGE_COLOR_ATTACHMENT,			"color_attachment" },
297 		{ USAGE_INPUT_ATTACHMENT,			"input_attachment" },
298 		{ USAGE_DEPTH_STENCIL_ATTACHMENT,	"depth_stencil_attachment" },
299 	};
300 
301 	std::ostringstream	stream;
302 	bool				first = true;
303 
304 	for (size_t usageNdx = 0; usageNdx < DE_LENGTH_OF_ARRAY(usageNames); usageNdx++)
305 	{
306 		if (usage & usageNames[usageNdx].usage)
307 		{
308 			if (!first)
309 				stream << "_";
310 			else
311 				first = false;
312 
313 			stream << usageNames[usageNdx].name;
314 		}
315 	}
316 
317 	return stream.str();
318 }
319 
320 vk::VkBufferUsageFlags usageToBufferUsageFlags (Usage usage)
321 {
322 	vk::VkBufferUsageFlags flags = 0;
323 
324 	if (usage & USAGE_TRANSFER_SRC)
325 		flags |= vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
326 
327 	if (usage & USAGE_TRANSFER_DST)
328 		flags |= vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT;
329 
330 	if (usage & USAGE_INDEX_BUFFER)
331 		flags |= vk::VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
332 
333 	if (usage & USAGE_VERTEX_BUFFER)
334 		flags |= vk::VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
335 
336 	if (usage & USAGE_INDIRECT_BUFFER)
337 		flags |= vk::VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT;
338 
339 	if (usage & USAGE_UNIFORM_BUFFER)
340 		flags |= vk::VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
341 
342 	if (usage & USAGE_STORAGE_BUFFER)
343 		flags |= vk::VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
344 
345 	if (usage & USAGE_UNIFORM_TEXEL_BUFFER)
346 		flags |= vk::VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT;
347 
348 	if (usage & USAGE_STORAGE_TEXEL_BUFFER)
349 		flags |= vk::VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT;
350 
351 	return flags;
352 }
353 
354 vk::VkImageUsageFlags usageToImageUsageFlags (Usage usage)
355 {
356 	vk::VkImageUsageFlags flags = 0;
357 
358 	if (usage & USAGE_TRANSFER_SRC)
359 		flags |= vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
360 
361 	if (usage & USAGE_TRANSFER_DST)
362 		flags |= vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT;
363 
364 	if (usage & USAGE_SAMPLED_IMAGE)
365 		flags |= vk::VK_IMAGE_USAGE_SAMPLED_BIT;
366 
367 	if (usage & USAGE_STORAGE_IMAGE)
368 		flags |= vk::VK_IMAGE_USAGE_STORAGE_BIT;
369 
370 	if (usage & USAGE_COLOR_ATTACHMENT)
371 		flags |= vk::VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
372 
373 	if (usage & USAGE_INPUT_ATTACHMENT)
374 		flags |= vk::VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
375 
376 	if (usage & USAGE_DEPTH_STENCIL_ATTACHMENT)
377 		flags |= vk::VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
378 
379 	return flags;
380 }
381 
382 vk::VkPipelineStageFlags usageToStageFlags (Usage usage)
383 {
384 	vk::VkPipelineStageFlags flags = 0;
385 
386 	if (usage & (USAGE_HOST_READ|USAGE_HOST_WRITE))
387 		flags |= vk::VK_PIPELINE_STAGE_HOST_BIT;
388 
389 	if (usage & (USAGE_TRANSFER_SRC|USAGE_TRANSFER_DST))
390 		flags |= vk::VK_PIPELINE_STAGE_TRANSFER_BIT;
391 
392 	if (usage & (USAGE_VERTEX_BUFFER|USAGE_INDEX_BUFFER))
393 		flags |= vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
394 
395 	if (usage & USAGE_INDIRECT_BUFFER)
396 		flags |= vk::VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
397 
398 	if (usage &
399 			(USAGE_UNIFORM_BUFFER
400 			| USAGE_STORAGE_BUFFER
401 			| USAGE_UNIFORM_TEXEL_BUFFER
402 			| USAGE_STORAGE_TEXEL_BUFFER
403 			| USAGE_SAMPLED_IMAGE
404 			| USAGE_STORAGE_IMAGE))
405 	{
406 		flags |= (vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT
407 				| vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT
408 				| vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT);
409 	}
410 
411 	if (usage & USAGE_INPUT_ATTACHMENT)
412 		flags |= vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
413 
414 	if (usage & USAGE_COLOR_ATTACHMENT)
415 		flags |= vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
416 
417 	if (usage & USAGE_DEPTH_STENCIL_ATTACHMENT)
418 	{
419 		flags |= vk::VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT
420 				| vk::VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
421 	}
422 
423 	return flags;
424 }
425 
426 vk::VkAccessFlags usageToAccessFlags (Usage usage)
427 {
428 	vk::VkAccessFlags flags = 0;
429 
430 	if (usage & USAGE_HOST_READ)
431 		flags |= vk::VK_ACCESS_HOST_READ_BIT;
432 
433 	if (usage & USAGE_HOST_WRITE)
434 		flags |= vk::VK_ACCESS_HOST_WRITE_BIT;
435 
436 	if (usage & USAGE_TRANSFER_SRC)
437 		flags |= vk::VK_ACCESS_TRANSFER_READ_BIT;
438 
439 	if (usage & USAGE_TRANSFER_DST)
440 		flags |= vk::VK_ACCESS_TRANSFER_WRITE_BIT;
441 
442 	if (usage & USAGE_INDEX_BUFFER)
443 		flags |= vk::VK_ACCESS_INDEX_READ_BIT;
444 
445 	if (usage & USAGE_VERTEX_BUFFER)
446 		flags |= vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
447 
448 	if (usage & (USAGE_UNIFORM_BUFFER | USAGE_UNIFORM_TEXEL_BUFFER))
449 		flags |= vk::VK_ACCESS_UNIFORM_READ_BIT;
450 
451 	if (usage & USAGE_SAMPLED_IMAGE)
452 		flags |= vk::VK_ACCESS_SHADER_READ_BIT;
453 
454 	if (usage & (USAGE_STORAGE_BUFFER
455 				| USAGE_STORAGE_TEXEL_BUFFER
456 				| USAGE_STORAGE_IMAGE))
457 		flags |= vk::VK_ACCESS_SHADER_READ_BIT | vk::VK_ACCESS_SHADER_WRITE_BIT;
458 
459 	if (usage & USAGE_INDIRECT_BUFFER)
460 		flags |= vk::VK_ACCESS_INDIRECT_COMMAND_READ_BIT;
461 
462 	if (usage & USAGE_COLOR_ATTACHMENT)
463 		flags |= vk::VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
464 
465 	if (usage & USAGE_INPUT_ATTACHMENT)
466 		flags |= vk::VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;
467 
468 	if (usage & USAGE_DEPTH_STENCIL_ATTACHMENT)
469 		flags |= vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT
470 			| vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
471 
472 	return flags;
473 }
474 
475 struct TestConfig
476 {
477 	Usage				usage;
478 	deUint32			vertexBufferStride;
479 	vk::VkDeviceSize	size;
480 	vk::VkSharingMode	sharing;
481 };
482 
483 vk::Move<vk::VkCommandBuffer> createBeginCommandBuffer (const vk::DeviceInterface&	vkd,
484 														vk::VkDevice				device,
485 														vk::VkCommandPool			pool,
486 														vk::VkCommandBufferLevel	level)
487 {
488 	const vk::VkCommandBufferInheritanceInfo	inheritInfo	=
489 	{
490 		vk::VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO,
491 		DE_NULL,
492 		0,
493 		0,
494 		0,
495 		VK_FALSE,
496 		0u,
497 		0u
498 	};
499 	const vk::VkCommandBufferBeginInfo			beginInfo =
500 	{
501 		vk::VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
502 		DE_NULL,
503 		0u,
504 		(level == vk::VK_COMMAND_BUFFER_LEVEL_SECONDARY ? &inheritInfo : (const vk::VkCommandBufferInheritanceInfo*)DE_NULL),
505 	};
506 
507 	vk::Move<vk::VkCommandBuffer> commandBuffer (allocateCommandBuffer(vkd, device, pool, level));
508 
509 	vkd.beginCommandBuffer(*commandBuffer, &beginInfo);
510 
511 	return commandBuffer;
512 }
513 
514 vk::Move<vk::VkBuffer> createBuffer (const vk::DeviceInterface&	vkd,
515 									 vk::VkDevice				device,
516 									 vk::VkDeviceSize			size,
517 									 vk::VkBufferUsageFlags		usage,
518 									 vk::VkSharingMode			sharingMode,
519 									 const vector<deUint32>&	queueFamilies)
520 {
521 	const vk::VkBufferCreateInfo	createInfo =
522 	{
523 		vk::VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
524 		DE_NULL,
525 
526 		0,	// flags
527 		size,
528 		usage,
529 		sharingMode,
530 		(deUint32)queueFamilies.size(),
531 		&queueFamilies[0]
532 	};
533 
534 	return vk::createBuffer(vkd, device, &createInfo);
535 }
536 
537 vk::Move<vk::VkDeviceMemory> allocMemory (const vk::DeviceInterface&	vkd,
538 										  vk::VkDevice					device,
539 										  vk::VkDeviceSize				size,
540 										  deUint32						memoryTypeIndex)
541 {
542 	const vk::VkMemoryAllocateInfo alloc =
543 	{
544 		vk::VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,	// sType
545 		DE_NULL,									// pNext
546 
547 		size,
548 		memoryTypeIndex
549 	};
550 
551 	return vk::allocateMemory(vkd, device, &alloc);
552 }
553 
554 vk::Move<vk::VkDeviceMemory> bindBufferMemory (const vk::InstanceInterface&	vki,
555 											   const vk::DeviceInterface&	vkd,
556 											   vk::VkPhysicalDevice			physicalDevice,
557 											   vk::VkDevice					device,
558 											   vk::VkBuffer					buffer,
559 											   vk::VkMemoryPropertyFlags	properties)
560 {
561 	const vk::VkMemoryRequirements				memoryRequirements	= vk::getBufferMemoryRequirements(vkd, device, buffer);
562 	const vk::VkPhysicalDeviceMemoryProperties	memoryProperties	= vk::getPhysicalDeviceMemoryProperties(vki, physicalDevice);
563 	deUint32									memoryTypeIndex;
564 
565 	for (memoryTypeIndex = 0; memoryTypeIndex < memoryProperties.memoryTypeCount; memoryTypeIndex++)
566 	{
567 		if ((memoryRequirements.memoryTypeBits & (0x1u << memoryTypeIndex))
568 			&& (memoryProperties.memoryTypes[memoryTypeIndex].propertyFlags & properties) == properties)
569 		{
570 			try
571 			{
572 				const vk::VkMemoryAllocateInfo	allocationInfo	=
573 				{
574 					vk::VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
575 					DE_NULL,
576 					memoryRequirements.size,
577 					memoryTypeIndex
578 				};
579 				vk::Move<vk::VkDeviceMemory>	memory			(vk::allocateMemory(vkd, device, &allocationInfo));
580 
581 				VK_CHECK(vkd.bindBufferMemory(device, buffer, *memory, 0));
582 
583 				return memory;
584 			}
585 			catch (const vk::Error& error)
586 			{
587 				if (error.getError() == vk::VK_ERROR_OUT_OF_DEVICE_MEMORY
588 					|| error.getError() == vk::VK_ERROR_OUT_OF_HOST_MEMORY)
589 				{
590 					// Try next memory type/heap if out of memory
591 				}
592 				else
593 				{
594 					// Throw all other errors forward
595 					throw;
596 				}
597 			}
598 		}
599 	}
600 
601 	TCU_FAIL("Failed to allocate memory for buffer");
602 }
603 
604 vk::Move<vk::VkDeviceMemory> bindImageMemory (const vk::InstanceInterface&	vki,
605 											   const vk::DeviceInterface&	vkd,
606 											   vk::VkPhysicalDevice			physicalDevice,
607 											   vk::VkDevice					device,
608 											   vk::VkImage					image,
609 											   vk::VkMemoryPropertyFlags	properties)
610 {
611 	const vk::VkMemoryRequirements				memoryRequirements	= vk::getImageMemoryRequirements(vkd, device, image);
612 	const vk::VkPhysicalDeviceMemoryProperties	memoryProperties	= vk::getPhysicalDeviceMemoryProperties(vki, physicalDevice);
613 	deUint32									memoryTypeIndex;
614 
615 	for (memoryTypeIndex = 0; memoryTypeIndex < memoryProperties.memoryTypeCount; memoryTypeIndex++)
616 	{
617 		if ((memoryRequirements.memoryTypeBits & (0x1u << memoryTypeIndex))
618 			&& (memoryProperties.memoryTypes[memoryTypeIndex].propertyFlags & properties) == properties)
619 		{
620 			try
621 			{
622 				const vk::VkMemoryAllocateInfo	allocationInfo	=
623 				{
624 					vk::VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
625 					DE_NULL,
626 					memoryRequirements.size,
627 					memoryTypeIndex
628 				};
629 				vk::Move<vk::VkDeviceMemory>	memory			(vk::allocateMemory(vkd, device, &allocationInfo));
630 
631 				VK_CHECK(vkd.bindImageMemory(device, image, *memory, 0));
632 
633 				return memory;
634 			}
635 			catch (const vk::Error& error)
636 			{
637 				if (error.getError() == vk::VK_ERROR_OUT_OF_DEVICE_MEMORY
638 					|| error.getError() == vk::VK_ERROR_OUT_OF_HOST_MEMORY)
639 				{
640 					// Try next memory type/heap if out of memory
641 				}
642 				else
643 				{
644 					// Throw all other errors forward
645 					throw;
646 				}
647 			}
648 		}
649 	}
650 
651 	TCU_FAIL("Failed to allocate memory for image");
652 }
653 
654 void* mapMemory (const vk::DeviceInterface&	vkd,
655 				 vk::VkDevice				device,
656 				 vk::VkDeviceMemory			memory,
657 				 vk::VkDeviceSize			size)
658 {
659 	void* ptr;
660 
661 	VK_CHECK(vkd.mapMemory(device, memory, 0, size, 0, &ptr));
662 
663 	return ptr;
664 }
665 
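// Host-side reference of the tested memory contents. Tracks which bytes currently hold a
// known value using one bit per byte, packed into the 64-bit words of m_defined.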
666 class ReferenceMemory
667 {
668 public:
669 			ReferenceMemory	(size_t size);
670 
671 	void	set				(size_t pos, deUint8 val);
672 	deUint8	get				(size_t pos) const;
673 	bool	isDefined		(size_t pos) const;
674 
675 	void	setDefined		(size_t offset, size_t size, const void* data);
676 	void	setUndefined	(size_t offset, size_t size);
677 	void	setData			(size_t offset, size_t size, const void* data);
678 
679 	size_t	getSize				(void) const { return m_data.size(); }
680 
681 private:
682 	vector<deUint8>		m_data;
683 	vector<deUint64>	m_defined;
684 };
685 
686 ReferenceMemory::ReferenceMemory (size_t size)
687 	: m_data	(size, 0)
688 	, m_defined	(size / 64 + (size % 64 == 0 ? 0 : 1), 0ull)
689 {
690 }
691 
692 void ReferenceMemory::set (size_t pos, deUint8 val)
693 {
694 	DE_ASSERT(pos < m_data.size());
695 
696 	m_data[pos] = val;
697 	m_defined[pos / 64] |= 0x1ull << (pos % 64);
698 }
699 
700 void ReferenceMemory::setData (size_t offset, size_t size, const void* data_)
701 {
702 	const deUint8* data = (const deUint8*)data_;
703 
704 	DE_ASSERT(offset < m_data.size());
705 	DE_ASSERT(offset + size <= m_data.size());
706 
707 	// \todo [2016-03-09 mika] Optimize
708 	for (size_t pos = 0; pos < size; pos++)
709 	{
710 		m_data[offset + pos] = data[pos];
711 		m_defined[(offset + pos) / 64] |= 0x1ull << ((offset + pos) % 64);
712 	}
713 }
714 
715 void ReferenceMemory::setUndefined (size_t offset, size_t size)
716 {
717 	// \todo [2016-03-09 mika] Optimize
718 	for (size_t pos = 0; pos < size; pos++)
719 		m_defined[(offset + pos) / 64] |= 0x1ull << ((offset + pos) % 64);
720 }
721 
722 deUint8 ReferenceMemory::get (size_t pos) const
723 {
724 	DE_ASSERT(pos < m_data.size());
725 	DE_ASSERT(isDefined(pos));
726 	return m_data[pos];
727 }
728 
729 bool ReferenceMemory::isDefined (size_t pos) const
730 {
731 	DE_ASSERT(pos < m_data.size());
732 
733 	return (m_defined[pos / 64] & (0x1ull << (pos % 64))) != 0;
734 }
735 
736 class Memory
737 {
738 public:
739 							Memory				(const vk::InstanceInterface&	vki,
740 												 const vk::DeviceInterface&		vkd,
741 												 vk::VkPhysicalDevice			physicalDevice,
742 												 vk::VkDevice					device,
743 												 vk::VkDeviceSize				size,
744 												 deUint32						memoryTypeIndex,
745 												 vk::VkDeviceSize				maxBufferSize,
746 												 deInt32						maxImageWidth,
747 												 deInt32						maxImageHeight);
748 
749 	vk::VkDeviceSize		getSize				(void) const { return m_size; }
750 	vk::VkDeviceSize		getMaxBufferSize	(void) const { return m_maxBufferSize; }
751 	bool					getSupportBuffers	(void) const { return m_maxBufferSize > 0; }
752 
753 	deInt32					getMaxImageWidth	(void) const { return m_maxImageWidth; }
754 	deInt32					getMaxImageHeight	(void) const { return m_maxImageHeight; }
755 	bool					getSupportImages	(void) const { return m_maxImageWidth > 0; }
756 
757 	const vk::VkMemoryType&	getMemoryType		(void) const { return m_memoryType; }
758 	deUint32				getMemoryTypeIndex	(void) const { return m_memoryTypeIndex; }
759 	vk::VkDeviceMemory		getMemory			(void) const { return *m_memory; }
760 
761 private:
762 	const vk::VkDeviceSize					m_size;
763 	const deUint32							m_memoryTypeIndex;
764 	const vk::VkMemoryType					m_memoryType;
765 	const vk::Unique<vk::VkDeviceMemory>	m_memory;
766 	const vk::VkDeviceSize					m_maxBufferSize;
767 	const deInt32							m_maxImageWidth;
768 	const deInt32							m_maxImageHeight;
769 };
770 
771 vk::VkMemoryType getMemoryTypeInfo (const vk::InstanceInterface&	vki,
772 									vk::VkPhysicalDevice			device,
773 									deUint32						memoryTypeIndex)
774 {
775 	const vk::VkPhysicalDeviceMemoryProperties memoryProperties = vk::getPhysicalDeviceMemoryProperties(vki, device);
776 
777 	DE_ASSERT(memoryTypeIndex < memoryProperties.memoryTypeCount);
778 
779 	return memoryProperties.memoryTypes[memoryTypeIndex];
780 }
781 
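// Finds the largest buffer size that is compatible with the given memory type and whose
// memory requirements fit in memorySize, using a coarse binary search over buffer sizes.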
782 vk::VkDeviceSize findMaxBufferSize (const vk::DeviceInterface&		vkd,
783 									vk::VkDevice					device,
784 
785 									vk::VkBufferUsageFlags			usage,
786 									vk::VkSharingMode				sharingMode,
787 									const vector<deUint32>&			queueFamilies,
788 
789 									vk::VkDeviceSize				memorySize,
790 									deUint32						memoryTypeIndex)
791 {
792 	vk::VkDeviceSize	lastSuccess	= 0;
793 	vk::VkDeviceSize	currentSize	= memorySize / 2;
794 
795 	{
796 		const vk::Unique<vk::VkBuffer>  buffer			(createBuffer(vkd, device, memorySize, usage, sharingMode, queueFamilies));
797 		const vk::VkMemoryRequirements  requirements	(vk::getBufferMemoryRequirements(vkd, device, *buffer));
798 
799 		if (requirements.size == memorySize && requirements.memoryTypeBits & (0x1u << memoryTypeIndex))
800 			return memorySize;
801 	}
802 
803 	for (vk::VkDeviceSize stepSize = memorySize / 4; currentSize > 0; stepSize /= 2)
804 	{
805 		const vk::Unique<vk::VkBuffer>	buffer			(createBuffer(vkd, device, currentSize, usage, sharingMode, queueFamilies));
806 		const vk::VkMemoryRequirements	requirements	(vk::getBufferMemoryRequirements(vkd, device, *buffer));
807 
808 		if (requirements.size <= memorySize && requirements.memoryTypeBits & (0x1u << memoryTypeIndex))
809 		{
810 			lastSuccess = currentSize;
811 			currentSize += stepSize;
812 		}
813 		else
814 			currentSize -= stepSize;
815 
816 		if (stepSize == 0)
817 			break;
818 	}
819 
820 	return lastSuccess;
821 }
822 
823 // Round size down to the largest W * H * 4, where W and H < 4096
824 vk::VkDeviceSize roundBufferSizeToWxHx4 (vk::VkDeviceSize size)
825 {
826 	const vk::VkDeviceSize	maxTextureSize	= 4096;
827 	vk::VkDeviceSize		maxTexelCount	= size / 4;
828 	vk::VkDeviceSize		bestW			= de::max(maxTexelCount, maxTextureSize);
829 	vk::VkDeviceSize		bestH			= maxTexelCount / bestW;
830 
831 	// \todo [2016-03-09 mika] Could probably be faster?
832 	for (vk::VkDeviceSize w = 1; w * w < maxTexelCount && w < maxTextureSize && bestW * bestH * 4 < size; w++)
833 	{
834 		const vk::VkDeviceSize h = maxTexelCount / w;
835 
836 		if (bestW * bestH < w * h)
837 		{
838 			bestW = w;
839 			bestH = h;
840 		}
841 	}
842 
843 	return bestW * bestH * 4;
844 }
845 
846 // Find an RGBA8 image size that takes exactly "size" bytes.
847 // "size" must be W * H * 4 where W and H < 4096
848 IVec2 findImageSizeWxHx4 (vk::VkDeviceSize size)
849 {
850 	const vk::VkDeviceSize	maxTextureSize	= 4096;
851 	vk::VkDeviceSize		texelCount		= size / 4;
852 
853 	DE_ASSERT((size % 4) == 0);
854 
855 	// \todo [2016-03-09 mika] Could probably be faster?
856 	for (vk::VkDeviceSize w = 1; w < maxTextureSize && w < texelCount; w++)
857 	{
858 		const vk::VkDeviceSize	h	= texelCount / w;
859 
860 		if ((texelCount  % w) == 0 && h < maxTextureSize)
861 			return IVec2((int)w, (int)h);
862 	}
863 
864 	DE_FATAL("Invalid size");
865 	return IVec2(-1, -1);
866 }
867 
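// Searches for the largest RGBA8 2D image whose memory requirements fit in memorySize and
// are compatible with the given memory type, starting from a roughly square size and
// refining it with a halving step.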
868 IVec2 findMaxRGBA8ImageSize (const vk::DeviceInterface&	vkd,
869 							 vk::VkDevice				device,
870 
871 							 vk::VkImageUsageFlags		usage,
872 							 vk::VkSharingMode			sharingMode,
873 							 const vector<deUint32>&	queueFamilies,
874 
875 							 vk::VkDeviceSize			memorySize,
876 							 deUint32					memoryTypeIndex)
877 {
878 	IVec2		lastSuccess		(0);
879 	IVec2		currentSize;
880 
881 	{
882 		const deUint32	texelCount	= (deUint32)(memorySize / 4);
883 		const deUint32	width		= (deUint32)deFloatSqrt((float)texelCount);
884 		const deUint32	height		= texelCount / width;
885 
886 		currentSize[0] = deMaxu32(width, height);
887 		currentSize[1] = deMinu32(width, height);
888 	}
889 
890 	for (deInt32 stepSize = currentSize[0] / 2; currentSize[0] > 0; stepSize /= 2)
891 	{
892 		const vk::VkImageCreateInfo	createInfo		=
893 		{
894 			vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
895 			DE_NULL,
896 
897 			0u,
898 			vk::VK_IMAGE_TYPE_2D,
899 			vk::VK_FORMAT_R8G8B8A8_UNORM,
900 			{
901 				(deUint32)currentSize[0],
902 				(deUint32)currentSize[1],
903 				1u,
904 			},
905 			1u, 1u,
906 			vk::VK_SAMPLE_COUNT_1_BIT,
907 			vk::VK_IMAGE_TILING_OPTIMAL,
908 			usage,
909 			sharingMode,
910 			(deUint32)queueFamilies.size(),
911 			&queueFamilies[0],
912 			vk::VK_IMAGE_LAYOUT_UNDEFINED
913 		};
914 		const vk::Unique<vk::VkImage>	image			(vk::createImage(vkd, device, &createInfo));
915 		const vk::VkMemoryRequirements	requirements	(vk::getImageMemoryRequirements(vkd, device, *image));
916 
917 		if (requirements.size <= memorySize && requirements.memoryTypeBits & (0x1u << memoryTypeIndex))
918 		{
919 			lastSuccess = currentSize;
920 			currentSize[0] += stepSize;
921 			currentSize[1] += stepSize;
922 		}
923 		else
924 		{
925 			currentSize[0] -= stepSize;
926 			currentSize[1] -= stepSize;
927 		}
928 
929 		if (stepSize == 0)
930 			break;
931 	}
932 
933 	return lastSuccess;
934 }
935 
936 Memory::Memory (const vk::InstanceInterface&	vki,
937 				const vk::DeviceInterface&		vkd,
938 				vk::VkPhysicalDevice			physicalDevice,
939 				vk::VkDevice					device,
940 				vk::VkDeviceSize				size,
941 				deUint32						memoryTypeIndex,
942 				vk::VkDeviceSize				maxBufferSize,
943 				deInt32							maxImageWidth,
944 				deInt32							maxImageHeight)
945 	: m_size			(size)
946 	, m_memoryTypeIndex	(memoryTypeIndex)
947 	, m_memoryType		(getMemoryTypeInfo(vki, physicalDevice, memoryTypeIndex))
948 	, m_memory			(allocMemory(vkd, device, size, memoryTypeIndex))
949 	, m_maxBufferSize	(maxBufferSize)
950 	, m_maxImageWidth	(maxImageWidth)
951 	, m_maxImageHeight	(maxImageHeight)
952 {
953 }
954 
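// Shared Vulkan state for a whole test run: instance/device interfaces, queues,
// a command pool and the compiled shader binaries.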
955 class Context
956 {
957 public:
958 													Context					(const vk::InstanceInterface&					vki,
959 																			 const vk::DeviceInterface&						vkd,
960 																			 vk::VkPhysicalDevice							physicalDevice,
961 																			 vk::VkDevice									device,
962 																			 vk::VkQueue									queue,
963 																			 deUint32										queueFamilyIndex,
964 																			 const vector<pair<deUint32, vk::VkQueue> >&	queues,
965 																			 const vk::BinaryCollection&					binaryCollection)
966 		: m_vki					(vki)
967 		, m_vkd					(vkd)
968 		, m_physicalDevice		(physicalDevice)
969 		, m_device				(device)
970 		, m_queue				(queue)
971 		, m_queueFamilyIndex	(queueFamilyIndex)
972 		, m_queues				(queues)
973 		, m_commandPool			(createCommandPool(vkd, device, vk::VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex))
974 		, m_binaryCollection	(binaryCollection)
975 	{
976 		for (size_t queueNdx = 0; queueNdx < m_queues.size(); queueNdx++)
977 			m_queueFamilies.push_back(m_queues[queueNdx].first);
978 	}
979 
980 	const vk::InstanceInterface&					getInstanceInterface	(void) const { return m_vki; }
981 	vk::VkPhysicalDevice							getPhysicalDevice		(void) const { return m_physicalDevice; }
982 	vk::VkDevice									getDevice				(void) const { return m_device; }
983 	const vk::DeviceInterface&						getDeviceInterface		(void) const { return m_vkd; }
984 	vk::VkQueue										getQueue				(void) const { return m_queue; }
985 	deUint32										getQueueFamily			(void) const { return m_queueFamilyIndex; }
986 	const vector<pair<deUint32, vk::VkQueue> >&		getQueues				(void) const { return m_queues; }
987 	const vector<deUint32>							getQueueFamilies		(void) const { return m_queueFamilies; }
988 	vk::VkCommandPool								getCommandPool			(void) const { return *m_commandPool; }
989 	const vk::BinaryCollection&						getBinaryCollection		(void) const { return m_binaryCollection; }
990 
991 private:
992 	const vk::InstanceInterface&					m_vki;
993 	const vk::DeviceInterface&						m_vkd;
994 	const vk::VkPhysicalDevice						m_physicalDevice;
995 	const vk::VkDevice								m_device;
996 	const vk::VkQueue								m_queue;
997 	const deUint32									m_queueFamilyIndex;
998 	const vector<pair<deUint32, vk::VkQueue> >		m_queues;
999 	const vk::Unique<vk::VkCommandPool>				m_commandPool;
1000 	const vk::BinaryCollection&						m_binaryCollection;
1001 	vector<deUint32>								m_queueFamilies;
1002 };
1003 
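// State used while preparing commands: the memory under test plus the single buffer or
// image currently bound to it (never both at the same time).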
1004 class PrepareContext
1005 {
1006 public:
1007 													PrepareContext			(const Context&	context,
1008 																			 const Memory&	memory)
1009 		: m_context	(context)
1010 		, m_memory	(memory)
1011 	{
1012 	}
1013 
1014 	const Memory&									getMemory				(void) const { return m_memory; }
1015 	const Context&									getContext				(void) const { return m_context; }
1016 	const vk::BinaryCollection&						getBinaryCollection		(void) const { return m_context.getBinaryCollection(); }
1017 
1018 	void				setBuffer		(vk::Move<vk::VkBuffer>	buffer,
1019 										 vk::VkDeviceSize		size)
1020 	{
1021 		DE_ASSERT(!m_currentImage);
1022 		DE_ASSERT(!m_currentBuffer);
1023 
1024 		m_currentBuffer		= buffer;
1025 		m_currentBufferSize	= size;
1026 	}
1027 
1028 	vk::VkBuffer		getBuffer		(void) const { return *m_currentBuffer; }
1029 	vk::VkDeviceSize	getBufferSize	(void) const
1030 	{
1031 		DE_ASSERT(m_currentBuffer);
1032 		return m_currentBufferSize;
1033 	}
1034 
1035 	void				releaseBuffer	(void) { m_currentBuffer.disown(); }
1036 
1037 	void				setImage		(vk::Move<vk::VkImage>	image,
1038 										 vk::VkImageLayout		layout,
1039 										 vk::VkDeviceSize		memorySize,
1040 										 deInt32				width,
1041 										 deInt32				height)
1042 	{
1043 		DE_ASSERT(!m_currentImage);
1044 		DE_ASSERT(!m_currentBuffer);
1045 
1046 		m_currentImage				= image;
1047 		m_currentImageMemorySize	= memorySize;
1048 		m_currentImageLayout		= layout;
1049 		m_currentImageWidth			= width;
1050 		m_currentImageHeight		= height;
1051 	}
1052 
1053 	void				setImageLayout	(vk::VkImageLayout layout)
1054 	{
1055 		DE_ASSERT(m_currentImage);
1056 		m_currentImageLayout = layout;
1057 	}
1058 
1059 	vk::VkImage			getImage		(void) const { return *m_currentImage; }
1060 	deInt32				getImageWidth	(void) const
1061 	{
1062 		DE_ASSERT(m_currentImage);
1063 		return m_currentImageWidth;
1064 	}
1065 	deInt32				getImageHeight	(void) const
1066 	{
1067 		DE_ASSERT(m_currentImage);
1068 		return m_currentImageHeight;
1069 	}
1070 	vk::VkDeviceSize	getImageMemorySize	(void) const
1071 	{
1072 		DE_ASSERT(m_currentImage);
1073 		return m_currentImageMemorySize;
1074 	}
1075 
1076 	void				releaseImage	(void) { m_currentImage.disown(); }
1077 
1078 	vk::VkImageLayout	getImageLayout	(void) const
1079 	{
1080 		DE_ASSERT(m_currentImage);
1081 		return m_currentImageLayout;
1082 	}
1083 
1084 private:
1085 	const Context&			m_context;
1086 	const Memory&			m_memory;
1087 
1088 	vk::Move<vk::VkBuffer>	m_currentBuffer;
1089 	vk::VkDeviceSize		m_currentBufferSize;
1090 
1091 	vk::Move<vk::VkImage>	m_currentImage;
1092 	vk::VkDeviceSize		m_currentImageMemorySize;
1093 	vk::VkImageLayout		m_currentImageLayout;
1094 	deInt32					m_currentImageWidth;
1095 	deInt32					m_currentImageHeight;
1096 };
1097 
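// State used while executing commands: currently just the host mapping of the memory, if any.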
1098 class ExecuteContext
1099 {
1100 public:
1101 					ExecuteContext	(const Context&	context)
1102 		: m_context	(context)
1103 	{
1104 	}
1105 
1106 	const Context&	getContext		(void) const { return m_context; }
1107 	void			setMapping		(void* ptr) { m_mapping = ptr; }
1108 	void*			getMapping		(void) const { return m_mapping; }
1109 
1110 private:
1111 	const Context&	m_context;
1112 	void*			m_mapping;
1113 };
1114 
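// State used while verifying results: the test log, the result collector and the host-side
// reference copies of the memory contents and of the image.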
1115 class VerifyContext
1116 {
1117 public:
1118 							VerifyContext		(TestLog&				log,
1119 												 tcu::ResultCollector&	resultCollector,
1120 												 const Context&			context,
1121 												 vk::VkDeviceSize		size)
1122 		: m_log				(log)
1123 		, m_resultCollector	(resultCollector)
1124 		, m_context			(context)
1125 		, m_reference		((size_t)size)
1126 	{
1127 	}
1128 
1129 	const Context&			getContext			(void) const { return m_context; }
1130 	TestLog&				getLog				(void) const { return m_log; }
1131 	tcu::ResultCollector&	getResultCollector	(void) const { return m_resultCollector; }
1132 
1133 	ReferenceMemory&		getReference		(void) { return m_reference; }
1134 	TextureLevel&			getReferenceImage	(void) { return m_referenceImage; }
1135 
1136 private:
1137 	TestLog&				m_log;
1138 	tcu::ResultCollector&	m_resultCollector;
1139 	const Context&			m_context;
1140 	ReferenceMemory			m_reference;
1141 	TextureLevel			m_referenceImage;
1142 };
1143 
1144 class Command
1145 {
1146 public:
1147 	// Constructor should allocate all non-vulkan resources.
1148 	virtual				~Command	(void) {}
1149 
1150 	// Get name of the command
1151 	virtual const char*	getName		(void) const = 0;
1152 
1153 	// Log prepare operations
1154 	virtual void		logPrepare	(TestLog&, size_t) const {}
1155 	// Log executed operations
1156 	virtual void		logExecute	(TestLog&, size_t) const {}
1157 
1158 	// Prepare should allocate all vulkan resources and resources that require
1159 	// that buffer or memory has been already allocated. This should build all
1160 	// command buffers etc.
1161 	virtual void		prepare		(PrepareContext&) {}
1162 
1163 	// Execute command. Write or read mapped memory, submit commands to queue
1164 	// etc.
1165 	virtual void		execute		(ExecuteContext&) {}
1166 
1167 	// Verify that results are correct.
1168 	virtual void		verify		(VerifyContext&, size_t) {}
1169 
1170 protected:
1171 	// Allow only inheritance
1172 						Command		(void) {}
1173 
1174 private:
1175 	// Disallow copying
1176 						Command		(const Command&);
1177 	Command&			operator&	(const Command&);
1178 };
1179 
1180 class Map : public Command
1181 {
1182 public:
1183 						Map			(void) {}
1184 						~Map		(void) {}
1185 	const char*			getName		(void) const { return "Map"; }
1186 
1187 
1188 	void				logExecute	(TestLog& log, size_t commandIndex) const
1189 	{
1190 		log << TestLog::Message << commandIndex << ":" << getName() << " Map memory" << TestLog::EndMessage;
1191 	}
1192 
1193 	void				prepare		(PrepareContext& context)
1194 	{
1195 		m_memory	= context.getMemory().getMemory();
1196 		m_size		= context.getMemory().getSize();
1197 	}
1198 
1199 	void				execute		(ExecuteContext& context)
1200 	{
1201 		const vk::DeviceInterface&	vkd		= context.getContext().getDeviceInterface();
1202 		const vk::VkDevice			device	= context.getContext().getDevice();
1203 
1204 		context.setMapping(mapMemory(vkd, device, m_memory, m_size));
1205 	}
1206 
1207 private:
1208 	vk::VkDeviceMemory	m_memory;
1209 	vk::VkDeviceSize	m_size;
1210 };
1211 
1212 class UnMap : public Command
1213 {
1214 public:
1215 						UnMap		(void) {}
1216 						~UnMap		(void) {}
1217 	const char*			getName		(void) const { return "UnMap"; }
1218 
1219 	void				logExecute	(TestLog& log, size_t commandIndex) const
1220 	{
1221 		log << TestLog::Message << commandIndex << ": Unmap memory" << TestLog::EndMessage;
1222 	}
1223 
1224 	void				prepare		(PrepareContext& context)
1225 	{
1226 		m_memory	= context.getMemory().getMemory();
1227 	}
1228 
1229 	void				execute		(ExecuteContext& context)
1230 	{
1231 		const vk::DeviceInterface&	vkd		= context.getContext().getDeviceInterface();
1232 		const vk::VkDevice			device	= context.getContext().getDevice();
1233 
1234 		vkd.unmapMemory(device, m_memory);
1235 		context.setMapping(DE_NULL);
1236 	}
1237 
1238 private:
1239 	vk::VkDeviceMemory	m_memory;
1240 };
1241 
1242 class Invalidate : public Command
1243 {
1244 public:
1245 						Invalidate	(void) {}
1246 						~Invalidate	(void) {}
1247 	const char*			getName		(void) const { return "Invalidate"; }
1248 
1249 	void				logExecute	(TestLog& log, size_t commandIndex) const
1250 	{
1251 		log << TestLog::Message << commandIndex << ": Invalidate mapped memory" << TestLog::EndMessage;
1252 	}
1253 
1254 	void				prepare		(PrepareContext& context)
1255 	{
1256 		m_memory	= context.getMemory().getMemory();
1257 		m_size		= context.getMemory().getSize();
1258 	}
1259 
1260 	void				execute		(ExecuteContext& context)
1261 	{
1262 		const vk::DeviceInterface&	vkd		= context.getContext().getDeviceInterface();
1263 		const vk::VkDevice			device	= context.getContext().getDevice();
1264 
1265 		vk::invalidateMappedMemoryRange(vkd, device, m_memory, 0, VK_WHOLE_SIZE);
1266 	}
1267 
1268 private:
1269 	vk::VkDeviceMemory	m_memory;
1270 	vk::VkDeviceSize	m_size;
1271 };
1272 
1273 class Flush : public Command
1274 {
1275 public:
1276 						Flush		(void) {}
1277 						~Flush		(void) {}
1278 	const char*			getName		(void) const { return "Flush"; }
1279 
1280 	void				logExecute	(TestLog& log, size_t commandIndex) const
1281 	{
1282 		log << TestLog::Message << commandIndex << ": Flush mapped memory" << TestLog::EndMessage;
1283 	}
1284 
1285 	void				prepare		(PrepareContext& context)
1286 	{
1287 		m_memory	= context.getMemory().getMemory();
1288 		m_size		= context.getMemory().getSize();
1289 	}
1290 
1291 	void				execute		(ExecuteContext& context)
1292 	{
1293 		const vk::DeviceInterface&	vkd		= context.getContext().getDeviceInterface();
1294 		const vk::VkDevice			device	= context.getContext().getDevice();
1295 
1296 		vk::flushMappedMemoryRange(vkd, device, m_memory, 0, VK_WHOLE_SIZE);
1297 	}
1298 
1299 private:
1300 	vk::VkDeviceMemory	m_memory;
1301 	vk::VkDeviceSize	m_size;
1302 };
1303 
1304 // Host memory reads and writes
1305 class HostMemoryAccess : public Command
1306 {
1307 public:
1308 					HostMemoryAccess	(bool read, bool write, deUint32 seed);
1309 					~HostMemoryAccess	(void) {}
1310 	const char*		getName				(void) const { return "HostMemoryAccess"; }
1311 
1312 	void			logExecute			(TestLog& log, size_t commandIndex) const;
1313 	void			prepare				(PrepareContext& context);
1314 	void			execute				(ExecuteContext& context);
1315 	void			verify				(VerifyContext& context, size_t commandIndex);
1316 
1317 private:
1318 	const bool		m_read;
1319 	const bool		m_write;
1320 	const deUint32	m_seed;
1321 
1322 	size_t			m_size;
1323 	vector<deUint8>	m_readData;
1324 };
1325 
1326 HostMemoryAccess::HostMemoryAccess (bool read, bool write, deUint32 seed)
1327 	: m_read	(read)
1328 	, m_write	(write)
1329 	, m_seed	(seed)
1330 {
1331 }
1332 
1333 void HostMemoryAccess::logExecute (TestLog& log, size_t commandIndex) const
1334 {
1335 	log << TestLog::Message << commandIndex << ": Host memory access:" << (m_read ? " read" : "") << (m_write ? " write" : "")  << ", seed: " << m_seed << TestLog::EndMessage;
1336 }
1337 
1338 void HostMemoryAccess::prepare (PrepareContext& context)
1339 {
1340 	m_size = (size_t)context.getMemory().getSize();
1341 
1342 	if (m_read)
1343 		m_readData.resize(m_size, 0);
1344 }
1345 
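// Note: when both reading and writing, the old contents are first saved to m_readData and the
// mapping is then overwritten with (old value XOR pseudo-random mask); verify() replays the
// same seed to check the read-back data and to update the reference contents accordingly.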
1346 void HostMemoryAccess::execute (ExecuteContext& context)
1347 {
1348 	if (m_read && m_write)
1349 	{
1350 		de::Random		rng	(m_seed);
1351 		deUint8* const	ptr	= (deUint8*)context.getMapping();
1352 		if (m_size >= ONE_MEGABYTE)
1353 		{
1354 			deMemcpy(&m_readData[0], ptr, m_size);
1355 			for (size_t pos = 0; pos < m_size; ++pos)
1356 			{
1357 				ptr[pos] = m_readData[pos] ^ rng.getUint8();
1358 			}
1359 		}
1360 		else
1361 		{
1362 			for (size_t pos = 0; pos < m_size; ++pos)
1363 			{
1364 				const deUint8	mask	= rng.getUint8();
1365 				const deUint8	value	= ptr[pos];
1366 
1367 				m_readData[pos] = value;
1368 				ptr[pos] = value ^ mask;
1369 			}
1370 		}
1371 	}
1372 	else if (m_read)
1373 	{
1374 		const deUint8* const	ptr = (deUint8*)context.getMapping();
1375 		if (m_size >= ONE_MEGABYTE)
1376 		{
1377 			deMemcpy(&m_readData[0], ptr, m_size);
1378 		}
1379 		else
1380 		{
1381 			for (size_t pos = 0; pos < m_size; ++pos)
1382 			{
1383 				m_readData[pos] = ptr[pos];
1384 			}
1385 		}
1386 	}
1387 	else if (m_write)
1388 	{
1389 		de::Random		rng	(m_seed);
1390 		deUint8* const	ptr	= (deUint8*)context.getMapping();
1391 		for (size_t pos = 0; pos < m_size; ++pos)
1392 		{
1393 			ptr[pos] = rng.getUint8();
1394 		}
1395 	}
1396 	else
1397 		DE_FATAL("Host memory access without read or write.");
1398 }
1399 
1400 void HostMemoryAccess::verify (VerifyContext& context, size_t commandIndex)
1401 {
1402 	tcu::ResultCollector&	resultCollector	= context.getResultCollector();
1403 	ReferenceMemory&		reference		= context.getReference();
1404 	de::Random				rng				(m_seed);
1405 
1406 	if (m_read && m_write)
1407 	{
1408 		for (size_t pos = 0; pos < m_size; pos++)
1409 		{
1410 			const deUint8	mask	= rng.getUint8();
1411 			const deUint8	value	= m_readData[pos];
1412 
1413 			if (reference.isDefined(pos))
1414 			{
1415 				if (value != reference.get(pos))
1416 				{
1417 					resultCollector.fail(
1418 							de::toString(commandIndex) + ":" + getName()
1419 							+ " Result differs from reference, Expected: "
1420 							+ de::toString(tcu::toHex<8>(reference.get(pos)))
1421 							+ ", Got: "
1422 							+ de::toString(tcu::toHex<8>(value))
1423 							+ ", At offset: "
1424 							+ de::toString(pos));
1425 					break;
1426 				}
1427 
1428 				reference.set(pos, reference.get(pos) ^ mask);
1429 			}
1430 		}
1431 	}
1432 	else if (m_read)
1433 	{
1434 		for (size_t pos = 0; pos < m_size; pos++)
1435 		{
1436 			const deUint8	value	= m_readData[pos];
1437 
1438 			if (reference.isDefined(pos))
1439 			{
1440 				if (value != reference.get(pos))
1441 				{
1442 					resultCollector.fail(
1443 							de::toString(commandIndex) + ":" + getName()
1444 							+ " Result differs from reference, Expected: "
1445 							+ de::toString(tcu::toHex<8>(reference.get(pos)))
1446 							+ ", Got: "
1447 							+ de::toString(tcu::toHex<8>(value))
1448 							+ ", At offset: "
1449 							+ de::toString(pos));
1450 					break;
1451 				}
1452 			}
1453 		}
1454 	}
1455 	else if (m_write)
1456 	{
1457 		for (size_t pos = 0; pos < m_size; pos++)
1458 		{
1459 			const deUint8	value	= rng.getUint8();
1460 
1461 			reference.set(pos, value);
1462 		}
1463 	}
1464 	else
1465 		DE_FATAL("Host memory access without read or write.");
1466 }
1467 
1468 class CreateBuffer : public Command
1469 {
1470 public:
1471 									CreateBuffer	(vk::VkBufferUsageFlags	usage,
1472 													 vk::VkSharingMode		sharing);
1473 									~CreateBuffer	(void) {}
1474 	const char*						getName			(void) const { return "CreateBuffer"; }
1475 
1476 	void							logPrepare		(TestLog& log, size_t commandIndex) const;
1477 	void							prepare			(PrepareContext& context);
1478 
1479 private:
1480 	const vk::VkBufferUsageFlags	m_usage;
1481 	const vk::VkSharingMode			m_sharing;
1482 };
1483 
1484 CreateBuffer::CreateBuffer (vk::VkBufferUsageFlags	usage,
1485 							vk::VkSharingMode		sharing)
1486 	: m_usage	(usage)
1487 	, m_sharing	(sharing)
1488 {
1489 }
1490 
1491 void CreateBuffer::logPrepare (TestLog& log, size_t commandIndex) const
1492 {
1493 	log << TestLog::Message << commandIndex << ":" << getName() << " Create buffer, Sharing mode: " << m_sharing << ", Usage: " << vk::getBufferUsageFlagsStr(m_usage) << TestLog::EndMessage;
1494 }
1495 
1496 void CreateBuffer::prepare (PrepareContext& context)
1497 {
1498 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
1499 	const vk::VkDevice			device			= context.getContext().getDevice();
1500 	const vk::VkDeviceSize		bufferSize		= context.getMemory().getMaxBufferSize();
1501 	const vector<deUint32>&		queueFamilies	= context.getContext().getQueueFamilies();
1502 
1503 	context.setBuffer(createBuffer(vkd, device, bufferSize, m_usage, m_sharing, queueFamilies), bufferSize);
1504 }
1505 
1506 class DestroyBuffer : public Command
1507 {
1508 public:
1509 							DestroyBuffer	(void);
1510 							~DestroyBuffer	(void) {}
1511 	const char*				getName			(void) const { return "DestroyBuffer"; }
1512 
1513 	void					logExecute		(TestLog& log, size_t commandIndex) const;
1514 	void					prepare			(PrepareContext& context);
1515 	void					execute			(ExecuteContext& context);
1516 
1517 private:
1518 	vk::Move<vk::VkBuffer>	m_buffer;
1519 };
1520 
1521 DestroyBuffer::DestroyBuffer (void)
1522 {
1523 }
1524 
1525 void DestroyBuffer::prepare (PrepareContext& context)
1526 {
1527 	m_buffer = vk::Move<vk::VkBuffer>(vk::check(context.getBuffer()), vk::Deleter<vk::VkBuffer>(context.getContext().getDeviceInterface(), context.getContext().getDevice(), DE_NULL));
1528 	context.releaseBuffer();
1529 }
1530 
1531 void DestroyBuffer::logExecute (TestLog& log, size_t commandIndex) const
1532 {
1533 	log << TestLog::Message << commandIndex << ":" << getName() << " Destroy buffer" << TestLog::EndMessage;
1534 }
1535 
1536 void DestroyBuffer::execute (ExecuteContext& context)
1537 {
1538 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
1539 	const vk::VkDevice			device			= context.getContext().getDevice();
1540 
1541 	vkd.destroyBuffer(device, m_buffer.disown(), DE_NULL);
1542 }
1543 
1544 class BindBufferMemory : public Command
1545 {
1546 public:
1547 				BindBufferMemory	(void) {}
1548 				~BindBufferMemory	(void) {}
1549 	const char*	getName				(void) const { return "BindBufferMemory"; }
1550 
1551 	void		logPrepare			(TestLog& log, size_t commandIndex) const;
1552 	void		prepare				(PrepareContext& context);
1553 };
1554 
1555 void BindBufferMemory::logPrepare (TestLog& log, size_t commandIndex) const
1556 {
1557 	log << TestLog::Message << commandIndex << ":" << getName() << " Bind memory to buffer" << TestLog::EndMessage;
1558 }
1559 
1560 void BindBufferMemory::prepare (PrepareContext& context)
1561 {
1562 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
1563 	const vk::VkDevice			device			= context.getContext().getDevice();
1564 
1565 	VK_CHECK(vkd.bindBufferMemory(device, context.getBuffer(), context.getMemory().getMemory(), 0));
1566 }
1567 
1568 class CreateImage : public Command
1569 {
1570 public:
1571 									CreateImage		(vk::VkImageUsageFlags	usage,
1572 													 vk::VkSharingMode		sharing);
1573 									~CreateImage	(void) {}
1574 	const char*						getName			(void) const { return "CreateImage"; }
1575 
1576 	void							logPrepare		(TestLog& log, size_t commandIndex) const;
1577 	void							prepare			(PrepareContext& context);
1578 	void							verify			(VerifyContext& context, size_t commandIndex);
1579 
1580 private:
1581 	const vk::VkImageUsageFlags	m_usage;
1582 	const vk::VkSharingMode		m_sharing;
1583 	deInt32						m_imageWidth;
1584 	deInt32						m_imageHeight;
1585 };
1586 
1587 CreateImage::CreateImage (vk::VkImageUsageFlags	usage,
1588 						  vk::VkSharingMode		sharing)
1589 	: m_usage	(usage)
1590 	, m_sharing	(sharing)
1591 {
1592 }
1593 
1594 void CreateImage::logPrepare (TestLog& log, size_t commandIndex) const
1595 {
1596 	log << TestLog::Message << commandIndex << ":" << getName() << " Create image, sharing: " << m_sharing << ", usage: " << vk::getImageUsageFlagsStr(m_usage)  << TestLog::EndMessage;
1597 }
1598 
1599 void CreateImage::prepare (PrepareContext& context)
1600 {
1601 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
1602 	const vk::VkDevice			device			= context.getContext().getDevice();
1603 	const vector<deUint32>&		queueFamilies	= context.getContext().getQueueFamilies();
1604 
1605 	m_imageWidth	= context.getMemory().getMaxImageWidth();
1606 	m_imageHeight	= context.getMemory().getMaxImageHeight();
1607 
1608 	{
1609 		const vk::VkImageCreateInfo	createInfo		=
1610 		{
1611 			vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
1612 			DE_NULL,
1613 
1614 			0u,
1615 			vk::VK_IMAGE_TYPE_2D,
1616 			vk::VK_FORMAT_R8G8B8A8_UNORM,
1617 			{
1618 				(deUint32)m_imageWidth,
1619 				(deUint32)m_imageHeight,
1620 				1u,
1621 			},
1622 			1u, 1u,
1623 			vk::VK_SAMPLE_COUNT_1_BIT,
1624 			vk::VK_IMAGE_TILING_OPTIMAL,
1625 			m_usage,
1626 			m_sharing,
1627 			(deUint32)queueFamilies.size(),
1628 			&queueFamilies[0],
1629 			vk::VK_IMAGE_LAYOUT_UNDEFINED
1630 		};
1631 		vk::Move<vk::VkImage>			image			(createImage(vkd, device, &createInfo));
1632 		const vk::VkMemoryRequirements	requirements	= vk::getImageMemoryRequirements(vkd, device, *image);
1633 
1634 		context.setImage(image, vk::VK_IMAGE_LAYOUT_UNDEFINED, requirements.size, m_imageWidth, m_imageHeight);
1635 	}
1636 }
1637 
1638 void CreateImage::verify (VerifyContext& context, size_t)
1639 {
1640 	context.getReferenceImage() = TextureLevel(TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_imageWidth, m_imageHeight);
1641 }
1642 
1643 class DestroyImage : public Command
1644 {
1645 public:
1646 							DestroyImage	(void);
1647 							~DestroyImage	(void) {}
1648 	const char*				getName			(void) const { return "DestroyImage"; }
1649 
1650 	void					logExecute		(TestLog& log, size_t commandIndex) const;
1651 	void					prepare			(PrepareContext& context);
1652 	void					execute			(ExecuteContext& context);
1653 
1654 private:
1655 	vk::Move<vk::VkImage>	m_image;
1656 };
1657 
1658 DestroyImage::DestroyImage (void)
1659 {
1660 }
1661 
1662 void DestroyImage::prepare (PrepareContext& context)
1663 {
1664 	m_image = vk::Move<vk::VkImage>(vk::check(context.getImage()), vk::Deleter<vk::VkImage>(context.getContext().getDeviceInterface(), context.getContext().getDevice(), DE_NULL));
1665 	context.releaseImage();
1666 }
1667 
1668 
1669 void DestroyImage::logExecute (TestLog& log, size_t commandIndex) const
1670 {
1671 	log << TestLog::Message << commandIndex << ":" << getName() << " Destroy image" << TestLog::EndMessage;
1672 }
1673 
1674 void DestroyImage::execute (ExecuteContext& context)
1675 {
1676 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
1677 	const vk::VkDevice			device			= context.getContext().getDevice();
1678 
1679 	vkd.destroyImage(device, m_image.disown(), DE_NULL);
1680 }
1681 
1682 class BindImageMemory : public Command
1683 {
1684 public:
1685 				BindImageMemory		(void) {}
1686 				~BindImageMemory	(void) {}
1687 	const char*	getName				(void) const { return "BindImageMemory"; }
1688 
1689 	void		logPrepare			(TestLog& log, size_t commandIndex) const;
1690 	void		prepare				(PrepareContext& context);
1691 };
1692 
1693 void BindImageMemory::logPrepare (TestLog& log, size_t commandIndex) const
1694 {
1695 	log << TestLog::Message << commandIndex << ":" << getName() << " Bind memory to image" << TestLog::EndMessage;
1696 }
1697 
1698 void BindImageMemory::prepare (PrepareContext& context)
1699 {
1700 	const vk::DeviceInterface&		vkd				= context.getContext().getDeviceInterface();
1701 	const vk::VkDevice				device			= context.getContext().getDevice();
1702 
1703 	VK_CHECK(vkd.bindImageMemory(device, context.getImage(), context.getMemory().getMemory(), 0));
1704 }
1705 
1706 class QueueWaitIdle : public Command
1707 {
1708 public:
1709 				QueueWaitIdle	(void) {}
1710 				~QueueWaitIdle	(void) {}
1711 	const char*	getName			(void) const { return "QueueWaitIdle"; }
1712 
1713 	void		logExecute		(TestLog& log, size_t commandIndex) const;
1714 	void		execute			(ExecuteContext& context);
1715 };
1716 
1717 void QueueWaitIdle::logExecute (TestLog& log, size_t commandIndex) const
1718 {
1719 	log << TestLog::Message << commandIndex << ":" << getName() << " Queue wait idle" << TestLog::EndMessage;
1720 }
1721 
1722 void QueueWaitIdle::execute (ExecuteContext& context)
1723 {
1724 	const vk::DeviceInterface&	vkd		= context.getContext().getDeviceInterface();
1725 	const vk::VkQueue			queue	= context.getContext().getQueue();
1726 
1727 	VK_CHECK(vkd.queueWaitIdle(queue));
1728 }
1729 
1730 class DeviceWaitIdle : public Command
1731 {
1732 public:
1733 				DeviceWaitIdle	(void) {}
1734 				~DeviceWaitIdle	(void) {}
1735 	const char*	getName			(void) const { return "DeviceWaitIdle"; }
1736 
1737 	void		logExecute		(TestLog& log, size_t commandIndex) const;
1738 	void		execute			(ExecuteContext& context);
1739 };
1740 
1741 void DeviceWaitIdle::logExecute (TestLog& log, size_t commandIndex) const
1742 {
1743 	log << TestLog::Message << commandIndex << ":" << getName() << " Device wait idle" << TestLog::EndMessage;
1744 }
1745 
1746 void DeviceWaitIdle::execute (ExecuteContext& context)
1747 {
1748 	const vk::DeviceInterface&	vkd		= context.getContext().getDeviceInterface();
1749 	const vk::VkDevice			device	= context.getContext().getDevice();
1750 
1751 	VK_CHECK(vkd.deviceWaitIdle(device));
1752 }
1753 
1754 class SubmitContext
1755 {
1756 public:
1757 								SubmitContext		(const PrepareContext&		context,
1758 													 const vk::VkCommandBuffer	commandBuffer)
1759 		: m_context			(context)
1760 		, m_commandBuffer	(commandBuffer)
1761 	{
1762 	}
1763 
1764 	const Memory&				getMemory			(void) const { return m_context.getMemory(); }
1765 	const Context&				getContext			(void) const { return m_context.getContext(); }
1766 	vk::VkCommandBuffer			getCommandBuffer	(void) const { return m_commandBuffer; }
1767 
1768 	vk::VkBuffer				getBuffer			(void) const { return m_context.getBuffer(); }
1769 	vk::VkDeviceSize			getBufferSize		(void) const { return m_context.getBufferSize(); }
1770 
1771 	vk::VkImage					getImage			(void) const { return m_context.getImage(); }
1772 	deInt32						getImageWidth		(void) const { return m_context.getImageWidth(); }
1773 	deInt32						getImageHeight		(void) const { return m_context.getImageHeight(); }
1774 
1775 private:
1776 	const PrepareContext&		m_context;
1777 	const vk::VkCommandBuffer	m_commandBuffer;
1778 };
1779 
1780 class CmdCommand
1781 {
1782 public:
1783 	virtual				~CmdCommand	(void) {}
1784 	virtual const char*	getName		(void) const = 0;
1785 
1786 	// Log things that are done during prepare
1787 	virtual void		logPrepare	(TestLog&, size_t) const {}
1788 	// Log submitted calls etc.
1789 	virtual void		logSubmit	(TestLog&, size_t) const {}
1790 
1791 	// Allocate vulkan resources and prepare for submit.
1792 	virtual void		prepare		(PrepareContext&) {}
1793 
1794 	// Submit commands to command buffer.
1795 	virtual void		submit		(SubmitContext&) {}
1796 
1797 	// Verify results
1798 	virtual void		verify		(VerifyContext&, size_t) {}
1799 };
1800 
1801 class SubmitCommandBuffer : public Command
1802 {
1803 public:
1804 					SubmitCommandBuffer		(const vector<CmdCommand*>& commands);
1805 					~SubmitCommandBuffer	(void);
1806 
1807 	const char*		getName					(void) const { return "SubmitCommandBuffer"; }
1808 	void			logExecute				(TestLog& log, size_t commandIndex) const;
1809 	void			logPrepare				(TestLog& log, size_t commandIndex) const;
1810 
1811 	// Allocate command buffer and submit commands to command buffer
1812 	void			prepare					(PrepareContext& context);
1813 	void			execute					(ExecuteContext& context);
1814 
1815 	// Verify that results are correct.
1816 	void			verify					(VerifyContext& context, size_t commandIndex);
1817 
1818 private:
1819 	vector<CmdCommand*>				m_commands;
1820 	vk::Move<vk::VkCommandBuffer>	m_commandBuffer;
1821 };
1822 
1823 SubmitCommandBuffer::SubmitCommandBuffer (const vector<CmdCommand*>& commands)
1824 	: m_commands	(commands)
1825 {
1826 }
1827 
1828 SubmitCommandBuffer::~SubmitCommandBuffer (void)
1829 {
1830 	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
1831 		delete m_commands[cmdNdx];
1832 }
1833 
1834 void SubmitCommandBuffer::prepare (PrepareContext& context)
1835 {
1836 	const vk::DeviceInterface&	vkd			= context.getContext().getDeviceInterface();
1837 	const vk::VkDevice			device		= context.getContext().getDevice();
1838 	const vk::VkCommandPool		commandPool	= context.getContext().getCommandPool();
1839 
1840 	m_commandBuffer = createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY);
1841 
1842 	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
1843 	{
1844 		CmdCommand& command = *m_commands[cmdNdx];
1845 
1846 		command.prepare(context);
1847 	}
1848 
1849 	{
1850 		SubmitContext submitContext (context, *m_commandBuffer);
1851 
1852 		for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
1853 		{
1854 			CmdCommand& command = *m_commands[cmdNdx];
1855 
1856 			command.submit(submitContext);
1857 		}
1858 
1859 		endCommandBuffer(vkd, *m_commandBuffer);
1860 	}
1861 }
1862 
1863 void SubmitCommandBuffer::execute (ExecuteContext& context)
1864 {
1865 	const vk::DeviceInterface&	vkd		= context.getContext().getDeviceInterface();
1866 	const vk::VkCommandBuffer	cmd		= *m_commandBuffer;
1867 	const vk::VkQueue			queue	= context.getContext().getQueue();
1868 	const vk::VkSubmitInfo		submit	=
1869 	{
1870 		vk::VK_STRUCTURE_TYPE_SUBMIT_INFO,
1871 		DE_NULL,
1872 
1873 		0,
1874 		DE_NULL,
1875 		(const vk::VkPipelineStageFlags*)DE_NULL,
1876 
1877 		1,
1878 		&cmd,
1879 
1880 		0,
1881 		DE_NULL
1882 	};
1883 
1884 	VK_CHECK(vkd.queueSubmit(queue, 1, &submit, 0));
1885 }
1886 
1887 void SubmitCommandBuffer::verify (VerifyContext& context, size_t commandIndex)
1888 {
1889 	const string				sectionName	(de::toString(commandIndex) + ":" + getName());
1890 	const tcu::ScopedLogSection	section		(context.getLog(), sectionName, sectionName);
1891 
1892 	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
1893 		m_commands[cmdNdx]->verify(context, cmdNdx);
1894 }
1895 
1896 void SubmitCommandBuffer::logPrepare (TestLog& log, size_t commandIndex) const
1897 {
1898 	const string				sectionName	(de::toString(commandIndex) + ":" + getName());
1899 	const tcu::ScopedLogSection	section		(log, sectionName, sectionName);
1900 
1901 	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
1902 		m_commands[cmdNdx]->logPrepare(log, cmdNdx);
1903 }
1904 
1905 void SubmitCommandBuffer::logExecute (TestLog& log, size_t commandIndex) const
1906 {
1907 	const string				sectionName	(de::toString(commandIndex) + ":" + getName());
1908 	const tcu::ScopedLogSection	section		(log, sectionName, sectionName);
1909 
1910 	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
1911 		m_commands[cmdNdx]->logSubmit(log, cmdNdx);
1912 }
1913 
1914 class PipelineBarrier : public CmdCommand
1915 {
1916 public:
1917 	enum Type
1918 	{
1919 		TYPE_GLOBAL = 0,
1920 		TYPE_BUFFER,
1921 		TYPE_IMAGE,
1922 		TYPE_LAST
1923 	};
1924 									PipelineBarrier		(const vk::VkPipelineStageFlags			srcStages,
1925 														 const vk::VkAccessFlags				srcAccesses,
1926 														 const vk::VkPipelineStageFlags			dstStages,
1927 														 const vk::VkAccessFlags				dstAccesses,
1928 														 Type									type,
1929 														 const tcu::Maybe<vk::VkImageLayout>	imageLayout);
1930 									~PipelineBarrier	(void) {}
1931 	const char*						getName				(void) const { return "PipelineBarrier"; }
1932 
1933 	void							logSubmit			(TestLog& log, size_t commandIndex) const;
1934 	void							submit				(SubmitContext& context);
1935 
1936 private:
1937 	const vk::VkPipelineStageFlags		m_srcStages;
1938 	const vk::VkAccessFlags				m_srcAccesses;
1939 	const vk::VkPipelineStageFlags		m_dstStages;
1940 	const vk::VkAccessFlags				m_dstAccesses;
1941 	const Type							m_type;
1942 	const tcu::Maybe<vk::VkImageLayout>	m_imageLayout;
1943 };
1944 
1945 PipelineBarrier::PipelineBarrier (const vk::VkPipelineStageFlags		srcStages,
1946 								  const vk::VkAccessFlags				srcAccesses,
1947 								  const vk::VkPipelineStageFlags		dstStages,
1948 								  const vk::VkAccessFlags				dstAccesses,
1949 								  Type									type,
1950 								  const tcu::Maybe<vk::VkImageLayout>	imageLayout)
1951 	: m_srcStages	(srcStages)
1952 	, m_srcAccesses	(srcAccesses)
1953 	, m_dstStages	(dstStages)
1954 	, m_dstAccesses	(dstAccesses)
1955 	, m_type		(type)
1956 	, m_imageLayout	(imageLayout)
1957 {
1958 }
1959 
1960 void PipelineBarrier::logSubmit (TestLog& log, size_t commandIndex) const
1961 {
1962 	log << TestLog::Message << commandIndex << ":" << getName()
1963 		<< " " << (m_type == TYPE_GLOBAL ? "Global pipeline barrier"
1964 					: m_type == TYPE_BUFFER ? "Buffer pipeline barrier"
1965 					: "Image pipeline barrier")
1966 		<< ", srcStages: " << vk::getPipelineStageFlagsStr(m_srcStages) << ", srcAccesses: " << vk::getAccessFlagsStr(m_srcAccesses)
1967 		<< ", dstStages: " << vk::getPipelineStageFlagsStr(m_dstStages) << ", dstAccesses: " << vk::getAccessFlagsStr(m_dstAccesses) << TestLog::EndMessage;
1968 }
1969 
1970 void PipelineBarrier::submit (SubmitContext& context)
1971 {
1972 	const vk::DeviceInterface&	vkd	= context.getContext().getDeviceInterface();
1973 	const vk::VkCommandBuffer	cmd	= context.getCommandBuffer();
1974 
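	// Record a barrier with the requested scope: a global memory barrier, a barrier covering the whole
	// test buffer, or a barrier covering the whole color subresource of the test image.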
1975 	switch (m_type)
1976 	{
1977 		case TYPE_GLOBAL:
1978 		{
1979 			const vk::VkMemoryBarrier	barrier		=
1980 			{
1981 				vk::VK_STRUCTURE_TYPE_MEMORY_BARRIER,
1982 				DE_NULL,
1983 
1984 				m_srcAccesses,
1985 				m_dstAccesses
1986 			};
1987 
1988 			vkd.cmdPipelineBarrier(cmd, m_srcStages, m_dstStages, (vk::VkDependencyFlags)0, 1, &barrier, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
1989 			break;
1990 		}
1991 
1992 		case TYPE_BUFFER:
1993 		{
1994 			const vk::VkBufferMemoryBarrier	barrier		=
1995 			{
1996 				vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
1997 				DE_NULL,
1998 
1999 				m_srcAccesses,
2000 				m_dstAccesses,
2001 
2002 				VK_QUEUE_FAMILY_IGNORED,
2003 				VK_QUEUE_FAMILY_IGNORED,
2004 
2005 				context.getBuffer(),
2006 				0,
2007 				VK_WHOLE_SIZE
2008 			};
2009 
2010 			vkd.cmdPipelineBarrier(cmd, m_srcStages, m_dstStages, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &barrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
2011 			break;
2012 		}
2013 
2014 		case TYPE_IMAGE:
2015 		{
2016 			const vk::VkImageMemoryBarrier	barrier		=
2017 			{
2018 				vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
2019 				DE_NULL,
2020 
2021 				m_srcAccesses,
2022 				m_dstAccesses,
2023 
2024 				*m_imageLayout,
2025 				*m_imageLayout,
2026 
2027 				VK_QUEUE_FAMILY_IGNORED,
2028 				VK_QUEUE_FAMILY_IGNORED,
2029 
2030 				context.getImage(),
2031 				{
2032 					vk::VK_IMAGE_ASPECT_COLOR_BIT,
2033 					0, 1,
2034 					0, 1
2035 				}
2036 			};
2037 
2038 			vkd.cmdPipelineBarrier(cmd, m_srcStages, m_dstStages, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &barrier);
2039 			break;
2040 		}
2041 
2042 		default:
2043 			DE_FATAL("Unknown pipeline barrier type");
2044 	}
2045 }
2046 
2047 class ImageTransition : public CmdCommand
2048 {
2049 public:
2050 						ImageTransition		(vk::VkPipelineStageFlags	srcStages,
2051 											 vk::VkAccessFlags			srcAccesses,
2052 
2053 											 vk::VkPipelineStageFlags	dstStages,
2054 											 vk::VkAccessFlags			dstAccesses,
2055 
2056 											 vk::VkImageLayout			srcLayout,
2057 											 vk::VkImageLayout			dstLayout);
2058 
2059 						~ImageTransition	(void) {}
2060 	const char*			getName				(void) const { return "ImageTransition"; }
2061 
2062 	void				prepare				(PrepareContext& context);
2063 	void				logSubmit			(TestLog& log, size_t commandIndex) const;
2064 	void				submit				(SubmitContext& context);
2065 	void				verify				(VerifyContext& context, size_t);
2066 
2067 private:
2068 	const vk::VkPipelineStageFlags	m_srcStages;
2069 	const vk::VkAccessFlags			m_srcAccesses;
2070 	const vk::VkPipelineStageFlags	m_dstStages;
2071 	const vk::VkAccessFlags			m_dstAccesses;
2072 	const vk::VkImageLayout			m_srcLayout;
2073 	const vk::VkImageLayout			m_dstLayout;
2074 
2075 	vk::VkDeviceSize				m_imageMemorySize;
2076 };
2077 
2078 ImageTransition::ImageTransition (vk::VkPipelineStageFlags	srcStages,
2079 								  vk::VkAccessFlags			srcAccesses,
2080 
2081 								  vk::VkPipelineStageFlags	dstStages,
2082 								  vk::VkAccessFlags			dstAccesses,
2083 
2084 								  vk::VkImageLayout			srcLayout,
2085 								  vk::VkImageLayout			dstLayout)
2086 	: m_srcStages		(srcStages)
2087 	, m_srcAccesses		(srcAccesses)
2088 	, m_dstStages		(dstStages)
2089 	, m_dstAccesses		(dstAccesses)
2090 	, m_srcLayout		(srcLayout)
2091 	, m_dstLayout		(dstLayout)
2092 {
2093 }
2094 
2095 void ImageTransition::logSubmit (TestLog& log, size_t commandIndex) const
2096 {
2097 	log << TestLog::Message << commandIndex << ":" << getName()
2098 		<< " Image transition pipeline barrier"
2099 		<< ", srcStages: " << vk::getPipelineStageFlagsStr(m_srcStages) << ", srcAccesses: " << vk::getAccessFlagsStr(m_srcAccesses)
2100 		<< ", dstStages: " << vk::getPipelineStageFlagsStr(m_dstStages) << ", dstAccesses: " << vk::getAccessFlagsStr(m_dstAccesses)
2101 		<< ", srcLayout: " << m_srcLayout << ", dstLayout: " << m_dstLayout << TestLog::EndMessage;
2102 }
2103 
2104 void ImageTransition::prepare (PrepareContext& context)
2105 {
2106 	DE_ASSERT(context.getImageLayout() == vk::VK_IMAGE_LAYOUT_UNDEFINED || m_srcLayout == vk::VK_IMAGE_LAYOUT_UNDEFINED || context.getImageLayout() == m_srcLayout);
2107 
2108 	context.setImageLayout(m_dstLayout);
2109 	m_imageMemorySize = context.getImageMemorySize();
2110 }
2111 
2112 void ImageTransition::submit (SubmitContext& context)
2113 {
2114 	const vk::DeviceInterface&		vkd			= context.getContext().getDeviceInterface();
2115 	const vk::VkCommandBuffer		cmd			= context.getCommandBuffer();
2116 	const vk::VkImageMemoryBarrier	barrier		=
2117 	{
2118 		vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
2119 		DE_NULL,
2120 
2121 		m_srcAccesses,
2122 		m_dstAccesses,
2123 
2124 		m_srcLayout,
2125 		m_dstLayout,
2126 
2127 		VK_QUEUE_FAMILY_IGNORED,
2128 		VK_QUEUE_FAMILY_IGNORED,
2129 
2130 		context.getImage(),
2131 		{
2132 			vk::VK_IMAGE_ASPECT_COLOR_BIT,
2133 			0u, 1u,
2134 			0u, 1u
2135 		}
2136 	};
2137 
2138 	vkd.cmdPipelineBarrier(cmd, m_srcStages, m_dstStages, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &barrier);
2139 }
2140 
2141 void ImageTransition::verify (VerifyContext& context, size_t)
2142 {
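	// The byte-level contents of the allocation after a layout transition are not tracked by the
	// reference, so mark the whole image memory range as undefined.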
2143 	context.getReference().setUndefined(0, (size_t)m_imageMemorySize);
2144 }
2145 
2146 class FillBuffer : public CmdCommand
2147 {
2148 public:
2149 						FillBuffer	(deUint32 value) : m_value(value) {}
2150 						~FillBuffer	(void) {}
2151 	const char*			getName		(void) const { return "FillBuffer"; }
2152 
2153 	void				logSubmit	(TestLog& log, size_t commandIndex) const;
2154 	void				submit		(SubmitContext& context);
2155 	void				verify		(VerifyContext& context, size_t commandIndex);
2156 
2157 private:
2158 	const deUint32		m_value;
2159 	vk::VkDeviceSize	m_bufferSize;
2160 };
2161 
2162 void FillBuffer::logSubmit (TestLog& log, size_t commandIndex) const
2163 {
2164 	log << TestLog::Message << commandIndex << ":" << getName() << " Fill value: " << m_value << TestLog::EndMessage;
2165 }
2166 
2167 void FillBuffer::submit (SubmitContext& context)
2168 {
2169 	const vk::DeviceInterface&	vkd			= context.getContext().getDeviceInterface();
2170 	const vk::VkCommandBuffer	cmd			= context.getCommandBuffer();
2171 	const vk::VkBuffer			buffer		= context.getBuffer();
2172 	const vk::VkDeviceSize		sizeMask	= ~(0x3ull); // \note Round down to multiple of 4
2173 
2174 	m_bufferSize = sizeMask & context.getBufferSize();
2175 	vkd.cmdFillBuffer(cmd, buffer, 0, m_bufferSize, m_value);
2176 }
2177 
2178 void FillBuffer::verify (VerifyContext& context, size_t)
2179 {
2180 	ReferenceMemory&	reference	= context.getReference();
2181 
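	// Expand the 32-bit fill value into per-byte reference values, taking host endianness into account.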
2182 	for (size_t ndx = 0; ndx < m_bufferSize; ndx++)
2183 	{
2184 #if (DE_ENDIANNESS == DE_LITTLE_ENDIAN)
2185 		reference.set(ndx, (deUint8)(0xffu & (m_value >> (8*(ndx % 4)))));
2186 #else
2187 		reference.set(ndx, (deUint8)(0xffu & (m_value >> (8*(3 - (ndx % 4))))));
2188 #endif
2189 	}
2190 }
2191 
2192 class UpdateBuffer : public CmdCommand
2193 {
2194 public:
2195 						UpdateBuffer	(deUint32 seed) : m_seed(seed) {}
2196 						~UpdateBuffer	(void) {}
2197 	const char*			getName			(void) const { return "UpdateBuffer"; }
2198 
2199 	void				logSubmit		(TestLog& log, size_t commandIndex) const;
2200 	void				submit			(SubmitContext& context);
2201 	void				verify			(VerifyContext& context, size_t commandIndex);
2202 
2203 private:
2204 	const deUint32		m_seed;
2205 	vk::VkDeviceSize	m_bufferSize;
2206 };
2207 
2208 void UpdateBuffer::logSubmit (TestLog& log, size_t commandIndex) const
2209 {
2210 	log << TestLog::Message << commandIndex << ":" << getName() << " Update buffer, seed: " << m_seed << TestLog::EndMessage;
2211 }
2212 
2213 void UpdateBuffer::submit (SubmitContext& context)
2214 {
2215 	const vk::DeviceInterface&	vkd			= context.getContext().getDeviceInterface();
2216 	const vk::VkCommandBuffer	cmd			= context.getCommandBuffer();
2217 	const vk::VkBuffer			buffer		= context.getBuffer();
2218 	const size_t				blockSize	= 65536;
2219 	std::vector<deUint8>		data		(blockSize, 0);
2220 	de::Random					rng			(m_seed);
2221 
2222 	m_bufferSize = context.getBufferSize();
2223 
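	// vkCmdUpdateBuffer accepts at most 65536 bytes per call, so the buffer is written in blockSize chunks.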
2224 	for (size_t updated = 0; updated < m_bufferSize; updated += blockSize)
2225 	{
2226 		for (size_t ndx = 0; ndx < data.size(); ndx++)
2227 			data[ndx] = rng.getUint8();
2228 
2229 		if (m_bufferSize - updated > blockSize)
2230 			vkd.cmdUpdateBuffer(cmd, buffer, updated, blockSize, (const deUint32*)(&data[0]));
2231 		else
2232 			vkd.cmdUpdateBuffer(cmd, buffer, updated, m_bufferSize - updated, (const deUint32*)(&data[0]));
2233 	}
2234 }
2235 
2236 void UpdateBuffer::verify (VerifyContext& context, size_t)
2237 {
2238 	ReferenceMemory&	reference	= context.getReference();
2239 	const size_t		blockSize	= 65536;
2240 	vector<deUint8>		data		(blockSize, 0);
2241 	de::Random			rng			(m_seed);
2242 
2243 	for (size_t updated = 0; updated < m_bufferSize; updated += blockSize)
2244 	{
2245 		for (size_t ndx = 0; ndx < data.size(); ndx++)
2246 			data[ndx] = rng.getUint8();
2247 
2248 		if (m_bufferSize - updated > blockSize)
2249 			reference.setData(updated, blockSize, &data[0]);
2250 		else
2251 			reference.setData(updated, (size_t)(m_bufferSize - updated), &data[0]);
2252 	}
2253 }
2254 
2255 class BufferCopyToBuffer : public CmdCommand
2256 {
2257 public:
2258 									BufferCopyToBuffer	(void) {}
2259 									~BufferCopyToBuffer	(void) {}
2260 	const char*						getName				(void) const { return "BufferCopyToBuffer"; }
2261 
2262 	void							logPrepare			(TestLog& log, size_t commandIndex) const;
2263 	void							prepare				(PrepareContext& context);
2264 	void							logSubmit			(TestLog& log, size_t commandIndex) const;
2265 	void							submit				(SubmitContext& context);
2266 	void							verify				(VerifyContext& context, size_t commandIndex);
2267 
2268 private:
2269 	vk::VkDeviceSize				m_bufferSize;
2270 	vk::Move<vk::VkBuffer>			m_dstBuffer;
2271 	vk::Move<vk::VkDeviceMemory>	m_memory;
2272 };
2273 
2274 void BufferCopyToBuffer::logPrepare (TestLog& log, size_t commandIndex) const
2275 {
2276 	log << TestLog::Message << commandIndex << ":" << getName() << " Allocate destination buffer for buffer to buffer copy." << TestLog::EndMessage;
2277 }
2278 
2279 void BufferCopyToBuffer::prepare (PrepareContext& context)
2280 {
2281 	const vk::InstanceInterface&	vki				= context.getContext().getInstanceInterface();
2282 	const vk::DeviceInterface&		vkd				= context.getContext().getDeviceInterface();
2283 	const vk::VkPhysicalDevice		physicalDevice	= context.getContext().getPhysicalDevice();
2284 	const vk::VkDevice				device			= context.getContext().getDevice();
2285 	const vector<deUint32>&			queueFamilies	= context.getContext().getQueueFamilies();
2286 
2287 	m_bufferSize = context.getBufferSize();
2288 
2289 	m_dstBuffer	= createBuffer(vkd, device, m_bufferSize, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies);
2290 	m_memory	= bindBufferMemory(vki, vkd, physicalDevice, device, *m_dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
2291 }
2292 
2293 void BufferCopyToBuffer::logSubmit (TestLog& log, size_t commandIndex) const
2294 {
2295 	log << TestLog::Message << commandIndex << ":" << getName() << " Copy buffer to another buffer" << TestLog::EndMessage;
2296 }
2297 
2298 void BufferCopyToBuffer::submit (SubmitContext& context)
2299 {
2300 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
2301 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
2302 	const vk::VkBufferCopy		range			=
2303 	{
2304 		0, 0, // Offsets
2305 		m_bufferSize
2306 	};
2307 
2308 	vkd.cmdCopyBuffer(commandBuffer, context.getBuffer(), *m_dstBuffer, 1, &range);
2309 }
2310 
2311 void BufferCopyToBuffer::verify (VerifyContext& context, size_t commandIndex)
2312 {
2313 	tcu::ResultCollector&					resultCollector	(context.getResultCollector());
2314 	ReferenceMemory&						reference		(context.getReference());
2315 	const vk::DeviceInterface&				vkd				= context.getContext().getDeviceInterface();
2316 	const vk::VkDevice						device			= context.getContext().getDevice();
2317 	const vk::VkQueue						queue			= context.getContext().getQueue();
2318 	const vk::VkCommandPool					commandPool		= context.getContext().getCommandPool();
2319 	const vk::Unique<vk::VkCommandBuffer>	commandBuffer	(createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
2320 	const vk::VkBufferMemoryBarrier			barrier			=
2321 	{
2322 		vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
2323 		DE_NULL,
2324 
2325 		vk::VK_ACCESS_TRANSFER_WRITE_BIT,
2326 		vk::VK_ACCESS_HOST_READ_BIT,
2327 
2328 		VK_QUEUE_FAMILY_IGNORED,
2329 		VK_QUEUE_FAMILY_IGNORED,
2330 		*m_dstBuffer,
2331 		0,
2332 		VK_WHOLE_SIZE
2333 	};
2334 
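	// Make the transfer writes to the destination buffer visible to host reads before mapping it.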
2335 	vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &barrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
2336 
2337 	endCommandBuffer(vkd, *commandBuffer);
2338 	submitCommandsAndWait(vkd, device, queue, *commandBuffer);
2339 
2340 	{
2341 		void* const	ptr		= mapMemory(vkd, device, *m_memory, m_bufferSize);
2342 		bool		isOk	= true;
2343 
2344 		vk::invalidateMappedMemoryRange(vkd, device, *m_memory, 0, VK_WHOLE_SIZE);
2345 
2346 		{
2347 			const deUint8* const data = (const deUint8*)ptr;
2348 
2349 			for (size_t pos = 0; pos < (size_t)m_bufferSize; pos++)
2350 			{
2351 				if (reference.isDefined(pos))
2352 				{
2353 					if (data[pos] != reference.get(pos))
2354 					{
2355 						resultCollector.fail(
2356 								de::toString(commandIndex) + ":" + getName()
2357 								+ " Result differs from reference, Expected: "
2358 								+ de::toString(tcu::toHex<8>(reference.get(pos)))
2359 								+ ", Got: "
2360 								+ de::toString(tcu::toHex<8>(data[pos]))
2361 								+ ", At offset: "
2362 								+ de::toString(pos));
						isOk = false;
2363 						break;
2364 					}
2365 				}
2366 			}
2367 		}
2368 
2369 		vkd.unmapMemory(device, *m_memory);
2370 
2371 		if (!isOk)
2372 			context.getLog() << TestLog::Message << commandIndex << ": Buffer copy to buffer verification failed" << TestLog::EndMessage;
2373 	}
2374 }
2375 
2376 class BufferCopyFromBuffer : public CmdCommand
2377 {
2378 public:
2379 									BufferCopyFromBuffer	(deUint32 seed) : m_seed(seed) {}
2380 									~BufferCopyFromBuffer	(void) {}
2381 	const char*						getName					(void) const { return "BufferCopyFromBuffer"; }
2382 
2383 	void							logPrepare				(TestLog& log, size_t commandIndex) const;
2384 	void							prepare					(PrepareContext& context);
2385 	void							logSubmit				(TestLog& log, size_t commandIndex) const;
2386 	void							submit					(SubmitContext& context);
2387 	void							verify					(VerifyContext& context, size_t commandIndex);
2388 
2389 private:
2390 	const deUint32					m_seed;
2391 	vk::VkDeviceSize				m_bufferSize;
2392 	vk::Move<vk::VkBuffer>			m_srcBuffer;
2393 	vk::Move<vk::VkDeviceMemory>	m_memory;
2394 };
2395 
2396 void BufferCopyFromBuffer::logPrepare (TestLog& log, size_t commandIndex) const
2397 {
2398 	log << TestLog::Message << commandIndex << ":" << getName() << " Allocate source buffer for buffer to buffer copy. Seed: " << m_seed << TestLog::EndMessage;
2399 }
2400 
2401 void BufferCopyFromBuffer::prepare (PrepareContext& context)
2402 {
2403 	const vk::InstanceInterface&	vki				= context.getContext().getInstanceInterface();
2404 	const vk::DeviceInterface&		vkd				= context.getContext().getDeviceInterface();
2405 	const vk::VkPhysicalDevice		physicalDevice	= context.getContext().getPhysicalDevice();
2406 	const vk::VkDevice				device			= context.getContext().getDevice();
2407 	const vector<deUint32>&			queueFamilies	= context.getContext().getQueueFamilies();
2408 
2409 	m_bufferSize	= context.getBufferSize();
2410 	m_srcBuffer		= createBuffer(vkd, device, m_bufferSize, vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies);
2411 	m_memory		= bindBufferMemory(vki, vkd, physicalDevice, device, *m_srcBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
2412 
2413 	{
2414 		void* const	ptr	= mapMemory(vkd, device, *m_memory, m_bufferSize);
2415 		de::Random	rng	(m_seed);
2416 
2417 		{
2418 			deUint8* const	data = (deUint8*)ptr;
2419 
2420 			for (size_t ndx = 0; ndx < (size_t)m_bufferSize; ndx++)
2421 				data[ndx] = rng.getUint8();
2422 		}
2423 
2424 		vk::flushMappedMemoryRange(vkd, device, *m_memory, 0, VK_WHOLE_SIZE);
2425 		vkd.unmapMemory(device, *m_memory);
2426 	}
2427 }
2428 
2429 void BufferCopyFromBuffer::logSubmit (TestLog& log, size_t commandIndex) const
2430 {
2431 	log << TestLog::Message << commandIndex << ":" << getName() << " Copy buffer data from another buffer" << TestLog::EndMessage;
2432 }
2433 
2434 void BufferCopyFromBuffer::submit (SubmitContext& context)
2435 {
2436 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
2437 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
2438 	const vk::VkBufferCopy		range			=
2439 	{
2440 		0, 0, // Offsets
2441 		m_bufferSize
2442 	};
2443 
2444 	vkd.cmdCopyBuffer(commandBuffer, *m_srcBuffer, context.getBuffer(), 1, &range);
2445 }
2446 
2447 void BufferCopyFromBuffer::verify (VerifyContext& context, size_t)
2448 {
2449 	ReferenceMemory&	reference	(context.getReference());
2450 	de::Random			rng			(m_seed);
2451 
2452 	for (size_t ndx = 0; ndx < (size_t)m_bufferSize; ndx++)
2453 		reference.set(ndx, rng.getUint8());
2454 }
2455 
2456 class BufferCopyToImage : public CmdCommand
2457 {
2458 public:
2459 									BufferCopyToImage	(void) {}
2460 									~BufferCopyToImage	(void) {}
2461 	const char*						getName				(void) const { return "BufferCopyToImage"; }
2462 
2463 	void							logPrepare			(TestLog& log, size_t commandIndex) const;
2464 	void							prepare				(PrepareContext& context);
2465 	void							logSubmit			(TestLog& log, size_t commandIndex) const;
2466 	void							submit				(SubmitContext& context);
2467 	void							verify				(VerifyContext& context, size_t commandIndex);
2468 
2469 private:
2470 	deInt32							m_imageWidth;
2471 	deInt32							m_imageHeight;
2472 	vk::Move<vk::VkImage>			m_dstImage;
2473 	vk::Move<vk::VkDeviceMemory>	m_memory;
2474 };
2475 
2476 void BufferCopyToImage::logPrepare (TestLog& log, size_t commandIndex) const
2477 {
2478 	log << TestLog::Message << commandIndex << ":" << getName() << " Allocate destination image for buffer to image copy." << TestLog::EndMessage;
2479 }
2480 
2481 void BufferCopyToImage::prepare (PrepareContext& context)
2482 {
2483 	const vk::InstanceInterface&	vki				= context.getContext().getInstanceInterface();
2484 	const vk::DeviceInterface&		vkd				= context.getContext().getDeviceInterface();
2485 	const vk::VkPhysicalDevice		physicalDevice	= context.getContext().getPhysicalDevice();
2486 	const vk::VkDevice				device			= context.getContext().getDevice();
2487 	const vk::VkQueue				queue			= context.getContext().getQueue();
2488 	const vk::VkCommandPool			commandPool		= context.getContext().getCommandPool();
2489 	const vector<deUint32>&			queueFamilies	= context.getContext().getQueueFamilies();
2490 	const IVec2						imageSize		= findImageSizeWxHx4(context.getBufferSize());
2491 
2492 	m_imageWidth	= imageSize[0];
2493 	m_imageHeight	= imageSize[1];
2494 
2495 	{
2496 		const vk::VkImageCreateInfo	createInfo =
2497 		{
2498 			vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
2499 			DE_NULL,
2500 
2501 			0,
2502 			vk::VK_IMAGE_TYPE_2D,
2503 			vk::VK_FORMAT_R8G8B8A8_UNORM,
2504 			{
2505 				(deUint32)m_imageWidth,
2506 				(deUint32)m_imageHeight,
2507 				1u,
2508 			},
2509 			1, 1, // mipLevels, arrayLayers
2510 			vk::VK_SAMPLE_COUNT_1_BIT,
2511 
2512 			vk::VK_IMAGE_TILING_OPTIMAL,
2513 			vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
2514 			vk::VK_SHARING_MODE_EXCLUSIVE,
2515 
2516 			(deUint32)queueFamilies.size(),
2517 			&queueFamilies[0],
2518 			vk::VK_IMAGE_LAYOUT_UNDEFINED
2519 		};
2520 
2521 		m_dstImage = vk::createImage(vkd, device, &createInfo);
2522 	}
2523 
2524 	m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_dstImage, 0);
2525 
2526 	{
2527 		const vk::Unique<vk::VkCommandBuffer>	commandBuffer	(createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
2528 		const vk::VkImageMemoryBarrier			barrier			=
2529 		{
2530 			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
2531 			DE_NULL,
2532 
2533 			0,
2534 			vk::VK_ACCESS_TRANSFER_WRITE_BIT,
2535 
2536 			vk::VK_IMAGE_LAYOUT_UNDEFINED,
2537 			vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2538 
2539 			VK_QUEUE_FAMILY_IGNORED,
2540 			VK_QUEUE_FAMILY_IGNORED,
2541 
2542 			*m_dstImage,
2543 			{
2544 				vk::VK_IMAGE_ASPECT_COLOR_BIT,
2545 				0,	// Mip level
2546 				1,	// Mip level count
2547 				0,	// Layer
2548 				1	// Layer count
2549 			}
2550 		};
2551 
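		// Transition the freshly created destination image to TRANSFER_DST_OPTIMAL before it is used as a copy target.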
2552 		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &barrier);
2553 
2554 		endCommandBuffer(vkd, *commandBuffer);
2555 		submitCommandsAndWait(vkd, device, queue, *commandBuffer);
2556 	}
2557 }
2558 
2559 void BufferCopyToImage::logSubmit (TestLog& log, size_t commandIndex) const
2560 {
2561 	log << TestLog::Message << commandIndex << ":" << getName() << " Copy buffer to image" << TestLog::EndMessage;
2562 }
2563 
2564 void BufferCopyToImage::submit (SubmitContext& context)
2565 {
2566 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
2567 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
2568 	const vk::VkBufferImageCopy	region			=
2569 	{
2570 		0,
2571 		0, 0,
2572 		{
2573 			vk::VK_IMAGE_ASPECT_COLOR_BIT,
2574 			0,	// mipLevel
2575 			0,	// arrayLayer
2576 			1	// layerCount
2577 		},
2578 		{ 0, 0, 0 },
2579 		{
2580 			(deUint32)m_imageWidth,
2581 			(deUint32)m_imageHeight,
2582 			1u
2583 		}
2584 	};
2585 
2586 	vkd.cmdCopyBufferToImage(commandBuffer, context.getBuffer(), *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
2587 }
2588 
2589 void BufferCopyToImage::verify (VerifyContext& context, size_t commandIndex)
2590 {
2591 	tcu::ResultCollector&					resultCollector	(context.getResultCollector());
2592 	ReferenceMemory&						reference		(context.getReference());
2593 	const vk::InstanceInterface&			vki				= context.getContext().getInstanceInterface();
2594 	const vk::DeviceInterface&				vkd				= context.getContext().getDeviceInterface();
2595 	const vk::VkPhysicalDevice				physicalDevice	= context.getContext().getPhysicalDevice();
2596 	const vk::VkDevice						device			= context.getContext().getDevice();
2597 	const vk::VkQueue						queue			= context.getContext().getQueue();
2598 	const vk::VkCommandPool					commandPool		= context.getContext().getCommandPool();
2599 	const vk::Unique<vk::VkCommandBuffer>	commandBuffer	(createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
2600 	const vector<deUint32>&					queueFamilies	= context.getContext().getQueueFamilies();
2601 	const vk::Unique<vk::VkBuffer>			dstBuffer		(createBuffer(vkd, device, 4 * m_imageWidth * m_imageHeight, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
2602 	const vk::Unique<vk::VkDeviceMemory>	memory			(bindBufferMemory(vki, vkd, physicalDevice, device, *dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
2603 	{
2604 		const vk::VkImageMemoryBarrier		imageBarrier	=
2605 		{
2606 			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
2607 			DE_NULL,
2608 
2609 			vk::VK_ACCESS_TRANSFER_WRITE_BIT,
2610 			vk::VK_ACCESS_TRANSFER_READ_BIT,
2611 
2612 			vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2613 			vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2614 
2615 			VK_QUEUE_FAMILY_IGNORED,
2616 			VK_QUEUE_FAMILY_IGNORED,
2617 
2618 			*m_dstImage,
2619 			{
2620 				vk::VK_IMAGE_ASPECT_COLOR_BIT,
2621 				0,	// Mip level
2622 				1,	// Mip level count
2623 				0,	// Layer
2624 				1	// Layer count
2625 			}
2626 		};
2627 		const vk::VkBufferMemoryBarrier bufferBarrier =
2628 		{
2629 			vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
2630 			DE_NULL,
2631 
2632 			vk::VK_ACCESS_TRANSFER_WRITE_BIT,
2633 			vk::VK_ACCESS_HOST_READ_BIT,
2634 
2635 			VK_QUEUE_FAMILY_IGNORED,
2636 			VK_QUEUE_FAMILY_IGNORED,
2637 			*dstBuffer,
2638 			0,
2639 			VK_WHOLE_SIZE
2640 		};
2641 
2642 		const vk::VkBufferImageCopy	region =
2643 		{
2644 			0,
2645 			0, 0,
2646 			{
2647 				vk::VK_IMAGE_ASPECT_COLOR_BIT,
2648 				0,	// mipLevel
2649 				0,	// arrayLayer
2650 				1	// layerCount
2651 			},
2652 			{ 0, 0, 0 },
2653 			{
2654 				(deUint32)m_imageWidth,
2655 				(deUint32)m_imageHeight,
2656 				1u
2657 			}
2658 		};
2659 
2660 		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &imageBarrier);
2661 		vkd.cmdCopyImageToBuffer(*commandBuffer, *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *dstBuffer, 1, &region);
2662 		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &bufferBarrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
2663 	}
2664 
2665 	endCommandBuffer(vkd, *commandBuffer);
2666 	submitCommandsAndWait(vkd, device, queue, *commandBuffer);
2667 
2668 	{
2669 		void* const	ptr		= mapMemory(vkd, device, *memory, 4 * m_imageWidth * m_imageHeight);
2670 
2671 		invalidateMappedMemoryRange(vkd, device, *memory, 0, VK_WHOLE_SIZE);
2672 
2673 		{
2674 			const deUint8* const	data = (const deUint8*)ptr;
2675 
2676 			for (size_t pos = 0; pos < (size_t)( 4 * m_imageWidth * m_imageHeight); pos++)
2677 			{
2678 				if (reference.isDefined(pos))
2679 				{
2680 					if (data[pos] != reference.get(pos))
2681 					{
2682 						resultCollector.fail(
2683 								de::toString(commandIndex) + ":" + getName()
2684 								+ " Result differs from reference, Expected: "
2685 								+ de::toString(tcu::toHex<8>(reference.get(pos)))
2686 								+ ", Got: "
2687 								+ de::toString(tcu::toHex<8>(data[pos]))
2688 								+ ", At offset: "
2689 								+ de::toString(pos));
2690 						break;
2691 					}
2692 				}
2693 			}
2694 		}
2695 
2696 		vkd.unmapMemory(device, *memory);
2697 	}
2698 }
2699 
2700 class BufferCopyFromImage : public CmdCommand
2701 {
2702 public:
2703 									BufferCopyFromImage		(deUint32 seed) : m_seed(seed) {}
2704 									~BufferCopyFromImage	(void) {}
2705 	const char*						getName					(void) const { return "BufferCopyFromImage"; }
2706 
2707 	void							logPrepare				(TestLog& log, size_t commandIndex) const;
2708 	void							prepare					(PrepareContext& context);
2709 	void							logSubmit				(TestLog& log, size_t commandIndex) const;
2710 	void							submit					(SubmitContext& context);
2711 	void							verify					(VerifyContext& context, size_t commandIndex);
2712 
2713 private:
2714 	const deUint32					m_seed;
2715 	deInt32							m_imageWidth;
2716 	deInt32							m_imageHeight;
2717 	vk::Move<vk::VkImage>			m_srcImage;
2718 	vk::Move<vk::VkDeviceMemory>	m_memory;
2719 };
2720 
2721 void BufferCopyFromImage::logPrepare (TestLog& log, size_t commandIndex) const
2722 {
2723 	log << TestLog::Message << commandIndex << ":" << getName() << " Allocate source image for image to buffer copy." << TestLog::EndMessage;
2724 }
2725 
2726 void BufferCopyFromImage::prepare (PrepareContext& context)
2727 {
2728 	const vk::InstanceInterface&	vki				= context.getContext().getInstanceInterface();
2729 	const vk::DeviceInterface&		vkd				= context.getContext().getDeviceInterface();
2730 	const vk::VkPhysicalDevice		physicalDevice	= context.getContext().getPhysicalDevice();
2731 	const vk::VkDevice				device			= context.getContext().getDevice();
2732 	const vk::VkQueue				queue			= context.getContext().getQueue();
2733 	const vk::VkCommandPool			commandPool		= context.getContext().getCommandPool();
2734 	const vector<deUint32>&			queueFamilies	= context.getContext().getQueueFamilies();
2735 	const IVec2						imageSize		= findImageSizeWxHx4(context.getBufferSize());
2736 
2737 	m_imageWidth	= imageSize[0];
2738 	m_imageHeight	= imageSize[1];
2739 
2740 	{
2741 		const vk::VkImageCreateInfo	createInfo =
2742 		{
2743 			vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
2744 			DE_NULL,
2745 
2746 			0,
2747 			vk::VK_IMAGE_TYPE_2D,
2748 			vk::VK_FORMAT_R8G8B8A8_UNORM,
2749 			{
2750 				(deUint32)m_imageWidth,
2751 				(deUint32)m_imageHeight,
2752 				1u,
2753 			},
2754 			1, 1, // mipLevels, arrayLayers
2755 			vk::VK_SAMPLE_COUNT_1_BIT,
2756 
2757 			vk::VK_IMAGE_TILING_OPTIMAL,
2758 			vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
2759 			vk::VK_SHARING_MODE_EXCLUSIVE,
2760 
2761 			(deUint32)queueFamilies.size(),
2762 			&queueFamilies[0],
2763 			vk::VK_IMAGE_LAYOUT_UNDEFINED
2764 		};
2765 
2766 		m_srcImage = vk::createImage(vkd, device, &createInfo);
2767 	}
2768 
2769 	m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_srcImage, 0);
2770 
2771 	{
2772 		const vk::Unique<vk::VkBuffer>			srcBuffer		(createBuffer(vkd, device, 4 * m_imageWidth * m_imageHeight, vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
2773 		const vk::Unique<vk::VkDeviceMemory>	memory			(bindBufferMemory(vki, vkd, physicalDevice, device, *srcBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
2774 		const vk::Unique<vk::VkCommandBuffer>	commandBuffer	(createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
2775 		const vk::VkImageMemoryBarrier			preImageBarrier	=
2776 		{
2777 			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
2778 			DE_NULL,
2779 
2780 			0,
2781 			vk::VK_ACCESS_TRANSFER_WRITE_BIT,
2782 
2783 			vk::VK_IMAGE_LAYOUT_UNDEFINED,
2784 			vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2785 
2786 			VK_QUEUE_FAMILY_IGNORED,
2787 			VK_QUEUE_FAMILY_IGNORED,
2788 
2789 			*m_srcImage,
2790 			{
2791 				vk::VK_IMAGE_ASPECT_COLOR_BIT,
2792 				0,	// Mip level
2793 				1,	// Mip level count
2794 				0,	// Layer
2795 				1	// Layer count
2796 			}
2797 		};
2798 		const vk::VkImageMemoryBarrier			postImageBarrier =
2799 		{
2800 			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
2801 			DE_NULL,
2802 
2803 			vk::VK_ACCESS_TRANSFER_WRITE_BIT,
2804 			0,
2805 
2806 			vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2807 			vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2808 
2809 			VK_QUEUE_FAMILY_IGNORED,
2810 			VK_QUEUE_FAMILY_IGNORED,
2811 
2812 			*m_srcImage,
2813 			{
2814 				vk::VK_IMAGE_ASPECT_COLOR_BIT,
2815 				0,	// Mip level
2816 				1,	// Mip level count
2817 				0,	// Layer
2818 				1	// Layer count
2819 			}
2820 		};
2821 		const vk::VkBufferImageCopy				region				=
2822 		{
2823 			0,
2824 			0, 0,
2825 			{
2826 				vk::VK_IMAGE_ASPECT_COLOR_BIT,
2827 				0,	// mipLevel
2828 				0,	// arrayLayer
2829 				1	// layerCount
2830 			},
2831 			{ 0, 0, 0 },
2832 			{
2833 				(deUint32)m_imageWidth,
2834 				(deUint32)m_imageHeight,
2835 				1u
2836 			}
2837 		};
2838 
2839 		{
2840 			void* const	ptr	= mapMemory(vkd, device, *memory, 4 * m_imageWidth * m_imageHeight);
2841 			de::Random	rng	(m_seed);
2842 
2843 			{
2844 				deUint8* const	data = (deUint8*)ptr;
2845 
2846 				for (size_t ndx = 0; ndx < (size_t)(4 * m_imageWidth * m_imageHeight); ndx++)
2847 					data[ndx] = rng.getUint8();
2848 			}
2849 
2850 			vk::flushMappedMemoryRange(vkd, device, *memory, 0, VK_WHOLE_SIZE);
2851 			vkd.unmapMemory(device, *memory);
2852 		}
2853 
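		// Upload the random reference data through the staging buffer and leave the image in
		// TRANSFER_SRC_OPTIMAL so submit() can copy from it.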
2854 		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &preImageBarrier);
2855 		vkd.cmdCopyBufferToImage(*commandBuffer, *srcBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
2856 		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &postImageBarrier);
2857 
2858 		endCommandBuffer(vkd, *commandBuffer);
2859 		submitCommandsAndWait(vkd, device, queue, *commandBuffer);
2860 	}
2861 }
2862 
2863 void BufferCopyFromImage::logSubmit (TestLog& log, size_t commandIndex) const
2864 {
2865 	log << TestLog::Message << commandIndex << ":" << getName() << " Copy buffer data from image" << TestLog::EndMessage;
2866 }
2867 
2868 void BufferCopyFromImage::submit (SubmitContext& context)
2869 {
2870 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
2871 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
2872 	const vk::VkBufferImageCopy	region			=
2873 	{
2874 		0,
2875 		0, 0,
2876 		{
2877 			vk::VK_IMAGE_ASPECT_COLOR_BIT,
2878 			0,	// mipLevel
2879 			0,	// arrayLayer
2880 			1	// layerCount
2881 		},
2882 		{ 0, 0, 0 },
2883 		{
2884 			(deUint32)m_imageWidth,
2885 			(deUint32)m_imageHeight,
2886 			1u
2887 		}
2888 	};
2889 
2890 	vkd.cmdCopyImageToBuffer(commandBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, context.getBuffer(), 1, &region);
2891 }
2892 
2893 void BufferCopyFromImage::verify (VerifyContext& context, size_t)
2894 {
2895 	ReferenceMemory&	reference		(context.getReference());
2896 	de::Random			rng	(m_seed);
2897 
2898 	for (size_t ndx = 0; ndx < (size_t)(4 * m_imageWidth * m_imageHeight); ndx++)
2899 		reference.set(ndx, rng.getUint8());
2900 }
2901 
2902 class ImageCopyToBuffer : public CmdCommand
2903 {
2904 public:
2905 									ImageCopyToBuffer	(vk::VkImageLayout imageLayout) : m_imageLayout (imageLayout) {}
2906 									~ImageCopyToBuffer	(void) {}
2907 	const char*						getName				(void) const { return "ImageCopyToBuffer"; }
2908 
2909 	void							logPrepare			(TestLog& log, size_t commandIndex) const;
2910 	void							prepare				(PrepareContext& context);
2911 	void							logSubmit			(TestLog& log, size_t commandIndex) const;
2912 	void							submit				(SubmitContext& context);
2913 	void							verify				(VerifyContext& context, size_t commandIndex);
2914 
2915 private:
2916 	vk::VkImageLayout				m_imageLayout;
2917 	vk::VkDeviceSize				m_bufferSize;
2918 	vk::Move<vk::VkBuffer>			m_dstBuffer;
2919 	vk::Move<vk::VkDeviceMemory>	m_memory;
2920 	vk::VkDeviceSize				m_imageMemorySize;
2921 	deInt32							m_imageWidth;
2922 	deInt32							m_imageHeight;
2923 };
2924 
2925 void ImageCopyToBuffer::logPrepare (TestLog& log, size_t commandIndex) const
2926 {
2927 	log << TestLog::Message << commandIndex << ":" << getName() << " Allocate destination buffer for image to buffer copy." << TestLog::EndMessage;
2928 }
2929 
2930 void ImageCopyToBuffer::prepare (PrepareContext& context)
2931 {
2932 	const vk::InstanceInterface&	vki				= context.getContext().getInstanceInterface();
2933 	const vk::DeviceInterface&		vkd				= context.getContext().getDeviceInterface();
2934 	const vk::VkPhysicalDevice		physicalDevice	= context.getContext().getPhysicalDevice();
2935 	const vk::VkDevice				device			= context.getContext().getDevice();
2936 	const vector<deUint32>&			queueFamilies	= context.getContext().getQueueFamilies();
2937 
2938 	m_imageWidth		= context.getImageWidth();
2939 	m_imageHeight		= context.getImageHeight();
2940 	m_bufferSize		= 4 * m_imageWidth * m_imageHeight;
2941 	m_imageMemorySize	= context.getImageMemorySize();
2942 	m_dstBuffer			= createBuffer(vkd, device, m_bufferSize, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies);
2943 	m_memory			= bindBufferMemory(vki, vkd, physicalDevice, device, *m_dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
2944 }
2945 
2946 void ImageCopyToBuffer::logSubmit (TestLog& log, size_t commandIndex) const
2947 {
2948 	log << TestLog::Message << commandIndex << ":" << getName() << " Copy image to buffer" << TestLog::EndMessage;
2949 }
2950 
2951 void ImageCopyToBuffer::submit (SubmitContext& context)
2952 {
2953 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
2954 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
2955 	const vk::VkBufferImageCopy	region			=
2956 	{
2957 		0,
2958 		0, 0,
2959 		{
2960 			vk::VK_IMAGE_ASPECT_COLOR_BIT,
2961 			0,	// mipLevel
2962 			0,	// arrayLayer
2963 			1	// layerCount
2964 		},
2965 		{ 0, 0, 0 },
2966 		{
2967 			(deUint32)m_imageWidth,
2968 			(deUint32)m_imageHeight,
2969 			1u
2970 		}
2971 	};
2972 
2973 	vkd.cmdCopyImageToBuffer(commandBuffer, context.getImage(), m_imageLayout, *m_dstBuffer, 1, &region);
2974 }
2975 
2976 void ImageCopyToBuffer::verify (VerifyContext& context, size_t commandIndex)
2977 {
2978 	tcu::ResultCollector&					resultCollector	(context.getResultCollector());
2979 	ReferenceMemory&						reference		(context.getReference());
2980 	const vk::DeviceInterface&				vkd				= context.getContext().getDeviceInterface();
2981 	const vk::VkDevice						device			= context.getContext().getDevice();
2982 	const vk::VkQueue						queue			= context.getContext().getQueue();
2983 	const vk::VkCommandPool					commandPool		= context.getContext().getCommandPool();
2984 	const vk::Unique<vk::VkCommandBuffer>	commandBuffer	(createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
2985 	const vk::VkBufferMemoryBarrier			barrier			=
2986 	{
2987 		vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
2988 		DE_NULL,
2989 
2990 		vk::VK_ACCESS_TRANSFER_WRITE_BIT,
2991 		vk::VK_ACCESS_HOST_READ_BIT,
2992 
2993 		VK_QUEUE_FAMILY_IGNORED,
2994 		VK_QUEUE_FAMILY_IGNORED,
2995 		*m_dstBuffer,
2996 		0,
2997 		VK_WHOLE_SIZE
2998 	};
2999 
3000 	vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &barrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
3001 
3002 	endCommandBuffer(vkd, *commandBuffer);
3003 	submitCommandsAndWait(vkd, device, queue, *commandBuffer);
3004 
3005 	reference.setUndefined(0, (size_t)m_imageMemorySize);
3006 	{
3007 		void* const						ptr				= mapMemory(vkd, device, *m_memory, m_bufferSize);
3008 		const ConstPixelBufferAccess	referenceImage	(context.getReferenceImage().getAccess());
3009 		const ConstPixelBufferAccess	resultImage		(TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_imageWidth, m_imageHeight, 1, ptr);
3010 
3011 		vk::invalidateMappedMemoryRange(vkd, device, *m_memory, 0, VK_WHOLE_SIZE);
3012 
3013 		if (!tcu::intThresholdCompare(context.getLog(), (de::toString(commandIndex) + ":" + getName()).c_str(), (de::toString(commandIndex) + ":" + getName()).c_str(), referenceImage, resultImage, UVec4(0), tcu::COMPARE_LOG_ON_ERROR))
3014 			resultCollector.fail(de::toString(commandIndex) + ":" + getName() + " Image comparison failed");
3015 
3016 		vkd.unmapMemory(device, *m_memory);
3017 	}
3018 }
3019 
3020 class ImageCopyFromBuffer : public CmdCommand
3021 {
3022 public:
3023 									ImageCopyFromBuffer		(deUint32 seed, vk::VkImageLayout imageLayout) : m_seed(seed), m_imageLayout(imageLayout) {}
3024 									~ImageCopyFromBuffer	(void) {}
3025 	const char*						getName					(void) const { return "ImageCopyFromBuffer"; }
3026 
3027 	void							logPrepare				(TestLog& log, size_t commandIndex) const;
3028 	void							prepare					(PrepareContext& context);
3029 	void							logSubmit				(TestLog& log, size_t commandIndex) const;
3030 	void							submit					(SubmitContext& context);
3031 	void							verify					(VerifyContext& context, size_t commandIndex);
3032 
3033 private:
3034 	const deUint32					m_seed;
3035 	const vk::VkImageLayout			m_imageLayout;
3036 	deInt32							m_imageWidth;
3037 	deInt32							m_imageHeight;
3038 	vk::VkDeviceSize				m_imageMemorySize;
3039 	vk::VkDeviceSize				m_bufferSize;
3040 	vk::Move<vk::VkBuffer>			m_srcBuffer;
3041 	vk::Move<vk::VkDeviceMemory>	m_memory;
3042 };
3043 
3044 void ImageCopyFromBuffer::logPrepare (TestLog& log, size_t commandIndex) const
3045 {
3046 	log << TestLog::Message << commandIndex << ":" << getName() << " Allocate source buffer for buffer to image copy. Seed: " << m_seed << TestLog::EndMessage;
3047 }
3048 
3049 void ImageCopyFromBuffer::prepare (PrepareContext& context)
3050 {
3051 	const vk::InstanceInterface&	vki				= context.getContext().getInstanceInterface();
3052 	const vk::DeviceInterface&		vkd				= context.getContext().getDeviceInterface();
3053 	const vk::VkPhysicalDevice		physicalDevice	= context.getContext().getPhysicalDevice();
3054 	const vk::VkDevice				device			= context.getContext().getDevice();
3055 	const vector<deUint32>&			queueFamilies	= context.getContext().getQueueFamilies();
3056 
3057 	m_imageWidth		= context.getImageWidth();
3058 	m_imageHeight		= context.getImageHeight();
3059 	m_imageMemorySize	= context.getImageMemorySize();
3060 	m_bufferSize		= m_imageWidth * m_imageHeight * 4;
3061 	m_srcBuffer			= createBuffer(vkd, device, m_bufferSize, vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies);
3062 	m_memory			= bindBufferMemory(vki, vkd, physicalDevice, device, *m_srcBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
3063 
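	// Fill the staging buffer with seeded random bytes; verify() regenerates the same sequence from m_seed.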
3064 	{
3065 		void* const	ptr	= mapMemory(vkd, device, *m_memory, m_bufferSize);
3066 		de::Random	rng	(m_seed);
3067 
3068 		{
3069 			deUint8* const	data = (deUint8*)ptr;
3070 
3071 			for (size_t ndx = 0; ndx < (size_t)m_bufferSize; ndx++)
3072 				data[ndx] = rng.getUint8();
3073 		}
3074 
3075 		vk::flushMappedMemoryRange(vkd, device, *m_memory, 0, VK_WHOLE_SIZE);
3076 		vkd.unmapMemory(device, *m_memory);
3077 	}
3078 }
3079 
3080 void ImageCopyFromBuffer::logSubmit (TestLog& log, size_t commandIndex) const
3081 {
3082 	log << TestLog::Message << commandIndex << ":" << getName() << " Copy image data from buffer" << TestLog::EndMessage;
3083 }
3084 
3085 void ImageCopyFromBuffer::submit (SubmitContext& context)
3086 {
3087 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
3088 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
3089 	const vk::VkBufferImageCopy	region			=
3090 	{
3091 		0,
3092 		0, 0,
3093 		{
3094 			vk::VK_IMAGE_ASPECT_COLOR_BIT,
3095 			0,	// mipLevel
3096 			0,	// arrayLayer
3097 			1	// layerCount
3098 		},
3099 		{ 0, 0, 0 },
3100 		{
3101 			(deUint32)m_imageWidth,
3102 			(deUint32)m_imageHeight,
3103 			1u
3104 		}
3105 	};
3106 
3107 	vkd.cmdCopyBufferToImage(commandBuffer, *m_srcBuffer, context.getImage(), m_imageLayout, 1, &region);
3108 }
3109 
3110 void ImageCopyFromBuffer::verify (VerifyContext& context, size_t)
3111 {
3112 	ReferenceMemory&	reference	(context.getReference());
3113 	de::Random			rng			(m_seed);
3114 
3115 	reference.setUndefined(0, (size_t)m_imageMemorySize);
3116 
3117 	{
3118 		const PixelBufferAccess&	refAccess	(context.getReferenceImage().getAccess());
3119 
3120 		for (deInt32 y = 0; y < m_imageHeight; y++)
3121 		for (deInt32 x = 0; x < m_imageWidth; x++)
3122 		{
3123 			const deUint8 r8 = rng.getUint8();
3124 			const deUint8 g8 = rng.getUint8();
3125 			const deUint8 b8 = rng.getUint8();
3126 			const deUint8 a8 = rng.getUint8();
3127 
3128 			refAccess.setPixel(UVec4(r8, g8, b8, a8), x, y);
3129 		}
3130 	}
3131 }
3132 
3133 class ImageCopyFromImage : public CmdCommand
3134 {
3135 public:
3136 									ImageCopyFromImage	(deUint32 seed, vk::VkImageLayout imageLayout) : m_seed(seed), m_imageLayout(imageLayout) {}
3137 									~ImageCopyFromImage	(void) {}
3138 	const char*						getName				(void) const { return "ImageCopyFromImage"; }
3139 
3140 	void							logPrepare			(TestLog& log, size_t commandIndex) const;
3141 	void							prepare				(PrepareContext& context);
3142 	void							logSubmit			(TestLog& log, size_t commandIndex) const;
3143 	void							submit				(SubmitContext& context);
3144 	void							verify				(VerifyContext& context, size_t commandIndex);
3145 
3146 private:
3147 	const deUint32					m_seed;
3148 	const vk::VkImageLayout			m_imageLayout;
3149 	deInt32							m_imageWidth;
3150 	deInt32							m_imageHeight;
3151 	vk::VkDeviceSize				m_imageMemorySize;
3152 	vk::Move<vk::VkImage>			m_srcImage;
3153 	vk::Move<vk::VkDeviceMemory>	m_memory;
3154 };
3155 
3156 void ImageCopyFromImage::logPrepare (TestLog& log, size_t commandIndex) const
3157 {
3158 	log << TestLog::Message << commandIndex << ":" << getName() << " Allocate source image for image to image copy." << TestLog::EndMessage;
3159 }
3160 
3161 void ImageCopyFromImage::prepare (PrepareContext& context)
3162 {
3163 	const vk::InstanceInterface&	vki				= context.getContext().getInstanceInterface();
3164 	const vk::DeviceInterface&		vkd				= context.getContext().getDeviceInterface();
3165 	const vk::VkPhysicalDevice		physicalDevice	= context.getContext().getPhysicalDevice();
3166 	const vk::VkDevice				device			= context.getContext().getDevice();
3167 	const vk::VkQueue				queue			= context.getContext().getQueue();
3168 	const vk::VkCommandPool			commandPool		= context.getContext().getCommandPool();
3169 	const vector<deUint32>&			queueFamilies	= context.getContext().getQueueFamilies();
3170 
3171 	m_imageWidth		= context.getImageWidth();
3172 	m_imageHeight		= context.getImageHeight();
3173 	m_imageMemorySize	= context.getImageMemorySize();
3174 
3175 	{
3176 		const vk::VkImageCreateInfo	createInfo =
3177 		{
3178 			vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
3179 			DE_NULL,
3180 
3181 			0,
3182 			vk::VK_IMAGE_TYPE_2D,
3183 			vk::VK_FORMAT_R8G8B8A8_UNORM,
3184 			{
3185 				(deUint32)m_imageWidth,
3186 				(deUint32)m_imageHeight,
3187 				1u,
3188 			},
3189 			1, 1, // mipLevels, arrayLayers
3190 			vk::VK_SAMPLE_COUNT_1_BIT,
3191 
3192 			vk::VK_IMAGE_TILING_OPTIMAL,
3193 			vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
3194 			vk::VK_SHARING_MODE_EXCLUSIVE,
3195 
3196 			(deUint32)queueFamilies.size(),
3197 			&queueFamilies[0],
3198 			vk::VK_IMAGE_LAYOUT_UNDEFINED
3199 		};
3200 
3201 		m_srcImage = vk::createImage(vkd, device, &createInfo);
3202 	}
3203 
3204 	m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_srcImage, 0);
3205 
3206 	{
3207 		const vk::Unique<vk::VkBuffer>			srcBuffer		(createBuffer(vkd, device, 4 * m_imageWidth * m_imageHeight, vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
3208 		const vk::Unique<vk::VkDeviceMemory>	memory			(bindBufferMemory(vki, vkd, physicalDevice, device, *srcBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
3209 		const vk::Unique<vk::VkCommandBuffer>	commandBuffer	(createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
3210 		const vk::VkImageMemoryBarrier			preImageBarrier	=
3211 		{
3212 			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
3213 			DE_NULL,
3214 
3215 			0,
3216 			vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3217 
3218 			vk::VK_IMAGE_LAYOUT_UNDEFINED,
3219 			vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
3220 
3221 			VK_QUEUE_FAMILY_IGNORED,
3222 			VK_QUEUE_FAMILY_IGNORED,
3223 
3224 			*m_srcImage,
3225 			{
3226 				vk::VK_IMAGE_ASPECT_COLOR_BIT,
3227 				0,	// Mip level
3228 				1,	// Mip level count
3229 				0,	// Layer
3230 				1	// Layer count
3231 			}
3232 		};
3233 		const vk::VkImageMemoryBarrier			postImageBarrier =
3234 		{
3235 			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
3236 			DE_NULL,
3237 
3238 			vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3239 			0,
3240 
3241 			vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
3242 			vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
3243 
3244 			VK_QUEUE_FAMILY_IGNORED,
3245 			VK_QUEUE_FAMILY_IGNORED,
3246 
3247 			*m_srcImage,
3248 			{
3249 				vk::VK_IMAGE_ASPECT_COLOR_BIT,
3250 				0,	// Mip level
3251 				1,	// Mip level count
3252 				0,	// Layer
3253 				1	// Layer count
3254 			}
3255 		};
3256 		const vk::VkBufferImageCopy				region				=
3257 		{
3258 			0,
3259 			0, 0,
3260 			{
3261 				vk::VK_IMAGE_ASPECT_COLOR_BIT,
3262 				0,	// mipLevel
3263 				0,	// arrayLayer
3264 				1	// layerCount
3265 			},
3266 			{ 0, 0, 0 },
3267 			{
3268 				(deUint32)m_imageWidth,
3269 				(deUint32)m_imageHeight,
3270 				1u
3271 			}
3272 		};
3273 
3274 		{
3275 			void* const	ptr	= mapMemory(vkd, device, *memory, 4 * m_imageWidth * m_imageHeight);
3276 			de::Random	rng	(m_seed);
3277 
3278 			{
3279 				deUint8* const	data = (deUint8*)ptr;
3280 
3281 				for (size_t ndx = 0; ndx < (size_t)(4 * m_imageWidth * m_imageHeight); ndx++)
3282 					data[ndx] = rng.getUint8();
3283 			}
3284 
3285 			vk::flushMappedMemoryRange(vkd, device, *memory, 0, VK_WHOLE_SIZE);
3286 			vkd.unmapMemory(device, *memory);
3287 		}
3288 
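		// Transition the source image to TRANSFER_DST_OPTIMAL, upload the random data, then move it to TRANSFER_SRC_OPTIMAL for the copy recorded in submit().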
3289 		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &preImageBarrier);
3290 		vkd.cmdCopyBufferToImage(*commandBuffer, *srcBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
3291 		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &postImageBarrier);
3292 
3293 		endCommandBuffer(vkd, *commandBuffer);
3294 		submitCommandsAndWait(vkd, device, queue, *commandBuffer);
3295 	}
3296 }
3297 
3298 void ImageCopyFromImage::logSubmit (TestLog& log, size_t commandIndex) const
3299 {
3300 	log << TestLog::Message << commandIndex << ":" << getName() << " Copy image data from another image" << TestLog::EndMessage;
3301 }
3302 
3303 void ImageCopyFromImage::submit (SubmitContext& context)
3304 {
3305 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
3306 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
3307 	const vk::VkImageCopy		region			=
3308 	{
3309 		{
3310 			vk::VK_IMAGE_ASPECT_COLOR_BIT,
3311 			0,	// mipLevel
3312 			0,	// arrayLayer
3313 			1	// layerCount
3314 		},
3315 		{ 0, 0, 0 },
3316 
3317 		{
3318 			vk::VK_IMAGE_ASPECT_COLOR_BIT,
3319 			0,	// mipLevel
3320 			0,	// arrayLayer
3321 			1	// layerCount
3322 		},
3323 		{ 0, 0, 0 },
3324 		{
3325 			(deUint32)m_imageWidth,
3326 			(deUint32)m_imageHeight,
3327 			1u
3328 		}
3329 	};
3330 
3331 	vkd.cmdCopyImage(commandBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, context.getImage(), m_imageLayout, 1, &region);
3332 }
3333 
3334 void ImageCopyFromImage::verify (VerifyContext& context, size_t)
3335 {
3336 	ReferenceMemory&	reference	(context.getReference());
3337 	de::Random			rng			(m_seed);
3338 
3339 	reference.setUndefined(0, (size_t)m_imageMemorySize);
3340 
3341 	{
3342 		const PixelBufferAccess&	refAccess	(context.getReferenceImage().getAccess());
3343 
3344 		for (deInt32 y = 0; y < m_imageHeight; y++)
3345 		for (deInt32 x = 0; x < m_imageWidth; x++)
3346 		{
3347 			const deUint8 r8 = rng.getUint8();
3348 			const deUint8 g8 = rng.getUint8();
3349 			const deUint8 b8 = rng.getUint8();
3350 			const deUint8 a8 = rng.getUint8();
3351 
3352 			refAccess.setPixel(UVec4(r8, g8, b8, a8), x, y);
3353 		}
3354 	}
3355 }
3356 
3357 class ImageCopyToImage : public CmdCommand
3358 {
3359 public:
3360 									ImageCopyToImage	(vk::VkImageLayout imageLayout) : m_imageLayout(imageLayout) {}
3361 									~ImageCopyToImage	(void) {}
3362 	const char*						getName				(void) const { return "ImageCopyToImage"; }
3363 
3364 	void							logPrepare			(TestLog& log, size_t commandIndex) const;
3365 	void							prepare				(PrepareContext& context);
3366 	void							logSubmit			(TestLog& log, size_t commandIndex) const;
3367 	void							submit				(SubmitContext& context);
3368 	void							verify				(VerifyContext& context, size_t commandIndex);
3369 
3370 private:
3371 	const vk::VkImageLayout			m_imageLayout;
3372 	deInt32							m_imageWidth;
3373 	deInt32							m_imageHeight;
3374 	vk::VkDeviceSize				m_imageMemorySize;
3375 	vk::Move<vk::VkImage>			m_dstImage;
3376 	vk::Move<vk::VkDeviceMemory>	m_memory;
3377 };
3378 
3379 void ImageCopyToImage::logPrepare (TestLog& log, size_t commandIndex) const
3380 {
3381 	log << TestLog::Message << commandIndex << ":" << getName() << " Allocate destination image for image to image copy." << TestLog::EndMessage;
3382 }
3383 
3384 void ImageCopyToImage::prepare (PrepareContext& context)
3385 {
3386 	const vk::InstanceInterface&	vki				= context.getContext().getInstanceInterface();
3387 	const vk::DeviceInterface&		vkd				= context.getContext().getDeviceInterface();
3388 	const vk::VkPhysicalDevice		physicalDevice	= context.getContext().getPhysicalDevice();
3389 	const vk::VkDevice				device			= context.getContext().getDevice();
3390 	const vk::VkQueue				queue			= context.getContext().getQueue();
3391 	const vk::VkCommandPool			commandPool		= context.getContext().getCommandPool();
3392 	const vector<deUint32>&			queueFamilies	= context.getContext().getQueueFamilies();
3393 
3394 	m_imageWidth		= context.getImageWidth();
3395 	m_imageHeight		= context.getImageHeight();
3396 	m_imageMemorySize	= context.getImageMemorySize();
3397 
3398 	{
3399 		const vk::VkImageCreateInfo	createInfo =
3400 		{
3401 			vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
3402 			DE_NULL,
3403 
3404 			0,
3405 			vk::VK_IMAGE_TYPE_2D,
3406 			vk::VK_FORMAT_R8G8B8A8_UNORM,
3407 			{
3408 				(deUint32)m_imageWidth,
3409 				(deUint32)m_imageHeight,
3410 				1u,
3411 			},
3412 			1, 1, // mipLevels, arrayLayers
3413 			vk::VK_SAMPLE_COUNT_1_BIT,
3414 
3415 			vk::VK_IMAGE_TILING_OPTIMAL,
3416 			vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
3417 			vk::VK_SHARING_MODE_EXCLUSIVE,
3418 
3419 			(deUint32)queueFamilies.size(),
3420 			&queueFamilies[0],
3421 			vk::VK_IMAGE_LAYOUT_UNDEFINED
3422 		};
3423 
3424 		m_dstImage = vk::createImage(vkd, device, &createInfo);
3425 	}
3426 
3427 	m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_dstImage, 0);
3428 
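	// Transition the freshly created destination image from UNDEFINED to TRANSFER_DST_OPTIMAL before it is used as a copy target.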
3429 	{
3430 		const vk::Unique<vk::VkCommandBuffer>	commandBuffer	(createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
3431 		const vk::VkImageMemoryBarrier			barrier			=
3432 		{
3433 			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
3434 			DE_NULL,
3435 
3436 			0,
3437 			vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3438 
3439 			vk::VK_IMAGE_LAYOUT_UNDEFINED,
3440 			vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
3441 
3442 			VK_QUEUE_FAMILY_IGNORED,
3443 			VK_QUEUE_FAMILY_IGNORED,
3444 
3445 			*m_dstImage,
3446 			{
3447 				vk::VK_IMAGE_ASPECT_COLOR_BIT,
3448 				0,	// Mip level
3449 				1,	// Mip level count
3450 				0,	// Layer
3451 				1	// Layer count
3452 			}
3453 		};
3454 
3455 		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &barrier);
3456 
3457 		endCommandBuffer(vkd, *commandBuffer);
3458 		submitCommandsAndWait(vkd, device, queue, *commandBuffer);
3459 	}
3460 }
3461 
3462 void ImageCopyToImage::logSubmit (TestLog& log, size_t commandIndex) const
3463 {
3464 	log << TestLog::Message << commandIndex << ":" << getName() << " Copy image to another image" << TestLog::EndMessage;
3465 }
3466 
3467 void ImageCopyToImage::submit (SubmitContext& context)
3468 {
3469 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
3470 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
3471 	const vk::VkImageCopy		region			=
3472 	{
3473 		{
3474 			vk::VK_IMAGE_ASPECT_COLOR_BIT,
3475 			0,	// mipLevel
3476 			0,	// arrayLayer
3477 			1	// layerCount
3478 		},
3479 		{ 0, 0, 0 },
3480 
3481 		{
3482 			vk::VK_IMAGE_ASPECT_COLOR_BIT,
3483 			0,	// mipLevel
3484 			0,	// arrayLayer
3485 			1	// layerCount
3486 		},
3487 		{ 0, 0, 0 },
3488 		{
3489 			(deUint32)m_imageWidth,
3490 			(deUint32)m_imageHeight,
3491 			1u
3492 		}
3493 	};
3494 
3495 	vkd.cmdCopyImage(commandBuffer, context.getImage(), m_imageLayout, *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
3496 }
3497 
3498 void ImageCopyToImage::verify (VerifyContext& context, size_t commandIndex)
3499 {
3500 	tcu::ResultCollector&					resultCollector	(context.getResultCollector());
3501 	const vk::InstanceInterface&			vki				= context.getContext().getInstanceInterface();
3502 	const vk::DeviceInterface&				vkd				= context.getContext().getDeviceInterface();
3503 	const vk::VkPhysicalDevice				physicalDevice	= context.getContext().getPhysicalDevice();
3504 	const vk::VkDevice						device			= context.getContext().getDevice();
3505 	const vk::VkQueue						queue			= context.getContext().getQueue();
3506 	const vk::VkCommandPool					commandPool		= context.getContext().getCommandPool();
3507 	const vk::Unique<vk::VkCommandBuffer>	commandBuffer	(createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
3508 	const vector<deUint32>&					queueFamilies	= context.getContext().getQueueFamilies();
3509 	const vk::Unique<vk::VkBuffer>			dstBuffer		(createBuffer(vkd, device, 4 * m_imageWidth * m_imageHeight, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
3510 	const vk::Unique<vk::VkDeviceMemory>	memory			(bindBufferMemory(vki, vkd, physicalDevice, device, *dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
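	// Read the destination image back through a host-visible buffer so the result can be compared against the reference image.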
3511 	{
3512 		const vk::VkImageMemoryBarrier		imageBarrier	=
3513 		{
3514 			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
3515 			DE_NULL,
3516 
3517 			vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3518 			vk::VK_ACCESS_TRANSFER_READ_BIT,
3519 
3520 			vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
3521 			vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
3522 
3523 			VK_QUEUE_FAMILY_IGNORED,
3524 			VK_QUEUE_FAMILY_IGNORED,
3525 
3526 			*m_dstImage,
3527 			{
3528 				vk::VK_IMAGE_ASPECT_COLOR_BIT,
3529 				0,	// Mip level
3530 				1,	// Mip level count
3531 				0,	// Layer
3532 				1	// Layer count
3533 			}
3534 		};
3535 		const vk::VkBufferMemoryBarrier bufferBarrier =
3536 		{
3537 			vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
3538 			DE_NULL,
3539 
3540 			vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3541 			vk::VK_ACCESS_HOST_READ_BIT,
3542 
3543 			VK_QUEUE_FAMILY_IGNORED,
3544 			VK_QUEUE_FAMILY_IGNORED,
3545 			*dstBuffer,
3546 			0,
3547 			VK_WHOLE_SIZE
3548 		};
3549 		const vk::VkBufferImageCopy	region =
3550 		{
3551 			0,
3552 			0, 0,
3553 			{
3554 				vk::VK_IMAGE_ASPECT_COLOR_BIT,
3555 				0,	// mipLevel
3556 				0,	// arrayLayer
3557 				1	// layerCount
3558 			},
3559 			{ 0, 0, 0 },
3560 			{
3561 				(deUint32)m_imageWidth,
3562 				(deUint32)m_imageHeight,
3563 				1u
3564 			}
3565 		};
3566 
3567 		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &imageBarrier);
3568 		vkd.cmdCopyImageToBuffer(*commandBuffer, *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *dstBuffer, 1, &region);
3569 		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &bufferBarrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
3570 	}
3571 
3572 	endCommandBuffer(vkd, *commandBuffer);
3573 	submitCommandsAndWait(vkd, device, queue, *commandBuffer);
3574 
3575 	{
3576 		void* const	ptr		= mapMemory(vkd, device, *memory, 4 * m_imageWidth * m_imageHeight);
3577 
3578 		vk::invalidateMappedMemoryRange(vkd, device, *memory, 0, VK_WHOLE_SIZE);
3579 
3580 		{
3581 			const deUint8* const			data		= (const deUint8*)ptr;
3582 			const ConstPixelBufferAccess	resAccess	(TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_imageWidth, m_imageHeight, 1, data);
3583 			const ConstPixelBufferAccess&	refAccess	(context.getReferenceImage().getAccess());
3584 
3585 			if (!tcu::intThresholdCompare(context.getLog(), (de::toString(commandIndex) + ":" + getName()).c_str(), (de::toString(commandIndex) + ":" + getName()).c_str(), refAccess, resAccess, UVec4(0), tcu::COMPARE_LOG_ON_ERROR))
3586 				resultCollector.fail(de::toString(commandIndex) + ":" + getName() + " Image comparison failed");
3587 		}
3588 
3589 		vkd.unmapMemory(device, *memory);
3590 	}
3591 }
3592 
3593 enum BlitScale
3594 {
3595 	BLIT_SCALE_20,
3596 	BLIT_SCALE_10,
3597 };
3598 
3599 class ImageBlitFromImage : public CmdCommand
3600 {
3601 public:
3602 									ImageBlitFromImage	(deUint32 seed, BlitScale scale, vk::VkImageLayout imageLayout) : m_seed(seed), m_scale(scale), m_imageLayout(imageLayout) {}
3603 									~ImageBlitFromImage	(void) {}
3604 	const char*						getName				(void) const { return "ImageBlitFromImage"; }
3605 
3606 	void							logPrepare			(TestLog& log, size_t commandIndex) const;
3607 	void							prepare				(PrepareContext& context);
3608 	void							logSubmit			(TestLog& log, size_t commandIndex) const;
3609 	void							submit				(SubmitContext& context);
3610 	void							verify				(VerifyContext& context, size_t commandIndex);
3611 
3612 private:
3613 	const deUint32					m_seed;
3614 	const BlitScale					m_scale;
3615 	const vk::VkImageLayout			m_imageLayout;
3616 	deInt32							m_imageWidth;
3617 	deInt32							m_imageHeight;
3618 	vk::VkDeviceSize				m_imageMemorySize;
3619 	deInt32							m_srcImageWidth;
3620 	deInt32							m_srcImageHeight;
3621 	vk::Move<vk::VkImage>			m_srcImage;
3622 	vk::Move<vk::VkDeviceMemory>	m_memory;
3623 };
3624 
3625 void ImageBlitFromImage::logPrepare (TestLog& log, size_t commandIndex) const
3626 {
3627 	log << TestLog::Message << commandIndex << ":" << getName() << " Allocate source image for image to image blit." << TestLog::EndMessage;
3628 }
3629 
3630 void ImageBlitFromImage::prepare (PrepareContext& context)
3631 {
3632 	const vk::InstanceInterface&	vki				= context.getContext().getInstanceInterface();
3633 	const vk::DeviceInterface&		vkd				= context.getContext().getDeviceInterface();
3634 	const vk::VkPhysicalDevice		physicalDevice	= context.getContext().getPhysicalDevice();
3635 	const vk::VkDevice				device			= context.getContext().getDevice();
3636 	const vk::VkQueue				queue			= context.getContext().getQueue();
3637 	const vk::VkCommandPool			commandPool		= context.getContext().getCommandPool();
3638 	const vector<deUint32>&			queueFamilies	= context.getContext().getQueueFamilies();
3639 
3640 	m_imageWidth		= context.getImageWidth();
3641 	m_imageHeight		= context.getImageHeight();
3642 	m_imageMemorySize	= context.getImageMemorySize();
3643 
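	// For BLIT_SCALE_20 the source image is created at half the target size (clamped to 1), so the blit upscales by 2x.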
3644 	if (m_scale == BLIT_SCALE_10)
3645 	{
3646 		m_srcImageWidth			= m_imageWidth;
3647 		m_srcImageHeight		= m_imageHeight;
3648 	}
3649 	else if (m_scale == BLIT_SCALE_20)
3650 	{
3651 		m_srcImageWidth			= m_imageWidth == 1 ? 1 : m_imageWidth / 2;
3652 		m_srcImageHeight		= m_imageHeight == 1 ? 1 : m_imageHeight / 2;
3653 	}
3654 	else
3655 		DE_FATAL("Unsupported scale");
3656 
3657 	{
3658 		const vk::VkImageCreateInfo	createInfo =
3659 		{
3660 			vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
3661 			DE_NULL,
3662 
3663 			0,
3664 			vk::VK_IMAGE_TYPE_2D,
3665 			vk::VK_FORMAT_R8G8B8A8_UNORM,
3666 			{
3667 				(deUint32)m_srcImageWidth,
3668 				(deUint32)m_srcImageHeight,
3669 				1u,
3670 			},
3671 			1, 1, // mipLevels, arrayLayers
3672 			vk::VK_SAMPLE_COUNT_1_BIT,
3673 
3674 			vk::VK_IMAGE_TILING_OPTIMAL,
3675 			vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
3676 			vk::VK_SHARING_MODE_EXCLUSIVE,
3677 
3678 			(deUint32)queueFamilies.size(),
3679 			&queueFamilies[0],
3680 			vk::VK_IMAGE_LAYOUT_UNDEFINED
3681 		};
3682 
3683 		m_srcImage = vk::createImage(vkd, device, &createInfo);
3684 	}
3685 
3686 	m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_srcImage, 0);
3687 
3688 	{
3689 		const vk::Unique<vk::VkBuffer>			srcBuffer		(createBuffer(vkd, device, 4 * m_srcImageWidth * m_srcImageHeight, vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
3690 		const vk::Unique<vk::VkDeviceMemory>	memory			(bindBufferMemory(vki, vkd, physicalDevice, device, *srcBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
3691 		const vk::Unique<vk::VkCommandBuffer>	commandBuffer	(createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
3692 		const vk::VkImageMemoryBarrier			preImageBarrier	=
3693 		{
3694 			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
3695 			DE_NULL,
3696 
3697 			0,
3698 			vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3699 
3700 			vk::VK_IMAGE_LAYOUT_UNDEFINED,
3701 			vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
3702 
3703 			VK_QUEUE_FAMILY_IGNORED,
3704 			VK_QUEUE_FAMILY_IGNORED,
3705 
3706 			*m_srcImage,
3707 			{
3708 				vk::VK_IMAGE_ASPECT_COLOR_BIT,
3709 				0,	// Mip level
3710 				1,	// Mip level count
3711 				0,	// Layer
3712 				1	// Layer count
3713 			}
3714 		};
3715 		const vk::VkImageMemoryBarrier			postImageBarrier =
3716 		{
3717 			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
3718 			DE_NULL,
3719 
3720 			vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3721 			0,
3722 
3723 			vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
3724 			vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
3725 
3726 			VK_QUEUE_FAMILY_IGNORED,
3727 			VK_QUEUE_FAMILY_IGNORED,
3728 
3729 			*m_srcImage,
3730 			{
3731 				vk::VK_IMAGE_ASPECT_COLOR_BIT,
3732 				0,	// Mip level
3733 				1,	// Mip level count
3734 				0,	// Layer
3735 				1	// Layer count
3736 			}
3737 		};
3738 		const vk::VkBufferImageCopy				region				=
3739 		{
3740 			0,
3741 			0, 0,
3742 			{
3743 				vk::VK_IMAGE_ASPECT_COLOR_BIT,
3744 				0,	// mipLevel
3745 				0,	// arrayLayer
3746 				1	// layerCount
3747 			},
3748 			{ 0, 0, 0 },
3749 			{
3750 				(deUint32)m_srcImageWidth,
3751 				(deUint32)m_srcImageHeight,
3752 				1u
3753 			}
3754 		};
3755 
3756 		{
3757 			void* const	ptr	= mapMemory(vkd, device, *memory, 4 * m_srcImageWidth * m_srcImageHeight);
3758 			de::Random	rng	(m_seed);
3759 
3760 			{
3761 				deUint8* const	data = (deUint8*)ptr;
3762 
3763 				for (size_t ndx = 0; ndx < (size_t)(4 * m_srcImageWidth * m_srcImageHeight); ndx++)
3764 					data[ndx] = rng.getUint8();
3765 			}
3766 
3767 			vk::flushMappedMemoryRange(vkd, device, *memory, 0, VK_WHOLE_SIZE);
3768 			vkd.unmapMemory(device, *memory);
3769 		}
3770 
3771 		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &preImageBarrier);
3772 		vkd.cmdCopyBufferToImage(*commandBuffer, *srcBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
3773 		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &postImageBarrier);
3774 
3775 		endCommandBuffer(vkd, *commandBuffer);
3776 		submitCommandsAndWait(vkd, device, queue, *commandBuffer);
3777 	}
3778 }
3779 
3780 void ImageBlitFromImage::logSubmit (TestLog& log, size_t commandIndex) const
3781 {
3782 	log << TestLog::Message << commandIndex << ":" << getName() << " Blit from another image" << (m_scale == BLIT_SCALE_20 ? " scale 2x" : "")  << TestLog::EndMessage;
3783 }
3784 
3785 void ImageBlitFromImage::submit (SubmitContext& context)
3786 {
3787 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
3788 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
3789 	const vk::VkImageBlit		region			=
3790 	{
3791 		// Src
3792 		{
3793 			vk::VK_IMAGE_ASPECT_COLOR_BIT,
3794 			0,	// mipLevel
3795 			0,	// arrayLayer
3796 			1	// layerCount
3797 		},
3798 		{
3799 			{ 0, 0, 0 },
3800 			{
3801 				m_srcImageWidth,
3802 				m_srcImageHeight,
3803 				1
3804 			},
3805 		},
3806 
3807 		// Dst
3808 		{
3809 			vk::VK_IMAGE_ASPECT_COLOR_BIT,
3810 			0,	// mipLevel
3811 			0,	// arrayLayer
3812 			1	// layerCount
3813 		},
3814 		{
3815 			{ 0, 0, 0 },
3816 			{
3817 				m_imageWidth,
3818 				m_imageHeight,
3819 				1u
3820 			}
3821 		}
3822 	};
3823 	vkd.cmdBlitImage(commandBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, context.getImage(), m_imageLayout, 1, &region, vk::VK_FILTER_NEAREST);
3824 }
3825 
3826 void ImageBlitFromImage::verify (VerifyContext& context, size_t)
3827 {
3828 	ReferenceMemory&	reference	(context.getReference());
3829 	de::Random			rng			(m_seed);
3830 
3831 	reference.setUndefined(0, (size_t)m_imageMemorySize);
3832 
3833 	{
3834 		const PixelBufferAccess&	refAccess	(context.getReferenceImage().getAccess());
3835 
3836 		if (m_scale == BLIT_SCALE_10)
3837 		{
3838 			for (deInt32 y = 0; y < m_imageHeight; y++)
3839 			for (deInt32 x = 0; x < m_imageWidth; x++)
3840 			{
3841 				const deUint8 r8 = rng.getUint8();
3842 				const deUint8 g8 = rng.getUint8();
3843 				const deUint8 b8 = rng.getUint8();
3844 				const deUint8 a8 = rng.getUint8();
3845 
3846 				refAccess.setPixel(UVec4(r8, g8, b8, a8), x, y);
3847 			}
3848 		}
3849 		else if (m_scale == BLIT_SCALE_20)
3850 		{
3851 			tcu::TextureLevel	source	(TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_srcImageWidth, m_srcImageHeight);
3852 			const float			xscale	= ((float)m_srcImageWidth)  / (float)m_imageWidth;
3853 			const float			yscale	= ((float)m_srcImageHeight) / (float)m_imageHeight;
3854 
3855 			for (deInt32 y = 0; y < m_srcImageHeight; y++)
3856 			for (deInt32 x = 0; x < m_srcImageWidth; x++)
3857 			{
3858 				const deUint8 r8 = rng.getUint8();
3859 				const deUint8 g8 = rng.getUint8();
3860 				const deUint8 b8 = rng.getUint8();
3861 				const deUint8 a8 = rng.getUint8();
3862 
3863 				source.getAccess().setPixel(UVec4(r8, g8, b8, a8), x, y);
3864 			}
3865 
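			// Nearest-filter reference: map each destination pixel center back to source coordinates and take that texel.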
3866 			for (deInt32 y = 0; y < m_imageHeight; y++)
3867 			for (deInt32 x = 0; x < m_imageWidth; x++)
3868 				refAccess.setPixel(source.getAccess().getPixelUint(int((float(x) + 0.5f) * xscale), int((float(y) + 0.5f) * yscale)), x, y);
3869 		}
3870 		else
3871 			DE_FATAL("Unsupported scale");
3872 	}
3873 }
3874 
3875 class ImageBlitToImage : public CmdCommand
3876 {
3877 public:
3878 									ImageBlitToImage	(BlitScale scale, vk::VkImageLayout imageLayout) : m_scale(scale), m_imageLayout(imageLayout) {}
3879 									~ImageBlitToImage	(void) {}
3880 	const char*						getName				(void) const { return "ImageBlitToImage"; }
3881 
3882 	void							logPrepare			(TestLog& log, size_t commandIndex) const;
3883 	void							prepare				(PrepareContext& context);
3884 	void							logSubmit			(TestLog& log, size_t commandIndex) const;
3885 	void							submit				(SubmitContext& context);
3886 	void							verify				(VerifyContext& context, size_t commandIndex);
3887 
3888 private:
3889 	const BlitScale					m_scale;
3890 	const vk::VkImageLayout			m_imageLayout;
3891 	deInt32							m_imageWidth;
3892 	deInt32							m_imageHeight;
3893 	vk::VkDeviceSize				m_imageMemorySize;
3894 	deInt32							m_dstImageWidth;
3895 	deInt32							m_dstImageHeight;
3896 	vk::Move<vk::VkImage>			m_dstImage;
3897 	vk::Move<vk::VkDeviceMemory>	m_memory;
3898 };
3899 
3900 void ImageBlitToImage::logPrepare (TestLog& log, size_t commandIndex) const
3901 {
3902 	log << TestLog::Message << commandIndex << ":" << getName() << " Allocate destination image for image to image blit." << TestLog::EndMessage;
3903 }
3904 
3905 void ImageBlitToImage::prepare (PrepareContext& context)
3906 {
3907 	const vk::InstanceInterface&	vki				= context.getContext().getInstanceInterface();
3908 	const vk::DeviceInterface&		vkd				= context.getContext().getDeviceInterface();
3909 	const vk::VkPhysicalDevice		physicalDevice	= context.getContext().getPhysicalDevice();
3910 	const vk::VkDevice				device			= context.getContext().getDevice();
3911 	const vk::VkQueue				queue			= context.getContext().getQueue();
3912 	const vk::VkCommandPool			commandPool		= context.getContext().getCommandPool();
3913 	const vector<deUint32>&			queueFamilies	= context.getContext().getQueueFamilies();
3914 
3915 	m_imageWidth		= context.getImageWidth();
3916 	m_imageHeight		= context.getImageHeight();
3917 	m_imageMemorySize	= context.getImageMemorySize();
3918 
3919 	if (m_scale == BLIT_SCALE_10)
3920 	{
3921 		m_dstImageWidth		= context.getImageWidth();
3922 		m_dstImageHeight	= context.getImageHeight();
3923 	}
3924 	else if (m_scale == BLIT_SCALE_20)
3925 	{
3926 		m_dstImageWidth		= context.getImageWidth() * 2;
3927 		m_dstImageHeight	= context.getImageHeight() * 2;
3928 	}
3929 	else
3930 		DE_FATAL("Unsupportd blit scale");
3931 
3932 	{
3933 		const vk::VkImageCreateInfo	createInfo =
3934 		{
3935 			vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
3936 			DE_NULL,
3937 
3938 			0,
3939 			vk::VK_IMAGE_TYPE_2D,
3940 			vk::VK_FORMAT_R8G8B8A8_UNORM,
3941 			{
3942 				(deUint32)m_dstImageWidth,
3943 				(deUint32)m_dstImageHeight,
3944 				1u,
3945 			},
3946 			1, 1, // mipLevels, arrayLayers
3947 			vk::VK_SAMPLE_COUNT_1_BIT,
3948 
3949 			vk::VK_IMAGE_TILING_OPTIMAL,
3950 			vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
3951 			vk::VK_SHARING_MODE_EXCLUSIVE,
3952 
3953 			(deUint32)queueFamilies.size(),
3954 			&queueFamilies[0],
3955 			vk::VK_IMAGE_LAYOUT_UNDEFINED
3956 		};
3957 
3958 		m_dstImage = vk::createImage(vkd, device, &createInfo);
3959 	}
3960 
3961 	m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_dstImage, 0);
3962 
3963 	{
3964 		const vk::Unique<vk::VkCommandBuffer>	commandBuffer	(createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
3965 		const vk::VkImageMemoryBarrier			barrier			=
3966 		{
3967 			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
3968 			DE_NULL,
3969 
3970 			0,
3971 			vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3972 
3973 			vk::VK_IMAGE_LAYOUT_UNDEFINED,
3974 			vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
3975 
3976 			VK_QUEUE_FAMILY_IGNORED,
3977 			VK_QUEUE_FAMILY_IGNORED,
3978 
3979 			*m_dstImage,
3980 			{
3981 				vk::VK_IMAGE_ASPECT_COLOR_BIT,
3982 				0,	// Mip level
3983 				1,	// Mip level count
3984 				0,	// Layer
3985 				1	// Layer count
3986 			}
3987 		};
3988 
3989 		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &barrier);
3990 
3991 		endCommandBuffer(vkd, *commandBuffer);
3992 		submitCommandsAndWait(vkd, device, queue, *commandBuffer);
3993 	}
3994 }
3995 
3996 void ImageBlitToImage::logSubmit (TestLog& log, size_t commandIndex) const
3997 {
3998 	log << TestLog::Message << commandIndex << ":" << getName() << " Blit image to another image" << (m_scale == BLIT_SCALE_20 ? " scale 2x" : "")  << TestLog::EndMessage;
3999 }
4000 
4001 void ImageBlitToImage::submit (SubmitContext& context)
4002 {
4003 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
4004 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
4005 	const vk::VkImageBlit		region			=
4006 	{
4007 		// Src
4008 		{
4009 			vk::VK_IMAGE_ASPECT_COLOR_BIT,
4010 			0,	// mipLevel
4011 			0,	// arrayLayer
4012 			1	// layerCount
4013 		},
4014 		{
4015 			{ 0, 0, 0 },
4016 			{
4017 				m_imageWidth,
4018 				m_imageHeight,
4019 				1
4020 			},
4021 		},
4022 
4023 		// Dst
4024 		{
4025 			vk::VK_IMAGE_ASPECT_COLOR_BIT,
4026 			0,	// mipLevel
4027 			0,	// arrayLayer
4028 			1	// layerCount
4029 		},
4030 		{
4031 			{ 0, 0, 0 },
4032 			{
4033 				m_dstImageWidth,
4034 				m_dstImageHeight,
4035 				1u
4036 			}
4037 		}
4038 	};
4039 	vkd.cmdBlitImage(commandBuffer, context.getImage(), m_imageLayout, *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region, vk::VK_FILTER_NEAREST);
4040 }
4041 
4042 void ImageBlitToImage::verify (VerifyContext& context, size_t commandIndex)
4043 {
4044 	tcu::ResultCollector&					resultCollector	(context.getResultCollector());
4045 	const vk::InstanceInterface&			vki				= context.getContext().getInstanceInterface();
4046 	const vk::DeviceInterface&				vkd				= context.getContext().getDeviceInterface();
4047 	const vk::VkPhysicalDevice				physicalDevice	= context.getContext().getPhysicalDevice();
4048 	const vk::VkDevice						device			= context.getContext().getDevice();
4049 	const vk::VkQueue						queue			= context.getContext().getQueue();
4050 	const vk::VkCommandPool					commandPool		= context.getContext().getCommandPool();
4051 	const vk::Unique<vk::VkCommandBuffer>	commandBuffer	(createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
4052 	const vector<deUint32>&					queueFamilies	= context.getContext().getQueueFamilies();
4053 	const vk::Unique<vk::VkBuffer>			dstBuffer		(createBuffer(vkd, device, 4 * m_dstImageWidth * m_dstImageHeight, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
4054 	const vk::Unique<vk::VkDeviceMemory>	memory			(bindBufferMemory(vki, vkd, physicalDevice, device, *dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
4055 	{
4056 		const vk::VkImageMemoryBarrier		imageBarrier	=
4057 		{
4058 			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
4059 			DE_NULL,
4060 
4061 			vk::VK_ACCESS_TRANSFER_WRITE_BIT,
4062 			vk::VK_ACCESS_TRANSFER_READ_BIT,
4063 
4064 			vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
4065 			vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
4066 
4067 			VK_QUEUE_FAMILY_IGNORED,
4068 			VK_QUEUE_FAMILY_IGNORED,
4069 
4070 			*m_dstImage,
4071 			{
4072 				vk::VK_IMAGE_ASPECT_COLOR_BIT,
4073 				0,	// Mip level
4074 				1,	// Mip level count
4075 				0,	// Layer
4076 				1	// Layer count
4077 			}
4078 		};
4079 		const vk::VkBufferMemoryBarrier bufferBarrier =
4080 		{
4081 			vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
4082 			DE_NULL,
4083 
4084 			vk::VK_ACCESS_TRANSFER_WRITE_BIT,
4085 			vk::VK_ACCESS_HOST_READ_BIT,
4086 
4087 			VK_QUEUE_FAMILY_IGNORED,
4088 			VK_QUEUE_FAMILY_IGNORED,
4089 			*dstBuffer,
4090 			0,
4091 			VK_WHOLE_SIZE
4092 		};
4093 		const vk::VkBufferImageCopy	region =
4094 		{
4095 			0,
4096 			0, 0,
4097 			{
4098 				vk::VK_IMAGE_ASPECT_COLOR_BIT,
4099 				0,	// mipLevel
4100 				0,	// arrayLayer
4101 				1	// layerCount
4102 			},
4103 			{ 0, 0, 0 },
4104 			{
4105 				(deUint32)m_dstImageWidth,
4106 				(deUint32)m_dstImageHeight,
4107 				1
4108 			}
4109 		};
4110 
4111 		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &imageBarrier);
4112 		vkd.cmdCopyImageToBuffer(*commandBuffer, *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *dstBuffer, 1, &region);
4113 		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &bufferBarrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
4114 	}
4115 
4116 	endCommandBuffer(vkd, *commandBuffer);
4117 	submitCommandsAndWait(vkd, device, queue, *commandBuffer);
4118 
4119 	{
4120 		void* const	ptr		= mapMemory(vkd, device, *memory, 4 * m_dstImageWidth * m_dstImageHeight);
4121 
4122 		vk::invalidateMappedMemoryRange(vkd, device, *memory, 0, VK_WHOLE_SIZE);
4123 
4124 		if (m_scale == BLIT_SCALE_10)
4125 		{
4126 			const deUint8* const			data		= (const deUint8*)ptr;
4127 			const ConstPixelBufferAccess	resAccess	(TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_dstImageWidth, m_dstImageHeight, 1, data);
4128 			const ConstPixelBufferAccess&	refAccess	(context.getReferenceImage().getAccess());
4129 
4130 			if (!tcu::intThresholdCompare(context.getLog(), (de::toString(commandIndex) + ":" + getName()).c_str(), (de::toString(commandIndex) + ":" + getName()).c_str(), refAccess, resAccess, UVec4(0), tcu::COMPARE_LOG_ON_ERROR))
4131 				resultCollector.fail(de::toString(commandIndex) + ":" + getName() + " Image comparison failed");
4132 		}
4133 		else if (m_scale == BLIT_SCALE_20)
4134 		{
4135 			const deUint8* const			data		= (const deUint8*)ptr;
4136 			const ConstPixelBufferAccess	resAccess	(TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_dstImageWidth, m_dstImageHeight, 1, data);
4137 			tcu::TextureLevel				reference	(TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_dstImageWidth, m_dstImageHeight, 1);
4138 
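			// Build the 2x nearest-upscale reference: each 2x2 destination block replicates one source pixel.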
4139 			{
4140 				const ConstPixelBufferAccess&	refAccess	(context.getReferenceImage().getAccess());
4141 
4142 				for (deInt32 y = 0; y < m_dstImageHeight; y++)
4143 				for (deInt32 x = 0; x < m_dstImageWidth; x++)
4144 				{
4145 					reference.getAccess().setPixel(refAccess.getPixel(x/2, y/2), x, y);
4146 				}
4147 			}
4148 
4149 			if (!tcu::intThresholdCompare(context.getLog(), (de::toString(commandIndex) + ":" + getName()).c_str(), (de::toString(commandIndex) + ":" + getName()).c_str(), reference.getAccess(), resAccess, UVec4(0), tcu::COMPARE_LOG_ON_ERROR))
4150 				resultCollector.fail(de::toString(commandIndex) + ":" + getName() + " Image comparison failed");
4151 		}
4152 		else
4153 			DE_FATAL("Unknown scale");
4154 
4155 		vkd.unmapMemory(device, *memory);
4156 	}
4157 }
4158 
4159 class PrepareRenderPassContext
4160 {
4161 public:
4162 								PrepareRenderPassContext	(PrepareContext&	context,
4163 															 vk::VkRenderPass	renderPass,
4164 															 vk::VkFramebuffer	framebuffer,
4165 															 deInt32			targetWidth,
4166 															 deInt32			targetHeight)
4167 		: m_context			(context)
4168 		, m_renderPass		(renderPass)
4169 		, m_framebuffer		(framebuffer)
4170 		, m_targetWidth		(targetWidth)
4171 		, m_targetHeight	(targetHeight)
4172 	{
4173 	}
4174 
4175 	const Memory&				getMemory					(void) const { return m_context.getMemory(); }
4176 	const Context&				getContext					(void) const { return m_context.getContext(); }
4177 	const vk::BinaryCollection&	getBinaryCollection			(void) const { return m_context.getBinaryCollection(); }
4178 
4179 	vk::VkBuffer				getBuffer					(void) const { return m_context.getBuffer(); }
4180 	vk::VkDeviceSize			getBufferSize				(void) const { return m_context.getBufferSize(); }
4181 
4182 	vk::VkImage					getImage					(void) const { return m_context.getImage(); }
4183 	deInt32						getImageWidth				(void) const { return m_context.getImageWidth(); }
4184 	deInt32						getImageHeight				(void) const { return m_context.getImageHeight(); }
4185 	vk::VkImageLayout			getImageLayout				(void) const { return m_context.getImageLayout(); }
4186 
4187 	deInt32						getTargetWidth				(void) const { return m_targetWidth; }
4188 	deInt32						getTargetHeight				(void) const { return m_targetHeight; }
4189 
4190 	vk::VkRenderPass			getRenderPass				(void) const { return m_renderPass; }
4191 
4192 private:
4193 	PrepareContext&				m_context;
4194 	const vk::VkRenderPass		m_renderPass;
4195 	const vk::VkFramebuffer		m_framebuffer;
4196 	const deInt32				m_targetWidth;
4197 	const deInt32				m_targetHeight;
4198 };
4199 
4200 class VerifyRenderPassContext
4201 {
4202 public:
4203 							VerifyRenderPassContext		(VerifyContext&			context,
4204 														 deInt32				targetWidth,
4205 														 deInt32				targetHeight)
4206 		: m_context			(context)
4207 		, m_referenceTarget	(TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), targetWidth, targetHeight)
4208 	{
4209 	}
4210 
4211 	const Context&			getContext			(void) const { return m_context.getContext(); }
4212 	TestLog&				getLog				(void) const { return m_context.getLog(); }
4213 	tcu::ResultCollector&	getResultCollector	(void) const { return m_context.getResultCollector(); }
4214 
4215 	TextureLevel&			getReferenceTarget	(void) { return m_referenceTarget; }
4216 
4217 	ReferenceMemory&		getReference		(void) { return m_context.getReference(); }
4218 	TextureLevel&			getReferenceImage	(void) { return m_context.getReferenceImage(); }
4219 
4220 private:
4221 	VerifyContext&	m_context;
4222 	TextureLevel	m_referenceTarget;
4223 };
4224 
4225 class RenderPassCommand
4226 {
4227 public:
4228 	virtual				~RenderPassCommand	(void) {}
4229 	virtual const char*	getName				(void) const = 0;
4230 
4231 	// Log things that are done during prepare
4232 	virtual void		logPrepare			(TestLog&, size_t) const {}
4233 	// Log submitted calls etc.
4234 	virtual void		logSubmit			(TestLog&, size_t) const {}
4235 
4236 	// Allocate vulkan resources and prepare for submit.
4237 	virtual void		prepare				(PrepareRenderPassContext&) {}
4238 
4239 	// Submit commands to command buffer.
4240 	virtual void		submit				(SubmitContext&) {}
4241 
4242 	// Verify results
4243 	virtual void		verify				(VerifyRenderPassContext&, size_t) {}
4244 };
4245 
4246 class SubmitRenderPass : public CmdCommand
4247 {
4248 public:
4249 				SubmitRenderPass	(const vector<RenderPassCommand*>& commands);
4250 				~SubmitRenderPass	(void);
4251 	const char*	getName				(void) const { return "SubmitRenderPass"; }
4252 
4253 	void		logPrepare			(TestLog&, size_t) const;
4254 	void		logSubmit			(TestLog&, size_t) const;
4255 
4256 	void		prepare				(PrepareContext&);
4257 	void		submit				(SubmitContext&);
4258 
4259 	void		verify				(VerifyContext&, size_t);
4260 
4261 private:
4262 	const deInt32					m_targetWidth;
4263 	const deInt32					m_targetHeight;
4264 	vk::Move<vk::VkRenderPass>		m_renderPass;
4265 	vk::Move<vk::VkDeviceMemory>	m_colorTargetMemory;
4266 	de::MovePtr<vk::Allocation>		m_colorTargetMemory2;
4267 	vk::Move<vk::VkImage>			m_colorTarget;
4268 	vk::Move<vk::VkImageView>		m_colorTargetView;
4269 	vk::Move<vk::VkFramebuffer>		m_framebuffer;
4270 	vector<RenderPassCommand*>		m_commands;
4271 };
4272 
4273 SubmitRenderPass::SubmitRenderPass (const vector<RenderPassCommand*>& commands)
4274 	: m_targetWidth		(256)
4275 	, m_targetHeight	(256)
4276 	, m_commands		(commands)
4277 {
4278 }
4279 
4280 SubmitRenderPass::~SubmitRenderPass ()
4281 {
4282 	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4283 		delete m_commands[cmdNdx];
4284 }
4285 
4286 void SubmitRenderPass::logPrepare (TestLog& log, size_t commandIndex) const
4287 {
4288 	const string				sectionName	(de::toString(commandIndex) + ":" + getName());
4289 	const tcu::ScopedLogSection	section		(log, sectionName, sectionName);
4290 
4291 	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4292 	{
4293 		RenderPassCommand& command = *m_commands[cmdNdx];
4294 		command.logPrepare(log, cmdNdx);
4295 	}
4296 }
4297 
4298 void SubmitRenderPass::logSubmit (TestLog& log, size_t commandIndex) const
4299 {
4300 	const string				sectionName	(de::toString(commandIndex) + ":" + getName());
4301 	const tcu::ScopedLogSection	section		(log, sectionName, sectionName);
4302 
4303 	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4304 	{
4305 		RenderPassCommand& command = *m_commands[cmdNdx];
4306 		command.logSubmit(log, cmdNdx);
4307 	}
4308 }
4309 
4310 void SubmitRenderPass::prepare (PrepareContext& context)
4311 {
4312 	const vk::InstanceInterface&			vki				= context.getContext().getInstanceInterface();
4313 	const vk::DeviceInterface&				vkd				= context.getContext().getDeviceInterface();
4314 	const vk::VkPhysicalDevice				physicalDevice	= context.getContext().getPhysicalDevice();
4315 	const vk::VkDevice						device			= context.getContext().getDevice();
4316 	const vector<deUint32>&					queueFamilies	= context.getContext().getQueueFamilies();
4317 
4318 	{
4319 		const vk::VkImageCreateInfo createInfo =
4320 		{
4321 			vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
4322 			DE_NULL,
4323 			0u,
4324 
4325 			vk::VK_IMAGE_TYPE_2D,
4326 			vk::VK_FORMAT_R8G8B8A8_UNORM,
4327 			{ (deUint32)m_targetWidth, (deUint32)m_targetHeight, 1u },
4328 			1u,
4329 			1u,
4330 			vk::VK_SAMPLE_COUNT_1_BIT,
4331 			vk::VK_IMAGE_TILING_OPTIMAL,
4332 			vk::VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
4333 			vk::VK_SHARING_MODE_EXCLUSIVE,
4334 			(deUint32)queueFamilies.size(),
4335 			&queueFamilies[0],
4336 			vk::VK_IMAGE_LAYOUT_UNDEFINED
4337 		};
4338 
4339 		m_colorTarget = vk::createImage(vkd, device, &createInfo);
4340 	}
4341 
4342 	m_colorTargetMemory = bindImageMemory(vki, vkd, physicalDevice, device, *m_colorTarget, 0);
4343 
4344 	{
4345 		const vk::VkImageViewCreateInfo createInfo =
4346 		{
4347 			vk::VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
4348 			DE_NULL,
4349 
4350 			0u,
4351 			*m_colorTarget,
4352 			vk::VK_IMAGE_VIEW_TYPE_2D,
4353 			vk::VK_FORMAT_R8G8B8A8_UNORM,
4354 			{
4355 				vk::VK_COMPONENT_SWIZZLE_R,
4356 				vk::VK_COMPONENT_SWIZZLE_G,
4357 				vk::VK_COMPONENT_SWIZZLE_B,
4358 				vk::VK_COMPONENT_SWIZZLE_A
4359 			},
4360 			{
4361 				vk::VK_IMAGE_ASPECT_COLOR_BIT,
4362 				0u,
4363 				1u,
4364 				0u,
4365 				1u
4366 			}
4367 		};
4368 
4369 		m_colorTargetView = vk::createImageView(vkd, device, &createInfo);
4370 	}
4371 
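	// Single color attachment that is cleared on load and, per the makeRenderPass arguments, finished in TRANSFER_SRC_OPTIMAL for the readback in verify().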
4372 	m_renderPass = vk::makeRenderPass(vkd, device, vk::VK_FORMAT_R8G8B8A8_UNORM, vk::VK_FORMAT_UNDEFINED, vk::VK_ATTACHMENT_LOAD_OP_CLEAR, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
4373 
4374 	{
4375 		const vk::VkImageView				imageViews[]	=
4376 		{
4377 			*m_colorTargetView
4378 		};
4379 		const vk::VkFramebufferCreateInfo	createInfo		=
4380 		{
4381 			vk::VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
4382 			DE_NULL,
4383 			0u,
4384 
4385 			*m_renderPass,
4386 			DE_LENGTH_OF_ARRAY(imageViews),
4387 			imageViews,
4388 			(deUint32)m_targetWidth,
4389 			(deUint32)m_targetHeight,
4390 			1u
4391 		};
4392 
4393 		m_framebuffer = vk::createFramebuffer(vkd, device, &createInfo);
4394 	}
4395 
4396 	{
4397 		PrepareRenderPassContext renderpassContext (context, *m_renderPass, *m_framebuffer, m_targetWidth, m_targetHeight);
4398 
4399 		for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4400 		{
4401 			RenderPassCommand& command = *m_commands[cmdNdx];
4402 			command.prepare(renderpassContext);
4403 		}
4404 	}
4405 }
4406 
4407 void SubmitRenderPass::submit (SubmitContext& context)
4408 {
4409 	const vk::DeviceInterface&		vkd				= context.getContext().getDeviceInterface();
4410 	const vk::VkCommandBuffer		commandBuffer	= context.getCommandBuffer();
4411 
4412 	beginRenderPass(vkd, commandBuffer, *m_renderPass, *m_framebuffer, vk::makeRect2D(0, 0, m_targetWidth, m_targetHeight), tcu::Vec4(0.0f, 0.0f, 0.0f, 1.0f));
4413 
4414 	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4415 	{
4416 		RenderPassCommand& command = *m_commands[cmdNdx];
4417 
4418 		command.submit(context);
4419 	}
4420 
4421 	endRenderPass(vkd, commandBuffer);
4422 }
4423 
4424 void SubmitRenderPass::verify (VerifyContext& context, size_t commandIndex)
4425 {
4426 	TestLog&					log				(context.getLog());
4427 	tcu::ResultCollector&		resultCollector	(context.getResultCollector());
4428 	const string				sectionName		(de::toString(commandIndex) + ":" + getName());
4429 	const tcu::ScopedLogSection	section			(log, sectionName, sectionName);
4430 	VerifyRenderPassContext		verifyContext	(context, m_targetWidth, m_targetHeight);
4431 
4432 	tcu::clear(verifyContext.getReferenceTarget().getAccess(), Vec4(0.0f, 0.0f, 0.0f, 1.0f));
4433 
4434 	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4435 	{
4436 		RenderPassCommand& command = *m_commands[cmdNdx];
4437 		command.verify(verifyContext, cmdNdx);
4438 	}
4439 
4440 	{
4441 		const vk::InstanceInterface&			vki				= context.getContext().getInstanceInterface();
4442 		const vk::DeviceInterface&				vkd				= context.getContext().getDeviceInterface();
4443 		const vk::VkPhysicalDevice				physicalDevice	= context.getContext().getPhysicalDevice();
4444 		const vk::VkDevice						device			= context.getContext().getDevice();
4445 		const vk::VkQueue						queue			= context.getContext().getQueue();
4446 		const vk::VkCommandPool					commandPool		= context.getContext().getCommandPool();
4447 		const vk::Unique<vk::VkCommandBuffer>	commandBuffer	(createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
4448 		const vector<deUint32>&					queueFamilies	= context.getContext().getQueueFamilies();
4449 		const vk::Unique<vk::VkBuffer>			dstBuffer		(createBuffer(vkd, device, 4 * m_targetWidth * m_targetHeight, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
4450 		const vk::Unique<vk::VkDeviceMemory>	memory			(bindBufferMemory(vki, vkd, physicalDevice, device, *dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
4451 		{
4452 			const vk::VkImageMemoryBarrier		imageBarrier	=
4453 			{
4454 				vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
4455 				DE_NULL,
4456 
4457 				vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
4458 				vk::VK_ACCESS_TRANSFER_READ_BIT,
4459 
4460 				vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
4461 				vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
4462 
4463 				VK_QUEUE_FAMILY_IGNORED,
4464 				VK_QUEUE_FAMILY_IGNORED,
4465 
4466 				*m_colorTarget,
4467 				{
4468 					vk::VK_IMAGE_ASPECT_COLOR_BIT,
4469 					0,	// Mip level
4470 					1,	// Mip level count
4471 					0,	// Layer
4472 					1	// Layer count
4473 				}
4474 			};
4475 			const vk::VkBufferMemoryBarrier bufferBarrier =
4476 			{
4477 				vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
4478 				DE_NULL,
4479 
4480 				vk::VK_ACCESS_TRANSFER_WRITE_BIT,
4481 				vk::VK_ACCESS_HOST_READ_BIT,
4482 
4483 				VK_QUEUE_FAMILY_IGNORED,
4484 				VK_QUEUE_FAMILY_IGNORED,
4485 				*dstBuffer,
4486 				0,
4487 				VK_WHOLE_SIZE
4488 			};
4489 			const vk::VkBufferImageCopy	region =
4490 			{
4491 				0,
4492 				0, 0,
4493 				{
4494 					vk::VK_IMAGE_ASPECT_COLOR_BIT,
4495 					0,	// mipLevel
4496 					0,	// arrayLayer
4497 					1	// layerCount
4498 				},
4499 				{ 0, 0, 0 },
4500 				{
4501 					(deUint32)m_targetWidth,
4502 					(deUint32)m_targetHeight,
4503 					1u
4504 				}
4505 			};
4506 
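			// Make the color attachment writes available to the transfer read, copy the image into the buffer, then make the transfer write visible to host reads before the buffer is mapped.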
4507 			vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &imageBarrier);
4508 			vkd.cmdCopyImageToBuffer(*commandBuffer, *m_colorTarget, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *dstBuffer, 1, &region);
4509 			vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &bufferBarrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
4510 		}
4511 
4512 		endCommandBuffer(vkd, *commandBuffer);
4513 		submitCommandsAndWait(vkd, device, queue, *commandBuffer);
4514 
4515 		{
4516 			void* const	ptr		= mapMemory(vkd, device, *memory, 4 * m_targetWidth * m_targetHeight);
4517 
4518 			vk::invalidateMappedMemoryRange(vkd, device, *memory, 0, VK_WHOLE_SIZE);
4519 
4520 			{
4521 				const deUint8* const			data		= (const deUint8*)ptr;
4522 				const ConstPixelBufferAccess	resAccess	(TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_targetWidth, m_targetHeight, 1, data);
4523 				const ConstPixelBufferAccess&	refAccess	(verifyContext.getReferenceTarget().getAccess());
4524 
4525 				if (!tcu::intThresholdCompare(context.getLog(), (de::toString(commandIndex) + ":" + getName()).c_str(), (de::toString(commandIndex) + ":" + getName()).c_str(), refAccess, resAccess, UVec4(0), tcu::COMPARE_LOG_ON_ERROR))
4526 					resultCollector.fail(de::toString(commandIndex) + ":" + getName() + " Image comparison failed");
4527 			}
4528 
4529 			vkd.unmapMemory(device, *memory);
4530 		}
4531 	}
4532 }
4533 
4534 class ExecuteSecondaryCommandBuffer : public CmdCommand
4535 {
4536 public:
4537 				ExecuteSecondaryCommandBuffer	(const vector<CmdCommand*>& commands);
4538 				~ExecuteSecondaryCommandBuffer	(void);
4539 	const char*	getName							(void) const { return "ExecuteSecondaryCommandBuffer"; }
4540 
4541 	void		logPrepare						(TestLog&, size_t) const;
4542 	void		logSubmit						(TestLog&, size_t) const;
4543 
4544 	void		prepare							(PrepareContext&);
4545 	void		submit							(SubmitContext&);
4546 
4547 	void		verify							(VerifyContext&, size_t);
4548 
4549 private:
4550 	vk::Move<vk::VkCommandBuffer>				m_commandBuffer;
4551 	vk::Move<vk::VkDeviceMemory>				m_colorTargetMemory;
4552 	de::MovePtr<vk::Allocation>					m_colorTargetMemory2;
4553 	vk::Move<vk::VkImage>						m_colorTarget;
4554 	vk::Move<vk::VkImageView>					m_colorTargetView;
4555 	vk::Move<vk::VkFramebuffer>					m_framebuffer;
4556 	vector<CmdCommand*>							m_commands;
4557 };
4558 
4559 ExecuteSecondaryCommandBuffer::ExecuteSecondaryCommandBuffer(const vector<CmdCommand*>& commands)
4560 	: m_commands		(commands)
4561 {
4562 }
4563 
4564 ExecuteSecondaryCommandBuffer::~ExecuteSecondaryCommandBuffer (void)
4565 {
4566 	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4567 		delete m_commands[cmdNdx];
4568 }
4569 
4570 void ExecuteSecondaryCommandBuffer::logPrepare (TestLog& log, size_t commandIndex) const
4571 {
4572 	const string				sectionName	(de::toString(commandIndex) + ":" + getName());
4573 	const tcu::ScopedLogSection	section		(log, sectionName, sectionName);
4574 
4575 	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4576 	{
4577 		CmdCommand& command = *m_commands[cmdNdx];
4578 		command.logPrepare(log, cmdNdx);
4579 	}
4580 }
4581 
4582 void ExecuteSecondaryCommandBuffer::logSubmit (TestLog& log, size_t commandIndex) const
4583 {
4584 	const string				sectionName	(de::toString(commandIndex) + ":" + getName());
4585 	const tcu::ScopedLogSection	section		(log, sectionName, sectionName);
4586 
4587 	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4588 	{
4589 		CmdCommand& command = *m_commands[cmdNdx];
4590 		command.logSubmit(log, cmdNdx);
4591 	}
4592 }
4593 
4594 void ExecuteSecondaryCommandBuffer::prepare (PrepareContext& context)
4595 {
4596 	const vk::DeviceInterface&		vkd				= context.getContext().getDeviceInterface();
4597 	const vk::VkDevice				device			= context.getContext().getDevice();
4598 	const vk::VkCommandPool			commandPool		= context.getContext().getCommandPool();
4599 
4600 	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4601 	{
4602 		CmdCommand& command = *m_commands[cmdNdx];
4603 
4604 		command.prepare(context);
4605 	}
4606 
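	// Record every child command into a secondary command buffer; it is executed later from the primary command buffer in submit().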
4607 	m_commandBuffer = createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_SECONDARY);
4608 	{
4609 		SubmitContext submitContext (context, *m_commandBuffer);
4610 
4611 		for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4612 		{
4613 			CmdCommand& command = *m_commands[cmdNdx];
4614 
4615 			command.submit(submitContext);
4616 		}
4617 
4618 		endCommandBuffer(vkd, *m_commandBuffer);
4619 	}
4620 }
4621 
4622 void ExecuteSecondaryCommandBuffer::submit (SubmitContext& context)
4623 {
4624 	const vk::DeviceInterface&		vkd				= context.getContext().getDeviceInterface();
4625 	const vk::VkCommandBuffer		commandBuffer	= context.getCommandBuffer();
4626 
4627 
4628 	{
4629 		vkd.cmdExecuteCommands(commandBuffer, 1, &m_commandBuffer.get());
4630 	}
4631 }
4632 
4633 void ExecuteSecondaryCommandBuffer::verify (VerifyContext& context, size_t commandIndex)
4634 {
4635 	const string				sectionName	(de::toString(commandIndex) + ":" + getName());
4636 	const tcu::ScopedLogSection	section		(context.getLog(), sectionName, sectionName);
4637 
4638 	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4639 		m_commands[cmdNdx]->verify(context, cmdNdx);
4640 }
4641 
4642 struct PipelineResources
4643 {
4644 	vk::Move<vk::VkPipeline>			pipeline;
4645 	vk::Move<vk::VkDescriptorSetLayout>	descriptorSetLayout;
4646 	vk::Move<vk::VkPipelineLayout>		pipelineLayout;
4647 };
4648 
4649 void createPipelineWithResources (const vk::DeviceInterface&							vkd,
4650 								  const vk::VkDevice									device,
4651 								  const vk::VkRenderPass								renderPass,
4652 								  const deUint32										subpass,
4653 								  const vk::VkShaderModule&								vertexShaderModule,
4654 								  const vk::VkShaderModule&								fragmentShaderModule,
4655 								  const deUint32										viewPortWidth,
4656 								  const deUint32										viewPortHeight,
4657 								  const vector<vk::VkVertexInputBindingDescription>&	vertexBindingDescriptions,
4658 								  const vector<vk::VkVertexInputAttributeDescription>&	vertexAttributeDescriptions,
4659 								  const vector<vk::VkDescriptorSetLayoutBinding>&		bindings,
4660 								  const vk::VkPrimitiveTopology							topology,
4661 								  deUint32												pushConstantRangeCount,
4662 								  const vk::VkPushConstantRange*						pushConstantRanges,
4663 								  PipelineResources&									resources)
4664 {
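	// Create the descriptor set layout (when bindings are given), the pipeline layout, and a graphics pipeline with a fixed viewport and scissor.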
4665 	if (!bindings.empty())
4666 	{
4667 		const vk::VkDescriptorSetLayoutCreateInfo createInfo =
4668 		{
4669 			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
4670 			DE_NULL,
4671 
4672 			0u,
4673 			(deUint32)bindings.size(),
4674 			bindings.empty() ? DE_NULL : &bindings[0]
4675 		};
4676 
4677 		resources.descriptorSetLayout = vk::createDescriptorSetLayout(vkd, device, &createInfo);
4678 	}
4679 
4680 	{
4681 		const vk::VkDescriptorSetLayout			descriptorSetLayout_	= *resources.descriptorSetLayout;
4682 		const vk::VkPipelineLayoutCreateInfo	createInfo				=
4683 		{
4684 			vk::VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
4685 			DE_NULL,
4686 			0,
4687 
4688 			resources.descriptorSetLayout ? 1u : 0u,
4689 			resources.descriptorSetLayout ? &descriptorSetLayout_ : DE_NULL,
4690 
4691 			pushConstantRangeCount,
4692 			pushConstantRanges
4693 		};
4694 
4695 		resources.pipelineLayout = vk::createPipelineLayout(vkd, device, &createInfo);
4696 	}
4697 
4698 	{
4699 		const std::vector<vk::VkViewport>				viewports			(1, vk::makeViewport(0.0f, 0.0f, (float)viewPortWidth, (float)viewPortHeight, 0.0f, 1.0f));
4700 		const std::vector<vk::VkRect2D>					scissors			(1, vk::makeRect2D(0, 0, viewPortWidth, viewPortHeight));
4701 
4702 		const vk::VkPipelineVertexInputStateCreateInfo	vertexInputState	=
4703 		{
4704 			vk::VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
4705 			DE_NULL,
4706 			0u,
4707 
4708 			(deUint32)vertexBindingDescriptions.size(),
4709 			vertexBindingDescriptions.empty() ? DE_NULL : &vertexBindingDescriptions[0],
4710 
4711 			(deUint32)vertexAttributeDescriptions.size(),
4712 			vertexAttributeDescriptions.empty() ? DE_NULL : &vertexAttributeDescriptions[0]
4713 		};
4714 
4715 		resources.pipeline = vk::makeGraphicsPipeline(vkd,							// const DeviceInterface&                        vk
4716 													  device,						// const VkDevice                                device
4717 													  *resources.pipelineLayout,	// const VkPipelineLayout                        pipelineLayout
4718 													  vertexShaderModule,			// const VkShaderModule                          vertexShaderModule
4719 													  DE_NULL,						// const VkShaderModule                          tessellationControlModule
4720 													  DE_NULL,						// const VkShaderModule                          tessellationEvalModule
4721 													  DE_NULL,						// const VkShaderModule                          geometryShaderModule
4722 													  fragmentShaderModule,			// const VkShaderModule                          fragmentShaderModule
4723 													  renderPass,					// const VkRenderPass                            renderPass
4724 													  viewports,					// const std::vector<VkViewport>&                viewports
4725 													  scissors,						// const std::vector<VkRect2D>&                  scissors
4726 													  topology,						// const VkPrimitiveTopology                     topology
4727 													  subpass,						// const deUint32                                subpass
4728 													  0u,							// const deUint32                                patchControlPoints
4729 													  &vertexInputState);			// const VkPipelineVertexInputStateCreateInfo*   vertexInputStateCreateInfo
4730 	}
4731 }
4732 
4733 class RenderIndexBuffer : public RenderPassCommand
4734 {
4735 public:
4736 				RenderIndexBuffer	(void) {}
4737 				~RenderIndexBuffer	(void) {}
4738 
4739 	const char*	getName				(void) const { return "RenderIndexBuffer"; }
4740 	void		logPrepare			(TestLog&, size_t) const;
4741 	void		logSubmit			(TestLog&, size_t) const;
4742 	void		prepare				(PrepareRenderPassContext&);
4743 	void		submit				(SubmitContext& context);
4744 	void		verify				(VerifyRenderPassContext&, size_t);
4745 
4746 private:
4747 	PipelineResources				m_resources;
4748 	vk::VkDeviceSize				m_bufferSize;
4749 };
4750 
4751 void RenderIndexBuffer::logPrepare (TestLog& log, size_t commandIndex) const
4752 {
4753 	log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as index buffer." << TestLog::EndMessage;
4754 }
4755 
4756 void RenderIndexBuffer::logSubmit (TestLog& log, size_t commandIndex) const
4757 {
4758 	log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as index buffer." << TestLog::EndMessage;
4759 }
4760 
4761 void RenderIndexBuffer::prepare (PrepareRenderPassContext& context)
4762 {
4763 	const vk::DeviceInterface&				vkd						= context.getContext().getDeviceInterface();
4764 	const vk::VkDevice						device					= context.getContext().getDevice();
4765 	const vk::VkRenderPass					renderPass				= context.getRenderPass();
4766 	const deUint32							subpass					= 0;
4767 	const vk::Unique<vk::VkShaderModule>	vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("index-buffer.vert"), 0));
4768 	const vk::Unique<vk::VkShaderModule>	fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
4769 
4770 	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
4771 								vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), vector<vk::VkDescriptorSetLayoutBinding>(), vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
4772 	m_bufferSize = context.getBufferSize();
4773 }
4774 
4775 void RenderIndexBuffer::submit (SubmitContext& context)
4776 {
4777 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
4778 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
4779 
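	// Bind the test buffer as a 16-bit index buffer and draw one point per index.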
4780 	vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
4781 	vkd.cmdBindIndexBuffer(commandBuffer, context.getBuffer(), 0, vk::VK_INDEX_TYPE_UINT16);
4782 	vkd.cmdDrawIndexed(commandBuffer, (deUint32)(context.getBufferSize() / 2), 1, 0, 0, 0);
4783 }
4784 
4785 void RenderIndexBuffer::verify (VerifyRenderPassContext& context, size_t)
4786 {
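	// Each pair of bytes in the buffer is treated as an (x, y) point coordinate; mark those pixels white in the reference image.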
4787 	for (size_t pos = 0; pos < (size_t)m_bufferSize / 2; pos++)
4788 	{
4789 		const deUint8 x  = context.getReference().get(pos * 2);
4790 		const deUint8 y  = context.getReference().get((pos * 2) + 1);
4791 
4792 		context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
4793 	}
4794 }
4795 
4796 class RenderVertexBuffer : public RenderPassCommand
4797 {
4798 public:
4799 				RenderVertexBuffer	(deUint32 stride)
4800 					: m_stride(stride)
4801 					, m_name("RenderVertexBuffer" + de::toString(stride))
4802 					{}
4803 				~RenderVertexBuffer	(void) {}
4804 
4805 	const char*	getName				(void) const { return m_name.c_str(); }
4806 	void		logPrepare			(TestLog&, size_t) const;
4807 	void		logSubmit			(TestLog&, size_t) const;
4808 	void		prepare				(PrepareRenderPassContext&);
4809 	void		submit				(SubmitContext& context);
4810 	void		verify				(VerifyRenderPassContext&, size_t);
4811 
4812 private:
4813 	const deUint32		m_stride;
4814 	const std::string	m_name;
4815 	PipelineResources	m_resources;
4816 	vk::VkDeviceSize	m_bufferSize;
4817 };
4818 
4819 void RenderVertexBuffer::logPrepare (TestLog& log, size_t commandIndex) const
4820 {
4821 	log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as vertex buffer." << TestLog::EndMessage;
4822 }
4823 
4824 void RenderVertexBuffer::logSubmit (TestLog& log, size_t commandIndex) const
4825 {
4826 	log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as vertex buffer." << TestLog::EndMessage;
4827 }
4828 
4829 void RenderVertexBuffer::prepare (PrepareRenderPassContext& context)
4830 {
4831 	const vk::DeviceInterface&						vkd						= context.getContext().getDeviceInterface();
4832 	const vk::VkDevice								device					= context.getContext().getDevice();
4833 	const vk::VkRenderPass							renderPass				= context.getRenderPass();
4834 	const deUint32									subpass					= 0;
4835 	const vk::Unique<vk::VkShaderModule>			vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("vertex-buffer.vert"), 0));
4836 	const vk::Unique<vk::VkShaderModule>			fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
4837 
4838 	vector<vk::VkVertexInputAttributeDescription>	vertexAttributeDescriptions;
4839 	vector<vk::VkVertexInputBindingDescription>		vertexBindingDescriptions;
4840 
4841 	{
4842 		const vk::VkVertexInputBindingDescription vertexBindingDescription =
4843 			{
4844 				0,
4845 				m_stride,
4846 				vk::VK_VERTEX_INPUT_RATE_VERTEX
4847 			};
4848 
4849 		vertexBindingDescriptions.push_back(vertexBindingDescription);
4850 	}
4851 	{
4852 		const vk::VkVertexInputAttributeDescription vertexAttributeDescription =
4853 		{
4854 			0,
4855 			0,
4856 			vk::VK_FORMAT_R8G8_UNORM,
4857 			0
4858 		};
4859 
4860 		vertexAttributeDescriptions.push_back(vertexAttributeDescription);
4861 	}
4862 	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
4863 								vertexBindingDescriptions, vertexAttributeDescriptions, vector<vk::VkDescriptorSetLayoutBinding>(), vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
4864 
4865 	m_bufferSize = context.getBufferSize();
4866 }
4867 
4868 void RenderVertexBuffer::submit (SubmitContext& context)
4869 {
4870 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
4871 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
4872 	const vk::VkDeviceSize		offset			= 0;
4873 	const vk::VkBuffer			buffer			= context.getBuffer();
4874 
4875 	vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
4876 	vkd.cmdBindVertexBuffers(commandBuffer, 0, 1, &buffer, &offset);
4877 	vkd.cmdDraw(commandBuffer, (deUint32)(context.getBufferSize() / m_stride), 1, 0, 0);
4878 }
4879 
4880 void RenderVertexBuffer::verify (VerifyRenderPassContext& context, size_t)
4881 {
4882 	for (size_t pos = 0; pos < (size_t)m_bufferSize / m_stride; pos++)
4883 	{
4884 		const deUint8 x  = context.getReference().get(pos * m_stride);
4885 		const deUint8 y  = context.getReference().get((pos * m_stride) + 1);
4886 
4887 		context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
4888 	}
4889 }
4890 
4891 class RenderVertexUniformBuffer : public RenderPassCommand
4892 {
4893 public:
4894 									RenderVertexUniformBuffer	(void) {}
4895 									~RenderVertexUniformBuffer	(void);
4896 
4897 	const char*						getName						(void) const { return "RenderVertexUniformBuffer"; }
4898 	void							logPrepare					(TestLog&, size_t) const;
4899 	void							logSubmit					(TestLog&, size_t) const;
4900 	void							prepare						(PrepareRenderPassContext&);
4901 	void							submit						(SubmitContext& context);
4902 	void							verify						(VerifyRenderPassContext&, size_t);
4903 
4904 protected:
4905 
4906 	deUint32						calculateBufferPartSize		(size_t descriptorSetNdx) const;
4907 
4908 private:
4909 	PipelineResources				m_resources;
4910 	vk::Move<vk::VkDescriptorPool>	m_descriptorPool;
4911 	vector<vk::VkDescriptorSet>		m_descriptorSets;
4912 
4913 	vk::VkDeviceSize				m_bufferSize;
4914 };
4915 
4916 RenderVertexUniformBuffer::~RenderVertexUniformBuffer (void)
4917 {
4918 }
4919 
4920 void RenderVertexUniformBuffer::logPrepare (TestLog& log, size_t commandIndex) const
4921 {
4922 	log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as uniform buffer." << TestLog::EndMessage;
4923 }
4924 
4925 void RenderVertexUniformBuffer::logSubmit (TestLog& log, size_t commandIndex) const
4926 {
4927 	log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as uniform buffer." << TestLog::EndMessage;
4928 }
4929 
4930 void RenderVertexUniformBuffer::prepare (PrepareRenderPassContext& context)
4931 {
4932 	const vk::DeviceInterface&					vkd						= context.getContext().getDeviceInterface();
4933 	const vk::VkDevice							device					= context.getContext().getDevice();
4934 	const vk::VkRenderPass						renderPass				= context.getRenderPass();
4935 	const deUint32								subpass					= 0;
4936 	const vk::Unique<vk::VkShaderModule>		vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("uniform-buffer.vert"), 0));
4937 	const vk::Unique<vk::VkShaderModule>		fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
4938 	vector<vk::VkDescriptorSetLayoutBinding>	bindings;
4939 
4940 	// Make sure the buffer size is a multiple of 16 (the GLSL shader reads the data as uvec4s, each holding 16 values).
4941 	m_bufferSize = context.getBufferSize();
4942 	m_bufferSize = static_cast<vk::VkDeviceSize>(m_bufferSize / 16u) * 16u;
4943 
4944 	{
4945 		const vk::VkDescriptorSetLayoutBinding binding =
4946 		{
4947 			0u,
4948 			vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
4949 			1,
4950 			vk::VK_SHADER_STAGE_VERTEX_BIT,
4951 			DE_NULL
4952 		};
4953 
4954 		bindings.push_back(binding);
4955 	}
4956 
4957 	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
4958 								vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
4959 
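	// The buffer is bound in MAX_UNIFORM_BUFFER_SIZE byte chunks, with one descriptor set per chunk.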
4960 	{
4961 		const deUint32							descriptorCount	= (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE));
4962 		const vk::VkDescriptorPoolSize			poolSizes		=
4963 		{
4964 			vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
4965 			descriptorCount
4966 		};
4967 		const vk::VkDescriptorPoolCreateInfo	createInfo		=
4968 		{
4969 			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
4970 			DE_NULL,
4971 			vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
4972 
4973 			descriptorCount,
4974 			1u,
4975 			&poolSizes,
4976 		};
4977 
4978 		m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
4979 		m_descriptorSets.resize(descriptorCount);
4980 	}
4981 
4982 	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
4983 	{
4984 		const vk::VkDescriptorSetLayout			layout			= *m_resources.descriptorSetLayout;
4985 		const vk::VkDescriptorSetAllocateInfo	allocateInfo	=
4986 		{
4987 			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
4988 			DE_NULL,
4989 
4990 			*m_descriptorPool,
4991 			1,
4992 			&layout
4993 		};
4994 
4995 		m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();
4996 
4997 		{
4998 			const vk::VkDescriptorBufferInfo		bufferInfo	=
4999 			{
5000 				context.getBuffer(),
5001 				(vk::VkDeviceSize)(descriptorSetNdx * (size_t)MAX_UNIFORM_BUFFER_SIZE),
5002 				calculateBufferPartSize(descriptorSetNdx)
5003 			};
5004 			const vk::VkWriteDescriptorSet			write		=
5005 			{
5006 				vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
5007 				DE_NULL,
5008 				m_descriptorSets[descriptorSetNdx],
5009 				0u,
5010 				0u,
5011 				1u,
5012 				vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
5013 				DE_NULL,
5014 				&bufferInfo,
5015 				DE_NULL,
5016 			};
5017 
5018 			vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
5019 		}
5020 	}
5021 }
5022 
5023 void RenderVertexUniformBuffer::submit (SubmitContext& context)
5024 {
5025 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
5026 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
5027 
5028 	vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
5029 
5030 	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5031 	{
5032 		const size_t	size	= calculateBufferPartSize(descriptorSetNdx);
5033 		const deUint32	count	= (deUint32)(size / 2);
5034 
5035 		vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
5036 		vkd.cmdDraw(commandBuffer, count, 1, 0, 0);
5037 	}
5038 }
5039 
5040 void RenderVertexUniformBuffer::verify (VerifyRenderPassContext& context, size_t)
5041 {
5042 	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5043 	{
5044 		const size_t	offset	= descriptorSetNdx * MAX_UNIFORM_BUFFER_SIZE;
5045 		const size_t	size	= calculateBufferPartSize(descriptorSetNdx);
5046 		const size_t	count	= size / 2;
5047 
5048 		for (size_t pos = 0; pos < count; pos++)
5049 		{
5050 			const deUint8 x  = context.getReference().get(offset + pos * 2);
5051 			const deUint8 y  = context.getReference().get(offset + (pos * 2) + 1);
5052 
5053 			context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
5054 		}
5055 	}
5056 }
5057 
5058 deUint32 RenderVertexUniformBuffer::calculateBufferPartSize(size_t descriptorSetNdx) const
5059 {
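	// Every chunk is MAX_UNIFORM_BUFFER_SIZE bytes except possibly the last one, which covers whatever remains of the buffer.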
5060 	deUint32 size = static_cast<deUint32>(m_bufferSize) - static_cast<deUint32>(descriptorSetNdx) * MAX_UNIFORM_BUFFER_SIZE;
5061 	if (size < MAX_UNIFORM_BUFFER_SIZE)
5062 		return size;
5063 	return MAX_UNIFORM_BUFFER_SIZE;
5064 }
5065 
5066 class RenderVertexUniformTexelBuffer : public RenderPassCommand
5067 {
5068 public:
5069 				RenderVertexUniformTexelBuffer	(void) {}
5070 				~RenderVertexUniformTexelBuffer	(void);
5071 
5072 	const char*	getName							(void) const { return "RenderVertexUniformTexelBuffer"; }
5073 	void		logPrepare						(TestLog&, size_t) const;
5074 	void		logSubmit						(TestLog&, size_t) const;
5075 	void		prepare							(PrepareRenderPassContext&);
5076 	void		submit							(SubmitContext& context);
5077 	void		verify							(VerifyRenderPassContext&, size_t);
5078 
5079 private:
5080 	PipelineResources				m_resources;
5081 	vk::Move<vk::VkDescriptorPool>	m_descriptorPool;
5082 	vector<vk::VkDescriptorSet>		m_descriptorSets;
5083 	vector<vk::VkBufferView>		m_bufferViews;
5084 
5085 	const vk::DeviceInterface*		m_vkd;
5086 	vk::VkDevice					m_device;
5087 	vk::VkDeviceSize				m_bufferSize;
5088 	deUint32						m_maxUniformTexelCount;
5089 };
5090 
5091 RenderVertexUniformTexelBuffer::~RenderVertexUniformTexelBuffer (void)
5092 {
5093 	for (size_t bufferViewNdx = 0; bufferViewNdx < m_bufferViews.size(); bufferViewNdx++)
5094 	{
5095 		if (!!m_bufferViews[bufferViewNdx])
5096 		{
5097 			m_vkd->destroyBufferView(m_device, m_bufferViews[bufferViewNdx], DE_NULL);
5098 			m_bufferViews[bufferViewNdx] = (vk::VkBufferView)0;
5099 		}
5100 	}
5101 }
5102 
5103 void RenderVertexUniformTexelBuffer::logPrepare (TestLog& log, size_t commandIndex) const
5104 {
5105 	log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as uniform texel buffer." << TestLog::EndMessage;
5106 }
5107 
5108 void RenderVertexUniformTexelBuffer::logSubmit (TestLog& log, size_t commandIndex) const
5109 {
5110 	log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as uniform texel buffer." << TestLog::EndMessage;
5111 }
5112 
5113 void RenderVertexUniformTexelBuffer::prepare (PrepareRenderPassContext& context)
5114 {
5115 	const vk::InstanceInterface&				vki						= context.getContext().getInstanceInterface();
5116 	const vk::VkPhysicalDevice					physicalDevice			= context.getContext().getPhysicalDevice();
5117 	const vk::DeviceInterface&					vkd						= context.getContext().getDeviceInterface();
5118 	const vk::VkDevice							device					= context.getContext().getDevice();
5119 	const vk::VkRenderPass						renderPass				= context.getRenderPass();
5120 	const deUint32								subpass					= 0;
5121 	const vk::Unique<vk::VkShaderModule>		vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("uniform-texel-buffer.vert"), 0));
5122 	const vk::Unique<vk::VkShaderModule>		fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
5123 	vector<vk::VkDescriptorSetLayoutBinding>	bindings;
5124 
5125 	m_device				= device;
5126 	m_vkd					= &vkd;
5127 	m_bufferSize			= context.getBufferSize();
5128 	m_maxUniformTexelCount	= vk::getPhysicalDeviceProperties(vki, physicalDevice).limits.maxTexelBufferElements;
5129 
5130 	{
5131 		const vk::VkDescriptorSetLayoutBinding binding =
5132 		{
5133 			0u,
5134 			vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
5135 			1,
5136 			vk::VK_SHADER_STAGE_VERTEX_BIT,
5137 			DE_NULL
5138 		};
5139 
5140 		bindings.push_back(binding);
5141 	}
5142 
5143 	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
5144 								vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
5145 
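	// Split the buffer into R16_UINT texel buffer views of at most maxTexelBufferElements texels (2 bytes each), with one descriptor set per view.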
5146 	{
5147 		const deUint32							descriptorCount	= (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)m_maxUniformTexelCount * 2));
5148 		const vk::VkDescriptorPoolSize			poolSizes		=
5149 		{
5150 			vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
5151 			descriptorCount
5152 		};
5153 		const vk::VkDescriptorPoolCreateInfo	createInfo		=
5154 		{
5155 			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
5156 			DE_NULL,
5157 			vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
5158 
5159 			descriptorCount,
5160 			1u,
5161 			&poolSizes,
5162 		};
5163 
5164 		m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
5165 		m_descriptorSets.resize(descriptorCount, (vk::VkDescriptorSet)0);
5166 		m_bufferViews.resize(descriptorCount, (vk::VkBufferView)0);
5167 	}
5168 
5169 	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5170 	{
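		// Number of 16-bit texels covered by this descriptor set's chunk of the buffer.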
5171 		const deUint32							count			= (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxUniformTexelCount * 2
5172 																? m_bufferSize - descriptorSetNdx * m_maxUniformTexelCount * 2
5173 																: m_maxUniformTexelCount * 2) / 2;
5174 		const vk::VkDescriptorSetLayout			layout			= *m_resources.descriptorSetLayout;
5175 		const vk::VkDescriptorSetAllocateInfo	allocateInfo	=
5176 		{
5177 			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
5178 			DE_NULL,
5179 
5180 			*m_descriptorPool,
5181 			1,
5182 			&layout
5183 		};
5184 
5185 		m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();
5186 
5187 		{
5188 			const vk::VkBufferViewCreateInfo createInfo =
5189 			{
5190 				vk::VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
5191 				DE_NULL,
5192 				0u,
5193 
5194 				context.getBuffer(),
5195 				vk::VK_FORMAT_R16_UINT,
5196 				descriptorSetNdx * m_maxUniformTexelCount * 2,
5197 				count * 2
5198 			};
5199 
5200 			VK_CHECK(vkd.createBufferView(device, &createInfo, DE_NULL, &m_bufferViews[descriptorSetNdx]));
5201 		}
5202 
5203 		{
5204 			const vk::VkWriteDescriptorSet			write		=
5205 			{
5206 				vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
5207 				DE_NULL,
5208 				m_descriptorSets[descriptorSetNdx],
5209 				0u,
5210 				0u,
5211 				1u,
5212 				vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
5213 				DE_NULL,
5214 				DE_NULL,
5215 				&m_bufferViews[descriptorSetNdx]
5216 			};
5217 
5218 			vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
5219 		}
5220 	}
5221 }
5222 
5223 void RenderVertexUniformTexelBuffer::submit (SubmitContext& context)
5224 {
5225 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
5226 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
5227 
5228 	vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
5229 
5230 	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5231 	{
5232 		const deUint32 count	= (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxUniformTexelCount * 2
5233 								? m_bufferSize - descriptorSetNdx * m_maxUniformTexelCount * 2
5234 								: m_maxUniformTexelCount * 2) / 2;
5235 
5236 		vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
5237 		vkd.cmdDraw(commandBuffer, count, 1, 0, 0);
5238 	}
5239 }
5240 
5241 void RenderVertexUniformTexelBuffer::verify (VerifyRenderPassContext& context, size_t)
5242 {
5243 	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5244 	{
5245 		const size_t	offset	= descriptorSetNdx * m_maxUniformTexelCount * 2;
5246 		const deUint32	count	= (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxUniformTexelCount * 2
5247 								? m_bufferSize - descriptorSetNdx * m_maxUniformTexelCount * 2
5248 								: m_maxUniformTexelCount * 2) / 2;
5249 
5250 		for (size_t pos = 0; pos < (size_t)count; pos++)
5251 		{
5252 			const deUint8 x  = context.getReference().get(offset + pos * 2);
5253 			const deUint8 y  = context.getReference().get(offset + (pos * 2) + 1);
5254 
5255 			context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
5256 		}
5257 	}
5258 }
5259 
5260 class RenderVertexStorageBuffer : public RenderPassCommand
5261 {
5262 public:
5263 				RenderVertexStorageBuffer	(void) {}
5264 				~RenderVertexStorageBuffer	(void);
5265 
5266 	const char*	getName						(void) const { return "RenderVertexStorageBuffer"; }
5267 	void		logPrepare					(TestLog&, size_t) const;
5268 	void		logSubmit					(TestLog&, size_t) const;
5269 	void		prepare						(PrepareRenderPassContext&);
5270 	void		submit						(SubmitContext& context);
5271 	void		verify						(VerifyRenderPassContext&, size_t);
5272 
5273 private:
5274 	PipelineResources				m_resources;
5275 	vk::Move<vk::VkDescriptorPool>	m_descriptorPool;
5276 	vector<vk::VkDescriptorSet>		m_descriptorSets;
5277 
5278 	vk::VkDeviceSize				m_bufferSize;
5279 };
5280 
5281 RenderVertexStorageBuffer::~RenderVertexStorageBuffer (void)
5282 {
5283 }
5284 
5285 void RenderVertexStorageBuffer::logPrepare (TestLog& log, size_t commandIndex) const
5286 {
5287 	log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as storage buffer." << TestLog::EndMessage;
5288 }
5289 
5290 void RenderVertexStorageBuffer::logSubmit (TestLog& log, size_t commandIndex) const
5291 {
5292 	log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as storage buffer." << TestLog::EndMessage;
5293 }
5294 
5295 void RenderVertexStorageBuffer::prepare (PrepareRenderPassContext& context)
5296 {
5297 	const vk::DeviceInterface&					vkd						= context.getContext().getDeviceInterface();
5298 	const vk::VkDevice							device					= context.getContext().getDevice();
5299 	const vk::VkRenderPass						renderPass				= context.getRenderPass();
5300 	const deUint32								subpass					= 0;
5301 	const vk::Unique<vk::VkShaderModule>		vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("storage-buffer.vert"), 0));
5302 	const vk::Unique<vk::VkShaderModule>		fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
5303 	vector<vk::VkDescriptorSetLayoutBinding>	bindings;
5304 
5305 	m_bufferSize = context.getBufferSize();
5306 
5307 	{
5308 		const vk::VkDescriptorSetLayoutBinding binding =
5309 		{
5310 			0u,
5311 			vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
5312 			1,
5313 			vk::VK_SHADER_STAGE_VERTEX_BIT,
5314 			DE_NULL
5315 		};
5316 
5317 		bindings.push_back(binding);
5318 	}
5319 
5320 	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
5321 								vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
5322 
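	// The buffer is bound in MAX_STORAGE_BUFFER_SIZE byte ranges, with one descriptor set per range.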
5323 	{
5324 		const deUint32							descriptorCount	= (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)MAX_STORAGE_BUFFER_SIZE));
5325 		const vk::VkDescriptorPoolSize			poolSizes		=
5326 		{
5327 			vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
5328 			descriptorCount
5329 		};
5330 		const vk::VkDescriptorPoolCreateInfo	createInfo		=
5331 		{
5332 			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
5333 			DE_NULL,
5334 			vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
5335 
5336 			descriptorCount,
5337 			1u,
5338 			&poolSizes,
5339 		};
5340 
5341 		m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
5342 		m_descriptorSets.resize(descriptorCount);
5343 	}
5344 
5345 	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5346 	{
5347 		const vk::VkDescriptorSetLayout			layout			= *m_resources.descriptorSetLayout;
5348 		const vk::VkDescriptorSetAllocateInfo	allocateInfo	=
5349 		{
5350 			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
5351 			DE_NULL,
5352 
5353 			*m_descriptorPool,
5354 			1,
5355 			&layout
5356 		};
5357 
5358 		m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();
5359 
5360 		{
5361 			const vk::VkDescriptorBufferInfo		bufferInfo	=
5362 			{
5363 				context.getBuffer(),
5364 				descriptorSetNdx * MAX_STORAGE_BUFFER_SIZE,
5365 				de::min(m_bufferSize - descriptorSetNdx * MAX_STORAGE_BUFFER_SIZE,  (vk::VkDeviceSize)MAX_STORAGE_BUFFER_SIZE)
5366 			};
5367 			const vk::VkWriteDescriptorSet			write		=
5368 			{
5369 				vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
5370 				DE_NULL,
5371 				m_descriptorSets[descriptorSetNdx],
5372 				0u,
5373 				0u,
5374 				1u,
5375 				vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
5376 				DE_NULL,
5377 				&bufferInfo,
5378 				DE_NULL,
5379 			};
5380 
5381 			vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
5382 		}
5383 	}
5384 }
5385 
5386 void RenderVertexStorageBuffer::submit (SubmitContext& context)
5387 {
5388 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
5389 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
5390 
5391 	vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
5392 
5393 	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5394 	{
5395 		const size_t size	= m_bufferSize < (descriptorSetNdx + 1) * MAX_STORAGE_BUFFER_SIZE
5396 							? (size_t)(m_bufferSize - descriptorSetNdx * MAX_STORAGE_BUFFER_SIZE)
5397 							: (size_t)(MAX_STORAGE_BUFFER_SIZE);
5398 
5399 		vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
5400 		vkd.cmdDraw(commandBuffer, (deUint32)(size / 2), 1, 0, 0);
5401 	}
5402 }
5403 
5404 void RenderVertexStorageBuffer::verify (VerifyRenderPassContext& context, size_t)
5405 {
5406 	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5407 	{
5408 		const size_t offset	= descriptorSetNdx * MAX_STORAGE_BUFFER_SIZE;
5409 		const size_t size	= m_bufferSize < (descriptorSetNdx + 1) * MAX_STORAGE_BUFFER_SIZE
5410 							? (size_t)(m_bufferSize - descriptorSetNdx * MAX_STORAGE_BUFFER_SIZE)
5411 							: (size_t)(MAX_STORAGE_BUFFER_SIZE);
5412 
5413 		for (size_t pos = 0; pos < size / 2; pos++)
5414 		{
5415 			const deUint8 x  = context.getReference().get(offset + pos * 2);
5416 			const deUint8 y  = context.getReference().get(offset + (pos * 2) + 1);
5417 
5418 			context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
5419 		}
5420 	}
5421 }
5422 
5423 class RenderVertexStorageTexelBuffer : public RenderPassCommand
5424 {
5425 public:
5426 				RenderVertexStorageTexelBuffer	(void) {}
5427 				~RenderVertexStorageTexelBuffer	(void);
5428 
5429 	const char*	getName							(void) const { return "RenderVertexStorageTexelBuffer"; }
5430 	void		logPrepare						(TestLog&, size_t) const;
5431 	void		logSubmit						(TestLog&, size_t) const;
5432 	void		prepare							(PrepareRenderPassContext&);
5433 	void		submit							(SubmitContext& context);
5434 	void		verify							(VerifyRenderPassContext&, size_t);
5435 
5436 private:
5437 	PipelineResources				m_resources;
5438 	vk::Move<vk::VkDescriptorPool>	m_descriptorPool;
5439 	vector<vk::VkDescriptorSet>		m_descriptorSets;
5440 	vector<vk::VkBufferView>		m_bufferViews;
5441 
5442 	const vk::DeviceInterface*		m_vkd;
5443 	vk::VkDevice					m_device;
5444 	vk::VkDeviceSize				m_bufferSize;
5445 	deUint32						m_maxStorageTexelCount;
5446 };
5447 
5448 RenderVertexStorageTexelBuffer::~RenderVertexStorageTexelBuffer (void)
5449 {
5450 	for (size_t bufferViewNdx = 0; bufferViewNdx < m_bufferViews.size(); bufferViewNdx++)
5451 	{
5452 		if (!!m_bufferViews[bufferViewNdx])
5453 		{
5454 			m_vkd->destroyBufferView(m_device, m_bufferViews[bufferViewNdx], DE_NULL);
5455 			m_bufferViews[bufferViewNdx] = (vk::VkBufferView)0;
5456 		}
5457 	}
5458 }
5459 
5460 void RenderVertexStorageTexelBuffer::logPrepare (TestLog& log, size_t commandIndex) const
5461 {
5462 	log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as storage texel buffer." << TestLog::EndMessage;
5463 }
5464 
5465 void RenderVertexStorageTexelBuffer::logSubmit (TestLog& log, size_t commandIndex) const
5466 {
5467 	log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as storage texel buffer." << TestLog::EndMessage;
5468 }
5469 
5470 void RenderVertexStorageTexelBuffer::prepare (PrepareRenderPassContext& context)
5471 {
5472 	const vk::InstanceInterface&				vki						= context.getContext().getInstanceInterface();
5473 	const vk::VkPhysicalDevice					physicalDevice			= context.getContext().getPhysicalDevice();
5474 	const vk::DeviceInterface&					vkd						= context.getContext().getDeviceInterface();
5475 	const vk::VkDevice							device					= context.getContext().getDevice();
5476 	const vk::VkRenderPass						renderPass				= context.getRenderPass();
5477 	const deUint32								subpass					= 0;
5478 	const vk::Unique<vk::VkShaderModule>		vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("storage-texel-buffer.vert"), 0));
5479 	const vk::Unique<vk::VkShaderModule>		fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
5480 	vector<vk::VkDescriptorSetLayoutBinding>	bindings;
5481 
5482 	m_device				= device;
5483 	m_vkd					= &vkd;
5484 	m_bufferSize			= context.getBufferSize();
5485 	m_maxStorageTexelCount	= vk::getPhysicalDeviceProperties(vki, physicalDevice).limits.maxTexelBufferElements;
5486 
5487 	{
5488 		const vk::VkDescriptorSetLayoutBinding binding =
5489 		{
5490 			0u,
5491 			vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
5492 			1,
5493 			vk::VK_SHADER_STAGE_VERTEX_BIT,
5494 			DE_NULL
5495 		};
5496 
5497 		bindings.push_back(binding);
5498 	}
5499 
5500 	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
5501 								vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
5502 
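	// Split the buffer into R32_UINT texel buffer views of at most maxTexelBufferElements texels (4 bytes each), with one descriptor set per view.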
5503 	{
5504 		const deUint32							descriptorCount	= (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)m_maxStorageTexelCount * 4));
5505 		const vk::VkDescriptorPoolSize			poolSizes		=
5506 		{
5507 			vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
5508 			descriptorCount
5509 		};
5510 		const vk::VkDescriptorPoolCreateInfo	createInfo		=
5511 		{
5512 			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
5513 			DE_NULL,
5514 			vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
5515 
5516 			descriptorCount,
5517 			1u,
5518 			&poolSizes,
5519 		};
5520 
5521 		m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
5522 		m_descriptorSets.resize(descriptorCount, (vk::VkDescriptorSet)0);
5523 		m_bufferViews.resize(descriptorCount, (vk::VkBufferView)0);
5524 	}
5525 
5526 	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5527 	{
5528 		const vk::VkDescriptorSetLayout			layout			= *m_resources.descriptorSetLayout;
5529 		const vk::VkDescriptorSetAllocateInfo	allocateInfo	=
5530 		{
5531 			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
5532 			DE_NULL,
5533 
5534 			*m_descriptorPool,
5535 			1,
5536 			&layout
5537 		};
5538 
5539 		m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();
5540 
5541 		{
5542 			const vk::VkBufferViewCreateInfo createInfo =
5543 			{
5544 				vk::VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
5545 				DE_NULL,
5546 				0u,
5547 
5548 				context.getBuffer(),
5549 				vk::VK_FORMAT_R32_UINT,
5550 				descriptorSetNdx * m_maxStorageTexelCount * 4,
5551 				(deUint32)de::min<vk::VkDeviceSize>(m_maxStorageTexelCount * 4, m_bufferSize - descriptorSetNdx * m_maxStorageTexelCount * 4)
5552 			};
5553 
5554 			VK_CHECK(vkd.createBufferView(device, &createInfo, DE_NULL, &m_bufferViews[descriptorSetNdx]));
5555 		}
5556 
5557 		{
5558 			const vk::VkWriteDescriptorSet			write		=
5559 			{
5560 				vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
5561 				DE_NULL,
5562 				m_descriptorSets[descriptorSetNdx],
5563 				0u,
5564 				0u,
5565 				1u,
5566 				vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
5567 				DE_NULL,
5568 				DE_NULL,
5569 				&m_bufferViews[descriptorSetNdx]
5570 			};
5571 
5572 			vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
5573 		}
5574 	}
5575 }
5576 
5577 void RenderVertexStorageTexelBuffer::submit (SubmitContext& context)
5578 {
5579 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
5580 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
5581 
5582 	vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
5583 
5584 	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5585 	{
5586 		const deUint32 count	= (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxStorageTexelCount * 4
5587 								? m_bufferSize - descriptorSetNdx * m_maxStorageTexelCount * 4
5588 								: m_maxStorageTexelCount * 4) / 2;
5589 
5590 		vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
5591 		vkd.cmdDraw(commandBuffer, count, 1, 0, 0);
5592 	}
5593 }
5594 
5595 void RenderVertexStorageTexelBuffer::verify (VerifyRenderPassContext& context, size_t)
5596 {
5597 	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5598 	{
5599 		const size_t	offset	= descriptorSetNdx * m_maxStorageTexelCount * 4;
5600 		const deUint32	count	= (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxStorageTexelCount * 4
5601 								? m_bufferSize - descriptorSetNdx * m_maxStorageTexelCount * 4
5602 								: m_maxStorageTexelCount * 4) / 2;
5603 
5604 		DE_ASSERT(context.getReference().getSize() <= 4 * m_maxStorageTexelCount * m_descriptorSets.size());
5605 		DE_ASSERT(context.getReference().getSize() > offset);
5606 		DE_ASSERT(offset + count * 2 <= context.getReference().getSize());
5607 
5608 		for (size_t pos = 0; pos < (size_t)count; pos++)
5609 		{
5610 			const deUint8 x = context.getReference().get(offset + pos * 2);
5611 			const deUint8 y = context.getReference().get(offset + (pos * 2) + 1);
5612 
5613 			context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
5614 		}
5615 	}
5616 }
5617 
5618 class RenderVertexStorageImage : public RenderPassCommand
5619 {
5620 public:
5621 				RenderVertexStorageImage	(void) {}
5622 				~RenderVertexStorageImage	(void);
5623 
5624 	const char*	getName						(void) const { return "RenderVertexStorageImage"; }
5625 	void		logPrepare					(TestLog&, size_t) const;
5626 	void		logSubmit					(TestLog&, size_t) const;
5627 	void		prepare						(PrepareRenderPassContext&);
5628 	void		submit						(SubmitContext& context);
5629 	void		verify						(VerifyRenderPassContext&, size_t);
5630 
5631 private:
5632 	PipelineResources				m_resources;
5633 	vk::Move<vk::VkDescriptorPool>	m_descriptorPool;
5634 	vk::Move<vk::VkDescriptorSet>	m_descriptorSet;
5635 	vk::Move<vk::VkImageView>		m_imageView;
5636 };
5637 
5638 RenderVertexStorageImage::~RenderVertexStorageImage (void)
5639 {
5640 }
5641 
5642 void RenderVertexStorageImage::logPrepare (TestLog& log, size_t commandIndex) const
5643 {
5644 	log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render storage image." << TestLog::EndMessage;
5645 }
5646 
5647 void RenderVertexStorageImage::logSubmit (TestLog& log, size_t commandIndex) const
5648 {
5649 	log << TestLog::Message << commandIndex << ":" << getName() << " Render using storage image." << TestLog::EndMessage;
5650 }
5651 
5652 void RenderVertexStorageImage::prepare (PrepareRenderPassContext& context)
5653 {
5654 	const vk::DeviceInterface&					vkd						= context.getContext().getDeviceInterface();
5655 	const vk::VkDevice							device					= context.getContext().getDevice();
5656 	const vk::VkRenderPass						renderPass				= context.getRenderPass();
5657 	const deUint32								subpass					= 0;
5658 	const vk::Unique<vk::VkShaderModule>		vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("storage-image.vert"), 0));
5659 	const vk::Unique<vk::VkShaderModule>		fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
5660 	vector<vk::VkDescriptorSetLayoutBinding>	bindings;
5661 
5662 	{
5663 		const vk::VkDescriptorSetLayoutBinding binding =
5664 		{
5665 			0u,
5666 			vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
5667 			1,
5668 			vk::VK_SHADER_STAGE_VERTEX_BIT,
5669 			DE_NULL
5670 		};
5671 
5672 		bindings.push_back(binding);
5673 	}
5674 
5675 	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
5676 								vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
5677 
5678 	{
5679 		const vk::VkDescriptorPoolSize			poolSizes		=
5680 		{
5681 			vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
5682 			1
5683 		};
5684 		const vk::VkDescriptorPoolCreateInfo	createInfo		=
5685 		{
5686 			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
5687 			DE_NULL,
5688 			vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
5689 
5690 			1u,
5691 			1u,
5692 			&poolSizes,
5693 		};
5694 
5695 		m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
5696 	}
5697 
5698 	{
5699 		const vk::VkDescriptorSetLayout			layout			= *m_resources.descriptorSetLayout;
5700 		const vk::VkDescriptorSetAllocateInfo	allocateInfo	=
5701 		{
5702 			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
5703 			DE_NULL,
5704 
5705 			*m_descriptorPool,
5706 			1,
5707 			&layout
5708 		};
5709 
5710 		m_descriptorSet = vk::allocateDescriptorSet(vkd, device, &allocateInfo);
5711 
5712 		{
5713 			const vk::VkImageViewCreateInfo createInfo =
5714 			{
5715 				vk::VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
5716 				DE_NULL,
5717 				0u,
5718 
5719 				context.getImage(),
5720 				vk::VK_IMAGE_VIEW_TYPE_2D,
5721 				vk::VK_FORMAT_R8G8B8A8_UNORM,
5722 				vk::makeComponentMappingRGBA(),
5723 				{
5724 					vk::VK_IMAGE_ASPECT_COLOR_BIT,
5725 					0u,
5726 					1u,
5727 					0u,
5728 					1u
5729 				}
5730 			};
5731 
5732 			m_imageView = vk::createImageView(vkd, device, &createInfo);
5733 		}
5734 
5735 		{
5736 			const vk::VkDescriptorImageInfo			imageInfo	=
5737 			{
5738 				0,
5739 				*m_imageView,
5740 				context.getImageLayout()
5741 			};
5742 			const vk::VkWriteDescriptorSet			write		=
5743 			{
5744 				vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
5745 				DE_NULL,
5746 				*m_descriptorSet,
5747 				0u,
5748 				0u,
5749 				1u,
5750 				vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
5751 				&imageInfo,
5752 				DE_NULL,
5753 				DE_NULL,
5754 			};
5755 
5756 			vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
5757 		}
5758 	}
5759 }
5760 
5761 void RenderVertexStorageImage::submit (SubmitContext& context)
5762 {
5763 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
5764 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
5765 
5766 	vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
5767 
5768 	vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &(*m_descriptorSet), 0u, DE_NULL);
5769 	vkd.cmdDraw(commandBuffer, context.getImageWidth() * context.getImageHeight() * 2, 1, 0, 0);
5770 }
5771 
5772 void RenderVertexStorageImage::verify (VerifyRenderPassContext& context, size_t)
5773 {
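	// Build the reference target the way the storage-image vertex shader is expected to
	// behave: each texel of the reference image contributes two white points, one at its
	// (r, g) byte values and one at its (b, a) byte values. This is inferred from the
	// width * height * 2 point draw in submit(); the shader source is defined elsewhere.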
5774 	for (int pos = 0; pos < (int)(context.getReferenceImage().getWidth() * context.getReferenceImage().getHeight() * 2); pos++)
5775 	{
5776 		const tcu::IVec3		size	= context.getReferenceImage().getAccess().getSize();
5777 		const tcu::UVec4		pixel	= context.getReferenceImage().getAccess().getPixelUint((pos / 2) / size.x(), (pos / 2) % size.x());
5778 
5779 		if (pos % 2 == 0)
5780 			context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), pixel.x(), pixel.y());
5781 		else
5782 			context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), pixel.z(), pixel.w());
5783 	}
5784 }
5785 
5786 class RenderVertexSampledImage : public RenderPassCommand
5787 {
5788 public:
5789 				RenderVertexSampledImage	(void) {}
5790 				~RenderVertexSampledImage	(void);
5791 
5792 	const char*	getName						(void) const { return "RenderVertexSampledImage"; }
5793 	void		logPrepare					(TestLog&, size_t) const;
5794 	void		logSubmit					(TestLog&, size_t) const;
5795 	void		prepare						(PrepareRenderPassContext&);
5796 	void		submit						(SubmitContext& context);
5797 	void		verify						(VerifyRenderPassContext&, size_t);
5798 
5799 private:
5800 	PipelineResources				m_resources;
5801 	vk::Move<vk::VkDescriptorPool>	m_descriptorPool;
5802 	vk::Move<vk::VkDescriptorSet>	m_descriptorSet;
5803 	vk::Move<vk::VkImageView>		m_imageView;
5804 	vk::Move<vk::VkSampler>			m_sampler;
5805 };
5806 
5807 RenderVertexSampledImage::~RenderVertexSampledImage (void)
5808 {
5809 }
5810 
5811 void RenderVertexSampledImage::logPrepare (TestLog& log, size_t commandIndex) const
5812 {
5813 	log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render sampled image." << TestLog::EndMessage;
5814 }
5815 
5816 void RenderVertexSampledImage::logSubmit (TestLog& log, size_t commandIndex) const
5817 {
5818 	log << TestLog::Message << commandIndex << ":" << getName() << " Render using sampled image." << TestLog::EndMessage;
5819 }
5820 
5821 void RenderVertexSampledImage::prepare (PrepareRenderPassContext& context)
5822 {
5823 	const vk::DeviceInterface&					vkd						= context.getContext().getDeviceInterface();
5824 	const vk::VkDevice							device					= context.getContext().getDevice();
5825 	const vk::VkRenderPass						renderPass				= context.getRenderPass();
5826 	const deUint32								subpass					= 0;
5827 	const vk::Unique<vk::VkShaderModule>		vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("sampled-image.vert"), 0));
5828 	const vk::Unique<vk::VkShaderModule>		fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
5829 	vector<vk::VkDescriptorSetLayoutBinding>	bindings;
5830 
5831 	{
5832 		const vk::VkDescriptorSetLayoutBinding binding =
5833 		{
5834 			0u,
5835 			vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
5836 			1,
5837 			vk::VK_SHADER_STAGE_VERTEX_BIT,
5838 			DE_NULL
5839 		};
5840 
5841 		bindings.push_back(binding);
5842 	}
5843 
5844 	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
5845 								vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
5846 
5847 	{
5848 		const vk::VkDescriptorPoolSize			poolSizes		=
5849 		{
5850 			vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
5851 			1
5852 		};
5853 		const vk::VkDescriptorPoolCreateInfo	createInfo		=
5854 		{
5855 			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
5856 			DE_NULL,
5857 			vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
5858 
5859 			1u,
5860 			1u,
5861 			&poolSizes,
5862 		};
5863 
5864 		m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
5865 	}
5866 
5867 	{
5868 		const vk::VkDescriptorSetLayout			layout			= *m_resources.descriptorSetLayout;
5869 		const vk::VkDescriptorSetAllocateInfo	allocateInfo	=
5870 		{
5871 			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
5872 			DE_NULL,
5873 
5874 			*m_descriptorPool,
5875 			1,
5876 			&layout
5877 		};
5878 
5879 		m_descriptorSet = vk::allocateDescriptorSet(vkd, device, &allocateInfo);
5880 
5881 		{
5882 			const vk::VkImageViewCreateInfo createInfo =
5883 			{
5884 				vk::VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
5885 				DE_NULL,
5886 				0u,
5887 
5888 				context.getImage(),
5889 				vk::VK_IMAGE_VIEW_TYPE_2D,
5890 				vk::VK_FORMAT_R8G8B8A8_UNORM,
5891 				vk::makeComponentMappingRGBA(),
5892 				{
5893 					vk::VK_IMAGE_ASPECT_COLOR_BIT,
5894 					0u,
5895 					1u,
5896 					0u,
5897 					1u
5898 				}
5899 			};
5900 
5901 			m_imageView = vk::createImageView(vkd, device, &createInfo);
5902 		}
5903 
5904 		{
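			// Plain nearest-filtering sampler with normalized coordinates clamped to edge;
			// only the base mip level is used (minLod == maxLod == 0), and anisotropy and
			// depth compare are disabled.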
5905 			const vk::VkSamplerCreateInfo createInfo =
5906 			{
5907 				vk::VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
5908 				DE_NULL,
5909 				0u,
5910 
5911 				vk::VK_FILTER_NEAREST,
5912 				vk::VK_FILTER_NEAREST,
5913 
5914 				vk::VK_SAMPLER_MIPMAP_MODE_LINEAR,
5915 				vk::VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
5916 				vk::VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
5917 				vk::VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
5918 				0.0f,
5919 				VK_FALSE,
5920 				1.0f,
5921 				VK_FALSE,
5922 				vk::VK_COMPARE_OP_ALWAYS,
5923 				0.0f,
5924 				0.0f,
5925 				vk::VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK,
5926 				VK_FALSE
5927 			};
5928 
5929 			m_sampler = vk::createSampler(vkd, device, &createInfo);
5930 		}
5931 
5932 		{
5933 			const vk::VkDescriptorImageInfo			imageInfo	=
5934 			{
5935 				*m_sampler,
5936 				*m_imageView,
5937 				context.getImageLayout()
5938 			};
5939 			const vk::VkWriteDescriptorSet			write		=
5940 			{
5941 				vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
5942 				DE_NULL,
5943 				*m_descriptorSet,
5944 				0u,
5945 				0u,
5946 				1u,
5947 				vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
5948 				&imageInfo,
5949 				DE_NULL,
5950 				DE_NULL,
5951 			};
5952 
5953 			vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
5954 		}
5955 	}
5956 }
5957 
5958 void RenderVertexSampledImage::submit (SubmitContext& context)
5959 {
5960 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
5961 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
5962 
5963 	vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
5964 
5965 	vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &(*m_descriptorSet), 0u, DE_NULL);
5966 	vkd.cmdDraw(commandBuffer, context.getImageWidth() * context.getImageHeight() * 2, 1, 0, 0);
5967 }
5968 
5969 void RenderVertexSampledImage::verify (VerifyRenderPassContext& context, size_t)
5970 {
5971 	for (int pos = 0; pos < (int)(context.getReferenceImage().getWidth() * context.getReferenceImage().getHeight() * 2); pos++)
5972 	{
5973 		const tcu::IVec3	size	= context.getReferenceImage().getAccess().getSize();
5974 		const tcu::UVec4	pixel	= context.getReferenceImage().getAccess().getPixelUint((pos / 2) / size.x(), (pos / 2) % size.x());
5975 
5976 		if (pos % 2 == 0)
5977 			context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), pixel.x(), pixel.y());
5978 		else
5979 			context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), pixel.z(), pixel.w());
5980 	}
5981 }
5982 
5983 class RenderFragmentUniformBuffer : public RenderPassCommand
5984 {
5985 public:
5986 									RenderFragmentUniformBuffer		(void) {}
5987 									~RenderFragmentUniformBuffer	(void);
5988 
5989 	const char*						getName							(void) const { return "RenderFragmentUniformBuffer"; }
5990 	void							logPrepare						(TestLog&, size_t) const;
5991 	void							logSubmit						(TestLog&, size_t) const;
5992 	void							prepare							(PrepareRenderPassContext&);
5993 	void							submit							(SubmitContext& context);
5994 	void							verify							(VerifyRenderPassContext&, size_t);
5995 
5996 protected:
5997 
5998 	deUint32						calculateBufferPartSize			(size_t descriptorSetNdx) const;
5999 
6000 private:
6001 	PipelineResources				m_resources;
6002 	vk::Move<vk::VkDescriptorPool>	m_descriptorPool;
6003 	vector<vk::VkDescriptorSet>		m_descriptorSets;
6004 
6005 	vk::VkDeviceSize				m_bufferSize;
6006 	size_t							m_targetWidth;
6007 	size_t							m_targetHeight;
6008 	deUint32						m_valuesPerPixel;
6009 };
6010 
6011 RenderFragmentUniformBuffer::~RenderFragmentUniformBuffer (void)
6012 {
6013 }
6014 
6015 void RenderFragmentUniformBuffer::logPrepare (TestLog& log, size_t commandIndex) const
6016 {
6017 	log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as uniform buffer." << TestLog::EndMessage;
6018 }
6019 
6020 void RenderFragmentUniformBuffer::logSubmit (TestLog& log, size_t commandIndex) const
6021 {
6022 	log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as uniform buffer." << TestLog::EndMessage;
6023 }
6024 
6025 void RenderFragmentUniformBuffer::prepare (PrepareRenderPassContext& context)
6026 {
6027 	const vk::DeviceInterface&					vkd						= context.getContext().getDeviceInterface();
6028 	const vk::VkDevice							device					= context.getContext().getDevice();
6029 	const vk::VkRenderPass						renderPass				= context.getRenderPass();
6030 	const deUint32								subpass					= 0;
6031 	const vk::Unique<vk::VkShaderModule>		vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-quad.vert"), 0));
6032 	const vk::Unique<vk::VkShaderModule>		fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("uniform-buffer.frag"), 0));
6033 	vector<vk::VkDescriptorSetLayoutBinding>	bindings;
6034 
6035 	// make sure buffer is smaller than MAX_SIZE and is a multiple of 16 (in glsl we use uvec4 to store 16 values)
6036 	m_bufferSize	= de::min(context.getBufferSize(), (vk::VkDeviceSize)MAX_SIZE);
6037 	m_bufferSize	= static_cast<vk::VkDeviceSize>(m_bufferSize / 16u) * 16u;
6038 	m_targetWidth	= context.getTargetWidth();
6039 	m_targetHeight	= context.getTargetHeight();
6040 
6041 	{
6042 		const vk::VkDescriptorSetLayoutBinding binding =
6043 		{
6044 			0u,
6045 			vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
6046 			1,
6047 			vk::VK_SHADER_STAGE_FRAGMENT_BIT,
6048 			DE_NULL
6049 		};
6050 
6051 		bindings.push_back(binding);
6052 	}
6053 	const vk::VkPushConstantRange pushConstantRange =
6054 	{
6055 		vk::VK_SHADER_STAGE_FRAGMENT_BIT,
6056 		0u,
6057 		12u
6058 	};
6059 
6060 	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
6061 								vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 1u, &pushConstantRange, m_resources);
6062 
6063 	{
6064 		const deUint32							descriptorCount	= (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE));
6065 		const vk::VkDescriptorPoolSize			poolSizes		=
6066 		{
6067 			vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
6068 			descriptorCount
6069 		};
6070 		const vk::VkDescriptorPoolCreateInfo	createInfo		=
6071 		{
6072 			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
6073 			DE_NULL,
6074 			vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
6075 
6076 			descriptorCount,
6077 			1u,
6078 			&poolSizes,
6079 		};
6080 
6081 		m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
6082 		m_descriptorSets.resize(descriptorCount);
6083 
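		// Each descriptor set sees at most MAX_UNIFORM_BUFFER_SIZE bytes of the buffer.
		// m_valuesPerPixel is how many 32-bit values every target pixel has to chase so
		// that the whole buffer is covered by the width x height fragments.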
6084 		m_valuesPerPixel = (deUint32)divRoundUp<size_t>(descriptorCount * de::min<size_t>((size_t)m_bufferSize / 4, MAX_UNIFORM_BUFFER_SIZE / 4), m_targetWidth * m_targetHeight);
6085 	}
6086 
6087 	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
6088 	{
6089 		const vk::VkDescriptorSetLayout			layout			= *m_resources.descriptorSetLayout;
6090 		const vk::VkDescriptorSetAllocateInfo	allocateInfo	=
6091 		{
6092 			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
6093 			DE_NULL,
6094 
6095 			*m_descriptorPool,
6096 			1,
6097 			&layout
6098 		};
6099 
6100 		m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();
6101 
6102 		{
6103 			const vk::VkDescriptorBufferInfo		bufferInfo	=
6104 			{
6105 				context.getBuffer(),
6106 				(vk::VkDeviceSize)(descriptorSetNdx * (size_t)MAX_UNIFORM_BUFFER_SIZE),
6107 				calculateBufferPartSize(descriptorSetNdx)
6108 			};
6109 			const vk::VkWriteDescriptorSet			write		=
6110 			{
6111 				vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
6112 				DE_NULL,
6113 				m_descriptorSets[descriptorSetNdx],
6114 				0u,
6115 				0u,
6116 				1u,
6117 				vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
6118 				DE_NULL,
6119 				&bufferInfo,
6120 				DE_NULL,
6121 			};
6122 
6123 			vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
6124 		}
6125 	}
6126 }
6127 
6128 void RenderFragmentUniformBuffer::submit (SubmitContext& context)
6129 {
6130 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
6131 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
6132 
6133 	vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
6134 
6135 	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
6136 	{
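		// Push-constant block matching the 12-byte range declared in prepare(): the
		// descriptor set ("call") index, the values consumed per pixel, and the number
		// of 16-byte uvec4 rows in this buffer slice.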
6137 		const struct
6138 		{
6139 			const deUint32	callId;
6140 			const deUint32	valuesPerPixel;
6141 			const deUint32	bufferSize;
6142 		} callParams =
6143 		{
6144 			(deUint32)descriptorSetNdx,
6145 			m_valuesPerPixel,
6146 			calculateBufferPartSize(descriptorSetNdx) / 16u
6147 		};
6148 
6149 		vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
6150 		vkd.cmdPushConstants(commandBuffer, *m_resources.pipelineLayout, vk::VK_SHADER_STAGE_FRAGMENT_BIT, 0u, (deUint32)sizeof(callParams), &callParams);
6151 		vkd.cmdDraw(commandBuffer, 6, 1, 0, 0);
6152 	}
6153 }
6154 
6155 void RenderFragmentUniformBuffer::verify (VerifyRenderPassContext& context, size_t)
6156 {
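	// Recompute on the CPU the value chain the uniform-buffer.frag shader is expected to
	// follow: starting from the pixel id, repeatedly fetch four bytes from the reference
	// buffer and pack them into the next index; the final value gives the expected colour.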
6157 	const size_t	arrayIntSize	= MAX_UNIFORM_BUFFER_SIZE / sizeof(deUint32);
6158 
6159 	for (int y = 0; y < context.getReferenceTarget().getSize().y(); y++)
6160 	for (int x = 0; x < context.getReferenceTarget().getSize().x(); x++)
6161 	{
6162 		const deUint32	id						= (deUint32)y * 256u + (deUint32)x;
6163 		const size_t	firstDescriptorSetNdx	= de::min<size_t>(id / (arrayIntSize / m_valuesPerPixel), m_descriptorSets.size() - 1);
6164 
6165 		for (size_t descriptorSetNdx = firstDescriptorSetNdx; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
6166 		{
6167 			const size_t	offset	= descriptorSetNdx * MAX_UNIFORM_BUFFER_SIZE;
6168 			const deUint32	callId	= (deUint32)descriptorSetNdx;
6169 			const deUint32	count	= calculateBufferPartSize(descriptorSetNdx) / 16u;
6170 
6171 			if (id < callId * (arrayIntSize / m_valuesPerPixel))
6172 				continue;
6173 			else
6174 			{
6175 				deUint32 value = id;
6176 
6177 				for (deUint32 i = 0; i < m_valuesPerPixel; i++)
6178 				{
6179 					// in the shader the UBO holds up to 64 uvec4 items; each uvec4 packs 16 byte-sized values
6180 					size_t index = offset + size_t((value % count) * 16u) + size_t((value % 4u) * 4u);
6181 					value	= (((deUint32)context.getReference().get(index + 0)))
6182 							| (((deUint32)context.getReference().get(index + 1)) << 8u)
6183 							| (((deUint32)context.getReference().get(index + 2)) << 16u)
6184 							| (((deUint32)context.getReference().get(index + 3)) << 24u);
6185 				}
6186 				const UVec4	vec	((value >>  0u) & 0xFFu,
6187 								 (value >>  8u) & 0xFFu,
6188 								 (value >> 16u) & 0xFFu,
6189 								 (value >> 24u) & 0xFFu);
6190 
6191 				context.getReferenceTarget().getAccess().setPixel(vec.asFloat() / Vec4(255.0f), x, y);
6192 			}
6193 		}
6194 	}
6195 }
6196 
6197 deUint32 RenderFragmentUniformBuffer::calculateBufferPartSize(size_t descriptorSetNdx) const
6198 {
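	// Byte size of the buffer slice bound to descriptor set descriptorSetNdx;
	// only the last slice may be smaller than MAX_UNIFORM_BUFFER_SIZE.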
6199 	deUint32 size = static_cast<deUint32>(m_bufferSize) - static_cast<deUint32>(descriptorSetNdx) * MAX_UNIFORM_BUFFER_SIZE;
6200 	if (size < MAX_UNIFORM_BUFFER_SIZE)
6201 		return size;
6202 	return MAX_UNIFORM_BUFFER_SIZE;
6203 }
6204 
6205 class RenderFragmentStorageBuffer : public RenderPassCommand
6206 {
6207 public:
6208 									RenderFragmentStorageBuffer		(void) {}
6209 									~RenderFragmentStorageBuffer	(void);
6210 
6211 	const char*						getName							(void) const { return "RenderFragmentStorageBuffer"; }
6212 	void							logPrepare						(TestLog&, size_t) const;
6213 	void							logSubmit						(TestLog&, size_t) const;
6214 	void							prepare							(PrepareRenderPassContext&);
6215 	void							submit							(SubmitContext& context);
6216 	void							verify							(VerifyRenderPassContext&, size_t);
6217 
6218 private:
6219 	PipelineResources				m_resources;
6220 	vk::Move<vk::VkDescriptorPool>	m_descriptorPool;
6221 	vk::Move<vk::VkDescriptorSet>	m_descriptorSet;
6222 
6223 	vk::VkDeviceSize				m_bufferSize;
6224 	size_t							m_targetWidth;
6225 	size_t							m_targetHeight;
6226 };
6227 
6228 RenderFragmentStorageBuffer::~RenderFragmentStorageBuffer (void)
6229 {
6230 }
6231 
6232 void RenderFragmentStorageBuffer::logPrepare (TestLog& log, size_t commandIndex) const
6233 {
6234 	log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline to render buffer as storage buffer." << TestLog::EndMessage;
6235 }
6236 
6237 void RenderFragmentStorageBuffer::logSubmit (TestLog& log, size_t commandIndex) const
6238 {
6239 	log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as storage buffer." << TestLog::EndMessage;
6240 }
6241 
6242 void RenderFragmentStorageBuffer::prepare (PrepareRenderPassContext& context)
6243 {
6244 	const vk::DeviceInterface&					vkd						= context.getContext().getDeviceInterface();
6245 	const vk::VkDevice							device					= context.getContext().getDevice();
6246 	const vk::VkRenderPass						renderPass				= context.getRenderPass();
6247 	const deUint32								subpass					= 0;
6248 	const vk::Unique<vk::VkShaderModule>		vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-quad.vert"), 0));
6249 	const vk::Unique<vk::VkShaderModule>		fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("storage-buffer.frag"), 0));
6250 	vector<vk::VkDescriptorSetLayoutBinding>	bindings;
6251 
6252 	// make sure buffer size is a multiple of 16 (in glsl we use uvec4 to store 16 values)
6253 	m_bufferSize	= context.getBufferSize();
6254 	m_bufferSize	= static_cast<vk::VkDeviceSize>(m_bufferSize / 16u) * 16u;
6255 	m_targetWidth	= context.getTargetWidth();
6256 	m_targetHeight	= context.getTargetHeight();
6257 
6258 	{
6259 		const vk::VkDescriptorSetLayoutBinding binding =
6260 		{
6261 			0u,
6262 			vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
6263 			1,
6264 			vk::VK_SHADER_STAGE_FRAGMENT_BIT,
6265 			DE_NULL
6266 		};
6267 
6268 		bindings.push_back(binding);
6269 	}
6270 	const vk::VkPushConstantRange pushConstantRange =
6271 	{
6272 		vk::VK_SHADER_STAGE_FRAGMENT_BIT,
6273 		0u,
6274 		12u
6275 	};
6276 
6277 	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
6278 								vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 1u, &pushConstantRange, m_resources);
6279 
6280 	{
6281 		const deUint32							descriptorCount	= 1;
6282 		const vk::VkDescriptorPoolSize			poolSizes		=
6283 		{
6284 			vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
6285 			descriptorCount
6286 		};
6287 		const vk::VkDescriptorPoolCreateInfo	createInfo		=
6288 		{
6289 			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
6290 			DE_NULL,
6291 			vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
6292 
6293 			descriptorCount,
6294 			1u,
6295 			&poolSizes,
6296 		};
6297 
6298 		m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
6299 	}
6300 
6301 	{
6302 		const vk::VkDescriptorSetLayout			layout			= *m_resources.descriptorSetLayout;
6303 		const vk::VkDescriptorSetAllocateInfo	allocateInfo	=
6304 		{
6305 			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
6306 			DE_NULL,
6307 
6308 			*m_descriptorPool,
6309 			1,
6310 			&layout
6311 		};
6312 
6313 		m_descriptorSet = vk::allocateDescriptorSet(vkd, device, &allocateInfo);
6314 
6315 		{
6316 			const vk::VkDescriptorBufferInfo	bufferInfo	=
6317 			{
6318 				context.getBuffer(),
6319 				0u,
6320 				m_bufferSize
6321 			};
6322 			const vk::VkWriteDescriptorSet		write		=
6323 			{
6324 				vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
6325 				DE_NULL,
6326 				m_descriptorSet.get(),
6327 				0u,
6328 				0u,
6329 				1u,
6330 				vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
6331 				DE_NULL,
6332 				&bufferInfo,
6333 				DE_NULL,
6334 			};
6335 
6336 			vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
6337 		}
6338 	}
6339 }
6340 
6341 void RenderFragmentStorageBuffer::submit (SubmitContext& context)
6342 {
6343 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
6344 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
6345 
6346 	vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
6347 
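	// Two 32-bit push constants (within the 12-byte range declared in prepare()):
	// how many values each pixel has to chase and the total buffer size in bytes.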
6348 	const struct
6349 	{
6350 		const deUint32	valuesPerPixel;
6351 		const deUint32	bufferSize;
6352 	} callParams =
6353 	{
6354 		(deUint32)divRoundUp<vk::VkDeviceSize>(m_bufferSize / 4, m_targetWidth * m_targetHeight),
6355 		(deUint32)m_bufferSize
6356 	};
6357 
6358 	vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSet.get(), 0u, DE_NULL);
6359 	vkd.cmdPushConstants(commandBuffer, *m_resources.pipelineLayout, vk::VK_SHADER_STAGE_FRAGMENT_BIT, 0u, (deUint32)sizeof(callParams), &callParams);
6360 	vkd.cmdDraw(commandBuffer, 6, 1, 0, 0);
6361 }
6362 
6363 void RenderFragmentStorageBuffer::verify (VerifyRenderPassContext& context, size_t)
6364 {
6365 	const deUint32	valuesPerPixel	= (deUint32)divRoundUp<vk::VkDeviceSize>(m_bufferSize / 4, m_targetWidth * m_targetHeight);
6366 
6367 	for (int y = 0; y < context.getReferenceTarget().getSize().y(); y++)
6368 	for (int x = 0; x < context.getReferenceTarget().getSize().x(); x++)
6369 	{
6370 		const deUint32	id		= (deUint32)y * 256u + (deUint32)x;
6371 
6372 		deUint32 value = id;
6373 
6374 		for (deUint32 i = 0; i < valuesPerPixel; i++)
6375 		{
6376 			value	= (((deUint32)context.getReference().get((size_t)(value % (m_bufferSize / sizeof(deUint32))) * 4 + 0)) << 0u)
6377 					| (((deUint32)context.getReference().get((size_t)(value % (m_bufferSize / sizeof(deUint32))) * 4 + 1)) << 8u)
6378 					| (((deUint32)context.getReference().get((size_t)(value % (m_bufferSize / sizeof(deUint32))) * 4 + 2)) << 16u)
6379 					| (((deUint32)context.getReference().get((size_t)(value % (m_bufferSize / sizeof(deUint32))) * 4 + 3)) << 24u);
6380 
6381 		}
6382 		const UVec4	vec	((value >>  0u) & 0xFFu,
6383 						 (value >>  8u) & 0xFFu,
6384 						 (value >> 16u) & 0xFFu,
6385 						 (value >> 24u) & 0xFFu);
6386 
6387 		context.getReferenceTarget().getAccess().setPixel(vec.asFloat() / Vec4(255.0f), x, y);
6388 	}
6389 }
6390 
6391 class RenderFragmentUniformTexelBuffer : public RenderPassCommand
6392 {
6393 public:
6394 									RenderFragmentUniformTexelBuffer	(void) {}
6395 									~RenderFragmentUniformTexelBuffer	(void);
6396 
6397 	const char*						getName								(void) const { return "RenderFragmentUniformTexelBuffer"; }
6398 	void							logPrepare							(TestLog&, size_t) const;
6399 	void							logSubmit							(TestLog&, size_t) const;
6400 	void							prepare								(PrepareRenderPassContext&);
6401 	void							submit								(SubmitContext& context);
6402 	void							verify								(VerifyRenderPassContext&, size_t);
6403 
6404 private:
6405 	PipelineResources				m_resources;
6406 	vk::Move<vk::VkDescriptorPool>	m_descriptorPool;
6407 	vector<vk::VkDescriptorSet>		m_descriptorSets;
6408 	vector<vk::VkBufferView>		m_bufferViews;
6409 
6410 	const vk::DeviceInterface*		m_vkd;
6411 	vk::VkDevice					m_device;
6412 	vk::VkDeviceSize				m_bufferSize;
6413 	deUint32						m_maxUniformTexelCount;
6414 	size_t							m_targetWidth;
6415 	size_t							m_targetHeight;
6416 };
6417 
6418 RenderFragmentUniformTexelBuffer::~RenderFragmentUniformTexelBuffer (void)
6419 {
6420 	for (size_t bufferViewNdx = 0; bufferViewNdx < m_bufferViews.size(); bufferViewNdx++)
6421 	{
6422 		if (!!m_bufferViews[bufferViewNdx])
6423 		{
6424 			m_vkd->destroyBufferView(m_device, m_bufferViews[bufferViewNdx], DE_NULL);
6425 			m_bufferViews[bufferViewNdx] = (vk::VkBufferView)0;
6426 		}
6427 	}
6428 }
6429 
6430 void RenderFragmentUniformTexelBuffer::logPrepare (TestLog& log, size_t commandIndex) const
6431 {
6432 	log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as uniform texel buffer." << TestLog::EndMessage;
6433 }
6434 
6435 void RenderFragmentUniformTexelBuffer::logSubmit (TestLog& log, size_t commandIndex) const
6436 {
6437 	log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as uniform texel buffer." << TestLog::EndMessage;
6438 }
6439 
6440 void RenderFragmentUniformTexelBuffer::prepare (PrepareRenderPassContext& context)
6441 {
6442 	const vk::InstanceInterface&				vki						= context.getContext().getInstanceInterface();
6443 	const vk::VkPhysicalDevice					physicalDevice			= context.getContext().getPhysicalDevice();
6444 	const vk::DeviceInterface&					vkd						= context.getContext().getDeviceInterface();
6445 	const vk::VkDevice							device					= context.getContext().getDevice();
6446 	const vk::VkRenderPass						renderPass				= context.getRenderPass();
6447 	const deUint32								subpass					= 0;
6448 	const vk::Unique<vk::VkShaderModule>		vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-quad.vert"), 0));
6449 	const vk::Unique<vk::VkShaderModule>		fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("uniform-texel-buffer.frag"), 0));
6450 	vector<vk::VkDescriptorSetLayoutBinding>	bindings;
6451 
6452 	m_device				= device;
6453 	m_vkd					= &vkd;
6454 	m_bufferSize			= context.getBufferSize();
6455 	m_maxUniformTexelCount	= vk::getPhysicalDeviceProperties(vki, physicalDevice).limits.maxTexelBufferElements;
6456 	m_targetWidth			= context.getTargetWidth();
6457 	m_targetHeight			= context.getTargetHeight();
6458 
6459 	{
6460 		const vk::VkDescriptorSetLayoutBinding binding =
6461 		{
6462 			0u,
6463 			vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
6464 			1,
6465 			vk::VK_SHADER_STAGE_FRAGMENT_BIT,
6466 			DE_NULL
6467 		};
6468 
6469 		bindings.push_back(binding);
6470 	}
6471 	const vk::VkPushConstantRange pushConstantRange =
6472 	{
6473 		vk::VK_SHADER_STAGE_FRAGMENT_BIT,
6474 		0u,
6475 		12u
6476 	};
6477 
6478 	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
6479 								vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 1u, &pushConstantRange, m_resources);
6480 
6481 	{
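		// The buffer is split into R32_UINT texel-buffer views of at most
		// maxTexelBufferElements texels (4 bytes each); size the pool for one
		// descriptor set per view.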
6482 		const deUint32							descriptorCount	= (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)m_maxUniformTexelCount * 4));
6483 		const vk::VkDescriptorPoolSize			poolSizes		=
6484 		{
6485 			vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
6486 			descriptorCount
6487 		};
6488 		const vk::VkDescriptorPoolCreateInfo	createInfo		=
6489 		{
6490 			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
6491 			DE_NULL,
6492 			vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
6493 
6494 			descriptorCount,
6495 			1u,
6496 			&poolSizes,
6497 		};
6498 
6499 		m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
6500 		m_descriptorSets.resize(descriptorCount, (vk::VkDescriptorSet)0);
6501 		m_bufferViews.resize(descriptorCount, (vk::VkBufferView)0);
6502 	}
6503 
6504 	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
6505 	{
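		// Number of whole 32-bit texels in this slice; only the last view may cover
		// fewer than m_maxUniformTexelCount texels.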
6506 		const deUint32							count			= (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxUniformTexelCount * 4
6507 																? m_bufferSize - descriptorSetNdx * m_maxUniformTexelCount * 4
6508 																: m_maxUniformTexelCount * 4) / 4;
6509 		const vk::VkDescriptorSetLayout			layout			= *m_resources.descriptorSetLayout;
6510 		const vk::VkDescriptorSetAllocateInfo	allocateInfo	=
6511 		{
6512 			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
6513 			DE_NULL,
6514 
6515 			*m_descriptorPool,
6516 			1,
6517 			&layout
6518 		};
6519 
6520 		m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();
6521 
6522 		{
6523 			const vk::VkBufferViewCreateInfo createInfo =
6524 			{
6525 				vk::VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
6526 				DE_NULL,
6527 				0u,
6528 
6529 				context.getBuffer(),
6530 				vk::VK_FORMAT_R32_UINT,
6531 				descriptorSetNdx * m_maxUniformTexelCount * 4,
6532 				count * 4
6533 			};
6534 
6535 			VK_CHECK(vkd.createBufferView(device, &createInfo, DE_NULL, &m_bufferViews[descriptorSetNdx]));
6536 		}
6537 
6538 		{
6539 			const vk::VkWriteDescriptorSet			write		=
6540 			{
6541 				vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
6542 				DE_NULL,
6543 				m_descriptorSets[descriptorSetNdx],
6544 				0u,
6545 				0u,
6546 				1u,
6547 				vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
6548 				DE_NULL,
6549 				DE_NULL,
6550 				&m_bufferViews[descriptorSetNdx]
6551 			};
6552 
6553 			vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
6554 		}
6555 	}
6556 }
6557 
6558 void RenderFragmentUniformTexelBuffer::submit (SubmitContext& context)
6559 {
6560 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
6561 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
6562 
6563 	vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
6564 
6565 	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
6566 	{
6567 		const struct
6568 		{
6569 			const deUint32	callId;
6570 			const deUint32	valuesPerPixel;
6571 			const deUint32	maxUniformTexelCount;
6572 		} callParams =
6573 		{
6574 			(deUint32)descriptorSetNdx,
6575 			(deUint32)divRoundUp<size_t>(m_descriptorSets.size() * de::min<size_t>((size_t)m_bufferSize / 4, m_maxUniformTexelCount), m_targetWidth * m_targetHeight),
6576 			m_maxUniformTexelCount
6577 		};
6578 
6579 		vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
6580 		vkd.cmdPushConstants(commandBuffer, *m_resources.pipelineLayout, vk::VK_SHADER_STAGE_FRAGMENT_BIT, 0u, (deUint32)sizeof(callParams), &callParams);
6581 		vkd.cmdDraw(commandBuffer, 6, 1, 0, 0);
6582 	}
6583 }
6584 
6585 void RenderFragmentUniformTexelBuffer::verify (VerifyRenderPassContext& context, size_t)
6586 {
6587 	const deUint32	valuesPerPixel	= (deUint32)divRoundUp<size_t>(m_descriptorSets.size() * de::min<size_t>((size_t)m_bufferSize / 4, m_maxUniformTexelCount), m_targetWidth * m_targetHeight);
6588 
6589 	for (int y = 0; y < context.getReferenceTarget().getSize().y(); y++)
6590 	for (int x = 0; x < context.getReferenceTarget().getSize().x(); x++)
6591 	{
6592 		const size_t firstDescriptorSetNdx = de::min<size_t>((y * 256u + x) / (m_maxUniformTexelCount / valuesPerPixel), m_descriptorSets.size() - 1);
6593 
6594 		for (size_t descriptorSetNdx = firstDescriptorSetNdx; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
6595 		{
6596 			const size_t	offset	= descriptorSetNdx * m_maxUniformTexelCount * 4;
6597 			const deUint32	callId	= (deUint32)descriptorSetNdx;
6598 
6599 			const deUint32	id		= (deUint32)y * 256u + (deUint32)x;
6600 			const deUint32	count	= (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxUniformTexelCount * 4
6601 									? m_bufferSize - descriptorSetNdx * m_maxUniformTexelCount * 4
6602 									: m_maxUniformTexelCount * 4) / 4;
6603 
6604 			if (y * 256u + x < callId * (m_maxUniformTexelCount / valuesPerPixel))
6605 				continue;
6606 			else
6607 			{
6608 				deUint32 value = id;
6609 
6610 				for (deUint32 i = 0; i < valuesPerPixel; i++)
6611 				{
6612 					value	=  ((deUint32)context.getReference().get(offset + (value % count) * 4 + 0))
6613 							| (((deUint32)context.getReference().get(offset + (value % count) * 4 + 1)) << 8u)
6614 							| (((deUint32)context.getReference().get(offset + (value % count) * 4 + 2)) << 16u)
6615 							| (((deUint32)context.getReference().get(offset + (value % count) * 4 + 3)) << 24u);
6616 
6617 				}
6618 				const UVec4	vec	((value >>  0u) & 0xFFu,
6619 								 (value >>  8u) & 0xFFu,
6620 								 (value >> 16u) & 0xFFu,
6621 								 (value >> 24u) & 0xFFu);
6622 
6623 				context.getReferenceTarget().getAccess().setPixel(vec.asFloat() / Vec4(255.0f), x, y);
6624 			}
6625 		}
6626 	}
6627 }
6628 
6629 class RenderFragmentStorageTexelBuffer : public RenderPassCommand
6630 {
6631 public:
6632 									RenderFragmentStorageTexelBuffer	(void) {}
6633 									~RenderFragmentStorageTexelBuffer	(void);
6634 
6635 	const char*						getName								(void) const { return "RenderFragmentStorageTexelBuffer"; }
6636 	void							logPrepare							(TestLog&, size_t) const;
6637 	void							logSubmit							(TestLog&, size_t) const;
6638 	void							prepare								(PrepareRenderPassContext&);
6639 	void							submit								(SubmitContext& context);
6640 	void							verify								(VerifyRenderPassContext&, size_t);
6641 
6642 private:
6643 	PipelineResources				m_resources;
6644 	vk::Move<vk::VkDescriptorPool>	m_descriptorPool;
6645 	vector<vk::VkDescriptorSet>		m_descriptorSets;
6646 	vector<vk::VkBufferView>		m_bufferViews;
6647 
6648 	const vk::DeviceInterface*		m_vkd;
6649 	vk::VkDevice					m_device;
6650 	vk::VkDeviceSize				m_bufferSize;
6651 	deUint32						m_maxStorageTexelCount;
6652 	size_t							m_targetWidth;
6653 	size_t							m_targetHeight;
6654 };
6655 
6656 RenderFragmentStorageTexelBuffer::~RenderFragmentStorageTexelBuffer (void)
6657 {
6658 	for (size_t bufferViewNdx = 0; bufferViewNdx < m_bufferViews.size(); bufferViewNdx++)
6659 	{
6660 		if (!!m_bufferViews[bufferViewNdx])
6661 		{
6662 			m_vkd->destroyBufferView(m_device, m_bufferViews[bufferViewNdx], DE_NULL);
6663 			m_bufferViews[bufferViewNdx] = (vk::VkBufferView)0;
6664 		}
6665 	}
6666 }
6667 
6668 void RenderFragmentStorageTexelBuffer::logPrepare (TestLog& log, size_t commandIndex) const
6669 {
6670 	log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as storage texel buffer." << TestLog::EndMessage;
6671 }
6672 
6673 void RenderFragmentStorageTexelBuffer::logSubmit (TestLog& log, size_t commandIndex) const
6674 {
6675 	log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as storage texel buffer." << TestLog::EndMessage;
6676 }
6677 
6678 void RenderFragmentStorageTexelBuffer::prepare (PrepareRenderPassContext& context)
6679 {
6680 	const vk::InstanceInterface&				vki						= context.getContext().getInstanceInterface();
6681 	const vk::VkPhysicalDevice					physicalDevice			= context.getContext().getPhysicalDevice();
6682 	const vk::DeviceInterface&					vkd						= context.getContext().getDeviceInterface();
6683 	const vk::VkDevice							device					= context.getContext().getDevice();
6684 	const vk::VkRenderPass						renderPass				= context.getRenderPass();
6685 	const deUint32								subpass					= 0;
6686 	const vk::Unique<vk::VkShaderModule>		vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-quad.vert"), 0));
6687 	const vk::Unique<vk::VkShaderModule>		fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("storage-texel-buffer.frag"), 0));
6688 	vector<vk::VkDescriptorSetLayoutBinding>	bindings;
6689 
6690 	m_device				= device;
6691 	m_vkd					= &vkd;
6692 	m_bufferSize			= context.getBufferSize();
6693 	m_maxStorageTexelCount	= vk::getPhysicalDeviceProperties(vki, physicalDevice).limits.maxTexelBufferElements;
6694 	m_targetWidth			= context.getTargetWidth();
6695 	m_targetHeight			= context.getTargetHeight();
6696 
6697 	{
6698 		const vk::VkDescriptorSetLayoutBinding binding =
6699 		{
6700 			0u,
6701 			vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
6702 			1,
6703 			vk::VK_SHADER_STAGE_FRAGMENT_BIT,
6704 			DE_NULL
6705 		};
6706 
6707 		bindings.push_back(binding);
6708 	}
6709 	const vk::VkPushConstantRange pushConstantRange =
6710 	{
6711 		vk::VK_SHADER_STAGE_FRAGMENT_BIT,
6712 		0u,
6713 		16u
6714 	};
6715 
6716 	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
6717 								vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 1u, &pushConstantRange, m_resources);
6718 
6719 	{
6720 		const deUint32							descriptorCount	= (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)m_maxStorageTexelCount * 4));
6721 		const vk::VkDescriptorPoolSize			poolSizes		=
6722 		{
6723 			vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
6724 			descriptorCount
6725 		};
6726 		const vk::VkDescriptorPoolCreateInfo	createInfo		=
6727 		{
6728 			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
6729 			DE_NULL,
6730 			vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
6731 
6732 			descriptorCount,
6733 			1u,
6734 			&poolSizes,
6735 		};
6736 
6737 		m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
6738 		m_descriptorSets.resize(descriptorCount, (vk::VkDescriptorSet)0);
6739 		m_bufferViews.resize(descriptorCount, (vk::VkBufferView)0);
6740 	}
6741 
6742 	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
6743 	{
6744 		const deUint32							count			= (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxStorageTexelCount * 4
6745 																? m_bufferSize - descriptorSetNdx * m_maxStorageTexelCount * 4
6746 																: m_maxStorageTexelCount * 4) / 4;
6747 		const vk::VkDescriptorSetLayout			layout			= *m_resources.descriptorSetLayout;
6748 		const vk::VkDescriptorSetAllocateInfo	allocateInfo	=
6749 		{
6750 			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
6751 			DE_NULL,
6752 
6753 			*m_descriptorPool,
6754 			1,
6755 			&layout
6756 		};
6757 
6758 		m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();
6759 
6760 		{
6761 			const vk::VkBufferViewCreateInfo createInfo =
6762 			{
6763 				vk::VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
6764 				DE_NULL,
6765 				0u,
6766 
6767 				context.getBuffer(),
6768 				vk::VK_FORMAT_R32_UINT,
6769 				descriptorSetNdx * m_maxStorageTexelCount * 4,
6770 				count * 4
6771 			};
6772 
6773 			VK_CHECK(vkd.createBufferView(device, &createInfo, DE_NULL, &m_bufferViews[descriptorSetNdx]));
6774 		}
6775 
6776 		{
6777 			const vk::VkWriteDescriptorSet			write		=
6778 			{
6779 				vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
6780 				DE_NULL,
6781 				m_descriptorSets[descriptorSetNdx],
6782 				0u,
6783 				0u,
6784 				1u,
6785 				vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
6786 				DE_NULL,
6787 				DE_NULL,
6788 				&m_bufferViews[descriptorSetNdx]
6789 			};
6790 
6791 			vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
6792 		}
6793 	}
6794 }
6795 
6796 void RenderFragmentStorageTexelBuffer::submit (SubmitContext& context)
6797 {
6798 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
6799 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
6800 
6801 	vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
6802 
6803 	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
6804 	{
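		// Push-constant block matching the 16-byte range declared in prepare(): the call
		// index, values chased per pixel, the per-view texel limit, and the texel count
		// of this slice.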
6805 		const struct
6806 		{
6807 			const deUint32	callId;
6808 			const deUint32	valuesPerPixel;
6809 			const deUint32	maxStorageTexelCount;
6810 			const deUint32	width;
6811 		} callParams =
6812 		{
6813 			(deUint32)descriptorSetNdx,
6814 			(deUint32)divRoundUp<size_t>(m_descriptorSets.size() * de::min<size_t>(m_maxStorageTexelCount, (size_t)m_bufferSize / 4), m_targetWidth * m_targetHeight),
6815 			m_maxStorageTexelCount,
6816 			(deUint32)(m_bufferSize < (descriptorSetNdx + 1u) * m_maxStorageTexelCount * 4u
6817 								? m_bufferSize - descriptorSetNdx * m_maxStorageTexelCount * 4u
6818 								: m_maxStorageTexelCount * 4u) / 4u
6819 		};
6820 
6821 		vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
6822 		vkd.cmdPushConstants(commandBuffer, *m_resources.pipelineLayout, vk::VK_SHADER_STAGE_FRAGMENT_BIT, 0u, (deUint32)sizeof(callParams), &callParams);
6823 		vkd.cmdDraw(commandBuffer, 6, 1, 0, 0);
6824 	}
6825 }
6826 
6827 void RenderFragmentStorageTexelBuffer::verify (VerifyRenderPassContext& context, size_t)
6828 {
6829 	const deUint32	valuesPerPixel	= (deUint32)divRoundUp<size_t>(m_descriptorSets.size() * de::min<size_t>(m_maxStorageTexelCount, (size_t)m_bufferSize / 4), m_targetWidth * m_targetHeight);
6830 
6831 	for (int y = 0; y < context.getReferenceTarget().getSize().y(); y++)
6832 	for (int x = 0; x < context.getReferenceTarget().getSize().x(); x++)
6833 	{
6834 		const size_t firstDescriptorSetNdx = de::min<size_t>((y * 256u + x) / (m_maxStorageTexelCount / valuesPerPixel), m_descriptorSets.size() - 1);
6835 
6836 		for (size_t descriptorSetNdx = firstDescriptorSetNdx; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
6837 		{
6838 			const size_t	offset	= descriptorSetNdx * m_maxStorageTexelCount * 4;
6839 			const deUint32	callId	= (deUint32)descriptorSetNdx;
6840 
6841 			const deUint32	id		= (deUint32)y * 256u + (deUint32)x;
6842 			const deUint32	count	= (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxStorageTexelCount * 4
6843 									? m_bufferSize - descriptorSetNdx * m_maxStorageTexelCount * 4
6844 									: m_maxStorageTexelCount * 4) / 4;
6845 
6846 			if (y * 256u + x < callId * (m_maxStorageTexelCount / valuesPerPixel))
6847 				continue;
6848 			else
6849 			{
6850 				deUint32 value = id;
6851 
6852 				for (deUint32 i = 0; i < valuesPerPixel; i++)
6853 				{
6854 					value	= ((deUint32)context.getReference().get(offset + (value % count) * 4 + 0))
6855 							| (((deUint32)context.getReference().get(offset + (value % count) * 4 + 1)) << 8u)
6856 							| (((deUint32)context.getReference().get(offset + (value % count) * 4 + 2)) << 16u)
6857 							| (((deUint32)context.getReference().get(offset + (value % count) * 4 + 3)) << 24u);
6858 
6859 				}
6860 				const UVec4	vec	((value >>  0u) & 0xFFu,
6861 								 (value >>  8u) & 0xFFu,
6862 								 (value >> 16u) & 0xFFu,
6863 								 (value >> 24u) & 0xFFu);
6864 
6865 				context.getReferenceTarget().getAccess().setPixel(vec.asFloat() / Vec4(255.0f), x, y);
6866 			}
6867 		}
6868 	}
6869 }
6870 
6871 class RenderFragmentStorageImage : public RenderPassCommand
6872 {
6873 public:
6874 									RenderFragmentStorageImage	(void) {}
6875 									~RenderFragmentStorageImage	(void);
6876 
6877 	const char*						getName						(void) const { return "RenderFragmentStorageImage"; }
6878 	void							logPrepare					(TestLog&, size_t) const;
6879 	void							logSubmit					(TestLog&, size_t) const;
6880 	void							prepare						(PrepareRenderPassContext&);
6881 	void							submit						(SubmitContext& context);
6882 	void							verify						(VerifyRenderPassContext&, size_t);
6883 
6884 private:
6885 	PipelineResources				m_resources;
6886 	vk::Move<vk::VkDescriptorPool>	m_descriptorPool;
6887 	vk::Move<vk::VkDescriptorSet>	m_descriptorSet;
6888 	vk::Move<vk::VkImageView>		m_imageView;
6889 };
6890 
6891 RenderFragmentStorageImage::~RenderFragmentStorageImage (void)
6892 {
6893 }
6894 
6895 void RenderFragmentStorageImage::logPrepare (TestLog& log, size_t commandIndex) const
6896 {
6897 	log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render storage image." << TestLog::EndMessage;
6898 }
6899 
6900 void RenderFragmentStorageImage::logSubmit (TestLog& log, size_t commandIndex) const
6901 {
6902 	log << TestLog::Message << commandIndex << ":" << getName() << " Render using storage image." << TestLog::EndMessage;
6903 }
6904 
6905 void RenderFragmentStorageImage::prepare (PrepareRenderPassContext& context)
6906 {
6907 	const vk::DeviceInterface&					vkd						= context.getContext().getDeviceInterface();
6908 	const vk::VkDevice							device					= context.getContext().getDevice();
6909 	const vk::VkRenderPass						renderPass				= context.getRenderPass();
6910 	const deUint32								subpass					= 0;
6911 	const vk::Unique<vk::VkShaderModule>		vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-quad.vert"), 0));
6912 	const vk::Unique<vk::VkShaderModule>		fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("storage-image.frag"), 0));
6913 	vector<vk::VkDescriptorSetLayoutBinding>	bindings;
6914 
6915 	{
6916 		const vk::VkDescriptorSetLayoutBinding binding =
6917 		{
6918 			0u,
6919 			vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
6920 			1,
6921 			vk::VK_SHADER_STAGE_FRAGMENT_BIT,
6922 			DE_NULL
6923 		};
6924 
6925 		bindings.push_back(binding);
6926 	}
6927 
6928 	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
6929 								vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 0u, DE_NULL, m_resources);
6930 
6931 	{
6932 		const vk::VkDescriptorPoolSize			poolSizes		=
6933 		{
6934 			vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
6935 			1
6936 		};
6937 		const vk::VkDescriptorPoolCreateInfo	createInfo		=
6938 		{
6939 			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
6940 			DE_NULL,
6941 			vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
6942 
6943 			1u,
6944 			1u,
6945 			&poolSizes,
6946 		};
6947 
6948 		m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
6949 	}
6950 
6951 	{
6952 		const vk::VkDescriptorSetLayout			layout			= *m_resources.descriptorSetLayout;
6953 		const vk::VkDescriptorSetAllocateInfo	allocateInfo	=
6954 		{
6955 			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
6956 			DE_NULL,
6957 
6958 			*m_descriptorPool,
6959 			1,
6960 			&layout
6961 		};
6962 
6963 		m_descriptorSet = vk::allocateDescriptorSet(vkd, device, &allocateInfo);
6964 
6965 		{
6966 			const vk::VkImageViewCreateInfo createInfo =
6967 			{
6968 				vk::VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
6969 				DE_NULL,
6970 				0u,
6971 
6972 				context.getImage(),
6973 				vk::VK_IMAGE_VIEW_TYPE_2D,
6974 				vk::VK_FORMAT_R8G8B8A8_UNORM,
6975 				vk::makeComponentMappingRGBA(),
6976 				{
6977 					vk::VK_IMAGE_ASPECT_COLOR_BIT,
6978 					0u,
6979 					1u,
6980 					0u,
6981 					1u
6982 				}
6983 			};
6984 
6985 			m_imageView = vk::createImageView(vkd, device, &createInfo);
6986 		}
6987 
6988 		{
6989 			const vk::VkDescriptorImageInfo			imageInfo	=
6990 			{
6991 				0,
6992 				*m_imageView,
6993 				context.getImageLayout()
6994 			};
6995 			const vk::VkWriteDescriptorSet			write		=
6996 			{
6997 				vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
6998 				DE_NULL,
6999 				*m_descriptorSet,
7000 				0u,
7001 				0u,
7002 				1u,
7003 				vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
7004 				&imageInfo,
7005 				DE_NULL,
7006 				DE_NULL,
7007 			};
7008 
7009 			vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
7010 		}
7011 	}
7012 }
7013 
7014 void RenderFragmentStorageImage::submit (SubmitContext& context)
7015 {
7016 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
7017 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
7018 
7019 	vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
7020 
7021 	vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &(*m_descriptorSet), 0u, DE_NULL);
7022 	vkd.cmdDraw(commandBuffer, 6, 1, 0, 0);
7023 }
7024 
7025 void RenderFragmentStorageImage::verify (VerifyRenderPassContext& context, size_t)
7026 {
7027 	const UVec2		size			= UVec2(context.getReferenceImage().getWidth(), context.getReferenceImage().getHeight());
7028 	const deUint32	valuesPerPixel	= de::max<deUint32>(1u, (size.x() * size.y()) / (256u * 256u));
7029 
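	// Follow the texel chain the storage-image fragment shader is expected to walk: start
	// from the fragment coordinate and repeatedly read the reference image at a position
	// derived (via the xor/offset below) from the previous value; the last value read is
	// the expected output colour. The shader source itself is defined elsewhere.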
7030 	for (int y = 0; y < context.getReferenceTarget().getSize().y(); y++)
7031 	for (int x = 0; x < context.getReferenceTarget().getSize().x(); x++)
7032 	{
7033 		UVec4	value	= UVec4(x, y, 0u, 0u);
7034 
7035 		for (deUint32 i = 0; i < valuesPerPixel; i++)
7036 		{
7037 			const UVec2	pos			= UVec2(value.z() * 256u + (value.x() ^ value.z()), value.w() * 256u + (value.y() ^ value.w()));
7038 			const Vec4	floatValue	= context.getReferenceImage().getAccess().getPixel(pos.x() % size.x(), pos.y() % size.y());
7039 
7040 			value = UVec4((deUint32)round(floatValue.x() * 255.0f),
7041 						  (deUint32)round(floatValue.y() * 255.0f),
7042 						  (deUint32)round(floatValue.z() * 255.0f),
7043 						  (deUint32)round(floatValue.w() * 255.0f));
7044 
7045 		}
7046 		context.getReferenceTarget().getAccess().setPixel(value.asFloat() / Vec4(255.0f), x, y);
7047 	}
7048 }
7049 
7050 class RenderFragmentSampledImage : public RenderPassCommand
7051 {
7052 public:
7053 				RenderFragmentSampledImage	(void) {}
7054 				~RenderFragmentSampledImage	(void);
7055 
7056 	const char*	getName						(void) const { return "RenderFragmentSampledImage"; }
7057 	void		logPrepare					(TestLog&, size_t) const;
7058 	void		logSubmit					(TestLog&, size_t) const;
7059 	void		prepare						(PrepareRenderPassContext&);
7060 	void		submit						(SubmitContext& context);
7061 	void		verify						(VerifyRenderPassContext&, size_t);
7062 
7063 private:
7064 	PipelineResources				m_resources;
7065 	vk::Move<vk::VkDescriptorPool>	m_descriptorPool;
7066 	vk::Move<vk::VkDescriptorSet>	m_descriptorSet;
7067 	vk::Move<vk::VkImageView>		m_imageView;
7068 	vk::Move<vk::VkSampler>			m_sampler;
7069 };
7070 
7071 RenderFragmentSampledImage::~RenderFragmentSampledImage (void)
7072 {
7073 }
7074 
7075 void RenderFragmentSampledImage::logPrepare (TestLog& log, size_t commandIndex) const
7076 {
7077 	log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render sampled image." << TestLog::EndMessage;
7078 }
7079 
7080 void RenderFragmentSampledImage::logSubmit (TestLog& log, size_t commandIndex) const
7081 {
7082 	log << TestLog::Message << commandIndex << ":" << getName() << " Render using sampled image." << TestLog::EndMessage;
7083 }
7084 
7085 void RenderFragmentSampledImage::prepare (PrepareRenderPassContext& context)
7086 {
7087 	const vk::DeviceInterface&					vkd						= context.getContext().getDeviceInterface();
7088 	const vk::VkDevice							device					= context.getContext().getDevice();
7089 	const vk::VkRenderPass						renderPass				= context.getRenderPass();
7090 	const deUint32								subpass					= 0;
7091 	const vk::Unique<vk::VkShaderModule>		vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-quad.vert"), 0));
7092 	const vk::Unique<vk::VkShaderModule>		fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("sampled-image.frag"), 0));
7093 	vector<vk::VkDescriptorSetLayoutBinding>	bindings;
7094 
7095 	{
7096 		const vk::VkDescriptorSetLayoutBinding binding =
7097 		{
7098 			0u,
7099 			vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
7100 			1,
7101 			vk::VK_SHADER_STAGE_FRAGMENT_BIT,
7102 			DE_NULL
7103 		};
7104 
7105 		bindings.push_back(binding);
7106 	}
7107 
7108 	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
7109 								vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 0u, DE_NULL, m_resources);
7110 
7111 	{
7112 		const vk::VkDescriptorPoolSize			poolSizes		=
7113 		{
7114 			vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
7115 			1
7116 		};
7117 		const vk::VkDescriptorPoolCreateInfo	createInfo		=
7118 		{
7119 			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
7120 			DE_NULL,
7121 			vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
7122 
7123 			1u,
7124 			1u,
7125 			&poolSizes,
7126 		};
7127 
7128 		m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
7129 	}
7130 
7131 	{
7132 		const vk::VkDescriptorSetLayout			layout			= *m_resources.descriptorSetLayout;
7133 		const vk::VkDescriptorSetAllocateInfo	allocateInfo	=
7134 		{
7135 			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
7136 			DE_NULL,
7137 
7138 			*m_descriptorPool,
7139 			1,
7140 			&layout
7141 		};
7142 
7143 		m_descriptorSet = vk::allocateDescriptorSet(vkd, device, &allocateInfo);
7144 
7145 		{
7146 			const vk::VkImageViewCreateInfo createInfo =
7147 			{
7148 				vk::VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
7149 				DE_NULL,
7150 				0u,
7151 
7152 				context.getImage(),
7153 				vk::VK_IMAGE_VIEW_TYPE_2D,
7154 				vk::VK_FORMAT_R8G8B8A8_UNORM,
7155 				vk::makeComponentMappingRGBA(),
7156 				{
7157 					vk::VK_IMAGE_ASPECT_COLOR_BIT,
7158 					0u,
7159 					1u,
7160 					0u,
7161 					1u
7162 				}
7163 			};
7164 
7165 			m_imageView = vk::createImageView(vkd, device, &createInfo);
7166 		}
7167 
7168 		{
7169 			const vk::VkSamplerCreateInfo createInfo =
7170 			{
7171 				vk::VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
7172 				DE_NULL,
7173 				0u,
7174 
7175 				vk::VK_FILTER_NEAREST,
7176 				vk::VK_FILTER_NEAREST,
7177 
7178 				vk::VK_SAMPLER_MIPMAP_MODE_LINEAR,
7179 				vk::VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
7180 				vk::VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
7181 				vk::VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
7182 				0.0f,											// mipLodBias
7183 				VK_FALSE,										// anisotropyEnable
7184 				1.0f,											// maxAnisotropy
7185 				VK_FALSE,										// compareEnable
7186 				vk::VK_COMPARE_OP_ALWAYS,						// compareOp
7187 				0.0f,											// minLod
7188 				0.0f,											// maxLod
7189 				vk::VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK,	// borderColor
7190 				VK_FALSE										// unnormalizedCoordinates
7191 			};
7192 
7193 			m_sampler = vk::createSampler(vkd, device, &createInfo);
7194 		}
7195 
7196 		{
7197 			const vk::VkDescriptorImageInfo			imageInfo	=
7198 			{
7199 				*m_sampler,
7200 				*m_imageView,
7201 				context.getImageLayout()
7202 			};
7203 			const vk::VkWriteDescriptorSet			write		=
7204 			{
7205 				vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
7206 				DE_NULL,
7207 				*m_descriptorSet,
7208 				0u,		// dstBinding
7209 				0u,		// dstArrayElement
7210 				1u,		// descriptorCount
7211 				vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
7212 				&imageInfo,
7213 				DE_NULL,
7214 				DE_NULL,
7215 			};
7216 
7217 			vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
7218 		}
7219 	}
7220 }
7221 
7222 void RenderFragmentSampledImage::submit (SubmitContext& context)
7223 {
7224 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
7225 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
7226 
7227 	vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
7228 
7229 	vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &(*m_descriptorSet), 0u, DE_NULL);
7230 	vkd.cmdDraw(commandBuffer, 6u, 1u, 0u, 0u);
7231 }
7232 
7233 void RenderFragmentSampledImage::verify (VerifyRenderPassContext& context, size_t)
7234 {
7235 	const UVec2		size			= UVec2(context.getReferenceImage().getWidth(), context.getReferenceImage().getHeight());
7236 	const deUint32	valuesPerPixel	= de::max<deUint32>(1u, (size.x() * size.y()) / (256u * 256u));
7237 
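	// Reference computation: starting from the pixel coordinate, repeatedly derive a texel
	// position from the previous value and fetch it from the reference image, so that every
	// texel contributes even when the image is larger than the 256x256 render target. This is
	// expected to mirror the chained lookups performed by the "sampled-image.frag" shader.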
7238 	for (int y = 0; y < context.getReferenceTarget().getSize().y(); y++)
7239 	for (int x = 0; x < context.getReferenceTarget().getSize().x(); x++)
7240 	{
7241 		UVec4	value	= UVec4(x, y, 0u, 0u);
7242 
7243 		for (deUint32 i = 0; i < valuesPerPixel; i++)
7244 		{
7245 			const UVec2	pos			= UVec2(value.z() * 256u + (value.x() ^ value.z()), value.w() * 256u + (value.y() ^ value.w()));
7246 			const Vec4	floatValue	= context.getReferenceImage().getAccess().getPixel(pos.x() % size.x(), pos.y() % size.y());
7247 
7248 			value = UVec4((deUint32)round(floatValue.x() * 255.0f),
7249 						  (deUint32)round(floatValue.y() * 255.0f),
7250 						  (deUint32)round(floatValue.z() * 255.0f),
7251 						  (deUint32)round(floatValue.w() * 255.0f));
7252 
7253 		}
7254 
7255 		context.getReferenceTarget().getAccess().setPixel(value.asFloat() / Vec4(255.0f), x, y);
7256 	}
7257 }
7258 
7259 enum Op
7260 {
7261 	OP_MAP,
7262 	OP_UNMAP,
7263 
7264 	OP_MAP_FLUSH,
7265 	OP_MAP_INVALIDATE,
7266 
7267 	OP_MAP_READ,
7268 	OP_MAP_WRITE,
7269 	OP_MAP_MODIFY,
7270 
7271 	OP_BUFFER_CREATE,
7272 	OP_BUFFER_DESTROY,
7273 	OP_BUFFER_BINDMEMORY,
7274 
7275 	OP_QUEUE_WAIT_FOR_IDLE,
7276 	OP_DEVICE_WAIT_FOR_IDLE,
7277 
7278 	OP_COMMAND_BUFFER_BEGIN,
7279 	OP_COMMAND_BUFFER_END,
7280 
7281 	// Secondary, non render pass command buffers
7282 	// Render pass secondary command buffers are not currently covered
7283 	OP_SECONDARY_COMMAND_BUFFER_BEGIN,
7284 	OP_SECONDARY_COMMAND_BUFFER_END,
7285 
7286 	// Buffer transfer operations
7287 	OP_BUFFER_FILL,
7288 	OP_BUFFER_UPDATE,
7289 
7290 	OP_BUFFER_COPY_TO_BUFFER,
7291 	OP_BUFFER_COPY_FROM_BUFFER,
7292 
7293 	OP_BUFFER_COPY_TO_IMAGE,
7294 	OP_BUFFER_COPY_FROM_IMAGE,
7295 
7296 	OP_IMAGE_CREATE,
7297 	OP_IMAGE_DESTROY,
7298 	OP_IMAGE_BINDMEMORY,
7299 
7300 	OP_IMAGE_TRANSITION_LAYOUT,
7301 
7302 	OP_IMAGE_COPY_TO_BUFFER,
7303 	OP_IMAGE_COPY_FROM_BUFFER,
7304 
7305 	OP_IMAGE_COPY_TO_IMAGE,
7306 	OP_IMAGE_COPY_FROM_IMAGE,
7307 
7308 	OP_IMAGE_BLIT_TO_IMAGE,
7309 	OP_IMAGE_BLIT_FROM_IMAGE,
7310 
7311 	OP_IMAGE_RESOLVE,
7312 
7313 	OP_PIPELINE_BARRIER_GLOBAL,
7314 	OP_PIPELINE_BARRIER_BUFFER,
7315 	OP_PIPELINE_BARRIER_IMAGE,
7316 
7317 	// Renderpass operations
7318 	OP_RENDERPASS_BEGIN,
7319 	OP_RENDERPASS_END,
7320 
7321 	// Commands inside render pass
7322 	OP_RENDER_VERTEX_BUFFER,
7323 	OP_RENDER_INDEX_BUFFER,
7324 
7325 	OP_RENDER_VERTEX_UNIFORM_BUFFER,
7326 	OP_RENDER_FRAGMENT_UNIFORM_BUFFER,
7327 
7328 	OP_RENDER_VERTEX_UNIFORM_TEXEL_BUFFER,
7329 	OP_RENDER_FRAGMENT_UNIFORM_TEXEL_BUFFER,
7330 
7331 	OP_RENDER_VERTEX_STORAGE_BUFFER,
7332 	OP_RENDER_FRAGMENT_STORAGE_BUFFER,
7333 
7334 	OP_RENDER_VERTEX_STORAGE_TEXEL_BUFFER,
7335 	OP_RENDER_FRAGMENT_STORAGE_TEXEL_BUFFER,
7336 
7337 	OP_RENDER_VERTEX_STORAGE_IMAGE,
7338 	OP_RENDER_FRAGMENT_STORAGE_IMAGE,
7339 
7340 	OP_RENDER_VERTEX_SAMPLED_IMAGE,
7341 	OP_RENDER_FRAGMENT_SAMPLED_IMAGE,
7342 };
7343 
7344 enum Stage
7345 {
7346 	STAGE_HOST,
7347 	STAGE_COMMAND_BUFFER,
7348 	STAGE_SECONDARY_COMMAND_BUFFER,
7349 
7350 	STAGE_RENDER_PASS
7351 };
7352 
7353 vk::VkAccessFlags getWriteAccessFlags (void)
7354 {
7355 	return vk::VK_ACCESS_SHADER_WRITE_BIT
7356 		| vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT
7357 		| vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT
7358 		| vk::VK_ACCESS_TRANSFER_WRITE_BIT
7359 		| vk::VK_ACCESS_HOST_WRITE_BIT
7360 		| vk::VK_ACCESS_MEMORY_WRITE_BIT;
7361 }
7362 
7363 bool isWriteAccess (vk::VkAccessFlagBits access)
7364 {
7365 	return (getWriteAccessFlags() & access) != 0;
7366 }
7367 
7368 class CacheState
7369 {
7370 public:
7371 									CacheState				(vk::VkPipelineStageFlags allowedStages, vk::VkAccessFlags allowedAccesses);
7372 
7373 	bool							isValid					(vk::VkPipelineStageFlagBits	stage,
7374 															 vk::VkAccessFlagBits			access) const;
7375 
7376 	void							perform					(vk::VkPipelineStageFlagBits	stage,
7377 															 vk::VkAccessFlagBits			access);
7378 
7379 	void							submitCommandBuffer		(void);
7380 	void							waitForIdle				(void);
7381 
7382 	void							getFullBarrier			(vk::VkPipelineStageFlags&	srcStages,
7383 															 vk::VkAccessFlags&			srcAccesses,
7384 															 vk::VkPipelineStageFlags&	dstStages,
7385 															 vk::VkAccessFlags&			dstAccesses) const;
7386 
7387 	void							barrier					(vk::VkPipelineStageFlags	srcStages,
7388 															 vk::VkAccessFlags			srcAccesses,
7389 															 vk::VkPipelineStageFlags	dstStages,
7390 															 vk::VkAccessFlags			dstAccesses);
7391 
7392 	void							imageLayoutBarrier		(vk::VkPipelineStageFlags	srcStages,
7393 															 vk::VkAccessFlags			srcAccesses,
7394 															 vk::VkPipelineStageFlags	dstStages,
7395 															 vk::VkAccessFlags			dstAccesses);
7396 
7397 	void							checkImageLayoutBarrier	(vk::VkPipelineStageFlags	srcStages,
7398 															 vk::VkAccessFlags			srcAccesses,
7399 															 vk::VkPipelineStageFlags	dstStages,
7400 															 vk::VkAccessFlags			dstAccesses);
7401 
7402 	// Everything is clean and there is no need for barriers
7403 	bool							isClean					(void) const;
7404 
7405 	vk::VkPipelineStageFlags		getAllowedStages		(void) const { return m_allowedStages; }
7406 	vk::VkAccessFlags				getAllowedAcceses		(void) const { return m_allowedAccesses; }
7407 private:
7408 	// Limit which stages and accesses are used by the CacheState tracker
7409 	const vk::VkPipelineStageFlags	m_allowedStages;
7410 	const vk::VkAccessFlags			m_allowedAccesses;
7411 
7412 	// [dstStage][srcStage][dstAccess] = srcAccesses
7413 	// In stage dstStage, writes with srcAccesses from srcStage are not yet available for dstAccess
7414 	vk::VkAccessFlags				m_unavailableWriteOperations[PIPELINESTAGE_LAST][PIPELINESTAGE_LAST][ACCESS_LAST];
7415 	// Latest image layout transition is not yet available in the stage
7416 	bool							m_unavailableLayoutTransition[PIPELINESTAGE_LAST];
7417 	// [dstStage] = dstAccesses
7418 	// In stage dstStage ops with dstAccesses are not yet visible
7419 	vk::VkAccessFlags				m_invisibleOperations[PIPELINESTAGE_LAST];
7420 
7421 	// [dstStage] = srcStage
7422 	// Memory operations in srcStage have not completed before dstStage
7423 	vk::VkPipelineStageFlags		m_incompleteOperations[PIPELINESTAGE_LAST];
7424 };
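// Rough usage sketch (derived from the code below): each memory access the test performs is
// recorded with perform(), and isValid() tells whether such an access would be properly
// synchronized in the current state. When the test records a pipeline barrier it mirrors it
// here with barrier() or imageLayoutBarrier(), and getFullBarrier() can be queried for a
// barrier that would make everything available and visible again.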
7425 
7426 CacheState::CacheState (vk::VkPipelineStageFlags allowedStages, vk::VkAccessFlags allowedAccesses)
7427 	: m_allowedStages	(allowedStages)
7428 	, m_allowedAccesses	(allowedAccesses)
7429 {
7430 	for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
7431 	{
7432 		const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
7433 
7434 		if ((dstStage_ & m_allowedStages) == 0)
7435 			continue;
7436 
7437 		// All operations are initially visible
7438 		m_invisibleOperations[dstStage] = 0;
7439 
7440 		// There are no incomplete read operations initially
7441 		m_incompleteOperations[dstStage] = 0;
7442 
7443 		// There are no incomplete layout transitions
7444 		m_unavailableLayoutTransition[dstStage] = false;
7445 
7446 		for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
7447 		{
7448 			const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7449 
7450 			if ((srcStage_ & m_allowedStages) == 0)
7451 				continue;
7452 
7453 			// There are no write operations that are not yet available
7454 			// initially.
7455 			for (vk::VkAccessFlags dstAccess_ = 1; dstAccess_ <= m_allowedAccesses; dstAccess_ <<= 1)
7456 			{
7457 				const Access dstAccess = accessFlagToAccess((vk::VkAccessFlagBits)dstAccess_);
7458 
7459 				if ((dstAccess_ & m_allowedAccesses) == 0)
7460 					continue;
7461 
7462 				m_unavailableWriteOperations[dstStage][srcStage][dstAccess] = 0;
7463 			}
7464 		}
7465 	}
7466 }
7467 
7468 bool CacheState::isValid (vk::VkPipelineStageFlagBits	stage,
7469 						  vk::VkAccessFlagBits			access) const
7470 {
7471 	DE_ASSERT((access & (~m_allowedAccesses)) == 0);
7472 	DE_ASSERT((stage & (~m_allowedStages)) == 0);
7473 
7474 	const PipelineStage	dstStage	= pipelineStageFlagToPipelineStage(stage);
7475 
7476 	// Previous operations are not visible to access on stage
7477 	if (m_unavailableLayoutTransition[dstStage] || (m_invisibleOperations[dstStage] & access) != 0)
7478 		return false;
7479 
7480 	if (isWriteAccess(access))
7481 	{
7482 		// Memory operations from other stages have not completed before
7483 		// dstStage
7484 		if (m_incompleteOperations[dstStage] != 0)
7485 			return false;
7486 	}
7487 
7488 	return true;
7489 }
7490 
7491 void CacheState::perform (vk::VkPipelineStageFlagBits	stage,
7492 						  vk::VkAccessFlagBits			access)
7493 {
7494 	DE_ASSERT((access & (~m_allowedAccesses)) == 0);
7495 	DE_ASSERT((stage & (~m_allowedStages)) == 0);
7496 
7497 	const PipelineStage srcStage = pipelineStageFlagToPipelineStage(stage);
7498 
7499 	for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
7500 	{
7501 		const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
7502 
7503 		if ((dstStage_ & m_allowedStages) == 0)
7504 			continue;
7505 
7506 		// Mark stage as incomplete for all stages
7507 		m_incompleteOperations[dstStage] |= stage;
7508 
7509 		if (isWriteAccess(access))
7510 		{
7511 			// Mark all accesses from all stages invisible
7512 			m_invisibleOperations[dstStage] |= m_allowedAccesses;
7513 
7514 			// Mark write access from srcStage unavailable to all stages for all accesses
7515 			for (vk::VkAccessFlags dstAccess_ = 1; dstAccess_ <= m_allowedAccesses; dstAccess_ <<= 1)
7516 			{
7517 				const Access dstAccess = accessFlagToAccess((vk::VkAccessFlagBits)dstAccess_);
7518 
7519 				if ((dstAccess_ & m_allowedAccesses) == 0)
7520 					continue;
7521 
7522 				m_unavailableWriteOperations[dstStage][srcStage][dstAccess] |= access;
7523 			}
7524 		}
7525 	}
7526 }
7527 
7528 void CacheState::submitCommandBuffer (void)
7529 {
7530 	// Flush all host writes and reads
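	// (Queue submission includes an implicit memory dependency that makes prior host writes
	// to mapped memory available and visible to the device; the barrier below models that.)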
7531 	barrier(m_allowedStages & vk::VK_PIPELINE_STAGE_HOST_BIT,
7532 			m_allowedAccesses & (vk::VK_ACCESS_HOST_READ_BIT | vk::VK_ACCESS_HOST_WRITE_BIT),
7533 			m_allowedStages,
7534 			m_allowedAccesses);
7535 }
7536 
7537 void CacheState::waitForIdle (void)
7538 {
7539 	// Make all writes available
7540 	barrier(m_allowedStages,
7541 			m_allowedAccesses & getWriteAccessFlags(),
7542 			m_allowedStages,
7543 			0);
7544 
7545 	// Make all writes visible on device side
7546 	barrier(m_allowedStages,
7547 			0,
7548 			m_allowedStages & (~vk::VK_PIPELINE_STAGE_HOST_BIT),
7549 			m_allowedAccesses);
7550 }
7551 
7552 void CacheState::getFullBarrier (vk::VkPipelineStageFlags&	srcStages,
7553 								 vk::VkAccessFlags&			srcAccesses,
7554 								 vk::VkPipelineStageFlags&	dstStages,
7555 								 vk::VkAccessFlags&			dstAccesses) const
7556 {
7557 	srcStages	= 0;
7558 	srcAccesses	= 0;
7559 	dstStages	= 0;
7560 	dstAccesses	= 0;
7561 
7562 	for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
7563 	{
7564 		const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
7565 
7566 		if ((dstStage_ & m_allowedStages) == 0)
7567 			continue;
7568 
7569 		// Make sure all previous operations are complete in all stages
7570 		if (m_incompleteOperations[dstStage])
7571 		{
7572 			dstStages |= dstStage_;
7573 			srcStages |= m_incompleteOperations[dstStage];
7574 		}
7575 
7576 		// Make sure all operations are visible in dstStage
7577 		if (m_invisibleOperations[dstStage])
7578 		{
7579 			dstStages |= dstStage_;
7580 			dstAccesses |= m_invisibleOperations[dstStage];
7581 		}
7582 
7583 		// Make sure all write operations from all stages are available
7584 		for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
7585 		{
7586 			const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7587 
7588 			if ((srcStage_ & m_allowedStages) == 0)
7589 				continue;
7590 
7591 			for (vk::VkAccessFlags dstAccess_ = 1; dstAccess_ <= m_allowedAccesses; dstAccess_ <<= 1)
7592 			{
7593 				const Access dstAccess = accessFlagToAccess((vk::VkAccessFlagBits)dstAccess_);
7594 
7595 				if ((dstAccess_ & m_allowedAccesses) == 0)
7596 					continue;
7597 
7598 				if (m_unavailableWriteOperations[dstStage][srcStage][dstAccess])
7599 				{
7600 					dstStages |= dstStage_;
7601 					srcStages |= dstStage_;
7602 					srcAccesses |= m_unavailableWriteOperations[dstStage][srcStage][dstAccess];
7603 				}
7604 			}
7605 
7606 			if (m_unavailableLayoutTransition[dstStage] && !m_unavailableLayoutTransition[srcStage])
7607 			{
7608 				// Add dependency between srcStage and dstStage if layout transition has not completed in dstStage,
7609 				// but has completed in srcStage.
7610 				dstStages |= dstStage_;
7611 				srcStages |= dstStage_;
7612 			}
7613 		}
7614 	}
7615 
7616 	DE_ASSERT((srcStages & (~m_allowedStages)) == 0);
7617 	DE_ASSERT((srcAccesses & (~m_allowedAccesses)) == 0);
7618 	DE_ASSERT((dstStages & (~m_allowedStages)) == 0);
7619 	DE_ASSERT((dstAccesses & (~m_allowedAccesses)) == 0);
7620 }
7621 
7622 void CacheState::checkImageLayoutBarrier (vk::VkPipelineStageFlags	srcStages,
7623 										  vk::VkAccessFlags			srcAccesses,
7624 										  vk::VkPipelineStageFlags	dstStages,
7625 										  vk::VkAccessFlags			dstAccesses)
7626 {
7627 	DE_ASSERT((srcStages & (~m_allowedStages)) == 0);
7628 	DE_ASSERT((srcAccesses & (~m_allowedAccesses)) == 0);
7629 	DE_ASSERT((dstStages & (~m_allowedStages)) == 0);
7630 	DE_ASSERT((dstAccesses & (~m_allowedAccesses)) == 0);
7631 
7632 	DE_UNREF(srcStages);
7633 	DE_UNREF(srcAccesses);
7634 
7635 	DE_UNREF(dstStages);
7636 	DE_UNREF(dstAccesses);
7637 
7638 #if defined(DE_DEBUG)
7639 	// Check that all stages have completed before srcStages or are in srcStages.
7640 	{
7641 		vk::VkPipelineStageFlags completedStages = srcStages;
7642 
7643 		for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= srcStages; srcStage_ <<= 1)
7644 		{
7645 			const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7646 
7647 			if ((srcStage_ & srcStages) == 0)
7648 				continue;
7649 
7650 			completedStages |= (~m_incompleteOperations[srcStage]);
7651 		}
7652 
7653 		DE_ASSERT((completedStages & m_allowedStages) == m_allowedStages);
7654 	}
7655 
7656 	// Check that any write is available in at least one stage. Since all stages are complete, even a single flush is enough.
7657 	if ((getWriteAccessFlags() & m_allowedAccesses) != 0 && (srcAccesses & getWriteAccessFlags()) == 0)
7658 	{
7659 		bool anyWriteAvailable = false;
7660 
7661 		for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
7662 		{
7663 			const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
7664 
7665 			if ((dstStage_ & m_allowedStages) == 0)
7666 				continue;
7667 
7668 			for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
7669 			{
7670 				const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7671 
7672 				if ((srcStage_ & m_allowedStages) == 0)
7673 					continue;
7674 
7675 				for (vk::VkAccessFlags dstAccess_ = 1; dstAccess_ <= m_allowedAccesses; dstAccess_ <<= 1)
7676 				{
7677 					const Access dstAccess = accessFlagToAccess((vk::VkAccessFlagBits)dstAccess_);
7678 
7679 					if ((dstAccess_ & m_allowedAccesses) == 0)
7680 						continue;
7681 
7682 					if (m_unavailableWriteOperations[dstStage][srcStage][dstAccess] != (getWriteAccessFlags() & m_allowedAccesses))
7683 					{
7684 						anyWriteAvailable = true;
7685 						break;
7686 					}
7687 				}
7688 			}
7689 		}
7690 
7691 		DE_ASSERT(anyWriteAvailable);
7692 	}
7693 #endif
7694 }
7695 
7696 void CacheState::imageLayoutBarrier (vk::VkPipelineStageFlags	srcStages,
7697 									 vk::VkAccessFlags			srcAccesses,
7698 									 vk::VkPipelineStageFlags	dstStages,
7699 									 vk::VkAccessFlags			dstAccesses)
7700 {
7701 	checkImageLayoutBarrier(srcStages, srcAccesses, dstStages, dstAccesses);
7702 
7703 	for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
7704 	{
7705 		const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
7706 
7707 		if ((dstStage_ & m_allowedStages) == 0)
7708 			continue;
7709 
7710 		// All stages are incomplete after the barrier, except each dstStage with respect to itself.
7711 		m_incompleteOperations[dstStage] = m_allowedStages & (~dstStage_);
7712 
7713 		// All memory operations are invisible unless they are listed in dstAccess
7714 		m_invisibleOperations[dstStage] = m_allowedAccesses & (~dstAccesses);
7715 
7716 		// Layout transition is unavailable in stage unless it was listed in dstStages
7717 		m_unavailableLayoutTransition[dstStage]= (dstStage_ & dstStages) == 0;
7718 
7719 		for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
7720 		{
7721 			const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7722 
7723 			if ((srcStage_ & m_allowedStages) == 0)
7724 				continue;
7725 
7726 			// All write operations are available after layout transition
7727 			for (vk::VkAccessFlags dstAccess_ = 1; dstAccess_ <= m_allowedAccesses; dstAccess_ <<= 1)
7728 			{
7729 				const Access dstAccess = accessFlagToAccess((vk::VkAccessFlagBits)dstAccess_);
7730 
7731 				if ((dstAccess_ & m_allowedAccesses) == 0)
7732 					continue;
7733 
7734 				m_unavailableWriteOperations[dstStage][srcStage][dstAccess] = 0;
7735 			}
7736 		}
7737 	}
7738 }
7739 
7740 void CacheState::barrier (vk::VkPipelineStageFlags	srcStages,
7741 						  vk::VkAccessFlags			srcAccesses,
7742 						  vk::VkPipelineStageFlags	dstStages,
7743 						  vk::VkAccessFlags			dstAccesses)
7744 {
7745 	DE_ASSERT((srcStages & (~m_allowedStages)) == 0);
7746 	DE_ASSERT((srcAccesses & (~m_allowedAccesses)) == 0);
7747 	DE_ASSERT((dstStages & (~m_allowedStages)) == 0);
7748 	DE_ASSERT((dstAccesses & (~m_allowedAccesses)) == 0);
7749 
7750 	// Transitivity
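	// (Dependencies chain: anything that had completed, any write that had become available
	// and any layout transition that had become available in a srcStage is now also counted
	// as such in every dstStage, before the effects of this barrier are applied below.)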
7751 	{
7752 		vk::VkPipelineStageFlags		oldIncompleteOperations[PIPELINESTAGE_LAST];
7753 		vk::VkAccessFlags				oldUnavailableWriteOperations[PIPELINESTAGE_LAST][PIPELINESTAGE_LAST][ACCESS_LAST];
7754 		bool							oldUnavailableLayoutTransition[PIPELINESTAGE_LAST];
7755 
7756 		deMemcpy(oldIncompleteOperations, m_incompleteOperations, sizeof(oldIncompleteOperations));
7757 		deMemcpy(oldUnavailableWriteOperations, m_unavailableWriteOperations, sizeof(oldUnavailableWriteOperations));
7758 		deMemcpy(oldUnavailableLayoutTransition, m_unavailableLayoutTransition, sizeof(oldUnavailableLayoutTransition));
7759 
7760 		for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= srcStages; srcStage_ <<= 1)
7761 		{
7762 			const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7763 
7764 			if ((srcStage_ & srcStages) == 0)
7765 				continue;
7766 
7767 			for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= dstStages; dstStage_ <<= 1)
7768 			{
7769 				const PipelineStage	dstStage			= pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
7770 
7771 				if ((dstStage_ & dstStages) == 0)
7772 					continue;
7773 
7774 				// Stages that have completed before srcStage have also completed before dstStage
7775 				m_incompleteOperations[dstStage] &= oldIncompleteOperations[srcStage];
7776 
7777 				// An image layout transition that is available in srcStage is now also available in dstStage
7778 				m_unavailableLayoutTransition[dstStage] &= oldUnavailableLayoutTransition[srcStage];
7779 
7780 				for (vk::VkPipelineStageFlags sharedStage_ = 1; sharedStage_ <= m_allowedStages; sharedStage_ <<= 1)
7781 				{
7782 					const PipelineStage	sharedStage			= pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)sharedStage_);
7783 
7784 					if ((sharedStage_ & m_allowedStages) == 0)
7785 						continue;
7786 
7787 					// Writes that are available in srcStage are also available in dstStage
7788 					for (vk::VkAccessFlags sharedAccess_ = 1; sharedAccess_ <= m_allowedAccesses; sharedAccess_ <<= 1)
7789 					{
7790 						const Access sharedAccess = accessFlagToAccess((vk::VkAccessFlagBits)sharedAccess_);
7791 
7792 						if ((sharedAccess_ & m_allowedAccesses) == 0)
7793 							continue;
7794 
7795 						m_unavailableWriteOperations[dstStage][sharedStage][sharedAccess] &= oldUnavailableWriteOperations[srcStage][sharedStage][sharedAccess];
7796 					}
7797 				}
7798 			}
7799 		}
7800 	}
7801 
7802 	// Barrier
7803 	for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= dstStages; dstStage_ <<= 1)
7804 	{
7805 		const PipelineStage	dstStage			= pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
7806 		bool				allWritesAvailable	= true;
7807 
7808 		if ((dstStage_ & dstStages) == 0)
7809 			continue;
7810 
7811 		// Operations in srcStages have completed before any stage in dstStages
7812 		m_incompleteOperations[dstStage] &= ~srcStages;
7813 
7814 		for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
7815 		{
7816 			const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7817 
7818 			if ((srcStage_ & m_allowedStages) == 0)
7819 				continue;
7820 
7821 			// Make srcAccesses from srcStage available in dstStage for dstAccess
7822 			for (vk::VkAccessFlags dstAccess_ = 1; dstAccess_ <= m_allowedAccesses; dstAccess_ <<= 1)
7823 			{
7824 				const Access dstAccess = accessFlagToAccess((vk::VkAccessFlagBits)dstAccess_);
7825 
7826 				if ((dstAccess_ & m_allowedAccesses) == 0)
7827 					continue;
7828 
7829 				if (((srcStage_ & srcStages) != 0) && ((dstAccess_ & dstAccesses) != 0))
7830 					m_unavailableWriteOperations[dstStage][srcStage][dstAccess] &= ~srcAccesses;
7831 
7832 				if (m_unavailableWriteOperations[dstStage][srcStage][dstAccess] != 0)
7833 					allWritesAvailable = false;
7834 			}
7835 		}
7836 
7837 		// If all writes are available in dstStage make dstAccesses also visible
7838 		if (allWritesAvailable)
7839 			m_invisibleOperations[dstStage] &= ~dstAccesses;
7840 	}
7841 }
7842 
7843 bool CacheState::isClean (void) const
7844 {
7845 	for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
7846 	{
7847 		const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
7848 
7849 		if ((dstStage_ & m_allowedStages) == 0)
7850 			continue;
7851 
7852 		// Some operations are not visible to some stages
7853 		if (m_invisibleOperations[dstStage] != 0)
7854 			return false;
7855 
7856 		// There are operations that have not completed yet
7857 		if (m_incompleteOperations[dstStage] != 0)
7858 			return false;
7859 
7860 		// Layout transition has not completed yet
7861 		if (m_unavailableLayoutTransition[dstStage])
7862 			return false;
7863 
7864 		for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
7865 		{
7866 			const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7867 
7868 			if ((srcStage_ & m_allowedStages) == 0)
7869 				continue;
7870 
7871 			for (vk::VkAccessFlags dstAccess_ = 1; dstAccess_ <= m_allowedAccesses; dstAccess_ <<= 1)
7872 			{
7873 				const Access dstAccess = accessFlagToAccess((vk::VkAccessFlagBits)dstAccess_);
7874 
7875 				if ((dstAccess_ & m_allowedAccesses) == 0)
7876 					continue;
7877 
7878 				// Some write operations are not available yet
7879 				if (m_unavailableWriteOperations[dstStage][srcStage][dstAccess] != 0)
7880 					return false;
7881 			}
7882 		}
7883 	}
7884 
7885 	return true;
7886 }
7887 
7888 bool layoutSupportedByUsage (Usage usage, vk::VkImageLayout layout)
7889 {
7890 	switch (layout)
7891 	{
7892 		case vk::VK_IMAGE_LAYOUT_GENERAL:
7893 			return true;
7894 
7895 		case vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
7896 			return (usage & USAGE_COLOR_ATTACHMENT) != 0;
7897 
7898 		case vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
7899 			return (usage & USAGE_DEPTH_STENCIL_ATTACHMENT) != 0;
7900 
7901 		case vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
7902 			return (usage & USAGE_DEPTH_STENCIL_ATTACHMENT) != 0;
7903 
7904 		case vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
7905 			// \todo [2016-03-09 mika] Should include input attachment
7906 			return (usage & USAGE_SAMPLED_IMAGE) != 0;
7907 
7908 		case vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
7909 			return (usage & USAGE_TRANSFER_SRC) != 0;
7910 
7911 		case vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
7912 			return (usage & USAGE_TRANSFER_DST) != 0;
7913 
7914 		case vk::VK_IMAGE_LAYOUT_PREINITIALIZED:
7915 			return true;
7916 
7917 		default:
7918 			DE_FATAL("Unknown layout");
7919 			return false;
7920 	}
7921 }
7922 
7923 size_t getNumberOfSupportedLayouts (Usage usage)
7924 {
7925 	const vk::VkImageLayout layouts[] =
7926 	{
7927 		vk::VK_IMAGE_LAYOUT_GENERAL,
7928 		vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
7929 		vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
7930 		vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL,
7931 		vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
7932 		vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
7933 		vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
7934 	};
7935 	size_t supportedLayoutCount = 0;
7936 
7937 	for (size_t layoutNdx = 0; layoutNdx < DE_LENGTH_OF_ARRAY(layouts); layoutNdx++)
7938 	{
7939 		const vk::VkImageLayout layout = layouts[layoutNdx];
7940 
7941 		if (layoutSupportedByUsage(usage, layout))
7942 			supportedLayoutCount++;
7943 	}
7944 
7945 	return supportedLayoutCount;
7946 }
7947 
7948 vk::VkImageLayout getRandomNextLayout (de::Random&			rng,
7949 									   Usage				usage,
7950 									   vk::VkImageLayout	previousLayout)
7951 {
7952 	const vk::VkImageLayout	layouts[] =
7953 	{
7954 		vk::VK_IMAGE_LAYOUT_GENERAL,
7955 		vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
7956 		vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
7957 		vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL,
7958 		vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
7959 		vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
7960 		vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
7961 	};
7962 	const size_t			supportedLayoutCount = getNumberOfSupportedLayouts(usage);
7963 
7964 	DE_ASSERT(supportedLayoutCount > 0);
7965 
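	// If the image already has a defined layout, that layout is excluded from the candidates
	// below, hence there is one candidate fewer to pick from.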
7966 	size_t nextLayoutNdx = ((size_t)rng.getUint32()) % (previousLayout == vk::VK_IMAGE_LAYOUT_UNDEFINED
7967 														? supportedLayoutCount
7968 														: supportedLayoutCount - 1);
7969 
7970 	for (size_t layoutNdx = 0; layoutNdx < DE_LENGTH_OF_ARRAY(layouts); layoutNdx++)
7971 	{
7972 		const vk::VkImageLayout layout = layouts[layoutNdx];
7973 
7974 		if (layoutSupportedByUsage(usage, layout) && layout != previousLayout)
7975 		{
7976 			if (nextLayoutNdx == 0)
7977 				return layout;
7978 			else
7979 				nextLayoutNdx--;
7980 		}
7981 	}
7982 
7983 	DE_FATAL("Unreachable");
7984 	return vk::VK_IMAGE_LAYOUT_UNDEFINED;
7985 }
7986 
7987 struct State
7988 {
7989 	State (Usage usage, deUint32 seed)
7990 		: stage							(STAGE_HOST)
7991 		, cache							(usageToStageFlags(usage), usageToAccessFlags(usage))
7992 		, rng							(seed)
7993 		, mapped						(false)
7994 		, hostInvalidated				(true)
7995 		, hostFlushed					(true)
7996 		, memoryDefined					(false)
7997 		, hasBuffer						(false)
7998 		, hasBoundBufferMemory			(false)
7999 		, hasImage						(false)
8000 		, hasBoundImageMemory			(false)
8001 		, imageLayout					(vk::VK_IMAGE_LAYOUT_UNDEFINED)
8002 		, imageDefined					(false)
8003 		, queueIdle						(true)
8004 		, deviceIdle					(true)
8005 		, commandBufferIsEmpty			(true)
8006 		, primaryCommandBufferIsEmpty	(true)
8007 		, renderPassIsEmpty				(true)
8008 	{
8009 	}
8010 
8011 	Stage				stage;
8012 	CacheState			cache;
8013 	de::Random			rng;
8014 
8015 	bool				mapped;
8016 	bool				hostInvalidated;
8017 	bool				hostFlushed;
8018 	bool				memoryDefined;
8019 
8020 	bool				hasBuffer;
8021 	bool				hasBoundBufferMemory;
8022 
8023 	bool				hasImage;
8024 	bool				hasBoundImageMemory;
8025 	vk::VkImageLayout	imageLayout;
8026 	bool				imageDefined;
8027 
8028 	bool				queueIdle;
8029 	bool				deviceIdle;
8030 
8031 	bool				commandBufferIsEmpty;
8032 
8033 	// A copy of the commandBufferIsEmpty value, saved while a secondary command buffer is being recorded
8034 	bool				primaryCommandBufferIsEmpty;
8035 
8036 	bool				renderPassIsEmpty;
8037 };
8038 
8039 void getAvailableOps (const State& state, bool supportsBuffers, bool supportsImages, Usage usage, vector<Op>& ops)
8040 {
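	// Collect every op that is legal to execute next in the current state; the caller is
	// expected to pick one of them (pseudo)randomly to extend the generated command stream.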
8041 	if (state.stage == STAGE_HOST)
8042 	{
8043 		if (usage & (USAGE_HOST_READ | USAGE_HOST_WRITE))
8044 		{
8045 			// Host memory operations
8046 			if (state.mapped)
8047 			{
8048 				ops.push_back(OP_UNMAP);
8049 
8050 				// Avoid flush and invalidate if they are not needed
8051 				if (!state.hostFlushed)
8052 					ops.push_back(OP_MAP_FLUSH);
8053 
8054 				if (!state.hostInvalidated
8055 					&& state.queueIdle
8056 					&& ((usage & USAGE_HOST_READ) == 0
8057 						|| state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_READ_BIT))
8058 					&& ((usage & USAGE_HOST_WRITE) == 0
8059 						|| state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_WRITE_BIT)))
8060 				{
8061 					ops.push_back(OP_MAP_INVALIDATE);
8062 				}
8063 
8064 				if (usage & USAGE_HOST_READ
8065 					&& usage & USAGE_HOST_WRITE
8066 					&& state.memoryDefined
8067 					&& state.hostInvalidated
8068 					&& state.queueIdle
8069 					&& state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_WRITE_BIT)
8070 					&& state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_READ_BIT))
8071 				{
8072 					ops.push_back(OP_MAP_MODIFY);
8073 				}
8074 
8075 				if (usage & USAGE_HOST_READ
8076 					&& state.memoryDefined
8077 					&& state.hostInvalidated
8078 					&& state.queueIdle
8079 					&& state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_READ_BIT))
8080 				{
8081 					ops.push_back(OP_MAP_READ);
8082 				}
8083 
8084 				if (usage & USAGE_HOST_WRITE
8085 					&& state.hostInvalidated
8086 					&& state.queueIdle
8087 					&& state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_WRITE_BIT))
8088 				{
8089 					ops.push_back(OP_MAP_WRITE);
8090 				}
8091 			}
8092 			else
8093 				ops.push_back(OP_MAP);
8094 		}
8095 
8096 		if (state.hasBoundBufferMemory && state.queueIdle)
8097 		{
8098 			// \note Destroy buffers only after they have been bound
8099 			ops.push_back(OP_BUFFER_DESTROY);
8100 		}
8101 		else
8102 		{
8103 			if (state.hasBuffer)
8104 			{
8105 				if (!state.hasBoundBufferMemory)
8106 					ops.push_back(OP_BUFFER_BINDMEMORY);
8107 			}
8108 			else if (!state.hasImage && supportsBuffers)	// Avoid creating buffer if there is already image
8109 				ops.push_back(OP_BUFFER_CREATE);
8110 		}
8111 
8112 		if (state.hasBoundImageMemory && state.queueIdle)
8113 		{
8114 			// \note Destroy images only after they have been bound
8115 			ops.push_back(OP_IMAGE_DESTROY);
8116 		}
8117 		else
8118 		{
8119 			if (state.hasImage)
8120 			{
8121 				if (!state.hasBoundImageMemory)
8122 					ops.push_back(OP_IMAGE_BINDMEMORY);
8123 			}
8124 			else if (!state.hasBuffer && supportsImages)	// Avoid creating image if there is already buffer
8125 				ops.push_back(OP_IMAGE_CREATE);
8126 		}
8127 
8128 		// Host writes must be flushed before GPU commands, and there must be a
8129 		// buffer or an image for the GPU commands to use
8130 		if (state.hostFlushed
8131 			&& (state.memoryDefined || supportsDeviceBufferWrites(usage) || state.imageDefined || supportsDeviceImageWrites(usage))
8132 			&& (state.hasBoundBufferMemory || state.hasBoundImageMemory) // Avoid command buffers if there is no object to use
8133 			&& (usageToStageFlags(usage) & (~vk::VK_PIPELINE_STAGE_HOST_BIT)) != 0) // Don't start command buffer if there are no ways to use memory from gpu
8134 		{
8135 			ops.push_back(OP_COMMAND_BUFFER_BEGIN);
8136 		}
8137 
8138 		if (!state.deviceIdle)
8139 			ops.push_back(OP_DEVICE_WAIT_FOR_IDLE);
8140 
8141 		if (!state.queueIdle)
8142 			ops.push_back(OP_QUEUE_WAIT_FOR_IDLE);
8143 	}
8144 	else if (state.stage == STAGE_COMMAND_BUFFER)
8145 	{
8146 		if (!state.cache.isClean())
8147 		{
8148 			ops.push_back(OP_PIPELINE_BARRIER_GLOBAL);
8149 
8150 			if (state.hasImage && (state.imageLayout != vk::VK_IMAGE_LAYOUT_UNDEFINED))
8151 				ops.push_back(OP_PIPELINE_BARRIER_IMAGE);
8152 
8153 			if (state.hasBuffer)
8154 				ops.push_back(OP_PIPELINE_BARRIER_BUFFER);
8155 		}
8156 
8157 		if (state.hasBoundBufferMemory)
8158 		{
8159 			if (usage & USAGE_TRANSFER_DST
8160 				&& state.cache.isValid(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_WRITE_BIT))
8161 			{
8162 				ops.push_back(OP_BUFFER_FILL);
8163 				ops.push_back(OP_BUFFER_UPDATE);
8164 				ops.push_back(OP_BUFFER_COPY_FROM_BUFFER);
8165 				ops.push_back(OP_BUFFER_COPY_FROM_IMAGE);
8166 			}
8167 
8168 			if (usage & USAGE_TRANSFER_SRC
8169 				&& state.memoryDefined
8170 				&& state.cache.isValid(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_READ_BIT))
8171 			{
8172 				ops.push_back(OP_BUFFER_COPY_TO_BUFFER);
8173 				ops.push_back(OP_BUFFER_COPY_TO_IMAGE);
8174 			}
8175 		}
8176 
8177 		if (state.hasBoundImageMemory
8178 			&& (state.imageLayout == vk::VK_IMAGE_LAYOUT_UNDEFINED
8179 				|| getNumberOfSupportedLayouts(usage) > 1))
8180 		{
8181 			ops.push_back(OP_IMAGE_TRANSITION_LAYOUT);
8182 
8183 			{
8184 				if (usage & USAGE_TRANSFER_DST
8185 					&& (state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL
8186 						|| state.imageLayout == vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL)
8187 					&& state.cache.isValid(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_WRITE_BIT))
8188 				{
8189 					ops.push_back(OP_IMAGE_COPY_FROM_BUFFER);
8190 					ops.push_back(OP_IMAGE_COPY_FROM_IMAGE);
8191 					ops.push_back(OP_IMAGE_BLIT_FROM_IMAGE);
8192 				}
8193 
8194 				if (usage & USAGE_TRANSFER_SRC
8195 					&& (state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL
8196 						|| state.imageLayout == vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL)
8197 					&& state.imageDefined
8198 					&& state.cache.isValid(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_READ_BIT))
8199 				{
8200 					ops.push_back(OP_IMAGE_COPY_TO_BUFFER);
8201 					ops.push_back(OP_IMAGE_COPY_TO_IMAGE);
8202 					ops.push_back(OP_IMAGE_BLIT_TO_IMAGE);
8203 				}
8204 			}
8205 		}
8206 
8207 		// \todo [2016-03-09 mika] Add other usages?
8208 		if ((state.memoryDefined
8209 				&& state.hasBoundBufferMemory
8210 				&& (((usage & USAGE_VERTEX_BUFFER)
8211 					&& state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT))
8212 				|| ((usage & USAGE_INDEX_BUFFER)
8213 					&& state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_INDEX_READ_BIT))
8214 				|| ((usage & USAGE_UNIFORM_BUFFER)
8215 					&& (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT)
8216 						|| state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT)))
8217 				|| ((usage & USAGE_UNIFORM_TEXEL_BUFFER)
8218 					&& (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT)
8219 						|| state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT)))
8220 				|| ((usage & USAGE_STORAGE_BUFFER)
8221 					&& (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT)
8222 						|| state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT)))
8223 				|| ((usage & USAGE_STORAGE_TEXEL_BUFFER)
8224 					&& state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))))
8225 			|| (state.imageDefined
8226 				&& state.hasBoundImageMemory
8227 				&& (((usage & USAGE_STORAGE_IMAGE)
8228 						&& state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL
8229 						&& (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT)
8230 							|| state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT)))
8231 					|| ((usage & USAGE_SAMPLED_IMAGE)
8232 						&& (state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL
8233 							|| state.imageLayout == vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL)
8234 						&& (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT)
8235 							|| state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))))))
8236 		{
8237 			ops.push_back(OP_RENDERPASS_BEGIN);
8238 		}
8239 
8240 		ops.push_back(OP_SECONDARY_COMMAND_BUFFER_BEGIN);
8241 
8242 		// \note This depends on the previous operations and always has to be the
8243 		// last command buffer operation check
8244 		if (ops.empty() || !state.commandBufferIsEmpty)
8245 			ops.push_back(OP_COMMAND_BUFFER_END);
8246 	}
8247 	else if (state.stage == STAGE_SECONDARY_COMMAND_BUFFER)
8248 	{
8249 		if (!state.cache.isClean())
8250 		{
8251 			ops.push_back(OP_PIPELINE_BARRIER_GLOBAL);
8252 
8253 			if (state.hasImage && (state.imageLayout != vk::VK_IMAGE_LAYOUT_UNDEFINED))
8254 				ops.push_back(OP_PIPELINE_BARRIER_IMAGE);
8255 
8256 			if (state.hasBuffer)
8257 				ops.push_back(OP_PIPELINE_BARRIER_BUFFER);
8258 		}
8259 
8260 		if (state.hasBoundBufferMemory)
8261 		{
8262 			if (usage & USAGE_TRANSFER_DST
8263 				&& state.cache.isValid(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_WRITE_BIT))
8264 			{
8265 				ops.push_back(OP_BUFFER_FILL);
8266 				ops.push_back(OP_BUFFER_UPDATE);
8267 				ops.push_back(OP_BUFFER_COPY_FROM_BUFFER);
8268 				ops.push_back(OP_BUFFER_COPY_FROM_IMAGE);
8269 			}
8270 
8271 			if (usage & USAGE_TRANSFER_SRC
8272 				&& state.memoryDefined
8273 				&& state.cache.isValid(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_READ_BIT))
8274 			{
8275 				ops.push_back(OP_BUFFER_COPY_TO_BUFFER);
8276 				ops.push_back(OP_BUFFER_COPY_TO_IMAGE);
8277 			}
8278 		}
8279 
8280 		if (state.hasBoundImageMemory
8281 			&& (state.imageLayout == vk::VK_IMAGE_LAYOUT_UNDEFINED
8282 				|| getNumberOfSupportedLayouts(usage) > 1))
8283 		{
8284 			ops.push_back(OP_IMAGE_TRANSITION_LAYOUT);
8285 
8286 			{
8287 				if (usage & USAGE_TRANSFER_DST
8288 					&& (state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL
8289 						|| state.imageLayout == vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL)
8290 					&& state.cache.isValid(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_WRITE_BIT))
8291 				{
8292 					ops.push_back(OP_IMAGE_COPY_FROM_BUFFER);
8293 					ops.push_back(OP_IMAGE_COPY_FROM_IMAGE);
8294 					ops.push_back(OP_IMAGE_BLIT_FROM_IMAGE);
8295 				}
8296 
8297 				if (usage & USAGE_TRANSFER_SRC
8298 					&& (state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL
8299 						|| state.imageLayout == vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL)
8300 					&& state.imageDefined
8301 					&& state.cache.isValid(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_READ_BIT))
8302 				{
8303 					ops.push_back(OP_IMAGE_COPY_TO_BUFFER);
8304 					ops.push_back(OP_IMAGE_COPY_TO_IMAGE);
8305 					ops.push_back(OP_IMAGE_BLIT_TO_IMAGE);
8306 				}
8307 			}
8308 		}
8309 
8310 		// \note This depends on the previous operations and always has to be the
8311 		// last command buffer operation check
8312 		if (ops.empty() || !state.commandBufferIsEmpty)
8313 			ops.push_back(OP_SECONDARY_COMMAND_BUFFER_END);
8314 	}
8315 	else if (state.stage == STAGE_RENDER_PASS)
8316 	{
8317 		if ((usage & USAGE_VERTEX_BUFFER) != 0
8318 			&& state.memoryDefined
8319 			&& state.hasBoundBufferMemory
8320 			&& state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT))
8321 		{
8322 			ops.push_back(OP_RENDER_VERTEX_BUFFER);
8323 		}
8324 
8325 		if ((usage & USAGE_INDEX_BUFFER) != 0
8326 			&& state.memoryDefined
8327 			&& state.hasBoundBufferMemory
8328 			&& state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_INDEX_READ_BIT))
8329 		{
8330 			ops.push_back(OP_RENDER_INDEX_BUFFER);
8331 		}
8332 
8333 		if ((usage & USAGE_UNIFORM_BUFFER) != 0
8334 			&& state.memoryDefined
8335 			&& state.hasBoundBufferMemory)
8336 		{
8337 			if (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT))
8338 				ops.push_back(OP_RENDER_VERTEX_UNIFORM_BUFFER);
8339 
8340 			if (state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT))
8341 				ops.push_back(OP_RENDER_FRAGMENT_UNIFORM_BUFFER);
8342 		}
8343 
8344 		if ((usage & USAGE_UNIFORM_TEXEL_BUFFER) != 0
8345 			&& state.memoryDefined
8346 			&& state.hasBoundBufferMemory)
8347 		{
8348 			if (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT))
8349 				ops.push_back(OP_RENDER_VERTEX_UNIFORM_TEXEL_BUFFER);
8350 
8351 			if (state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT))
8352 				ops.push_back(OP_RENDER_FRAGMENT_UNIFORM_TEXEL_BUFFER);
8353 		}
8354 
8355 		if ((usage & USAGE_STORAGE_BUFFER) != 0
8356 			&& state.memoryDefined
8357 			&& state.hasBoundBufferMemory)
8358 		{
8359 			if (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
8360 				ops.push_back(OP_RENDER_VERTEX_STORAGE_BUFFER);
8361 
8362 			if (state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
8363 				ops.push_back(OP_RENDER_FRAGMENT_STORAGE_BUFFER);
8364 		}
8365 
8366 		if ((usage & USAGE_STORAGE_TEXEL_BUFFER) != 0
8367 			&& state.memoryDefined
8368 			&& state.hasBoundBufferMemory)
8369 		{
8370 			if (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
8371 				ops.push_back(OP_RENDER_VERTEX_STORAGE_TEXEL_BUFFER);
8372 
8373 			if (state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
8374 				ops.push_back(OP_RENDER_FRAGMENT_STORAGE_TEXEL_BUFFER);
8375 		}
8376 
8377 		if ((usage & USAGE_STORAGE_IMAGE) != 0
8378 			&& state.imageDefined
8379 			&& state.hasBoundImageMemory
8380 			&& (state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL))
8381 		{
8382 			if (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
8383 				ops.push_back(OP_RENDER_VERTEX_STORAGE_IMAGE);
8384 
8385 			if (state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
8386 				ops.push_back(OP_RENDER_FRAGMENT_STORAGE_IMAGE);
8387 		}
8388 
8389 		if ((usage & USAGE_SAMPLED_IMAGE) != 0
8390 			&& state.imageDefined
8391 			&& state.hasBoundImageMemory
8392 			&& (state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL
8393 				|| state.imageLayout == vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL))
8394 		{
8395 			if (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
8396 				ops.push_back(OP_RENDER_VERTEX_SAMPLED_IMAGE);
8397 
8398 			if (state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
8399 				ops.push_back(OP_RENDER_FRAGMENT_SAMPLED_IMAGE);
8400 		}
8401 
8402 		if (!state.renderPassIsEmpty)
8403 			ops.push_back(OP_RENDERPASS_END);
8404 	}
8405 	else
8406 		DE_FATAL("Unknown stage");
8407 }
8408 
8409 void removeIllegalAccessFlags (vk::VkAccessFlags& accessflags, vk::VkPipelineStageFlags stageflags)
8410 {
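	// Drop any access bit that is not supported by at least one of the given pipeline stages,
	// following the stage/access compatibility rules of the Vulkan specification.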
8411 	if (!(stageflags & vk::VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT))
8412 		accessflags &= ~vk::VK_ACCESS_INDIRECT_COMMAND_READ_BIT;
8413 
8414 	if (!(stageflags & vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT))
8415 		accessflags &= ~vk::VK_ACCESS_INDEX_READ_BIT;
8416 
8417 	if (!(stageflags & vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT))
8418 		accessflags &= ~vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
8419 
8420 	if (!(stageflags & (vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
8421 						vk::VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
8422 						vk::VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT |
8423 						vk::VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
8424 						vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
8425 						vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT)))
8426 		accessflags &= ~vk::VK_ACCESS_UNIFORM_READ_BIT;
8427 
8428 	if (!(stageflags & vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT))
8429 		accessflags &= ~vk::VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;
8430 
8431 	if (!(stageflags & (vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
8432 						vk::VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
8433 						vk::VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT |
8434 						vk::VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
8435 						vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
8436 						vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT)))
8437 		accessflags &= ~vk::VK_ACCESS_SHADER_READ_BIT;
8438 
8439 	if (!(stageflags & (vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
8440 						vk::VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
8441 						vk::VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT |
8442 						vk::VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
8443 						vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
8444 						vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT)))
8445 		accessflags &= ~vk::VK_ACCESS_SHADER_WRITE_BIT;
8446 
8447 	if (!(stageflags & vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT))
8448 		accessflags &= ~vk::VK_ACCESS_COLOR_ATTACHMENT_READ_BIT;
8449 
8450 	if (!(stageflags & vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT))
8451 		accessflags &= ~vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
8452 
8453 	if (!(stageflags & (vk::VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
8454 						vk::VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT)))
8455 		accessflags &= ~vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;
8456 
8457 	if (!(stageflags & (vk::VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
8458 						vk::VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT)))
8459 		accessflags &= ~vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
8460 
8461 	if (!(stageflags & vk::VK_PIPELINE_STAGE_TRANSFER_BIT))
8462 		accessflags &= ~vk::VK_ACCESS_TRANSFER_READ_BIT;
8463 
8464 	if (!(stageflags & vk::VK_PIPELINE_STAGE_TRANSFER_BIT))
8465 		accessflags &= ~vk::VK_ACCESS_TRANSFER_WRITE_BIT;
8466 
8467 	if (!(stageflags & vk::VK_PIPELINE_STAGE_HOST_BIT))
8468 		accessflags &= ~vk::VK_ACCESS_HOST_READ_BIT;
8469 
8470 	if (!(stageflags & vk::VK_PIPELINE_STAGE_HOST_BIT))
8471 		accessflags &= ~vk::VK_ACCESS_HOST_WRITE_BIT;
8472 }
8473 
8474 void applyOp (State& state, const Memory& memory, Op op, Usage usage)
8475 {
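	// Mirror the side effects of executing 'op' in the tracked state: stage transitions, object
	// lifetime, whether the memory/image contents are defined, the image layout and the cache
	// model. The DE_ASSERTs document the preconditions that getAvailableOps() is expected to
	// have enforced when it offered the op.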
8476 	switch (op)
8477 	{
8478 		case OP_MAP:
8479 			DE_ASSERT(state.stage == STAGE_HOST);
8480 			DE_ASSERT(!state.mapped);
8481 			state.mapped = true;
8482 			break;
8483 
8484 		case OP_UNMAP:
8485 			DE_ASSERT(state.stage == STAGE_HOST);
8486 			DE_ASSERT(state.mapped);
8487 			state.mapped = false;
8488 			break;
8489 
8490 		case OP_MAP_FLUSH:
8491 			DE_ASSERT(state.stage == STAGE_HOST);
8492 			DE_ASSERT(!state.hostFlushed);
8493 			state.hostFlushed = true;
8494 			break;
8495 
8496 		case OP_MAP_INVALIDATE:
8497 			DE_ASSERT(state.stage == STAGE_HOST);
8498 			DE_ASSERT(!state.hostInvalidated);
8499 			state.hostInvalidated = true;
8500 			break;
8501 
8502 		case OP_MAP_READ:
8503 			DE_ASSERT(state.stage == STAGE_HOST);
8504 			DE_ASSERT(state.hostInvalidated);
8505 			state.rng.getUint32();
8506 			break;
8507 
8508 		case OP_MAP_WRITE:
8509 			DE_ASSERT(state.stage == STAGE_HOST);
8510 			if ((memory.getMemoryType().propertyFlags & vk::VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) == 0)
8511 				state.hostFlushed = false;
8512 
8513 			state.memoryDefined = true;
8514 			state.imageDefined = false;
8515 			state.imageLayout = vk::VK_IMAGE_LAYOUT_UNDEFINED;
8516 			state.rng.getUint32();
8517 			break;
8518 
8519 		case OP_MAP_MODIFY:
8520 			DE_ASSERT(state.stage == STAGE_HOST);
8521 			DE_ASSERT(state.hostInvalidated);
8522 
8523 			if ((memory.getMemoryType().propertyFlags & vk::VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) == 0)
8524 				state.hostFlushed = false;
8525 
8526 			state.rng.getUint32();
8527 			break;
8528 
8529 		case OP_BUFFER_CREATE:
8530 			DE_ASSERT(state.stage == STAGE_HOST);
8531 			DE_ASSERT(!state.hasBuffer);
8532 
8533 			state.hasBuffer = true;
8534 			break;
8535 
8536 		case OP_BUFFER_DESTROY:
8537 			DE_ASSERT(state.stage == STAGE_HOST);
8538 			DE_ASSERT(state.hasBuffer);
8539 			DE_ASSERT(state.hasBoundBufferMemory);
8540 
8541 			state.hasBuffer = false;
8542 			state.hasBoundBufferMemory = false;
8543 			break;
8544 
8545 		case OP_BUFFER_BINDMEMORY:
8546 			DE_ASSERT(state.stage == STAGE_HOST);
8547 			DE_ASSERT(state.hasBuffer);
8548 			DE_ASSERT(!state.hasBoundBufferMemory);
8549 
8550 			state.hasBoundBufferMemory = true;
8551 			break;
8552 
8553 		case OP_IMAGE_CREATE:
8554 			DE_ASSERT(state.stage == STAGE_HOST);
8555 			DE_ASSERT(!state.hasImage);
8556 			DE_ASSERT(!state.hasBuffer);
8557 
8558 			state.hasImage = true;
8559 			break;
8560 
8561 		case OP_IMAGE_DESTROY:
8562 			DE_ASSERT(state.stage == STAGE_HOST);
8563 			DE_ASSERT(state.hasImage);
8564 			DE_ASSERT(state.hasBoundImageMemory);
8565 
8566 			state.hasImage = false;
8567 			state.hasBoundImageMemory = false;
8568 			state.imageLayout = vk::VK_IMAGE_LAYOUT_UNDEFINED;
8569 			state.imageDefined = false;
8570 			break;
8571 
8572 		case OP_IMAGE_BINDMEMORY:
8573 			DE_ASSERT(state.stage == STAGE_HOST);
8574 			DE_ASSERT(state.hasImage);
8575 			DE_ASSERT(!state.hasBoundImageMemory);
8576 
8577 			state.hasBoundImageMemory = true;
8578 			break;
8579 
8580 		case OP_IMAGE_TRANSITION_LAYOUT:
8581 		{
8582 			DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER || state.stage == STAGE_SECONDARY_COMMAND_BUFFER);
8583 			DE_ASSERT(state.hasImage);
8584 			DE_ASSERT(state.hasBoundImageMemory);
8585 
8586 			// \todo [2016-03-09 mika] Support linear tiling and predefined data
8587 			const vk::VkImageLayout		srcLayout	= state.rng.getFloat() < 0.9f ? state.imageLayout : vk::VK_IMAGE_LAYOUT_UNDEFINED;
8588 			const vk::VkImageLayout		dstLayout	= getRandomNextLayout(state.rng, usage, srcLayout);
8589 
8590 			vk::VkPipelineStageFlags	dirtySrcStages;
8591 			vk::VkAccessFlags			dirtySrcAccesses;
8592 			vk::VkPipelineStageFlags	dirtyDstStages;
8593 			vk::VkAccessFlags			dirtyDstAccesses;
8594 
8595 			vk::VkPipelineStageFlags	srcStages;
8596 			vk::VkAccessFlags			srcAccesses;
8597 			vk::VkPipelineStageFlags	dstStages;
8598 			vk::VkAccessFlags			dstAccesses;
8599 
8600 			state.cache.getFullBarrier(dirtySrcStages, dirtySrcAccesses, dirtyDstStages, dirtyDstAccesses);
8601 
8602 			// Try masking some random bits
8603 			srcStages	= dirtySrcStages;
8604 			srcAccesses	= dirtySrcAccesses;
8605 
8606 			dstStages	= state.cache.getAllowedStages() & state.rng.getUint32();
8607 			dstAccesses	= state.cache.getAllowedAcceses() & state.rng.getUint32();
8608 
8609 			// If there are no bits set in the dst stage mask, use all allowed stages
8610 			dstStages	= dstStages ? dstStages : state.cache.getAllowedStages();
8611 
8612 			if (!srcStages)
8613 				srcStages = dstStages;
8614 
8615 			removeIllegalAccessFlags(dstAccesses, dstStages);
8616 			removeIllegalAccessFlags(srcAccesses, srcStages);
8617 
8618 			if (srcLayout == vk::VK_IMAGE_LAYOUT_UNDEFINED)
8619 				state.imageDefined = false;
8620 
8621 			state.commandBufferIsEmpty = false;
8622 			state.imageLayout = dstLayout;
8623 			state.memoryDefined = false;
8624 			state.cache.imageLayoutBarrier(srcStages, srcAccesses, dstStages, dstAccesses);
8625 			break;
8626 		}
8627 
8628 		case OP_QUEUE_WAIT_FOR_IDLE:
8629 			DE_ASSERT(state.stage == STAGE_HOST);
8630 			DE_ASSERT(!state.queueIdle);
8631 
8632 			state.queueIdle = true;
8633 
8634 			state.cache.waitForIdle();
8635 			break;
8636 
8637 		case OP_DEVICE_WAIT_FOR_IDLE:
8638 			DE_ASSERT(state.stage == STAGE_HOST);
8639 			DE_ASSERT(!state.deviceIdle);
8640 
8641 			state.queueIdle = true;
8642 			state.deviceIdle = true;
8643 
8644 			state.cache.waitForIdle();
8645 			break;
8646 
8647 		case OP_COMMAND_BUFFER_BEGIN:
8648 			DE_ASSERT(state.stage == STAGE_HOST);
8649 			state.stage = STAGE_COMMAND_BUFFER;
8650 			state.commandBufferIsEmpty = true;
8651 			// Makes host writes visible to command buffer
8652 			state.cache.submitCommandBuffer();
8653 			break;
8654 
8655 		case OP_COMMAND_BUFFER_END:
8656 			DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER || state.stage == STAGE_SECONDARY_COMMAND_BUFFER);
8657 			state.stage = STAGE_HOST;
8658 			state.queueIdle = false;
8659 			state.deviceIdle = false;
8660 			break;
8661 
8662 		case OP_SECONDARY_COMMAND_BUFFER_BEGIN:
8663 			DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER || state.stage == STAGE_SECONDARY_COMMAND_BUFFER);
8664 			state.stage = STAGE_SECONDARY_COMMAND_BUFFER;
8665 			state.primaryCommandBufferIsEmpty = state.commandBufferIsEmpty;
8666 			state.commandBufferIsEmpty = true;
8667 			break;
8668 
8669 		case OP_SECONDARY_COMMAND_BUFFER_END:
8670 			DE_ASSERT(state.stage == STAGE_SECONDARY_COMMAND_BUFFER);
8671 			state.stage = STAGE_COMMAND_BUFFER;
8672 			state.commandBufferIsEmpty = state.primaryCommandBufferIsEmpty;
8673 			break;
8674 
8675 		case OP_BUFFER_COPY_FROM_BUFFER:
8676 		case OP_BUFFER_COPY_FROM_IMAGE:
8677 		case OP_BUFFER_UPDATE:
8678 		case OP_BUFFER_FILL:
8679 			state.rng.getUint32();
8680 			DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER || state.stage == STAGE_SECONDARY_COMMAND_BUFFER);
8681 
8682 			if ((memory.getMemoryType().propertyFlags & vk::VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) == 0)
8683 				state.hostInvalidated = false;
8684 
8685 			state.commandBufferIsEmpty = false;
8686 			state.memoryDefined = true;
8687 			state.imageDefined = false;
8688 			state.imageLayout = vk::VK_IMAGE_LAYOUT_UNDEFINED;
8689 			state.cache.perform(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_WRITE_BIT);
8690 			break;
8691 
8692 		case OP_BUFFER_COPY_TO_BUFFER:
8693 		case OP_BUFFER_COPY_TO_IMAGE:
8694 			DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER || state.stage == STAGE_SECONDARY_COMMAND_BUFFER);
8695 
8696 			state.commandBufferIsEmpty = false;
8697 			state.cache.perform(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_READ_BIT);
8698 			break;
8699 
8700 		case OP_IMAGE_BLIT_FROM_IMAGE:
8701 			state.rng.getBool();
8702 			// Fall through
8703 		case OP_IMAGE_COPY_FROM_BUFFER:
8704 		case OP_IMAGE_COPY_FROM_IMAGE:
8705 			state.rng.getUint32();
8706 			DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER || state.stage == STAGE_SECONDARY_COMMAND_BUFFER);
8707 
8708 			if ((memory.getMemoryType().propertyFlags & vk::VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) == 0)
8709 				state.hostInvalidated = false;
8710 
8711 			state.commandBufferIsEmpty = false;
8712 			state.memoryDefined = false;
8713 			state.imageDefined = true;
8714 			state.cache.perform(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_WRITE_BIT);
8715 			break;
8716 
8717 		case OP_IMAGE_BLIT_TO_IMAGE:
8718 			state.rng.getBool();
8719 			// Fall through
8720 		case OP_IMAGE_COPY_TO_BUFFER:
8721 		case OP_IMAGE_COPY_TO_IMAGE:
8722 			DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER || state.stage == STAGE_SECONDARY_COMMAND_BUFFER);
8723 
8724 			state.commandBufferIsEmpty = false;
8725 			state.cache.perform(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_READ_BIT);
8726 			break;
8727 
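		// \note The simulated barrier masks a random subset of the required (dirty) stage and access
		// bits; empty stage masks fall back to the full dirty masks and access flags that are illegal
		// for the chosen stages are removed, so the resulting barrier is always valid.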
8728 		case OP_PIPELINE_BARRIER_GLOBAL:
8729 		case OP_PIPELINE_BARRIER_BUFFER:
8730 		case OP_PIPELINE_BARRIER_IMAGE:
8731 		{
8732 			DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER || state.stage == STAGE_SECONDARY_COMMAND_BUFFER);
8733 
8734 			vk::VkPipelineStageFlags	dirtySrcStages;
8735 			vk::VkAccessFlags			dirtySrcAccesses;
8736 			vk::VkPipelineStageFlags	dirtyDstStages;
8737 			vk::VkAccessFlags			dirtyDstAccesses;
8738 
8739 			vk::VkPipelineStageFlags	srcStages;
8740 			vk::VkAccessFlags			srcAccesses;
8741 			vk::VkPipelineStageFlags	dstStages;
8742 			vk::VkAccessFlags			dstAccesses;
8743 
8744 			state.cache.getFullBarrier(dirtySrcStages, dirtySrcAccesses, dirtyDstStages, dirtyDstAccesses);
8745 
8746 			// Try masking some random bits
8747 			srcStages	= dirtySrcStages & state.rng.getUint32();
8748 			srcAccesses	= dirtySrcAccesses & state.rng.getUint32();
8749 
8750 			dstStages	= dirtyDstStages & state.rng.getUint32();
8751 			dstAccesses	= dirtyDstAccesses & state.rng.getUint32();
8752 
8753 			// If there are no bits in stage mask use the original dirty stages
8754 			srcStages	= srcStages ? srcStages : dirtySrcStages;
8755 			dstStages	= dstStages ? dstStages : dirtyDstStages;
8756 
8757 			if (!srcStages)
8758 				srcStages = dstStages;
8759 
8760 			removeIllegalAccessFlags(dstAccesses, dstStages);
8761 			removeIllegalAccessFlags(srcAccesses, srcStages);
8762 
8763 			state.commandBufferIsEmpty = false;
8764 			state.cache.barrier(srcStages, srcAccesses, dstStages, dstAccesses);
8765 			break;
8766 		}
8767 
8768 		case OP_RENDERPASS_BEGIN:
8769 		{
8770 			DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER);
8771 
8772 			state.renderPassIsEmpty	= true;
8773 			state.stage				= STAGE_RENDER_PASS;
8774 			break;
8775 		}
8776 
8777 		case OP_RENDERPASS_END:
8778 		{
8779 			DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8780 
8781 			state.renderPassIsEmpty	= true;
8782 			state.stage				= STAGE_COMMAND_BUFFER;
8783 			break;
8784 		}
8785 
8786 		case OP_RENDER_VERTEX_BUFFER:
8787 		{
8788 			DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8789 
8790 			state.renderPassIsEmpty = false;
8791 			state.cache.perform(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT);
8792 			break;
8793 		}
8794 
8795 		case OP_RENDER_INDEX_BUFFER:
8796 		{
8797 			DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8798 
8799 			state.renderPassIsEmpty = false;
8800 			state.cache.perform(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_INDEX_READ_BIT);
8801 			break;
8802 		}
8803 
8804 		case OP_RENDER_VERTEX_UNIFORM_BUFFER:
8805 		case OP_RENDER_VERTEX_UNIFORM_TEXEL_BUFFER:
8806 		{
8807 			DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8808 
8809 			state.renderPassIsEmpty = false;
8810 			state.cache.perform(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT);
8811 			break;
8812 		}
8813 
8814 		case OP_RENDER_FRAGMENT_UNIFORM_BUFFER:
8815 		case OP_RENDER_FRAGMENT_UNIFORM_TEXEL_BUFFER:
8816 		{
8817 			DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8818 
8819 			state.renderPassIsEmpty = false;
8820 			state.cache.perform(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT);
8821 			break;
8822 		}
8823 
8824 		case OP_RENDER_VERTEX_STORAGE_BUFFER:
8825 		case OP_RENDER_VERTEX_STORAGE_TEXEL_BUFFER:
8826 		{
8827 			DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8828 
8829 			state.renderPassIsEmpty = false;
8830 			state.cache.perform(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT);
8831 			break;
8832 		}
8833 
8834 		case OP_RENDER_FRAGMENT_STORAGE_BUFFER:
8835 		case OP_RENDER_FRAGMENT_STORAGE_TEXEL_BUFFER:
8836 		{
8837 			DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8838 
8839 			state.renderPassIsEmpty = false;
8840 			state.cache.perform(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT);
8841 			break;
8842 		}
8843 
8844 		case OP_RENDER_FRAGMENT_STORAGE_IMAGE:
8845 		case OP_RENDER_FRAGMENT_SAMPLED_IMAGE:
8846 		{
8847 			DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8848 
8849 			state.renderPassIsEmpty = false;
8850 			state.cache.perform(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT);
8851 			break;
8852 		}
8853 
8854 		case OP_RENDER_VERTEX_STORAGE_IMAGE:
8855 		case OP_RENDER_VERTEX_SAMPLED_IMAGE:
8856 		{
8857 			DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8858 
8859 			state.renderPassIsEmpty = false;
8860 			state.cache.perform(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT);
8861 			break;
8862 		}
8863 
8864 		default:
8865 			DE_FATAL("Unknown op");
8866 	}
8867 }
8868 
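// Creates a host-stage command object for the given op. The rng is consumed in the same way
// as applyOp() above so that the generated command parameters stay in sync with the simulated state.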
8869 de::MovePtr<Command> createHostCommand (Op					op,
8870 										de::Random&			rng,
8871 										Usage				usage,
8872 										vk::VkSharingMode	sharing)
8873 {
8874 	switch (op)
8875 	{
8876 		case OP_MAP:					return de::MovePtr<Command>(new Map());
8877 		case OP_UNMAP:					return de::MovePtr<Command>(new UnMap());
8878 
8879 		case OP_MAP_FLUSH:				return de::MovePtr<Command>(new Flush());
8880 		case OP_MAP_INVALIDATE:			return de::MovePtr<Command>(new Invalidate());
8881 
8882 		case OP_MAP_READ:				return de::MovePtr<Command>(new HostMemoryAccess(true, false, rng.getUint32()));
8883 		case OP_MAP_WRITE:				return de::MovePtr<Command>(new HostMemoryAccess(false, true, rng.getUint32()));
8884 		case OP_MAP_MODIFY:				return de::MovePtr<Command>(new HostMemoryAccess(true, true, rng.getUint32()));
8885 
8886 		case OP_BUFFER_CREATE:			return de::MovePtr<Command>(new CreateBuffer(usageToBufferUsageFlags(usage), sharing));
8887 		case OP_BUFFER_DESTROY:			return de::MovePtr<Command>(new DestroyBuffer());
8888 		case OP_BUFFER_BINDMEMORY:		return de::MovePtr<Command>(new BindBufferMemory());
8889 
8890 		case OP_IMAGE_CREATE:			return de::MovePtr<Command>(new CreateImage(usageToImageUsageFlags(usage), sharing));
8891 		case OP_IMAGE_DESTROY:			return de::MovePtr<Command>(new DestroyImage());
8892 		case OP_IMAGE_BINDMEMORY:		return de::MovePtr<Command>(new BindImageMemory());
8893 
8894 		case OP_QUEUE_WAIT_FOR_IDLE:	return de::MovePtr<Command>(new QueueWaitIdle());
8895 		case OP_DEVICE_WAIT_FOR_IDLE:	return de::MovePtr<Command>(new DeviceWaitIdle());
8896 
8897 		default:
8898 			DE_FATAL("Unknown op");
8899 			return de::MovePtr<Command>(DE_NULL);
8900 	}
8901 }
8902 
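// Creates a command-buffer level command for the given op, consuming rng identically to applyOp()
// so that randomized parameters (fill values, blit scales, barrier masks) match the simulated state.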
8903 de::MovePtr<CmdCommand> createCmdCommand (de::Random&	rng,
8904 										  const State&	state,
8905 										  Op			op,
8906 										  Usage			usage)
8907 {
8908 	switch (op)
8909 	{
8910 		case OP_BUFFER_FILL:					return de::MovePtr<CmdCommand>(new FillBuffer(rng.getUint32()));
8911 		case OP_BUFFER_UPDATE:					return de::MovePtr<CmdCommand>(new UpdateBuffer(rng.getUint32()));
8912 		case OP_BUFFER_COPY_TO_BUFFER:			return de::MovePtr<CmdCommand>(new BufferCopyToBuffer());
8913 		case OP_BUFFER_COPY_FROM_BUFFER:		return de::MovePtr<CmdCommand>(new BufferCopyFromBuffer(rng.getUint32()));
8914 
8915 		case OP_BUFFER_COPY_TO_IMAGE:			return de::MovePtr<CmdCommand>(new BufferCopyToImage());
8916 		case OP_BUFFER_COPY_FROM_IMAGE:			return de::MovePtr<CmdCommand>(new BufferCopyFromImage(rng.getUint32()));
8917 
8918 		case OP_IMAGE_TRANSITION_LAYOUT:
8919 		{
8920 			DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER || state.stage == STAGE_SECONDARY_COMMAND_BUFFER);
8921 			DE_ASSERT(state.hasImage);
8922 			DE_ASSERT(state.hasBoundImageMemory);
8923 
8924 			const vk::VkImageLayout		srcLayout	= rng.getFloat() < 0.9f ? state.imageLayout : vk::VK_IMAGE_LAYOUT_UNDEFINED;
8925 			const vk::VkImageLayout		dstLayout	= getRandomNextLayout(rng, usage, srcLayout);
8926 
8927 			vk::VkPipelineStageFlags	dirtySrcStages;
8928 			vk::VkAccessFlags			dirtySrcAccesses;
8929 			vk::VkPipelineStageFlags	dirtyDstStages;
8930 			vk::VkAccessFlags			dirtyDstAccesses;
8931 
8932 			vk::VkPipelineStageFlags	srcStages;
8933 			vk::VkAccessFlags			srcAccesses;
8934 			vk::VkPipelineStageFlags	dstStages;
8935 			vk::VkAccessFlags			dstAccesses;
8936 
8937 			state.cache.getFullBarrier(dirtySrcStages, dirtySrcAccesses, dirtyDstStages, dirtyDstAccesses);
8938 
8939 			// Try masking some random bits
8940 			srcStages	= dirtySrcStages;
8941 			srcAccesses	= dirtySrcAccesses;
8942 
8943 			dstStages	= state.cache.getAllowedStages() & rng.getUint32();
8944 			dstAccesses	= state.cache.getAllowedAcceses() & rng.getUint32();
8945 
8946 			// If there are no bits in dst stage mask use all stages
8947 			dstStages	= dstStages ? dstStages : state.cache.getAllowedStages();
8948 
8949 			if (!srcStages)
8950 				srcStages = dstStages;
8951 
8952 			removeIllegalAccessFlags(dstAccesses, dstStages);
8953 			removeIllegalAccessFlags(srcAccesses, srcStages);
8954 
8955 			return de::MovePtr<CmdCommand>(new ImageTransition(srcStages, srcAccesses, dstStages, dstAccesses, srcLayout, dstLayout));
8956 		}
8957 
8958 		case OP_IMAGE_COPY_TO_BUFFER:			return de::MovePtr<CmdCommand>(new ImageCopyToBuffer(state.imageLayout));
8959 		case OP_IMAGE_COPY_FROM_BUFFER:			return de::MovePtr<CmdCommand>(new ImageCopyFromBuffer(rng.getUint32(), state.imageLayout));
8960 		case OP_IMAGE_COPY_TO_IMAGE:			return de::MovePtr<CmdCommand>(new ImageCopyToImage(state.imageLayout));
8961 		case OP_IMAGE_COPY_FROM_IMAGE:			return de::MovePtr<CmdCommand>(new ImageCopyFromImage(rng.getUint32(), state.imageLayout));
8962 		case OP_IMAGE_BLIT_TO_IMAGE:
8963 		{
8964 			const BlitScale scale = rng.getBool() ? BLIT_SCALE_20 : BLIT_SCALE_10;
8965 			return de::MovePtr<CmdCommand>(new ImageBlitToImage(scale, state.imageLayout));
8966 		}
8967 
8968 		case OP_IMAGE_BLIT_FROM_IMAGE:
8969 		{
8970 			const BlitScale scale = rng.getBool() ? BLIT_SCALE_20 : BLIT_SCALE_10;
8971 			return de::MovePtr<CmdCommand>(new ImageBlitFromImage(rng.getUint32(), scale, state.imageLayout));
8972 		}
8973 
8974 		case OP_PIPELINE_BARRIER_GLOBAL:
8975 		case OP_PIPELINE_BARRIER_BUFFER:
8976 		case OP_PIPELINE_BARRIER_IMAGE:
8977 		{
8978 			vk::VkPipelineStageFlags	dirtySrcStages;
8979 			vk::VkAccessFlags			dirtySrcAccesses;
8980 			vk::VkPipelineStageFlags	dirtyDstStages;
8981 			vk::VkAccessFlags			dirtyDstAccesses;
8982 
8983 			vk::VkPipelineStageFlags	srcStages;
8984 			vk::VkAccessFlags			srcAccesses;
8985 			vk::VkPipelineStageFlags	dstStages;
8986 			vk::VkAccessFlags			dstAccesses;
8987 
8988 			state.cache.getFullBarrier(dirtySrcStages, dirtySrcAccesses, dirtyDstStages, dirtyDstAccesses);
8989 
8990 			// Try masking some random bits
8991 			srcStages	= dirtySrcStages & rng.getUint32();
8992 			srcAccesses	= dirtySrcAccesses & rng.getUint32();
8993 
8994 			dstStages	= dirtyDstStages & rng.getUint32();
8995 			dstAccesses	= dirtyDstAccesses & rng.getUint32();
8996 
8997 			// If there are no bits in stage mask use the original dirty stages
8998 			srcStages	= srcStages ? srcStages : dirtySrcStages;
8999 			dstStages	= dstStages ? dstStages : dirtyDstStages;
9000 
9001 			if (!srcStages)
9002 				srcStages = dstStages;
9003 
9004 			removeIllegalAccessFlags(dstAccesses, dstStages);
9005 			removeIllegalAccessFlags(srcAccesses, srcStages);
9006 
9007 			PipelineBarrier::Type type;
9008 
9009 			if (op == OP_PIPELINE_BARRIER_IMAGE)
9010 				type = PipelineBarrier::TYPE_IMAGE;
9011 			else if (op == OP_PIPELINE_BARRIER_BUFFER)
9012 				type = PipelineBarrier::TYPE_BUFFER;
9013 			else if (op == OP_PIPELINE_BARRIER_GLOBAL)
9014 				type = PipelineBarrier::TYPE_GLOBAL;
9015 			else
9016 			{
9017 				type = PipelineBarrier::TYPE_LAST;
9018 				DE_FATAL("Unknown op");
9019 			}
9020 
9021 			if (type == PipelineBarrier::TYPE_IMAGE)
9022 				return de::MovePtr<CmdCommand>(new PipelineBarrier(srcStages, srcAccesses, dstStages, dstAccesses, type, tcu::just(state.imageLayout)));
9023 			else
9024 				return de::MovePtr<CmdCommand>(new PipelineBarrier(srcStages, srcAccesses, dstStages, dstAccesses, type, tcu::nothing<vk::VkImageLayout>()));
9025 		}
9026 
9027 		default:
9028 			DE_FATAL("Unknown op");
9029 			return de::MovePtr<CmdCommand>(DE_NULL);
9030 	}
9031 }
9032 
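// Creates a render pass level draw command for the given op. These commands only read the
// resource being tested, so no rng state is consumed here.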
9033 de::MovePtr<RenderPassCommand> createRenderPassCommand (de::Random&,
9034 														const State&,
9035 														const TestConfig&	testConfig,
9036 														Op					op)
9037 {
9038 	switch (op)
9039 	{
9040 		case OP_RENDER_VERTEX_BUFFER:					return de::MovePtr<RenderPassCommand>(new RenderVertexBuffer(testConfig.vertexBufferStride));
9041 		case OP_RENDER_INDEX_BUFFER:					return de::MovePtr<RenderPassCommand>(new RenderIndexBuffer());
9042 
9043 		case OP_RENDER_VERTEX_UNIFORM_BUFFER:			return de::MovePtr<RenderPassCommand>(new RenderVertexUniformBuffer());
9044 		case OP_RENDER_FRAGMENT_UNIFORM_BUFFER:			return de::MovePtr<RenderPassCommand>(new RenderFragmentUniformBuffer());
9045 
9046 		case OP_RENDER_VERTEX_UNIFORM_TEXEL_BUFFER:		return de::MovePtr<RenderPassCommand>(new RenderVertexUniformTexelBuffer());
9047 		case OP_RENDER_FRAGMENT_UNIFORM_TEXEL_BUFFER:	return de::MovePtr<RenderPassCommand>(new RenderFragmentUniformTexelBuffer());
9048 
9049 		case OP_RENDER_VERTEX_STORAGE_BUFFER:			return de::MovePtr<RenderPassCommand>(new RenderVertexStorageBuffer());
9050 		case OP_RENDER_FRAGMENT_STORAGE_BUFFER:			return de::MovePtr<RenderPassCommand>(new RenderFragmentStorageBuffer());
9051 
9052 		case OP_RENDER_VERTEX_STORAGE_TEXEL_BUFFER:		return de::MovePtr<RenderPassCommand>(new RenderVertexStorageTexelBuffer());
9053 		case OP_RENDER_FRAGMENT_STORAGE_TEXEL_BUFFER:	return de::MovePtr<RenderPassCommand>(new RenderFragmentStorageTexelBuffer());
9054 
9055 		case OP_RENDER_VERTEX_STORAGE_IMAGE:			return de::MovePtr<RenderPassCommand>(new RenderVertexStorageImage());
9056 		case OP_RENDER_FRAGMENT_STORAGE_IMAGE:			return de::MovePtr<RenderPassCommand>(new RenderFragmentStorageImage());
9057 
9058 		case OP_RENDER_VERTEX_SAMPLED_IMAGE:			return de::MovePtr<RenderPassCommand>(new RenderVertexSampledImage());
9059 		case OP_RENDER_FRAGMENT_SAMPLED_IMAGE:			return de::MovePtr<RenderPassCommand>(new RenderFragmentSampledImage());
9060 
9061 		default:
9062 			DE_FATAL("Unknown op");
9063 			return de::MovePtr<RenderPassCommand>(DE_NULL);
9064 	}
9065 }
9066 
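// Builds the contents of a single render pass by picking random render ops until OP_RENDERPASS_END
// is selected, and wraps them into a SubmitRenderPass command.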
9067 de::MovePtr<CmdCommand> createRenderPassCommands (const Memory&		memory,
9068 												  de::Random&		nextOpRng,
9069 												  State&			state,
9070 												  const TestConfig&	testConfig,
9071 												  size_t&			opNdx,
9072 												  size_t			opCount)
9073 {
9074 	vector<RenderPassCommand*>	commands;
9075 
9076 	try
9077 	{
9078 		for (; opNdx < opCount; opNdx++)
9079 		{
9080 			vector<Op>	ops;
9081 
9082 			getAvailableOps(state, memory.getSupportBuffers(), memory.getSupportImages(), testConfig.usage, ops);
9083 
9084 			DE_ASSERT(!ops.empty());
9085 
9086 			{
9087 				const Op op = nextOpRng.choose<Op>(ops.begin(), ops.end());
9088 
9089 				if (op == OP_RENDERPASS_END)
9090 				{
9091 					break;
9092 				}
9093 				else
9094 				{
9095 					de::Random	rng	(state.rng);
9096 
9097 					commands.push_back(createRenderPassCommand(rng, state, testConfig, op).release());
9098 					applyOp(state, memory, op, testConfig.usage);
9099 
9100 					DE_ASSERT(state.rng == rng);
9101 				}
9102 			}
9103 		}
9104 
9105 		applyOp(state, memory, OP_RENDERPASS_END, testConfig.usage);
9106 		return de::MovePtr<CmdCommand>(new SubmitRenderPass(commands));
9107 	}
9108 	catch (...)
9109 	{
9110 		for (size_t commandNdx = 0; commandNdx < commands.size(); commandNdx++)
9111 			delete commands[commandNdx];
9112 
9113 		throw;
9114 	}
9115 }
9116 
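// Builds the contents of a secondary command buffer until OP_SECONDARY_COMMAND_BUFFER_END is
// selected, and wraps them into an ExecuteSecondaryCommandBuffer command.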
9117 de::MovePtr<CmdCommand> createSecondaryCmdCommands (const Memory&	memory,
9118 												    de::Random&		nextOpRng,
9119 												    State&			state,
9120 												    Usage			usage,
9121 												    size_t&			opNdx,
9122 												    size_t			opCount)
9123 {
9124 	vector<CmdCommand*>	commands;
9125 
9126 	try
9127 	{
9128 		for (; opNdx < opCount; opNdx++)
9129 		{
9130 			vector<Op>	ops;
9131 
9132 			getAvailableOps(state, memory.getSupportBuffers(), memory.getSupportImages(), usage, ops);
9133 
9134 			DE_ASSERT(!ops.empty());
9135 
9136 			{
9137 				const Op op = nextOpRng.choose<Op>(ops.begin(), ops.end());
9138 
9139 				if (op == OP_SECONDARY_COMMAND_BUFFER_END)
9140 				{
9141 					break;
9142 				}
9143 				else
9144 				{
9145 					de::Random	rng(state.rng);
9146 
9147 					commands.push_back(createCmdCommand(rng, state, op, usage).release());
9148 					applyOp(state, memory, op, usage);
9149 
9150 					DE_ASSERT(state.rng == rng);
9151 				}
9152 			}
9153 		}
9154 
9155 		applyOp(state, memory, OP_SECONDARY_COMMAND_BUFFER_END, usage);
9156 		return de::MovePtr<CmdCommand>(new ExecuteSecondaryCommandBuffer(commands));
9157 	}
9158 	catch (...)
9159 	{
9160 		for (size_t commandNdx = 0; commandNdx < commands.size(); commandNdx++)
9161 			delete commands[commandNdx];
9162 
9163 		throw;
9164 	}
9165 }
9166 
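// Builds a primary command buffer: it starts with a global barrier covering all allowed stages and
// accesses, then appends random commands (including nested render passes and secondary command
// buffers) until OP_COMMAND_BUFFER_END is selected.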
9167 de::MovePtr<Command> createCmdCommands (const Memory&		memory,
9168 										de::Random&			nextOpRng,
9169 										State&				state,
9170 										const TestConfig&	testConfig,
9171 										size_t&				opNdx,
9172 										size_t				opCount)
9173 {
9174 	vector<CmdCommand*>	commands;
9175 
9176 	try
9177 	{
9178 		// Insert a mostly-full barrier to order this work wrt previous command buffer.
9179 		commands.push_back(new PipelineBarrier(state.cache.getAllowedStages(),
9180 											   state.cache.getAllowedAcceses(),
9181 											   state.cache.getAllowedStages(),
9182 											   state.cache.getAllowedAcceses(),
9183 											   PipelineBarrier::TYPE_GLOBAL,
9184 											   tcu::nothing<vk::VkImageLayout>()));
9185 
9186 		for (; opNdx < opCount; opNdx++)
9187 		{
9188 			vector<Op>	ops;
9189 
9190 			getAvailableOps(state, memory.getSupportBuffers(), memory.getSupportImages(), testConfig.usage, ops);
9191 
9192 			DE_ASSERT(!ops.empty());
9193 
9194 			{
9195 				const Op op = nextOpRng.choose<Op>(ops.begin(), ops.end());
9196 
9197 				if (op == OP_COMMAND_BUFFER_END)
9198 				{
9199 					break;
9200 				}
9201 				else
9202 				{
9203 					// \note Command needs to know the state before the operation
9204 					if (op == OP_RENDERPASS_BEGIN)
9205 					{
9206 						applyOp(state, memory, op, testConfig.usage);
9207 						commands.push_back(createRenderPassCommands(memory, nextOpRng, state, testConfig, opNdx, opCount).release());
9208 					}
9209 					else if (op == OP_SECONDARY_COMMAND_BUFFER_BEGIN)
9210 					{
9211 						applyOp(state, memory, op, testConfig.usage);
9212 						commands.push_back(createSecondaryCmdCommands(memory, nextOpRng, state, testConfig.usage, opNdx, opCount).release());
9213 					}
9214 					else
9215 					{
9216 						de::Random	rng	(state.rng);
9217 
9218 						commands.push_back(createCmdCommand(rng, state, op, testConfig.usage).release());
9219 						applyOp(state, memory, op, testConfig.usage);
9220 
9221 						DE_ASSERT(state.rng == rng);
9222 					}
9223 
9224 				}
9225 			}
9226 		}
9227 
9228 		applyOp(state, memory, OP_COMMAND_BUFFER_END, testConfig.usage);
9229 		return de::MovePtr<Command>(new SubmitCommandBuffer(commands));
9230 	}
9231 	catch (...)
9232 	{
9233 		for (size_t commandNdx = 0; commandNdx < commands.size(); commandNdx++)
9234 			delete commands[commandNdx];
9235 
9236 		throw;
9237 	}
9238 }
9239 
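// Generates the full command sequence for one test iteration. Two generators are used: nextOpRng
// only selects which op to perform, while state.rng provides the parameters of each command.
// Keeping them separate lets applyOp() and the create*() functions consume randomness identically,
// which the DE_ASSERT(state.rng == rng) checks verify.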
9240 void createCommands (vector<Command*>&	commands,
9241 					 deUint32			seed,
9242 					 const Memory&		memory,
9243 					 const TestConfig&	testConfig,
9244 					 size_t				opCount)
9245 {
9246 	State			state		(testConfig.usage, seed);
9247 	// Used only to select the next operation
9248 	de::Random		nextOpRng	(seed ^ 12930809);
9249 
9250 	commands.reserve(opCount);
9251 
9252 	for (size_t opNdx = 0; opNdx < opCount; opNdx++)
9253 	{
9254 		vector<Op>	ops;
9255 
9256 		getAvailableOps(state, memory.getSupportBuffers(), memory.getSupportImages(), testConfig.usage, ops);
9257 
9258 		DE_ASSERT(!ops.empty());
9259 
9260 		{
9261 			const Op	op	= nextOpRng.choose<Op>(ops.begin(), ops.end());
9262 
9263 			if (op == OP_COMMAND_BUFFER_BEGIN)
9264 			{
9265 				applyOp(state, memory, op, testConfig.usage);
9266 				commands.push_back(createCmdCommands(memory, nextOpRng, state, testConfig, opNdx, opCount).release());
9267 			}
9268 			else
9269 			{
9270 				de::Random	rng	(state.rng);
9271 
9272 				commands.push_back(createHostCommand(op, rng, testConfig.usage, testConfig.sharing).release());
9273 				applyOp(state, memory, op, testConfig.usage);
9274 
9275 				// Make sure that the random generator is in sync
9276 				DE_ASSERT(state.rng == rng);
9277 			}
9278 		}
9279 	}
9280 
9281 	// Clean up resources
9282 	if (state.hasBuffer || state.hasImage)
9283 	{
9284 		if (!state.queueIdle)
9285 			commands.push_back(new QueueWaitIdle());
9286 
9287 		if (state.hasBuffer)
9288 			commands.push_back(new DestroyBuffer());
9289 
9290 		if (state.hasImage)
9291 			commands.push_back(new DestroyImage());
9292 	}
9293 }
9294 
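// Test instance implemented as a small state machine: m_stage points at the next phase (create
// commands and allocate memory, prepare, execute, verify) and each phase returns whether iterate()
// should be called again. The whole sequence is repeated for every iteration and every memory type.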
9295 class MemoryTestInstance : public TestInstance
9296 {
9297 public:
9298 
9299 	typedef bool(MemoryTestInstance::*StageFunc)(void);
9300 
9301 												MemoryTestInstance				(::vkt::Context& context, const TestConfig& config);
9302 												~MemoryTestInstance				(void);
9303 
9304 	tcu::TestStatus								iterate							(void);
9305 
9306 private:
9307 	const TestConfig							m_config;
9308 	const size_t								m_iterationCount;
9309 	const size_t								m_opCount;
9310 	const vk::VkPhysicalDeviceMemoryProperties	m_memoryProperties;
9311 	deUint32									m_memoryTypeNdx;
9312 	size_t										m_iteration;
9313 	StageFunc									m_stage;
9314 	tcu::ResultCollector						m_resultCollector;
9315 
9316 	vector<Command*>							m_commands;
9317 	MovePtr<Memory>								m_memory;
9318 	MovePtr<Context>							m_renderContext;
9319 	MovePtr<PrepareContext>						m_prepareContext;
9320 
9321 	bool										nextIteration					(void);
9322 	bool										nextMemoryType					(void);
9323 
9324 	bool										createCommandsAndAllocateMemory	(void);
9325 	bool										prepare							(void);
9326 	bool										execute							(void);
9327 	bool										verify							(void);
9328 	void										resetResources					(void);
9329 };
9330 
9331 void MemoryTestInstance::resetResources (void)
9332 {
9333 	const vk::DeviceInterface&	vkd		= m_context.getDeviceInterface();
9334 	const vk::VkDevice			device	= m_context.getDevice();
9335 
9336 	VK_CHECK(vkd.deviceWaitIdle(device));
9337 
9338 	for (size_t commandNdx = 0; commandNdx < m_commands.size(); commandNdx++)
9339 	{
9340 		delete m_commands[commandNdx];
9341 		m_commands[commandNdx] = DE_NULL;
9342 	}
9343 
9344 	m_commands.clear();
9345 	m_prepareContext.clear();
9346 	m_memory.clear();
9347 }
9348 
9349 bool MemoryTestInstance::nextIteration (void)
9350 {
9351 	m_iteration++;
9352 
9353 	if (m_iteration < m_iterationCount)
9354 	{
9355 		resetResources();
9356 		m_stage = &MemoryTestInstance::createCommandsAndAllocateMemory;
9357 		return true;
9358 	}
9359 	else
9360 		return nextMemoryType();
9361 }
9362 
9363 bool MemoryTestInstance::nextMemoryType (void)
9364 {
9365 	resetResources();
9366 
9367 	DE_ASSERT(m_commands.empty());
9368 
9369 	m_memoryTypeNdx++;
9370 
9371 	if (m_memoryTypeNdx < m_memoryProperties.memoryTypeCount)
9372 	{
9373 		m_iteration	= 0;
9374 		m_stage		= &MemoryTestInstance::createCommandsAndAllocateMemory;
9375 
9376 		return true;
9377 	}
9378 	else
9379 	{
9380 		m_stage = DE_NULL;
9381 		return false;
9382 	}
9383 }
9384 
9385 MemoryTestInstance::MemoryTestInstance (::vkt::Context& context, const TestConfig& config)
9386 	: TestInstance			(context)
9387 	, m_config				(config)
9388 	, m_iterationCount		(5)
9389 	, m_opCount				(50)
9390 	, m_memoryProperties	(vk::getPhysicalDeviceMemoryProperties(context.getInstanceInterface(), context.getPhysicalDevice()))
9391 	, m_memoryTypeNdx		(0)
9392 	, m_iteration			(0)
9393 	, m_stage				(&MemoryTestInstance::createCommandsAndAllocateMemory)
9394 	, m_resultCollector		(context.getTestContext().getLog())
9395 
9396 	, m_memory				(DE_NULL)
9397 {
9398 	TestLog&	log	= context.getTestContext().getLog();
9399 	{
9400 		const tcu::ScopedLogSection section (log, "TestCaseInfo", "Test Case Info");
9401 
9402 		log << TestLog::Message << "Buffer size: " << config.size << TestLog::EndMessage;
9403 		log << TestLog::Message << "Sharing: " << config.sharing << TestLog::EndMessage;
9404 		log << TestLog::Message << "Access: " << config.usage << TestLog::EndMessage;
9405 	}
9406 
9407 	{
9408 		const tcu::ScopedLogSection section (log, "MemoryProperties", "Memory Properties");
9409 
9410 		for (deUint32 heapNdx = 0; heapNdx < m_memoryProperties.memoryHeapCount; heapNdx++)
9411 		{
9412 			const tcu::ScopedLogSection heapSection (log, "Heap" + de::toString(heapNdx), "Heap " + de::toString(heapNdx));
9413 
9414 			log << TestLog::Message << "Size: " << m_memoryProperties.memoryHeaps[heapNdx].size << TestLog::EndMessage;
9415 			log << TestLog::Message << "Flags: " << m_memoryProperties.memoryHeaps[heapNdx].flags << TestLog::EndMessage;
9416 		}
9417 
9418 		for (deUint32 memoryTypeNdx = 0; memoryTypeNdx < m_memoryProperties.memoryTypeCount; memoryTypeNdx++)
9419 		{
9420 			const tcu::ScopedLogSection memoryTypeSection (log, "MemoryType" + de::toString(memoryTypeNdx), "Memory type " + de::toString(memoryTypeNdx));
9421 
9422 			log << TestLog::Message << "Properties: " << m_memoryProperties.memoryTypes[memoryTypeNdx].propertyFlags << TestLog::EndMessage;
9423 			log << TestLog::Message << "Heap: " << m_memoryProperties.memoryTypes[memoryTypeNdx].heapIndex << TestLog::EndMessage;
9424 		}
9425 	}
9426 
9427 	{
9428 		const vk::InstanceInterface&			vki					= context.getInstanceInterface();
9429 		const vk::VkPhysicalDevice				physicalDevice		= context.getPhysicalDevice();
9430 		const vk::DeviceInterface&				vkd					= context.getDeviceInterface();
9431 		const vk::VkDevice						device				= context.getDevice();
9432 		const vk::VkQueue						queue				= context.getUniversalQueue();
9433 		const deUint32							queueFamilyIndex	= context.getUniversalQueueFamilyIndex();
9434 		vector<pair<deUint32, vk::VkQueue> >	queues;
9435 
9436 		queues.push_back(std::make_pair(queueFamilyIndex, queue));
9437 
9438 		m_renderContext = MovePtr<Context>(new Context(vki, vkd, physicalDevice, device, queue, queueFamilyIndex, queues, context.getBinaryCollection()));
9439 	}
9440 }
9441 
9442 MemoryTestInstance::~MemoryTestInstance (void)
9443 {
9444 	resetResources();
9445 }
9446 
9447 bool MemoryTestInstance::createCommandsAndAllocateMemory (void)
9448 {
9449 	const vk::VkDevice							device				= m_context.getDevice();
9450 	TestLog&									log					= m_context.getTestContext().getLog();
9451 	const vk::InstanceInterface&				vki					= m_context.getInstanceInterface();
9452 	const vk::VkPhysicalDevice					physicalDevice		= m_context.getPhysicalDevice();
9453 	const vk::DeviceInterface&					vkd					= m_context.getDeviceInterface();
9454 	const vk::VkPhysicalDeviceMemoryProperties	memoryProperties	= vk::getPhysicalDeviceMemoryProperties(vki, physicalDevice);
9455 	const tcu::ScopedLogSection					section				(log, "MemoryType" + de::toString(m_memoryTypeNdx) + "CreateCommands" + de::toString(m_iteration),
9456 																		  "Memory type " + de::toString(m_memoryTypeNdx) + " create commands iteration " + de::toString(m_iteration));
9457 	const vector<deUint32>&						queues				= m_renderContext->getQueueFamilies();
9458 
9459 	DE_ASSERT(m_commands.empty());
9460 
9461 	if (m_config.usage & (USAGE_HOST_READ | USAGE_HOST_WRITE)
9462 		&& !(memoryProperties.memoryTypes[m_memoryTypeNdx].propertyFlags & vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT))
9463 	{
9464 		log << TestLog::Message << "Memory type not supported" << TestLog::EndMessage;
9465 
9466 		return nextMemoryType();
9467 	}
9468 	else
9469 	{
9470 		try
9471 		{
9472 			const vk::VkBufferUsageFlags	bufferUsage		= usageToBufferUsageFlags(m_config.usage);
9473 			const vk::VkImageUsageFlags		imageUsage		= usageToImageUsageFlags(m_config.usage);
9474 			const vk::VkDeviceSize			maxBufferSize	= bufferUsage != 0
9475 															? roundBufferSizeToWxHx4(findMaxBufferSize(vkd, device, bufferUsage, m_config.sharing, queues, m_config.size, m_memoryTypeNdx))
9476 															: 0;
9477 			const IVec2						maxImageSize	= imageUsage != 0
9478 															? findMaxRGBA8ImageSize(vkd, device, imageUsage, m_config.sharing, queues, m_config.size, m_memoryTypeNdx)
9479 															: IVec2(0, 0);
9480 
9481 			log << TestLog::Message << "Max buffer size: " << maxBufferSize << TestLog::EndMessage;
9482 			log << TestLog::Message << "Max RGBA8 image size: " << maxImageSize << TestLog::EndMessage;
9483 
9484 			// Skip tests if there are no supported operations
9485 			if (maxBufferSize == 0
9486 				&& maxImageSize[0] == 0
9487 				&& (m_config.usage & (USAGE_HOST_READ|USAGE_HOST_WRITE)) == 0)
9488 			{
9489 				log << TestLog::Message << "Skipping memory type. None of the usages are supported." << TestLog::EndMessage;
9490 
9491 				return nextMemoryType();
9492 			}
9493 			else
9494 			{
9495 				const deUint32	seed	= 2830980989u ^ deUint32Hash((deUint32)(m_iteration) * m_memoryProperties.memoryTypeCount +  m_memoryTypeNdx);
9496 
9497 				m_memory	= MovePtr<Memory>(new Memory(vki, vkd, physicalDevice, device, m_config.size, m_memoryTypeNdx, maxBufferSize, maxImageSize[0], maxImageSize[1]));
9498 
9499 				log << TestLog::Message << "Create commands" << TestLog::EndMessage;
9500 				createCommands(m_commands, seed, *m_memory, m_config, m_opCount);
9501 
9502 				m_stage = &MemoryTestInstance::prepare;
9503 				return true;
9504 			}
9505 		}
9506 		catch (const tcu::TestError& e)
9507 		{
9508 			m_resultCollector.fail("Failed, got exception: " + string(e.getMessage()));
9509 			return nextMemoryType();
9510 		}
9511 	}
9512 }
9513 
9514 bool MemoryTestInstance::prepare (void)
9515 {
9516 	TestLog&					log		= m_context.getTestContext().getLog();
9517 	const tcu::ScopedLogSection	section	(log, "MemoryType" + de::toString(m_memoryTypeNdx) + "Prepare" + de::toString(m_iteration),
9518 											  "Memory type " + de::toString(m_memoryTypeNdx) + " prepare iteration " + de::toString(m_iteration));
9519 
9520 	m_prepareContext = MovePtr<PrepareContext>(new PrepareContext(*m_renderContext, *m_memory));
9521 
9522 	DE_ASSERT(!m_commands.empty());
9523 
9524 	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
9525 	{
9526 		Command& command = *m_commands[cmdNdx];
9527 
9528 		try
9529 		{
9530 			command.prepare(*m_prepareContext);
9531 		}
9532 		catch (const tcu::TestError& e)
9533 		{
9534 			m_resultCollector.fail(de::toString(cmdNdx) + ":" + command.getName() + " failed to prepare, got exception: " + string(e.getMessage()));
9535 			return nextMemoryType();
9536 		}
9537 	}
9538 
9539 	m_stage = &MemoryTestInstance::execute;
9540 	return true;
9541 }
9542 
9543 bool MemoryTestInstance::execute (void)
9544 {
9545 	TestLog&					log				= m_context.getTestContext().getLog();
9546 	const tcu::ScopedLogSection	section			(log, "MemoryType" + de::toString(m_memoryTypeNdx) + "Execute" + de::toString(m_iteration),
9547 													  "Memory type " + de::toString(m_memoryTypeNdx) + " execute iteration " + de::toString(m_iteration));
9548 	ExecuteContext				executeContext	(*m_renderContext);
9549 	const vk::VkDevice			device			= m_context.getDevice();
9550 	const vk::DeviceInterface&	vkd				= m_context.getDeviceInterface();
9551 
9552 	DE_ASSERT(!m_commands.empty());
9553 
9554 	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
9555 	{
9556 		Command& command = *m_commands[cmdNdx];
9557 
9558 		try
9559 		{
9560 			command.execute(executeContext);
9561 		}
9562 		catch (const tcu::TestError& e)
9563 		{
9564 			m_resultCollector.fail(de::toString(cmdNdx) + ":" + command.getName() + " failed to execute, got exception: " + string(e.getMessage()));
9565 			return nextIteration();
9566 		}
9567 	}
9568 
9569 	VK_CHECK(vkd.deviceWaitIdle(device));
9570 
9571 	m_stage = &MemoryTestInstance::verify;
9572 	return true;
9573 }
9574 
9575 bool MemoryTestInstance::verify (void)
9576 {
9577 	DE_ASSERT(!m_commands.empty());
9578 
9579 	TestLog&					log				= m_context.getTestContext().getLog();
9580 	const tcu::ScopedLogSection	section			(log, "MemoryType" + de::toString(m_memoryTypeNdx) + "Verify" + de::toString(m_iteration),
9581 													  "Memory type " + de::toString(m_memoryTypeNdx) + " verify iteration " + de::toString(m_iteration));
9582 	VerifyContext				verifyContext	(log, m_resultCollector, *m_renderContext, m_config.size);
9583 
9584 	log << TestLog::Message << "Begin verify" << TestLog::EndMessage;
9585 
9586 	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
9587 	{
9588 		Command& command = *m_commands[cmdNdx];
9589 
9590 		try
9591 		{
9592 			command.verify(verifyContext, cmdNdx);
9593 		}
9594 		catch (const tcu::TestError& e)
9595 		{
9596 			m_resultCollector.fail(de::toString(cmdNdx) + ":" + command.getName() + " failed to verify, got exception: " + string(e.getMessage()));
9597 			return nextIteration();
9598 		}
9599 	}
9600 
9601 	return nextIteration();
9602 }
9603 
9604 tcu::TestStatus MemoryTestInstance::iterate (void)
9605 {
9606 	if ((this->*m_stage)())
9607 		return tcu::TestStatus::incomplete();
9608 	else
9609 		return tcu::TestStatus(m_resultCollector.getResult(), m_resultCollector.getMessage());
9610 }
9611 
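// Adds the GLSL sources required by the selected usage flags. Each shader derives vertex positions
// or fragment colors from the contents of the referenced buffer or image, so the rendered output
// depends on the data that was actually read on the device.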
9612 struct AddPrograms
9613 {
9614 	void init (vk::SourceCollections& sources, TestConfig config) const
9615 	{
9616 		// Vertex buffer rendering
9617 		if (config.usage & USAGE_VERTEX_BUFFER)
9618 		{
9619 			const char* const vertexShader =
9620 				"#version 310 es\n"
9621 				"layout(location = 0) in highp vec2 a_position;\n"
9622 				"void main (void) {\n"
9623 				"\tgl_PointSize = 1.0;\n"
9624 				"\tgl_Position = vec4(1.998 * a_position - vec2(0.999), 0.0, 1.0);\n"
9625 				"}\n";
9626 
9627 			sources.glslSources.add("vertex-buffer.vert")
9628 				<< glu::VertexSource(vertexShader);
9629 		}
9630 
9631 		// Index buffer rendering
9632 		if (config.usage & USAGE_INDEX_BUFFER)
9633 		{
9634 			const char* const vertexShader =
9635 				"#version 310 es\n"
9636 				"precision highp float;\n"
9637 				"void main (void) {\n"
9638 				"\tgl_PointSize = 1.0;\n"
9639 				"\thighp vec2 pos = vec2(gl_VertexIndex % 256, gl_VertexIndex / 256) / vec2(255.0);\n"
9640 				"\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
9641 				"}\n";
9642 
9643 			sources.glslSources.add("index-buffer.vert")
9644 				<< glu::VertexSource(vertexShader);
9645 		}
9646 
9647 		if (config.usage & USAGE_UNIFORM_BUFFER)
9648 		{
9649 			{
9650 				std::ostringstream vertexShader;
9651 
9652 				vertexShader <<
9653 					"#version 310 es\n"
9654 					"precision highp float;\n"
9655 					"layout(set=0, binding=0) uniform Block\n"
9656 					"{\n"
9657 					"\thighp uvec4 values[" << de::toString<size_t>(MAX_UNIFORM_BUFFER_SIZE / (sizeof(deUint32) * 4)) << "];\n"
9658 					"} block;\n"
9659 					"void main (void) {\n"
9660 					"\tgl_PointSize = 1.0;\n"
9661 					"\thighp uvec4 vecVal = block.values[gl_VertexIndex / 8];\n"
9662 					"\thighp uint val;\n"
9663 					"\tif (((gl_VertexIndex / 2) % 4 == 0))\n"
9664 					"\t\tval = vecVal.x;\n"
9665 					"\telse if (((gl_VertexIndex / 2) % 4 == 1))\n"
9666 					"\t\tval = vecVal.y;\n"
9667 					"\telse if (((gl_VertexIndex / 2) % 4 == 2))\n"
9668 					"\t\tval = vecVal.z;\n"
9669 					"\telse if (((gl_VertexIndex / 2) % 4 == 3))\n"
9670 					"\t\tval = vecVal.w;\n"
9671 					"\tif ((gl_VertexIndex % 2) == 0)\n"
9672 					"\t\tval = val & 0xFFFFu;\n"
9673 					"\telse\n"
9674 					"\t\tval = val >> 16u;\n"
9675 					"\thighp vec2 pos = vec2(val & 0xFFu, val >> 8u) / vec2(255.0);\n"
9676 					"\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
9677 					"}\n";
9678 
9679 				sources.glslSources.add("uniform-buffer.vert")
9680 					<< glu::VertexSource(vertexShader.str());
9681 			}
9682 
9683 			{
9684 				const size_t		arraySize		= MAX_UNIFORM_BUFFER_SIZE / (sizeof(deUint32) * 4);
9685 				const size_t		arrayIntSize	= arraySize * 4;
9686 				std::ostringstream	fragmentShader;
9687 
9688 				fragmentShader <<
9689 					"#version 310 es\n"
9690 					"precision highp float;\n"
9691 					"precision highp int;\n"
9692 					"layout(location = 0) out highp vec4 o_color;\n"
9693 					"layout(set=0, binding=0) uniform Block\n"
9694 					"{\n"
9695 					"\thighp uvec4 values[" << arraySize << "];\n"
9696 					"} block;\n"
9697 					"layout(push_constant) uniform PushC\n"
9698 					"{\n"
9699 					"\tuint callId;\n"
9700 					"\tuint valuesPerPixel;\n"
9701 					"\tuint bufferSize;\n"
9702 					"} pushC;\n"
9703 					"void main (void) {\n"
9704 					"\thighp uint id = uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x);\n"
9705 					"\tif (uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x) < pushC.callId * (" << arrayIntSize  << "u / pushC.valuesPerPixel))\n"
9706 					"\t\tdiscard;\n"
9707 					"\thighp uint value = id;\n"
9708 					"\tfor (uint i = 0u; i < pushC.valuesPerPixel; i++)\n"
9709 					"\t{\n"
9710 					"\t\thighp uvec4 vecVal = block.values[value % pushC.bufferSize];\n"
9711 					"\t\tif ((value % 4u) == 0u)\n"
9712 					"\t\t\tvalue = vecVal.x;\n"
9713 					"\t\telse if ((value % 4u) == 1u)\n"
9714 					"\t\t\tvalue = vecVal.y;\n"
9715 					"\t\telse if ((value % 4u) == 2u)\n"
9716 					"\t\t\tvalue = vecVal.z;\n"
9717 					"\t\telse if ((value % 4u) == 3u)\n"
9718 					"\t\t\tvalue = vecVal.w;\n"
9719 					"\t}\n"
9720 					"\tuvec4 valueOut = uvec4(value & 0xFFu, (value >> 8u) & 0xFFu, (value >> 16u) & 0xFFu, (value >> 24u) & 0xFFu);\n"
9721 					"\to_color = vec4(valueOut) / vec4(255.0);\n"
9722 					"}\n";
9723 
9724 				sources.glslSources.add("uniform-buffer.frag")
9725 					<< glu::FragmentSource(fragmentShader.str());
9726 			}
9727 		}
9728 
9729 		if (config.usage & USAGE_STORAGE_BUFFER)
9730 		{
9731 			{
9732 				// Vertex storage buffer rendering
9733 				const char* const vertexShader =
9734 					"#version 310 es\n"
9735 					"precision highp float;\n"
9736 					"readonly layout(set=0, binding=0) buffer Block\n"
9737 					"{\n"
9738 					"\thighp uvec4 values[];\n"
9739 					"} block;\n"
9740 					"void main (void) {\n"
9741 					"\tgl_PointSize = 1.0;\n"
9742 					"\thighp uvec4 vecVal = block.values[gl_VertexIndex / 8];\n"
9743 					"\thighp uint val;\n"
9744 					"\tif (((gl_VertexIndex / 2) % 4 == 0))\n"
9745 					"\t\tval = vecVal.x;\n"
9746 					"\telse if (((gl_VertexIndex / 2) % 4 == 1))\n"
9747 					"\t\tval = vecVal.y;\n"
9748 					"\telse if (((gl_VertexIndex / 2) % 4 == 2))\n"
9749 					"\t\tval = vecVal.z;\n"
9750 					"\telse if (((gl_VertexIndex / 2) % 4 == 3))\n"
9751 					"\t\tval = vecVal.w;\n"
9752 					"\tif ((gl_VertexIndex % 2) == 0)\n"
9753 					"\t\tval = val & 0xFFFFu;\n"
9754 					"\telse\n"
9755 					"\t\tval = val >> 16u;\n"
9756 					"\thighp vec2 pos = vec2(val & 0xFFu, val >> 8u) / vec2(255.0);\n"
9757 					"\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
9758 					"}\n";
9759 
9760 				sources.glslSources.add("storage-buffer.vert")
9761 					<< glu::VertexSource(vertexShader);
9762 			}
9763 
9764 			{
9765 				std::ostringstream	fragmentShader;
9766 
9767 				fragmentShader <<
9768 					"#version 310 es\n"
9769 					"precision highp float;\n"
9770 					"precision highp int;\n"
9771 					"layout(location = 0) out highp vec4 o_color;\n"
9772 					"layout(set=0, binding=0) buffer Block\n"
9773 					"{\n"
9774 					"\thighp uvec4 values[];\n"
9775 					"} block;\n"
9776 					"layout(push_constant) uniform PushC\n"
9777 					"{\n"
9778 					"\tuint valuesPerPixel;\n"
9779 					"\tuint bufferSize;\n"
9780 					"} pushC;\n"
9781 					"void main (void) {\n"
9782 					"\thighp uint arrayIntSize = pushC.bufferSize / 4u;\n"
9783 					"\thighp uint id = uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x);\n"
9784 					"\thighp uint value = id;\n"
9785 					"\tfor (uint i = 0u; i < pushC.valuesPerPixel; i++)\n"
9786 					"\t{\n"
9787 					"\t\thighp uvec4 vecVal = block.values[(value / 4u) % (arrayIntSize / 4u)];\n"
9788 					"\t\tif ((value % 4u) == 0u)\n"
9789 					"\t\t\tvalue = vecVal.x;\n"
9790 					"\t\telse if ((value % 4u) == 1u)\n"
9791 					"\t\t\tvalue = vecVal.y;\n"
9792 					"\t\telse if ((value % 4u) == 2u)\n"
9793 					"\t\t\tvalue = vecVal.z;\n"
9794 					"\t\telse if ((value % 4u) == 3u)\n"
9795 					"\t\t\tvalue = vecVal.w;\n"
9796 					"\t}\n"
9797 					"\tuvec4 valueOut = uvec4(value & 0xFFu, (value >> 8u) & 0xFFu, (value >> 16u) & 0xFFu, (value >> 24u) & 0xFFu);\n"
9798 					"\to_color = vec4(valueOut) / vec4(255.0);\n"
9799 					"}\n";
9800 
9801 				sources.glslSources.add("storage-buffer.frag")
9802 					<< glu::FragmentSource(fragmentShader.str());
9803 			}
9804 		}
9805 
9806 		if (config.usage & USAGE_UNIFORM_TEXEL_BUFFER)
9807 		{
9808 			{
9809 				// Vertex uniform texel buffer rendering
9810 				const char* const vertexShader =
9811 					"#version 310 es\n"
9812 					"#extension GL_EXT_texture_buffer : require\n"
9813 					"precision highp float;\n"
9814 					"layout(set=0, binding=0) uniform highp utextureBuffer u_sampler;\n"
9815 					"void main (void) {\n"
9816 					"\tgl_PointSize = 1.0;\n"
9817 					"\thighp uint val = texelFetch(u_sampler, gl_VertexIndex).x;\n"
9818 					"\thighp vec2 pos = vec2(val & 0xFFu, val >> 8u) / vec2(255.0);\n"
9819 					"\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
9820 					"}\n";
9821 
9822 				sources.glslSources.add("uniform-texel-buffer.vert")
9823 					<< glu::VertexSource(vertexShader);
9824 			}
9825 
9826 			{
9827 				// Fragment uniform texel buffer rendering
9828 				const char* const fragmentShader =
9829 					"#version 310 es\n"
9830 					"#extension GL_EXT_texture_buffer : require\n"
9831 					"#extension GL_EXT_samplerless_texture_functions : require\n"
9832 					"precision highp float;\n"
9833 					"precision highp int;\n"
9834 					"layout(set=0, binding=0) uniform highp utextureBuffer u_sampler;\n"
9835 					"layout(location = 0) out highp vec4 o_color;\n"
9836 					"layout(push_constant) uniform PushC\n"
9837 					"{\n"
9838 					"\tuint callId;\n"
9839 					"\tuint valuesPerPixel;\n"
9840 					"\tuint maxTexelCount;\n"
9841 					"} pushC;\n"
9842 					"void main (void) {\n"
9843 					"\thighp uint id = uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x);\n"
9844 					"\thighp uint value = id;\n"
9845 					"\tif (uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x) < pushC.callId * (pushC.maxTexelCount / pushC.valuesPerPixel))\n"
9846 					"\t\tdiscard;\n"
9847 					"\tfor (uint i = 0u; i < pushC.valuesPerPixel; i++)\n"
9848 					"\t{\n"
9849 					"\t\tvalue = texelFetch(u_sampler, int(value % uint(textureSize(u_sampler)))).x;\n"
9850 					"\t}\n"
9851 					"\tuvec4 valueOut = uvec4(value & 0xFFu, (value >> 8u) & 0xFFu, (value >> 16u) & 0xFFu, (value >> 24u) & 0xFFu);\n"
9852 					"\to_color = vec4(valueOut) / vec4(255.0);\n"
9853 					"}\n";
9854 
9855 				sources.glslSources.add("uniform-texel-buffer.frag")
9856 					<< glu::FragmentSource(fragmentShader);
9857 			}
9858 		}
9859 
9860 		if (config.usage & USAGE_STORAGE_TEXEL_BUFFER)
9861 		{
9862 			{
9863 				// Vertex storage texel buffer rendering
9864 				const char* const vertexShader =
9865 					"#version 450\n"
9866 					"#extension GL_EXT_texture_buffer : require\n"
9867 					"precision highp float;\n"
9868 					"layout(set=0, binding=0, r32ui) uniform readonly highp uimageBuffer u_sampler;\n"
9869 					"out gl_PerVertex {\n"
9870 					"\tvec4 gl_Position;\n"
9871 					"\tfloat gl_PointSize;\n"
9872 					"};\n"
9873 					"void main (void) {\n"
9874 					"\tgl_PointSize = 1.0;\n"
9875 					"\thighp uint val = imageLoad(u_sampler, gl_VertexIndex / 2).x;\n"
9876 					"\tif (gl_VertexIndex % 2 == 0)\n"
9877 					"\t\tval = val & 0xFFFFu;\n"
9878 					"\telse\n"
9879 					"\t\tval = val >> 16;\n"
9880 					"\thighp vec2 pos = vec2(val & 0xFFu, val >> 8u) / vec2(255.0);\n"
9881 					"\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
9882 					"}\n";
9883 
9884 				sources.glslSources.add("storage-texel-buffer.vert")
9885 					<< glu::VertexSource(vertexShader);
9886 			}
9887 			{
9888 				// Fragment storage texel buffer rendering
9889 				const char* const fragmentShader =
9890 					"#version 310 es\n"
9891 					"#extension GL_EXT_texture_buffer : require\n"
9892 					"precision highp float;\n"
9893 					"precision highp int;\n"
9894 					"layout(set=0, binding=0, r32ui) uniform readonly highp uimageBuffer u_sampler;\n"
9895 					"layout(location = 0) out highp vec4 o_color;\n"
9896 					"layout(push_constant) uniform PushC\n"
9897 					"{\n"
9898 					"\tuint callId;\n"
9899 					"\tuint valuesPerPixel;\n"
9900 					"\tuint maxTexelCount;\n"
9901 					"\tuint width;\n"
9902 					"} pushC;\n"
9903 					"void main (void) {\n"
9904 					"\thighp uint id = uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x);\n"
9905 					"\thighp uint value = id;\n"
9906 					"\tif (uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x) < pushC.callId * (pushC.maxTexelCount / pushC.valuesPerPixel))\n"
9907 					"\t\tdiscard;\n"
9908 					"\tfor (uint i = 0u; i < pushC.valuesPerPixel; i++)\n"
9909 					"\t{\n"
9910 					"\t\tvalue = imageLoad(u_sampler, int(value % pushC.width)).x;\n"
9911 					"\t}\n"
9912 					"\tuvec4 valueOut = uvec4(value & 0xFFu, (value >> 8u) & 0xFFu, (value >> 16u) & 0xFFu, (value >> 24u) & 0xFFu);\n"
9913 					"\to_color = vec4(valueOut) / vec4(255.0);\n"
9914 					"}\n";
9915 
9916 				sources.glslSources.add("storage-texel-buffer.frag")
9917 					<< glu::FragmentSource(fragmentShader);
9918 			}
9919 		}
9920 
9921 		if (config.usage & USAGE_STORAGE_IMAGE)
9922 		{
9923 			{
9924 				// Vertex storage image
9925 				const char* const vertexShader =
9926 					"#version 450\n"
9927 					"precision highp float;\n"
9928 					"layout(set=0, binding=0, rgba8) uniform image2D u_image;\n"
9929 					"out gl_PerVertex {\n"
9930 					"\tvec4 gl_Position;\n"
9931 					"\tfloat gl_PointSize;\n"
9932 					"};\n"
9933 					"void main (void) {\n"
9934 					"\tgl_PointSize = 1.0;\n"
9935 					"\thighp vec4 val = imageLoad(u_image, ivec2((gl_VertexIndex / 2) / imageSize(u_image).x, (gl_VertexIndex / 2) % imageSize(u_image).x));\n"
9936 					"\thighp vec2 pos;\n"
9937 					"\tif (gl_VertexIndex % 2 == 0)\n"
9938 					"\t\tpos = val.xy;\n"
9939 					"\telse\n"
9940 					"\t\tpos = val.zw;\n"
9941 					"\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
9942 					"}\n";
9943 
9944 				sources.glslSources.add("storage-image.vert")
9945 					<< glu::VertexSource(vertexShader);
9946 			}
9947 			{
9948 				// Fragment storage image
9949 				const char* const fragmentShader =
9950 					"#version 450\n"
9951 					"#extension GL_EXT_texture_buffer : require\n"
9952 					"precision highp float;\n"
9953 					"layout(set=0, binding=0, rgba8) uniform image2D u_image;\n"
9954 					"layout(location = 0) out highp vec4 o_color;\n"
9955 					"void main (void) {\n"
9956 					"\thighp uvec2 size = uvec2(imageSize(u_image).x, imageSize(u_image).y);\n"
9957 					"\thighp uint valuesPerPixel = max(1u, (size.x * size.y) / (256u * 256u));\n"
9958 					"\thighp uvec4 value = uvec4(uint(gl_FragCoord.x), uint(gl_FragCoord.y), 0u, 0u);\n"
9959 					"\tfor (uint i = 0u; i < valuesPerPixel; i++)\n"
9960 					"\t{\n"
9961 					"\t\thighp vec4 floatValue = imageLoad(u_image, ivec2(int((value.z *  256u + (value.x ^ value.z)) % size.x), int((value.w * 256u + (value.y ^ value.w)) % size.y)));\n"
9962 					"\t\tvalue = uvec4(uint(round(floatValue.x * 255.0)), uint(round(floatValue.y * 255.0)), uint(round(floatValue.z * 255.0)), uint(round(floatValue.w * 255.0)));\n"
9963 					"\t}\n"
9964 					"\to_color = vec4(value) / vec4(255.0);\n"
9965 					"}\n";
9966 
9967 				sources.glslSources.add("storage-image.frag")
9968 					<< glu::FragmentSource(fragmentShader);
9969 			}
9970 		}
9971 
9972 		if (config.usage & USAGE_SAMPLED_IMAGE)
9973 		{
9974 			{
9975 				// Vertex sampled image
9976 				const char* const vertexShader =
9977 					"#version 450\n"
9978 					"precision highp float;\n"
9979 					"layout(set=0, binding=0) uniform sampler2D u_sampler;\n"
9980 					"out gl_PerVertex {\n"
9981 					"\tvec4 gl_Position;\n"
9982 					"\tfloat gl_PointSize;\n"
9983 					"};\n"
9984 					"void main (void) {\n"
9985 					"\tgl_PointSize = 1.0;\n"
9986 					"\thighp vec4 val = texelFetch(u_sampler, ivec2((gl_VertexIndex / 2) / textureSize(u_sampler, 0).x, (gl_VertexIndex / 2) % textureSize(u_sampler, 0).x), 0);\n"
9987 					"\thighp vec2 pos;\n"
9988 					"\tif (gl_VertexIndex % 2 == 0)\n"
9989 					"\t\tpos = val.xy;\n"
9990 					"\telse\n"
9991 					"\t\tpos = val.zw;\n"
9992 					"\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
9993 					"}\n";
9994 
9995 				sources.glslSources.add("sampled-image.vert")
9996 					<< glu::VertexSource(vertexShader);
9997 			}
9998 			{
9999 				// Fragment sampled image
10000 				const char* const fragmentShader =
10001 					"#version 450\n"
10002 					"#extension GL_EXT_texture_buffer : require\n"
10003 					"precision highp float;\n"
10004 					"layout(set=0, binding=0) uniform sampler2D u_sampler;\n"
10005 					"layout(location = 0) out highp vec4 o_color;\n"
10006 					"void main (void) {\n"
10007 					"\thighp uvec2 size = uvec2(textureSize(u_sampler, 0).x, textureSize(u_sampler, 0).y);\n"
10008 					"\thighp uint valuesPerPixel = max(1u, (size.x * size.y) / (256u * 256u));\n"
10009 					"\thighp uvec4 value = uvec4(uint(gl_FragCoord.x), uint(gl_FragCoord.y), 0u, 0u);\n"
10010 					"\tfor (uint i = 0u; i < valuesPerPixel; i++)\n"
10011 					"\t{\n"
10012 					"\t\thighp vec4 floatValue = texelFetch(u_sampler, ivec2(int((value.z *  256u + (value.x ^ value.z)) % size.x), int((value.w * 256u + (value.y ^ value.w)) % size.y)), 0);\n"
10013 					"\t\tvalue = uvec4(uint(round(floatValue.x * 255.0)), uint(round(floatValue.y * 255.0)), uint(round(floatValue.z * 255.0)), uint(round(floatValue.w * 255.0)));\n"
10014 					"\t}\n"
10015 					"\to_color = vec4(value) / vec4(255.0);\n"
10016 					"}\n";
10017 
10018 				sources.glslSources.add("sampled-image.frag")
10019 					<< glu::FragmentSource(fragmentShader);
10020 			}
10021 		}
10022 
10023 		{
10024 			const char* const vertexShader =
10025 				"#version 450\n"
10026 				"out gl_PerVertex {\n"
10027 				"\tvec4 gl_Position;\n"
10028 				"};\n"
10029 				"precision highp float;\n"
10030 				"void main (void) {\n"
10031 				"\tgl_Position = vec4(((gl_VertexIndex + 2) / 3) % 2 == 0 ? -1.0 : 1.0,\n"
10032 				"\t                   ((gl_VertexIndex + 1) / 3) % 2 == 0 ? -1.0 : 1.0, 0.0, 1.0);\n"
10033 				"}\n";
10034 
10035 			sources.glslSources.add("render-quad.vert")
10036 				<< glu::VertexSource(vertexShader);
10037 		}
10038 
10039 		{
10040 			const char* const fragmentShader =
10041 				"#version 310 es\n"
10042 				"layout(location = 0) out highp vec4 o_color;\n"
10043 				"void main (void) {\n"
10044 				"\to_color = vec4(1.0);\n"
10045 				"}\n";
10046 
10047 			sources.glslSources.add("render-white.frag")
10048 				<< glu::FragmentSource(fragmentShader);
10049 		}
10050 	}
10051 };
10052 
10053 } // anonymous
10054 
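// Creates the test group hierarchy: one sub-group per write-usage/read-usage combination plus an
// "all" group, with one test case per buffer size (and per vertex buffer stride where the vertex
// buffer usage is involved).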
10055 tcu::TestCaseGroup* createPipelineBarrierTests (tcu::TestContext& testCtx)
10056 {
10057 	de::MovePtr<tcu::TestCaseGroup>	group			(new tcu::TestCaseGroup(testCtx, "pipeline_barrier", "Pipeline barrier tests."));
10058 	const vk::VkDeviceSize			sizes[]			=
10059 	{
10060 		1024,			// 1K
10061 		8*1024,			// 8K
10062 		64*1024,		// 64K
10063 		ONE_MEGABYTE,	// 1M
10064 	};
10065 	const Usage						usages[]		=
10066 	{
10067 		USAGE_HOST_READ,
10068 		USAGE_HOST_WRITE,
10069 		USAGE_TRANSFER_SRC,
10070 		USAGE_TRANSFER_DST,
10071 		USAGE_VERTEX_BUFFER,
10072 		USAGE_INDEX_BUFFER,
10073 		USAGE_UNIFORM_BUFFER,
10074 		USAGE_UNIFORM_TEXEL_BUFFER,
10075 		USAGE_STORAGE_BUFFER,
10076 		USAGE_STORAGE_TEXEL_BUFFER,
10077 		USAGE_STORAGE_IMAGE,
10078 		USAGE_SAMPLED_IMAGE
10079 	};
10080 	const Usage						readUsages[]		=
10081 	{
10082 		USAGE_HOST_READ,
10083 		USAGE_TRANSFER_SRC,
10084 		USAGE_VERTEX_BUFFER,
10085 		USAGE_INDEX_BUFFER,
10086 		USAGE_UNIFORM_BUFFER,
10087 		USAGE_UNIFORM_TEXEL_BUFFER,
10088 		USAGE_STORAGE_BUFFER,
10089 		USAGE_STORAGE_TEXEL_BUFFER,
10090 		USAGE_STORAGE_IMAGE,
10091 		USAGE_SAMPLED_IMAGE
10092 	};
10093 
10094 	const Usage						writeUsages[]	=
10095 	{
10096 		USAGE_HOST_WRITE,
10097 		USAGE_TRANSFER_DST
10098 	};
10099 
10100 	const deUint32					vertexStrides[]	=
10101 	{
10102 		DEFAULT_VERTEX_BUFFER_STRIDE,
10103 		ALTERNATIVE_VERTEX_BUFFER_STRIDE,
10104 	};
10105 
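	// Build one test group per (write usage, read usage) pair. Each group contains one
	// case per resource size; vertex-buffer reads additionally get one case per vertex
	// stride, as handled below.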
10106 	for (size_t writeUsageNdx = 0; writeUsageNdx < DE_LENGTH_OF_ARRAY(writeUsages); writeUsageNdx++)
10107 	{
10108 		const Usage	writeUsage	= writeUsages[writeUsageNdx];
10109 
10110 		for (size_t readUsageNdx = 0; readUsageNdx < DE_LENGTH_OF_ARRAY(readUsages); readUsageNdx++)
10111 		{
10112 			const Usage						readUsage		= readUsages[readUsageNdx];
10113 			const Usage						usage			= writeUsage | readUsage;
10114 			const string					usageGroupName	(usageToName(usage));
10115 			de::MovePtr<tcu::TestCaseGroup>	usageGroup		(new tcu::TestCaseGroup(testCtx, usageGroupName.c_str(), usageGroupName.c_str()));
10116 
10117 			for (size_t sizeNdx = 0; sizeNdx < DE_LENGTH_OF_ARRAY(sizes); sizeNdx++)
10118 			{
10119 				const vk::VkDeviceSize	size		= sizes[sizeNdx];
10120 				TestConfig				config		=
10121 				{
10122 					usage,
10123 					DEFAULT_VERTEX_BUFFER_STRIDE,
10124 					size,
10125 					vk::VK_SHARING_MODE_EXCLUSIVE
10126 				};
10127 				const string			testName	(de::toString((deUint64)(size)));
10128 
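				// Vertex-buffer reads are exercised with every supported stride; all other
				// usages keep the default stride set in the config above.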
10129 				if (readUsage == USAGE_VERTEX_BUFFER)
10130 				{
10131 					for (size_t strideNdx = 0; strideNdx < DE_LENGTH_OF_ARRAY(vertexStrides); ++strideNdx)
10132 					{
10133 						const deUint32	stride			= vertexStrides[strideNdx];
10134 						const string	finalTestName	= testName + "_vertex_buffer_stride_" + de::toString(stride);
10135 
10136 						config.vertexBufferStride = stride;
10137 						usageGroup->addChild(new InstanceFactory1<MemoryTestInstance, TestConfig, AddPrograms>(testCtx, tcu::NODETYPE_SELF_VALIDATE, finalTestName, finalTestName, AddPrograms(), config));
10138 					}
10139 				}
10140 				else
10141 				{
10142 					usageGroup->addChild(new InstanceFactory1<MemoryTestInstance, TestConfig, AddPrograms>(testCtx, tcu::NODETYPE_SELF_VALIDATE, testName, testName, AddPrograms(), config));
10143 				}
10144 			}
10145 
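			// addChild() is expected to take ownership of the group, so the MovePtr is
			// released only after the child has been added.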
10146 			group->addChild(usageGroup.get());
10147 			usageGroup.release();
10148 		}
10149 	}
10150 
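	// Finally, two combined groups: "all" enables every usage flag at once, while
	// "all_device" drops the host read/write flags so that only device-side accesses
	// remain. Both sweep the same sizes and vertex strides.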
10151 	{
10152 		Usage all = (Usage)0;
10153 
10154 		for (size_t usageNdx = 0; usageNdx < DE_LENGTH_OF_ARRAY(usages); usageNdx++)
10155 			all = all | usages[usageNdx];
10156 
10157 		{
10158 			const string					usageGroupName	("all");
10159 			de::MovePtr<tcu::TestCaseGroup>	usageGroup		(new tcu::TestCaseGroup(testCtx, usageGroupName.c_str(), usageGroupName.c_str()));
10160 
10161 			for (size_t sizeNdx = 0; sizeNdx < DE_LENGTH_OF_ARRAY(sizes); sizeNdx++)
10162 			{
10163 				const vk::VkDeviceSize	size		= sizes[sizeNdx];
10164 
10165 				for (size_t strideNdx = 0; strideNdx < DE_LENGTH_OF_ARRAY(vertexStrides); ++strideNdx)
10166 				{
10167 					const deUint32			stride		= vertexStrides[strideNdx];
10168 					const string			testName	= de::toString(size) + "_vertex_buffer_stride_" + de::toString(stride);
10169 					const TestConfig		config		=
10170 					{
10171 						all,
10172 						stride,
10173 						size,
10174 						vk::VK_SHARING_MODE_EXCLUSIVE
10175 					};
10176 
10177 					usageGroup->addChild(new InstanceFactory1<MemoryTestInstance, TestConfig, AddPrograms>(testCtx, tcu::NODETYPE_SELF_VALIDATE, testName, testName, AddPrograms(), config));
10178 				}
10179 			}
10180 
10181 			group->addChild(usageGroup.get());
10182 			usageGroup.release();
10183 		}
10184 
10185 		{
10186 			const string					usageGroupName	("all_device");
10187 			de::MovePtr<tcu::TestCaseGroup>	usageGroup		(new tcu::TestCaseGroup(testCtx, usageGroupName.c_str(), usageGroupName.c_str()));
10188 
10189 			for (size_t sizeNdx = 0; sizeNdx < DE_LENGTH_OF_ARRAY(sizes); sizeNdx++)
10190 			{
10191 				const vk::VkDeviceSize	size		= sizes[sizeNdx];
10192 
10193 				for (size_t strideNdx = 0; strideNdx < DE_LENGTH_OF_ARRAY(vertexStrides); ++strideNdx)
10194 				{
10195 					const deUint32			stride		= vertexStrides[strideNdx];
10196 					const string			testName	= de::toString(size) + "_vertex_buffer_stride_" + de::toString(stride);
10197 					const TestConfig		config		=
10198 					{
10199 						(Usage)(all & (~(USAGE_HOST_READ|USAGE_HOST_WRITE))),
10200 						stride,
10201 						size,
10202 						vk::VK_SHARING_MODE_EXCLUSIVE
10203 					};
10204 
10205 					usageGroup->addChild(new InstanceFactory1<MemoryTestInstance, TestConfig, AddPrograms>(testCtx, tcu::NODETYPE_SELF_VALIDATE, testName, testName, AddPrograms(), config));
10206 				}
10207 			}
10208 
10209 			group->addChild(usageGroup.get());
10210 			usageGroup.release();
10211 		}
10212 	}
10213 
10214 	return group.release();
10215 }
10216 
10217 } // memory
10218 } // vkt
10219