1 /*-------------------------------------------------------------------------
2  * Vulkan Conformance Tests
3  * ------------------------
4  *
5  * Copyright (c) 2015 Google Inc.
6  *
7  * Licensed under the Apache License, Version 2.0 (the "License");
8  * you may not use this file except in compliance with the License.
9  * You may obtain a copy of the License at
10  *
11  *      http://www.apache.org/licenses/LICENSE-2.0
12  *
13  * Unless required by applicable law or agreed to in writing, software
14  * distributed under the License is distributed on an "AS IS" BASIS,
15  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16  * See the License for the specific language governing permissions and
17  * limitations under the License.
18  *
19  *//*!
20  * \file
21  * \brief Pipeline barrier tests
22  *//*--------------------------------------------------------------------*/
23 
24 #include "vktMemoryPipelineBarrierTests.hpp"
25 
26 #include "vktTestCaseUtil.hpp"
27 
28 #include "vkDefs.hpp"
29 #include "vkPlatform.hpp"
30 #include "vkRefUtil.hpp"
31 #include "vkQueryUtil.hpp"
32 #include "vkMemUtil.hpp"
33 #include "vkTypeUtil.hpp"
34 #include "vkPrograms.hpp"
35 
36 #include "tcuMaybe.hpp"
37 #include "tcuTextureUtil.hpp"
38 #include "tcuTestLog.hpp"
39 #include "tcuResultCollector.hpp"
40 #include "tcuTexture.hpp"
41 #include "tcuImageCompare.hpp"
42 
43 #include "deUniquePtr.hpp"
44 #include "deStringUtil.hpp"
45 #include "deRandom.hpp"
46 
47 #include "deInt32.h"
48 #include "deMath.h"
49 #include "deMemory.h"
50 
51 #include <map>
52 #include <set>
53 #include <sstream>
54 #include <string>
55 #include <vector>
56 
57 using tcu::TestLog;
58 using tcu::Maybe;
59 
60 using de::MovePtr;
61 
62 using std::string;
63 using std::vector;
64 using std::map;
65 using std::set;
66 using std::pair;
67 
68 using tcu::IVec2;
69 using tcu::UVec2;
70 using tcu::UVec4;
71 using tcu::Vec4;
72 using tcu::ConstPixelBufferAccess;
73 using tcu::PixelBufferAccess;
74 using tcu::TextureFormat;
75 using tcu::TextureLevel;
76 
77 namespace vkt
78 {
79 namespace memory
80 {
81 namespace
82 {
83 enum
84 {
85 	MAX_UNIFORM_BUFFER_SIZE = 1024,
86 	MAX_STORAGE_BUFFER_SIZE = (1<<28)
87 };
88 
89 // \todo [mika] Add to utilities
90 template<typename T>
91 T divRoundUp (const T& a, const T& b)
92 {
93 	return (a / b) + (a % b == 0 ? 0 : 1);
94 }
95 
96 enum
97 {
98 	ALL_PIPELINE_STAGES = vk::VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT
99 						| vk::VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT
100 						| vk::VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT
101 						| vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT
102 						| vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT
103 						| vk::VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT
104 						| vk::VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT
105 						| vk::VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT
106 						| vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT
107 						| vk::VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT
108 						| vk::VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT
109 						| vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT
110 						| vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT
111 						| vk::VK_PIPELINE_STAGE_TRANSFER_BIT
112 						| vk::VK_PIPELINE_STAGE_HOST_BIT
113 };
114 
115 enum
116 {
117 	ALL_ACCESSES = vk::VK_ACCESS_INDIRECT_COMMAND_READ_BIT
118 				 | vk::VK_ACCESS_INDEX_READ_BIT
119 				 | vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT
120 				 | vk::VK_ACCESS_UNIFORM_READ_BIT
121 				 | vk::VK_ACCESS_INPUT_ATTACHMENT_READ_BIT
122 				 | vk::VK_ACCESS_SHADER_READ_BIT
123 				 | vk::VK_ACCESS_SHADER_WRITE_BIT
124 				 | vk::VK_ACCESS_COLOR_ATTACHMENT_READ_BIT
125 				 | vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT
126 				 | vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT
127 				 | vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT
128 				 | vk::VK_ACCESS_TRANSFER_READ_BIT
129 				 | vk::VK_ACCESS_TRANSFER_WRITE_BIT
130 				 | vk::VK_ACCESS_HOST_READ_BIT
131 				 | vk::VK_ACCESS_HOST_WRITE_BIT
132 				 | vk::VK_ACCESS_MEMORY_READ_BIT
133 				 | vk::VK_ACCESS_MEMORY_WRITE_BIT
134 };
135 
136 enum Usage
137 {
138 	// Mapped host read and write
139 	USAGE_HOST_READ = (0x1u<<0),
140 	USAGE_HOST_WRITE = (0x1u<<1),
141 
142 	// Copy and other transfer operations
143 	USAGE_TRANSFER_SRC = (0x1u<<2),
144 	USAGE_TRANSFER_DST = (0x1u<<3),
145 
146 	// Buffer usage flags
147 	USAGE_INDEX_BUFFER = (0x1u<<4),
148 	USAGE_VERTEX_BUFFER = (0x1u<<5),
149 
150 	USAGE_UNIFORM_BUFFER = (0x1u<<6),
151 	USAGE_STORAGE_BUFFER = (0x1u<<7),
152 
153 	USAGE_UNIFORM_TEXEL_BUFFER = (0x1u<<8),
154 	USAGE_STORAGE_TEXEL_BUFFER = (0x1u<<9),
155 
156 	// \todo [2016-03-09 mika] This is probably almost impossible to do
157 	USAGE_INDIRECT_BUFFER = (0x1u<<10),
158 
159 	// Texture usage flags
160 	USAGE_SAMPLED_IMAGE = (0x1u<<11),
161 	USAGE_STORAGE_IMAGE = (0x1u<<12),
162 	USAGE_COLOR_ATTACHMENT = (0x1u<<13),
163 	USAGE_INPUT_ATTACHMENT = (0x1u<<14),
164 	USAGE_DEPTH_STENCIL_ATTACHMENT = (0x1u<<15),
165 };
166 
167 bool supportsDeviceBufferWrites (Usage usage)
168 {
169 	if (usage & USAGE_TRANSFER_DST)
170 		return true;
171 
172 	if (usage & USAGE_STORAGE_BUFFER)
173 		return true;
174 
175 	if (usage & USAGE_STORAGE_TEXEL_BUFFER)
176 		return true;
177 
178 	return false;
179 }
180 
181 bool supportsDeviceImageWrites (Usage usage)
182 {
183 	if (usage & USAGE_TRANSFER_DST)
184 		return true;
185 
186 	if (usage & USAGE_STORAGE_IMAGE)
187 		return true;
188 
189 	if (usage & USAGE_COLOR_ATTACHMENT)
190 		return true;
191 
192 	return false;
193 }
194 
195 // Sequential access enums
196 enum Access
197 {
198 	ACCESS_INDIRECT_COMMAND_READ_BIT = 0,
199 	ACCESS_INDEX_READ_BIT,
200 	ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
201 	ACCESS_UNIFORM_READ_BIT,
202 	ACCESS_INPUT_ATTACHMENT_READ_BIT,
203 	ACCESS_SHADER_READ_BIT,
204 	ACCESS_SHADER_WRITE_BIT,
205 	ACCESS_COLOR_ATTACHMENT_READ_BIT,
206 	ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
207 	ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT,
208 	ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
209 	ACCESS_TRANSFER_READ_BIT,
210 	ACCESS_TRANSFER_WRITE_BIT,
211 	ACCESS_HOST_READ_BIT,
212 	ACCESS_HOST_WRITE_BIT,
213 	ACCESS_MEMORY_READ_BIT,
214 	ACCESS_MEMORY_WRITE_BIT,
215 
216 	ACCESS_LAST
217 };
218 
219 // Sequential stage enums
220 enum PipelineStage
221 {
222 	PIPELINESTAGE_TOP_OF_PIPE_BIT = 0,
223 	PIPELINESTAGE_BOTTOM_OF_PIPE_BIT,
224 	PIPELINESTAGE_DRAW_INDIRECT_BIT,
225 	PIPELINESTAGE_VERTEX_INPUT_BIT,
226 	PIPELINESTAGE_VERTEX_SHADER_BIT,
227 	PIPELINESTAGE_TESSELLATION_CONTROL_SHADER_BIT,
228 	PIPELINESTAGE_TESSELLATION_EVALUATION_SHADER_BIT,
229 	PIPELINESTAGE_GEOMETRY_SHADER_BIT,
230 	PIPELINESTAGE_FRAGMENT_SHADER_BIT,
231 	PIPELINESTAGE_EARLY_FRAGMENT_TESTS_BIT,
232 	PIPELINESTAGE_LATE_FRAGMENT_TESTS_BIT,
233 	PIPELINESTAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
234 	PIPELINESTAGE_COMPUTE_SHADER_BIT,
235 	PIPELINESTAGE_TRANSFER_BIT,
236 	PIPELINESTAGE_HOST_BIT,
237 
238 	PIPELINESTAGE_LAST
239 };
240 
241 PipelineStage pipelineStageFlagToPipelineStage (vk::VkPipelineStageFlagBits flags)
242 {
243 	switch (flags)
244 	{
245 		case vk::VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT:						return PIPELINESTAGE_TOP_OF_PIPE_BIT;
246 		case vk::VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT:					return PIPELINESTAGE_BOTTOM_OF_PIPE_BIT;
247 		case vk::VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT:					return PIPELINESTAGE_DRAW_INDIRECT_BIT;
248 		case vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT:					return PIPELINESTAGE_VERTEX_INPUT_BIT;
249 		case vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT:					return PIPELINESTAGE_VERTEX_SHADER_BIT;
250 		case vk::VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT:		return PIPELINESTAGE_TESSELLATION_CONTROL_SHADER_BIT;
251 		case vk::VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT:	return PIPELINESTAGE_TESSELLATION_EVALUATION_SHADER_BIT;
252 		case vk::VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT:					return PIPELINESTAGE_GEOMETRY_SHADER_BIT;
253 		case vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT:					return PIPELINESTAGE_FRAGMENT_SHADER_BIT;
254 		case vk::VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT:			return PIPELINESTAGE_EARLY_FRAGMENT_TESTS_BIT;
255 		case vk::VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT:				return PIPELINESTAGE_LATE_FRAGMENT_TESTS_BIT;
256 		case vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT:			return PIPELINESTAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
257 		case vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT:					return PIPELINESTAGE_COMPUTE_SHADER_BIT;
258 		case vk::VK_PIPELINE_STAGE_TRANSFER_BIT:						return PIPELINESTAGE_TRANSFER_BIT;
259 		case vk::VK_PIPELINE_STAGE_HOST_BIT:							return PIPELINESTAGE_HOST_BIT;
260 
261 		default:
262 			DE_FATAL("Unknown pipeline stage flags");
263 			return PIPELINESTAGE_LAST;
264 	}
265 }
266 
267 Usage operator| (Usage a, Usage b)
268 {
269 	return (Usage)((deUint32)a | (deUint32)b);
270 }
271 
272 Usage operator& (Usage a, Usage b)
273 {
274 	return (Usage)((deUint32)a & (deUint32)b);
275 }
276 
277 string usageToName (Usage usage)
278 {
279 	const struct
280 	{
281 		Usage				usage;
282 		const char* const	name;
283 	} usageNames[] =
284 	{
285 		{ USAGE_HOST_READ,					"host_read" },
286 		{ USAGE_HOST_WRITE,					"host_write" },
287 
288 		{ USAGE_TRANSFER_SRC,				"transfer_src" },
289 		{ USAGE_TRANSFER_DST,				"transfer_dst" },
290 
291 		{ USAGE_INDEX_BUFFER,				"index_buffer" },
292 		{ USAGE_VERTEX_BUFFER,				"vertex_buffer" },
293 		{ USAGE_UNIFORM_BUFFER,				"uniform_buffer" },
294 		{ USAGE_STORAGE_BUFFER,				"storage_buffer" },
295 		{ USAGE_UNIFORM_TEXEL_BUFFER,		"uniform_texel_buffer" },
296 		{ USAGE_STORAGE_TEXEL_BUFFER,		"storage_texel_buffer" },
297 		{ USAGE_INDIRECT_BUFFER,			"indirect_buffer" },
298 		{ USAGE_SAMPLED_IMAGE,				"image_sampled" },
299 		{ USAGE_STORAGE_IMAGE,				"storage_image" },
300 		{ USAGE_COLOR_ATTACHMENT,			"color_attachment" },
301 		{ USAGE_INPUT_ATTACHMENT,			"input_attachment" },
302 		{ USAGE_DEPTH_STENCIL_ATTACHMENT,	"depth_stencil_attachment" },
303 	};
304 
305 	std::ostringstream	stream;
306 	bool				first = true;
307 
308 	for (size_t usageNdx = 0; usageNdx < DE_LENGTH_OF_ARRAY(usageNames); usageNdx++)
309 	{
310 		if (usage & usageNames[usageNdx].usage)
311 		{
312 			if (!first)
313 				stream << "_";
314 			else
315 				first = false;
316 
317 			stream << usageNames[usageNdx].name;
318 		}
319 	}
320 
321 	return stream.str();
322 }
323 
324 vk::VkBufferUsageFlags usageToBufferUsageFlags (Usage usage)
325 {
326 	vk::VkBufferUsageFlags flags = 0;
327 
328 	if (usage & USAGE_TRANSFER_SRC)
329 		flags |= vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
330 
331 	if (usage & USAGE_TRANSFER_DST)
332 		flags |= vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT;
333 
334 	if (usage & USAGE_INDEX_BUFFER)
335 		flags |= vk::VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
336 
337 	if (usage & USAGE_VERTEX_BUFFER)
338 		flags |= vk::VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
339 
340 	if (usage & USAGE_INDIRECT_BUFFER)
341 		flags |= vk::VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT;
342 
343 	if (usage & USAGE_UNIFORM_BUFFER)
344 		flags |= vk::VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
345 
346 	if (usage & USAGE_STORAGE_BUFFER)
347 		flags |= vk::VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
348 
349 	if (usage & USAGE_UNIFORM_TEXEL_BUFFER)
350 		flags |= vk::VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT;
351 
352 	if (usage & USAGE_STORAGE_TEXEL_BUFFER)
353 		flags |= vk::VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT;
354 
355 	return flags;
356 }
357 
358 vk::VkImageUsageFlags usageToImageUsageFlags (Usage usage)
359 {
360 	vk::VkImageUsageFlags flags = 0;
361 
362 	if (usage & USAGE_TRANSFER_SRC)
363 		flags |= vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
364 
365 	if (usage & USAGE_TRANSFER_DST)
366 		flags |= vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT;
367 
368 	if (usage & USAGE_SAMPLED_IMAGE)
369 		flags |= vk::VK_IMAGE_USAGE_SAMPLED_BIT;
370 
371 	if (usage & USAGE_STORAGE_IMAGE)
372 		flags |= vk::VK_IMAGE_USAGE_STORAGE_BIT;
373 
374 	if (usage & USAGE_COLOR_ATTACHMENT)
375 		flags |= vk::VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
376 
377 	if (usage & USAGE_INPUT_ATTACHMENT)
378 		flags |= vk::VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
379 
380 	if (usage & USAGE_DEPTH_STENCIL_ATTACHMENT)
381 		flags |= vk::VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
382 
383 	return flags;
384 }
385 
386 vk::VkPipelineStageFlags usageToStageFlags (Usage usage)
387 {
388 	vk::VkPipelineStageFlags flags = 0;
389 
390 	if (usage & (USAGE_HOST_READ|USAGE_HOST_WRITE))
391 		flags |= vk::VK_PIPELINE_STAGE_HOST_BIT;
392 
393 	if (usage & (USAGE_TRANSFER_SRC|USAGE_TRANSFER_DST))
394 		flags |= vk::VK_PIPELINE_STAGE_TRANSFER_BIT;
395 
396 	if (usage & (USAGE_VERTEX_BUFFER|USAGE_INDEX_BUFFER))
397 		flags |= vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
398 
399 	if (usage & USAGE_INDIRECT_BUFFER)
400 		flags |= vk::VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
401 
402 	if (usage &
403 			(USAGE_UNIFORM_BUFFER
404 			| USAGE_STORAGE_BUFFER
405 			| USAGE_UNIFORM_TEXEL_BUFFER
406 			| USAGE_STORAGE_TEXEL_BUFFER
407 			| USAGE_SAMPLED_IMAGE
408 			| USAGE_STORAGE_IMAGE))
409 	{
410 		flags |= (vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT
411 				| vk::VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT
412 				| vk::VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT
413 				| vk::VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT
414 				| vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT
415 				| vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT);
416 	}
417 
418 	if (usage & USAGE_INPUT_ATTACHMENT)
419 		flags |= vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
420 
421 	if (usage & USAGE_COLOR_ATTACHMENT)
422 		flags |= vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
423 
424 	if (usage & USAGE_DEPTH_STENCIL_ATTACHMENT)
425 	{
426 		flags |= vk::VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT
427 				| vk::VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
428 	}
429 
430 	return flags;
431 }
432 
433 vk::VkAccessFlags usageToAccessFlags (Usage usage)
434 {
435 	vk::VkAccessFlags flags = 0;
436 
437 	if (usage & USAGE_HOST_READ)
438 		flags |= vk::VK_ACCESS_HOST_READ_BIT;
439 
440 	if (usage & USAGE_HOST_WRITE)
441 		flags |= vk::VK_ACCESS_HOST_WRITE_BIT;
442 
443 	if (usage & USAGE_TRANSFER_SRC)
444 		flags |= vk::VK_ACCESS_TRANSFER_READ_BIT;
445 
446 	if (usage & USAGE_TRANSFER_DST)
447 		flags |= vk::VK_ACCESS_TRANSFER_WRITE_BIT;
448 
449 	if (usage & USAGE_INDEX_BUFFER)
450 		flags |= vk::VK_ACCESS_INDEX_READ_BIT;
451 
452 	if (usage & USAGE_VERTEX_BUFFER)
453 		flags |= vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
454 
455 	if (usage & (USAGE_UNIFORM_BUFFER | USAGE_UNIFORM_TEXEL_BUFFER))
456 		flags |= vk::VK_ACCESS_UNIFORM_READ_BIT;
457 
458 	if (usage & USAGE_SAMPLED_IMAGE)
459 		flags |= vk::VK_ACCESS_SHADER_READ_BIT;
460 
461 	if (usage & (USAGE_STORAGE_BUFFER
462 				| USAGE_STORAGE_TEXEL_BUFFER
463 				| USAGE_STORAGE_IMAGE))
464 		flags |= vk::VK_ACCESS_SHADER_READ_BIT | vk::VK_ACCESS_SHADER_WRITE_BIT;
465 
466 	if (usage & USAGE_INDIRECT_BUFFER)
467 		flags |= vk::VK_ACCESS_INDIRECT_COMMAND_READ_BIT;
468 
469 	if (usage & USAGE_COLOR_ATTACHMENT)
470 		flags |= vk::VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
471 
472 	if (usage & USAGE_INPUT_ATTACHMENT)
473 		flags |= vk::VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;
474 
475 	if (usage & USAGE_DEPTH_STENCIL_ATTACHMENT)
476 		flags |= vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT
477 			| vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
478 
479 	return flags;
480 }
481 
482 struct TestConfig
483 {
484 	Usage				usage;
485 	vk::VkDeviceSize	size;
486 	vk::VkSharingMode	sharing;
487 };
488 
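// Allocate a command buffer of the requested level and put it into the recording state.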
489 vk::Move<vk::VkCommandBuffer> createBeginCommandBuffer (const vk::DeviceInterface&	vkd,
490 														vk::VkDevice				device,
491 														vk::VkCommandPool			pool,
492 														vk::VkCommandBufferLevel	level)
493 {
494 	const vk::VkCommandBufferInheritanceInfo	inheritInfo	=
495 	{
496 		vk::VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO,
497 		DE_NULL,
498 		0,
499 		0,
500 		0,
501 		VK_FALSE,
502 		0u,
503 		0u
504 	};
505 	const vk::VkCommandBufferBeginInfo			beginInfo =
506 	{
507 		vk::VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
508 		DE_NULL,
509 		0u,
510 		(level == vk::VK_COMMAND_BUFFER_LEVEL_SECONDARY ? &inheritInfo : (const vk::VkCommandBufferInheritanceInfo*)DE_NULL),
511 	};
512 
513 	vk::Move<vk::VkCommandBuffer> commandBuffer (allocateCommandBuffer(vkd, device, pool, level));
514 
515 	vkd.beginCommandBuffer(*commandBuffer, &beginInfo);
516 
517 	return commandBuffer;
518 }
519 
520 vk::Move<vk::VkBuffer> createBuffer (const vk::DeviceInterface&	vkd,
521 									 vk::VkDevice				device,
522 									 vk::VkDeviceSize			size,
523 									 vk::VkBufferUsageFlags		usage,
524 									 vk::VkSharingMode			sharingMode,
525 									 const vector<deUint32>&	queueFamilies)
526 {
527 	const vk::VkBufferCreateInfo	createInfo =
528 	{
529 		vk::VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
530 		DE_NULL,
531 
532 		0,	// flags
533 		size,
534 		usage,
535 		sharingMode,
536 		(deUint32)queueFamilies.size(),
537 		&queueFamilies[0]
538 	};
539 
540 	return vk::createBuffer(vkd, device, &createInfo);
541 }
542 
543 vk::Move<vk::VkDeviceMemory> allocMemory (const vk::DeviceInterface&	vkd,
544 										  vk::VkDevice					device,
545 										  vk::VkDeviceSize				size,
546 										  deUint32						memoryTypeIndex)
547 {
548 	const vk::VkMemoryAllocateInfo alloc =
549 	{
550 		vk::VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,	// sType
551 		DE_NULL,									// pNext
552 
553 		size,
554 		memoryTypeIndex
555 	};
556 
557 	return vk::allocateMemory(vkd, device, &alloc);
558 }
559 
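// Allocate memory for the buffer from the first compatible memory type with the requested
// properties and bind it; on OUT_OF_*_MEMORY the next matching memory type is tried.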
560 vk::Move<vk::VkDeviceMemory> bindBufferMemory (const vk::InstanceInterface&	vki,
561 											   const vk::DeviceInterface&	vkd,
562 											   vk::VkPhysicalDevice			physicalDevice,
563 											   vk::VkDevice					device,
564 											   vk::VkBuffer					buffer,
565 											   vk::VkMemoryPropertyFlags	properties)
566 {
567 	const vk::VkMemoryRequirements				memoryRequirements	= vk::getBufferMemoryRequirements(vkd, device, buffer);
568 	const vk::VkPhysicalDeviceMemoryProperties	memoryProperties	= vk::getPhysicalDeviceMemoryProperties(vki, physicalDevice);
569 	deUint32									memoryTypeIndex;
570 
571 	for (memoryTypeIndex = 0; memoryTypeIndex < memoryProperties.memoryTypeCount; memoryTypeIndex++)
572 	{
573 		if ((memoryRequirements.memoryTypeBits & (0x1u << memoryTypeIndex))
574 			&& (memoryProperties.memoryTypes[memoryTypeIndex].propertyFlags & properties) == properties)
575 		{
576 			try
577 			{
578 				const vk::VkMemoryAllocateInfo	allocationInfo	=
579 				{
580 					vk::VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
581 					DE_NULL,
582 					memoryRequirements.size,
583 					memoryTypeIndex
584 				};
585 				vk::Move<vk::VkDeviceMemory>	memory			(vk::allocateMemory(vkd, device, &allocationInfo));
586 
587 				VK_CHECK(vkd.bindBufferMemory(device, buffer, *memory, 0));
588 
589 				return memory;
590 			}
591 			catch (const vk::Error& error)
592 			{
593 				if (error.getError() == vk::VK_ERROR_OUT_OF_DEVICE_MEMORY
594 					|| error.getError() == vk::VK_ERROR_OUT_OF_HOST_MEMORY)
595 				{
596 					// Try next memory type/heap if out of memory
597 				}
598 				else
599 				{
600 					// Throw all other errors forward
601 					throw;
602 				}
603 			}
604 		}
605 	}
606 
607 	TCU_FAIL("Failed to allocate memory for buffer");
608 }
609 
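// Same as bindBufferMemory() above, but allocates and binds memory for an image.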
610 vk::Move<vk::VkDeviceMemory> bindImageMemory (const vk::InstanceInterface&	vki,
611 											   const vk::DeviceInterface&	vkd,
612 											   vk::VkPhysicalDevice			physicalDevice,
613 											   vk::VkDevice					device,
614 											   vk::VkImage					image,
615 											   vk::VkMemoryPropertyFlags	properties)
616 {
617 	const vk::VkMemoryRequirements				memoryRequirements	= vk::getImageMemoryRequirements(vkd, device, image);
618 	const vk::VkPhysicalDeviceMemoryProperties	memoryProperties	= vk::getPhysicalDeviceMemoryProperties(vki, physicalDevice);
619 	deUint32									memoryTypeIndex;
620 
621 	for (memoryTypeIndex = 0; memoryTypeIndex < memoryProperties.memoryTypeCount; memoryTypeIndex++)
622 	{
623 		if ((memoryRequirements.memoryTypeBits & (0x1u << memoryTypeIndex))
624 			&& (memoryProperties.memoryTypes[memoryTypeIndex].propertyFlags & properties) == properties)
625 		{
626 			try
627 			{
628 				const vk::VkMemoryAllocateInfo	allocationInfo	=
629 				{
630 					vk::VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
631 					DE_NULL,
632 					memoryRequirements.size,
633 					memoryTypeIndex
634 				};
635 				vk::Move<vk::VkDeviceMemory>	memory			(vk::allocateMemory(vkd, device, &allocationInfo));
636 
637 				VK_CHECK(vkd.bindImageMemory(device, image, *memory, 0));
638 
639 				return memory;
640 			}
641 			catch (const vk::Error& error)
642 			{
643 				if (error.getError() == vk::VK_ERROR_OUT_OF_DEVICE_MEMORY
644 					|| error.getError() == vk::VK_ERROR_OUT_OF_HOST_MEMORY)
645 				{
646 					// Try next memory type/heap if out of memory
647 				}
648 				else
649 				{
650 					// Throw all other errors forward
651 					throw;
652 				}
653 			}
654 		}
655 	}
656 
657 	TCU_FAIL("Failed to allocate memory for image");
658 }
659 
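// Submit the command buffer to the queue and wait until the queue is idle.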
660 void queueRun (const vk::DeviceInterface&	vkd,
661 			   vk::VkQueue					queue,
662 			   vk::VkCommandBuffer			commandBuffer)
663 {
664 	const vk::VkSubmitInfo	submitInfo	=
665 	{
666 		vk::VK_STRUCTURE_TYPE_SUBMIT_INFO,
667 		DE_NULL,
668 
669 		0,
670 		DE_NULL,
671 		(const vk::VkPipelineStageFlags*)DE_NULL,
672 
673 		1,
674 		&commandBuffer,
675 
676 		0,
677 		DE_NULL
678 	};
679 
680 	VK_CHECK(vkd.queueSubmit(queue, 1, &submitInfo, 0));
681 	VK_CHECK(vkd.queueWaitIdle(queue));
682 }
683 
684 void* mapMemory (const vk::DeviceInterface&	vkd,
685 				 vk::VkDevice				device,
686 				 vk::VkDeviceMemory			memory,
687 				 vk::VkDeviceSize			size)
688 {
689 	void* ptr;
690 
691 	VK_CHECK(vkd.mapMemory(device, memory, 0, size, 0, &ptr));
692 
693 	return ptr;
694 }
695 
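// CPU-side reference of the memory contents under test: m_data holds the expected byte
// values and m_defined stores one bit per byte telling whether that byte has a known value.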
696 class ReferenceMemory
697 {
698 public:
699 			ReferenceMemory	(size_t size);
700 
701 	void	set				(size_t pos, deUint8 val);
702 	deUint8	get				(size_t pos) const;
703 	bool	isDefined		(size_t pos) const;
704 
705 	void	setDefined		(size_t offset, size_t size, const void* data);
706 	void	setUndefined	(size_t offset, size_t size);
707 	void	setData			(size_t offset, size_t size, const void* data);
708 
709 	size_t	getSize			(void) const { return m_data.size(); }
710 
711 private:
712 	vector<deUint8>		m_data;
713 	vector<deUint64>	m_defined;
714 };
715 
716 ReferenceMemory::ReferenceMemory (size_t size)
717 	: m_data	(size, 0)
718 	, m_defined	(size / 64 + (size % 64 == 0 ? 0 : 1), 0ull)
719 {
720 }
721 
722 void ReferenceMemory::set (size_t pos, deUint8 val)
723 {
724 	DE_ASSERT(pos < m_data.size());
725 
726 	m_data[pos] = val;
727 	m_defined[pos / 64] |= 0x1ull << (pos % 64);
728 }
729 
730 void ReferenceMemory::setData (size_t offset, size_t size, const void* data_)
731 {
732 	const deUint8* data = (const deUint8*)data_;
733 
734 	DE_ASSERT(offset < m_data.size());
735 	DE_ASSERT(offset + size <= m_data.size());
736 
737 	// \todo [2016-03-09 mika] Optimize
738 	for (size_t pos = 0; pos < size; pos++)
739 	{
740 		m_data[offset + pos] = data[pos];
741 		m_defined[(offset + pos) / 64] |= 0x1ull << ((offset + pos) % 64);
742 	}
743 }
744 
745 void ReferenceMemory::setUndefined	(size_t offset, size_t size)
746 {
747 	// \todo [2016-03-09 mika] Optimize
748 	for (size_t pos = 0; pos < size; pos++)
749 		m_defined[(offset + pos) / 64] |= 0x1ull << ((offset + pos) % 64);
750 }
751 
752 deUint8 ReferenceMemory::get (size_t pos) const
753 {
754 	DE_ASSERT(pos < m_data.size());
755 	DE_ASSERT(isDefined(pos));
756 	return m_data[pos];
757 }
758 
759 bool ReferenceMemory::isDefined (size_t pos) const
760 {
761 	DE_ASSERT(pos < m_data.size());
762 
763 	return (m_defined[pos / 64] & (0x1ull << (pos % 64))) != 0;
764 }
765 
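// Owns the VkDeviceMemory allocation under test together with the precomputed limits for
// the largest buffer and the largest RGBA8 image that fit into it.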
766 class Memory
767 {
768 public:
769 							Memory				(const vk::InstanceInterface&	vki,
770 												 const vk::DeviceInterface&		vkd,
771 												 vk::VkPhysicalDevice			physicalDevice,
772 												 vk::VkDevice					device,
773 												 vk::VkDeviceSize				size,
774 												 deUint32						memoryTypeIndex,
775 												 vk::VkDeviceSize				maxBufferSize,
776 												 deInt32						maxImageWidth,
777 												 deInt32						maxImageHeight);
778 
779 	vk::VkDeviceSize		getSize				(void) const { return m_size; }
780 	vk::VkDeviceSize		getMaxBufferSize	(void) const { return m_maxBufferSize; }
781 	bool					getSupportBuffers	(void) const { return m_maxBufferSize > 0; }
782 
783 	deInt32					getMaxImageWidth	(void) const { return m_maxImageWidth; }
784 	deInt32					getMaxImageHeight	(void) const { return m_maxImageHeight; }
785 	bool					getSupportImages	(void) const { return m_maxImageWidth > 0; }
786 
787 	const vk::VkMemoryType&	getMemoryType		(void) const { return m_memoryType; }
788 	deUint32				getMemoryTypeIndex	(void) const { return m_memoryTypeIndex; }
789 	vk::VkDeviceMemory		getMemory			(void) const { return *m_memory; }
790 
791 private:
792 	const vk::VkDeviceSize					m_size;
793 	const deUint32							m_memoryTypeIndex;
794 	const vk::VkMemoryType					m_memoryType;
795 	const vk::Unique<vk::VkDeviceMemory>	m_memory;
796 	const vk::VkDeviceSize					m_maxBufferSize;
797 	const deInt32							m_maxImageWidth;
798 	const deInt32							m_maxImageHeight;
799 };
800 
801 vk::VkMemoryType getMemoryTypeInfo (const vk::InstanceInterface&	vki,
802 									vk::VkPhysicalDevice			device,
803 									deUint32						memoryTypeIndex)
804 {
805 	const vk::VkPhysicalDeviceMemoryProperties memoryProperties = vk::getPhysicalDeviceMemoryProperties(vki, device);
806 
807 	DE_ASSERT(memoryTypeIndex < memoryProperties.memoryTypeCount);
808 
809 	return memoryProperties.memoryTypes[memoryTypeIndex];
810 }
811 
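// Binary-search the largest buffer size whose memory requirements still fit in memorySize
// and are compatible with the given memory type index.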
812 vk::VkDeviceSize findMaxBufferSize (const vk::DeviceInterface&		vkd,
813 									vk::VkDevice					device,
814 
815 									vk::VkBufferUsageFlags			usage,
816 									vk::VkSharingMode				sharingMode,
817 									const vector<deUint32>&			queueFamilies,
818 
819 									vk::VkDeviceSize				memorySize,
820 									deUint32						memoryTypeIndex)
821 {
822 	vk::VkDeviceSize	lastSuccess	= 0;
823 	vk::VkDeviceSize	currentSize	= memorySize / 2;
824 
825 	{
826 		const vk::Unique<vk::VkBuffer>  buffer			(createBuffer(vkd, device, memorySize, usage, sharingMode, queueFamilies));
827 		const vk::VkMemoryRequirements  requirements	(vk::getBufferMemoryRequirements(vkd, device, *buffer));
828 
829 		if (requirements.size == memorySize && requirements.memoryTypeBits & (0x1u << memoryTypeIndex))
830 			return memorySize;
831 	}
832 
833 	for (vk::VkDeviceSize stepSize = memorySize / 4; currentSize > 0; stepSize /= 2)
834 	{
835 		const vk::Unique<vk::VkBuffer>	buffer			(createBuffer(vkd, device, currentSize, usage, sharingMode, queueFamilies));
836 		const vk::VkMemoryRequirements	requirements	(vk::getBufferMemoryRequirements(vkd, device, *buffer));
837 
838 		if (requirements.size <= memorySize && requirements.memoryTypeBits & (0x1u << memoryTypeIndex))
839 		{
840 			lastSuccess = currentSize;
841 			currentSize += stepSize;
842 		}
843 		else
844 			currentSize -= stepSize;
845 
846 		if (stepSize == 0)
847 			break;
848 	}
849 
850 	return lastSuccess;
851 }
852 
853 // Round size down to the largest W * H * 4 that fits, where W and H < 4096
854 vk::VkDeviceSize roundBufferSizeToWxHx4 (vk::VkDeviceSize size)
855 {
856 	const vk::VkDeviceSize	maxTextureSize	= 4096;
857 	vk::VkDeviceSize		maxTexelCount	= size / 4;
858 	vk::VkDeviceSize		bestW			= de::max(maxTexelCount, maxTextureSize);
859 	vk::VkDeviceSize		bestH			= maxTexelCount / bestW;
860 
861 	// \todo [2016-03-09 mika] Could probably be faster?
862 	for (vk::VkDeviceSize w = 1; w * w < maxTexelCount && w < maxTextureSize && bestW * bestH * 4 < size; w++)
863 	{
864 		const vk::VkDeviceSize h = maxTexelCount / w;
865 
866 		if (bestW * bestH < w * h)
867 		{
868 			bestW = w;
869 			bestH = h;
870 		}
871 	}
872 
873 	return bestW * bestH * 4;
874 }
875 
876 // Find an RGBA8 image size that takes exactly "size" bytes.
877 // "size" must be W * H * 4 where W and H < 4096
878 IVec2 findImageSizeWxHx4 (vk::VkDeviceSize size)
879 {
880 	const vk::VkDeviceSize	maxTextureSize	= 4096;
881 	vk::VkDeviceSize		texelCount		= size / 4;
882 
883 	DE_ASSERT((size % 4) == 0);
884 
885 	// \todo [2016-03-09 mika] Could probably be faster?
886 	for (vk::VkDeviceSize w = 1; w < maxTextureSize && w < texelCount; w++)
887 	{
888 		const vk::VkDeviceSize	h	= texelCount / w;
889 
890 		if ((texelCount  % w) == 0 && h < maxTextureSize)
891 			return IVec2((int)w, (int)h);
892 	}
893 
894 	DE_FATAL("Invalid size");
895 	return IVec2(-1, -1);
896 }
897 
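// Binary-search the largest 2D RGBA8 optimal-tiling image extent whose memory requirements
// fit in memorySize and are compatible with the given memory type index.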
898 IVec2 findMaxRGBA8ImageSize (const vk::DeviceInterface&	vkd,
899 							 vk::VkDevice				device,
900 
901 							 vk::VkImageUsageFlags		usage,
902 							 vk::VkSharingMode			sharingMode,
903 							 const vector<deUint32>&	queueFamilies,
904 
905 							 vk::VkDeviceSize			memorySize,
906 							 deUint32					memoryTypeIndex)
907 {
908 	IVec2		lastSuccess		(0);
909 	IVec2		currentSize;
910 
911 	{
912 		const deUint32	texelCount	= (deUint32)(memorySize / 4);
913 		const deUint32	width		= (deUint32)deFloatSqrt((float)texelCount);
914 		const deUint32	height		= texelCount / width;
915 
916 		currentSize[0] = deMaxu32(width, height);
917 		currentSize[1] = deMinu32(width, height);
918 	}
919 
920 	for (deInt32 stepSize = currentSize[0] / 2; currentSize[0] > 0; stepSize /= 2)
921 	{
922 		const vk::VkImageCreateInfo	createInfo		=
923 		{
924 			vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
925 			DE_NULL,
926 
927 			0u,
928 			vk::VK_IMAGE_TYPE_2D,
929 			vk::VK_FORMAT_R8G8B8A8_UNORM,
930 			{
931 				(deUint32)currentSize[0],
932 				(deUint32)currentSize[1],
933 				1u,
934 			},
935 			1u, 1u,
936 			vk::VK_SAMPLE_COUNT_1_BIT,
937 			vk::VK_IMAGE_TILING_OPTIMAL,
938 			usage,
939 			sharingMode,
940 			(deUint32)queueFamilies.size(),
941 			&queueFamilies[0],
942 			vk::VK_IMAGE_LAYOUT_UNDEFINED
943 		};
944 		const vk::Unique<vk::VkImage>	image			(vk::createImage(vkd, device, &createInfo));
945 		const vk::VkMemoryRequirements	requirements	(vk::getImageMemoryRequirements(vkd, device, *image));
946 
947 		if (requirements.size <= memorySize && requirements.memoryTypeBits & (0x1u << memoryTypeIndex))
948 		{
949 			lastSuccess = currentSize;
950 			currentSize[0] += stepSize;
951 			currentSize[1] += stepSize;
952 		}
953 		else
954 		{
955 			currentSize[0] -= stepSize;
956 			currentSize[1] -= stepSize;
957 		}
958 
959 		if (stepSize == 0)
960 			break;
961 	}
962 
963 	return lastSuccess;
964 }
965 
966 Memory::Memory (const vk::InstanceInterface&	vki,
967 				const vk::DeviceInterface&		vkd,
968 				vk::VkPhysicalDevice			physicalDevice,
969 				vk::VkDevice					device,
970 				vk::VkDeviceSize				size,
971 				deUint32						memoryTypeIndex,
972 				vk::VkDeviceSize				maxBufferSize,
973 				deInt32							maxImageWidth,
974 				deInt32							maxImageHeight)
975 	: m_size			(size)
976 	, m_memoryTypeIndex	(memoryTypeIndex)
977 	, m_memoryType		(getMemoryTypeInfo(vki, physicalDevice, memoryTypeIndex))
978 	, m_memory			(allocMemory(vkd, device, size, memoryTypeIndex))
979 	, m_maxBufferSize	(maxBufferSize)
980 	, m_maxImageWidth	(maxImageWidth)
981 	, m_maxImageHeight	(maxImageHeight)
982 {
983 }
984 
985 class Context
986 {
987 public:
988 													Context					(const vk::InstanceInterface&						vki,
989 																			 const vk::DeviceInterface&							vkd,
990 																			 vk::VkPhysicalDevice								physicalDevice,
991 																			 vk::VkDevice										device,
992 																			 vk::VkQueue										queue,
993 																			 deUint32											queueFamilyIndex,
994 																			 const vector<pair<deUint32, vk::VkQueue> >&		queues,
995 																			 const vk::ProgramCollection<vk::ProgramBinary>&	binaryCollection)
996 		: m_vki					(vki)
997 		, m_vkd					(vkd)
998 		, m_physicalDevice		(physicalDevice)
999 		, m_device				(device)
1000 		, m_queue				(queue)
1001 		, m_queueFamilyIndex	(queueFamilyIndex)
1002 		, m_queues				(queues)
1003 		, m_commandPool			(createCommandPool(vkd, device, vk::VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex))
1004 		, m_binaryCollection	(binaryCollection)
1005 	{
1006 		for (size_t queueNdx = 0; queueNdx < m_queues.size(); queueNdx++)
1007 			m_queueFamilies.push_back(m_queues[queueNdx].first);
1008 	}
1009 
1010 	const vk::InstanceInterface&					getInstanceInterface	(void) const { return m_vki; }
1011 	vk::VkPhysicalDevice							getPhysicalDevice		(void) const { return m_physicalDevice; }
1012 	vk::VkDevice									getDevice				(void) const { return m_device; }
1013 	const vk::DeviceInterface&						getDeviceInterface		(void) const { return m_vkd; }
1014 	vk::VkQueue										getQueue				(void) const { return m_queue; }
1015 	deUint32										getQueueFamily			(void) const { return m_queueFamilyIndex; }
1016 	const vector<pair<deUint32, vk::VkQueue> >&		getQueues				(void) const { return m_queues; }
1017 	const vector<deUint32>							getQueueFamilies		(void) const { return m_queueFamilies; }
1018 	vk::VkCommandPool								getCommandPool			(void) const { return *m_commandPool; }
1019 	const vk::ProgramCollection<vk::ProgramBinary>&	getBinaryCollection		(void) const { return m_binaryCollection; }
1020 
1021 private:
1022 	const vk::InstanceInterface&					m_vki;
1023 	const vk::DeviceInterface&						m_vkd;
1024 	const vk::VkPhysicalDevice						m_physicalDevice;
1025 	const vk::VkDevice								m_device;
1026 	const vk::VkQueue								m_queue;
1027 	const deUint32									m_queueFamilyIndex;
1028 	const vector<pair<deUint32, vk::VkQueue> >		m_queues;
1029 	const vk::Unique<vk::VkCommandPool>				m_commandPool;
1030 	const vk::ProgramCollection<vk::ProgramBinary>&	m_binaryCollection;
1031 	vector<deUint32>								m_queueFamilies;
1032 };
1033 
1034 class PrepareContext
1035 {
1036 public:
1037 													PrepareContext			(const Context&	context,
1038 																			 const Memory&	memory)
1039 		: m_context	(context)
1040 		, m_memory	(memory)
1041 	{
1042 	}
1043 
1044 	const Memory&									getMemory				(void) const { return m_memory; }
1045 	const Context&									getContext				(void) const { return m_context; }
1046 	const vk::ProgramCollection<vk::ProgramBinary>&	getBinaryCollection		(void) const { return m_context.getBinaryCollection(); }
1047 
1048 	void				setBuffer		(vk::Move<vk::VkBuffer>	buffer,
1049 										 vk::VkDeviceSize		size)
1050 	{
1051 		DE_ASSERT(!m_currentImage);
1052 		DE_ASSERT(!m_currentBuffer);
1053 
1054 		m_currentBuffer		= buffer;
1055 		m_currentBufferSize	= size;
1056 	}
1057 
1058 	vk::VkBuffer		getBuffer		(void) const { return *m_currentBuffer; }
1059 	vk::VkDeviceSize	getBufferSize	(void) const
1060 	{
1061 		DE_ASSERT(m_currentBuffer);
1062 		return m_currentBufferSize;
1063 	}
1064 
1065 	void				releaseBuffer	(void) { m_currentBuffer.disown(); }
1066 
1067 	void				setImage		(vk::Move<vk::VkImage>	image,
1068 										 vk::VkImageLayout		layout,
1069 										 vk::VkDeviceSize		memorySize,
1070 										 deInt32				width,
1071 										 deInt32				height)
1072 	{
1073 		DE_ASSERT(!m_currentImage);
1074 		DE_ASSERT(!m_currentBuffer);
1075 
1076 		m_currentImage				= image;
1077 		m_currentImageMemorySize	= memorySize;
1078 		m_currentImageLayout		= layout;
1079 		m_currentImageWidth			= width;
1080 		m_currentImageHeight		= height;
1081 	}
1082 
1083 	void				setImageLayout	(vk::VkImageLayout layout)
1084 	{
1085 		DE_ASSERT(m_currentImage);
1086 		m_currentImageLayout = layout;
1087 	}
1088 
1089 	vk::VkImage			getImage		(void) const { return *m_currentImage; }
1090 	deInt32				getImageWidth	(void) const
1091 	{
1092 		DE_ASSERT(m_currentImage);
1093 		return m_currentImageWidth;
1094 	}
1095 	deInt32				getImageHeight	(void) const
1096 	{
1097 		DE_ASSERT(m_currentImage);
1098 		return m_currentImageHeight;
1099 	}
1100 	vk::VkDeviceSize	getImageMemorySize	(void) const
1101 	{
1102 		DE_ASSERT(m_currentImage);
1103 		return m_currentImageMemorySize;
1104 	}
1105 
1106 	void				releaseImage	(void) { m_currentImage.disown(); }
1107 
1108 	vk::VkImageLayout	getImageLayout	(void) const
1109 	{
1110 		DE_ASSERT(m_currentImage);
1111 		return m_currentImageLayout;
1112 	}
1113 
1114 private:
1115 	const Context&			m_context;
1116 	const Memory&			m_memory;
1117 
1118 	vk::Move<vk::VkBuffer>	m_currentBuffer;
1119 	vk::VkDeviceSize		m_currentBufferSize;
1120 
1121 	vk::Move<vk::VkImage>	m_currentImage;
1122 	vk::VkDeviceSize		m_currentImageMemorySize;
1123 	vk::VkImageLayout		m_currentImageLayout;
1124 	deInt32					m_currentImageWidth;
1125 	deInt32					m_currentImageHeight;
1126 };
1127 
1128 class ExecuteContext
1129 {
1130 public:
1131 					ExecuteContext	(const Context&	context)
1132 		: m_context	(context)
1133 	{
1134 	}
1135 
1136 	const Context&	getContext		(void) const { return m_context; }
1137 	void			setMapping		(void* ptr) { m_mapping = ptr; }
1138 	void*			getMapping		(void) const { return m_mapping; }
1139 
1140 private:
1141 	const Context&	m_context;
1142 	void*			m_mapping;
1143 };
1144 
1145 class VerifyContext
1146 {
1147 public:
1148 							VerifyContext		(TestLog&				log,
1149 												 tcu::ResultCollector&	resultCollector,
1150 												 const Context&			context,
1151 												 vk::VkDeviceSize		size)
1152 		: m_log				(log)
1153 		, m_resultCollector	(resultCollector)
1154 		, m_context			(context)
1155 		, m_reference		((size_t)size)
1156 	{
1157 	}
1158 
1159 	const Context&			getContext			(void) const { return m_context; }
1160 	TestLog&				getLog				(void) const { return m_log; }
1161 	tcu::ResultCollector&	getResultCollector	(void) const { return m_resultCollector; }
1162 
1163 	ReferenceMemory&		getReference		(void) { return m_reference; }
1164 	TextureLevel&			getReferenceImage	(void) { return m_referenceImage;}
1165 
1166 private:
1167 	TestLog&				m_log;
1168 	tcu::ResultCollector&	m_resultCollector;
1169 	const Context&			m_context;
1170 	ReferenceMemory			m_reference;
1171 	TextureLevel			m_referenceImage;
1172 };
1173 
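// A single step of a test case: commands are first prepared, then executed and finally
// verified against the ReferenceMemory / reference image.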
1174 class Command
1175 {
1176 public:
1177 	// Constructor should allocate all non-vulkan resources.
1178 	virtual				~Command	(void) {}
1179 
1180 	// Get name of the command
1181 	virtual const char*	getName		(void) const = 0;
1182 
1183 	// Log prepare operations
1184 	virtual void		logPrepare	(TestLog&, size_t) const {}
1185 	// Log executed operations
1186 	virtual void		logExecute	(TestLog&, size_t) const {}
1187 
1188 	// Prepare should allocate all Vulkan resources and any resources that require
1189 	// the buffer or memory to have been allocated already. This should build all
1190 	// command buffers etc.
1191 	virtual void		prepare		(PrepareContext&) {}
1192 
1193 	// Execute command. Write or read mapped memory, submit commands to queue
1194 	// etc.
1195 	virtual void		execute		(ExecuteContext&) {}
1196 
1197 	// Verify that results are correct.
1198 	virtual void		verify		(VerifyContext&, size_t) {}
1199 
1200 protected:
1201 	// Allow only inheritance
1202 						Command		(void) {}
1203 
1204 private:
1205 	// Disallow copying
1206 						Command		(const Command&);
1207 	Command&			operator&	(const Command&);
1208 };
1209 
1210 class Map : public Command
1211 {
1212 public:
1213 						Map			(void) {}
1214 						~Map		(void) {}
1215 	const char*			getName		(void) const { return "Map"; }
1216 
1217 
1218 	void				logExecute	(TestLog& log, size_t commandIndex) const
1219 	{
1220 		log << TestLog::Message << commandIndex << ":" << getName() << " Map memory" << TestLog::EndMessage;
1221 	}
1222 
1223 	void				prepare		(PrepareContext& context)
1224 	{
1225 		m_memory	= context.getMemory().getMemory();
1226 		m_size		= context.getMemory().getSize();
1227 	}
1228 
1229 	void				execute		(ExecuteContext& context)
1230 	{
1231 		const vk::DeviceInterface&	vkd		= context.getContext().getDeviceInterface();
1232 		const vk::VkDevice			device	= context.getContext().getDevice();
1233 
1234 		context.setMapping(mapMemory(vkd, device, m_memory, m_size));
1235 	}
1236 
1237 private:
1238 	vk::VkDeviceMemory	m_memory;
1239 	vk::VkDeviceSize	m_size;
1240 };
1241 
1242 class UnMap : public Command
1243 {
1244 public:
1245 						UnMap		(void) {}
1246 						~UnMap		(void) {}
1247 	const char*			getName		(void) const { return "UnMap"; }
1248 
1249 	void				logExecute	(TestLog& log, size_t commandIndex) const
1250 	{
1251 		log << TestLog::Message << commandIndex << ": Unmap memory" << TestLog::EndMessage;
1252 	}
1253 
1254 	void				prepare		(PrepareContext& context)
1255 	{
1256 		m_memory	= context.getMemory().getMemory();
1257 	}
1258 
1259 	void				execute		(ExecuteContext& context)
1260 	{
1261 		const vk::DeviceInterface&	vkd		= context.getContext().getDeviceInterface();
1262 		const vk::VkDevice			device	= context.getContext().getDevice();
1263 
1264 		vkd.unmapMemory(device, m_memory);
1265 		context.setMapping(DE_NULL);
1266 	}
1267 
1268 private:
1269 	vk::VkDeviceMemory	m_memory;
1270 };
1271 
1272 class Invalidate : public Command
1273 {
1274 public:
1275 						Invalidate	(void) {}
1276 						~Invalidate	(void) {}
1277 	const char*			getName		(void) const { return "Invalidate"; }
1278 
1279 	void				logExecute	(TestLog& log, size_t commandIndex) const
1280 	{
1281 		log << TestLog::Message << commandIndex << ": Invalidate mapped memory" << TestLog::EndMessage;
1282 	}
1283 
1284 	void				prepare		(PrepareContext& context)
1285 	{
1286 		m_memory	= context.getMemory().getMemory();
1287 		m_size		= context.getMemory().getSize();
1288 	}
1289 
1290 	void				execute		(ExecuteContext& context)
1291 	{
1292 		const vk::DeviceInterface&	vkd		= context.getContext().getDeviceInterface();
1293 		const vk::VkDevice			device	= context.getContext().getDevice();
1294 
1295 		vk::invalidateMappedMemoryRange(vkd, device, m_memory, 0, m_size);
1296 	}
1297 
1298 private:
1299 	vk::VkDeviceMemory	m_memory;
1300 	vk::VkDeviceSize	m_size;
1301 };
1302 
1303 class Flush : public Command
1304 {
1305 public:
1306 						Flush		(void) {}
1307 						~Flush		(void) {}
1308 	const char*			getName		(void) const { return "Flush"; }
1309 
1310 	void				logExecute	(TestLog& log, size_t commandIndex) const
1311 	{
1312 		log << TestLog::Message << commandIndex << ": Flush mapped memory" << TestLog::EndMessage;
1313 	}
1314 
1315 	void				prepare		(PrepareContext& context)
1316 	{
1317 		m_memory	= context.getMemory().getMemory();
1318 		m_size		= context.getMemory().getSize();
1319 	}
1320 
1321 	void				execute		(ExecuteContext& context)
1322 	{
1323 		const vk::DeviceInterface&	vkd		= context.getContext().getDeviceInterface();
1324 		const vk::VkDevice			device	= context.getContext().getDevice();
1325 
1326 		vk::flushMappedMemoryRange(vkd, device, m_memory, 0, m_size);
1327 	}
1328 
1329 private:
1330 	vk::VkDeviceMemory	m_memory;
1331 	vk::VkDeviceSize	m_size;
1332 };
1333 
1334 // Host memory reads and writes
1335 class HostMemoryAccess : public Command
1336 {
1337 public:
1338 					HostMemoryAccess	(bool read, bool write, deUint32 seed);
1339 					~HostMemoryAccess	(void) {}
1340 	const char*		getName				(void) const { return "HostMemoryAccess"; }
1341 
1342 	void			logExecute			(TestLog& log, size_t commandIndex) const;
1343 	void			prepare				(PrepareContext& context);
1344 	void			execute				(ExecuteContext& context);
1345 	void			verify				(VerifyContext& context, size_t commandIndex);
1346 
1347 private:
1348 	const bool		m_read;
1349 	const bool		m_write;
1350 	const deUint32	m_seed;
1351 
1352 	size_t			m_size;
1353 	vector<deUint8>	m_readData;
1354 };
1355 
1356 HostMemoryAccess::HostMemoryAccess (bool read, bool write, deUint32 seed)
1357 	: m_read	(read)
1358 	, m_write	(write)
1359 	, m_seed	(seed)
1360 {
1361 }
1362 
1363 void HostMemoryAccess::logExecute (TestLog& log, size_t commandIndex) const
1364 {
1365 	log << TestLog::Message << commandIndex << ": Host memory access:" << (m_read ? " read" : "") << (m_write ? " write" : "")  << ", seed: " << m_seed << TestLog::EndMessage;
1366 }
1367 
1368 void HostMemoryAccess::prepare (PrepareContext& context)
1369 {
1370 	m_size = (size_t)context.getMemory().getSize();
1371 
1372 	if (m_read)
1373 		m_readData.resize(m_size, 0);
1374 }
1375 
1376 void HostMemoryAccess::execute (ExecuteContext& context)
1377 {
1378 	de::Random		rng	(m_seed);
1379 	deUint8* const	ptr	= (deUint8*)context.getMapping();
1380 
1381 	if (m_read && m_write)
1382 	{
1383 		for (size_t pos = 0; pos < m_size; pos++)
1384 		{
1385 			const deUint8	mask	= rng.getUint8();
1386 			const deUint8	value	= ptr[pos];
1387 
1388 			m_readData[pos] = value;
1389 			ptr[pos] = value ^ mask;
1390 		}
1391 	}
1392 	else if (m_read)
1393 	{
1394 		for (size_t pos = 0; pos < m_size; pos++)
1395 		{
1396 			const deUint8	value	= ptr[pos];
1397 
1398 			m_readData[pos] = value;
1399 		}
1400 	}
1401 	else if (m_write)
1402 	{
1403 		for (size_t pos = 0; pos < m_size; pos++)
1404 		{
1405 			const deUint8	value	= rng.getUint8();
1406 
1407 			ptr[pos] = value;
1408 		}
1409 	}
1410 	else
1411 		DE_FATAL("Host memory access without read or write.");
1412 }
1413 
1414 void HostMemoryAccess::verify (VerifyContext& context, size_t commandIndex)
1415 {
1416 	tcu::ResultCollector&	resultCollector	= context.getResultCollector();
1417 	ReferenceMemory&		reference		= context.getReference();
1418 	de::Random				rng				(m_seed);
1419 
1420 	if (m_read && m_write)
1421 	{
1422 		for (size_t pos = 0; pos < m_size; pos++)
1423 		{
1424 			const deUint8	mask	= rng.getUint8();
1425 			const deUint8	value	= m_readData[pos];
1426 
1427 			if (reference.isDefined(pos))
1428 			{
1429 				if (value != reference.get(pos))
1430 				{
1431 					resultCollector.fail(
1432 							de::toString(commandIndex) + ":" + getName()
1433 							+ " Result differs from reference, Expected: "
1434 							+ de::toString(tcu::toHex<8>(reference.get(pos)))
1435 							+ ", Got: "
1436 							+ de::toString(tcu::toHex<8>(value))
1437 							+ ", At offset: "
1438 							+ de::toString(pos));
1439 					break;
1440 				}
1441 
1442 				reference.set(pos, reference.get(pos) ^ mask);
1443 			}
1444 		}
1445 	}
1446 	else if (m_read)
1447 	{
1448 		for (size_t pos = 0; pos < m_size; pos++)
1449 		{
1450 			const deUint8	value	= m_readData[pos];
1451 
1452 			if (reference.isDefined(pos))
1453 			{
1454 				if (value != reference.get(pos))
1455 				{
1456 					resultCollector.fail(
1457 							de::toString(commandIndex) + ":" + getName()
1458 							+ " Result differs from reference, Expected: "
1459 							+ de::toString(tcu::toHex<8>(reference.get(pos)))
1460 							+ ", Got: "
1461 							+ de::toString(tcu::toHex<8>(value))
1462 							+ ", At offset: "
1463 							+ de::toString(pos));
1464 					break;
1465 				}
1466 			}
1467 		}
1468 	}
1469 	else if (m_write)
1470 	{
1471 		for (size_t pos = 0; pos < m_size; pos++)
1472 		{
1473 			const deUint8	value	= rng.getUint8();
1474 
1475 			reference.set(pos, value);
1476 		}
1477 	}
1478 	else
1479 		DE_FATAL("Host memory access without read or write.");
1480 }
1481 
1482 class CreateBuffer : public Command
1483 {
1484 public:
1485 									CreateBuffer	(vk::VkBufferUsageFlags	usage,
1486 													 vk::VkSharingMode		sharing);
1487 									~CreateBuffer	(void) {}
1488 	const char*						getName			(void) const { return "CreateBuffer"; }
1489 
1490 	void							logPrepare		(TestLog& log, size_t commandIndex) const;
1491 	void							prepare			(PrepareContext& context);
1492 
1493 private:
1494 	const vk::VkBufferUsageFlags	m_usage;
1495 	const vk::VkSharingMode			m_sharing;
1496 };
1497 
1498 CreateBuffer::CreateBuffer (vk::VkBufferUsageFlags	usage,
1499 							vk::VkSharingMode		sharing)
1500 	: m_usage	(usage)
1501 	, m_sharing	(sharing)
1502 {
1503 }
1504 
1505 void CreateBuffer::logPrepare (TestLog& log, size_t commandIndex) const
1506 {
1507 	log << TestLog::Message << commandIndex << ":" << getName() << " Create buffer, Sharing mode: " << m_sharing << ", Usage: " << vk::getBufferUsageFlagsStr(m_usage) << TestLog::EndMessage;
1508 }
1509 
1510 void CreateBuffer::prepare (PrepareContext& context)
1511 {
1512 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
1513 	const vk::VkDevice			device			= context.getContext().getDevice();
1514 	const vk::VkDeviceSize		bufferSize		= context.getMemory().getMaxBufferSize();
1515 	const vector<deUint32>&		queueFamilies	= context.getContext().getQueueFamilies();
1516 
1517 	context.setBuffer(createBuffer(vkd, device, bufferSize, m_usage, m_sharing, queueFamilies), bufferSize);
1518 }
1519 
1520 class DestroyBuffer : public Command
1521 {
1522 public:
1523 							DestroyBuffer	(void);
1524 							~DestroyBuffer	(void) {}
1525 	const char*				getName			(void) const { return "DestroyBuffer"; }
1526 
1527 	void					logExecute		(TestLog& log, size_t commandIndex) const;
1528 	void					prepare			(PrepareContext& context);
1529 	void					execute			(ExecuteContext& context);
1530 
1531 private:
1532 	vk::Move<vk::VkBuffer>	m_buffer;
1533 };
1534 
1535 DestroyBuffer::DestroyBuffer (void)
1536 {
1537 }
1538 
1539 void DestroyBuffer::prepare (PrepareContext& context)
1540 {
1541 	m_buffer = vk::Move<vk::VkBuffer>(vk::check(context.getBuffer()), vk::Deleter<vk::VkBuffer>(context.getContext().getDeviceInterface(), context.getContext().getDevice(), DE_NULL));
1542 	context.releaseBuffer();
1543 }
1544 
1545 void DestroyBuffer::logExecute (TestLog& log, size_t commandIndex) const
1546 {
1547 	log << TestLog::Message << commandIndex << ":" << getName() << " Destroy buffer" << TestLog::EndMessage;
1548 }
1549 
1550 void DestroyBuffer::execute (ExecuteContext& context)
1551 {
1552 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
1553 	const vk::VkDevice			device			= context.getContext().getDevice();
1554 
1555 	vkd.destroyBuffer(device, m_buffer.disown(), DE_NULL);
1556 }
1557 
1558 class BindBufferMemory : public Command
1559 {
1560 public:
1561 				BindBufferMemory	(void) {}
1562 				~BindBufferMemory	(void) {}
1563 	const char*	getName				(void) const { return "BindBufferMemory"; }
1564 
1565 	void		logPrepare			(TestLog& log, size_t commandIndex) const;
1566 	void		prepare				(PrepareContext& context);
1567 };
1568 
1569 void BindBufferMemory::logPrepare (TestLog& log, size_t commandIndex) const
1570 {
1571 	log << TestLog::Message << commandIndex << ":" << getName() << " Bind memory to buffer" << TestLog::EndMessage;
1572 }
1573 
1574 void BindBufferMemory::prepare (PrepareContext& context)
1575 {
1576 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
1577 	const vk::VkDevice			device			= context.getContext().getDevice();
1578 
1579 	VK_CHECK(vkd.bindBufferMemory(device, context.getBuffer(), context.getMemory().getMemory(), 0));
1580 }
1581 
1582 class CreateImage : public Command
1583 {
1584 public:
1585 									CreateImage		(vk::VkImageUsageFlags	usage,
1586 													 vk::VkSharingMode		sharing);
~CreateImage(void)1587 									~CreateImage	(void) {}
getName(void) const1588 	const char*						getName			(void) const { return "CreateImage"; }
1589 
1590 	void							logPrepare		(TestLog& log, size_t commandIndex) const;
1591 	void							prepare			(PrepareContext& context);
1592 	void							verify			(VerifyContext& context, size_t commandIndex);
1593 
1594 private:
1595 	const vk::VkImageUsageFlags	m_usage;
1596 	const vk::VkSharingMode		m_sharing;
1597 	deInt32						m_imageWidth;
1598 	deInt32						m_imageHeight;
1599 };
1600 
CreateImage(vk::VkImageUsageFlags usage,vk::VkSharingMode sharing)1601 CreateImage::CreateImage (vk::VkImageUsageFlags	usage,
1602 						  vk::VkSharingMode		sharing)
1603 	: m_usage	(usage)
1604 	, m_sharing	(sharing)
1605 {
1606 }
1607 
logPrepare(TestLog & log,size_t commandIndex) const1608 void CreateImage::logPrepare (TestLog& log, size_t commandIndex) const
1609 {
1610 	log << TestLog::Message << commandIndex << ":" << getName() << " Create image, sharing: " << m_sharing << ", usage: " << vk::getImageUsageFlagsStr(m_usage)  << TestLog::EndMessage;
1611 }
1612 
prepare(PrepareContext & context)1613 void CreateImage::prepare (PrepareContext& context)
1614 {
1615 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
1616 	const vk::VkDevice			device			= context.getContext().getDevice();
1617 	const vector<deUint32>&		queueFamilies	= context.getContext().getQueueFamilies();
1618 
1619 	m_imageWidth	= context.getMemory().getMaxImageWidth();
1620 	m_imageHeight	= context.getMemory().getMaxImageHeight();
1621 
1622 	{
1623 		const vk::VkImageCreateInfo	createInfo		=
1624 		{
1625 			vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
1626 			DE_NULL,
1627 
1628 			0u,
1629 			vk::VK_IMAGE_TYPE_2D,
1630 			vk::VK_FORMAT_R8G8B8A8_UNORM,
1631 			{
1632 				(deUint32)m_imageWidth,
1633 				(deUint32)m_imageHeight,
1634 				1u,
1635 			},
1636 			1u, 1u,
1637 			vk::VK_SAMPLE_COUNT_1_BIT,
1638 			vk::VK_IMAGE_TILING_OPTIMAL,
1639 			m_usage,
1640 			m_sharing,
1641 			(deUint32)queueFamilies.size(),
1642 			&queueFamilies[0],
1643 			vk::VK_IMAGE_LAYOUT_UNDEFINED
1644 		};
1645 		vk::Move<vk::VkImage>			image			(createImage(vkd, device, &createInfo));
1646 		const vk::VkMemoryRequirements	requirements	= vk::getImageMemoryRequirements(vkd, device, *image);
1647 
1648 		context.setImage(image, vk::VK_IMAGE_LAYOUT_UNDEFINED, requirements.size, m_imageWidth, m_imageHeight);
1649 	}
1650 }
1651 
verify(VerifyContext & context,size_t)1652 void CreateImage::verify (VerifyContext& context, size_t)
1653 {
1654 	context.getReferenceImage() = TextureLevel(TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_imageWidth, m_imageHeight);
1655 }
1656 
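// Takes ownership of the current test image in prepare() and destroys it in execute().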
1657 class DestroyImage : public Command
1658 {
1659 public:
1660 							DestroyImage	(void);
~DestroyImage(void)1661 							~DestroyImage	(void) {}
getName(void) const1662 	const char*				getName			(void) const { return "DestroyImage"; }
1663 
1664 	void					logExecute		(TestLog& log, size_t commandIndex) const;
1665 	void					prepare			(PrepareContext& context);
1666 	void					execute			(ExecuteContext& context);
1667 
1668 private:
1669 	vk::Move<vk::VkImage>	m_image;
1670 };
1671 
DestroyImage(void)1672 DestroyImage::DestroyImage (void)
1673 {
1674 }
1675 
prepare(PrepareContext & context)1676 void DestroyImage::prepare (PrepareContext& context)
1677 {
1678 	m_image = vk::Move<vk::VkImage>(vk::check(context.getImage()), vk::Deleter<vk::VkImage>(context.getContext().getDeviceInterface(), context.getContext().getDevice(), DE_NULL));
1679 	context.releaseImage();
1680 }
1681 
1682 
logExecute(TestLog & log,size_t commandIndex) const1683 void DestroyImage::logExecute (TestLog& log, size_t commandIndex) const
1684 {
1685 	log << TestLog::Message << commandIndex << ":" << getName() << " Destroy image" << TestLog::EndMessage;
1686 }
1687 
execute(ExecuteContext & context)1688 void DestroyImage::execute (ExecuteContext& context)
1689 {
1690 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
1691 	const vk::VkDevice			device			= context.getContext().getDevice();
1692 
1693 	vkd.destroyImage(device, m_image.disown(), DE_NULL);
1694 }
1695 
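// Binds the test memory allocation to the current image at offset 0.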
1696 class BindImageMemory : public Command
1697 {
1698 public:
BindImageMemory(void)1699 				BindImageMemory		(void) {}
~BindImageMemory(void)1700 				~BindImageMemory	(void) {}
getName(void) const1701 	const char*	getName				(void) const { return "BindImageMemory"; }
1702 
1703 	void		logPrepare			(TestLog& log, size_t commandIndex) const;
1704 	void		prepare				(PrepareContext& context);
1705 };
1706 
logPrepare(TestLog & log,size_t commandIndex) const1707 void BindImageMemory::logPrepare (TestLog& log, size_t commandIndex) const
1708 {
1709 	log << TestLog::Message << commandIndex << ":" << getName() << " Bind memory to image" << TestLog::EndMessage;
1710 }
1711 
prepare(PrepareContext & context)1712 void BindImageMemory::prepare (PrepareContext& context)
1713 {
1714 	const vk::DeviceInterface&		vkd				= context.getContext().getDeviceInterface();
1715 	const vk::VkDevice				device			= context.getContext().getDevice();
1716 
1717 	VK_CHECK(vkd.bindImageMemory(device, context.getImage(), context.getMemory().getMemory(), 0));
1718 }
1719 
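// Waits until the test queue is idle.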
1720 class QueueWaitIdle : public Command
1721 {
1722 public:
QueueWaitIdle(void)1723 				QueueWaitIdle	(void) {}
~QueueWaitIdle(void)1724 				~QueueWaitIdle	(void) {}
getName(void) const1725 	const char*	getName			(void) const { return "QueueWaitIdle"; }
1726 
1727 	void		logExecute		(TestLog& log, size_t commandIndex) const;
1728 	void		execute			(ExecuteContext& context);
1729 };
1730 
logExecute(TestLog & log,size_t commandIndex) const1731 void QueueWaitIdle::logExecute (TestLog& log, size_t commandIndex) const
1732 {
1733 	log << TestLog::Message << commandIndex << ":" << getName() << " Queue wait idle" << TestLog::EndMessage;
1734 }
1735 
execute(ExecuteContext & context)1736 void QueueWaitIdle::execute (ExecuteContext& context)
1737 {
1738 	const vk::DeviceInterface&	vkd		= context.getContext().getDeviceInterface();
1739 	const vk::VkQueue			queue	= context.getContext().getQueue();
1740 
1741 	VK_CHECK(vkd.queueWaitIdle(queue));
1742 }
1743 
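// Waits until the whole device is idle.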
1744 class DeviceWaitIdle : public Command
1745 {
1746 public:
DeviceWaitIdle(void)1747 				DeviceWaitIdle	(void) {}
~DeviceWaitIdle(void)1748 				~DeviceWaitIdle	(void) {}
getName(void) const1749 	const char*	getName			(void) const { return "DeviceWaitIdle"; }
1750 
1751 	void		logExecute		(TestLog& log, size_t commandIndex) const;
1752 	void		execute			(ExecuteContext& context);
1753 };
1754 
logExecute(TestLog & log,size_t commandIndex) const1755 void DeviceWaitIdle::logExecute (TestLog& log, size_t commandIndex) const
1756 {
1757 	log << TestLog::Message << commandIndex << ":" << getName() << " Device wait idle" << TestLog::EndMessage;
1758 }
1759 
execute(ExecuteContext & context)1760 void DeviceWaitIdle::execute (ExecuteContext& context)
1761 {
1762 	const vk::DeviceInterface&	vkd		= context.getContext().getDeviceInterface();
1763 	const vk::VkDevice			device	= context.getContext().getDevice();
1764 
1765 	VK_CHECK(vkd.deviceWaitIdle(device));
1766 }
1767 
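// Read-only view of the prepare-time state (memory, buffer, image) together with the command buffer that CmdCommands record into.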
1768 class SubmitContext
1769 {
1770 public:
SubmitContext(const PrepareContext & context,const vk::VkCommandBuffer commandBuffer)1771 								SubmitContext		(const PrepareContext&		context,
1772 													 const vk::VkCommandBuffer	commandBuffer)
1773 		: m_context			(context)
1774 		, m_commandBuffer	(commandBuffer)
1775 	{
1776 	}
1777 
getMemory(void) const1778 	const Memory&				getMemory			(void) const { return m_context.getMemory(); }
getContext(void) const1779 	const Context&				getContext			(void) const { return m_context.getContext(); }
getCommandBuffer(void) const1780 	vk::VkCommandBuffer			getCommandBuffer	(void) const { return m_commandBuffer; }
1781 
getBuffer(void) const1782 	vk::VkBuffer				getBuffer			(void) const { return m_context.getBuffer(); }
getBufferSize(void) const1783 	vk::VkDeviceSize			getBufferSize		(void) const { return m_context.getBufferSize(); }
1784 
getImage(void) const1785 	vk::VkImage					getImage			(void) const { return m_context.getImage(); }
getImageWidth(void) const1786 	deInt32						getImageWidth		(void) const { return m_context.getImageWidth(); }
getImageHeight(void) const1787 	deInt32						getImageHeight		(void) const { return m_context.getImageHeight(); }
1788 
1789 private:
1790 	const PrepareContext&		m_context;
1791 	const vk::VkCommandBuffer	m_commandBuffer;
1792 };
1793 
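// Interface for commands that are recorded into a command buffer and driven by SubmitCommandBuffer.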
1794 class CmdCommand
1795 {
1796 public:
~CmdCommand(void)1797 	virtual				~CmdCommand	(void) {}
1798 	virtual const char*	getName		(void) const = 0;
1799 
1800 	// Log things that are done during prepare
logPrepare(TestLog &,size_t) const1801 	virtual void		logPrepare	(TestLog&, size_t) const {}
1802 	// Log submitted calls etc.
logSubmit(TestLog &,size_t) const1803 	virtual void		logSubmit	(TestLog&, size_t) const {}
1804 
1805 	// Allocate vulkan resources and prepare for submit.
prepare(PrepareContext &)1806 	virtual void		prepare		(PrepareContext&) {}
1807 
1808 	// Submit commands to command buffer.
submit(SubmitContext &)1809 	virtual void		submit		(SubmitContext&) {}
1810 
1811 	// Verify results
verify(VerifyContext &,size_t)1812 	virtual void		verify		(VerifyContext&, size_t) {}
1813 };
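// A minimal sketch of a CmdCommand implementation (illustrative only; the
// hypothetical NoOpCmd below is not part of this test suite). prepare() may
// allocate Vulkan objects, submit() records commands into the command buffer
// provided by the SubmitContext, and verify() updates or checks the reference:
//
//	class NoOpCmd : public CmdCommand
//	{
//	public:
//		const char*	getName	(void) const { return "NoOpCmd"; }
//		void		submit	(SubmitContext&) {} // Records nothing
//	};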
1814 
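// Records the given CmdCommands into a single primary command buffer during prepare() and submits it to the queue during execute().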
1815 class SubmitCommandBuffer : public Command
1816 {
1817 public:
1818 					SubmitCommandBuffer		(const vector<CmdCommand*>& commands);
1819 					~SubmitCommandBuffer	(void);
1820 
getName(void) const1821 	const char*		getName					(void) const { return "SubmitCommandBuffer"; }
1822 	void			logExecute				(TestLog& log, size_t commandIndex) const;
1823 	void			logPrepare				(TestLog& log, size_t commandIndex) const;
1824 
1825 	// Allocate command buffer and submit commands to command buffer
1826 	void			prepare					(PrepareContext& context);
1827 	void			execute					(ExecuteContext& context);
1828 
1829 	// Verify that results are correct.
1830 	void			verify					(VerifyContext& context, size_t commandIndex);
1831 
1832 private:
1833 	vector<CmdCommand*>				m_commands;
1834 	vk::Move<vk::VkCommandBuffer>	m_commandBuffer;
1835 };
1836 
SubmitCommandBuffer(const vector<CmdCommand * > & commands)1837 SubmitCommandBuffer::SubmitCommandBuffer (const vector<CmdCommand*>& commands)
1838 	: m_commands	(commands)
1839 {
1840 }
1841 
~SubmitCommandBuffer(void)1842 SubmitCommandBuffer::~SubmitCommandBuffer (void)
1843 {
1844 	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
1845 		delete m_commands[cmdNdx];
1846 }
1847 
prepare(PrepareContext & context)1848 void SubmitCommandBuffer::prepare (PrepareContext& context)
1849 {
1850 	const vk::DeviceInterface&	vkd			= context.getContext().getDeviceInterface();
1851 	const vk::VkDevice			device		= context.getContext().getDevice();
1852 	const vk::VkCommandPool		commandPool	= context.getContext().getCommandPool();
1853 
1854 	m_commandBuffer = createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY);
1855 
1856 	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
1857 	{
1858 		CmdCommand& command = *m_commands[cmdNdx];
1859 
1860 		command.prepare(context);
1861 	}
1862 
1863 	{
1864 		SubmitContext submitContext (context, *m_commandBuffer);
1865 
1866 		for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
1867 		{
1868 			CmdCommand& command = *m_commands[cmdNdx];
1869 
1870 			command.submit(submitContext);
1871 		}
1872 
1873 		VK_CHECK(vkd.endCommandBuffer(*m_commandBuffer));
1874 	}
1875 }
1876 
execute(ExecuteContext & context)1877 void SubmitCommandBuffer::execute (ExecuteContext& context)
1878 {
1879 	const vk::DeviceInterface&	vkd		= context.getContext().getDeviceInterface();
1880 	const vk::VkCommandBuffer	cmd		= *m_commandBuffer;
1881 	const vk::VkQueue			queue	= context.getContext().getQueue();
1882 	const vk::VkSubmitInfo		submit	=
1883 	{
1884 		vk::VK_STRUCTURE_TYPE_SUBMIT_INFO,
1885 		DE_NULL,
1886 
1887 		0,
1888 		DE_NULL,
1889 		(const vk::VkPipelineStageFlags*)DE_NULL,
1890 
1891 		1,
1892 		&cmd,
1893 
1894 		0,
1895 		DE_NULL
1896 	};
1897 
1898 	VK_CHECK(vkd.queueSubmit(queue, 1, &submit, 0));
1899 }
1900 
verify(VerifyContext & context,size_t commandIndex)1901 void SubmitCommandBuffer::verify (VerifyContext& context, size_t commandIndex)
1902 {
1903 	const string				sectionName	(de::toString(commandIndex) + ":" + getName());
1904 	const tcu::ScopedLogSection	section		(context.getLog(), sectionName, sectionName);
1905 
1906 	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
1907 		m_commands[cmdNdx]->verify(context, cmdNdx);
1908 }
1909 
logPrepare(TestLog & log,size_t commandIndex) const1910 void SubmitCommandBuffer::logPrepare (TestLog& log, size_t commandIndex) const
1911 {
1912 	const string				sectionName	(de::toString(commandIndex) + ":" + getName());
1913 	const tcu::ScopedLogSection	section		(log, sectionName, sectionName);
1914 
1915 	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
1916 		m_commands[cmdNdx]->logPrepare(log, cmdNdx);
1917 }
1918 
logExecute(TestLog & log,size_t commandIndex) const1919 void SubmitCommandBuffer::logExecute (TestLog& log, size_t commandIndex) const
1920 {
1921 	const string				sectionName	(de::toString(commandIndex) + ":" + getName());
1922 	const tcu::ScopedLogSection	section		(log, sectionName, sectionName);
1923 
1924 	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
1925 		m_commands[cmdNdx]->logSubmit(log, cmdNdx);
1926 }
1927 
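// Records a single vkCmdPipelineBarrier using either a global memory barrier, a buffer memory barrier or an image memory barrier, depending on Type.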
1928 class PipelineBarrier : public CmdCommand
1929 {
1930 public:
1931 	enum Type
1932 	{
1933 		TYPE_GLOBAL = 0,
1934 		TYPE_BUFFER,
1935 		TYPE_IMAGE,
1936 		TYPE_LAST
1937 	};
1938 									PipelineBarrier		(const vk::VkPipelineStageFlags			srcStages,
1939 														 const vk::VkAccessFlags				srcAccesses,
1940 														 const vk::VkPipelineStageFlags			dstStages,
1941 														 const vk::VkAccessFlags				dstAccesses,
1942 														 Type									type,
1943 														 const tcu::Maybe<vk::VkImageLayout>	imageLayout);
~PipelineBarrier(void)1944 									~PipelineBarrier	(void) {}
getName(void) const1945 	const char*						getName				(void) const { return "PipelineBarrier"; }
1946 
1947 	void							logSubmit			(TestLog& log, size_t commandIndex) const;
1948 	void							submit				(SubmitContext& context);
1949 
1950 private:
1951 	const vk::VkPipelineStageFlags		m_srcStages;
1952 	const vk::VkAccessFlags				m_srcAccesses;
1953 	const vk::VkPipelineStageFlags		m_dstStages;
1954 	const vk::VkAccessFlags				m_dstAccesses;
1955 	const Type							m_type;
1956 	const tcu::Maybe<vk::VkImageLayout>	m_imageLayout;
1957 };
1958 
PipelineBarrier(const vk::VkPipelineStageFlags srcStages,const vk::VkAccessFlags srcAccesses,const vk::VkPipelineStageFlags dstStages,const vk::VkAccessFlags dstAccesses,Type type,const tcu::Maybe<vk::VkImageLayout> imageLayout)1959 PipelineBarrier::PipelineBarrier (const vk::VkPipelineStageFlags		srcStages,
1960 								  const vk::VkAccessFlags				srcAccesses,
1961 								  const vk::VkPipelineStageFlags		dstStages,
1962 								  const vk::VkAccessFlags				dstAccesses,
1963 								  Type									type,
1964 								  const tcu::Maybe<vk::VkImageLayout>	imageLayout)
1965 	: m_srcStages	(srcStages)
1966 	, m_srcAccesses	(srcAccesses)
1967 	, m_dstStages	(dstStages)
1968 	, m_dstAccesses	(dstAccesses)
1969 	, m_type		(type)
1970 	, m_imageLayout	(imageLayout)
1971 {
1972 }
1973 
logSubmit(TestLog & log,size_t commandIndex) const1974 void PipelineBarrier::logSubmit (TestLog& log, size_t commandIndex) const
1975 {
1976 	log << TestLog::Message << commandIndex << ":" << getName()
1977 		<< " " << (m_type == TYPE_GLOBAL ? "Global pipeline barrier"
1978 					: m_type == TYPE_BUFFER ? "Buffer pipeline barrier"
1979 					: "Image pipeline barrier")
1980 		<< ", srcStages: " << vk::getPipelineStageFlagsStr(m_srcStages) << ", srcAccesses: " << vk::getAccessFlagsStr(m_srcAccesses)
1981 		<< ", dstStages: " << vk::getPipelineStageFlagsStr(m_dstStages) << ", dstAccesses: " << vk::getAccessFlagsStr(m_dstAccesses) << TestLog::EndMessage;
1982 }
1983 
submit(SubmitContext & context)1984 void PipelineBarrier::submit (SubmitContext& context)
1985 {
1986 	const vk::DeviceInterface&	vkd	= context.getContext().getDeviceInterface();
1987 	const vk::VkCommandBuffer	cmd	= context.getCommandBuffer();
1988 
1989 	switch (m_type)
1990 	{
1991 		case TYPE_GLOBAL:
1992 		{
1993 			const vk::VkMemoryBarrier	barrier		=
1994 			{
1995 				vk::VK_STRUCTURE_TYPE_MEMORY_BARRIER,
1996 				DE_NULL,
1997 
1998 				m_srcAccesses,
1999 				m_dstAccesses
2000 			};
2001 
2002 			vkd.cmdPipelineBarrier(cmd, m_srcStages, m_dstStages, (vk::VkDependencyFlags)0, 1, &barrier, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
2003 			break;
2004 		}
2005 
2006 		case TYPE_BUFFER:
2007 		{
2008 			const vk::VkBufferMemoryBarrier	barrier		=
2009 			{
2010 				vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
2011 				DE_NULL,
2012 
2013 				m_srcAccesses,
2014 				m_dstAccesses,
2015 
2016 				VK_QUEUE_FAMILY_IGNORED,
2017 				VK_QUEUE_FAMILY_IGNORED,
2018 
2019 				context.getBuffer(),
2020 				0,
2021 				VK_WHOLE_SIZE
2022 			};
2023 
2024 			vkd.cmdPipelineBarrier(cmd, m_srcStages, m_dstStages, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &barrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
2025 			break;
2026 		}
2027 
2028 		case TYPE_IMAGE:
2029 		{
2030 			const vk::VkImageMemoryBarrier	barrier		=
2031 			{
2032 				vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
2033 				DE_NULL,
2034 
2035 				m_srcAccesses,
2036 				m_dstAccesses,
2037 
2038 				*m_imageLayout,
2039 				*m_imageLayout,
2040 
2041 				VK_QUEUE_FAMILY_IGNORED,
2042 				VK_QUEUE_FAMILY_IGNORED,
2043 
2044 				context.getImage(),
2045 				{
2046 					vk::VK_IMAGE_ASPECT_COLOR_BIT,
2047 					0, 1,
2048 					0, 1
2049 				}
2050 			};
2051 
2052 			vkd.cmdPipelineBarrier(cmd, m_srcStages, m_dstStages, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &barrier);
2053 			break;
2054 		}
2055 
2056 		default:
2057 			DE_FATAL("Unknown pipeline barrier type");
2058 	}
2059 }
2060 
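// Records an image memory barrier transitioning the test image from srcLayout to dstLayout; verify() conservatively marks the reference memory as undefined.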
2061 class ImageTransition : public CmdCommand
2062 {
2063 public:
2064 						ImageTransition		(vk::VkPipelineStageFlags	srcStages,
2065 											 vk::VkAccessFlags			srcAccesses,
2066 
2067 											 vk::VkPipelineStageFlags	dstStages,
2068 											 vk::VkAccessFlags			dstAccesses,
2069 
2070 											 vk::VkImageLayout			srcLayout,
2071 											 vk::VkImageLayout			dstLayout);
2072 
~ImageTransition(void)2073 						~ImageTransition	(void) {}
getName(void) const2074 	const char*			getName				(void) const { return "ImageTransition"; }
2075 
2076 	void				prepare				(PrepareContext& context);
2077 	void				logSubmit			(TestLog& log, size_t commandIndex) const;
2078 	void				submit				(SubmitContext& context);
2079 	void				verify				(VerifyContext& context, size_t);
2080 
2081 private:
2082 	const vk::VkPipelineStageFlags	m_srcStages;
2083 	const vk::VkAccessFlags			m_srcAccesses;
2084 	const vk::VkPipelineStageFlags	m_dstStages;
2085 	const vk::VkAccessFlags			m_dstAccesses;
2086 	const vk::VkImageLayout			m_srcLayout;
2087 	const vk::VkImageLayout			m_dstLayout;
2088 
2089 	vk::VkDeviceSize				m_imageMemorySize;
2090 };
2091 
ImageTransition(vk::VkPipelineStageFlags srcStages,vk::VkAccessFlags srcAccesses,vk::VkPipelineStageFlags dstStages,vk::VkAccessFlags dstAccesses,vk::VkImageLayout srcLayout,vk::VkImageLayout dstLayout)2092 ImageTransition::ImageTransition (vk::VkPipelineStageFlags	srcStages,
2093 								  vk::VkAccessFlags			srcAccesses,
2094 
2095 								  vk::VkPipelineStageFlags	dstStages,
2096 								  vk::VkAccessFlags			dstAccesses,
2097 
2098 								  vk::VkImageLayout			srcLayout,
2099 								  vk::VkImageLayout			dstLayout)
2100 	: m_srcStages		(srcStages)
2101 	, m_srcAccesses		(srcAccesses)
2102 	, m_dstStages		(dstStages)
2103 	, m_dstAccesses		(dstAccesses)
2104 	, m_srcLayout		(srcLayout)
2105 	, m_dstLayout		(dstLayout)
2106 {
2107 }
2108 
logSubmit(TestLog & log,size_t commandIndex) const2109 void ImageTransition::logSubmit (TestLog& log, size_t commandIndex) const
2110 {
2111 	log << TestLog::Message << commandIndex << ":" << getName()
2112 		<< " Image transition pipeline barrier"
2113 		<< ", srcStages: " << vk::getPipelineStageFlagsStr(m_srcStages) << ", srcAccesses: " << vk::getAccessFlagsStr(m_srcAccesses)
2114 		<< ", dstStages: " << vk::getPipelineStageFlagsStr(m_dstStages) << ", dstAccesses: " << vk::getAccessFlagsStr(m_dstAccesses)
2115 		<< ", srcLayout: " << m_srcLayout << ", dstLayout: " << m_dstLayout << TestLog::EndMessage;
2116 }
2117 
prepare(PrepareContext & context)2118 void ImageTransition::prepare (PrepareContext& context)
2119 {
2120 	DE_ASSERT(context.getImageLayout() == vk::VK_IMAGE_LAYOUT_UNDEFINED || m_srcLayout == vk::VK_IMAGE_LAYOUT_UNDEFINED || context.getImageLayout() == m_srcLayout);
2121 
2122 	context.setImageLayout(m_dstLayout);
2123 	m_imageMemorySize = context.getImageMemorySize();
2124 }
2125 
submit(SubmitContext & context)2126 void ImageTransition::submit (SubmitContext& context)
2127 {
2128 	const vk::DeviceInterface&		vkd			= context.getContext().getDeviceInterface();
2129 	const vk::VkCommandBuffer		cmd			= context.getCommandBuffer();
2130 	const vk::VkImageMemoryBarrier	barrier		=
2131 	{
2132 		vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
2133 		DE_NULL,
2134 
2135 		m_srcAccesses,
2136 		m_dstAccesses,
2137 
2138 		m_srcLayout,
2139 		m_dstLayout,
2140 
2141 		VK_QUEUE_FAMILY_IGNORED,
2142 		VK_QUEUE_FAMILY_IGNORED,
2143 
2144 		context.getImage(),
2145 		{
2146 			vk::VK_IMAGE_ASPECT_COLOR_BIT,
2147 			0u, 1u,
2148 			0u, 1u
2149 		}
2150 	};
2151 
2152 	vkd.cmdPipelineBarrier(cmd, m_srcStages, m_dstStages, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &barrier);
2153 }
2154 
verify(VerifyContext & context,size_t)2155 void ImageTransition::verify (VerifyContext& context, size_t)
2156 {
2157 	context.getReference().setUndefined(0, (size_t)m_imageMemorySize);
2158 }
2159 
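// Records vkCmdFillBuffer filling the test buffer with a repeated 32-bit value.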
2160 class FillBuffer : public CmdCommand
2161 {
2162 public:
FillBuffer(deUint32 value)2163 						FillBuffer	(deUint32 value) : m_value(value) {}
~FillBuffer(void)2164 						~FillBuffer	(void) {}
getName(void) const2165 	const char*			getName		(void) const { return "FillBuffer"; }
2166 
2167 	void				logSubmit	(TestLog& log, size_t commandIndex) const;
2168 	void				submit		(SubmitContext& context);
2169 	void				verify		(VerifyContext& context, size_t commandIndex);
2170 
2171 private:
2172 	const deUint32		m_value;
2173 	vk::VkDeviceSize	m_bufferSize;
2174 };
2175 
logSubmit(TestLog & log,size_t commandIndex) const2176 void FillBuffer::logSubmit (TestLog& log, size_t commandIndex) const
2177 {
2178 	log << TestLog::Message << commandIndex << ":" << getName() << " Fill value: " << m_value << TestLog::EndMessage;
2179 }
2180 
submit(SubmitContext & context)2181 void FillBuffer::submit (SubmitContext& context)
2182 {
2183 	const vk::DeviceInterface&	vkd			= context.getContext().getDeviceInterface();
2184 	const vk::VkCommandBuffer	cmd			= context.getCommandBuffer();
2185 	const vk::VkBuffer			buffer		= context.getBuffer();
2186 	const vk::VkDeviceSize		sizeMask	= ~(0x3ull); // \note vkCmdFillBuffer requires the fill size to be a multiple of 4, so round the buffer size down (e.g. 1027 -> 1024)
2187 
2188 	m_bufferSize = sizeMask & context.getBufferSize();
2189 	vkd.cmdFillBuffer(cmd, buffer, 0, m_bufferSize, m_value);
2190 }
2191 
verify(VerifyContext & context,size_t)2192 void FillBuffer::verify (VerifyContext& context, size_t)
2193 {
2194 	ReferenceMemory&	reference	= context.getReference();
2195 
2196 	for (size_t ndx = 0; ndx < m_bufferSize; ndx++)
2197 	{
2198 #if (DE_ENDIANNESS == DE_LITTLE_ENDIAN)
2199 		reference.set(ndx, (deUint8)(0xffu & (m_value >> (8*(ndx % 4)))));
2200 #else
2201 		reference.set(ndx, (deUint8)(0xffu & (m_value >> (8*(3 - (ndx % 4))))));
2202 #endif
2203 	}
2204 }
2205 
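// Records vkCmdUpdateBuffer calls filling the test buffer with seeded pseudo-random data in 64KiB blocks.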
2206 class UpdateBuffer : public CmdCommand
2207 {
2208 public:
UpdateBuffer(deUint32 seed)2209 						UpdateBuffer	(deUint32 seed) : m_seed(seed) {}
~UpdateBuffer(void)2210 						~UpdateBuffer	(void) {}
getName(void) const2211 	const char*			getName			(void) const { return "UpdateBuffer"; }
2212 
2213 	void				logSubmit		(TestLog& log, size_t commandIndex) const;
2214 	void				submit			(SubmitContext& context);
2215 	void				verify			(VerifyContext& context, size_t commandIndex);
2216 
2217 private:
2218 	const deUint32		m_seed;
2219 	vk::VkDeviceSize	m_bufferSize;
2220 };
2221 
logSubmit(TestLog & log,size_t commandIndex) const2222 void UpdateBuffer::logSubmit (TestLog& log, size_t commandIndex) const
2223 {
2224 	log << TestLog::Message << commandIndex << ":" << getName() << " Update buffer, seed: " << m_seed << TestLog::EndMessage;
2225 }
2226 
submit(SubmitContext & context)2227 void UpdateBuffer::submit (SubmitContext& context)
2228 {
2229 	const vk::DeviceInterface&	vkd			= context.getContext().getDeviceInterface();
2230 	const vk::VkCommandBuffer	cmd			= context.getCommandBuffer();
2231 	const vk::VkBuffer			buffer		= context.getBuffer();
2232 	const size_t				blockSize	= 65536;
2233 	std::vector<deUint8>		data		(blockSize, 0);
2234 	de::Random					rng			(m_seed);
2235 
2236 	m_bufferSize = context.getBufferSize();
2237 
2238 	for (size_t updated = 0; updated < m_bufferSize; updated += blockSize)
2239 	{
2240 		for (size_t ndx = 0; ndx < data.size(); ndx++)
2241 			data[ndx] = rng.getUint8();
2242 
2243 		if (m_bufferSize - updated > blockSize)
2244 			vkd.cmdUpdateBuffer(cmd, buffer, updated, blockSize, (const deUint32*)(&data[0]));
2245 		else
2246 			vkd.cmdUpdateBuffer(cmd, buffer, updated, m_bufferSize - updated, (const deUint32*)(&data[0]));
2247 	}
2248 }
2249 
verify(VerifyContext & context,size_t)2250 void UpdateBuffer::verify (VerifyContext& context, size_t)
2251 {
2252 	ReferenceMemory&	reference	= context.getReference();
2253 	const size_t		blockSize	= 65536;
2254 	vector<deUint8>		data		(blockSize, 0);
2255 	de::Random			rng			(m_seed);
2256 
2257 	for (size_t updated = 0; updated < m_bufferSize; updated += blockSize)
2258 	{
2259 		for (size_t ndx = 0; ndx < data.size(); ndx++)
2260 			data[ndx] = rng.getUint8();
2261 
2262 		if (m_bufferSize - updated > blockSize)
2263 			reference.setData(updated, blockSize, &data[0]);
2264 		else
2265 			reference.setData(updated, (size_t)(m_bufferSize - updated), &data[0]);
2266 	}
2267 }
2268 
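// Copies the test buffer into a newly allocated host-visible buffer and verifies the result against the reference memory.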
2269 class BufferCopyToBuffer : public CmdCommand
2270 {
2271 public:
BufferCopyToBuffer(void)2272 									BufferCopyToBuffer	(void) {}
~BufferCopyToBuffer(void)2273 									~BufferCopyToBuffer	(void) {}
getName(void) const2274 	const char*						getName				(void) const { return "BufferCopyToBuffer"; }
2275 
2276 	void							logPrepare			(TestLog& log, size_t commandIndex) const;
2277 	void							prepare				(PrepareContext& context);
2278 	void							logSubmit			(TestLog& log, size_t commandIndex) const;
2279 	void							submit				(SubmitContext& context);
2280 	void							verify				(VerifyContext& context, size_t commandIndex);
2281 
2282 private:
2283 	vk::VkDeviceSize				m_bufferSize;
2284 	vk::Move<vk::VkBuffer>			m_dstBuffer;
2285 	vk::Move<vk::VkDeviceMemory>	m_memory;
2286 };
2287 
logPrepare(TestLog & log,size_t commandIndex) const2288 void BufferCopyToBuffer::logPrepare (TestLog& log, size_t commandIndex) const
2289 {
2290 	log << TestLog::Message << commandIndex << ":" << getName() << " Allocate destination buffer for buffer to buffer copy." << TestLog::EndMessage;
2291 }
2292 
prepare(PrepareContext & context)2293 void BufferCopyToBuffer::prepare (PrepareContext& context)
2294 {
2295 	const vk::InstanceInterface&	vki				= context.getContext().getInstanceInterface();
2296 	const vk::DeviceInterface&		vkd				= context.getContext().getDeviceInterface();
2297 	const vk::VkPhysicalDevice		physicalDevice	= context.getContext().getPhysicalDevice();
2298 	const vk::VkDevice				device			= context.getContext().getDevice();
2299 	const vector<deUint32>&			queueFamilies	= context.getContext().getQueueFamilies();
2300 
2301 	m_bufferSize = context.getBufferSize();
2302 
2303 	m_dstBuffer	= createBuffer(vkd, device, m_bufferSize, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies);
2304 	m_memory	= bindBufferMemory(vki, vkd, physicalDevice, device, *m_dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
2305 }
2306 
logSubmit(TestLog & log,size_t commandIndex) const2307 void BufferCopyToBuffer::logSubmit (TestLog& log, size_t commandIndex) const
2308 {
2309 	log << TestLog::Message << commandIndex << ":" << getName() << " Copy buffer to another buffer" << TestLog::EndMessage;
2310 }
2311 
submit(SubmitContext & context)2312 void BufferCopyToBuffer::submit (SubmitContext& context)
2313 {
2314 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
2315 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
2316 	const vk::VkBufferCopy		range			=
2317 	{
2318 		0, 0, // Offsets
2319 		m_bufferSize
2320 	};
2321 
2322 	vkd.cmdCopyBuffer(commandBuffer, context.getBuffer(), *m_dstBuffer, 1, &range);
2323 }
2324 
verify(VerifyContext & context,size_t commandIndex)2325 void BufferCopyToBuffer::verify (VerifyContext& context, size_t commandIndex)
2326 {
2327 	tcu::ResultCollector&					resultCollector	(context.getResultCollector());
2328 	ReferenceMemory&						reference		(context.getReference());
2329 	const vk::DeviceInterface&				vkd				= context.getContext().getDeviceInterface();
2330 	const vk::VkDevice						device			= context.getContext().getDevice();
2331 	const vk::VkQueue						queue			= context.getContext().getQueue();
2332 	const vk::VkCommandPool					commandPool		= context.getContext().getCommandPool();
2333 	const vk::Unique<vk::VkCommandBuffer>	commandBuffer	(createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
2334 	const vk::VkBufferMemoryBarrier			barrier			=
2335 	{
2336 		vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
2337 		DE_NULL,
2338 
2339 		vk::VK_ACCESS_TRANSFER_WRITE_BIT,
2340 		vk::VK_ACCESS_HOST_READ_BIT,
2341 
2342 		VK_QUEUE_FAMILY_IGNORED,
2343 		VK_QUEUE_FAMILY_IGNORED,
2344 		*m_dstBuffer,
2345 		0,
2346 		VK_WHOLE_SIZE
2347 	};
2348 
2349 	vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &barrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
2350 
2351 	VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
2352 	queueRun(vkd, queue, *commandBuffer);
2353 
2354 	{
2355 		void* const	ptr		= mapMemory(vkd, device, *m_memory, m_bufferSize);
2356 		bool		isOk	= true;
2357 
2358 		vk::invalidateMappedMemoryRange(vkd, device, *m_memory, 0, m_bufferSize);
2359 
2360 		{
2361 			const deUint8* const data = (const deUint8*)ptr;
2362 
2363 			for (size_t pos = 0; pos < (size_t)m_bufferSize; pos++)
2364 			{
2365 				if (reference.isDefined(pos))
2366 				{
2367 					if (data[pos] != reference.get(pos))
2368 					{
2369 						resultCollector.fail(
2370 								de::toString(commandIndex) + ":" + getName()
2371 								+ " Result differs from reference, Expected: "
2372 								+ de::toString(tcu::toHex<8>(reference.get(pos)))
2373 								+ ", Got: "
2374 								+ de::toString(tcu::toHex<8>(data[pos]))
2375 								+ ", At offset: "
2376 								+ de::toString(pos));
2377 						isOk = false;
						break;
2378 					}
2379 				}
2380 			}
2381 		}
2382 
2383 		vkd.unmapMemory(device, *m_memory);
2384 
2385 		if (!isOk)
2386 			context.getLog() << TestLog::Message << commandIndex << ": Buffer copy to buffer verification failed" << TestLog::EndMessage;
2387 	}
2388 }
2389 
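// Fills a newly allocated host-visible buffer with seeded pseudo-random data and copies it into the test buffer.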
2390 class BufferCopyFromBuffer : public CmdCommand
2391 {
2392 public:
BufferCopyFromBuffer(deUint32 seed)2393 									BufferCopyFromBuffer	(deUint32 seed) : m_seed(seed) {}
~BufferCopyFromBuffer(void)2394 									~BufferCopyFromBuffer	(void) {}
getName(void) const2395 	const char*						getName					(void) const { return "BufferCopyFromBuffer"; }
2396 
2397 	void							logPrepare				(TestLog& log, size_t commandIndex) const;
2398 	void							prepare					(PrepareContext& context);
2399 	void							logSubmit				(TestLog& log, size_t commandIndex) const;
2400 	void							submit					(SubmitContext& context);
2401 	void							verify					(VerifyContext& context, size_t commandIndex);
2402 
2403 private:
2404 	const deUint32					m_seed;
2405 	vk::VkDeviceSize				m_bufferSize;
2406 	vk::Move<vk::VkBuffer>			m_srcBuffer;
2407 	vk::Move<vk::VkDeviceMemory>	m_memory;
2408 };
2409 
logPrepare(TestLog & log,size_t commandIndex) const2410 void BufferCopyFromBuffer::logPrepare (TestLog& log, size_t commandIndex) const
2411 {
2412 	log << TestLog::Message << commandIndex << ":" << getName() << " Allocate source buffer for buffer to buffer copy. Seed: " << m_seed << TestLog::EndMessage;
2413 }
2414 
prepare(PrepareContext & context)2415 void BufferCopyFromBuffer::prepare (PrepareContext& context)
2416 {
2417 	const vk::InstanceInterface&	vki				= context.getContext().getInstanceInterface();
2418 	const vk::DeviceInterface&		vkd				= context.getContext().getDeviceInterface();
2419 	const vk::VkPhysicalDevice		physicalDevice	= context.getContext().getPhysicalDevice();
2420 	const vk::VkDevice				device			= context.getContext().getDevice();
2421 	const vector<deUint32>&			queueFamilies	= context.getContext().getQueueFamilies();
2422 
2423 	m_bufferSize	= context.getBufferSize();
2424 	m_srcBuffer		= createBuffer(vkd, device, m_bufferSize, vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies);
2425 	m_memory		= bindBufferMemory(vki, vkd, physicalDevice, device, *m_srcBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
2426 
2427 	{
2428 		void* const	ptr	= mapMemory(vkd, device, *m_memory, m_bufferSize);
2429 		de::Random	rng	(m_seed);
2430 
2431 		{
2432 			deUint8* const	data = (deUint8*)ptr;
2433 
2434 			for (size_t ndx = 0; ndx < (size_t)m_bufferSize; ndx++)
2435 				data[ndx] = rng.getUint8();
2436 		}
2437 
2438 		vk::flushMappedMemoryRange(vkd, device, *m_memory, 0, m_bufferSize);
2439 		vkd.unmapMemory(device, *m_memory);
2440 	}
2441 }
2442 
logSubmit(TestLog & log,size_t commandIndex) const2443 void BufferCopyFromBuffer::logSubmit (TestLog& log, size_t commandIndex) const
2444 {
2445 	log << TestLog::Message << commandIndex << ":" << getName() << " Copy buffer data from another buffer" << TestLog::EndMessage;
2446 }
2447 
submit(SubmitContext & context)2448 void BufferCopyFromBuffer::submit (SubmitContext& context)
2449 {
2450 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
2451 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
2452 	const vk::VkBufferCopy		range			=
2453 	{
2454 		0, 0, // Offsets
2455 		m_bufferSize
2456 	};
2457 
2458 	vkd.cmdCopyBuffer(commandBuffer, *m_srcBuffer, context.getBuffer(), 1, &range);
2459 }
2460 
verify(VerifyContext & context,size_t)2461 void BufferCopyFromBuffer::verify (VerifyContext& context, size_t)
2462 {
2463 	ReferenceMemory&	reference	(context.getReference());
2464 	de::Random			rng			(m_seed);
2465 
2466 	for (size_t ndx = 0; ndx < (size_t)m_bufferSize; ndx++)
2467 		reference.set(ndx, rng.getUint8());
2468 }
2469 
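// Copies the test buffer into a newly created RGBA8 image and reads the image back to verify the result against the reference memory.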
2470 class BufferCopyToImage : public CmdCommand
2471 {
2472 public:
BufferCopyToImage(void)2473 									BufferCopyToImage	(void) {}
~BufferCopyToImage(void)2474 									~BufferCopyToImage	(void) {}
getName(void) const2475 	const char*						getName				(void) const { return "BufferCopyToImage"; }
2476 
2477 	void							logPrepare			(TestLog& log, size_t commandIndex) const;
2478 	void							prepare				(PrepareContext& context);
2479 	void							logSubmit			(TestLog& log, size_t commandIndex) const;
2480 	void							submit				(SubmitContext& context);
2481 	void							verify				(VerifyContext& context, size_t commandIndex);
2482 
2483 private:
2484 	deInt32							m_imageWidth;
2485 	deInt32							m_imageHeight;
2486 	vk::Move<vk::VkImage>			m_dstImage;
2487 	vk::Move<vk::VkDeviceMemory>	m_memory;
2488 };
2489 
logPrepare(TestLog & log,size_t commandIndex) const2490 void BufferCopyToImage::logPrepare (TestLog& log, size_t commandIndex) const
2491 {
2492 	log << TestLog::Message << commandIndex << ":" << getName() << " Allocate destination image for buffer to image copy." << TestLog::EndMessage;
2493 }
2494 
prepare(PrepareContext & context)2495 void BufferCopyToImage::prepare (PrepareContext& context)
2496 {
2497 	const vk::InstanceInterface&	vki				= context.getContext().getInstanceInterface();
2498 	const vk::DeviceInterface&		vkd				= context.getContext().getDeviceInterface();
2499 	const vk::VkPhysicalDevice		physicalDevice	= context.getContext().getPhysicalDevice();
2500 	const vk::VkDevice				device			= context.getContext().getDevice();
2501 	const vk::VkQueue				queue			= context.getContext().getQueue();
2502 	const vk::VkCommandPool			commandPool		= context.getContext().getCommandPool();
2503 	const vector<deUint32>&			queueFamilies	= context.getContext().getQueueFamilies();
2504 	const IVec2						imageSize		= findImageSizeWxHx4(context.getBufferSize());
2505 
2506 	m_imageWidth	= imageSize[0];
2507 	m_imageHeight	= imageSize[1];
2508 
2509 	{
2510 		const vk::VkImageCreateInfo	createInfo =
2511 		{
2512 			vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
2513 			DE_NULL,
2514 
2515 			0,
2516 			vk::VK_IMAGE_TYPE_2D,
2517 			vk::VK_FORMAT_R8G8B8A8_UNORM,
2518 			{
2519 				(deUint32)m_imageWidth,
2520 				(deUint32)m_imageHeight,
2521 				1u,
2522 			},
2523 			1, 1, // mipLevels, arrayLayers
2524 			vk::VK_SAMPLE_COUNT_1_BIT,
2525 
2526 			vk::VK_IMAGE_TILING_OPTIMAL,
2527 			vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
2528 			vk::VK_SHARING_MODE_EXCLUSIVE,
2529 
2530 			(deUint32)queueFamilies.size(),
2531 			&queueFamilies[0],
2532 			vk::VK_IMAGE_LAYOUT_UNDEFINED
2533 		};
2534 
2535 		m_dstImage = vk::createImage(vkd, device, &createInfo);
2536 	}
2537 
2538 	m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_dstImage, 0);
2539 
2540 	{
2541 		const vk::Unique<vk::VkCommandBuffer>	commandBuffer	(createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
2542 		const vk::VkImageMemoryBarrier			barrier			=
2543 		{
2544 			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
2545 			DE_NULL,
2546 
2547 			0,
2548 			vk::VK_ACCESS_TRANSFER_WRITE_BIT,
2549 
2550 			vk::VK_IMAGE_LAYOUT_UNDEFINED,
2551 			vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2552 
2553 			VK_QUEUE_FAMILY_IGNORED,
2554 			VK_QUEUE_FAMILY_IGNORED,
2555 
2556 			*m_dstImage,
2557 			{
2558 				vk::VK_IMAGE_ASPECT_COLOR_BIT,
2559 				0,	// Mip level
2560 				1,	// Mip level count
2561 				0,	// Layer
2562 				1	// Layer count
2563 			}
2564 		};
2565 
2566 		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &barrier);
2567 
2568 		VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
2569 		queueRun(vkd, queue, *commandBuffer);
2570 	}
2571 }
2572 
logSubmit(TestLog & log,size_t commandIndex) const2573 void BufferCopyToImage::logSubmit (TestLog& log, size_t commandIndex) const
2574 {
2575 	log << TestLog::Message << commandIndex << ":" << getName() << " Copy buffer to image" << TestLog::EndMessage;
2576 }
2577 
submit(SubmitContext & context)2578 void BufferCopyToImage::submit (SubmitContext& context)
2579 {
2580 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
2581 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
2582 	const vk::VkBufferImageCopy	region			=
2583 	{
2584 		0,
2585 		0, 0,
2586 		{
2587 			vk::VK_IMAGE_ASPECT_COLOR_BIT,
2588 			0,	// mipLevel
2589 			0,	// arrayLayer
2590 			1	// layerCount
2591 		},
2592 		{ 0, 0, 0 },
2593 		{
2594 			(deUint32)m_imageWidth,
2595 			(deUint32)m_imageHeight,
2596 			1u
2597 		}
2598 	};
2599 
2600 	vkd.cmdCopyBufferToImage(commandBuffer, context.getBuffer(), *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
2601 }
2602 
verify(VerifyContext & context,size_t commandIndex)2603 void BufferCopyToImage::verify (VerifyContext& context, size_t commandIndex)
2604 {
2605 	tcu::ResultCollector&					resultCollector	(context.getResultCollector());
2606 	ReferenceMemory&						reference		(context.getReference());
2607 	const vk::InstanceInterface&			vki				= context.getContext().getInstanceInterface();
2608 	const vk::DeviceInterface&				vkd				= context.getContext().getDeviceInterface();
2609 	const vk::VkPhysicalDevice				physicalDevice	= context.getContext().getPhysicalDevice();
2610 	const vk::VkDevice						device			= context.getContext().getDevice();
2611 	const vk::VkQueue						queue			= context.getContext().getQueue();
2612 	const vk::VkCommandPool					commandPool		= context.getContext().getCommandPool();
2613 	const vk::Unique<vk::VkCommandBuffer>	commandBuffer	(createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
2614 	const vector<deUint32>&					queueFamilies	= context.getContext().getQueueFamilies();
2615 	const vk::Unique<vk::VkBuffer>			dstBuffer		(createBuffer(vkd, device, 4 * m_imageWidth * m_imageHeight, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
2616 	const vk::Unique<vk::VkDeviceMemory>	memory			(bindBufferMemory(vki, vkd, physicalDevice, device, *dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
2617 	{
2618 		const vk::VkImageMemoryBarrier		imageBarrier	=
2619 		{
2620 			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
2621 			DE_NULL,
2622 
2623 			vk::VK_ACCESS_TRANSFER_WRITE_BIT,
2624 			vk::VK_ACCESS_TRANSFER_READ_BIT,
2625 
2626 			vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2627 			vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2628 
2629 			VK_QUEUE_FAMILY_IGNORED,
2630 			VK_QUEUE_FAMILY_IGNORED,
2631 
2632 			*m_dstImage,
2633 			{
2634 				vk::VK_IMAGE_ASPECT_COLOR_BIT,
2635 				0,	// Mip level
2636 				1,	// Mip level count
2637 				0,	// Layer
2638 				1	// Layer count
2639 			}
2640 		};
2641 		const vk::VkBufferMemoryBarrier bufferBarrier =
2642 		{
2643 			vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
2644 			DE_NULL,
2645 
2646 			vk::VK_ACCESS_TRANSFER_WRITE_BIT,
2647 			vk::VK_ACCESS_HOST_READ_BIT,
2648 
2649 			VK_QUEUE_FAMILY_IGNORED,
2650 			VK_QUEUE_FAMILY_IGNORED,
2651 			*dstBuffer,
2652 			0,
2653 			VK_WHOLE_SIZE
2654 		};
2655 
2656 		const vk::VkBufferImageCopy	region =
2657 		{
2658 			0,
2659 			0, 0,
2660 			{
2661 				vk::VK_IMAGE_ASPECT_COLOR_BIT,
2662 				0,	// mipLevel
2663 				0,	// arrayLayer
2664 				1	// layerCount
2665 			},
2666 			{ 0, 0, 0 },
2667 			{
2668 				(deUint32)m_imageWidth,
2669 				(deUint32)m_imageHeight,
2670 				1u
2671 			}
2672 		};
2673 
2674 		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &imageBarrier);
2675 		vkd.cmdCopyImageToBuffer(*commandBuffer, *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *dstBuffer, 1, &region);
2676 		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &bufferBarrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
2677 	}
2678 
2679 	VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
2680 	queueRun(vkd, queue, *commandBuffer);
2681 
2682 	{
2683 		void* const	ptr		= mapMemory(vkd, device, *memory, 4 * m_imageWidth * m_imageHeight);
2684 
2685 		vk::invalidateMappedMemoryRange(vkd, device, *memory, 0,  4 * m_imageWidth * m_imageHeight);
2686 
2687 		{
2688 			const deUint8* const	data = (const deUint8*)ptr;
2689 
2690 			for (size_t pos = 0; pos < (size_t)( 4 * m_imageWidth * m_imageHeight); pos++)
2691 			{
2692 				if (reference.isDefined(pos))
2693 				{
2694 					if (data[pos] != reference.get(pos))
2695 					{
2696 						resultCollector.fail(
2697 								de::toString(commandIndex) + ":" + getName()
2698 								+ " Result differs from reference, Expected: "
2699 								+ de::toString(tcu::toHex<8>(reference.get(pos)))
2700 								+ ", Got: "
2701 								+ de::toString(tcu::toHex<8>(data[pos]))
2702 								+ ", At offset: "
2703 								+ de::toString(pos));
2704 						break;
2705 					}
2706 				}
2707 			}
2708 		}
2709 
2710 		vkd.unmapMemory(device, *memory);
2711 	}
2712 }
2713 
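// Creates an RGBA8 image filled with seeded pseudo-random data and copies it into the test buffer.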
2714 class BufferCopyFromImage : public CmdCommand
2715 {
2716 public:
BufferCopyFromImage(deUint32 seed)2717 									BufferCopyFromImage		(deUint32 seed) : m_seed(seed) {}
~BufferCopyFromImage(void)2718 									~BufferCopyFromImage	(void) {}
getName(void) const2719 	const char*						getName					(void) const { return "BufferCopyFromImage"; }
2720 
2721 	void							logPrepare				(TestLog& log, size_t commandIndex) const;
2722 	void							prepare					(PrepareContext& context);
2723 	void							logSubmit				(TestLog& log, size_t commandIndex) const;
2724 	void							submit					(SubmitContext& context);
2725 	void							verify					(VerifyContext& context, size_t commandIndex);
2726 
2727 private:
2728 	const deUint32					m_seed;
2729 	deInt32							m_imageWidth;
2730 	deInt32							m_imageHeight;
2731 	vk::Move<vk::VkImage>			m_srcImage;
2732 	vk::Move<vk::VkDeviceMemory>	m_memory;
2733 };
2734 
logPrepare(TestLog & log,size_t commandIndex) const2735 void BufferCopyFromImage::logPrepare (TestLog& log, size_t commandIndex) const
2736 {
2737 	log << TestLog::Message << commandIndex << ":" << getName() << " Allocate source image for image to buffer copy." << TestLog::EndMessage;
2738 }
2739 
prepare(PrepareContext & context)2740 void BufferCopyFromImage::prepare (PrepareContext& context)
2741 {
2742 	const vk::InstanceInterface&	vki				= context.getContext().getInstanceInterface();
2743 	const vk::DeviceInterface&		vkd				= context.getContext().getDeviceInterface();
2744 	const vk::VkPhysicalDevice		physicalDevice	= context.getContext().getPhysicalDevice();
2745 	const vk::VkDevice				device			= context.getContext().getDevice();
2746 	const vk::VkQueue				queue			= context.getContext().getQueue();
2747 	const vk::VkCommandPool			commandPool		= context.getContext().getCommandPool();
2748 	const vector<deUint32>&			queueFamilies	= context.getContext().getQueueFamilies();
2749 	const IVec2						imageSize		= findImageSizeWxHx4(context.getBufferSize());
2750 
2751 	m_imageWidth	= imageSize[0];
2752 	m_imageHeight	= imageSize[1];
2753 
2754 	{
2755 		const vk::VkImageCreateInfo	createInfo =
2756 		{
2757 			vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
2758 			DE_NULL,
2759 
2760 			0,
2761 			vk::VK_IMAGE_TYPE_2D,
2762 			vk::VK_FORMAT_R8G8B8A8_UNORM,
2763 			{
2764 				(deUint32)m_imageWidth,
2765 				(deUint32)m_imageHeight,
2766 				1u,
2767 			},
2768 			1, 1, // mipLevels, arrayLayers
2769 			vk::VK_SAMPLE_COUNT_1_BIT,
2770 
2771 			vk::VK_IMAGE_TILING_OPTIMAL,
2772 			vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
2773 			vk::VK_SHARING_MODE_EXCLUSIVE,
2774 
2775 			(deUint32)queueFamilies.size(),
2776 			&queueFamilies[0],
2777 			vk::VK_IMAGE_LAYOUT_UNDEFINED
2778 		};
2779 
2780 		m_srcImage = vk::createImage(vkd, device, &createInfo);
2781 	}
2782 
2783 	m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_srcImage, 0);
2784 
2785 	{
2786 		const vk::Unique<vk::VkBuffer>			srcBuffer		(createBuffer(vkd, device, 4 * m_imageWidth * m_imageHeight, vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
2787 		const vk::Unique<vk::VkDeviceMemory>	memory			(bindBufferMemory(vki, vkd, physicalDevice, device, *srcBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
2788 		const vk::Unique<vk::VkCommandBuffer>	commandBuffer	(createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
2789 		const vk::VkImageMemoryBarrier			preImageBarrier	=
2790 		{
2791 			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
2792 			DE_NULL,
2793 
2794 			0,
2795 			vk::VK_ACCESS_TRANSFER_WRITE_BIT,
2796 
2797 			vk::VK_IMAGE_LAYOUT_UNDEFINED,
2798 			vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2799 
2800 			VK_QUEUE_FAMILY_IGNORED,
2801 			VK_QUEUE_FAMILY_IGNORED,
2802 
2803 			*m_srcImage,
2804 			{
2805 				vk::VK_IMAGE_ASPECT_COLOR_BIT,
2806 				0,	// Mip level
2807 				1,	// Mip level count
2808 				0,	// Layer
2809 				1	// Layer count
2810 			}
2811 		};
2812 		const vk::VkImageMemoryBarrier			postImageBarrier =
2813 		{
2814 			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
2815 			DE_NULL,
2816 
2817 			vk::VK_ACCESS_TRANSFER_WRITE_BIT,
2818 			0,
2819 
2820 			vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2821 			vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2822 
2823 			VK_QUEUE_FAMILY_IGNORED,
2824 			VK_QUEUE_FAMILY_IGNORED,
2825 
2826 			*m_srcImage,
2827 			{
2828 				vk::VK_IMAGE_ASPECT_COLOR_BIT,
2829 				0,	// Mip level
2830 				1,	// Mip level count
2831 				0,	// Layer
2832 				1	// Layer count
2833 			}
2834 		};
2835 		const vk::VkBufferImageCopy				region				=
2836 		{
2837 			0,
2838 			0, 0,
2839 			{
2840 				vk::VK_IMAGE_ASPECT_COLOR_BIT,
2841 				0,	// mipLevel
2842 				0,	// arrayLayer
2843 				1	// layerCount
2844 			},
2845 			{ 0, 0, 0 },
2846 			{
2847 				(deUint32)m_imageWidth,
2848 				(deUint32)m_imageHeight,
2849 				1u
2850 			}
2851 		};
2852 
2853 		{
2854 			void* const	ptr	= mapMemory(vkd, device, *memory, 4 * m_imageWidth * m_imageHeight);
2855 			de::Random	rng	(m_seed);
2856 
2857 			{
2858 				deUint8* const	data = (deUint8*)ptr;
2859 
2860 				for (size_t ndx = 0; ndx < (size_t)(4 * m_imageWidth * m_imageHeight); ndx++)
2861 					data[ndx] = rng.getUint8();
2862 			}
2863 
2864 			vk::flushMappedMemoryRange(vkd, device, *memory, 0, 4 * m_imageWidth * m_imageHeight);
2865 			vkd.unmapMemory(device, *memory);
2866 		}
2867 
2868 		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &preImageBarrier);
2869 		vkd.cmdCopyBufferToImage(*commandBuffer, *srcBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
2870 		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &postImageBarrier);
2871 
2872 		VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
2873 		queueRun(vkd, queue, *commandBuffer);
2874 	}
2875 }
2876 
logSubmit(TestLog & log,size_t commandIndex) const2877 void BufferCopyFromImage::logSubmit (TestLog& log, size_t commandIndex) const
2878 {
2879 	log << TestLog::Message << commandIndex << ":" << getName() << " Copy buffer data from image" << TestLog::EndMessage;
2880 }
2881 
submit(SubmitContext & context)2882 void BufferCopyFromImage::submit (SubmitContext& context)
2883 {
2884 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
2885 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
2886 	const vk::VkBufferImageCopy	region			=
2887 	{
2888 		0,
2889 		0, 0,
2890 		{
2891 			vk::VK_IMAGE_ASPECT_COLOR_BIT,
2892 			0,	// mipLevel
2893 			0,	// arrayLayer
2894 			1	// layerCount
2895 		},
2896 		{ 0, 0, 0 },
2897 		{
2898 			(deUint32)m_imageWidth,
2899 			(deUint32)m_imageHeight,
2900 			1u
2901 		}
2902 	};
2903 
2904 	vkd.cmdCopyImageToBuffer(commandBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, context.getBuffer(), 1, &region);
2905 }
2906 
verify(VerifyContext & context,size_t)2907 void BufferCopyFromImage::verify (VerifyContext& context, size_t)
2908 {
2909 	ReferenceMemory&	reference		(context.getReference());
2910 	de::Random			rng	(m_seed);
2911 
2912 	for (size_t ndx = 0; ndx < (size_t)(4 * m_imageWidth * m_imageHeight); ndx++)
2913 		reference.set(ndx, rng.getUint8());
2914 }
2915 
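// Copies the test image into a newly allocated host-visible buffer and compares the result against the reference image.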
2916 class ImageCopyToBuffer : public CmdCommand
2917 {
2918 public:
ImageCopyToBuffer(vk::VkImageLayout imageLayout)2919 									ImageCopyToBuffer	(vk::VkImageLayout imageLayout) : m_imageLayout (imageLayout) {}
~ImageCopyToBuffer(void)2920 									~ImageCopyToBuffer	(void) {}
getName(void) const2921 	const char*						getName				(void) const { return "ImageCopyToBuffer"; }
2922 
2923 	void							logPrepare			(TestLog& log, size_t commandIndex) const;
2924 	void							prepare				(PrepareContext& context);
2925 	void							logSubmit			(TestLog& log, size_t commandIndex) const;
2926 	void							submit				(SubmitContext& context);
2927 	void							verify				(VerifyContext& context, size_t commandIndex);
2928 
2929 private:
2930 	vk::VkImageLayout				m_imageLayout;
2931 	vk::VkDeviceSize				m_bufferSize;
2932 	vk::Move<vk::VkBuffer>			m_dstBuffer;
2933 	vk::Move<vk::VkDeviceMemory>	m_memory;
2934 	vk::VkDeviceSize				m_imageMemorySize;
2935 	deInt32							m_imageWidth;
2936 	deInt32							m_imageHeight;
2937 };
2938 
logPrepare(TestLog & log,size_t commandIndex) const2939 void ImageCopyToBuffer::logPrepare (TestLog& log, size_t commandIndex) const
2940 {
2941 	log << TestLog::Message << commandIndex << ":" << getName() << " Allocate destination buffer for image to buffer copy." << TestLog::EndMessage;
2942 }
2943 
prepare(PrepareContext & context)2944 void ImageCopyToBuffer::prepare (PrepareContext& context)
2945 {
2946 	const vk::InstanceInterface&	vki				= context.getContext().getInstanceInterface();
2947 	const vk::DeviceInterface&		vkd				= context.getContext().getDeviceInterface();
2948 	const vk::VkPhysicalDevice		physicalDevice	= context.getContext().getPhysicalDevice();
2949 	const vk::VkDevice				device			= context.getContext().getDevice();
2950 	const vector<deUint32>&			queueFamilies	= context.getContext().getQueueFamilies();
2951 
2952 	m_imageWidth		= context.getImageWidth();
2953 	m_imageHeight		= context.getImageHeight();
2954 	m_bufferSize		= 4 * m_imageWidth * m_imageHeight;
2955 	m_imageMemorySize	= context.getImageMemorySize();
2956 	m_dstBuffer			= createBuffer(vkd, device, m_bufferSize, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies);
2957 	m_memory			= bindBufferMemory(vki, vkd, physicalDevice, device, *m_dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
2958 }
2959 
logSubmit(TestLog & log,size_t commandIndex) const2960 void ImageCopyToBuffer::logSubmit (TestLog& log, size_t commandIndex) const
2961 {
2962 	log << TestLog::Message << commandIndex << ":" << getName() << " Copy image to buffer" << TestLog::EndMessage;
2963 }
2964 
submit(SubmitContext & context)2965 void ImageCopyToBuffer::submit (SubmitContext& context)
2966 {
2967 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
2968 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
2969 	const vk::VkBufferImageCopy	region			=
2970 	{
2971 		0,
2972 		0, 0,
2973 		{
2974 			vk::VK_IMAGE_ASPECT_COLOR_BIT,
2975 			0,	// mipLevel
2976 			0,	// arrayLayer
2977 			1	// layerCount
2978 		},
2979 		{ 0, 0, 0 },
2980 		{
2981 			(deUint32)m_imageWidth,
2982 			(deUint32)m_imageHeight,
2983 			1u
2984 		}
2985 	};
2986 
2987 	vkd.cmdCopyImageToBuffer(commandBuffer, context.getImage(), m_imageLayout, *m_dstBuffer, 1, &region);
2988 }
2989 
verify(VerifyContext & context,size_t commandIndex)2990 void ImageCopyToBuffer::verify (VerifyContext& context, size_t commandIndex)
2991 {
2992 	tcu::ResultCollector&					resultCollector	(context.getResultCollector());
2993 	ReferenceMemory&						reference		(context.getReference());
2994 	const vk::DeviceInterface&				vkd				= context.getContext().getDeviceInterface();
2995 	const vk::VkDevice						device			= context.getContext().getDevice();
2996 	const vk::VkQueue						queue			= context.getContext().getQueue();
2997 	const vk::VkCommandPool					commandPool		= context.getContext().getCommandPool();
2998 	const vk::Unique<vk::VkCommandBuffer>	commandBuffer	(createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
2999 	const vk::VkBufferMemoryBarrier			barrier			=
3000 	{
3001 		vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
3002 		DE_NULL,
3003 
3004 		vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3005 		vk::VK_ACCESS_HOST_READ_BIT,
3006 
3007 		VK_QUEUE_FAMILY_IGNORED,
3008 		VK_QUEUE_FAMILY_IGNORED,
3009 		*m_dstBuffer,
3010 		0,
3011 		VK_WHOLE_SIZE
3012 	};
3013 
3014 	vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &barrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
3015 
3016 	VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
3017 	queueRun(vkd, queue, *commandBuffer);
3018 
3019 	reference.setUndefined(0, (size_t)m_imageMemorySize);
3020 	{
3021 		void* const						ptr				= mapMemory(vkd, device, *m_memory, m_bufferSize);
3022 		const ConstPixelBufferAccess	referenceImage	(context.getReferenceImage().getAccess());
3023 		const ConstPixelBufferAccess	resultImage		(TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_imageWidth, m_imageHeight, 1, ptr);
3024 
3025 		vk::invalidateMappedMemoryRange(vkd, device, *m_memory, 0, m_bufferSize);
3026 
3027 		if (!tcu::intThresholdCompare(context.getLog(), (de::toString(commandIndex) + ":" + getName()).c_str(), (de::toString(commandIndex) + ":" + getName()).c_str(), referenceImage, resultImage, UVec4(0), tcu::COMPARE_LOG_ON_ERROR))
3028 			resultCollector.fail(de::toString(commandIndex) + ":" + getName() + " Image comparison failed");
3029 
3030 		vkd.unmapMemory(device, *m_memory);
3031 	}
3032 }
3033 
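// Fills a host-visible staging buffer with seeded random bytes and copies it into the
// test image with vkCmdCopyBufferToImage. verify() regenerates the same random values
// into the reference image.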
3034 class ImageCopyFromBuffer : public CmdCommand
3035 {
3036 public:
3037 									ImageCopyFromBuffer		(deUint32 seed, vk::VkImageLayout imageLayout) : m_seed(seed), m_imageLayout(imageLayout) {}
3038 									~ImageCopyFromBuffer	(void) {}
3039 	const char*						getName					(void) const { return "ImageCopyFromBuffer"; }
3040 
3041 	void							logPrepare				(TestLog& log, size_t commandIndex) const;
3042 	void							prepare					(PrepareContext& context);
3043 	void							logSubmit				(TestLog& log, size_t commandIndex) const;
3044 	void							submit					(SubmitContext& context);
3045 	void							verify					(VerifyContext& context, size_t commandIndex);
3046 
3047 private:
3048 	const deUint32					m_seed;
3049 	const vk::VkImageLayout			m_imageLayout;
3050 	deInt32							m_imageWidth;
3051 	deInt32							m_imageHeight;
3052 	vk::VkDeviceSize				m_imageMemorySize;
3053 	vk::VkDeviceSize				m_bufferSize;
3054 	vk::Move<vk::VkBuffer>			m_srcBuffer;
3055 	vk::Move<vk::VkDeviceMemory>	m_memory;
3056 };
3057 
3058 void ImageCopyFromBuffer::logPrepare (TestLog& log, size_t commandIndex) const
3059 {
3060 	log << TestLog::Message << commandIndex << ":" << getName() << " Allocate source buffer for buffer to image copy. Seed: " << m_seed << TestLog::EndMessage;
3061 }
3062 
3063 void ImageCopyFromBuffer::prepare (PrepareContext& context)
3064 {
3065 	const vk::InstanceInterface&	vki				= context.getContext().getInstanceInterface();
3066 	const vk::DeviceInterface&		vkd				= context.getContext().getDeviceInterface();
3067 	const vk::VkPhysicalDevice		physicalDevice	= context.getContext().getPhysicalDevice();
3068 	const vk::VkDevice				device			= context.getContext().getDevice();
3069 	const vector<deUint32>&			queueFamilies	= context.getContext().getQueueFamilies();
3070 
3071 	m_imageWidth		= context.getImageWidth();
3072 	m_imageHeight		= context.getImageHeight();
3073 	m_imageMemorySize	= context.getImageMemorySize();
3074 	m_bufferSize		= m_imageWidth * m_imageHeight * 4;
3075 	m_srcBuffer			= createBuffer(vkd, device, m_bufferSize, vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies);
3076 	m_memory			= bindBufferMemory(vki, vkd, physicalDevice, device, *m_srcBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
3077 
3078 	{
3079 		void* const	ptr	= mapMemory(vkd, device, *m_memory, m_bufferSize);
3080 		de::Random	rng	(m_seed);
3081 
3082 		{
3083 			deUint8* const	data = (deUint8*)ptr;
3084 
3085 			for (size_t ndx = 0; ndx < (size_t)m_bufferSize; ndx++)
3086 				data[ndx] = rng.getUint8();
3087 		}
3088 
3089 		vk::flushMappedMemoryRange(vkd, device, *m_memory, 0, m_bufferSize);
3090 		vkd.unmapMemory(device, *m_memory);
3091 	}
3092 }
3093 
3094 void ImageCopyFromBuffer::logSubmit (TestLog& log, size_t commandIndex) const
3095 {
3096 	log << TestLog::Message << commandIndex << ":" << getName() << " Copy image data from buffer" << TestLog::EndMessage;
3097 }
3098 
3099 void ImageCopyFromBuffer::submit (SubmitContext& context)
3100 {
3101 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
3102 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
3103 	const vk::VkBufferImageCopy	region			=
3104 	{
3105 		0,
3106 		0, 0,
3107 		{
3108 			vk::VK_IMAGE_ASPECT_COLOR_BIT,
3109 			0,	// mipLevel
3110 			0,	// arrayLayer
3111 			1	// layerCount
3112 		},
3113 		{ 0, 0, 0 },
3114 		{
3115 			(deUint32)m_imageWidth,
3116 			(deUint32)m_imageHeight,
3117 			1u
3118 		}
3119 	};
3120 
3121 	vkd.cmdCopyBufferToImage(commandBuffer, *m_srcBuffer, context.getImage(), m_imageLayout, 1, &region);
3122 }
3123 
3124 void ImageCopyFromBuffer::verify (VerifyContext& context, size_t)
3125 {
3126 	ReferenceMemory&	reference	(context.getReference());
3127 	de::Random			rng			(m_seed);
3128 
3129 	reference.setUndefined(0, (size_t)m_imageMemorySize);
3130 
3131 	{
3132 		const PixelBufferAccess&	refAccess	(context.getReferenceImage().getAccess());
3133 
3134 		for (deInt32 y = 0; y < m_imageHeight; y++)
3135 		for (deInt32 x = 0; x < m_imageWidth; x++)
3136 		{
3137 			const deUint8 r8 = rng.getUint8();
3138 			const deUint8 g8 = rng.getUint8();
3139 			const deUint8 b8 = rng.getUint8();
3140 			const deUint8 a8 = rng.getUint8();
3141 
3142 			refAccess.setPixel(UVec4(r8, g8, b8, a8), x, y);
3143 		}
3144 	}
3145 }
3146 
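// Creates a second image, initializes it with seeded random data through a staging
// buffer, and copies it into the test image with vkCmdCopyImage. verify() regenerates
// the same random values into the reference image.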
3147 class ImageCopyFromImage : public CmdCommand
3148 {
3149 public:
3150 									ImageCopyFromImage	(deUint32 seed, vk::VkImageLayout imageLayout) : m_seed(seed), m_imageLayout(imageLayout) {}
3151 									~ImageCopyFromImage	(void) {}
3152 	const char*						getName				(void) const { return "ImageCopyFromImage"; }
3153 
3154 	void							logPrepare			(TestLog& log, size_t commandIndex) const;
3155 	void							prepare				(PrepareContext& context);
3156 	void							logSubmit			(TestLog& log, size_t commandIndex) const;
3157 	void							submit				(SubmitContext& context);
3158 	void							verify				(VerifyContext& context, size_t commandIndex);
3159 
3160 private:
3161 	const deUint32					m_seed;
3162 	const vk::VkImageLayout			m_imageLayout;
3163 	deInt32							m_imageWidth;
3164 	deInt32							m_imageHeight;
3165 	vk::VkDeviceSize				m_imageMemorySize;
3166 	vk::Move<vk::VkImage>			m_srcImage;
3167 	vk::Move<vk::VkDeviceMemory>	m_memory;
3168 };
3169 
3170 void ImageCopyFromImage::logPrepare (TestLog& log, size_t commandIndex) const
3171 {
3172 	log << TestLog::Message << commandIndex << ":" << getName() << " Allocate source image for image to image copy." << TestLog::EndMessage;
3173 }
3174 
3175 void ImageCopyFromImage::prepare (PrepareContext& context)
3176 {
3177 	const vk::InstanceInterface&	vki				= context.getContext().getInstanceInterface();
3178 	const vk::DeviceInterface&		vkd				= context.getContext().getDeviceInterface();
3179 	const vk::VkPhysicalDevice		physicalDevice	= context.getContext().getPhysicalDevice();
3180 	const vk::VkDevice				device			= context.getContext().getDevice();
3181 	const vk::VkQueue				queue			= context.getContext().getQueue();
3182 	const vk::VkCommandPool			commandPool		= context.getContext().getCommandPool();
3183 	const vector<deUint32>&			queueFamilies	= context.getContext().getQueueFamilies();
3184 
3185 	m_imageWidth		= context.getImageWidth();
3186 	m_imageHeight		= context.getImageHeight();
3187 	m_imageMemorySize	= context.getImageMemorySize();
3188 
3189 	{
3190 		const vk::VkImageCreateInfo	createInfo =
3191 		{
3192 			vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
3193 			DE_NULL,
3194 
3195 			0,
3196 			vk::VK_IMAGE_TYPE_2D,
3197 			vk::VK_FORMAT_R8G8B8A8_UNORM,
3198 			{
3199 				(deUint32)m_imageWidth,
3200 				(deUint32)m_imageHeight,
3201 				1u,
3202 			},
3203 			1, 1, // mipLevels, arrayLayers
3204 			vk::VK_SAMPLE_COUNT_1_BIT,
3205 
3206 			vk::VK_IMAGE_TILING_OPTIMAL,
3207 			vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
3208 			vk::VK_SHARING_MODE_EXCLUSIVE,
3209 
3210 			(deUint32)queueFamilies.size(),
3211 			&queueFamilies[0],
3212 			vk::VK_IMAGE_LAYOUT_UNDEFINED
3213 		};
3214 
3215 		m_srcImage = vk::createImage(vkd, device, &createInfo);
3216 	}
3217 
3218 	m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_srcImage, 0);
3219 
3220 	{
3221 		const vk::Unique<vk::VkBuffer>			srcBuffer		(createBuffer(vkd, device, 4 * m_imageWidth * m_imageHeight, vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
3222 		const vk::Unique<vk::VkDeviceMemory>	memory			(bindBufferMemory(vki, vkd, physicalDevice, device, *srcBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
3223 		const vk::Unique<vk::VkCommandBuffer>	commandBuffer	(createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
3224 		const vk::VkImageMemoryBarrier			preImageBarrier	=
3225 		{
3226 			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
3227 			DE_NULL,
3228 
3229 			0,
3230 			vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3231 
3232 			vk::VK_IMAGE_LAYOUT_UNDEFINED,
3233 			vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
3234 
3235 			VK_QUEUE_FAMILY_IGNORED,
3236 			VK_QUEUE_FAMILY_IGNORED,
3237 
3238 			*m_srcImage,
3239 			{
3240 				vk::VK_IMAGE_ASPECT_COLOR_BIT,
3241 				0,	// Mip level
3242 				1,	// Mip level count
3243 				0,	// Layer
3244 				1	// Layer count
3245 			}
3246 		};
3247 		const vk::VkImageMemoryBarrier			postImageBarrier =
3248 		{
3249 			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
3250 			DE_NULL,
3251 
3252 			vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3253 			0,
3254 
3255 			vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
3256 			vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
3257 
3258 			VK_QUEUE_FAMILY_IGNORED,
3259 			VK_QUEUE_FAMILY_IGNORED,
3260 
3261 			*m_srcImage,
3262 			{
3263 				vk::VK_IMAGE_ASPECT_COLOR_BIT,
3264 				0,	// Mip level
3265 				1,	// Mip level count
3266 				0,	// Layer
3267 				1	// Layer count
3268 			}
3269 		};
3270 		const vk::VkBufferImageCopy				region				=
3271 		{
3272 			0,
3273 			0, 0,
3274 			{
3275 				vk::VK_IMAGE_ASPECT_COLOR_BIT,
3276 				0,	// mipLevel
3277 				0,	// arrayLayer
3278 				1	// layerCount
3279 			},
3280 			{ 0, 0, 0 },
3281 			{
3282 				(deUint32)m_imageWidth,
3283 				(deUint32)m_imageHeight,
3284 				1u
3285 			}
3286 		};
3287 
3288 		{
3289 			void* const	ptr	= mapMemory(vkd, device, *memory, 4 * m_imageWidth * m_imageHeight);
3290 			de::Random	rng	(m_seed);
3291 
3292 			{
3293 				deUint8* const	data = (deUint8*)ptr;
3294 
3295 				for (size_t ndx = 0; ndx < (size_t)(4 * m_imageWidth * m_imageHeight); ndx++)
3296 					data[ndx] = rng.getUint8();
3297 			}
3298 
3299 			vk::flushMappedMemoryRange(vkd, device, *memory, 0, 4 * m_imageWidth * m_imageHeight);
3300 			vkd.unmapMemory(device, *memory);
3301 		}
3302 
3303 		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &preImageBarrier);
3304 		vkd.cmdCopyBufferToImage(*commandBuffer, *srcBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
3305 		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &postImageBarrier);
3306 
3307 		VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
3308 		queueRun(vkd, queue, *commandBuffer);
3309 	}
3310 }
3311 
3312 void ImageCopyFromImage::logSubmit (TestLog& log, size_t commandIndex) const
3313 {
3314 	log << TestLog::Message << commandIndex << ":" << getName() << " Copy image data from another image" << TestLog::EndMessage;
3315 }
3316 
3317 void ImageCopyFromImage::submit (SubmitContext& context)
3318 {
3319 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
3320 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
3321 	const vk::VkImageCopy		region			=
3322 	{
3323 		{
3324 			vk::VK_IMAGE_ASPECT_COLOR_BIT,
3325 			0,	// mipLevel
3326 			0,	// arrayLayer
3327 			1	// layerCount
3328 		},
3329 		{ 0, 0, 0 },
3330 
3331 		{
3332 			vk::VK_IMAGE_ASPECT_COLOR_BIT,
3333 			0,	// mipLevel
3334 			0,	// arrayLayer
3335 			1	// layerCount
3336 		},
3337 		{ 0, 0, 0 },
3338 		{
3339 			(deUint32)m_imageWidth,
3340 			(deUint32)m_imageHeight,
3341 			1u
3342 		}
3343 	};
3344 
3345 	vkd.cmdCopyImage(commandBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, context.getImage(), m_imageLayout, 1, &region);
3346 }
3347 
3348 void ImageCopyFromImage::verify (VerifyContext& context, size_t)
3349 {
3350 	ReferenceMemory&	reference	(context.getReference());
3351 	de::Random			rng			(m_seed);
3352 
3353 	reference.setUndefined(0, (size_t)m_imageMemorySize);
3354 
3355 	{
3356 		const PixelBufferAccess&	refAccess	(context.getReferenceImage().getAccess());
3357 
3358 		for (deInt32 y = 0; y < m_imageHeight; y++)
3359 		for (deInt32 x = 0; x < m_imageWidth; x++)
3360 		{
3361 			const deUint8 r8 = rng.getUint8();
3362 			const deUint8 g8 = rng.getUint8();
3363 			const deUint8 b8 = rng.getUint8();
3364 			const deUint8 a8 = rng.getUint8();
3365 
3366 			refAccess.setPixel(UVec4(r8, g8, b8, a8), x, y);
3367 		}
3368 	}
3369 }
3370 
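// Copies the test image into a freshly created destination image with vkCmdCopyImage.
// verify() reads the destination back through a host-visible buffer and compares it
// against the reference image.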
3371 class ImageCopyToImage : public CmdCommand
3372 {
3373 public:
3374 									ImageCopyToImage	(vk::VkImageLayout imageLayout) : m_imageLayout(imageLayout) {}
3375 									~ImageCopyToImage	(void) {}
3376 	const char*						getName				(void) const { return "ImageCopyToImage"; }
3377 
3378 	void							logPrepare			(TestLog& log, size_t commandIndex) const;
3379 	void							prepare				(PrepareContext& context);
3380 	void							logSubmit			(TestLog& log, size_t commandIndex) const;
3381 	void							submit				(SubmitContext& context);
3382 	void							verify				(VerifyContext& context, size_t commandIndex);
3383 
3384 private:
3385 	const vk::VkImageLayout			m_imageLayout;
3386 	deInt32							m_imageWidth;
3387 	deInt32							m_imageHeight;
3388 	vk::VkDeviceSize				m_imageMemorySize;
3389 	vk::Move<vk::VkImage>			m_dstImage;
3390 	vk::Move<vk::VkDeviceMemory>	m_memory;
3391 };
3392 
3393 void ImageCopyToImage::logPrepare (TestLog& log, size_t commandIndex) const
3394 {
3395 	log << TestLog::Message << commandIndex << ":" << getName() << " Allocate destination image for image to image copy." << TestLog::EndMessage;
3396 }
3397 
3398 void ImageCopyToImage::prepare (PrepareContext& context)
3399 {
3400 	const vk::InstanceInterface&	vki				= context.getContext().getInstanceInterface();
3401 	const vk::DeviceInterface&		vkd				= context.getContext().getDeviceInterface();
3402 	const vk::VkPhysicalDevice		physicalDevice	= context.getContext().getPhysicalDevice();
3403 	const vk::VkDevice				device			= context.getContext().getDevice();
3404 	const vk::VkQueue				queue			= context.getContext().getQueue();
3405 	const vk::VkCommandPool			commandPool		= context.getContext().getCommandPool();
3406 	const vector<deUint32>&			queueFamilies	= context.getContext().getQueueFamilies();
3407 
3408 	m_imageWidth		= context.getImageWidth();
3409 	m_imageHeight		= context.getImageHeight();
3410 	m_imageMemorySize	= context.getImageMemorySize();
3411 
3412 	{
3413 		const vk::VkImageCreateInfo	createInfo =
3414 		{
3415 			vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
3416 			DE_NULL,
3417 
3418 			0,
3419 			vk::VK_IMAGE_TYPE_2D,
3420 			vk::VK_FORMAT_R8G8B8A8_UNORM,
3421 			{
3422 				(deUint32)m_imageWidth,
3423 				(deUint32)m_imageHeight,
3424 				1u,
3425 			},
3426 			1, 1, // mipLevels, arrayLayers
3427 			vk::VK_SAMPLE_COUNT_1_BIT,
3428 
3429 			vk::VK_IMAGE_TILING_OPTIMAL,
3430 			vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
3431 			vk::VK_SHARING_MODE_EXCLUSIVE,
3432 
3433 			(deUint32)queueFamilies.size(),
3434 			&queueFamilies[0],
3435 			vk::VK_IMAGE_LAYOUT_UNDEFINED
3436 		};
3437 
3438 		m_dstImage = vk::createImage(vkd, device, &createInfo);
3439 	}
3440 
3441 	m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_dstImage, 0);
3442 
3443 	{
3444 		const vk::Unique<vk::VkCommandBuffer>	commandBuffer	(createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
3445 		const vk::VkImageMemoryBarrier			barrier			=
3446 		{
3447 			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
3448 			DE_NULL,
3449 
3450 			0,
3451 			vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3452 
3453 			vk::VK_IMAGE_LAYOUT_UNDEFINED,
3454 			vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
3455 
3456 			VK_QUEUE_FAMILY_IGNORED,
3457 			VK_QUEUE_FAMILY_IGNORED,
3458 
3459 			*m_dstImage,
3460 			{
3461 				vk::VK_IMAGE_ASPECT_COLOR_BIT,
3462 				0,	// Mip level
3463 				1,	// Mip level count
3464 				0,	// Layer
3465 				1	// Layer count
3466 			}
3467 		};
3468 
3469 		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &barrier);
3470 
3471 		VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
3472 		queueRun(vkd, queue, *commandBuffer);
3473 	}
3474 }
3475 
3476 void ImageCopyToImage::logSubmit (TestLog& log, size_t commandIndex) const
3477 {
3478 	log << TestLog::Message << commandIndex << ":" << getName() << " Copy image to another image" << TestLog::EndMessage;
3479 }
3480 
3481 void ImageCopyToImage::submit (SubmitContext& context)
3482 {
3483 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
3484 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
3485 	const vk::VkImageCopy		region			=
3486 	{
3487 		{
3488 			vk::VK_IMAGE_ASPECT_COLOR_BIT,
3489 			0,	// mipLevel
3490 			0,	// arrayLayer
3491 			1	// layerCount
3492 		},
3493 		{ 0, 0, 0 },
3494 
3495 		{
3496 			vk::VK_IMAGE_ASPECT_COLOR_BIT,
3497 			0,	// mipLevel
3498 			0,	// arrayLayer
3499 			1	// layerCount
3500 		},
3501 		{ 0, 0, 0 },
3502 		{
3503 			(deUint32)m_imageWidth,
3504 			(deUint32)m_imageHeight,
3505 			1u
3506 		}
3507 	};
3508 
3509 	vkd.cmdCopyImage(commandBuffer, context.getImage(), m_imageLayout, *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
3510 }
3511 
3512 void ImageCopyToImage::verify (VerifyContext& context, size_t commandIndex)
3513 {
3514 	tcu::ResultCollector&					resultCollector	(context.getResultCollector());
3515 	const vk::InstanceInterface&			vki				= context.getContext().getInstanceInterface();
3516 	const vk::DeviceInterface&				vkd				= context.getContext().getDeviceInterface();
3517 	const vk::VkPhysicalDevice				physicalDevice	= context.getContext().getPhysicalDevice();
3518 	const vk::VkDevice						device			= context.getContext().getDevice();
3519 	const vk::VkQueue						queue			= context.getContext().getQueue();
3520 	const vk::VkCommandPool					commandPool		= context.getContext().getCommandPool();
3521 	const vk::Unique<vk::VkCommandBuffer>	commandBuffer	(createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
3522 	const vector<deUint32>&					queueFamilies	= context.getContext().getQueueFamilies();
3523 	const vk::Unique<vk::VkBuffer>			dstBuffer		(createBuffer(vkd, device, 4 * m_imageWidth * m_imageHeight, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
3524 	const vk::Unique<vk::VkDeviceMemory>	memory			(bindBufferMemory(vki, vkd, physicalDevice, device, *dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
3525 	{
3526 		const vk::VkImageMemoryBarrier		imageBarrier	=
3527 		{
3528 			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
3529 			DE_NULL,
3530 
3531 			vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3532 			vk::VK_ACCESS_TRANSFER_READ_BIT,
3533 
3534 			vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
3535 			vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
3536 
3537 			VK_QUEUE_FAMILY_IGNORED,
3538 			VK_QUEUE_FAMILY_IGNORED,
3539 
3540 			*m_dstImage,
3541 			{
3542 				vk::VK_IMAGE_ASPECT_COLOR_BIT,
3543 				0,	// Mip level
3544 				1,	// Mip level count
3545 				0,	// Layer
3546 				1	// Layer count
3547 			}
3548 		};
3549 		const vk::VkBufferMemoryBarrier bufferBarrier =
3550 		{
3551 			vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
3552 			DE_NULL,
3553 
3554 			vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3555 			vk::VK_ACCESS_HOST_READ_BIT,
3556 
3557 			VK_QUEUE_FAMILY_IGNORED,
3558 			VK_QUEUE_FAMILY_IGNORED,
3559 			*dstBuffer,
3560 			0,
3561 			VK_WHOLE_SIZE
3562 		};
3563 		const vk::VkBufferImageCopy	region =
3564 		{
3565 			0,
3566 			0, 0,
3567 			{
3568 				vk::VK_IMAGE_ASPECT_COLOR_BIT,
3569 				0,	// mipLevel
3570 				0,	// arrayLayer
3571 				1	// layerCount
3572 			},
3573 			{ 0, 0, 0 },
3574 			{
3575 				(deUint32)m_imageWidth,
3576 				(deUint32)m_imageHeight,
3577 				1u
3578 			}
3579 		};
3580 
3581 		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &imageBarrier);
3582 		vkd.cmdCopyImageToBuffer(*commandBuffer, *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *dstBuffer, 1, &region);
3583 		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &bufferBarrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
3584 	}
3585 
3586 	VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
3587 	queueRun(vkd, queue, *commandBuffer);
3588 
3589 	{
3590 		void* const	ptr		= mapMemory(vkd, device, *memory, 4 * m_imageWidth * m_imageHeight);
3591 
3592 		vk::invalidateMappedMemoryRange(vkd, device, *memory, 0,  4 * m_imageWidth * m_imageHeight);
3593 
3594 		{
3595 			const deUint8* const			data		= (const deUint8*)ptr;
3596 			const ConstPixelBufferAccess	resAccess	(TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_imageWidth, m_imageHeight, 1, data);
3597 			const ConstPixelBufferAccess&	refAccess	(context.getReferenceImage().getAccess());
3598 
3599 			if (!tcu::intThresholdCompare(context.getLog(), (de::toString(commandIndex) + ":" + getName()).c_str(), (de::toString(commandIndex) + ":" + getName()).c_str(), refAccess, resAccess, UVec4(0), tcu::COMPARE_LOG_ON_ERROR))
3600 				resultCollector.fail(de::toString(commandIndex) + ":" + getName() + " Image comparison failed");
3601 		}
3602 
3603 		vkd.unmapMemory(device, *memory);
3604 	}
3605 }
3606 
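// Scale factors used by the blit commands: 1:1 copy or 2x scaling with nearest filtering.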
3607 enum BlitScale
3608 {
3609 	BLIT_SCALE_20,
3610 	BLIT_SCALE_10,
3611 };
3612 
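// Blits from a separately created source image into the test image with vkCmdBlitImage
// using nearest filtering; for BLIT_SCALE_20 the source is half the size of the
// destination. verify() recomputes the expected pixels from the seed.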
3613 class ImageBlitFromImage : public CmdCommand
3614 {
3615 public:
3616 									ImageBlitFromImage	(deUint32 seed, BlitScale scale, vk::VkImageLayout imageLayout) : m_seed(seed), m_scale(scale), m_imageLayout(imageLayout) {}
3617 									~ImageBlitFromImage	(void) {}
3618 	const char*						getName				(void) const { return "ImageBlitFromImage"; }
3619 
3620 	void							logPrepare			(TestLog& log, size_t commandIndex) const;
3621 	void							prepare				(PrepareContext& context);
3622 	void							logSubmit			(TestLog& log, size_t commandIndex) const;
3623 	void							submit				(SubmitContext& context);
3624 	void							verify				(VerifyContext& context, size_t commandIndex);
3625 
3626 private:
3627 	const deUint32					m_seed;
3628 	const BlitScale					m_scale;
3629 	const vk::VkImageLayout			m_imageLayout;
3630 	deInt32							m_imageWidth;
3631 	deInt32							m_imageHeight;
3632 	vk::VkDeviceSize				m_imageMemorySize;
3633 	deInt32							m_srcImageWidth;
3634 	deInt32							m_srcImageHeight;
3635 	vk::Move<vk::VkImage>			m_srcImage;
3636 	vk::Move<vk::VkDeviceMemory>	m_memory;
3637 };
3638 
3639 void ImageBlitFromImage::logPrepare (TestLog& log, size_t commandIndex) const
3640 {
3641 	log << TestLog::Message << commandIndex << ":" << getName() << " Allocate source image for image to image blit." << TestLog::EndMessage;
3642 }
3643 
3644 void ImageBlitFromImage::prepare (PrepareContext& context)
3645 {
3646 	const vk::InstanceInterface&	vki				= context.getContext().getInstanceInterface();
3647 	const vk::DeviceInterface&		vkd				= context.getContext().getDeviceInterface();
3648 	const vk::VkPhysicalDevice		physicalDevice	= context.getContext().getPhysicalDevice();
3649 	const vk::VkDevice				device			= context.getContext().getDevice();
3650 	const vk::VkQueue				queue			= context.getContext().getQueue();
3651 	const vk::VkCommandPool			commandPool		= context.getContext().getCommandPool();
3652 	const vector<deUint32>&			queueFamilies	= context.getContext().getQueueFamilies();
3653 
3654 	m_imageWidth		= context.getImageWidth();
3655 	m_imageHeight		= context.getImageHeight();
3656 	m_imageMemorySize	= context.getImageMemorySize();
3657 
3658 	if (m_scale == BLIT_SCALE_10)
3659 	{
3660 		m_srcImageWidth			= m_imageWidth;
3661 		m_srcImageHeight		= m_imageHeight;
3662 	}
3663 	else if (m_scale == BLIT_SCALE_20)
3664 	{
3665 		m_srcImageWidth			= m_imageWidth / 2;
3666 		m_srcImageHeight		= m_imageHeight / 2;
3667 	}
3668 	else
3669 		DE_FATAL("Unsupported scale");
3670 
3671 	{
3672 		const vk::VkImageCreateInfo	createInfo =
3673 		{
3674 			vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
3675 			DE_NULL,
3676 
3677 			0,
3678 			vk::VK_IMAGE_TYPE_2D,
3679 			vk::VK_FORMAT_R8G8B8A8_UNORM,
3680 			{
3681 				(deUint32)m_srcImageWidth,
3682 				(deUint32)m_srcImageHeight,
3683 				1u,
3684 			},
3685 			1, 1, // mipLevels, arrayLayers
3686 			vk::VK_SAMPLE_COUNT_1_BIT,
3687 
3688 			vk::VK_IMAGE_TILING_OPTIMAL,
3689 			vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
3690 			vk::VK_SHARING_MODE_EXCLUSIVE,
3691 
3692 			(deUint32)queueFamilies.size(),
3693 			&queueFamilies[0],
3694 			vk::VK_IMAGE_LAYOUT_UNDEFINED
3695 		};
3696 
3697 		m_srcImage = vk::createImage(vkd, device, &createInfo);
3698 	}
3699 
3700 	m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_srcImage, 0);
3701 
3702 	{
3703 		const vk::Unique<vk::VkBuffer>			srcBuffer		(createBuffer(vkd, device, 4 * m_srcImageWidth * m_srcImageHeight, vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
3704 		const vk::Unique<vk::VkDeviceMemory>	memory			(bindBufferMemory(vki, vkd, physicalDevice, device, *srcBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
3705 		const vk::Unique<vk::VkCommandBuffer>	commandBuffer	(createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
3706 		const vk::VkImageMemoryBarrier			preImageBarrier	=
3707 		{
3708 			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
3709 			DE_NULL,
3710 
3711 			0,
3712 			vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3713 
3714 			vk::VK_IMAGE_LAYOUT_UNDEFINED,
3715 			vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
3716 
3717 			VK_QUEUE_FAMILY_IGNORED,
3718 			VK_QUEUE_FAMILY_IGNORED,
3719 
3720 			*m_srcImage,
3721 			{
3722 				vk::VK_IMAGE_ASPECT_COLOR_BIT,
3723 				0,	// Mip level
3724 				1,	// Mip level count
3725 				0,	// Layer
3726 				1	// Layer count
3727 			}
3728 		};
3729 		const vk::VkImageMemoryBarrier			postImageBarrier =
3730 		{
3731 			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
3732 			DE_NULL,
3733 
3734 			vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3735 			0,
3736 
3737 			vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
3738 			vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
3739 
3740 			VK_QUEUE_FAMILY_IGNORED,
3741 			VK_QUEUE_FAMILY_IGNORED,
3742 
3743 			*m_srcImage,
3744 			{
3745 				vk::VK_IMAGE_ASPECT_COLOR_BIT,
3746 				0,	// Mip level
3747 				1,	// Mip level count
3748 				0,	// Layer
3749 				1	// Layer count
3750 			}
3751 		};
3752 		const vk::VkBufferImageCopy				region				=
3753 		{
3754 			0,
3755 			0, 0,
3756 			{
3757 				vk::VK_IMAGE_ASPECT_COLOR_BIT,
3758 				0,	// mipLevel
3759 				0,	// arrayLayer
3760 				1	// layerCount
3761 			},
3762 			{ 0, 0, 0 },
3763 			{
3764 				(deUint32)m_srcImageWidth,
3765 				(deUint32)m_srcImageHeight,
3766 				1u
3767 			}
3768 		};
3769 
3770 		{
3771 			void* const	ptr	= mapMemory(vkd, device, *memory, 4 * m_srcImageWidth * m_srcImageHeight);
3772 			de::Random	rng	(m_seed);
3773 
3774 			{
3775 				deUint8* const	data = (deUint8*)ptr;
3776 
3777 				for (size_t ndx = 0; ndx < (size_t)(4 * m_srcImageWidth * m_srcImageHeight); ndx++)
3778 					data[ndx] = rng.getUint8();
3779 			}
3780 
3781 			vk::flushMappedMemoryRange(vkd, device, *memory, 0, 4 * m_srcImageWidth * m_srcImageHeight);
3782 			vkd.unmapMemory(device, *memory);
3783 		}
3784 
3785 		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &preImageBarrier);
3786 		vkd.cmdCopyBufferToImage(*commandBuffer, *srcBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
3787 		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &postImageBarrier);
3788 
3789 		VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
3790 		queueRun(vkd, queue, *commandBuffer);
3791 	}
3792 }
3793 
3794 void ImageBlitFromImage::logSubmit (TestLog& log, size_t commandIndex) const
3795 {
3796 	log << TestLog::Message << commandIndex << ":" << getName() << " Blit from another image" << (m_scale == BLIT_SCALE_20 ? " scale 2x" : "")  << TestLog::EndMessage;
3797 }
3798 
3799 void ImageBlitFromImage::submit (SubmitContext& context)
3800 {
3801 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
3802 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
3803 	const vk::VkImageBlit		region			=
3804 	{
3805 		// Src
3806 		{
3807 			vk::VK_IMAGE_ASPECT_COLOR_BIT,
3808 			0,	// mipLevel
3809 			0,	// arrayLayer
3810 			1	// layerCount
3811 		},
3812 		{
3813 			{ 0, 0, 0 },
3814 			{
3815 				m_srcImageWidth,
3816 				m_srcImageHeight,
3817 				1
3818 			},
3819 		},
3820 
3821 		// Dst
3822 		{
3823 			vk::VK_IMAGE_ASPECT_COLOR_BIT,
3824 			0,	// mipLevel
3825 			0,	// arrayLayer
3826 			1	// layerCount
3827 		},
3828 		{
3829 			{ 0, 0, 0 },
3830 			{
3831 				m_imageWidth,
3832 				m_imageHeight,
3833 				1u
3834 			}
3835 		}
3836 	};
3837 	vkd.cmdBlitImage(commandBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, context.getImage(), m_imageLayout, 1, &region, vk::VK_FILTER_NEAREST);
3838 }
3839 
3840 void ImageBlitFromImage::verify (VerifyContext& context, size_t)
3841 {
3842 	ReferenceMemory&	reference	(context.getReference());
3843 	de::Random			rng			(m_seed);
3844 
3845 	reference.setUndefined(0, (size_t)m_imageMemorySize);
3846 
3847 	{
3848 		const PixelBufferAccess&	refAccess	(context.getReferenceImage().getAccess());
3849 
3850 		if (m_scale == BLIT_SCALE_10)
3851 		{
3852 			for (deInt32 y = 0; y < m_imageHeight; y++)
3853 			for (deInt32 x = 0; x < m_imageWidth; x++)
3854 			{
3855 				const deUint8 r8 = rng.getUint8();
3856 				const deUint8 g8 = rng.getUint8();
3857 				const deUint8 b8 = rng.getUint8();
3858 				const deUint8 a8 = rng.getUint8();
3859 
3860 				refAccess.setPixel(UVec4(r8, g8, b8, a8), x, y);
3861 			}
3862 		}
3863 		else if (m_scale == BLIT_SCALE_20)
3864 		{
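			// Regenerate the half-size source image from the seed and sample it at pixel
			// centres with nearest filtering, matching the 2x blit performed on the GPU.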
3865 			tcu::TextureLevel	source	(TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_srcImageWidth, m_srcImageHeight);
3866 			const float			xscale	= ((float)m_srcImageWidth)  / (float)m_imageWidth;
3867 			const float			yscale	= ((float)m_srcImageHeight) / (float)m_imageHeight;
3868 
3869 			for (deInt32 y = 0; y < m_srcImageHeight; y++)
3870 			for (deInt32 x = 0; x < m_srcImageWidth; x++)
3871 			{
3872 				const deUint8 r8 = rng.getUint8();
3873 				const deUint8 g8 = rng.getUint8();
3874 				const deUint8 b8 = rng.getUint8();
3875 				const deUint8 a8 = rng.getUint8();
3876 
3877 				source.getAccess().setPixel(UVec4(r8, g8, b8, a8), x, y);
3878 			}
3879 
3880 			for (deInt32 y = 0; y < m_imageHeight; y++)
3881 			for (deInt32 x = 0; x < m_imageWidth; x++)
3882 				refAccess.setPixel(source.getAccess().getPixelUint(int((float(x) + 0.5f) * xscale), int((float(y) + 0.5f) * yscale)), x, y);
3883 		}
3884 		else
3885 			DE_FATAL("Unsupported scale");
3886 	}
3887 }
3888 
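// Blits the test image into a freshly created destination image with vkCmdBlitImage
// using nearest filtering; for BLIT_SCALE_20 the destination is twice the size.
// verify() reads the destination back and compares it against an upscaled reference.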
3889 class ImageBlitToImage : public CmdCommand
3890 {
3891 public:
3892 									ImageBlitToImage	(BlitScale scale, vk::VkImageLayout imageLayout) : m_scale(scale), m_imageLayout(imageLayout) {}
3893 									~ImageBlitToImage	(void) {}
3894 	const char*						getName				(void) const { return "ImageBlitToImage"; }
3895 
3896 	void							logPrepare			(TestLog& log, size_t commandIndex) const;
3897 	void							prepare				(PrepareContext& context);
3898 	void							logSubmit			(TestLog& log, size_t commandIndex) const;
3899 	void							submit				(SubmitContext& context);
3900 	void							verify				(VerifyContext& context, size_t commandIndex);
3901 
3902 private:
3903 	const BlitScale					m_scale;
3904 	const vk::VkImageLayout			m_imageLayout;
3905 	deInt32							m_imageWidth;
3906 	deInt32							m_imageHeight;
3907 	vk::VkDeviceSize				m_imageMemorySize;
3908 	deInt32							m_dstImageWidth;
3909 	deInt32							m_dstImageHeight;
3910 	vk::Move<vk::VkImage>			m_dstImage;
3911 	vk::Move<vk::VkDeviceMemory>	m_memory;
3912 };
3913 
3914 void ImageBlitToImage::logPrepare (TestLog& log, size_t commandIndex) const
3915 {
3916 	log << TestLog::Message << commandIndex << ":" << getName() << " Allocate destination image for image to image blit." << TestLog::EndMessage;
3917 }
3918 
3919 void ImageBlitToImage::prepare (PrepareContext& context)
3920 {
3921 	const vk::InstanceInterface&	vki				= context.getContext().getInstanceInterface();
3922 	const vk::DeviceInterface&		vkd				= context.getContext().getDeviceInterface();
3923 	const vk::VkPhysicalDevice		physicalDevice	= context.getContext().getPhysicalDevice();
3924 	const vk::VkDevice				device			= context.getContext().getDevice();
3925 	const vk::VkQueue				queue			= context.getContext().getQueue();
3926 	const vk::VkCommandPool			commandPool		= context.getContext().getCommandPool();
3927 	const vector<deUint32>&			queueFamilies	= context.getContext().getQueueFamilies();
3928 
3929 	m_imageWidth		= context.getImageWidth();
3930 	m_imageHeight		= context.getImageHeight();
3931 	m_imageMemorySize	= context.getImageMemorySize();
3932 
3933 	if (m_scale == BLIT_SCALE_10)
3934 	{
3935 		m_dstImageWidth		= context.getImageWidth();
3936 		m_dstImageHeight	= context.getImageHeight();
3937 	}
3938 	else if (m_scale == BLIT_SCALE_20)
3939 	{
3940 		m_dstImageWidth		= context.getImageWidth() * 2;
3941 		m_dstImageHeight	= context.getImageHeight() * 2;
3942 	}
3943 	else
3944 		DE_FATAL("Unsupported blit scale");
3945 
3946 	{
3947 		const vk::VkImageCreateInfo	createInfo =
3948 		{
3949 			vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
3950 			DE_NULL,
3951 
3952 			0,
3953 			vk::VK_IMAGE_TYPE_2D,
3954 			vk::VK_FORMAT_R8G8B8A8_UNORM,
3955 			{
3956 				(deUint32)m_dstImageWidth,
3957 				(deUint32)m_dstImageHeight,
3958 				1u,
3959 			},
3960 			1, 1, // mipLevels, arrayLayers
3961 			vk::VK_SAMPLE_COUNT_1_BIT,
3962 
3963 			vk::VK_IMAGE_TILING_OPTIMAL,
3964 			vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
3965 			vk::VK_SHARING_MODE_EXCLUSIVE,
3966 
3967 			(deUint32)queueFamilies.size(),
3968 			&queueFamilies[0],
3969 			vk::VK_IMAGE_LAYOUT_UNDEFINED
3970 		};
3971 
3972 		m_dstImage = vk::createImage(vkd, device, &createInfo);
3973 	}
3974 
3975 	m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_dstImage, 0);
3976 
3977 	{
3978 		const vk::Unique<vk::VkCommandBuffer>	commandBuffer	(createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
3979 		const vk::VkImageMemoryBarrier			barrier			=
3980 		{
3981 			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
3982 			DE_NULL,
3983 
3984 			0,
3985 			vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3986 
3987 			vk::VK_IMAGE_LAYOUT_UNDEFINED,
3988 			vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
3989 
3990 			VK_QUEUE_FAMILY_IGNORED,
3991 			VK_QUEUE_FAMILY_IGNORED,
3992 
3993 			*m_dstImage,
3994 			{
3995 				vk::VK_IMAGE_ASPECT_COLOR_BIT,
3996 				0,	// Mip level
3997 				1,	// Mip level count
3998 				0,	// Layer
3999 				1	// Layer count
4000 			}
4001 		};
4002 
4003 		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &barrier);
4004 
4005 		VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
4006 		queueRun(vkd, queue, *commandBuffer);
4007 	}
4008 }
4009 
4010 void ImageBlitToImage::logSubmit (TestLog& log, size_t commandIndex) const
4011 {
4012 	log << TestLog::Message << commandIndex << ":" << getName() << " Blit image to another image" << (m_scale == BLIT_SCALE_20 ? " scale 2x" : "")  << TestLog::EndMessage;
4013 }
4014 
4015 void ImageBlitToImage::submit (SubmitContext& context)
4016 {
4017 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
4018 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
4019 	const vk::VkImageBlit		region			=
4020 	{
4021 		// Src
4022 		{
4023 			vk::VK_IMAGE_ASPECT_COLOR_BIT,
4024 			0,	// mipLevel
4025 			0,	// arrayLayer
4026 			1	// layerCount
4027 		},
4028 		{
4029 			{ 0, 0, 0 },
4030 			{
4031 				m_imageWidth,
4032 				m_imageHeight,
4033 				1
4034 			},
4035 		},
4036 
4037 		// Dst
4038 		{
4039 			vk::VK_IMAGE_ASPECT_COLOR_BIT,
4040 			0,	// mipLevel
4041 			0,	// arrayLayer
4042 			1	// layerCount
4043 		},
4044 		{
4045 			{ 0, 0, 0 },
4046 			{
4047 				m_dstImageWidth,
4048 				m_dstImageHeight,
4049 				1u
4050 			}
4051 		}
4052 	};
4053 	vkd.cmdBlitImage(commandBuffer, context.getImage(), m_imageLayout, *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region, vk::VK_FILTER_NEAREST);
4054 }
4055 
4056 void ImageBlitToImage::verify (VerifyContext& context, size_t commandIndex)
4057 {
4058 	tcu::ResultCollector&					resultCollector	(context.getResultCollector());
4059 	const vk::InstanceInterface&			vki				= context.getContext().getInstanceInterface();
4060 	const vk::DeviceInterface&				vkd				= context.getContext().getDeviceInterface();
4061 	const vk::VkPhysicalDevice				physicalDevice	= context.getContext().getPhysicalDevice();
4062 	const vk::VkDevice						device			= context.getContext().getDevice();
4063 	const vk::VkQueue						queue			= context.getContext().getQueue();
4064 	const vk::VkCommandPool					commandPool		= context.getContext().getCommandPool();
4065 	const vk::Unique<vk::VkCommandBuffer>	commandBuffer	(createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
4066 	const vector<deUint32>&					queueFamilies	= context.getContext().getQueueFamilies();
4067 	const vk::Unique<vk::VkBuffer>			dstBuffer		(createBuffer(vkd, device, 4 * m_dstImageWidth * m_dstImageHeight, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
4068 	const vk::Unique<vk::VkDeviceMemory>	memory			(bindBufferMemory(vki, vkd, physicalDevice, device, *dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
4069 	{
4070 		const vk::VkImageMemoryBarrier		imageBarrier	=
4071 		{
4072 			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
4073 			DE_NULL,
4074 
4075 			vk::VK_ACCESS_TRANSFER_WRITE_BIT,
4076 			vk::VK_ACCESS_TRANSFER_READ_BIT,
4077 
4078 			vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
4079 			vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
4080 
4081 			VK_QUEUE_FAMILY_IGNORED,
4082 			VK_QUEUE_FAMILY_IGNORED,
4083 
4084 			*m_dstImage,
4085 			{
4086 				vk::VK_IMAGE_ASPECT_COLOR_BIT,
4087 				0,	// Mip level
4088 				1,	// Mip level count
4089 				0,	// Layer
4090 				1	// Layer count
4091 			}
4092 		};
4093 		const vk::VkBufferMemoryBarrier bufferBarrier =
4094 		{
4095 			vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
4096 			DE_NULL,
4097 
4098 			vk::VK_ACCESS_TRANSFER_WRITE_BIT,
4099 			vk::VK_ACCESS_HOST_READ_BIT,
4100 
4101 			VK_QUEUE_FAMILY_IGNORED,
4102 			VK_QUEUE_FAMILY_IGNORED,
4103 			*dstBuffer,
4104 			0,
4105 			VK_WHOLE_SIZE
4106 		};
4107 		const vk::VkBufferImageCopy	region =
4108 		{
4109 			0,
4110 			0, 0,
4111 			{
4112 				vk::VK_IMAGE_ASPECT_COLOR_BIT,
4113 				0,	// mipLevel
4114 				0,	// arrayLayer
4115 				1	// layerCount
4116 			},
4117 			{ 0, 0, 0 },
4118 			{
4119 				(deUint32)m_dstImageWidth,
4120 				(deUint32)m_dstImageHeight,
4121 				1
4122 			}
4123 		};
4124 
4125 		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &imageBarrier);
4126 		vkd.cmdCopyImageToBuffer(*commandBuffer, *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *dstBuffer, 1, &region);
4127 		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &bufferBarrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
4128 	}
4129 
4130 	VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
4131 	queueRun(vkd, queue, *commandBuffer);
4132 
4133 	{
4134 		void* const	ptr		= mapMemory(vkd, device, *memory, 4 * m_dstImageWidth * m_dstImageHeight);
4135 
4136 		vk::invalidateMappedMemoryRange(vkd, device, *memory, 0,  4 * m_dstImageWidth * m_dstImageHeight);
4137 
4138 		if (m_scale == BLIT_SCALE_10)
4139 		{
4140 			const deUint8* const			data		= (const deUint8*)ptr;
4141 			const ConstPixelBufferAccess	resAccess	(TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_dstImageWidth, m_dstImageHeight, 1, data);
4142 			const ConstPixelBufferAccess&	refAccess	(context.getReferenceImage().getAccess());
4143 
4144 			if (!tcu::intThresholdCompare(context.getLog(), (de::toString(commandIndex) + ":" + getName()).c_str(), (de::toString(commandIndex) + ":" + getName()).c_str(), refAccess, resAccess, UVec4(0), tcu::COMPARE_LOG_ON_ERROR))
4145 				resultCollector.fail(de::toString(commandIndex) + ":" + getName() + " Image comparison failed");
4146 		}
4147 		else if (m_scale == BLIT_SCALE_20)
4148 		{
4149 			const deUint8* const			data		= (const deUint8*)ptr;
4150 			const ConstPixelBufferAccess	resAccess	(TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_dstImageWidth, m_dstImageHeight, 1, data);
4151 			tcu::TextureLevel				reference	(TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_dstImageWidth, m_dstImageHeight, 1);
4152 
4153 			{
4154 				const ConstPixelBufferAccess&	refAccess	(context.getReferenceImage().getAccess());
4155 
4156 				for (deInt32 y = 0; y < m_dstImageHeight; y++)
4157 				for (deInt32 x = 0; x < m_dstImageWidth; x++)
4158 				{
4159 					reference.getAccess().setPixel(refAccess.getPixel(x/2, y/2), x, y);
4160 				}
4161 			}
4162 
4163 			if (!tcu::intThresholdCompare(context.getLog(), (de::toString(commandIndex) + ":" + getName()).c_str(), (de::toString(commandIndex) + ":" + getName()).c_str(), reference.getAccess(), resAccess, UVec4(0), tcu::COMPARE_LOG_ON_ERROR))
4164 				resultCollector.fail(de::toString(commandIndex) + ":" + getName() + " Image comparison failed");
4165 		}
4166 		else
4167 			DE_FATAL("Unknown scale");
4168 
4169 		vkd.unmapMemory(device, *memory);
4170 	}
4171 }
4172 
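// Wraps a PrepareContext together with the render pass, framebuffer and render target
// size so that render pass commands can allocate their resources during prepare().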
4173 class PrepareRenderPassContext
4174 {
4175 public:
4176 								PrepareRenderPassContext	(PrepareContext&	context,
4177 															 vk::VkRenderPass	renderPass,
4178 															 vk::VkFramebuffer	framebuffer,
4179 															 deInt32			targetWidth,
4180 															 deInt32			targetHeight)
4181 		: m_context			(context)
4182 		, m_renderPass		(renderPass)
4183 		, m_framebuffer		(framebuffer)
4184 		, m_targetWidth		(targetWidth)
4185 		, m_targetHeight	(targetHeight)
4186 	{
4187 	}
4188 
4189 	const Memory&									getMemory					(void) const { return m_context.getMemory(); }
4190 	const Context&									getContext					(void) const { return m_context.getContext(); }
4191 	const vk::ProgramCollection<vk::ProgramBinary>&	getBinaryCollection			(void) const { return m_context.getBinaryCollection(); }
4192 
4193 	vk::VkBuffer				getBuffer					(void) const { return m_context.getBuffer(); }
4194 	vk::VkDeviceSize			getBufferSize				(void) const { return m_context.getBufferSize(); }
4195 
4196 	vk::VkImage					getImage					(void) const { return m_context.getImage(); }
4197 	deInt32						getImageWidth				(void) const { return m_context.getImageWidth(); }
4198 	deInt32						getImageHeight				(void) const { return m_context.getImageHeight(); }
4199 	vk::VkImageLayout			getImageLayout				(void) const { return m_context.getImageLayout(); }
4200 
4201 	deInt32						getTargetWidth				(void) const { return m_targetWidth; }
4202 	deInt32						getTargetHeight				(void) const { return m_targetHeight; }
4203 
4204 	vk::VkRenderPass			getRenderPass				(void) const { return m_renderPass; }
4205 
4206 private:
4207 	PrepareContext&				m_context;
4208 	const vk::VkRenderPass		m_renderPass;
4209 	const vk::VkFramebuffer		m_framebuffer;
4210 	const deInt32				m_targetWidth;
4211 	const deInt32				m_targetHeight;
4212 };
4213 
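// Verification context for commands recorded inside a render pass; in addition to the
// memory reference it carries a reference color target of the render target size.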
4214 class VerifyRenderPassContext
4215 {
4216 public:
4217 							VerifyRenderPassContext		(VerifyContext&			context,
4218 														 deInt32				targetWidth,
4219 														 deInt32				targetHeight)
4220 		: m_context			(context)
4221 		, m_referenceTarget	(TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), targetWidth, targetHeight)
4222 	{
4223 	}
4224 
4225 	const Context&			getContext			(void) const { return m_context.getContext(); }
4226 	TestLog&				getLog				(void) const { return m_context.getLog(); }
4227 	tcu::ResultCollector&	getResultCollector	(void) const { return m_context.getResultCollector(); }
4228 
4229 	TextureLevel&			getReferenceTarget	(void) { return m_referenceTarget; }
4230 
4231 	ReferenceMemory&		getReference		(void) { return m_context.getReference(); }
4232 	TextureLevel&			getReferenceImage	(void) { return m_context.getReferenceImage(); }
4233 
4234 private:
4235 	VerifyContext&	m_context;
4236 	TextureLevel	m_referenceTarget;
4237 };
4238 
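// Interface for commands that are recorded inside a render pass instance. The default
// implementations do nothing, so subclasses override only the phases they need.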
4239 class RenderPassCommand
4240 {
4241 public:
4242 	virtual				~RenderPassCommand	(void) {}
4243 	virtual const char*	getName				(void) const = 0;
4244 
4245 	// Log things that are done during prepare
4246 	virtual void		logPrepare			(TestLog&, size_t) const {}
4247 	// Log submitted calls etc.
4248 	virtual void		logSubmit			(TestLog&, size_t) const {}
4249 
4250 	// Allocate vulkan resources and prepare for submit.
4251 	virtual void		prepare				(PrepareRenderPassContext&) {}
4252 
4253 	// Submit commands to command buffer.
4254 	virtual void		submit				(SubmitContext&) {}
4255 
4256 	// Verify results
4257 	virtual void		verify				(VerifyRenderPassContext&, size_t) {}
4258 };
4259 
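// Command that creates a color-only render pass and framebuffer, records the contained
// RenderPassCommands inside it, and verifies both the memory reference and the rendered
// color target.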
4260 class SubmitRenderPass : public CmdCommand
4261 {
4262 public:
4263 				SubmitRenderPass	(const vector<RenderPassCommand*>& commands);
4264 				~SubmitRenderPass	(void);
4265 	const char*	getName				(void) const { return "SubmitRenderPass"; }
4266 
4267 	void		logPrepare			(TestLog&, size_t) const;
4268 	void		logSubmit			(TestLog&, size_t) const;
4269 
4270 	void		prepare				(PrepareContext&);
4271 	void		submit				(SubmitContext&);
4272 
4273 	void		verify				(VerifyContext&, size_t);
4274 
4275 private:
4276 	const deInt32					m_targetWidth;
4277 	const deInt32					m_targetHeight;
4278 	vk::Move<vk::VkRenderPass>		m_renderPass;
4279 	vk::Move<vk::VkDeviceMemory>	m_colorTargetMemory;
4280 	de::MovePtr<vk::Allocation>		m_colorTargetMemory2;
4281 	vk::Move<vk::VkImage>			m_colorTarget;
4282 	vk::Move<vk::VkImageView>		m_colorTargetView;
4283 	vk::Move<vk::VkFramebuffer>		m_framebuffer;
4284 	vector<RenderPassCommand*>		m_commands;
4285 };
4286 
4287 SubmitRenderPass::SubmitRenderPass (const vector<RenderPassCommand*>& commands)
4288 	: m_targetWidth		(256)
4289 	, m_targetHeight	(256)
4290 	, m_commands		(commands)
4291 {
4292 }
4293 
4294 SubmitRenderPass::~SubmitRenderPass()
4295 {
4296 	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4297 		delete m_commands[cmdNdx];
4298 }
4299 
4300 void SubmitRenderPass::logPrepare (TestLog& log, size_t commandIndex) const
4301 {
4302 	const string				sectionName	(de::toString(commandIndex) + ":" + getName());
4303 	const tcu::ScopedLogSection	section		(log, sectionName, sectionName);
4304 
4305 	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4306 	{
4307 		RenderPassCommand& command = *m_commands[cmdNdx];
4308 		command.logPrepare(log, cmdNdx);
4309 	}
4310 }
4311 
4312 void SubmitRenderPass::logSubmit (TestLog& log, size_t commandIndex) const
4313 {
4314 	const string				sectionName	(de::toString(commandIndex) + ":" + getName());
4315 	const tcu::ScopedLogSection	section		(log, sectionName, sectionName);
4316 
4317 	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4318 	{
4319 		RenderPassCommand& command = *m_commands[cmdNdx];
4320 		command.logSubmit(log, cmdNdx);
4321 	}
4322 }
4323 
4324 void SubmitRenderPass::prepare (PrepareContext& context)
4325 {
4326 	const vk::InstanceInterface&			vki				= context.getContext().getInstanceInterface();
4327 	const vk::DeviceInterface&				vkd				= context.getContext().getDeviceInterface();
4328 	const vk::VkPhysicalDevice				physicalDevice	= context.getContext().getPhysicalDevice();
4329 	const vk::VkDevice						device			= context.getContext().getDevice();
4330 	const vector<deUint32>&					queueFamilies	= context.getContext().getQueueFamilies();
4331 
4332 	const vk::VkAttachmentReference	colorAttachments[]	=
4333 	{
4334 		{ 0, vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL }
4335 	};
4336 	const vk::VkSubpassDescription	subpass				=
4337 	{
4338 		0u,
4339 		vk::VK_PIPELINE_BIND_POINT_GRAPHICS,
4340 
4341 		0u,
4342 		DE_NULL,
4343 
4344 		DE_LENGTH_OF_ARRAY(colorAttachments),
4345 		colorAttachments,
4346 		DE_NULL,
4347 		DE_NULL,
4348 		0u,
4349 		DE_NULL
4350 	};
4351 	const vk::VkAttachmentDescription attachment =
4352 	{
4353 		0u,
4354 		vk::VK_FORMAT_R8G8B8A8_UNORM,
4355 		vk::VK_SAMPLE_COUNT_1_BIT,
4356 
4357 		vk::VK_ATTACHMENT_LOAD_OP_CLEAR,
4358 		vk::VK_ATTACHMENT_STORE_OP_STORE,
4359 
4360 		vk::VK_ATTACHMENT_LOAD_OP_DONT_CARE,
4361 		vk::VK_ATTACHMENT_STORE_OP_DONT_CARE,
4362 
4363 		vk::VK_IMAGE_LAYOUT_UNDEFINED,
4364 		vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL
4365 	};
4366 	{
4367 		const vk::VkImageCreateInfo createInfo =
4368 		{
4369 			vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
4370 			DE_NULL,
4371 			0u,
4372 
4373 			vk::VK_IMAGE_TYPE_2D,
4374 			vk::VK_FORMAT_R8G8B8A8_UNORM,
4375 			{ (deUint32)m_targetWidth, (deUint32)m_targetHeight, 1u },
4376 			1u,
4377 			1u,
4378 			vk::VK_SAMPLE_COUNT_1_BIT,
4379 			vk::VK_IMAGE_TILING_OPTIMAL,
4380 			vk::VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
4381 			vk::VK_SHARING_MODE_EXCLUSIVE,
4382 			(deUint32)queueFamilies.size(),
4383 			&queueFamilies[0],
4384 			vk::VK_IMAGE_LAYOUT_UNDEFINED
4385 		};
4386 
4387 		m_colorTarget = vk::createImage(vkd, device, &createInfo);
4388 	}
4389 
4390 	m_colorTargetMemory = bindImageMemory(vki, vkd, physicalDevice, device, *m_colorTarget, 0);
4391 
4392 	{
4393 		const vk::VkImageViewCreateInfo createInfo =
4394 		{
4395 			vk::VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
4396 			DE_NULL,
4397 
4398 			0u,
4399 			*m_colorTarget,
4400 			vk::VK_IMAGE_VIEW_TYPE_2D,
4401 			vk::VK_FORMAT_R8G8B8A8_UNORM,
4402 			{
4403 				vk::VK_COMPONENT_SWIZZLE_R,
4404 				vk::VK_COMPONENT_SWIZZLE_G,
4405 				vk::VK_COMPONENT_SWIZZLE_B,
4406 				vk::VK_COMPONENT_SWIZZLE_A
4407 			},
4408 			{
4409 				vk::VK_IMAGE_ASPECT_COLOR_BIT,
4410 				0u,
4411 				1u,
4412 				0u,
4413 				1u
4414 			}
4415 		};
4416 
4417 		m_colorTargetView = vk::createImageView(vkd, device, &createInfo);
4418 	}
4419 	{
4420 		const vk::VkRenderPassCreateInfo createInfo =
4421 		{
4422 			vk::VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
4423 			DE_NULL,
4424 			0u,
4425 
4426 			1u,
4427 			&attachment,
4428 
4429 			1u,
4430 			&subpass,
4431 
4432 			0,
4433 			DE_NULL
4434 		};
4435 
4436 		m_renderPass = vk::createRenderPass(vkd, device, &createInfo);
4437 	}
4438 
4439 	{
4440 		const vk::VkImageView				imageViews[]	=
4441 		{
4442 			*m_colorTargetView
4443 		};
4444 		const vk::VkFramebufferCreateInfo	createInfo		=
4445 		{
4446 			vk::VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
4447 			DE_NULL,
4448 			0u,
4449 
4450 			*m_renderPass,
4451 			DE_LENGTH_OF_ARRAY(imageViews),
4452 			imageViews,
4453 			(deUint32)m_targetWidth,
4454 			(deUint32)m_targetHeight,
4455 			1u
4456 		};
4457 
4458 		m_framebuffer = vk::createFramebuffer(vkd, device, &createInfo);
4459 	}
4460 
4461 	{
4462 		PrepareRenderPassContext renderpassContext (context, *m_renderPass, *m_framebuffer, m_targetWidth, m_targetHeight);
4463 
4464 		for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4465 		{
4466 			RenderPassCommand& command = *m_commands[cmdNdx];
4467 			command.prepare(renderpassContext);
4468 		}
4469 	}
4470 }
4471 
4472 void SubmitRenderPass::submit (SubmitContext& context)
4473 {
4474 	const vk::DeviceInterface&		vkd				= context.getContext().getDeviceInterface();
4475 	const vk::VkCommandBuffer		commandBuffer	= context.getCommandBuffer();
4476 	const vk::VkClearValue			clearValue		= vk::makeClearValueColorF32(0.0f, 0.0f, 0.0f, 1.0f);
4477 
4478 	const vk::VkRenderPassBeginInfo	beginInfo		=
4479 	{
4480 		vk::VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
4481 		DE_NULL,
4482 
4483 		*m_renderPass,
4484 		*m_framebuffer,
4485 
4486 		{ { 0, 0 },  { (deUint32)m_targetWidth, (deUint32)m_targetHeight } },
4487 		1u,
4488 		&clearValue
4489 	};
4490 
4491 	vkd.cmdBeginRenderPass(commandBuffer, &beginInfo, vk::VK_SUBPASS_CONTENTS_INLINE);
4492 
4493 	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4494 	{
4495 		RenderPassCommand& command = *m_commands[cmdNdx];
4496 
4497 		command.submit(context);
4498 	}
4499 
4500 	vkd.cmdEndRenderPass(commandBuffer);
4501 }
4502 
4503 void SubmitRenderPass::verify (VerifyContext& context, size_t commandIndex)
4504 {
4505 	TestLog&					log				(context.getLog());
4506 	tcu::ResultCollector&		resultCollector	(context.getResultCollector());
4507 	const string				sectionName		(de::toString(commandIndex) + ":" + getName());
4508 	const tcu::ScopedLogSection	section			(log, sectionName, sectionName);
4509 	VerifyRenderPassContext		verifyContext	(context, m_targetWidth, m_targetHeight);
4510 
4511 	tcu::clear(verifyContext.getReferenceTarget().getAccess(), Vec4(0.0f, 0.0f, 0.0f, 1.0f));
4512 
4513 	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4514 	{
4515 		RenderPassCommand& command = *m_commands[cmdNdx];
4516 		command.verify(verifyContext, cmdNdx);
4517 	}
4518 
4519 	{
4520 		const vk::InstanceInterface&			vki				= context.getContext().getInstanceInterface();
4521 		const vk::DeviceInterface&				vkd				= context.getContext().getDeviceInterface();
4522 		const vk::VkPhysicalDevice				physicalDevice	= context.getContext().getPhysicalDevice();
4523 		const vk::VkDevice						device			= context.getContext().getDevice();
4524 		const vk::VkQueue						queue			= context.getContext().getQueue();
4525 		const vk::VkCommandPool					commandPool		= context.getContext().getCommandPool();
4526 		const vk::Unique<vk::VkCommandBuffer>	commandBuffer	(createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
4527 		const vector<deUint32>&					queueFamilies	= context.getContext().getQueueFamilies();
4528 		const vk::Unique<vk::VkBuffer>			dstBuffer		(createBuffer(vkd, device, 4 * m_targetWidth * m_targetHeight, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
4529 		const vk::Unique<vk::VkDeviceMemory>	memory			(bindBufferMemory(vki, vkd, physicalDevice, device, *dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
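		// Read back the color target: transition it for transfer reads, copy it into the
		// host-visible buffer and make the transfer write visible to host reads before mapping.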
4530 		{
4531 			const vk::VkImageMemoryBarrier		imageBarrier	=
4532 			{
4533 				vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
4534 				DE_NULL,
4535 
4536 				vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
4537 				vk::VK_ACCESS_TRANSFER_READ_BIT,
4538 
4539 				vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
4540 				vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
4541 
4542 				VK_QUEUE_FAMILY_IGNORED,
4543 				VK_QUEUE_FAMILY_IGNORED,
4544 
4545 				*m_colorTarget,
4546 				{
4547 					vk::VK_IMAGE_ASPECT_COLOR_BIT,
4548 					0,	// Mip level
4549 					1,	// Mip level count
4550 					0,	// Layer
4551 					1	// Layer count
4552 				}
4553 			};
4554 			const vk::VkBufferMemoryBarrier bufferBarrier =
4555 			{
4556 				vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
4557 				DE_NULL,
4558 
4559 				vk::VK_ACCESS_TRANSFER_WRITE_BIT,
4560 				vk::VK_ACCESS_HOST_READ_BIT,
4561 
4562 				VK_QUEUE_FAMILY_IGNORED,
4563 				VK_QUEUE_FAMILY_IGNORED,
4564 				*dstBuffer,
4565 				0,
4566 				VK_WHOLE_SIZE
4567 			};
4568 			const vk::VkBufferImageCopy	region =
4569 			{
4570 				0,
4571 				0, 0,
4572 				{
4573 					vk::VK_IMAGE_ASPECT_COLOR_BIT,
4574 					0,	// mipLevel
4575 					0,	// arrayLayer
4576 					1	// layerCount
4577 				},
4578 				{ 0, 0, 0 },
4579 				{
4580 					(deUint32)m_targetWidth,
4581 					(deUint32)m_targetHeight,
4582 					1u
4583 				}
4584 			};
4585 
4586 			vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &imageBarrier);
4587 			vkd.cmdCopyImageToBuffer(*commandBuffer, *m_colorTarget, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *dstBuffer, 1, &region);
4588 			vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &bufferBarrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
4589 		}
4590 
4591 		VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
4592 		queueRun(vkd, queue, *commandBuffer);
4593 
4594 		{
4595 			void* const	ptr		= mapMemory(vkd, device, *memory, 4 * m_targetWidth * m_targetHeight);
4596 
4597 			vk::invalidateMappedMemoryRange(vkd, device, *memory, 0,  4 * m_targetWidth * m_targetHeight);
4598 
4599 			{
4600 				const deUint8* const			data		= (const deUint8*)ptr;
4601 				const ConstPixelBufferAccess	resAccess	(TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_targetWidth, m_targetHeight, 1, data);
4602 				const ConstPixelBufferAccess&	refAccess	(verifyContext.getReferenceTarget().getAccess());
4603 
4604 				if (!tcu::intThresholdCompare(context.getLog(), (de::toString(commandIndex) + ":" + getName()).c_str(), (de::toString(commandIndex) + ":" + getName()).c_str(), refAccess, resAccess, UVec4(0), tcu::COMPARE_LOG_ON_ERROR))
4605 					resultCollector.fail(de::toString(commandIndex) + ":" + getName() + " Image comparison failed");
4606 			}
4607 
4608 			vkd.unmapMemory(device, *memory);
4609 		}
4610 	}
4611 }
4612 
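// Objects that must stay alive for as long as the pipeline built by createPipelineWithResources() is in use.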
4613 struct PipelineResources
4614 {
4615 	vk::Move<vk::VkPipeline>			pipeline;
4616 	vk::Move<vk::VkDescriptorSetLayout>	descriptorSetLayout;
4617 	vk::Move<vk::VkPipelineLayout>		pipelineLayout;
4618 };
4619 
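// Helper that builds an optional descriptor set layout, a pipeline layout and a single-subpass
// graphics pipeline with fixed-function state sized to the given viewport.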
4620 void createPipelineWithResources (const vk::DeviceInterface&							vkd,
4621 								  const vk::VkDevice									device,
4622 								  const vk::VkRenderPass								renderPass,
4623 								  const deUint32										subpass,
4624 								  const vk::VkShaderModule&								vertexShaderModule,
4625 								  const vk::VkShaderModule&								fragmentShaderModule,
4626 								  const deUint32										viewPortWidth,
4627 								  const deUint32										viewPortHeight,
4628 								  const vector<vk::VkVertexInputBindingDescription>&	vertexBindingDescriptions,
4629 								  const vector<vk::VkVertexInputAttributeDescription>&	vertexAttributeDescriptions,
4630 								  const vector<vk::VkDescriptorSetLayoutBinding>&		bindings,
4631 								  const vk::VkPrimitiveTopology							topology,
4632 								  deUint32												pushConstantRangeCount,
4633 								  const vk::VkPushConstantRange*						pushConstantRanges,
4634 								  PipelineResources&									resources)
4635 {
4636 	if (!bindings.empty())
4637 	{
4638 		const vk::VkDescriptorSetLayoutCreateInfo createInfo =
4639 		{
4640 			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
4641 			DE_NULL,
4642 
4643 			0u,
4644 			(deUint32)bindings.size(),
4645 			bindings.empty() ? DE_NULL : &bindings[0]
4646 		};
4647 
4648 		resources.descriptorSetLayout = vk::createDescriptorSetLayout(vkd, device, &createInfo);
4649 	}
4650 
4651 	{
4652 		const vk::VkDescriptorSetLayout			descriptorSetLayout_	= *resources.descriptorSetLayout;
4653 		const vk::VkPipelineLayoutCreateInfo	createInfo				=
4654 		{
4655 			vk::VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
4656 			DE_NULL,
4657 			0,
4658 
4659 			resources.descriptorSetLayout ? 1u : 0u,
4660 			resources.descriptorSetLayout ? &descriptorSetLayout_ : DE_NULL,
4661 
4662 			pushConstantRangeCount,
4663 			pushConstantRanges
4664 		};
4665 
4666 		resources.pipelineLayout = vk::createPipelineLayout(vkd, device, &createInfo);
4667 	}
4668 
4669 	{
4670 		const vk::VkPipelineShaderStageCreateInfo			shaderStages[]					=
4671 		{
4672 			{
4673 				vk::VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
4674 				DE_NULL,
4675 				0,
4676 				vk::VK_SHADER_STAGE_VERTEX_BIT,
4677 				vertexShaderModule,
4678 				"main",
4679 				DE_NULL
4680 			},
4681 			{
4682 				vk::VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
4683 				DE_NULL,
4684 				0,
4685 				vk::VK_SHADER_STAGE_FRAGMENT_BIT,
4686 				fragmentShaderModule,
4687 				"main",
4688 				DE_NULL
4689 			}
4690 		};
4691 		const vk::VkPipelineDepthStencilStateCreateInfo		depthStencilState				=
4692 		{
4693 			vk::VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,
4694 			DE_NULL,
4695 			0u,
4696 			DE_FALSE,
4697 			DE_FALSE,
4698 			vk::VK_COMPARE_OP_ALWAYS,
4699 			DE_FALSE,
4700 			DE_FALSE,
4701 			{
4702 				vk::VK_STENCIL_OP_KEEP,
4703 				vk::VK_STENCIL_OP_KEEP,
4704 				vk::VK_STENCIL_OP_KEEP,
4705 				vk::VK_COMPARE_OP_ALWAYS,
4706 				0u,
4707 				0u,
4708 				0u,
4709 			},
4710 			{
4711 				vk::VK_STENCIL_OP_KEEP,
4712 				vk::VK_STENCIL_OP_KEEP,
4713 				vk::VK_STENCIL_OP_KEEP,
4714 				vk::VK_COMPARE_OP_ALWAYS,
4715 				0u,
4716 				0u,
4717 				0u,
4718 			},
4719 			-1.0f,
4720 			+1.0f
4721 		};
4722 		const vk::VkPipelineVertexInputStateCreateInfo		vertexInputState				=
4723 		{
4724 			vk::VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
4725 			DE_NULL,
4726 			0u,
4727 
4728 			(deUint32)vertexBindingDescriptions.size(),
4729 			vertexBindingDescriptions.empty() ? DE_NULL : &vertexBindingDescriptions[0],
4730 
4731 			(deUint32)vertexAttributeDescriptions.size(),
4732 			vertexAttributeDescriptions.empty() ? DE_NULL : &vertexAttributeDescriptions[0]
4733 		};
4734 		const vk::VkPipelineInputAssemblyStateCreateInfo	inputAssemblyState				=
4735 		{
4736 			vk::VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
4737 			DE_NULL,
4738 			0,
4739 			topology,
4740 			VK_FALSE
4741 		};
4742 		const vk::VkViewport								viewports[]						=
4743 		{
4744 			{ 0.0f, 0.0f, (float)viewPortWidth, (float)viewPortHeight, 0.0f, 1.0f }
4745 		};
4746 		const vk::VkRect2D									scissors[]						=
4747 		{
4748 			{ { 0, 0 }, { (deUint32)viewPortWidth, (deUint32)viewPortHeight } }
4749 		};
4750 		const vk::VkPipelineViewportStateCreateInfo			viewportState					=
4751 		{
4752 			vk::VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
4753 			DE_NULL,
4754 			0,
4755 			DE_LENGTH_OF_ARRAY(viewports),
4756 			viewports,
4757 			DE_LENGTH_OF_ARRAY(scissors),
4758 			scissors
4759 		};
4760 		const vk::VkPipelineRasterizationStateCreateInfo	rasterState						=
4761 		{
4762 			vk::VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
4763 			DE_NULL,
4764 			0,
4765 
4766 			VK_TRUE,
4767 			VK_FALSE,
4768 			vk::VK_POLYGON_MODE_FILL,
4769 			vk::VK_CULL_MODE_NONE,
4770 			vk::VK_FRONT_FACE_COUNTER_CLOCKWISE,
4771 			VK_FALSE,
4772 			0.0f,
4773 			0.0f,
4774 			0.0f,
4775 			1.0f
4776 		};
4777 		const vk::VkSampleMask								sampleMask						= ~0u;
4778 		const vk::VkPipelineMultisampleStateCreateInfo		multisampleState				=
4779 		{
4780 			vk::VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
4781 			DE_NULL,
4782 			0,
4783 
4784 			vk::VK_SAMPLE_COUNT_1_BIT,
4785 			VK_FALSE,
4786 			0.0f,
4787 			&sampleMask,
4788 			VK_FALSE,
4789 			VK_FALSE
4790 		};
4791 		const vk::VkPipelineColorBlendAttachmentState		attachments[]					=
4792 		{
4793 			{
4794 				VK_FALSE,
4795 				vk::VK_BLEND_FACTOR_ONE,
4796 				vk::VK_BLEND_FACTOR_ZERO,
4797 				vk::VK_BLEND_OP_ADD,
4798 				vk::VK_BLEND_FACTOR_ONE,
4799 				vk::VK_BLEND_FACTOR_ZERO,
4800 				vk::VK_BLEND_OP_ADD,
4801 				(vk::VK_COLOR_COMPONENT_R_BIT|
4802 				 vk::VK_COLOR_COMPONENT_G_BIT|
4803 				 vk::VK_COLOR_COMPONENT_B_BIT|
4804 				 vk::VK_COLOR_COMPONENT_A_BIT)
4805 			}
4806 		};
4807 		const vk::VkPipelineColorBlendStateCreateInfo		colorBlendState					=
4808 		{
4809 			vk::VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
4810 			DE_NULL,
4811 			0,
4812 
4813 			VK_FALSE,
4814 			vk::VK_LOGIC_OP_COPY,
4815 			DE_LENGTH_OF_ARRAY(attachments),
4816 			attachments,
4817 			{ 0.0f, 0.0f, 0.0f, 0.0f }
4818 		};
4819 		const vk::VkGraphicsPipelineCreateInfo				createInfo						=
4820 		{
4821 			vk::VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
4822 			DE_NULL,
4823 			0u,
4824 
4825 			DE_LENGTH_OF_ARRAY(shaderStages),
4826 			shaderStages,
4827 
4828 			&vertexInputState,
4829 			&inputAssemblyState,
4830 			DE_NULL,
4831 			&viewportState,
4832 			&rasterState,
4833 			&multisampleState,
4834 			&depthStencilState,
4835 			&colorBlendState,
4836 			DE_NULL,
4837 			*resources.pipelineLayout,
4838 			renderPass,
4839 			subpass,
4840 			0,
4841 			0
4842 		};
4843 
4844 		resources.pipeline = vk::createGraphicsPipeline(vkd, device, 0, &createInfo);
4845 	}
4846 }
4847 
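// Renders using the tested buffer as an index buffer. Each byte pair is consumed as one 16-bit
// index; the vertex shader is expected to decode the low and high byte into a point coordinate,
// which verify() mirrors on the reference image.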
4848 class RenderIndexBuffer : public RenderPassCommand
4849 {
4850 public:
4851 				RenderIndexBuffer	(void) {}
4852 				~RenderIndexBuffer	(void) {}
4853 
4854 	const char*	getName				(void) const { return "RenderIndexBuffer"; }
4855 	void		logPrepare			(TestLog&, size_t) const;
4856 	void		logSubmit			(TestLog&, size_t) const;
4857 	void		prepare				(PrepareRenderPassContext&);
4858 	void		submit				(SubmitContext& context);
4859 	void		verify				(VerifyRenderPassContext&, size_t);
4860 
4861 private:
4862 	PipelineResources				m_resources;
4863 	vk::VkDeviceSize				m_bufferSize;
4864 };
4865 
4866 void RenderIndexBuffer::logPrepare (TestLog& log, size_t commandIndex) const
4867 {
4868 	log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as index buffer." << TestLog::EndMessage;
4869 }
4870 
4871 void RenderIndexBuffer::logSubmit (TestLog& log, size_t commandIndex) const
4872 {
4873 	log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as index buffer." << TestLog::EndMessage;
4874 }
4875 
4876 void RenderIndexBuffer::prepare (PrepareRenderPassContext& context)
4877 {
4878 	const vk::DeviceInterface&				vkd						= context.getContext().getDeviceInterface();
4879 	const vk::VkDevice						device					= context.getContext().getDevice();
4880 	const vk::VkRenderPass					renderPass				= context.getRenderPass();
4881 	const deUint32							subpass					= 0;
4882 	const vk::Unique<vk::VkShaderModule>	vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("index-buffer.vert"), 0));
4883 	const vk::Unique<vk::VkShaderModule>	fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
4884 
4885 	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
4886 								vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), vector<vk::VkDescriptorSetLayoutBinding>(), vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
4887 	m_bufferSize = context.getBufferSize();
4888 }
4889 
4890 void RenderIndexBuffer::submit (SubmitContext& context)
4891 {
4892 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
4893 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
4894 
4895 	vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
4896 	vkd.cmdBindIndexBuffer(commandBuffer, context.getBuffer(), 0, vk::VK_INDEX_TYPE_UINT16);
4897 	vkd.cmdDrawIndexed(commandBuffer, (deUint32)(context.getBufferSize() / 2), 1, 0, 0, 0);
4898 }
4899 
4900 void RenderIndexBuffer::verify (VerifyRenderPassContext& context, size_t)
4901 {
4902 	for (size_t pos = 0; pos < (size_t)m_bufferSize / 2; pos++)
4903 	{
4904 		const deUint8 x  = context.getReference().get(pos * 2);
4905 		const deUint8 y  = context.getReference().get((pos * 2) + 1);
4906 
4907 		context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
4908 	}
4909 }
4910 
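// Renders using the tested buffer as a vertex buffer with a two-byte R8G8_UNORM attribute;
// each byte pair is expected to map to a point coordinate, mirrored on the CPU in verify().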
4911 class RenderVertexBuffer : public RenderPassCommand
4912 {
4913 public:
4914 				RenderVertexBuffer	(void) {}
4915 				~RenderVertexBuffer	(void) {}
4916 
4917 	const char*	getName				(void) const { return "RenderVertexBuffer"; }
4918 	void		logPrepare			(TestLog&, size_t) const;
4919 	void		logSubmit			(TestLog&, size_t) const;
4920 	void		prepare				(PrepareRenderPassContext&);
4921 	void		submit				(SubmitContext& context);
4922 	void		verify				(VerifyRenderPassContext&, size_t);
4923 
4924 private:
4925 	PipelineResources	m_resources;
4926 	vk::VkDeviceSize	m_bufferSize;
4927 };
4928 
4929 void RenderVertexBuffer::logPrepare (TestLog& log, size_t commandIndex) const
4930 {
4931 	log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as vertex buffer." << TestLog::EndMessage;
4932 }
4933 
4934 void RenderVertexBuffer::logSubmit (TestLog& log, size_t commandIndex) const
4935 {
4936 	log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as vertex buffer." << TestLog::EndMessage;
4937 }
4938 
4939 void RenderVertexBuffer::prepare (PrepareRenderPassContext& context)
4940 {
4941 	const vk::DeviceInterface&						vkd						= context.getContext().getDeviceInterface();
4942 	const vk::VkDevice								device					= context.getContext().getDevice();
4943 	const vk::VkRenderPass							renderPass				= context.getRenderPass();
4944 	const deUint32									subpass					= 0;
4945 	const vk::Unique<vk::VkShaderModule>			vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("vertex-buffer.vert"), 0));
4946 	const vk::Unique<vk::VkShaderModule>			fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
4947 
4948 	vector<vk::VkVertexInputAttributeDescription>	vertexAttributeDescriptions;
4949 	vector<vk::VkVertexInputBindingDescription>		vertexBindingDescriptions;
4950 
4951 	{
4952 		const vk::VkVertexInputBindingDescription vertexBindingDescription =
4953 			{
4954 				0,
4955 				2,
4956 				vk::VK_VERTEX_INPUT_RATE_VERTEX
4957 			};
4958 
4959 		vertexBindingDescriptions.push_back(vertexBindingDescription);
4960 	}
4961 	{
4962 		const vk::VkVertexInputAttributeDescription vertexAttributeDescription =
4963 		{
4964 			0,
4965 			0,
4966 			vk::VK_FORMAT_R8G8_UNORM,
4967 			0
4968 		};
4969 
4970 		vertexAttributeDescriptions.push_back(vertexAttributeDescription);
4971 	}
4972 	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
4973 								vertexBindingDescriptions, vertexAttributeDescriptions, vector<vk::VkDescriptorSetLayoutBinding>(), vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
4974 
4975 	m_bufferSize = context.getBufferSize();
4976 }
4977 
4978 void RenderVertexBuffer::submit (SubmitContext& context)
4979 {
4980 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
4981 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
4982 	const vk::VkDeviceSize		offset			= 0;
4983 	const vk::VkBuffer			buffer			= context.getBuffer();
4984 
4985 	vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
4986 	vkd.cmdBindVertexBuffers(commandBuffer, 0, 1, &buffer, &offset);
4987 	vkd.cmdDraw(commandBuffer, (deUint32)(context.getBufferSize() / 2), 1, 0, 0);
4988 }
4989 
4990 void RenderVertexBuffer::verify (VerifyRenderPassContext& context, size_t)
4991 {
4992 	for (size_t pos = 0; pos < (size_t)m_bufferSize / 2; pos++)
4993 	{
4994 		const deUint8 x  = context.getReference().get(pos * 2);
4995 		const deUint8 y  = context.getReference().get((pos * 2) + 1);
4996 
4997 		context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
4998 	}
4999 }
5000 
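// Renders using the tested buffer as one or more uniform buffers. The buffer is split into
// MAX_UNIFORM_BUFFER_SIZE chunks, each bound through its own descriptor set and drawn separately.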
5001 class RenderVertexUniformBuffer : public RenderPassCommand
5002 {
5003 public:
5004 									RenderVertexUniformBuffer	(void) {}
5005 									~RenderVertexUniformBuffer	(void);
5006 
5007 	const char*						getName						(void) const { return "RenderVertexUniformBuffer"; }
5008 	void							logPrepare					(TestLog&, size_t) const;
5009 	void							logSubmit					(TestLog&, size_t) const;
5010 	void							prepare						(PrepareRenderPassContext&);
5011 	void							submit						(SubmitContext& context);
5012 	void							verify						(VerifyRenderPassContext&, size_t);
5013 
5014 private:
5015 	PipelineResources				m_resources;
5016 	vk::Move<vk::VkDescriptorPool>	m_descriptorPool;
5017 	vector<vk::VkDescriptorSet>		m_descriptorSets;
5018 
5019 	vk::VkDeviceSize				m_bufferSize;
5020 };
5021 
5022 RenderVertexUniformBuffer::~RenderVertexUniformBuffer (void)
5023 {
5024 }
5025 
5026 void RenderVertexUniformBuffer::logPrepare (TestLog& log, size_t commandIndex) const
5027 {
5028 	log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as uniform buffer." << TestLog::EndMessage;
5029 }
5030 
5031 void RenderVertexUniformBuffer::logSubmit (TestLog& log, size_t commandIndex) const
5032 {
5033 	log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as uniform buffer." << TestLog::EndMessage;
5034 }
5035 
5036 void RenderVertexUniformBuffer::prepare (PrepareRenderPassContext& context)
5037 {
5038 	const vk::DeviceInterface&					vkd						= context.getContext().getDeviceInterface();
5039 	const vk::VkDevice							device					= context.getContext().getDevice();
5040 	const vk::VkRenderPass						renderPass				= context.getRenderPass();
5041 	const deUint32								subpass					= 0;
5042 	const vk::Unique<vk::VkShaderModule>		vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("uniform-buffer.vert"), 0));
5043 	const vk::Unique<vk::VkShaderModule>		fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
5044 	vector<vk::VkDescriptorSetLayoutBinding>	bindings;
5045 
5046 	m_bufferSize = context.getBufferSize();
5047 
5048 	{
5049 		const vk::VkDescriptorSetLayoutBinding binding =
5050 		{
5051 			0u,
5052 			vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
5053 			1,
5054 			vk::VK_SHADER_STAGE_VERTEX_BIT,
5055 			DE_NULL
5056 		};
5057 
5058 		bindings.push_back(binding);
5059 	}
5060 
5061 	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
5062 								vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
5063 
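	// One uniform-buffer descriptor per MAX_UNIFORM_BUFFER_SIZE chunk of the tested buffer.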
5064 	{
5065 		const deUint32							descriptorCount	= (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE));
5066 		const vk::VkDescriptorPoolSize			poolSizes		=
5067 		{
5068 			vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
5069 			descriptorCount
5070 		};
5071 		const vk::VkDescriptorPoolCreateInfo	createInfo		=
5072 		{
5073 			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
5074 			DE_NULL,
5075 			vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
5076 
5077 			descriptorCount,
5078 			1u,
5079 			&poolSizes,
5080 		};
5081 
5082 		m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
5083 		m_descriptorSets.resize(descriptorCount);
5084 	}
5085 
5086 	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5087 	{
5088 		const vk::VkDescriptorSetLayout			layout			= *m_resources.descriptorSetLayout;
5089 		const vk::VkDescriptorSetAllocateInfo	allocateInfo	=
5090 		{
5091 			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
5092 			DE_NULL,
5093 
5094 			*m_descriptorPool,
5095 			1,
5096 			&layout
5097 		};
5098 
5099 		m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();
5100 
5101 		{
5102 			const vk::VkDescriptorBufferInfo		bufferInfo	=
5103 			{
5104 				context.getBuffer(),
5105 				(vk::VkDeviceSize)(descriptorSetNdx * (size_t)MAX_UNIFORM_BUFFER_SIZE),
5106 				m_bufferSize < (descriptorSetNdx + 1) * (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
5107 					? m_bufferSize - descriptorSetNdx * (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
5108 					: (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
5109 			};
5110 			const vk::VkWriteDescriptorSet			write		=
5111 			{
5112 				vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
5113 				DE_NULL,
5114 				m_descriptorSets[descriptorSetNdx],
5115 				0u,
5116 				0u,
5117 				1u,
5118 				vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
5119 				DE_NULL,
5120 				&bufferInfo,
5121 				DE_NULL,
5122 			};
5123 
5124 			vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
5125 		}
5126 	}
5127 }
5128 
5129 void RenderVertexUniformBuffer::submit (SubmitContext& context)
5130 {
5131 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
5132 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
5133 
5134 	vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
5135 
5136 	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5137 	{
5138 		const size_t	size	= (size_t)(m_bufferSize < (descriptorSetNdx + 1) * (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
5139 								? m_bufferSize - descriptorSetNdx * (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
5140 								: (size_t)MAX_UNIFORM_BUFFER_SIZE);
5141 		const deUint32	count	= (deUint32)(size / 2);
5142 
5143 		vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
5144 		vkd.cmdDraw(commandBuffer, count, 1, 0, 0);
5145 	}
5146 }
5147 
5148 void RenderVertexUniformBuffer::verify (VerifyRenderPassContext& context, size_t)
5149 {
5150 	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5151 	{
5152 		const size_t	offset	= descriptorSetNdx * MAX_UNIFORM_BUFFER_SIZE;
5153 		const size_t	size	= (size_t)(m_bufferSize < (descriptorSetNdx + 1) * (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
5154 								? m_bufferSize - descriptorSetNdx * (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
5155 								: (size_t)MAX_UNIFORM_BUFFER_SIZE);
5156 		const size_t	count	= size / 2;
5157 
5158 		for (size_t pos = 0; pos < count; pos++)
5159 		{
5160 			const deUint8 x  = context.getReference().get(offset + pos * 2);
5161 			const deUint8 y  = context.getReference().get(offset + (pos * 2) + 1);
5162 
5163 			context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
5164 		}
5165 	}
5166 }
5167 
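// Renders using the tested buffer as uniform texel buffers (R16_UINT views). Chunk size is
// limited by VkPhysicalDeviceLimits::maxTexelBufferElements, so several views may be required.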
5168 class RenderVertexUniformTexelBuffer : public RenderPassCommand
5169 {
5170 public:
5171 				RenderVertexUniformTexelBuffer	(void) {}
5172 				~RenderVertexUniformTexelBuffer	(void);
5173 
5174 	const char*	getName							(void) const { return "RenderVertexUniformTexelBuffer"; }
5175 	void		logPrepare						(TestLog&, size_t) const;
5176 	void		logSubmit						(TestLog&, size_t) const;
5177 	void		prepare							(PrepareRenderPassContext&);
5178 	void		submit							(SubmitContext& context);
5179 	void		verify							(VerifyRenderPassContext&, size_t);
5180 
5181 private:
5182 	PipelineResources				m_resources;
5183 	vk::Move<vk::VkDescriptorPool>	m_descriptorPool;
5184 	vector<vk::VkDescriptorSet>		m_descriptorSets;
5185 	vector<vk::VkBufferView>		m_bufferViews;
5186 
5187 	const vk::DeviceInterface*		m_vkd;
5188 	vk::VkDevice					m_device;
5189 	vk::VkDeviceSize				m_bufferSize;
5190 	deUint32						m_maxUniformTexelCount;
5191 };
5192 
5193 RenderVertexUniformTexelBuffer::~RenderVertexUniformTexelBuffer (void)
5194 {
5195 	for (size_t bufferViewNdx = 0; bufferViewNdx < m_bufferViews.size(); bufferViewNdx++)
5196 	{
5197 		if (!!m_bufferViews[bufferViewNdx])
5198 		{
5199 			m_vkd->destroyBufferView(m_device, m_bufferViews[bufferViewNdx], DE_NULL);
5200 			m_bufferViews[bufferViewNdx] = (vk::VkBufferView)0;
5201 		}
5202 	}
5203 }
5204 
5205 void RenderVertexUniformTexelBuffer::logPrepare (TestLog& log, size_t commandIndex) const
5206 {
5207 	log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as uniform texel buffer." << TestLog::EndMessage;
5208 }
5209 
5210 void RenderVertexUniformTexelBuffer::logSubmit (TestLog& log, size_t commandIndex) const
5211 {
5212 	log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as uniform texel buffer." << TestLog::EndMessage;
5213 }
5214 
5215 void RenderVertexUniformTexelBuffer::prepare (PrepareRenderPassContext& context)
5216 {
5217 	const vk::InstanceInterface&				vki						= context.getContext().getInstanceInterface();
5218 	const vk::VkPhysicalDevice					physicalDevice			= context.getContext().getPhysicalDevice();
5219 	const vk::DeviceInterface&					vkd						= context.getContext().getDeviceInterface();
5220 	const vk::VkDevice							device					= context.getContext().getDevice();
5221 	const vk::VkRenderPass						renderPass				= context.getRenderPass();
5222 	const deUint32								subpass					= 0;
5223 	const vk::Unique<vk::VkShaderModule>		vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("uniform-texel-buffer.vert"), 0));
5224 	const vk::Unique<vk::VkShaderModule>		fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
5225 	vector<vk::VkDescriptorSetLayoutBinding>	bindings;
5226 
5227 	m_device				= device;
5228 	m_vkd					= &vkd;
5229 	m_bufferSize			= context.getBufferSize();
5230 	m_maxUniformTexelCount	= vk::getPhysicalDeviceProperties(vki, physicalDevice).limits.maxTexelBufferElements;
5231 
5232 	{
5233 		const vk::VkDescriptorSetLayoutBinding binding =
5234 		{
5235 			0u,
5236 			vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
5237 			1,
5238 			vk::VK_SHADER_STAGE_VERTEX_BIT,
5239 			DE_NULL
5240 		};
5241 
5242 		bindings.push_back(binding);
5243 	}
5244 
5245 	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
5246 								vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
5247 
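	// One descriptor and one buffer view per chunk of at most m_maxUniformTexelCount 16-bit texels.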
5248 	{
5249 		const deUint32							descriptorCount	= (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)m_maxUniformTexelCount * 2));
5250 		const vk::VkDescriptorPoolSize			poolSizes		=
5251 		{
5252 			vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
5253 			descriptorCount
5254 		};
5255 		const vk::VkDescriptorPoolCreateInfo	createInfo		=
5256 		{
5257 			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
5258 			DE_NULL,
5259 			vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
5260 
5261 			descriptorCount,
5262 			1u,
5263 			&poolSizes,
5264 		};
5265 
5266 		m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
5267 		m_descriptorSets.resize(descriptorCount, (vk::VkDescriptorSet)0);
5268 		m_bufferViews.resize(descriptorCount, (vk::VkBufferView)0);
5269 	}
5270 
5271 	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5272 	{
5273 		const deUint32							count			= (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxUniformTexelCount * 2
5274 																? m_bufferSize - descriptorSetNdx * m_maxUniformTexelCount * 2
5275 																: m_maxUniformTexelCount * 2) / 2;
5276 		const vk::VkDescriptorSetLayout			layout			= *m_resources.descriptorSetLayout;
5277 		const vk::VkDescriptorSetAllocateInfo	allocateInfo	=
5278 		{
5279 			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
5280 			DE_NULL,
5281 
5282 			*m_descriptorPool,
5283 			1,
5284 			&layout
5285 		};
5286 
5287 		m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();
5288 
5289 		{
5290 			const vk::VkBufferViewCreateInfo createInfo =
5291 			{
5292 				vk::VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
5293 				DE_NULL,
5294 				0u,
5295 
5296 				context.getBuffer(),
5297 				vk::VK_FORMAT_R16_UINT,
5298 				descriptorSetNdx * m_maxUniformTexelCount * 2,
5299 				count * 2
5300 			};
5301 
5302 			VK_CHECK(vkd.createBufferView(device, &createInfo, DE_NULL, &m_bufferViews[descriptorSetNdx]));
5303 		}
5304 
5305 		{
5306 			const vk::VkWriteDescriptorSet			write		=
5307 			{
5308 				vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
5309 				DE_NULL,
5310 				m_descriptorSets[descriptorSetNdx],
5311 				0u,
5312 				0u,
5313 				1u,
5314 				vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
5315 				DE_NULL,
5316 				DE_NULL,
5317 				&m_bufferViews[descriptorSetNdx]
5318 			};
5319 
5320 			vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
5321 		}
5322 	}
5323 }
5324 
5325 void RenderVertexUniformTexelBuffer::submit (SubmitContext& context)
5326 {
5327 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
5328 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
5329 
5330 	vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
5331 
5332 	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5333 	{
5334 		const deUint32 count	= (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxUniformTexelCount * 2
5335 								? m_bufferSize - descriptorSetNdx * m_maxUniformTexelCount * 2
5336 								: m_maxUniformTexelCount * 2) / 2;
5337 
5338 		vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
5339 		vkd.cmdDraw(commandBuffer, count, 1, 0, 0);
5340 	}
5341 }
5342 
5343 void RenderVertexUniformTexelBuffer::verify (VerifyRenderPassContext& context, size_t)
5344 {
5345 	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5346 	{
5347 		const size_t	offset	= descriptorSetNdx * m_maxUniformTexelCount * 2;
5348 		const deUint32	count	= (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxUniformTexelCount * 2
5349 								? m_bufferSize - descriptorSetNdx * m_maxUniformTexelCount * 2
5350 								: m_maxUniformTexelCount * 2) / 2;
5351 
5352 		for (size_t pos = 0; pos < (size_t)count; pos++)
5353 		{
5354 			const deUint8 x  = context.getReference().get(offset + pos * 2);
5355 			const deUint8 y  = context.getReference().get(offset + (pos * 2) + 1);
5356 
5357 			context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
5358 		}
5359 	}
5360 }
5361 
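// Renders using the tested buffer as one or more storage buffers, split into
// MAX_STORAGE_BUFFER_SIZE chunks with one descriptor set per chunk.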
5362 class RenderVertexStorageBuffer : public RenderPassCommand
5363 {
5364 public:
5365 				RenderVertexStorageBuffer	(void) {}
5366 				~RenderVertexStorageBuffer	(void);
5367 
5368 	const char*	getName						(void) const { return "RenderVertexStorageBuffer"; }
5369 	void		logPrepare					(TestLog&, size_t) const;
5370 	void		logSubmit					(TestLog&, size_t) const;
5371 	void		prepare						(PrepareRenderPassContext&);
5372 	void		submit						(SubmitContext& context);
5373 	void		verify						(VerifyRenderPassContext&, size_t);
5374 
5375 private:
5376 	PipelineResources				m_resources;
5377 	vk::Move<vk::VkDescriptorPool>	m_descriptorPool;
5378 	vector<vk::VkDescriptorSet>		m_descriptorSets;
5379 
5380 	vk::VkDeviceSize				m_bufferSize;
5381 };
5382 
5383 RenderVertexStorageBuffer::~RenderVertexStorageBuffer (void)
5384 {
5385 }
5386 
5387 void RenderVertexStorageBuffer::logPrepare (TestLog& log, size_t commandIndex) const
5388 {
5389 	log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as storage buffer." << TestLog::EndMessage;
5390 }
5391 
5392 void RenderVertexStorageBuffer::logSubmit (TestLog& log, size_t commandIndex) const
5393 {
5394 	log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as storage buffer." << TestLog::EndMessage;
5395 }
5396 
5397 void RenderVertexStorageBuffer::prepare (PrepareRenderPassContext& context)
5398 {
5399 	const vk::DeviceInterface&					vkd						= context.getContext().getDeviceInterface();
5400 	const vk::VkDevice							device					= context.getContext().getDevice();
5401 	const vk::VkRenderPass						renderPass				= context.getRenderPass();
5402 	const deUint32								subpass					= 0;
5403 	const vk::Unique<vk::VkShaderModule>		vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("storage-buffer.vert"), 0));
5404 	const vk::Unique<vk::VkShaderModule>		fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
5405 	vector<vk::VkDescriptorSetLayoutBinding>	bindings;
5406 
5407 	m_bufferSize = context.getBufferSize();
5408 
5409 	{
5410 		const vk::VkDescriptorSetLayoutBinding binding =
5411 		{
5412 			0u,
5413 			vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
5414 			1,
5415 			vk::VK_SHADER_STAGE_VERTEX_BIT,
5416 			DE_NULL
5417 		};
5418 
5419 		bindings.push_back(binding);
5420 	}
5421 
5422 	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
5423 								vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
5424 
5425 	{
5426 		const deUint32							descriptorCount	= (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)MAX_STORAGE_BUFFER_SIZE));
5427 		const vk::VkDescriptorPoolSize			poolSizes		=
5428 		{
5429 			vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
5430 			descriptorCount
5431 		};
5432 		const vk::VkDescriptorPoolCreateInfo	createInfo		=
5433 		{
5434 			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
5435 			DE_NULL,
5436 			vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
5437 
5438 			descriptorCount,
5439 			1u,
5440 			&poolSizes,
5441 		};
5442 
5443 		m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
5444 		m_descriptorSets.resize(descriptorCount);
5445 	}
5446 
5447 	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5448 	{
5449 		const vk::VkDescriptorSetLayout			layout			= *m_resources.descriptorSetLayout;
5450 		const vk::VkDescriptorSetAllocateInfo	allocateInfo	=
5451 		{
5452 			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
5453 			DE_NULL,
5454 
5455 			*m_descriptorPool,
5456 			1,
5457 			&layout
5458 		};
5459 
5460 		m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();
5461 
5462 		{
5463 			const vk::VkDescriptorBufferInfo		bufferInfo	=
5464 			{
5465 				context.getBuffer(),
5466 				descriptorSetNdx * MAX_STORAGE_BUFFER_SIZE,
5467 				de::min(m_bufferSize - descriptorSetNdx * MAX_STORAGE_BUFFER_SIZE,  (vk::VkDeviceSize)MAX_STORAGE_BUFFER_SIZE)
5468 			};
5469 			const vk::VkWriteDescriptorSet			write		=
5470 			{
5471 				vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
5472 				DE_NULL,
5473 				m_descriptorSets[descriptorSetNdx],
5474 				0u,
5475 				0u,
5476 				1u,
5477 				vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
5478 				DE_NULL,
5479 				&bufferInfo,
5480 				DE_NULL,
5481 			};
5482 
5483 			vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
5484 		}
5485 	}
5486 }
5487 
5488 void RenderVertexStorageBuffer::submit (SubmitContext& context)
5489 {
5490 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
5491 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
5492 
5493 	vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
5494 
5495 	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5496 	{
5497 		const size_t size	= m_bufferSize < (descriptorSetNdx + 1) * MAX_STORAGE_BUFFER_SIZE
5498 							? (size_t)(m_bufferSize - descriptorSetNdx * MAX_STORAGE_BUFFER_SIZE)
5499 							: (size_t)(MAX_STORAGE_BUFFER_SIZE);
5500 
5501 		vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
5502 		vkd.cmdDraw(commandBuffer, (deUint32)(size / 2), 1, 0, 0);
5503 	}
5504 }
5505 
5506 void RenderVertexStorageBuffer::verify (VerifyRenderPassContext& context, size_t)
5507 {
5508 	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5509 	{
5510 		const size_t offset	= descriptorSetNdx * MAX_STORAGE_BUFFER_SIZE;
5511 		const size_t size	= m_bufferSize < (descriptorSetNdx + 1) * MAX_STORAGE_BUFFER_SIZE
5512 							? (size_t)(m_bufferSize - descriptorSetNdx * MAX_STORAGE_BUFFER_SIZE)
5513 							: (size_t)(MAX_STORAGE_BUFFER_SIZE);
5514 
5515 		for (size_t pos = 0; pos < size / 2; pos++)
5516 		{
5517 			const deUint8 x  = context.getReference().get(offset + pos * 2);
5518 			const deUint8 y  = context.getReference().get(offset + (pos * 2) + 1);
5519 
5520 			context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
5521 		}
5522 	}
5523 }
5524 
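// Renders using the tested buffer as storage texel buffers (R32_UINT views); each 32-bit texel
// is expected to yield two point coordinates, mirrored on the CPU in verify().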
5525 class RenderVertexStorageTexelBuffer : public RenderPassCommand
5526 {
5527 public:
5528 				RenderVertexStorageTexelBuffer	(void) {}
5529 				~RenderVertexStorageTexelBuffer	(void);
5530 
5531 	const char*	getName							(void) const { return "RenderVertexStorageTexelBuffer"; }
5532 	void		logPrepare						(TestLog&, size_t) const;
5533 	void		logSubmit						(TestLog&, size_t) const;
5534 	void		prepare							(PrepareRenderPassContext&);
5535 	void		submit							(SubmitContext& context);
5536 	void		verify							(VerifyRenderPassContext&, size_t);
5537 
5538 private:
5539 	PipelineResources				m_resources;
5540 	vk::Move<vk::VkDescriptorPool>	m_descriptorPool;
5541 	vector<vk::VkDescriptorSet>		m_descriptorSets;
5542 	vector<vk::VkBufferView>		m_bufferViews;
5543 
5544 	const vk::DeviceInterface*		m_vkd;
5545 	vk::VkDevice					m_device;
5546 	vk::VkDeviceSize				m_bufferSize;
5547 	deUint32						m_maxStorageTexelCount;
5548 };
5549 
5550 RenderVertexStorageTexelBuffer::~RenderVertexStorageTexelBuffer (void)
5551 {
5552 	for (size_t bufferViewNdx = 0; bufferViewNdx < m_bufferViews.size(); bufferViewNdx++)
5553 	{
5554 		if (!!m_bufferViews[bufferViewNdx])
5555 		{
5556 			m_vkd->destroyBufferView(m_device, m_bufferViews[bufferViewNdx], DE_NULL);
5557 			m_bufferViews[bufferViewNdx] = (vk::VkBufferView)0;
5558 		}
5559 	}
5560 }
5561 
5562 void RenderVertexStorageTexelBuffer::logPrepare (TestLog& log, size_t commandIndex) const
5563 {
5564 	log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as storage texel buffer." << TestLog::EndMessage;
5565 }
5566 
5567 void RenderVertexStorageTexelBuffer::logSubmit (TestLog& log, size_t commandIndex) const
5568 {
5569 	log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as storage texel buffer." << TestLog::EndMessage;
5570 }
5571 
5572 void RenderVertexStorageTexelBuffer::prepare (PrepareRenderPassContext& context)
5573 {
5574 	const vk::InstanceInterface&				vki						= context.getContext().getInstanceInterface();
5575 	const vk::VkPhysicalDevice					physicalDevice			= context.getContext().getPhysicalDevice();
5576 	const vk::DeviceInterface&					vkd						= context.getContext().getDeviceInterface();
5577 	const vk::VkDevice							device					= context.getContext().getDevice();
5578 	const vk::VkRenderPass						renderPass				= context.getRenderPass();
5579 	const deUint32								subpass					= 0;
5580 	const vk::Unique<vk::VkShaderModule>		vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("storage-texel-buffer.vert"), 0));
5581 	const vk::Unique<vk::VkShaderModule>		fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
5582 	vector<vk::VkDescriptorSetLayoutBinding>	bindings;
5583 
5584 	m_device				= device;
5585 	m_vkd					= &vkd;
5586 	m_bufferSize			= context.getBufferSize();
5587 	m_maxStorageTexelCount	= vk::getPhysicalDeviceProperties(vki, physicalDevice).limits.maxTexelBufferElements;
5588 
5589 	{
5590 		const vk::VkDescriptorSetLayoutBinding binding =
5591 		{
5592 			0u,
5593 			vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
5594 			1,
5595 			vk::VK_SHADER_STAGE_VERTEX_BIT,
5596 			DE_NULL
5597 		};
5598 
5599 		bindings.push_back(binding);
5600 	}
5601 
5602 	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
5603 								vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
5604 
5605 	{
5606 		const deUint32							descriptorCount	= (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)m_maxStorageTexelCount * 4));
5607 		const vk::VkDescriptorPoolSize			poolSizes		=
5608 		{
5609 			vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
5610 			descriptorCount
5611 		};
5612 		const vk::VkDescriptorPoolCreateInfo	createInfo		=
5613 		{
5614 			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
5615 			DE_NULL,
5616 			vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
5617 
5618 			descriptorCount,
5619 			1u,
5620 			&poolSizes,
5621 		};
5622 
5623 		m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
5624 		m_descriptorSets.resize(descriptorCount, (vk::VkDescriptorSet)0);
5625 		m_bufferViews.resize(descriptorCount, (vk::VkBufferView)0);
5626 	}
5627 
5628 	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5629 	{
5630 		const vk::VkDescriptorSetLayout			layout			= *m_resources.descriptorSetLayout;
5631 		const vk::VkDescriptorSetAllocateInfo	allocateInfo	=
5632 		{
5633 			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
5634 			DE_NULL,
5635 
5636 			*m_descriptorPool,
5637 			1,
5638 			&layout
5639 		};
5640 
5641 		m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();
5642 
5643 		{
5644 			const vk::VkBufferViewCreateInfo createInfo =
5645 			{
5646 				vk::VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
5647 				DE_NULL,
5648 				0u,
5649 
5650 				context.getBuffer(),
5651 				vk::VK_FORMAT_R32_UINT,
5652 				descriptorSetNdx * m_maxStorageTexelCount * 4,
5653 				(deUint32)de::min<vk::VkDeviceSize>(m_maxStorageTexelCount * 4, m_bufferSize - descriptorSetNdx * m_maxStorageTexelCount * 4)
5654 			};
5655 
5656 			VK_CHECK(vkd.createBufferView(device, &createInfo, DE_NULL, &m_bufferViews[descriptorSetNdx]));
5657 		}
5658 
5659 		{
5660 			const vk::VkWriteDescriptorSet			write		=
5661 			{
5662 				vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
5663 				DE_NULL,
5664 				m_descriptorSets[descriptorSetNdx],
5665 				0u,
5666 				0u,
5667 				1u,
5668 				vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
5669 				DE_NULL,
5670 				DE_NULL,
5671 				&m_bufferViews[descriptorSetNdx]
5672 			};
5673 
5674 			vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
5675 		}
5676 	}
5677 }
5678 
5679 void RenderVertexStorageTexelBuffer::submit (SubmitContext& context)
5680 {
5681 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
5682 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
5683 
5684 	vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
5685 
5686 	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5687 	{
5688 		const deUint32 count	= (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxStorageTexelCount * 4
5689 								? m_bufferSize - descriptorSetNdx * m_maxStorageTexelCount * 4
5690 								: m_maxStorageTexelCount * 4) / 2;
5691 
5692 		vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
5693 		vkd.cmdDraw(commandBuffer, count, 1, 0, 0);
5694 	}
5695 }
5696 
5697 void RenderVertexStorageTexelBuffer::verify (VerifyRenderPassContext& context, size_t)
5698 {
5699 	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5700 	{
5701 		const size_t	offset	= descriptorSetNdx * m_maxStorageTexelCount * 4;
5702 		const deUint32	count	= (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxStorageTexelCount * 4
5703 								? m_bufferSize - descriptorSetNdx * m_maxStorageTexelCount * 4
5704 								: m_maxStorageTexelCount * 4) / 2;
5705 
5706 		DE_ASSERT(context.getReference().getSize() <= 4 * m_maxStorageTexelCount * m_descriptorSets.size());
5707 		DE_ASSERT(context.getReference().getSize() > offset);
5708 		DE_ASSERT(offset + count * 2 <= context.getReference().getSize());
5709 
5710 		for (size_t pos = 0; pos < (size_t)count; pos++)
5711 		{
5712 			const deUint8 x = context.getReference().get(offset + pos * 2);
5713 			const deUint8 y = context.getReference().get(offset + (pos * 2) + 1);
5714 
5715 			context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
5716 		}
5717 	}
5718 }
5719 
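// Renders using the tested image as a storage image: every texel read in the vertex shader is
// expected to produce two points, one from its RG and one from its BA channels (see verify()).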
5720 class RenderVertexStorageImage : public RenderPassCommand
5721 {
5722 public:
5723 				RenderVertexStorageImage	(void) {}
5724 				~RenderVertexStorageImage	(void);
5725 
5726 	const char*	getName						(void) const { return "RenderVertexStorageImage"; }
5727 	void		logPrepare					(TestLog&, size_t) const;
5728 	void		logSubmit					(TestLog&, size_t) const;
5729 	void		prepare						(PrepareRenderPassContext&);
5730 	void		submit						(SubmitContext& context);
5731 	void		verify						(VerifyRenderPassContext&, size_t);
5732 
5733 private:
5734 	PipelineResources				m_resources;
5735 	vk::Move<vk::VkDescriptorPool>	m_descriptorPool;
5736 	vk::Move<vk::VkDescriptorSet>	m_descriptorSet;
5737 	vk::Move<vk::VkImageView>		m_imageView;
5738 };
5739 
5740 RenderVertexStorageImage::~RenderVertexStorageImage (void)
5741 {
5742 }
5743 
5744 void RenderVertexStorageImage::logPrepare (TestLog& log, size_t commandIndex) const
5745 {
5746 	log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render storage image." << TestLog::EndMessage;
5747 }
5748 
5749 void RenderVertexStorageImage::logSubmit (TestLog& log, size_t commandIndex) const
5750 {
5751 	log << TestLog::Message << commandIndex << ":" << getName() << " Render using storage image." << TestLog::EndMessage;
5752 }
5753 
5754 void RenderVertexStorageImage::prepare (PrepareRenderPassContext& context)
5755 {
5756 	const vk::DeviceInterface&					vkd						= context.getContext().getDeviceInterface();
5757 	const vk::VkDevice							device					= context.getContext().getDevice();
5758 	const vk::VkRenderPass						renderPass				= context.getRenderPass();
5759 	const deUint32								subpass					= 0;
5760 	const vk::Unique<vk::VkShaderModule>		vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("storage-image.vert"), 0));
5761 	const vk::Unique<vk::VkShaderModule>		fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
5762 	vector<vk::VkDescriptorSetLayoutBinding>	bindings;
5763 
5764 	{
5765 		const vk::VkDescriptorSetLayoutBinding binding =
5766 		{
5767 			0u,
5768 			vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
5769 			1,
5770 			vk::VK_SHADER_STAGE_VERTEX_BIT,
5771 			DE_NULL
5772 		};
5773 
5774 		bindings.push_back(binding);
5775 	}
5776 
5777 	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
5778 								vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
5779 
5780 	{
5781 		const vk::VkDescriptorPoolSize			poolSizes		=
5782 		{
5783 			vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
5784 			1
5785 		};
5786 		const vk::VkDescriptorPoolCreateInfo	createInfo		=
5787 		{
5788 			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
5789 			DE_NULL,
5790 			vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
5791 
5792 			1u,
5793 			1u,
5794 			&poolSizes,
5795 		};
5796 
5797 		m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
5798 	}
5799 
5800 	{
5801 		const vk::VkDescriptorSetLayout			layout			= *m_resources.descriptorSetLayout;
5802 		const vk::VkDescriptorSetAllocateInfo	allocateInfo	=
5803 		{
5804 			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
5805 			DE_NULL,
5806 
5807 			*m_descriptorPool,
5808 			1,
5809 			&layout
5810 		};
5811 
5812 		m_descriptorSet = vk::allocateDescriptorSet(vkd, device, &allocateInfo);
5813 
5814 		{
5815 			const vk::VkImageViewCreateInfo createInfo =
5816 			{
5817 				vk::VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
5818 				DE_NULL,
5819 				0u,
5820 
5821 				context.getImage(),
5822 				vk::VK_IMAGE_VIEW_TYPE_2D,
5823 				vk::VK_FORMAT_R8G8B8A8_UNORM,
5824 				vk::makeComponentMappingRGBA(),
5825 				{
5826 					vk::VK_IMAGE_ASPECT_COLOR_BIT,
5827 					0u,
5828 					1u,
5829 					0u,
5830 					1u
5831 				}
5832 			};
5833 
5834 			m_imageView = vk::createImageView(vkd, device, &createInfo);
5835 		}
5836 
5837 		{
5838 			const vk::VkDescriptorImageInfo			imageInfo	=
5839 			{
5840 				0,
5841 				*m_imageView,
5842 				context.getImageLayout()
5843 			};
5844 			const vk::VkWriteDescriptorSet			write		=
5845 			{
5846 				vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
5847 				DE_NULL,
5848 				*m_descriptorSet,
5849 				0u,
5850 				0u,
5851 				1u,
5852 				vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
5853 				&imageInfo,
5854 				DE_NULL,
5855 				DE_NULL,
5856 			};
5857 
5858 			vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
5859 		}
5860 	}
5861 }
5862 
5863 void RenderVertexStorageImage::submit (SubmitContext& context)
5864 {
5865 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
5866 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
5867 
5868 	vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
5869 
5870 	vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &(*m_descriptorSet), 0u, DE_NULL);
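	// Draw two point primitives per texel of the bound storage image; the CPU update in verify()
	// below assumes the vertex shader scatters the points according to the texel contents.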
5871 	vkd.cmdDraw(commandBuffer, context.getImageWidth() * context.getImageHeight() * 2, 1, 0, 0);
5872 }
5873 
5874 void RenderVertexStorageImage::verify (VerifyRenderPassContext& context, size_t)
5875 {
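	// CPU mirror of the point draw above: each texel yields two reference points, the even one at
	// the texel's (r, g) channels and the odd one at its (b, a) channels, both written as white.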
5876 	for (int pos = 0; pos < (int)(context.getReferenceImage().getWidth() * context.getReferenceImage().getHeight() * 2); pos++)
5877 	{
5878 		const tcu::IVec3		size	= context.getReferenceImage().getAccess().getSize();
5879 		const tcu::UVec4		pixel	= context.getReferenceImage().getAccess().getPixelUint((pos / 2) / size.x(), (pos / 2) % size.x());
5880 
5881 		if (pos % 2 == 0)
5882 			context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), pixel.x(), pixel.y());
5883 		else
5884 			context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), pixel.z(), pixel.w());
5885 	}
5886 }
5887 
5888 class RenderVertexSampledImage : public RenderPassCommand
5889 {
5890 public:
5891 				RenderVertexSampledImage	(void) {}
5892 				~RenderVertexSampledImage	(void);
5893 
5894 	const char*	getName						(void) const { return "RenderVertexSampledImage"; }
5895 	void		logPrepare					(TestLog&, size_t) const;
5896 	void		logSubmit					(TestLog&, size_t) const;
5897 	void		prepare						(PrepareRenderPassContext&);
5898 	void		submit						(SubmitContext& context);
5899 	void		verify						(VerifyRenderPassContext&, size_t);
5900 
5901 private:
5902 	PipelineResources				m_resources;
5903 	vk::Move<vk::VkDescriptorPool>	m_descriptorPool;
5904 	vk::Move<vk::VkDescriptorSet>	m_descriptorSet;
5905 	vk::Move<vk::VkImageView>		m_imageView;
5906 	vk::Move<vk::VkSampler>			m_sampler;
5907 };
5908 
5909 RenderVertexSampledImage::~RenderVertexSampledImage (void)
5910 {
5911 }
5912 
5913 void RenderVertexSampledImage::logPrepare (TestLog& log, size_t commandIndex) const
5914 {
5915 	log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render sampled image." << TestLog::EndMessage;
5916 }
5917 
5918 void RenderVertexSampledImage::logSubmit (TestLog& log, size_t commandIndex) const
5919 {
5920 	log << TestLog::Message << commandIndex << ":" << getName() << " Render using sampled image." << TestLog::EndMessage;
5921 }
5922 
5923 void RenderVertexSampledImage::prepare (PrepareRenderPassContext& context)
5924 {
5925 	const vk::DeviceInterface&					vkd						= context.getContext().getDeviceInterface();
5926 	const vk::VkDevice							device					= context.getContext().getDevice();
5927 	const vk::VkRenderPass						renderPass				= context.getRenderPass();
5928 	const deUint32								subpass					= 0;
5929 	const vk::Unique<vk::VkShaderModule>		vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("sampled-image.vert"), 0));
5930 	const vk::Unique<vk::VkShaderModule>		fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
5931 	vector<vk::VkDescriptorSetLayoutBinding>	bindings;
5932 
5933 	{
5934 		const vk::VkDescriptorSetLayoutBinding binding =
5935 		{
5936 			0u,
5937 			vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
5938 			1,
5939 			vk::VK_SHADER_STAGE_VERTEX_BIT,
5940 			DE_NULL
5941 		};
5942 
5943 		bindings.push_back(binding);
5944 	}
5945 
5946 	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
5947 								vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
5948 
5949 	{
5950 		const vk::VkDescriptorPoolSize			poolSizes		=
5951 		{
5952 			vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
5953 			1
5954 		};
5955 		const vk::VkDescriptorPoolCreateInfo	createInfo		=
5956 		{
5957 			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
5958 			DE_NULL,
5959 			vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
5960 
5961 			1u,
5962 			1u,
5963 			&poolSizes,
5964 		};
5965 
5966 		m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
5967 	}
5968 
5969 	{
5970 		const vk::VkDescriptorSetLayout			layout			= *m_resources.descriptorSetLayout;
5971 		const vk::VkDescriptorSetAllocateInfo	allocateInfo	=
5972 		{
5973 			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
5974 			DE_NULL,
5975 
5976 			*m_descriptorPool,
5977 			1,
5978 			&layout
5979 		};
5980 
5981 		m_descriptorSet = vk::allocateDescriptorSet(vkd, device, &allocateInfo);
5982 
5983 		{
5984 			const vk::VkImageViewCreateInfo createInfo =
5985 			{
5986 				vk::VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
5987 				DE_NULL,
5988 				0u,
5989 
5990 				context.getImage(),
5991 				vk::VK_IMAGE_VIEW_TYPE_2D,
5992 				vk::VK_FORMAT_R8G8B8A8_UNORM,
5993 				vk::makeComponentMappingRGBA(),
5994 				{
5995 					vk::VK_IMAGE_ASPECT_COLOR_BIT,
5996 					0u,
5997 					1u,
5998 					0u,
5999 					1u
6000 				}
6001 			};
6002 
6003 			m_imageView = vk::createImageView(vkd, device, &createInfo);
6004 		}
6005 
6006 		{
6007 			const vk::VkSamplerCreateInfo createInfo =
6008 			{
6009 				vk::VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
6010 				DE_NULL,
6011 				0u,
6012 
6013 				vk::VK_FILTER_NEAREST,
6014 				vk::VK_FILTER_NEAREST,
6015 
6016 				vk::VK_SAMPLER_MIPMAP_MODE_LINEAR,
6017 				vk::VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
6018 				vk::VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
6019 				vk::VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
6020 				0.0f,
6021 				VK_FALSE,
6022 				1.0f,
6023 				VK_FALSE,
6024 				vk::VK_COMPARE_OP_ALWAYS,
6025 				0.0f,
6026 				0.0f,
6027 				vk::VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK,
6028 				VK_FALSE
6029 			};
6030 
6031 			m_sampler = vk::createSampler(vkd, device, &createInfo);
6032 		}
6033 
6034 		{
6035 			const vk::VkDescriptorImageInfo			imageInfo	=
6036 			{
6037 				*m_sampler,
6038 				*m_imageView,
6039 				context.getImageLayout()
6040 			};
6041 			const vk::VkWriteDescriptorSet			write		=
6042 			{
6043 				vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
6044 				DE_NULL,
6045 				*m_descriptorSet,
6046 				0u,
6047 				0u,
6048 				1u,
6049 				vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
6050 				&imageInfo,
6051 				DE_NULL,
6052 				DE_NULL,
6053 			};
6054 
6055 			vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
6056 		}
6057 	}
6058 }
6059 
6060 void RenderVertexSampledImage::submit (SubmitContext& context)
6061 {
6062 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
6063 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
6064 
6065 	vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
6066 
6067 	vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &(*m_descriptorSet), 0u, DE_NULL);
6068 	vkd.cmdDraw(commandBuffer, context.getImageWidth() * context.getImageHeight() * 2, 1, 0, 0);
6069 }
6070 
6071 void RenderVertexSampledImage::verify (VerifyRenderPassContext& context, size_t)
6072 {
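	// Same reference update as RenderVertexStorageImage::verify; on the GPU side the texels are
	// read through a combined image sampler instead of a storage image.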
6073 	for (int pos = 0; pos < (int)(context.getReferenceImage().getWidth() * context.getReferenceImage().getHeight() * 2); pos++)
6074 	{
6075 		const tcu::IVec3	size	= context.getReferenceImage().getAccess().getSize();
6076 		const tcu::UVec4	pixel	= context.getReferenceImage().getAccess().getPixelUint((pos / 2) / size.x(), (pos / 2) % size.x());
6077 
6078 		if (pos % 2 == 0)
6079 			context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), pixel.x(), pixel.y());
6080 		else
6081 			context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), pixel.z(), pixel.w());
6082 	}
6083 }
6084 
6085 class RenderFragmentUniformBuffer : public RenderPassCommand
6086 {
6087 public:
6088 									RenderFragmentUniformBuffer		(void) {}
6089 									~RenderFragmentUniformBuffer	(void);
6090 
6091 	const char*						getName							(void) const { return "RenderFragmentUniformBuffer"; }
6092 	void							logPrepare						(TestLog&, size_t) const;
6093 	void							logSubmit						(TestLog&, size_t) const;
6094 	void							prepare							(PrepareRenderPassContext&);
6095 	void							submit							(SubmitContext& context);
6096 	void							verify							(VerifyRenderPassContext&, size_t);
6097 
6098 private:
6099 	PipelineResources				m_resources;
6100 	vk::Move<vk::VkDescriptorPool>	m_descriptorPool;
6101 	vector<vk::VkDescriptorSet>		m_descriptorSets;
6102 
6103 	vk::VkDeviceSize				m_bufferSize;
6104 	size_t							m_targetWidth;
6105 	size_t							m_targetHeight;
6106 };
6107 
6108 RenderFragmentUniformBuffer::~RenderFragmentUniformBuffer (void)
6109 {
6110 }
6111 
6112 void RenderFragmentUniformBuffer::logPrepare (TestLog& log, size_t commandIndex) const
6113 {
6114 	log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as uniform buffer." << TestLog::EndMessage;
6115 }
6116 
6117 void RenderFragmentUniformBuffer::logSubmit (TestLog& log, size_t commandIndex) const
6118 {
6119 	log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as uniform buffer." << TestLog::EndMessage;
6120 }
6121 
6122 void RenderFragmentUniformBuffer::prepare (PrepareRenderPassContext& context)
6123 {
6124 	const vk::DeviceInterface&					vkd						= context.getContext().getDeviceInterface();
6125 	const vk::VkDevice							device					= context.getContext().getDevice();
6126 	const vk::VkRenderPass						renderPass				= context.getRenderPass();
6127 	const deUint32								subpass					= 0;
6128 	const vk::Unique<vk::VkShaderModule>		vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-quad.vert"), 0));
6129 	const vk::Unique<vk::VkShaderModule>		fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("uniform-buffer.frag"), 0));
6130 	vector<vk::VkDescriptorSetLayoutBinding>	bindings;
6131 
6132 	m_bufferSize	= context.getBufferSize();
6133 	m_targetWidth	= context.getTargetWidth();
6134 	m_targetHeight	= context.getTargetHeight();
6135 
6136 	{
6137 		const vk::VkDescriptorSetLayoutBinding binding =
6138 		{
6139 			0u,
6140 			vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
6141 			1,
6142 			vk::VK_SHADER_STAGE_FRAGMENT_BIT,
6143 			DE_NULL
6144 		};
6145 
6146 		bindings.push_back(binding);
6147 	}
6148 	const vk::VkPushConstantRange pushConstantRange =
6149 	{
6150 		vk::VK_SHADER_STAGE_FRAGMENT_BIT,
6151 		0u,
6152 		8u
6153 	};
6154 
6155 	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
6156 								vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 1u, &pushConstantRange, m_resources);
6157 
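	// The test buffer may be larger than MAX_UNIFORM_BUFFER_SIZE, so it is exposed as a sequence of
	// fixed-size uniform-buffer slices, each bound through its own descriptor set.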
6158 	{
6159 		const deUint32							descriptorCount	= (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE));
6160 		const vk::VkDescriptorPoolSize			poolSizes		=
6161 		{
6162 			vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
6163 			descriptorCount
6164 		};
6165 		const vk::VkDescriptorPoolCreateInfo	createInfo		=
6166 		{
6167 			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
6168 			DE_NULL,
6169 			vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
6170 
6171 			descriptorCount,
6172 			1u,
6173 			&poolSizes,
6174 		};
6175 
6176 		m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
6177 		m_descriptorSets.resize(descriptorCount);
6178 	}
6179 
6180 	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
6181 	{
6182 		const vk::VkDescriptorSetLayout			layout			= *m_resources.descriptorSetLayout;
6183 		const vk::VkDescriptorSetAllocateInfo	allocateInfo	=
6184 		{
6185 			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
6186 			DE_NULL,
6187 
6188 			*m_descriptorPool,
6189 			1,
6190 			&layout
6191 		};
6192 
6193 		m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();
6194 
6195 		{
6196 			const vk::VkDescriptorBufferInfo		bufferInfo	=
6197 			{
6198 				context.getBuffer(),
6199 				(vk::VkDeviceSize)(descriptorSetNdx * (size_t)MAX_UNIFORM_BUFFER_SIZE),
6200 				m_bufferSize < (descriptorSetNdx + 1) * (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
6201 					? m_bufferSize - descriptorSetNdx * (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
6202 					: (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
6203 			};
6204 			const vk::VkWriteDescriptorSet			write		=
6205 			{
6206 				vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
6207 				DE_NULL,
6208 				m_descriptorSets[descriptorSetNdx],
6209 				0u,
6210 				0u,
6211 				1u,
6212 				vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
6213 				DE_NULL,
6214 				&bufferInfo,
6215 				DE_NULL,
6216 			};
6217 
6218 			vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
6219 		}
6220 	}
6221 }
6222 
6223 void RenderFragmentUniformBuffer::submit (SubmitContext& context)
6224 {
6225 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
6226 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
6227 
6228 	vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
6229 
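	// Draw one full-screen quad per buffer slice; push constants tell the fragment shader which
	// slice it reads (callId) and how many dependent lookups to chain per pixel (valuesPerPixel).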
6230 	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
6231 	{
6232 		const struct
6233 		{
6234 			const deUint32	callId;
6235 			const deUint32	valuesPerPixel;
6236 		} callParams =
6237 		{
6238 			(deUint32)descriptorSetNdx,
6239 			(deUint32)divRoundUp<size_t>(m_descriptorSets.size() * (MAX_UNIFORM_BUFFER_SIZE / 4), m_targetWidth * m_targetHeight)
6240 		};
6241 
6242 		vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
6243 		vkd.cmdPushConstants(commandBuffer, *m_resources.pipelineLayout, vk::VK_SHADER_STAGE_FRAGMENT_BIT, 0u, (deUint32)sizeof(callParams), &callParams);
6244 		vkd.cmdDraw(commandBuffer, 6, 1, 0, 0);
6245 	}
6246 }
6247 
6248 void RenderFragmentUniformBuffer::verify (VerifyRenderPassContext& context, size_t)
6249 {
6250 	const deUint32	valuesPerPixel	= (deUint32)divRoundUp<size_t>(m_descriptorSets.size() * (MAX_UNIFORM_BUFFER_SIZE / 4), m_targetWidth * m_targetHeight);
6251 	const size_t	arraySize		= MAX_UNIFORM_BUFFER_SIZE / (sizeof(deUint32) * 4);
6252 	const size_t	arrayIntSize	= arraySize * 4;
6253 
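	// Recompute the fragment shader's value chain on the CPU: starting from the pixel id, follow
	// valuesPerPixel dependent 32-bit reads through the reference data and unpack the final value
	// into an RGBA color.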
6254 	for (int y = 0; y < context.getReferenceTarget().getSize().y(); y++)
6255 	for (int x = 0; x < context.getReferenceTarget().getSize().x(); x++)
6256 	{
6257 		const size_t firstDescriptorSetNdx = de::min<size_t>((y * 256u + x) / (arrayIntSize / valuesPerPixel), m_descriptorSets.size() - 1);
6258 
6259 		for (size_t descriptorSetNdx = firstDescriptorSetNdx; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
6260 		{
6261 			const size_t	offset	= descriptorSetNdx * MAX_UNIFORM_BUFFER_SIZE;
6262 			const deUint32	callId	= (deUint32)descriptorSetNdx;
6263 
6264 			const deUint32	id		= callId * ((deUint32)arrayIntSize / valuesPerPixel) + (deUint32)y * 256u + (deUint32)x;
6265 
6266 			if (y * 256u + x < callId * (arrayIntSize / valuesPerPixel))
6267 				continue;
6268 			else
6269 			{
6270 				deUint32 value = id;
6271 
6272 				for (deUint32 i = 0; i < valuesPerPixel; i++)
6273 				{
6274 					value	= ((deUint32)context.getReference().get(offset + (value % (MAX_UNIFORM_BUFFER_SIZE / sizeof(deUint32))) * 4 + 0))
6275 							| (((deUint32)context.getReference().get(offset + (value % (MAX_UNIFORM_BUFFER_SIZE / sizeof(deUint32))) * 4 + 1)) << 8u)
6276 							| (((deUint32)context.getReference().get(offset + (value % (MAX_UNIFORM_BUFFER_SIZE / sizeof(deUint32))) * 4 + 2)) << 16u)
6277 							| (((deUint32)context.getReference().get(offset + (value % (MAX_UNIFORM_BUFFER_SIZE / sizeof(deUint32))) * 4 + 3)) << 24u);
6278 
6279 				}
6280 				const UVec4	vec	((value >>  0u) & 0xFFu,
6281 								 (value >>  8u) & 0xFFu,
6282 								 (value >> 16u) & 0xFFu,
6283 								 (value >> 24u) & 0xFFu);
6284 
6285 				context.getReferenceTarget().getAccess().setPixel(vec.asFloat() / Vec4(255.0f), x, y);
6286 			}
6287 		}
6288 	}
6289 }
6290 
6291 class RenderFragmentStorageBuffer : public RenderPassCommand
6292 {
6293 public:
6294 									RenderFragmentStorageBuffer		(void) {}
6295 									~RenderFragmentStorageBuffer	(void);
6296 
6297 	const char*						getName							(void) const { return "RenderFragmentStorageBuffer"; }
6298 	void							logPrepare						(TestLog&, size_t) const;
6299 	void							logSubmit						(TestLog&, size_t) const;
6300 	void							prepare							(PrepareRenderPassContext&);
6301 	void							submit							(SubmitContext& context);
6302 	void							verify							(VerifyRenderPassContext&, size_t);
6303 
6304 private:
6305 	PipelineResources				m_resources;
6306 	vk::Move<vk::VkDescriptorPool>	m_descriptorPool;
6307 	vk::Move<vk::VkDescriptorSet>	m_descriptorSet;
6308 
6309 	vk::VkDeviceSize				m_bufferSize;
6310 	size_t							m_targetWidth;
6311 	size_t							m_targetHeight;
6312 };
6313 
6314 RenderFragmentStorageBuffer::~RenderFragmentStorageBuffer (void)
6315 {
6316 }
6317 
6318 void RenderFragmentStorageBuffer::logPrepare (TestLog& log, size_t commandIndex) const
6319 {
6320 	log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline to render buffer as storage buffer." << TestLog::EndMessage;
6321 }
6322 
6323 void RenderFragmentStorageBuffer::logSubmit (TestLog& log, size_t commandIndex) const
6324 {
6325 	log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as storage buffer." << TestLog::EndMessage;
6326 }
6327 
6328 void RenderFragmentStorageBuffer::prepare (PrepareRenderPassContext& context)
6329 {
6330 	const vk::DeviceInterface&					vkd						= context.getContext().getDeviceInterface();
6331 	const vk::VkDevice							device					= context.getContext().getDevice();
6332 	const vk::VkRenderPass						renderPass				= context.getRenderPass();
6333 	const deUint32								subpass					= 0;
6334 	const vk::Unique<vk::VkShaderModule>		vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-quad.vert"), 0));
6335 	const vk::Unique<vk::VkShaderModule>		fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("storage-buffer.frag"), 0));
6336 	vector<vk::VkDescriptorSetLayoutBinding>	bindings;
6337 
6338 	m_bufferSize	= context.getBufferSize();
6339 	m_targetWidth	= context.getTargetWidth();
6340 	m_targetHeight	= context.getTargetHeight();
6341 
6342 	{
6343 		const vk::VkDescriptorSetLayoutBinding binding =
6344 		{
6345 			0u,
6346 			vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
6347 			1,
6348 			vk::VK_SHADER_STAGE_FRAGMENT_BIT,
6349 			DE_NULL
6350 		};
6351 
6352 		bindings.push_back(binding);
6353 	}
6354 	const vk::VkPushConstantRange pushConstantRange =
6355 	{
6356 		vk::VK_SHADER_STAGE_FRAGMENT_BIT,
6357 		0u,
6358 		12u
6359 	};
6360 
6361 	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
6362 								vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 1u, &pushConstantRange, m_resources);
6363 
6364 	{
6365 		const deUint32							descriptorCount	= 1;
6366 		const vk::VkDescriptorPoolSize			poolSizes		=
6367 		{
6368 			vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
6369 			descriptorCount
6370 		};
6371 		const vk::VkDescriptorPoolCreateInfo	createInfo		=
6372 		{
6373 			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
6374 			DE_NULL,
6375 			vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
6376 
6377 			descriptorCount,
6378 			1u,
6379 			&poolSizes,
6380 		};
6381 
6382 		m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
6383 	}
6384 
6385 	{
6386 		const vk::VkDescriptorSetLayout			layout			= *m_resources.descriptorSetLayout;
6387 		const vk::VkDescriptorSetAllocateInfo	allocateInfo	=
6388 		{
6389 			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
6390 			DE_NULL,
6391 
6392 			*m_descriptorPool,
6393 			1,
6394 			&layout
6395 		};
6396 
6397 		m_descriptorSet = vk::allocateDescriptorSet(vkd, device, &allocateInfo);
6398 
6399 		{
6400 			const vk::VkDescriptorBufferInfo	bufferInfo	=
6401 			{
6402 				context.getBuffer(),
6403 				0u,
6404 				m_bufferSize
6405 			};
6406 			const vk::VkWriteDescriptorSet		write		=
6407 			{
6408 				vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
6409 				DE_NULL,
6410 				m_descriptorSet.get(),
6411 				0u,
6412 				0u,
6413 				1u,
6414 				vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
6415 				DE_NULL,
6416 				&bufferInfo,
6417 				DE_NULL,
6418 			};
6419 
6420 			vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
6421 		}
6422 	}
6423 }
6424 
6425 void RenderFragmentStorageBuffer::submit (SubmitContext& context)
6426 {
6427 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
6428 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
6429 
6430 	vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
6431 
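	// The whole buffer is bound through a single storage-buffer descriptor; push constants carry
	// the per-pixel lookup count and the buffer size used for wrapping indices.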
6432 	const struct
6433 	{
6434 		const deUint32	valuesPerPixel;
6435 		const deUint32	bufferSize;
6436 	} callParams =
6437 	{
6438 		(deUint32)divRoundUp<vk::VkDeviceSize>(m_bufferSize / 4, m_targetWidth * m_targetHeight),
6439 		(deUint32)m_bufferSize
6440 	};
6441 
6442 	vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSet.get(), 0u, DE_NULL);
6443 	vkd.cmdPushConstants(commandBuffer, *m_resources.pipelineLayout, vk::VK_SHADER_STAGE_FRAGMENT_BIT, 0u, (deUint32)sizeof(callParams), &callParams);
6444 	vkd.cmdDraw(commandBuffer, 6, 1, 0, 0);
6445 }
6446 
6447 void RenderFragmentStorageBuffer::verify (VerifyRenderPassContext& context, size_t)
6448 {
6449 	const deUint32	valuesPerPixel	= (deUint32)divRoundUp<vk::VkDeviceSize>(m_bufferSize / 4, m_targetWidth * m_targetHeight);
6450 
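	// Same CPU replay of the dependent-read chain as in the uniform-buffer verification, but over
	// the whole buffer instead of fixed-size slices.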
6451 	for (int y = 0; y < context.getReferenceTarget().getSize().y(); y++)
6452 	for (int x = 0; x < context.getReferenceTarget().getSize().x(); x++)
6453 	{
6454 		const deUint32	id		= (deUint32)y * 256u + (deUint32)x;
6455 
6456 		deUint32 value = id;
6457 
6458 		for (deUint32 i = 0; i < valuesPerPixel; i++)
6459 		{
6460 			value	= (((deUint32)context.getReference().get((size_t)(value % (m_bufferSize / sizeof(deUint32))) * 4 + 0)) << 0u)
6461 					| (((deUint32)context.getReference().get((size_t)(value % (m_bufferSize / sizeof(deUint32))) * 4 + 1)) << 8u)
6462 					| (((deUint32)context.getReference().get((size_t)(value % (m_bufferSize / sizeof(deUint32))) * 4 + 2)) << 16u)
6463 					| (((deUint32)context.getReference().get((size_t)(value % (m_bufferSize / sizeof(deUint32))) * 4 + 3)) << 24u);
6464 
6465 		}
6466 		const UVec4	vec	((value >>  0u) & 0xFFu,
6467 						 (value >>  8u) & 0xFFu,
6468 						 (value >> 16u) & 0xFFu,
6469 						 (value >> 24u) & 0xFFu);
6470 
6471 		context.getReferenceTarget().getAccess().setPixel(vec.asFloat() / Vec4(255.0f), x, y);
6472 	}
6473 }
6474 
6475 class RenderFragmentUniformTexelBuffer : public RenderPassCommand
6476 {
6477 public:
6478 									RenderFragmentUniformTexelBuffer	(void) {}
6479 									~RenderFragmentUniformTexelBuffer	(void);
6480 
6481 	const char*						getName								(void) const { return "RenderFragmentUniformTexelBuffer"; }
6482 	void							logPrepare							(TestLog&, size_t) const;
6483 	void							logSubmit							(TestLog&, size_t) const;
6484 	void							prepare								(PrepareRenderPassContext&);
6485 	void							submit								(SubmitContext& context);
6486 	void							verify								(VerifyRenderPassContext&, size_t);
6487 
6488 private:
6489 	PipelineResources				m_resources;
6490 	vk::Move<vk::VkDescriptorPool>	m_descriptorPool;
6491 	vector<vk::VkDescriptorSet>		m_descriptorSets;
6492 	vector<vk::VkBufferView>		m_bufferViews;
6493 
6494 	const vk::DeviceInterface*		m_vkd;
6495 	vk::VkDevice					m_device;
6496 	vk::VkDeviceSize				m_bufferSize;
6497 	deUint32						m_maxUniformTexelCount;
6498 	size_t							m_targetWidth;
6499 	size_t							m_targetHeight;
6500 };
6501 
6502 RenderFragmentUniformTexelBuffer::~RenderFragmentUniformTexelBuffer (void)
6503 {
6504 	for (size_t bufferViewNdx = 0; bufferViewNdx < m_bufferViews.size(); bufferViewNdx++)
6505 	{
6506 		if (!!m_bufferViews[bufferViewNdx])
6507 		{
6508 			m_vkd->destroyBufferView(m_device, m_bufferViews[bufferViewNdx], DE_NULL);
6509 			m_bufferViews[bufferViewNdx] = (vk::VkBufferView)0;
6510 		}
6511 	}
6512 }
6513 
6514 void RenderFragmentUniformTexelBuffer::logPrepare (TestLog& log, size_t commandIndex) const
6515 {
6516 	log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as uniform texel buffer." << TestLog::EndMessage;
6517 }
6518 
6519 void RenderFragmentUniformTexelBuffer::logSubmit (TestLog& log, size_t commandIndex) const
6520 {
6521 	log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as uniform texel buffer." << TestLog::EndMessage;
6522 }
6523 
6524 void RenderFragmentUniformTexelBuffer::prepare (PrepareRenderPassContext& context)
6525 {
6526 	const vk::InstanceInterface&				vki						= context.getContext().getInstanceInterface();
6527 	const vk::VkPhysicalDevice					physicalDevice			= context.getContext().getPhysicalDevice();
6528 	const vk::DeviceInterface&					vkd						= context.getContext().getDeviceInterface();
6529 	const vk::VkDevice							device					= context.getContext().getDevice();
6530 	const vk::VkRenderPass						renderPass				= context.getRenderPass();
6531 	const deUint32								subpass					= 0;
6532 	const vk::Unique<vk::VkShaderModule>		vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-quad.vert"), 0));
6533 	const vk::Unique<vk::VkShaderModule>		fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("uniform-texel-buffer.frag"), 0));
6534 	vector<vk::VkDescriptorSetLayoutBinding>	bindings;
6535 
6536 	m_device				= device;
6537 	m_vkd					= &vkd;
6538 	m_bufferSize			= context.getBufferSize();
6539 	m_maxUniformTexelCount	= vk::getPhysicalDeviceProperties(vki, physicalDevice).limits.maxTexelBufferElements;
6540 	m_targetWidth			= context.getTargetWidth();
6541 	m_targetHeight			= context.getTargetHeight();
6542 
6543 	{
6544 		const vk::VkDescriptorSetLayoutBinding binding =
6545 		{
6546 			0u,
6547 			vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
6548 			1,
6549 			vk::VK_SHADER_STAGE_FRAGMENT_BIT,
6550 			DE_NULL
6551 		};
6552 
6553 		bindings.push_back(binding);
6554 	}
6555 	const vk::VkPushConstantRange pushConstantRange =
6556 	{
6557 		vk::VK_SHADER_STAGE_FRAGMENT_BIT,
6558 		0u,
6559 		12u
6560 	};
6561 
6562 	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
6563 								vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 1u, &pushConstantRange, m_resources);
6564 
6565 	{
6566 		const deUint32							descriptorCount	= (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)m_maxUniformTexelCount * 4));
6567 		const vk::VkDescriptorPoolSize			poolSizes		=
6568 		{
6569 			vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
6570 			descriptorCount
6571 		};
6572 		const vk::VkDescriptorPoolCreateInfo	createInfo		=
6573 		{
6574 			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
6575 			DE_NULL,
6576 			vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
6577 
6578 			descriptorCount,
6579 			1u,
6580 			&poolSizes,
6581 		};
6582 
6583 		m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
6584 		m_descriptorSets.resize(descriptorCount, (vk::VkDescriptorSet)0);
6585 		m_bufferViews.resize(descriptorCount, (vk::VkBufferView)0);
6586 	}
6587 
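	// Create one R32_UINT buffer view per slice of at most maxTexelBufferElements texels; the raw
	// view handles are kept so the destructor can destroy them explicitly.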
6588 	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
6589 	{
6590 		const deUint32							count			= (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxUniformTexelCount * 4
6591 																? m_bufferSize - descriptorSetNdx * m_maxUniformTexelCount * 4
6592 																: m_maxUniformTexelCount * 4) / 4;
6593 		const vk::VkDescriptorSetLayout			layout			= *m_resources.descriptorSetLayout;
6594 		const vk::VkDescriptorSetAllocateInfo	allocateInfo	=
6595 		{
6596 			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
6597 			DE_NULL,
6598 
6599 			*m_descriptorPool,
6600 			1,
6601 			&layout
6602 		};
6603 
6604 		m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();
6605 
6606 		{
6607 			const vk::VkBufferViewCreateInfo createInfo =
6608 			{
6609 				vk::VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
6610 				DE_NULL,
6611 				0u,
6612 
6613 				context.getBuffer(),
6614 				vk::VK_FORMAT_R32_UINT,
6615 				descriptorSetNdx * m_maxUniformTexelCount * 4,
6616 				count * 4
6617 			};
6618 
6619 			VK_CHECK(vkd.createBufferView(device, &createInfo, DE_NULL, &m_bufferViews[descriptorSetNdx]));
6620 		}
6621 
6622 		{
6623 			const vk::VkWriteDescriptorSet			write		=
6624 			{
6625 				vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
6626 				DE_NULL,
6627 				m_descriptorSets[descriptorSetNdx],
6628 				0u,
6629 				0u,
6630 				1u,
6631 				vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
6632 				DE_NULL,
6633 				DE_NULL,
6634 				&m_bufferViews[descriptorSetNdx]
6635 			};
6636 
6637 			vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
6638 		}
6639 	}
6640 }
6641 
6642 void RenderFragmentUniformTexelBuffer::submit (SubmitContext& context)
6643 {
6644 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
6645 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
6646 
6647 	vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
6648 
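	// Draw one quad per texel-buffer slice; push constants pass the slice index, the lookups per
	// pixel and the maximum texel count so the shader can address its values.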
6649 	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
6650 	{
6651 		const struct
6652 		{
6653 			const deUint32	callId;
6654 			const deUint32	valuesPerPixel;
6655 			const deUint32	maxUniformTexelCount;
6656 		} callParams =
6657 		{
6658 			(deUint32)descriptorSetNdx,
6659 			(deUint32)divRoundUp<size_t>(m_descriptorSets.size() * de::min<size_t>((size_t)m_bufferSize / 4, m_maxUniformTexelCount), m_targetWidth * m_targetHeight),
6660 			m_maxUniformTexelCount
6661 		};
6662 
6663 		vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
6664 		vkd.cmdPushConstants(commandBuffer, *m_resources.pipelineLayout, vk::VK_SHADER_STAGE_FRAGMENT_BIT, 0u, (deUint32)sizeof(callParams), &callParams);
6665 		vkd.cmdDraw(commandBuffer, 6, 1, 0, 0);
6666 	}
6667 }
6668 
6669 void RenderFragmentUniformTexelBuffer::verify (VerifyRenderPassContext& context, size_t)
6670 {
6671 	const deUint32	valuesPerPixel	= (deUint32)divRoundUp<size_t>(m_descriptorSets.size() * de::min<size_t>((size_t)m_bufferSize / 4, m_maxUniformTexelCount), m_targetWidth * m_targetHeight);
6672 
6673 	for (int y = 0; y < context.getReferenceTarget().getSize().y(); y++)
6674 	for (int x = 0; x < context.getReferenceTarget().getSize().x(); x++)
6675 	{
6676 		const size_t firstDescriptorSetNdx = de::min<size_t>((y * 256u + x) / (m_maxUniformTexelCount / valuesPerPixel), m_descriptorSets.size() - 1);
6677 
6678 		for (size_t descriptorSetNdx = firstDescriptorSetNdx; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
6679 		{
6680 			const size_t	offset	= descriptorSetNdx * m_maxUniformTexelCount * 4;
6681 			const deUint32	callId	= (deUint32)descriptorSetNdx;
6682 
6683 			const deUint32	id		= (deUint32)y * 256u + (deUint32)x;
6684 			const deUint32	count	= (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxUniformTexelCount * 4
6685 									? m_bufferSize - descriptorSetNdx * m_maxUniformTexelCount * 4
6686 									: m_maxUniformTexelCount * 4) / 4;
6687 
6688 			if (y * 256u + x < callId * (m_maxUniformTexelCount / valuesPerPixel))
6689 				continue;
6690 			else
6691 			{
6692 				deUint32 value = id;
6693 
6694 				for (deUint32 i = 0; i < valuesPerPixel; i++)
6695 				{
6696 					value	= ((deUint32)context.getReference().get(offset + (value % count) * 4 + 0))
6697 							| (((deUint32)context.getReference().get(offset + (value % count) * 4 + 1)) << 8u)
6698 							| (((deUint32)context.getReference().get(offset + (value % count) * 4 + 2)) << 16u)
6699 							| (((deUint32)context.getReference().get(offset + (value % count) * 4 + 3)) << 24u);
6700 
6701 				}
6702 				const UVec4	vec	((value >>  0u) & 0xFFu,
6703 								 (value >>  8u) & 0xFFu,
6704 								 (value >> 16u) & 0xFFu,
6705 								 (value >> 24u) & 0xFFu);
6706 
6707 				context.getReferenceTarget().getAccess().setPixel(vec.asFloat() / Vec4(255.0f), x, y);
6708 			}
6709 		}
6710 	}
6711 }
6712 
6713 class RenderFragmentStorageTexelBuffer : public RenderPassCommand
6714 {
6715 public:
6716 									RenderFragmentStorageTexelBuffer	(void) {}
6717 									~RenderFragmentStorageTexelBuffer	(void);
6718 
6719 	const char*						getName								(void) const { return "RenderFragmentStorageTexelBuffer"; }
6720 	void							logPrepare							(TestLog&, size_t) const;
6721 	void							logSubmit							(TestLog&, size_t) const;
6722 	void							prepare								(PrepareRenderPassContext&);
6723 	void							submit								(SubmitContext& context);
6724 	void							verify								(VerifyRenderPassContext&, size_t);
6725 
6726 private:
6727 	PipelineResources				m_resources;
6728 	vk::Move<vk::VkDescriptorPool>	m_descriptorPool;
6729 	vector<vk::VkDescriptorSet>		m_descriptorSets;
6730 	vector<vk::VkBufferView>		m_bufferViews;
6731 
6732 	const vk::DeviceInterface*		m_vkd;
6733 	vk::VkDevice					m_device;
6734 	vk::VkDeviceSize				m_bufferSize;
6735 	deUint32						m_maxStorageTexelCount;
6736 	size_t							m_targetWidth;
6737 	size_t							m_targetHeight;
6738 };
6739 
6740 RenderFragmentStorageTexelBuffer::~RenderFragmentStorageTexelBuffer (void)
6741 {
6742 	for (size_t bufferViewNdx = 0; bufferViewNdx < m_bufferViews.size(); bufferViewNdx++)
6743 	{
6744 		if (!!m_bufferViews[bufferViewNdx])
6745 		{
6746 			m_vkd->destroyBufferView(m_device, m_bufferViews[bufferViewNdx], DE_NULL);
6747 			m_bufferViews[bufferViewNdx] = (vk::VkBufferView)0;
6748 		}
6749 	}
6750 }
6751 
6752 void RenderFragmentStorageTexelBuffer::logPrepare (TestLog& log, size_t commandIndex) const
6753 {
6754 	log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as storage texel buffer." << TestLog::EndMessage;
6755 }
6756 
6757 void RenderFragmentStorageTexelBuffer::logSubmit (TestLog& log, size_t commandIndex) const
6758 {
6759 	log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as storage texel buffer." << TestLog::EndMessage;
6760 }
6761 
6762 void RenderFragmentStorageTexelBuffer::prepare (PrepareRenderPassContext& context)
6763 {
6764 	const vk::InstanceInterface&				vki						= context.getContext().getInstanceInterface();
6765 	const vk::VkPhysicalDevice					physicalDevice			= context.getContext().getPhysicalDevice();
6766 	const vk::DeviceInterface&					vkd						= context.getContext().getDeviceInterface();
6767 	const vk::VkDevice							device					= context.getContext().getDevice();
6768 	const vk::VkRenderPass						renderPass				= context.getRenderPass();
6769 	const deUint32								subpass					= 0;
6770 	const vk::Unique<vk::VkShaderModule>		vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-quad.vert"), 0));
6771 	const vk::Unique<vk::VkShaderModule>		fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("storage-texel-buffer.frag"), 0));
6772 	vector<vk::VkDescriptorSetLayoutBinding>	bindings;
6773 
6774 	m_device				= device;
6775 	m_vkd					= &vkd;
6776 	m_bufferSize			= context.getBufferSize();
6777 	m_maxStorageTexelCount	= vk::getPhysicalDeviceProperties(vki, physicalDevice).limits.maxTexelBufferElements;
6778 	m_targetWidth			= context.getTargetWidth();
6779 	m_targetHeight			= context.getTargetHeight();
6780 
6781 	{
6782 		const vk::VkDescriptorSetLayoutBinding binding =
6783 		{
6784 			0u,
6785 			vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
6786 			1,
6787 			vk::VK_SHADER_STAGE_FRAGMENT_BIT,
6788 			DE_NULL
6789 		};
6790 
6791 		bindings.push_back(binding);
6792 	}
6793 	const vk::VkPushConstantRange pushConstantRange =
6794 	{
6795 		vk::VK_SHADER_STAGE_FRAGMENT_BIT,
6796 		0u,
6797 		16u
6798 	};
6799 
6800 	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
6801 								vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 1u, &pushConstantRange, m_resources);
6802 
6803 	{
6804 		const deUint32							descriptorCount	= (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)m_maxStorageTexelCount * 4));
6805 		const vk::VkDescriptorPoolSize			poolSizes		=
6806 		{
6807 			vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
6808 			descriptorCount
6809 		};
6810 		const vk::VkDescriptorPoolCreateInfo	createInfo		=
6811 		{
6812 			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
6813 			DE_NULL,
6814 			vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
6815 
6816 			descriptorCount,
6817 			1u,
6818 			&poolSizes,
6819 		};
6820 
6821 		m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
6822 		m_descriptorSets.resize(descriptorCount, (vk::VkDescriptorSet)0);
6823 		m_bufferViews.resize(descriptorCount, (vk::VkBufferView)0);
6824 	}
6825 
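	// As with the uniform texel buffer case, split the buffer into maxTexelBufferElements-sized
	// R32_UINT views, one descriptor set per view.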
6826 	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
6827 	{
6828 		const deUint32							count			= (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxStorageTexelCount * 4
6829 																? m_bufferSize - descriptorSetNdx * m_maxStorageTexelCount * 4
6830 																: m_maxStorageTexelCount * 4) / 4;
6831 		const vk::VkDescriptorSetLayout			layout			= *m_resources.descriptorSetLayout;
6832 		const vk::VkDescriptorSetAllocateInfo	allocateInfo	=
6833 		{
6834 			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
6835 			DE_NULL,
6836 
6837 			*m_descriptorPool,
6838 			1,
6839 			&layout
6840 		};
6841 
6842 		m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();
6843 
6844 		{
6845 			const vk::VkBufferViewCreateInfo createInfo =
6846 			{
6847 				vk::VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
6848 				DE_NULL,
6849 				0u,
6850 
6851 				context.getBuffer(),
6852 				vk::VK_FORMAT_R32_UINT,
6853 				descriptorSetNdx * m_maxStorageTexelCount * 4,
6854 				count * 4
6855 			};
6856 
6857 			VK_CHECK(vkd.createBufferView(device, &createInfo, DE_NULL, &m_bufferViews[descriptorSetNdx]));
6858 		}
6859 
6860 		{
6861 			const vk::VkWriteDescriptorSet			write		=
6862 			{
6863 				vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
6864 				DE_NULL,
6865 				m_descriptorSets[descriptorSetNdx],
6866 				0u,
6867 				0u,
6868 				1u,
6869 				vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
6870 				DE_NULL,
6871 				DE_NULL,
6872 				&m_bufferViews[descriptorSetNdx]
6873 			};
6874 
6875 			vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
6876 		}
6877 	}
6878 }
6879 
6880 void RenderFragmentStorageTexelBuffer::submit (SubmitContext& context)
6881 {
6882 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
6883 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
6884 
6885 	vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
6886 
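	// Push constants additionally include the width (texel count) of this particular view, since
	// the last slice may be shorter than maxTexelBufferElements.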
6887 	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
6888 	{
6889 		const struct
6890 		{
6891 			const deUint32	callId;
6892 			const deUint32	valuesPerPixel;
6893 			const deUint32	maxStorageTexelCount;
6894 			const deUint32	width;
6895 		} callParams =
6896 		{
6897 			(deUint32)descriptorSetNdx,
6898 			(deUint32)divRoundUp<size_t>(m_descriptorSets.size() * de::min<size_t>(m_maxStorageTexelCount, (size_t)m_bufferSize / 4), m_targetWidth * m_targetHeight),
6899 			m_maxStorageTexelCount,
6900 			(deUint32)(m_bufferSize < (descriptorSetNdx + 1u) * m_maxStorageTexelCount * 4u
6901 								? m_bufferSize - descriptorSetNdx * m_maxStorageTexelCount * 4u
6902 								: m_maxStorageTexelCount * 4u) / 4u
6903 		};
6904 
6905 		vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
6906 		vkd.cmdPushConstants(commandBuffer, *m_resources.pipelineLayout, vk::VK_SHADER_STAGE_FRAGMENT_BIT, 0u, (deUint32)sizeof(callParams), &callParams);
6907 		vkd.cmdDraw(commandBuffer, 6, 1, 0, 0);
6908 	}
6909 }
6910 
6911 void RenderFragmentStorageTexelBuffer::verify (VerifyRenderPassContext& context, size_t)
6912 {
6913 	const deUint32	valuesPerPixel	= (deUint32)divRoundUp<size_t>(m_descriptorSets.size() * de::min<size_t>(m_maxStorageTexelCount, (size_t)m_bufferSize / 4), m_targetWidth * m_targetHeight);
6914 
6915 	for (int y = 0; y < context.getReferenceTarget().getSize().y(); y++)
6916 	for (int x = 0; x < context.getReferenceTarget().getSize().x(); x++)
6917 	{
6918 		const size_t firstDescriptorSetNdx = de::min<size_t>((y * 256u + x) / (m_maxStorageTexelCount / valuesPerPixel), m_descriptorSets.size() - 1);
6919 
6920 		for (size_t descriptorSetNdx = firstDescriptorSetNdx; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
6921 		{
6922 			const size_t	offset	= descriptorSetNdx * m_maxStorageTexelCount * 4;
6923 			const deUint32	callId	= (deUint32)descriptorSetNdx;
6924 
6925 			const deUint32	id		= (deUint32)y * 256u + (deUint32)x;
6926 			const deUint32	count	= (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxStorageTexelCount * 4
6927 									? m_bufferSize - descriptorSetNdx * m_maxStorageTexelCount * 4
6928 									: m_maxStorageTexelCount * 4) / 4;
6929 
6930 			if (y * 256u + x < callId * (m_maxStorageTexelCount / valuesPerPixel))
6931 				continue;
6932 			else
6933 			{
6934 				deUint32 value = id;
6935 
6936 				for (deUint32 i = 0; i < valuesPerPixel; i++)
6937 				{
6938 					value	= ((deUint32)context.getReference().get(offset + (value % count) * 4 + 0))
6939 							| (((deUint32)context.getReference().get(offset + (value % count) * 4 + 1)) << 8u)
6940 							| (((deUint32)context.getReference().get(offset + (value % count) * 4 + 2)) << 16u)
6941 							| (((deUint32)context.getReference().get(offset + (value % count) * 4 + 3)) << 24u);
6942 
6943 				}
6944 				const UVec4	vec	((value >>  0u) & 0xFFu,
6945 								 (value >>  8u) & 0xFFu,
6946 								 (value >> 16u) & 0xFFu,
6947 								 (value >> 24u) & 0xFFu);
6948 
6949 				context.getReferenceTarget().getAccess().setPixel(vec.asFloat() / Vec4(255.0f), x, y);
6950 			}
6951 		}
6952 	}
6953 }
6954 
6955 class RenderFragmentStorageImage : public RenderPassCommand
6956 {
6957 public:
6958 									RenderFragmentStorageImage	(void) {}
6959 									~RenderFragmentStorageImage	(void);
6960 
6961 	const char*						getName						(void) const { return "RenderFragmentStorageImage"; }
6962 	void							logPrepare					(TestLog&, size_t) const;
6963 	void							logSubmit					(TestLog&, size_t) const;
6964 	void							prepare						(PrepareRenderPassContext&);
6965 	void							submit						(SubmitContext& context);
6966 	void							verify						(VerifyRenderPassContext&, size_t);
6967 
6968 private:
6969 	PipelineResources				m_resources;
6970 	vk::Move<vk::VkDescriptorPool>	m_descriptorPool;
6971 	vk::Move<vk::VkDescriptorSet>	m_descriptorSet;
6972 	vk::Move<vk::VkImageView>		m_imageView;
6973 };
6974 
6975 RenderFragmentStorageImage::~RenderFragmentStorageImage (void)
6976 {
6977 }
6978 
6979 void RenderFragmentStorageImage::logPrepare (TestLog& log, size_t commandIndex) const
6980 {
6981 	log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render storage image." << TestLog::EndMessage;
6982 }
6983 
6984 void RenderFragmentStorageImage::logSubmit (TestLog& log, size_t commandIndex) const
6985 {
6986 	log << TestLog::Message << commandIndex << ":" << getName() << " Render using storage image." << TestLog::EndMessage;
6987 }
6988 
6989 void RenderFragmentStorageImage::prepare (PrepareRenderPassContext& context)
6990 {
6991 	const vk::DeviceInterface&					vkd						= context.getContext().getDeviceInterface();
6992 	const vk::VkDevice							device					= context.getContext().getDevice();
6993 	const vk::VkRenderPass						renderPass				= context.getRenderPass();
6994 	const deUint32								subpass					= 0;
6995 	const vk::Unique<vk::VkShaderModule>		vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-quad.vert"), 0));
6996 	const vk::Unique<vk::VkShaderModule>		fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("storage-image.frag"), 0));
6997 	vector<vk::VkDescriptorSetLayoutBinding>	bindings;
6998 
6999 	{
7000 		const vk::VkDescriptorSetLayoutBinding binding =
7001 		{
7002 			0u,
7003 			vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
7004 			1,
7005 			vk::VK_SHADER_STAGE_FRAGMENT_BIT,
7006 			DE_NULL
7007 		};
7008 
7009 		bindings.push_back(binding);
7010 	}
7011 
7012 	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
7013 								vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 0u, DE_NULL, m_resources);
7014 
7015 	{
7016 		const vk::VkDescriptorPoolSize			poolSizes		=
7017 		{
7018 			vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
7019 			1
7020 		};
7021 		const vk::VkDescriptorPoolCreateInfo	createInfo		=
7022 		{
7023 			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
7024 			DE_NULL,
7025 			vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
7026 
7027 			1u,
7028 			1u,
7029 			&poolSizes,
7030 		};
7031 
7032 		m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
7033 	}
7034 
7035 	{
7036 		const vk::VkDescriptorSetLayout			layout			= *m_resources.descriptorSetLayout;
7037 		const vk::VkDescriptorSetAllocateInfo	allocateInfo	=
7038 		{
7039 			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
7040 			DE_NULL,
7041 
7042 			*m_descriptorPool,
7043 			1,
7044 			&layout
7045 		};
7046 
7047 		m_descriptorSet = vk::allocateDescriptorSet(vkd, device, &allocateInfo);
7048 
7049 		{
7050 			const vk::VkImageViewCreateInfo createInfo =
7051 			{
7052 				vk::VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
7053 				DE_NULL,
7054 				0u,
7055 
7056 				context.getImage(),
7057 				vk::VK_IMAGE_VIEW_TYPE_2D,
7058 				vk::VK_FORMAT_R8G8B8A8_UNORM,
7059 				vk::makeComponentMappingRGBA(),
7060 				{
7061 					vk::VK_IMAGE_ASPECT_COLOR_BIT,
7062 					0u,
7063 					1u,
7064 					0u,
7065 					1u
7066 				}
7067 			};
7068 
7069 			m_imageView = vk::createImageView(vkd, device, &createInfo);
7070 		}
7071 
7072 		{
7073 			const vk::VkDescriptorImageInfo			imageInfo	=
7074 			{
7075 				0,
7076 				*m_imageView,
7077 				context.getImageLayout()
7078 			};
7079 			const vk::VkWriteDescriptorSet			write		=
7080 			{
7081 				vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
7082 				DE_NULL,
7083 				*m_descriptorSet,
7084 				0u,
7085 				0u,
7086 				1u,
7087 				vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
7088 				&imageInfo,
7089 				DE_NULL,
7090 				DE_NULL,
7091 			};
7092 
7093 			vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
7094 		}
7095 	}
7096 }
7097 
7098 void RenderFragmentStorageImage::submit (SubmitContext& context)
7099 {
7100 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
7101 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
7102 
7103 	vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
7104 
7105 	vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &(*m_descriptorSet), 0u, DE_NULL);
7106 	vkd.cmdDraw(commandBuffer, 6, 1, 0, 0);
7107 }
7108 
7109 void RenderFragmentStorageImage::verify (VerifyRenderPassContext& context, size_t)
7110 {
7111 	const UVec2		size			= UVec2(context.getReferenceImage().getWidth(), context.getReferenceImage().getHeight());
7112 	const deUint32	valuesPerPixel	= de::max<deUint32>(1u, (size.x() * size.y()) / (256u * 256u));
7113 
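	// Replay the fragment shader's pointer chasing: starting from the pixel coordinate, repeatedly
	// read the reference image at a position derived from the previous value and write the final
	// color to the reference target.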
7114 	for (int y = 0; y < context.getReferenceTarget().getSize().y(); y++)
7115 	for (int x = 0; x < context.getReferenceTarget().getSize().x(); x++)
7116 	{
7117 		UVec4	value	= UVec4(x, y, 0u, 0u);
7118 
7119 		for (deUint32 i = 0; i < valuesPerPixel; i++)
7120 		{
7121 			const UVec2	pos			= UVec2(value.z() * 256u + (value.x() ^ value.z()), value.w() * 256u + (value.y() ^ value.w()));
7122 			const Vec4	floatValue	= context.getReferenceImage().getAccess().getPixel(pos.x() % size.x(), pos.y() % size.y());
7123 
7124 			value = UVec4((deUint32)(floatValue.x() * 255.0f),
7125 						  (deUint32)(floatValue.y() * 255.0f),
7126 						  (deUint32)(floatValue.z() * 255.0f),
7127 						  (deUint32)(floatValue.w() * 255.0f));
7128 
7129 		}
7130 		context.getReferenceTarget().getAccess().setPixel(value.asFloat() / Vec4(255.0f), x, y);
7131 	}
7132 }
7133 
7134 class RenderFragmentSampledImage : public RenderPassCommand
7135 {
7136 public:
7137 				RenderFragmentSampledImage	(void) {}
7138 				~RenderFragmentSampledImage	(void);
7139 
7140 	const char*	getName						(void) const { return "RenderFragmentSampledImage"; }
7141 	void		logPrepare					(TestLog&, size_t) const;
7142 	void		logSubmit					(TestLog&, size_t) const;
7143 	void		prepare						(PrepareRenderPassContext&);
7144 	void		submit						(SubmitContext& context);
7145 	void		verify						(VerifyRenderPassContext&, size_t);
7146 
7147 private:
7148 	PipelineResources				m_resources;
7149 	vk::Move<vk::VkDescriptorPool>	m_descriptorPool;
7150 	vk::Move<vk::VkDescriptorSet>	m_descriptorSet;
7151 	vk::Move<vk::VkImageView>		m_imageView;
7152 	vk::Move<vk::VkSampler>			m_sampler;
7153 };
7154 
7155 RenderFragmentSampledImage::~RenderFragmentSampledImage (void)
7156 {
7157 }
7158 
7159 void RenderFragmentSampledImage::logPrepare (TestLog& log, size_t commandIndex) const
7160 {
7161 	log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render sampled image." << TestLog::EndMessage;
7162 }
7163 
7164 void RenderFragmentSampledImage::logSubmit (TestLog& log, size_t commandIndex) const
7165 {
7166 	log << TestLog::Message << commandIndex << ":" << getName() << " Render using sampled image." << TestLog::EndMessage;
7167 }
7168 
7169 void RenderFragmentSampledImage::prepare (PrepareRenderPassContext& context)
7170 {
7171 	const vk::DeviceInterface&					vkd						= context.getContext().getDeviceInterface();
7172 	const vk::VkDevice							device					= context.getContext().getDevice();
7173 	const vk::VkRenderPass						renderPass				= context.getRenderPass();
7174 	const deUint32								subpass					= 0;
7175 	const vk::Unique<vk::VkShaderModule>		vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-quad.vert"), 0));
7176 	const vk::Unique<vk::VkShaderModule>		fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("sampled-image.frag"), 0));
7177 	vector<vk::VkDescriptorSetLayoutBinding>	bindings;
7178 
7179 	{
7180 		const vk::VkDescriptorSetLayoutBinding binding =
7181 		{
7182 			0u,
7183 			vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
7184 			1,
7185 			vk::VK_SHADER_STAGE_FRAGMENT_BIT,
7186 			DE_NULL
7187 		};
7188 
7189 		bindings.push_back(binding);
7190 	}
7191 
7192 	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
7193 								vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 0u, DE_NULL, m_resources);
7194 
7195 	{
7196 		const vk::VkDescriptorPoolSize			poolSizes		=
7197 		{
7198 			vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
7199 			1
7200 		};
7201 		const vk::VkDescriptorPoolCreateInfo	createInfo		=
7202 		{
7203 			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
7204 			DE_NULL,
7205 			vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
7206 
7207 			1u,
7208 			1u,
7209 			&poolSizes,
7210 		};
7211 
7212 		m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
7213 	}
7214 
7215 	{
7216 		const vk::VkDescriptorSetLayout			layout			= *m_resources.descriptorSetLayout;
7217 		const vk::VkDescriptorSetAllocateInfo	allocateInfo	=
7218 		{
7219 			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
7220 			DE_NULL,
7221 
7222 			*m_descriptorPool,
7223 			1,
7224 			&layout
7225 		};
7226 
7227 		m_descriptorSet = vk::allocateDescriptorSet(vkd, device, &allocateInfo);
7228 
7229 		{
7230 			const vk::VkImageViewCreateInfo createInfo =
7231 			{
7232 				vk::VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
7233 				DE_NULL,
7234 				0u,
7235 
7236 				context.getImage(),
7237 				vk::VK_IMAGE_VIEW_TYPE_2D,
7238 				vk::VK_FORMAT_R8G8B8A8_UNORM,
7239 				vk::makeComponentMappingRGBA(),
7240 				{
7241 					vk::VK_IMAGE_ASPECT_COLOR_BIT,
7242 					0u,
7243 					1u,
7244 					0u,
7245 					1u
7246 				}
7247 			};
7248 
7249 			m_imageView = vk::createImageView(vkd, device, &createInfo);
7250 		}
7251 
7252 		{
7253 			const vk::VkSamplerCreateInfo createInfo =
7254 			{
7255 				vk::VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
7256 				DE_NULL,
7257 				0u,
7258 
7259 				vk::VK_FILTER_NEAREST,
7260 				vk::VK_FILTER_NEAREST,
7261 
7262 				vk::VK_SAMPLER_MIPMAP_MODE_LINEAR,
7263 				vk::VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
7264 				vk::VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
7265 				vk::VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
7266 				0.0f,
7267 				VK_FALSE,
7268 				1.0f,
7269 				VK_FALSE,
7270 				vk::VK_COMPARE_OP_ALWAYS,
7271 				0.0f,
7272 				0.0f,
7273 				vk::VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK,
7274 				VK_FALSE
7275 			};
7276 
7277 			m_sampler = vk::createSampler(vkd, device, &createInfo);
7278 		}
7279 
7280 		{
7281 			const vk::VkDescriptorImageInfo			imageInfo	=
7282 			{
7283 				*m_sampler,
7284 				*m_imageView,
7285 				context.getImageLayout()
7286 			};
7287 			const vk::VkWriteDescriptorSet			write		=
7288 			{
7289 				vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
7290 				DE_NULL,
7291 				*m_descriptorSet,
7292 				0u,
7293 				0u,
7294 				1u,
7295 				vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
7296 				&imageInfo,
7297 				DE_NULL,
7298 				DE_NULL,
7299 			};
7300 
7301 			vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
7302 		}
7303 	}
7304 }
7305 
7306 void RenderFragmentSampledImage::submit (SubmitContext& context)
7307 {
7308 	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
7309 	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
7310 
7311 	vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
7312 
7313 	vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &(*m_descriptorSet), 0u, DE_NULL);
7314 	vkd.cmdDraw(commandBuffer, 6u, 1u, 0u, 0u);
7315 }
7316 
7317 void RenderFragmentSampledImage::verify (VerifyRenderPassContext& context, size_t)
7318 {
7319 	const UVec2		size			= UVec2(context.getReferenceImage().getWidth(), context.getReferenceImage().getHeight());
7320 	const deUint32	valuesPerPixel	= de::max<deUint32>(1u, (size.x() * size.y()) / (256u * 256u));
7321 
7322 	for (int y = 0; y < context.getReferenceTarget().getSize().y(); y++)
7323 	for (int x = 0; x < context.getReferenceTarget().getSize().x(); x++)
7324 	{
7325 		UVec4	value	= UVec4(x, y, 0u, 0u);
7326 
7327 		for (deUint32 i = 0; i < valuesPerPixel; i++)
7328 		{
7329 			const UVec2	pos			= UVec2(value.z() * 256u + (value.x() ^ value.z()), value.w() * 256u + (value.y() ^ value.w()));
7330 			const Vec4	floatValue	= context.getReferenceImage().getAccess().getPixel(pos.x() % size.x(), pos.y() % size.y());
7331 
7332 			value = UVec4((deUint32)(floatValue.x() * 255.0f),
7333 						  (deUint32)(floatValue.y() * 255.0f),
7334 						  (deUint32)(floatValue.z() * 255.0f),
7335 						  (deUint32)(floatValue.w() * 255.0f));
7336 
7337 		}
7338 
7339 		context.getReferenceTarget().getAccess().setPixel(value.asFloat() / Vec4(255.0f), x, y);
7340 	}
7341 }
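// Reference computation note: the loop above mirrors, on the CPU, the dependent
// sampling chain that the "sampled-image.frag" shader is expected to perform on
// the GPU. Starting from the fragment coordinate, each round derives a texel
// position from the previously fetched value and re-samples the image, so the
// reference target ends up holding the same chained-read result as the rendered
// output it is later compared against.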
7342 
7343 enum Op
7344 {
7345 	OP_MAP,
7346 	OP_UNMAP,
7347 
7348 	OP_MAP_FLUSH,
7349 	OP_MAP_INVALIDATE,
7350 
7351 	OP_MAP_READ,
7352 	OP_MAP_WRITE,
7353 	OP_MAP_MODIFY,
7354 
7355 	OP_BUFFER_CREATE,
7356 	OP_BUFFER_DESTROY,
7357 	OP_BUFFER_BINDMEMORY,
7358 
7359 	OP_QUEUE_WAIT_FOR_IDLE,
7360 	OP_DEVICE_WAIT_FOR_IDLE,
7361 
7362 	OP_COMMAND_BUFFER_BEGIN,
7363 	OP_COMMAND_BUFFER_END,
7364 
7365 	// Buffer transfer operations
7366 	OP_BUFFER_FILL,
7367 	OP_BUFFER_UPDATE,
7368 
7369 	OP_BUFFER_COPY_TO_BUFFER,
7370 	OP_BUFFER_COPY_FROM_BUFFER,
7371 
7372 	OP_BUFFER_COPY_TO_IMAGE,
7373 	OP_BUFFER_COPY_FROM_IMAGE,
7374 
7375 	OP_IMAGE_CREATE,
7376 	OP_IMAGE_DESTROY,
7377 	OP_IMAGE_BINDMEMORY,
7378 
7379 	OP_IMAGE_TRANSITION_LAYOUT,
7380 
7381 	OP_IMAGE_COPY_TO_BUFFER,
7382 	OP_IMAGE_COPY_FROM_BUFFER,
7383 
7384 	OP_IMAGE_COPY_TO_IMAGE,
7385 	OP_IMAGE_COPY_FROM_IMAGE,
7386 
7387 	OP_IMAGE_BLIT_TO_IMAGE,
7388 	OP_IMAGE_BLIT_FROM_IMAGE,
7389 
7390 	OP_IMAGE_RESOLVE,
7391 
7392 	OP_PIPELINE_BARRIER_GLOBAL,
7393 	OP_PIPELINE_BARRIER_BUFFER,
7394 	OP_PIPELINE_BARRIER_IMAGE,
7395 
7396 	// Renderpass operations
7397 	OP_RENDERPASS_BEGIN,
7398 	OP_RENDERPASS_END,
7399 
7400 	// Commands inside render pass
7401 	OP_RENDER_VERTEX_BUFFER,
7402 	OP_RENDER_INDEX_BUFFER,
7403 
7404 	OP_RENDER_VERTEX_UNIFORM_BUFFER,
7405 	OP_RENDER_FRAGMENT_UNIFORM_BUFFER,
7406 
7407 	OP_RENDER_VERTEX_UNIFORM_TEXEL_BUFFER,
7408 	OP_RENDER_FRAGMENT_UNIFORM_TEXEL_BUFFER,
7409 
7410 	OP_RENDER_VERTEX_STORAGE_BUFFER,
7411 	OP_RENDER_FRAGMENT_STORAGE_BUFFER,
7412 
7413 	OP_RENDER_VERTEX_STORAGE_TEXEL_BUFFER,
7414 	OP_RENDER_FRAGMENT_STORAGE_TEXEL_BUFFER,
7415 
7416 	OP_RENDER_VERTEX_STORAGE_IMAGE,
7417 	OP_RENDER_FRAGMENT_STORAGE_IMAGE,
7418 
7419 	OP_RENDER_VERTEX_SAMPLED_IMAGE,
7420 	OP_RENDER_FRAGMENT_SAMPLED_IMAGE,
7421 };
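// The ops above are grouped roughly by the Stage (declared below) in which they
// are legal: host-side memory and object management ops, command-buffer-level
// transfer and barrier ops, and draw-time ops that are only valid inside a
// render pass. getAvailableOps() enforces this grouping when it builds the list
// of candidate ops for the current State.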
7422 
7423 enum Stage
7424 {
7425 	STAGE_HOST,
7426 	STAGE_COMMAND_BUFFER,
7427 
7428 	STAGE_RENDER_PASS
7429 };
7430 
7431 vk::VkAccessFlags getWriteAccessFlags (void)
7432 {
7433 	return vk::VK_ACCESS_SHADER_WRITE_BIT
7434 		| vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT
7435 		| vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT
7436 		| vk::VK_ACCESS_TRANSFER_WRITE_BIT
7437 		| vk::VK_ACCESS_HOST_WRITE_BIT
7438 		| vk::VK_ACCESS_MEMORY_WRITE_BIT;
7439 }
7440 
7441 bool isWriteAccess (vk::VkAccessFlagBits access)
7442 {
7443 	return (getWriteAccessFlags() & access) != 0;
7444 }
7445 
7446 class CacheState
7447 {
7448 public:
7449 									CacheState				(vk::VkPipelineStageFlags allowedStages, vk::VkAccessFlags allowedAccesses);
7450 
7451 	bool							isValid					(vk::VkPipelineStageFlagBits	stage,
7452 															 vk::VkAccessFlagBits			access) const;
7453 
7454 	void							perform					(vk::VkPipelineStageFlagBits	stage,
7455 															 vk::VkAccessFlagBits			access);
7456 
7457 	void							submitCommandBuffer		(void);
7458 	void							waitForIdle				(void);
7459 
7460 	void							getFullBarrier			(vk::VkPipelineStageFlags&	srcStages,
7461 															 vk::VkAccessFlags&			srcAccesses,
7462 															 vk::VkPipelineStageFlags&	dstStages,
7463 															 vk::VkAccessFlags&			dstAccesses) const;
7464 
7465 	void							barrier					(vk::VkPipelineStageFlags	srcStages,
7466 															 vk::VkAccessFlags			srcAccesses,
7467 															 vk::VkPipelineStageFlags	dstStages,
7468 															 vk::VkAccessFlags			dstAccesses);
7469 
7470 	void							imageLayoutBarrier		(vk::VkPipelineStageFlags	srcStages,
7471 															 vk::VkAccessFlags			srcAccesses,
7472 															 vk::VkPipelineStageFlags	dstStages,
7473 															 vk::VkAccessFlags			dstAccesses);
7474 
7475 	void							checkImageLayoutBarrier	(vk::VkPipelineStageFlags	srcStages,
7476 															 vk::VkAccessFlags			srcAccesses,
7477 															 vk::VkPipelineStageFlags	dstStages,
7478 															 vk::VkAccessFlags			dstAccesses);
7479 
7480 	// Everything is clean and there is no need for barriers
7481 	bool							isClean					(void) const;
7482 
7483 	vk::VkPipelineStageFlags		getAllowedStages		(void) const { return m_allowedStages; }
7484 	vk::VkAccessFlags				getAllowedAcceses		(void) const { return m_allowedAccesses; }
7485 private:
7486 	// Limit which stages and accesses are used by the CacheState tracker
7487 	const vk::VkPipelineStageFlags	m_allowedStages;
7488 	const vk::VkAccessFlags			m_allowedAccesses;
7489 
7490 	// [dstStage][srcStage] = srcAccesses
7491 	// In stage dstStage, writes with srcAccesses from srcStage are not yet available
7492 	vk::VkAccessFlags				m_unavailableWriteOperations[PIPELINESTAGE_LAST][PIPELINESTAGE_LAST];
7493 	// Latest image layout transition is not yet available in stage
7494 	bool							m_unavailableLayoutTransition[PIPELINESTAGE_LAST];
7495 	// [dstStage] = dstAccesses
7496 	// In stage dstStage ops with dstAccesses are not yet visible
7497 	vk::VkAccessFlags				m_invisibleOperations[PIPELINESTAGE_LAST];
7498 
7499 	// [dstStage] = srcStage
7500 	// Memory operations in srcStage have not completed before dstStage
7501 	vk::VkPipelineStageFlags		m_incompleteOperations[PIPELINESTAGE_LAST];
7502 };
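// Illustrative usage sketch (assuming the transfer and fragment shader stages
// and the corresponding accesses are all included in the allowed stages and
// accesses passed to the constructor):
//
//   CacheState cache (allowedStages, allowedAccesses);
//   cache.perform(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_WRITE_BIT);
//   // cache.isValid(FRAGMENT_SHADER, SHADER_READ) is now false: the write is
//   // neither available nor visible to the fragment shader yet.
//   cache.barrier(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_WRITE_BIT,
//                 vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT);
//   // cache.isValid(FRAGMENT_SHADER, SHADER_READ) is true again.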
7503 
7504 CacheState::CacheState (vk::VkPipelineStageFlags allowedStages, vk::VkAccessFlags allowedAccesses)
7505 	: m_allowedStages	(allowedStages)
7506 	, m_allowedAccesses	(allowedAccesses)
7507 {
7508 	for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
7509 	{
7510 		const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
7511 
7512 		if ((dstStage_ & m_allowedStages) == 0)
7513 			continue;
7514 
7515 		// All operations are initially visible
7516 		m_invisibleOperations[dstStage] = 0;
7517 
7518 		// There are no incomplete read operations initially
7519 		m_incompleteOperations[dstStage] = 0;
7520 
7521 		// There are no incomplete layout transitions
7522 		m_unavailableLayoutTransition[dstStage] = false;
7523 
7524 		for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
7525 		{
7526 			const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7527 
7528 			if ((srcStage_ & m_allowedStages) == 0)
7529 				continue;
7530 
7531 			// There are no write operations that are not yet available
7532 			// initially.
7533 			m_unavailableWriteOperations[dstStage][srcStage] = 0;
7534 		}
7535 	}
7536 }
7537 
7538 bool CacheState::isValid (vk::VkPipelineStageFlagBits	stage,
7539 						  vk::VkAccessFlagBits			access) const
7540 {
7541 	DE_ASSERT((access & (~m_allowedAccesses)) == 0);
7542 	DE_ASSERT((stage & (~m_allowedStages)) == 0);
7543 
7544 	const PipelineStage	dstStage	= pipelineStageFlagToPipelineStage(stage);
7545 
7546 	// Previous operations are not yet visible to this access in this stage
7547 	if (m_unavailableLayoutTransition[dstStage] || (m_invisibleOperations[dstStage] & access) != 0)
7548 		return false;
7549 
7550 	if (isWriteAccess(access))
7551 	{
7552 		// Memory operations from other stages have not completed before
7553 		// dstStage
7554 		if (m_incompleteOperations[dstStage] != 0)
7555 			return false;
7556 	}
7557 
7558 	return true;
7559 }
7560 
7561 void CacheState::perform (vk::VkPipelineStageFlagBits	stage,
7562 						  vk::VkAccessFlagBits			access)
7563 {
7564 	DE_ASSERT((access & (~m_allowedAccesses)) == 0);
7565 	DE_ASSERT((stage & (~m_allowedStages)) == 0);
7566 
7567 	const PipelineStage srcStage = pipelineStageFlagToPipelineStage(stage);
7568 
7569 	for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
7570 	{
7571 		const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
7572 
7573 		if ((dstStage_ & m_allowedStages) == 0)
7574 			continue;
7575 
7576 		// Mark stage as incomplete for all stages
7577 		m_incompleteOperations[dstStage] |= stage;
7578 
7579 		if (isWriteAccess(access))
7580 		{
7581 			// Mark all accesses from all stages invisible
7582 			m_invisibleOperations[dstStage] |= m_allowedAccesses;
7583 
7584 			// Mark write access from srcStage unavailable to all stages
7585 			m_unavailableWriteOperations[dstStage][srcStage] |= access;
7586 		}
7587 	}
7588 }
7589 
7590 void CacheState::submitCommandBuffer (void)
7591 {
7592 	// Flush all host writes and reads
7593 	barrier(m_allowedStages & vk::VK_PIPELINE_STAGE_HOST_BIT,
7594 			m_allowedAccesses & (vk::VK_ACCESS_HOST_READ_BIT | vk::VK_ACCESS_HOST_WRITE_BIT),
7595 			m_allowedStages,
7596 			m_allowedAccesses);
7597 }
7598 
7599 void CacheState::waitForIdle (void)
7600 {
7601 	// Make all writes available
7602 	barrier(m_allowedStages,
7603 			m_allowedAccesses & getWriteAccessFlags(),
7604 			m_allowedStages,
7605 			0);
7606 
7607 	// Make all writes visible on device side
7608 	barrier(m_allowedStages,
7609 			0,
7610 			m_allowedStages & (~vk::VK_PIPELINE_STAGE_HOST_BIT),
7611 			m_allowedAccesses);
7612 }
7613 
7614 void CacheState::getFullBarrier (vk::VkPipelineStageFlags&	srcStages,
7615 								 vk::VkAccessFlags&			srcAccesses,
7616 								 vk::VkPipelineStageFlags&	dstStages,
7617 								 vk::VkAccessFlags&			dstAccesses) const
7618 {
7619 	srcStages	= 0;
7620 	srcAccesses	= 0;
7621 	dstStages	= 0;
7622 	dstAccesses	= 0;
7623 
7624 	for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
7625 	{
7626 		const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
7627 
7628 		if ((dstStage_ & m_allowedStages) == 0)
7629 			continue;
7630 
7631 		// Make sure all previous operations are complete in all stages
7632 		if (m_incompleteOperations[dstStage])
7633 		{
7634 			dstStages |= dstStage_;
7635 			srcStages |= m_incompleteOperations[dstStage];
7636 		}
7637 
7638 		// Make sure all read operations are visible in dstStage
7639 		if (m_invisibleOperations[dstStage])
7640 		{
7641 			dstStages |= dstStage_;
7642 			dstAccesses |= m_invisibleOperations[dstStage];
7643 		}
7644 
7645 		// Make sure all write operations from all stages are available
7646 		for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
7647 		{
7648 			const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7649 
7650 			if ((srcStage_ & m_allowedStages) == 0)
7651 				continue;
7652 
7653 			if (m_unavailableWriteOperations[dstStage][srcStage])
7654 			{
7655 				dstStages |= dstStage_;
7656 				srcStages |= dstStage_;
7657 				srcAccesses |= m_unavailableWriteOperations[dstStage][srcStage];
7658 			}
7659 
7660 			if (m_unavailableLayoutTransition[dstStage] && !m_unavailableLayoutTransition[srcStage])
7661 			{
7662 				// Add dependency between srcStage and dstStage if layout transition has not completed in dstStage,
7663 				// but has completed in srcStage.
7664 				dstStages |= dstStage_;
7665 				srcStages |= dstStage_;
7666 			}
7667 		}
7668 	}
7669 
7670 	DE_ASSERT((srcStages & (~m_allowedStages)) == 0);
7671 	DE_ASSERT((srcAccesses & (~m_allowedAccesses)) == 0);
7672 	DE_ASSERT((dstStages & (~m_allowedStages)) == 0);
7673 	DE_ASSERT((dstAccesses & (~m_allowedAccesses)) == 0);
7674 }
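// getFullBarrier() returns the union of everything that is still pending:
// stages whose operations have not completed, accesses that are not yet
// visible, and writes that are not yet available. The pipeline-barrier ops
// later in this file start from this full barrier and randomly mask bits off
// it, while the image-layout-transition op keeps the full source half and
// randomizes the destination stages and accesses.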
7675 
7676 void CacheState::checkImageLayoutBarrier (vk::VkPipelineStageFlags	srcStages,
7677 										 vk::VkAccessFlags			srcAccesses,
7678 										 vk::VkPipelineStageFlags	dstStages,
7679 										 vk::VkAccessFlags			dstAccesses)
7680 {
7681 	DE_ASSERT((srcStages & (~m_allowedStages)) == 0);
7682 	DE_ASSERT((srcAccesses & (~m_allowedAccesses)) == 0);
7683 	DE_ASSERT((dstStages & (~m_allowedStages)) == 0);
7684 	DE_ASSERT((dstAccesses & (~m_allowedAccesses)) == 0);
7685 
7686 	DE_UNREF(srcStages);
7687 	DE_UNREF(srcAccesses);
7688 
7689 	DE_UNREF(dstStages);
7690 	DE_UNREF(dstAccesses);
7691 
7692 #if defined(DE_DEBUG)
7693 	// Check that all stages have completed before srcStages or are in srcStages.
7694 	{
7695 		vk::VkPipelineStageFlags completedStages = srcStages;
7696 
7697 		for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= srcStages; srcStage_ <<= 1)
7698 		{
7699 			const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7700 
7701 			if ((srcStage_ & srcStages) == 0)
7702 				continue;
7703 
7704 			completedStages |= (~m_incompleteOperations[srcStage]);
7705 		}
7706 
7707 		DE_ASSERT((completedStages & m_allowedStages) == m_allowedStages);
7708 	}
7709 
7710 	// Check that at least one write is available in some stage. Since all stages have completed, even a single flush is enough.
7711 	if ((getWriteAccessFlags() & m_allowedAccesses) != 0 && (srcAccesses & getWriteAccessFlags()) == 0)
7712 	{
7713 		bool anyWriteAvailable = false;
7714 
7715 		for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
7716 		{
7717 			const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
7718 
7719 			if ((dstStage_ & m_allowedStages) == 0)
7720 				continue;
7721 
7722 			for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
7723 			{
7724 				const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7725 
7726 				if ((srcStage_ & m_allowedStages) == 0)
7727 					continue;
7728 
7729 				if (m_unavailableWriteOperations[dstStage][srcStage] != (getWriteAccessFlags() & m_allowedAccesses))
7730 				{
7731 					anyWriteAvailable = true;
7732 					break;
7733 				}
7734 			}
7735 		}
7736 
7737 		DE_ASSERT(anyWriteAvailable);
7738 	}
7739 #endif
7740 }
7741 
7742 void CacheState::imageLayoutBarrier (vk::VkPipelineStageFlags	srcStages,
7743 									 vk::VkAccessFlags			srcAccesses,
7744 									 vk::VkPipelineStageFlags	dstStages,
7745 									 vk::VkAccessFlags			dstAccesses)
7746 {
7747 	checkImageLayoutBarrier(srcStages, srcAccesses, dstStages, dstAccesses);
7748 
7749 	for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
7750 	{
7751 		const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
7752 
7753 		if ((dstStage_ & m_allowedStages) == 0)
7754 			continue;
7755 
7756 		// All stages are incomplete after the barrier except each dstStage in itself.
7757 		m_incompleteOperations[dstStage] = m_allowedStages & (~dstStage_);
7758 
7759 		// All memory operations are invisible unless they are listed in dstAccesses
7760 		m_invisibleOperations[dstStage] = m_allowedAccesses & (~dstAccesses);
7761 
7762 		// Layout transition is unavailable in stage unless it was listed in dstStages
7763 		m_unavailableLayoutTransition[dstStage]= (dstStage_ & dstStages) == 0;
7764 
7765 		for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
7766 		{
7767 			const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7768 
7769 			if ((srcStage_ & m_allowedStages) == 0)
7770 				continue;
7771 
7772 			// All write operations are available after layout transition
7773 			m_unavailableWriteOperations[dstStage][srcStage] = 0;
7774 		}
7775 	}
7776 }
7777 
7778 void CacheState::barrier (vk::VkPipelineStageFlags	srcStages,
7779 						  vk::VkAccessFlags			srcAccesses,
7780 						  vk::VkPipelineStageFlags	dstStages,
7781 						  vk::VkAccessFlags			dstAccesses)
7782 {
7783 	DE_ASSERT((srcStages & (~m_allowedStages)) == 0);
7784 	DE_ASSERT((srcAccesses & (~m_allowedAccesses)) == 0);
7785 	DE_ASSERT((dstStages & (~m_allowedStages)) == 0);
7786 	DE_ASSERT((dstAccesses & (~m_allowedAccesses)) == 0);
7787 
7788 	// Transitivity
7789 	{
7790 		vk::VkPipelineStageFlags		oldIncompleteOperations[PIPELINESTAGE_LAST];
7791 		vk::VkAccessFlags				oldUnavailableWriteOperations[PIPELINESTAGE_LAST][PIPELINESTAGE_LAST];
7792 		bool							oldUnavailableLayoutTransition[PIPELINESTAGE_LAST];
7793 
7794 		deMemcpy(oldIncompleteOperations, m_incompleteOperations, sizeof(oldIncompleteOperations));
7795 		deMemcpy(oldUnavailableWriteOperations, m_unavailableWriteOperations, sizeof(oldUnavailableWriteOperations));
7796 		deMemcpy(oldUnavailableLayoutTransition, m_unavailableLayoutTransition, sizeof(oldUnavailableLayoutTransition));
7797 
7798 		for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= srcStages; srcStage_ <<= 1)
7799 		{
7800 			const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7801 
7802 			if ((srcStage_ & srcStages) == 0)
7803 				continue;
7804 
7805 			for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= dstStages; dstStage_ <<= 1)
7806 			{
7807 				const PipelineStage	dstStage			= pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
7808 
7809 				if ((dstStage_ & dstStages) == 0)
7810 					continue;
7811 
7812 				// Stages that have completed before srcStage have also completed before dstStage
7813 				m_incompleteOperations[dstStage] &= oldIncompleteOperations[srcStage];
7814 
7815 				// Image layout transitions available in srcStage are now also available in dstStage
7816 				m_unavailableLayoutTransition[dstStage] &= oldUnavailableLayoutTransition[srcStage];
7817 
7818 				for (vk::VkPipelineStageFlags sharedStage_ = 1; sharedStage_ <= m_allowedStages; sharedStage_ <<= 1)
7819 				{
7820 					const PipelineStage	sharedStage			= pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)sharedStage_);
7821 
7822 					if ((sharedStage_ & m_allowedStages) == 0)
7823 						continue;
7824 
7825 					// Writes that are available in srcStage are also available in dstStage
7826 					m_unavailableWriteOperations[dstStage][sharedStage] &= oldUnavailableWriteOperations[srcStage][sharedStage];
7827 				}
7828 			}
7829 		}
7830 	}
7831 
7832 	// Barrier
7833 	for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= dstStages; dstStage_ <<= 1)
7834 	{
7835 		const PipelineStage	dstStage			= pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
7836 		bool				allWritesAvailable	= true;
7837 
7838 		if ((dstStage_ & dstStages) == 0)
7839 			continue;
7840 
7841 		// Operations in srcStages have completed before any stage in dstStages
7842 		m_incompleteOperations[dstStage] &= ~srcStages;
7843 
7844 		for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
7845 		{
7846 			const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7847 
7848 			if ((srcStage_ & m_allowedStages) == 0)
7849 				continue;
7850 
7851 			// Make srcAccesses from srcStage available in dstStage
7852 			if ((srcStage_ & srcStages) != 0)
7853 				m_unavailableWriteOperations[dstStage][srcStage] &= ~srcAccesses;
7854 
7855 			if (m_unavailableWriteOperations[dstStage][srcStage] != 0)
7856 				allWritesAvailable = false;
7857 		}
7858 
7859 		// If all writes are available in dstStage make dstAccesses also visible
7860 		if (allWritesAvailable)
7861 			m_invisibleOperations[dstStage] &= ~dstAccesses;
7862 	}
7863 }
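// Transitivity sketch for the block above: if an earlier barrier already made
// some writes available to (or ordered some stages before) srcStage, a new
// barrier from srcStage to dstStage carries that state forward, so those
// writes are treated as available to dstStage as well without being named
// again in srcAccesses. For example, TRANSFER -> VERTEX_SHADER followed by
// VERTEX_SHADER -> FRAGMENT_SHADER behaves like a dependency chained all the
// way down to the fragment shader.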
7864 
7865 bool CacheState::isClean (void) const
7866 {
7867 	for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
7868 	{
7869 		const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
7870 
7871 		if ((dstStage_ & m_allowedStages) == 0)
7872 			continue;
7873 
7874 		// Some operations are not visible to some stages
7875 		if (m_invisibleOperations[dstStage] != 0)
7876 			return false;
7877 
7878 		// There are operations that have not completed yet
7879 		if (m_incompleteOperations[dstStage] != 0)
7880 			return false;
7881 
7882 		// Layout transition has not completed yet
7883 		if (m_unavailableLayoutTransition[dstStage])
7884 			return false;
7885 
7886 		for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
7887 		{
7888 			const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7889 
7890 			if ((srcStage_ & m_allowedStages) == 0)
7891 				continue;
7892 
7893 			// Some write operations are not available yet
7894 			if (m_unavailableWriteOperations[dstStage][srcStage] != 0)
7895 				return false;
7896 		}
7897 	}
7898 
7899 	return true;
7900 }
7901 
7902 bool layoutSupportedByUsage (Usage usage, vk::VkImageLayout layout)
7903 {
7904 	switch (layout)
7905 	{
7906 		case vk::VK_IMAGE_LAYOUT_GENERAL:
7907 			return true;
7908 
7909 		case vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
7910 			return (usage & USAGE_COLOR_ATTACHMENT) != 0;
7911 
7912 		case vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
7913 			return (usage & USAGE_DEPTH_STENCIL_ATTACHMENT) != 0;
7914 
7915 		case vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
7916 			return (usage & USAGE_DEPTH_STENCIL_ATTACHMENT) != 0;
7917 
7918 		case vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
7919 			// \todo [2016-03-09 mika] Should include input attachment
7920 			return (usage & USAGE_SAMPLED_IMAGE) != 0;
7921 
7922 		case vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
7923 			return (usage & USAGE_TRANSFER_SRC) != 0;
7924 
7925 		case vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
7926 			return (usage & USAGE_TRANSFER_DST) != 0;
7927 
7928 		case vk::VK_IMAGE_LAYOUT_PREINITIALIZED:
7929 			return true;
7930 
7931 		default:
7932 			DE_FATAL("Unknown layout");
7933 			return false;
7934 	}
7935 }
7936 
7937 size_t getNumberOfSupportedLayouts (Usage usage)
7938 {
7939 	const vk::VkImageLayout layouts[] =
7940 	{
7941 		vk::VK_IMAGE_LAYOUT_GENERAL,
7942 		vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
7943 		vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
7944 		vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL,
7945 		vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
7946 		vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
7947 		vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
7948 	};
7949 	size_t supportedLayoutCount = 0;
7950 
7951 	for (size_t layoutNdx = 0; layoutNdx < DE_LENGTH_OF_ARRAY(layouts); layoutNdx++)
7952 	{
7953 		const vk::VkImageLayout layout = layouts[layoutNdx];
7954 
7955 		if (layoutSupportedByUsage(usage, layout))
7956 			supportedLayoutCount++;
7957 	}
7958 
7959 	return supportedLayoutCount;
7960 }
7961 
7962 vk::VkImageLayout getRandomNextLayout (de::Random&			rng,
7963 									   Usage				usage,
7964 									   vk::VkImageLayout	previousLayout)
7965 {
7966 	const vk::VkImageLayout	layouts[] =
7967 	{
7968 		vk::VK_IMAGE_LAYOUT_GENERAL,
7969 		vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
7970 		vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
7971 		vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL,
7972 		vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
7973 		vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
7974 		vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
7975 	};
7976 	const size_t			supportedLayoutCount = getNumberOfSupportedLayouts(usage);
7977 
7978 	DE_ASSERT(supportedLayoutCount > 0);
7979 
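	// Pick a random index among the usage-supported layouts; when the current
	// layout is already defined (anything other than UNDEFINED) it is excluded
	// by the loop below, so the transition always moves to a different layout.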
7980 	size_t nextLayoutNdx = ((size_t)rng.getUint64()) % (previousLayout == vk::VK_IMAGE_LAYOUT_UNDEFINED
7981 														? supportedLayoutCount
7982 														: supportedLayoutCount - 1);
7983 
7984 	for (size_t layoutNdx = 0; layoutNdx < DE_LENGTH_OF_ARRAY(layouts); layoutNdx++)
7985 	{
7986 		const vk::VkImageLayout layout = layouts[layoutNdx];
7987 
7988 		if (layoutSupportedByUsage(usage, layout) && layout != previousLayout)
7989 		{
7990 			if (nextLayoutNdx == 0)
7991 				return layout;
7992 			else
7993 				nextLayoutNdx--;
7994 		}
7995 	}
7996 
7997 	DE_FATAL("Unreachable");
7998 	return vk::VK_IMAGE_LAYOUT_UNDEFINED;
7999 }
8000 
8001 struct State
8002 {
8003 	State (Usage usage, deUint32 seed)
8004 		: stage					(STAGE_HOST)
8005 		, cache					(usageToStageFlags(usage), usageToAccessFlags(usage))
8006 		, rng					(seed)
8007 		, mapped				(false)
8008 		, hostInvalidated		(true)
8009 		, hostFlushed			(true)
8010 		, memoryDefined			(false)
8011 		, hasBuffer				(false)
8012 		, hasBoundBufferMemory	(false)
8013 		, hasImage				(false)
8014 		, hasBoundImageMemory	(false)
8015 		, imageLayout			(vk::VK_IMAGE_LAYOUT_UNDEFINED)
8016 		, imageDefined			(false)
8017 		, queueIdle				(true)
8018 		, deviceIdle			(true)
8019 		, commandBufferIsEmpty	(true)
8020 		, renderPassIsEmpty		(true)
8021 	{
8022 	}
8023 
8024 	Stage				stage;
8025 	CacheState			cache;
8026 	de::Random			rng;
8027 
8028 	bool				mapped;
8029 	bool				hostInvalidated;
8030 	bool				hostFlushed;
8031 	bool				memoryDefined;
8032 
8033 	bool				hasBuffer;
8034 	bool				hasBoundBufferMemory;
8035 
8036 	bool				hasImage;
8037 	bool				hasBoundImageMemory;
8038 	vk::VkImageLayout	imageLayout;
8039 	bool				imageDefined;
8040 
8041 	bool				queueIdle;
8042 	bool				deviceIdle;
8043 
8044 	bool				commandBufferIsEmpty;
8045 	bool				renderPassIsEmpty;
8046 };
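// State is the host-side model of the randomized test sequence: it records
// which coarse stage the walk is in (host, command buffer or render pass),
// which objects currently exist and have memory bound, the current image
// layout, and what the CacheState believes about outstanding writes.
// getAvailableOps() consults this model to list the ops that are legal next,
// and applyOp() advances it once an op has been picked.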
8047 
8048 void getAvailableOps (const State& state, bool supportsBuffers, bool supportsImages, Usage usage, vector<Op>& ops)
8049 {
8050 	if (state.stage == STAGE_HOST)
8051 	{
8052 		if (usage & (USAGE_HOST_READ | USAGE_HOST_WRITE))
8053 		{
8054 			// Host memory operations
8055 			if (state.mapped)
8056 			{
8057 				ops.push_back(OP_UNMAP);
8058 
8059 				// Avoid flush and invalidate if they are not needed
8060 				if (!state.hostFlushed)
8061 					ops.push_back(OP_MAP_FLUSH);
8062 
8063 				if (!state.hostInvalidated
8064 					&& state.queueIdle
8065 					&& ((usage & USAGE_HOST_READ) == 0
8066 						|| state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_READ_BIT))
8067 					&& ((usage & USAGE_HOST_WRITE) == 0
8068 						|| state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_WRITE_BIT)))
8069 				{
8070 					ops.push_back(OP_MAP_INVALIDATE);
8071 				}
8072 
8073 				if (usage & USAGE_HOST_READ
8074 					&& usage & USAGE_HOST_WRITE
8075 					&& state.memoryDefined
8076 					&& state.hostInvalidated
8077 					&& state.queueIdle
8078 					&& state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_WRITE_BIT)
8079 					&& state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_READ_BIT))
8080 				{
8081 					ops.push_back(OP_MAP_MODIFY);
8082 				}
8083 
8084 				if (usage & USAGE_HOST_READ
8085 					&& state.memoryDefined
8086 					&& state.hostInvalidated
8087 					&& state.queueIdle
8088 					&& state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_READ_BIT))
8089 				{
8090 					ops.push_back(OP_MAP_READ);
8091 				}
8092 
8093 				if (usage & USAGE_HOST_WRITE
8094 					&& state.hostInvalidated
8095 					&& state.queueIdle
8096 					&& state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_WRITE_BIT))
8097 				{
8098 					ops.push_back(OP_MAP_WRITE);
8099 				}
8100 			}
8101 			else
8102 				ops.push_back(OP_MAP);
8103 		}
8104 
8105 		if (state.hasBoundBufferMemory && state.queueIdle)
8106 		{
8107 			// \note Destroy buffers only after they have been bound
8108 			ops.push_back(OP_BUFFER_DESTROY);
8109 		}
8110 		else
8111 		{
8112 			if (state.hasBuffer)
8113 			{
8114 				if (!state.hasBoundBufferMemory)
8115 					ops.push_back(OP_BUFFER_BINDMEMORY);
8116 			}
8117 			else if (!state.hasImage && supportsBuffers)	// Avoid creating buffer if there is already image
8118 				ops.push_back(OP_BUFFER_CREATE);
8119 		}
8120 
8121 		if (state.hasBoundImageMemory && state.queueIdle)
8122 		{
8123 			// \note Destroy images only after they have been bound
8124 			ops.push_back(OP_IMAGE_DESTROY);
8125 		}
8126 		else
8127 		{
8128 			if (state.hasImage)
8129 			{
8130 				if (!state.hasBoundImageMemory)
8131 					ops.push_back(OP_IMAGE_BINDMEMORY);
8132 			}
8133 			else if (!state.hasBuffer && supportsImages)	// Avoid creating image if there is already buffer
8134 				ops.push_back(OP_IMAGE_CREATE);
8135 		}
8136 
8137 		// Host writes must be flushed before GPU commands, and there must be
8138 		// a buffer or image for the GPU commands to use
8139 		if (state.hostFlushed
8140 			&& (state.memoryDefined || supportsDeviceBufferWrites(usage) || state.imageDefined || supportsDeviceImageWrites(usage))
8141 			&& (state.hasBoundBufferMemory || state.hasBoundImageMemory) // Avoid command buffers if there is no object to use
8142 			&& (usageToStageFlags(usage) & (~vk::VK_PIPELINE_STAGE_HOST_BIT)) != 0) // Don't start command buffer if there are no ways to use memory from gpu
8143 		{
8144 			ops.push_back(OP_COMMAND_BUFFER_BEGIN);
8145 		}
8146 
8147 		if (!state.deviceIdle)
8148 			ops.push_back(OP_DEVICE_WAIT_FOR_IDLE);
8149 
8150 		if (!state.queueIdle)
8151 			ops.push_back(OP_QUEUE_WAIT_FOR_IDLE);
8152 	}
8153 	else if (state.stage == STAGE_COMMAND_BUFFER)
8154 	{
8155 		if (!state.cache.isClean())
8156 		{
8157 			ops.push_back(OP_PIPELINE_BARRIER_GLOBAL);
8158 
8159 			if (state.hasImage)
8160 				ops.push_back(OP_PIPELINE_BARRIER_IMAGE);
8161 
8162 			if (state.hasBuffer)
8163 				ops.push_back(OP_PIPELINE_BARRIER_BUFFER);
8164 		}
8165 
8166 		if (state.hasBoundBufferMemory)
8167 		{
8168 			if (usage & USAGE_TRANSFER_DST
8169 				&& state.cache.isValid(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_WRITE_BIT))
8170 			{
8171 				ops.push_back(OP_BUFFER_FILL);
8172 				ops.push_back(OP_BUFFER_UPDATE);
8173 				ops.push_back(OP_BUFFER_COPY_FROM_BUFFER);
8174 				ops.push_back(OP_BUFFER_COPY_FROM_IMAGE);
8175 			}
8176 
8177 			if (usage & USAGE_TRANSFER_SRC
8178 				&& state.memoryDefined
8179 				&& state.cache.isValid(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_READ_BIT))
8180 			{
8181 				ops.push_back(OP_BUFFER_COPY_TO_BUFFER);
8182 				ops.push_back(OP_BUFFER_COPY_TO_IMAGE);
8183 			}
8184 		}
8185 
8186 		if (state.hasBoundImageMemory
8187 			&& (state.imageLayout == vk::VK_IMAGE_LAYOUT_UNDEFINED
8188 				|| getNumberOfSupportedLayouts(usage) > 1))
8189 		{
8190 			ops.push_back(OP_IMAGE_TRANSITION_LAYOUT);
8191 
8192 			{
8193 				if (usage & USAGE_TRANSFER_DST
8194 					&& (state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL
8195 						|| state.imageLayout == vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL)
8196 					&& state.cache.isValid(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_WRITE_BIT))
8197 				{
8198 					ops.push_back(OP_IMAGE_COPY_FROM_BUFFER);
8199 					ops.push_back(OP_IMAGE_COPY_FROM_IMAGE);
8200 					ops.push_back(OP_IMAGE_BLIT_FROM_IMAGE);
8201 				}
8202 
8203 				if (usage & USAGE_TRANSFER_SRC
8204 					&& (state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL
8205 						|| state.imageLayout == vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL)
8206 					&& state.imageDefined
8207 					&& state.cache.isValid(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_READ_BIT))
8208 				{
8209 					ops.push_back(OP_IMAGE_COPY_TO_BUFFER);
8210 					ops.push_back(OP_IMAGE_COPY_TO_IMAGE);
8211 					ops.push_back(OP_IMAGE_BLIT_TO_IMAGE);
8212 				}
8213 			}
8214 		}
8215 
8216 		// \todo [2016-03-09 mika] Add other usages?
8217 		if ((state.memoryDefined
8218 				&& state.hasBoundBufferMemory
8219 				&& (((usage & USAGE_VERTEX_BUFFER)
8220 					&& state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT))
8221 				|| ((usage & USAGE_INDEX_BUFFER)
8222 					&& state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_INDEX_READ_BIT))
8223 				|| ((usage & USAGE_UNIFORM_BUFFER)
8224 					&& (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT)
8225 						|| state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT)))
8226 				|| ((usage & USAGE_UNIFORM_TEXEL_BUFFER)
8227 					&& (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT)
8228 						|| state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT)))
8229 				|| ((usage & USAGE_STORAGE_BUFFER)
8230 					&& (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT)
8231 						|| state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT)))
8232 				|| ((usage & USAGE_STORAGE_TEXEL_BUFFER)
8233 					&& state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))))
8234 			|| (state.imageDefined
8235 				&& state.hasBoundImageMemory
8236 				&& (((usage & USAGE_STORAGE_IMAGE)
8237 						&& state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL
8238 						&& (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT)
8239 							|| state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT)))
8240 					|| ((usage & USAGE_SAMPLED_IMAGE)
8241 						&& (state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL
8242 							|| state.imageLayout == vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL)
8243 						&& (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT)
8244 							|| state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))))))
8245 		{
8246 			ops.push_back(OP_RENDERPASS_BEGIN);
8247 		}
8248 
8249 		// \note This depends on previous operations and always has to be the
8250 		// last command buffer operation check
8251 		if (ops.empty() || !state.commandBufferIsEmpty)
8252 			ops.push_back(OP_COMMAND_BUFFER_END);
8253 	}
8254 	else if (state.stage == STAGE_RENDER_PASS)
8255 	{
8256 		if ((usage & USAGE_VERTEX_BUFFER) != 0
8257 			&& state.memoryDefined
8258 			&& state.hasBoundBufferMemory
8259 			&& state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT))
8260 		{
8261 			ops.push_back(OP_RENDER_VERTEX_BUFFER);
8262 		}
8263 
8264 		if ((usage & USAGE_INDEX_BUFFER) != 0
8265 			&& state.memoryDefined
8266 			&& state.hasBoundBufferMemory
8267 			&& state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_INDEX_READ_BIT))
8268 		{
8269 			ops.push_back(OP_RENDER_INDEX_BUFFER);
8270 		}
8271 
8272 		if ((usage & USAGE_UNIFORM_BUFFER) != 0
8273 			&& state.memoryDefined
8274 			&& state.hasBoundBufferMemory)
8275 		{
8276 			if (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT))
8277 				ops.push_back(OP_RENDER_VERTEX_UNIFORM_BUFFER);
8278 
8279 			if (state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT))
8280 				ops.push_back(OP_RENDER_FRAGMENT_UNIFORM_BUFFER);
8281 		}
8282 
8283 		if ((usage & USAGE_UNIFORM_TEXEL_BUFFER) != 0
8284 			&& state.memoryDefined
8285 			&& state.hasBoundBufferMemory)
8286 		{
8287 			if (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT))
8288 				ops.push_back(OP_RENDER_VERTEX_UNIFORM_TEXEL_BUFFER);
8289 
8290 			if (state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT))
8291 				ops.push_back(OP_RENDER_FRAGMENT_UNIFORM_TEXEL_BUFFER);
8292 		}
8293 
8294 		if ((usage & USAGE_STORAGE_BUFFER) != 0
8295 			&& state.memoryDefined
8296 			&& state.hasBoundBufferMemory)
8297 		{
8298 			if (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
8299 				ops.push_back(OP_RENDER_VERTEX_STORAGE_BUFFER);
8300 
8301 			if (state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
8302 				ops.push_back(OP_RENDER_FRAGMENT_STORAGE_BUFFER);
8303 		}
8304 
8305 		if ((usage & USAGE_STORAGE_TEXEL_BUFFER) != 0
8306 			&& state.memoryDefined
8307 			&& state.hasBoundBufferMemory)
8308 		{
8309 			if (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
8310 				ops.push_back(OP_RENDER_VERTEX_STORAGE_TEXEL_BUFFER);
8311 
8312 			if (state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
8313 				ops.push_back(OP_RENDER_FRAGMENT_STORAGE_TEXEL_BUFFER);
8314 		}
8315 
8316 		if ((usage & USAGE_STORAGE_IMAGE) != 0
8317 			&& state.imageDefined
8318 			&& state.hasBoundImageMemory
8319 			&& (state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL))
8320 		{
8321 			if (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
8322 				ops.push_back(OP_RENDER_VERTEX_STORAGE_IMAGE);
8323 
8324 			if (state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
8325 				ops.push_back(OP_RENDER_FRAGMENT_STORAGE_IMAGE);
8326 		}
8327 
8328 		if ((usage & USAGE_SAMPLED_IMAGE) != 0
8329 			&& state.imageDefined
8330 			&& state.hasBoundImageMemory
8331 			&& (state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL
8332 				|| state.imageLayout == vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL))
8333 		{
8334 			if (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
8335 				ops.push_back(OP_RENDER_VERTEX_SAMPLED_IMAGE);
8336 
8337 			if (state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
8338 				ops.push_back(OP_RENDER_FRAGMENT_SAMPLED_IMAGE);
8339 		}
8340 
8341 		if (!state.renderPassIsEmpty)
8342 			ops.push_back(OP_RENDERPASS_END);
8343 	}
8344 	else
8345 		DE_FATAL("Unknown stage");
8346 }
8347 
8348 void applyOp (State& state, const Memory& memory, Op op, Usage usage)
8349 {
8350 	switch (op)
8351 	{
8352 		case OP_MAP:
8353 			DE_ASSERT(state.stage == STAGE_HOST);
8354 			DE_ASSERT(!state.mapped);
8355 			state.mapped = true;
8356 			break;
8357 
8358 		case OP_UNMAP:
8359 			DE_ASSERT(state.stage == STAGE_HOST);
8360 			DE_ASSERT(state.mapped);
8361 			state.mapped = false;
8362 			break;
8363 
8364 		case OP_MAP_FLUSH:
8365 			DE_ASSERT(state.stage == STAGE_HOST);
8366 			DE_ASSERT(!state.hostFlushed);
8367 			state.hostFlushed = true;
8368 			break;
8369 
8370 		case OP_MAP_INVALIDATE:
8371 			DE_ASSERT(state.stage == STAGE_HOST);
8372 			DE_ASSERT(!state.hostInvalidated);
8373 			state.hostInvalidated = true;
8374 			break;
8375 
8376 		case OP_MAP_READ:
8377 			DE_ASSERT(state.stage == STAGE_HOST);
8378 			DE_ASSERT(state.hostInvalidated);
8379 			state.rng.getUint32();
8380 			break;
8381 
8382 		case OP_MAP_WRITE:
8383 			DE_ASSERT(state.stage == STAGE_HOST);
8384 			if ((memory.getMemoryType().propertyFlags & vk::VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) == 0)
8385 				state.hostFlushed = false;
8386 
8387 			state.memoryDefined = true;
8388 			state.imageDefined = false;
8389 			state.imageLayout = vk::VK_IMAGE_LAYOUT_UNDEFINED;
8390 			state.rng.getUint32();
8391 			break;
8392 
8393 		case OP_MAP_MODIFY:
8394 			DE_ASSERT(state.stage == STAGE_HOST);
8395 			DE_ASSERT(state.hostInvalidated);
8396 
8397 			if ((memory.getMemoryType().propertyFlags & vk::VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) == 0)
8398 				state.hostFlushed = false;
8399 
8400 			state.rng.getUint32();
8401 			break;
8402 
8403 		case OP_BUFFER_CREATE:
8404 			DE_ASSERT(state.stage == STAGE_HOST);
8405 			DE_ASSERT(!state.hasBuffer);
8406 
8407 			state.hasBuffer = true;
8408 			break;
8409 
8410 		case OP_BUFFER_DESTROY:
8411 			DE_ASSERT(state.stage == STAGE_HOST);
8412 			DE_ASSERT(state.hasBuffer);
8413 			DE_ASSERT(state.hasBoundBufferMemory);
8414 
8415 			state.hasBuffer = false;
8416 			state.hasBoundBufferMemory = false;
8417 			break;
8418 
8419 		case OP_BUFFER_BINDMEMORY:
8420 			DE_ASSERT(state.stage == STAGE_HOST);
8421 			DE_ASSERT(state.hasBuffer);
8422 			DE_ASSERT(!state.hasBoundBufferMemory);
8423 
8424 			state.hasBoundBufferMemory = true;
8425 			break;
8426 
8427 		case OP_IMAGE_CREATE:
8428 			DE_ASSERT(state.stage == STAGE_HOST);
8429 			DE_ASSERT(!state.hasImage);
8430 			DE_ASSERT(!state.hasBuffer);
8431 
8432 			state.hasImage = true;
8433 			break;
8434 
8435 		case OP_IMAGE_DESTROY:
8436 			DE_ASSERT(state.stage == STAGE_HOST);
8437 			DE_ASSERT(state.hasImage);
8438 			DE_ASSERT(state.hasBoundImageMemory);
8439 
8440 			state.hasImage = false;
8441 			state.hasBoundImageMemory = false;
8442 			state.imageLayout = vk::VK_IMAGE_LAYOUT_UNDEFINED;
8443 			state.imageDefined = false;
8444 			break;
8445 
8446 		case OP_IMAGE_BINDMEMORY:
8447 			DE_ASSERT(state.stage == STAGE_HOST);
8448 			DE_ASSERT(state.hasImage);
8449 			DE_ASSERT(!state.hasBoundImageMemory);
8450 
8451 			state.hasBoundImageMemory = true;
8452 			break;
8453 
8454 		case OP_IMAGE_TRANSITION_LAYOUT:
8455 		{
8456 			DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER);
8457 			DE_ASSERT(state.hasImage);
8458 			DE_ASSERT(state.hasBoundImageMemory);
8459 
8460 			// \todo [2016-03-09 mika] Support linear tiling and predefined data
8461 			const vk::VkImageLayout		srcLayout	= state.rng.getFloat() < 0.9f ? state.imageLayout : vk::VK_IMAGE_LAYOUT_UNDEFINED;
8462 			const vk::VkImageLayout		dstLayout	= getRandomNextLayout(state.rng, usage, srcLayout);
8463 
8464 			vk::VkPipelineStageFlags	dirtySrcStages;
8465 			vk::VkAccessFlags			dirtySrcAccesses;
8466 			vk::VkPipelineStageFlags	dirtyDstStages;
8467 			vk::VkAccessFlags			dirtyDstAccesses;
8468 
8469 			vk::VkPipelineStageFlags	srcStages;
8470 			vk::VkAccessFlags			srcAccesses;
8471 			vk::VkPipelineStageFlags	dstStages;
8472 			vk::VkAccessFlags			dstAccesses;
8473 
8474 			state.cache.getFullBarrier(dirtySrcStages, dirtySrcAccesses, dirtyDstStages, dirtyDstAccesses);
8475 
8476 			// Try masking some random bits
8477 			srcStages	= dirtySrcStages;
8478 			srcAccesses	= dirtySrcAccesses;
8479 
8480 			dstStages	= state.cache.getAllowedStages() & state.rng.getUint32();
8481 			dstAccesses	= state.cache.getAllowedAcceses() & state.rng.getUint32();
8482 
8483 			// If there are no bits in dst stage mask use all stages
8484 			dstStages	= dstStages ? dstStages : state.cache.getAllowedStages();
8485 
8486 			if (!srcStages)
8487 				srcStages = dstStages;
8488 
8489 			if (srcLayout == vk::VK_IMAGE_LAYOUT_UNDEFINED)
8490 				state.imageDefined = false;
8491 
8492 			state.commandBufferIsEmpty = false;
8493 			state.imageLayout = dstLayout;
8494 			state.memoryDefined = false;
8495 			state.cache.imageLayoutBarrier(srcStages, srcAccesses, dstStages, dstAccesses);
8496 			break;
8497 		}
8498 
8499 		case OP_QUEUE_WAIT_FOR_IDLE:
8500 			DE_ASSERT(state.stage == STAGE_HOST);
8501 			DE_ASSERT(!state.queueIdle);
8502 
8503 			state.queueIdle = true;
8504 
8505 			state.cache.waitForIdle();
8506 			break;
8507 
8508 		case OP_DEVICE_WAIT_FOR_IDLE:
8509 			DE_ASSERT(state.stage == STAGE_HOST);
8510 			DE_ASSERT(!state.deviceIdle);
8511 
8512 			state.queueIdle = true;
8513 			state.deviceIdle = true;
8514 
8515 			state.cache.waitForIdle();
8516 			break;
8517 
8518 		case OP_COMMAND_BUFFER_BEGIN:
8519 			DE_ASSERT(state.stage == STAGE_HOST);
8520 			state.stage = STAGE_COMMAND_BUFFER;
8521 			state.commandBufferIsEmpty = true;
8522 			// Makes host writes visible to command buffer
8523 			state.cache.submitCommandBuffer();
8524 			break;
8525 
8526 		case OP_COMMAND_BUFFER_END:
8527 			DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER);
8528 			state.stage = STAGE_HOST;
8529 			state.queueIdle = false;
8530 			state.deviceIdle = false;
8531 			break;
8532 
8533 		case OP_BUFFER_COPY_FROM_BUFFER:
8534 		case OP_BUFFER_COPY_FROM_IMAGE:
8535 		case OP_BUFFER_UPDATE:
8536 		case OP_BUFFER_FILL:
8537 			state.rng.getUint32();
8538 			DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER);
8539 
8540 			if ((memory.getMemoryType().propertyFlags & vk::VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) == 0)
8541 				state.hostInvalidated = false;
8542 
8543 			state.commandBufferIsEmpty = false;
8544 			state.memoryDefined = true;
8545 			state.imageDefined = false;
8546 			state.imageLayout = vk::VK_IMAGE_LAYOUT_UNDEFINED;
8547 			state.cache.perform(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_WRITE_BIT);
8548 			break;
8549 
8550 		case OP_BUFFER_COPY_TO_BUFFER:
8551 		case OP_BUFFER_COPY_TO_IMAGE:
8552 			DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER);
8553 
8554 			state.commandBufferIsEmpty = false;
8555 			state.cache.perform(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_READ_BIT);
8556 			break;
8557 
8558 		case OP_IMAGE_BLIT_FROM_IMAGE:
8559 			state.rng.getBool();
8560 			// Fall through
8561 		case OP_IMAGE_COPY_FROM_BUFFER:
8562 		case OP_IMAGE_COPY_FROM_IMAGE:
8563 			state.rng.getUint32();
8564 			DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER);
8565 
8566 			if ((memory.getMemoryType().propertyFlags & vk::VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) == 0)
8567 				state.hostInvalidated = false;
8568 
8569 			state.commandBufferIsEmpty = false;
8570 			state.memoryDefined = false;
8571 			state.imageDefined = true;
8572 			state.cache.perform(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_WRITE_BIT);
8573 			break;
8574 
8575 		case OP_IMAGE_BLIT_TO_IMAGE:
8576 			state.rng.getBool();
8577 			// Fall through
8578 		case OP_IMAGE_COPY_TO_BUFFER:
8579 		case OP_IMAGE_COPY_TO_IMAGE:
8580 			DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER);
8581 
8582 			state.commandBufferIsEmpty = false;
8583 			state.cache.perform(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_READ_BIT);
8584 			break;
8585 
8586 		case OP_PIPELINE_BARRIER_GLOBAL:
8587 		case OP_PIPELINE_BARRIER_BUFFER:
8588 		case OP_PIPELINE_BARRIER_IMAGE:
8589 		{
8590 			DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER);
8591 
8592 			vk::VkPipelineStageFlags	dirtySrcStages;
8593 			vk::VkAccessFlags			dirtySrcAccesses;
8594 			vk::VkPipelineStageFlags	dirtyDstStages;
8595 			vk::VkAccessFlags			dirtyDstAccesses;
8596 
8597 			vk::VkPipelineStageFlags	srcStages;
8598 			vk::VkAccessFlags			srcAccesses;
8599 			vk::VkPipelineStageFlags	dstStages;
8600 			vk::VkAccessFlags			dstAccesses;
8601 
8602 			state.cache.getFullBarrier(dirtySrcStages, dirtySrcAccesses, dirtyDstStages, dirtyDstAccesses);
8603 
8604 			// Try masking some random bits
8605 			srcStages	= dirtySrcStages & state.rng.getUint32();
8606 			srcAccesses	= dirtySrcAccesses & state.rng.getUint32();
8607 
8608 			dstStages	= dirtyDstStages & state.rng.getUint32();
8609 			dstAccesses	= dirtyDstAccesses & state.rng.getUint32();
8610 
8611 			// If there are no bits in stage mask use the original dirty stages
8612 			srcStages	= srcStages ? srcStages : dirtySrcStages;
8613 			dstStages	= dstStages ? dstStages : dirtyDstStages;
8614 
8615 			if (!srcStages)
8616 				srcStages = dstStages;
8617 
8618 			state.commandBufferIsEmpty = false;
8619 			state.cache.barrier(srcStages, srcAccesses, dstStages, dstAccesses);
8620 			break;
8621 		}
8622 
8623 		case OP_RENDERPASS_BEGIN:
8624 		{
8625 			DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER);
8626 
8627 			state.renderPassIsEmpty	= true;
8628 			state.stage				= STAGE_RENDER_PASS;
8629 			break;
8630 		}
8631 
8632 		case OP_RENDERPASS_END:
8633 		{
8634 			DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8635 
8636 			state.renderPassIsEmpty	= true;
8637 			state.stage				= STAGE_COMMAND_BUFFER;
8638 			break;
8639 		}
8640 
8641 		case OP_RENDER_VERTEX_BUFFER:
8642 		{
8643 			DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8644 
8645 			state.renderPassIsEmpty = false;
8646 			state.cache.perform(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT);
8647 			break;
8648 		}
8649 
8650 		case OP_RENDER_INDEX_BUFFER:
8651 		{
8652 			DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8653 
8654 			state.renderPassIsEmpty = false;
8655 			state.cache.perform(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_INDEX_READ_BIT);
8656 			break;
8657 		}
8658 
8659 		case OP_RENDER_VERTEX_UNIFORM_BUFFER:
8660 		case OP_RENDER_VERTEX_UNIFORM_TEXEL_BUFFER:
8661 		{
8662 			DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8663 
8664 			state.renderPassIsEmpty = false;
8665 			state.cache.perform(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT);
8666 			break;
8667 		}
8668 
8669 		case OP_RENDER_FRAGMENT_UNIFORM_BUFFER:
8670 		case OP_RENDER_FRAGMENT_UNIFORM_TEXEL_BUFFER:
8671 		{
8672 			DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8673 
8674 			state.renderPassIsEmpty = false;
8675 			state.cache.perform(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT);
8676 			break;
8677 		}
8678 
8679 		case OP_RENDER_VERTEX_STORAGE_BUFFER:
8680 		case OP_RENDER_VERTEX_STORAGE_TEXEL_BUFFER:
8681 		{
8682 			DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8683 
8684 			state.renderPassIsEmpty = false;
8685 			state.cache.perform(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT);
8686 			break;
8687 		}
8688 
8689 		case OP_RENDER_FRAGMENT_STORAGE_BUFFER:
8690 		case OP_RENDER_FRAGMENT_STORAGE_TEXEL_BUFFER:
8691 		{
8692 			DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8693 
8694 			state.renderPassIsEmpty = false;
8695 			state.cache.perform(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT);
8696 			break;
8697 		}
8698 
8699 		case OP_RENDER_FRAGMENT_STORAGE_IMAGE:
8700 		case OP_RENDER_FRAGMENT_SAMPLED_IMAGE:
8701 		{
8702 			DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8703 
8704 			state.renderPassIsEmpty = false;
8705 			state.cache.perform(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT);
8706 			break;
8707 		}
8708 
8709 		case OP_RENDER_VERTEX_STORAGE_IMAGE:
8710 		case OP_RENDER_VERTEX_SAMPLED_IMAGE:
8711 		{
8712 			DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8713 
8714 			state.renderPassIsEmpty = false;
8715 			state.cache.perform(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT);
8716 			break;
8717 		}
8718 
8719 		default:
8720 			DE_FATAL("Unknown op");
8721 	}
8722 }
8723 
8724 de::MovePtr<Command> createHostCommand (Op					op,
8725 										de::Random&			rng,
8726 										Usage				usage,
8727 										vk::VkSharingMode	sharing)
8728 {
8729 	switch (op)
8730 	{
8731 		case OP_MAP:					return de::MovePtr<Command>(new Map());
8732 		case OP_UNMAP:					return de::MovePtr<Command>(new UnMap());
8733 
8734 		case OP_MAP_FLUSH:				return de::MovePtr<Command>(new Flush());
8735 		case OP_MAP_INVALIDATE:			return de::MovePtr<Command>(new Invalidate());
8736 
8737 		case OP_MAP_READ:				return de::MovePtr<Command>(new HostMemoryAccess(true, false, rng.getUint32()));
8738 		case OP_MAP_WRITE:				return de::MovePtr<Command>(new HostMemoryAccess(false, true, rng.getUint32()));
8739 		case OP_MAP_MODIFY:				return de::MovePtr<Command>(new HostMemoryAccess(true, true, rng.getUint32()));
8740 
8741 		case OP_BUFFER_CREATE:			return de::MovePtr<Command>(new CreateBuffer(usageToBufferUsageFlags(usage), sharing));
8742 		case OP_BUFFER_DESTROY:			return de::MovePtr<Command>(new DestroyBuffer());
8743 		case OP_BUFFER_BINDMEMORY:		return de::MovePtr<Command>(new BindBufferMemory());
8744 
8745 		case OP_IMAGE_CREATE:			return de::MovePtr<Command>(new CreateImage(usageToImageUsageFlags(usage), sharing));
8746 		case OP_IMAGE_DESTROY:			return de::MovePtr<Command>(new DestroyImage());
8747 		case OP_IMAGE_BINDMEMORY:		return de::MovePtr<Command>(new BindImageMemory());
8748 
8749 		case OP_QUEUE_WAIT_FOR_IDLE:	return de::MovePtr<Command>(new QueueWaitIdle());
8750 		case OP_DEVICE_WAIT_FOR_IDLE:	return de::MovePtr<Command>(new DeviceWaitIdle());
8751 
8752 		default:
8753 			DE_FATAL("Unknown op");
8754 			return de::MovePtr<Command>(DE_NULL);
8755 	}
8756 }
8757 
8758 de::MovePtr<CmdCommand> createCmdCommand (de::Random&	rng,
8759 										  const State&	state,
8760 										  Op			op,
8761 										  Usage			usage)
8762 {
8763 	switch (op)
8764 	{
8765 		case OP_BUFFER_FILL:					return de::MovePtr<CmdCommand>(new FillBuffer(rng.getUint32()));
8766 		case OP_BUFFER_UPDATE:					return de::MovePtr<CmdCommand>(new UpdateBuffer(rng.getUint32()));
8767 		case OP_BUFFER_COPY_TO_BUFFER:			return de::MovePtr<CmdCommand>(new BufferCopyToBuffer());
8768 		case OP_BUFFER_COPY_FROM_BUFFER:		return de::MovePtr<CmdCommand>(new BufferCopyFromBuffer(rng.getUint32()));
8769 
8770 		case OP_BUFFER_COPY_TO_IMAGE:			return de::MovePtr<CmdCommand>(new BufferCopyToImage());
8771 		case OP_BUFFER_COPY_FROM_IMAGE:			return de::MovePtr<CmdCommand>(new BufferCopyFromImage(rng.getUint32()));
8772 
8773 		case OP_IMAGE_TRANSITION_LAYOUT:
8774 		{
8775 			DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER);
8776 			DE_ASSERT(state.hasImage);
8777 			DE_ASSERT(state.hasBoundImageMemory);
8778 
8779 			const vk::VkImageLayout		srcLayout	= rng.getFloat() < 0.9f ? state.imageLayout : vk::VK_IMAGE_LAYOUT_UNDEFINED;
8780 			const vk::VkImageLayout		dstLayout	= getRandomNextLayout(rng, usage, srcLayout);
8781 
8782 			vk::VkPipelineStageFlags	dirtySrcStages;
8783 			vk::VkAccessFlags			dirtySrcAccesses;
8784 			vk::VkPipelineStageFlags	dirtyDstStages;
8785 			vk::VkAccessFlags			dirtyDstAccesses;
8786 
8787 			vk::VkPipelineStageFlags	srcStages;
8788 			vk::VkAccessFlags			srcAccesses;
8789 			vk::VkPipelineStageFlags	dstStages;
8790 			vk::VkAccessFlags			dstAccesses;
8791 
8792 			state.cache.getFullBarrier(dirtySrcStages, dirtySrcAccesses, dirtyDstStages, dirtyDstAccesses);
8793 
8794 			// Try masking some random bits
8795 			srcStages	= dirtySrcStages;
8796 			srcAccesses	= dirtySrcAccesses;
8797 
8798 			dstStages	= state.cache.getAllowedStages() & rng.getUint32();
8799 			dstAccesses	= state.cache.getAllowedAcceses() & rng.getUint32();
8800 
8801 			// If no bits are left in the dst stage mask, use all allowed stages
8802 			dstStages	= dstStages ? dstStages : state.cache.getAllowedStages();
8803 
8804 			if (!srcStages)
8805 				srcStages = dstStages;
8806 
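			// The ImageTransition command implemented elsewhere in this file records an
			// image memory barrier roughly like the sketch below (illustration only;
			// 'cmd' and 'image' stand for the command buffer and image owned by the
			// prepare/execute contexts):
			//
			//   const vk::VkImageMemoryBarrier barrier =
			//   {
			//       vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
			//       DE_NULL,
			//       srcAccesses,                      // srcAccessMask
			//       dstAccesses,                      // dstAccessMask
			//       srcLayout,                        // oldLayout
			//       dstLayout,                        // newLayout
			//       vk::VK_QUEUE_FAMILY_IGNORED,
			//       vk::VK_QUEUE_FAMILY_IGNORED,
			//       image,
			//       { vk::VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u }
			//   };
			//   vkd.cmdPipelineBarrier(cmd, srcStages, dstStages, 0u, 0u, DE_NULL, 0u, DE_NULL, 1u, &barrier);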
8807 			return de::MovePtr<CmdCommand>(new ImageTransition(srcStages, srcAccesses, dstStages, dstAccesses, srcLayout, dstLayout));
8808 		}
8809 
8810 		case OP_IMAGE_COPY_TO_BUFFER:			return de::MovePtr<CmdCommand>(new ImageCopyToBuffer(state.imageLayout));
8811 		case OP_IMAGE_COPY_FROM_BUFFER:			return de::MovePtr<CmdCommand>(new ImageCopyFromBuffer(rng.getUint32(), state.imageLayout));
8812 		case OP_IMAGE_COPY_TO_IMAGE:			return de::MovePtr<CmdCommand>(new ImageCopyToImage(state.imageLayout));
8813 		case OP_IMAGE_COPY_FROM_IMAGE:			return de::MovePtr<CmdCommand>(new ImageCopyFromImage(rng.getUint32(), state.imageLayout));
8814 		case OP_IMAGE_BLIT_TO_IMAGE:
8815 		{
8816 			const BlitScale scale = rng.getBool() ? BLIT_SCALE_20 : BLIT_SCALE_10;
8817 			return de::MovePtr<CmdCommand>(new ImageBlitToImage(scale, state.imageLayout));
8818 		}
8819 
8820 		case OP_IMAGE_BLIT_FROM_IMAGE:
8821 		{
8822 			const BlitScale scale = rng.getBool() ? BLIT_SCALE_20 : BLIT_SCALE_10;
8823 			return de::MovePtr<CmdCommand>(new ImageBlitFromImage(rng.getUint32(), scale, state.imageLayout));
8824 		}
8825 
8826 		case OP_PIPELINE_BARRIER_GLOBAL:
8827 		case OP_PIPELINE_BARRIER_BUFFER:
8828 		case OP_PIPELINE_BARRIER_IMAGE:
8829 		{
8830 			vk::VkPipelineStageFlags	dirtySrcStages;
8831 			vk::VkAccessFlags			dirtySrcAccesses;
8832 			vk::VkPipelineStageFlags	dirtyDstStages;
8833 			vk::VkAccessFlags			dirtyDstAccesses;
8834 
8835 			vk::VkPipelineStageFlags	srcStages;
8836 			vk::VkAccessFlags			srcAccesses;
8837 			vk::VkPipelineStageFlags	dstStages;
8838 			vk::VkAccessFlags			dstAccesses;
8839 
8840 			state.cache.getFullBarrier(dirtySrcStages, dirtySrcAccesses, dirtyDstStages, dirtyDstAccesses);
8841 
8842 			// Try masking some random bits
8843 			srcStages	= dirtySrcStages & rng.getUint32();
8844 			srcAccesses	= dirtySrcAccesses & rng.getUint32();
8845 
8846 			dstStages	= dirtyDstStages & rng.getUint32();
8847 			dstAccesses	= dirtyDstAccesses & rng.getUint32();
8848 
8849 			// If no bits are left in a stage mask, fall back to the original dirty stages
8850 			srcStages	= srcStages ? srcStages : dirtySrcStages;
8851 			dstStages	= dstStages ? dstStages : dirtyDstStages;
8852 
8853 			if (!srcStages)
8854 				srcStages = dstStages;
8855 
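			// Empty stage masks are not valid for vkCmdPipelineBarrier, which is why the
			// code above falls back to the dirty masks whenever the random masking
			// clears every bit.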
8856 			PipelineBarrier::Type type;
8857 
8858 			if (op == OP_PIPELINE_BARRIER_IMAGE)
8859 				type = PipelineBarrier::TYPE_IMAGE;
8860 			else if (op == OP_PIPELINE_BARRIER_BUFFER)
8861 				type = PipelineBarrier::TYPE_BUFFER;
8862 			else if (op == OP_PIPELINE_BARRIER_GLOBAL)
8863 				type = PipelineBarrier::TYPE_GLOBAL;
8864 			else
8865 			{
8866 				type = PipelineBarrier::TYPE_LAST;
8867 				DE_FATAL("Unknown op");
8868 			}
8869 
8870 			if (type == PipelineBarrier::TYPE_IMAGE)
8871 				return de::MovePtr<CmdCommand>(new PipelineBarrier(srcStages, srcAccesses, dstStages, dstAccesses, type, tcu::just(state.imageLayout)));
8872 			else
8873 				return de::MovePtr<CmdCommand>(new PipelineBarrier(srcStages, srcAccesses, dstStages, dstAccesses, type, tcu::nothing<vk::VkImageLayout>()));
8874 		}
8875 
8876 		default:
8877 			DE_FATAL("Unknown op");
8878 			return de::MovePtr<CmdCommand>(DE_NULL);
8879 	}
8880 }
8881 
8882 de::MovePtr<RenderPassCommand> createRenderPassCommand (de::Random&,
8883 														const State&,
8884 														Op				op)
8885 {
8886 	switch (op)
8887 	{
8888 		case OP_RENDER_VERTEX_BUFFER:					return de::MovePtr<RenderPassCommand>(new RenderVertexBuffer());
8889 		case OP_RENDER_INDEX_BUFFER:					return de::MovePtr<RenderPassCommand>(new RenderIndexBuffer());
8890 
8891 		case OP_RENDER_VERTEX_UNIFORM_BUFFER:			return de::MovePtr<RenderPassCommand>(new RenderVertexUniformBuffer());
8892 		case OP_RENDER_FRAGMENT_UNIFORM_BUFFER:			return de::MovePtr<RenderPassCommand>(new RenderFragmentUniformBuffer());
8893 
8894 		case OP_RENDER_VERTEX_UNIFORM_TEXEL_BUFFER:		return de::MovePtr<RenderPassCommand>(new RenderVertexUniformTexelBuffer());
8895 		case OP_RENDER_FRAGMENT_UNIFORM_TEXEL_BUFFER:	return de::MovePtr<RenderPassCommand>(new RenderFragmentUniformTexelBuffer());
8896 
8897 		case OP_RENDER_VERTEX_STORAGE_BUFFER:			return de::MovePtr<RenderPassCommand>(new RenderVertexStorageBuffer());
8898 		case OP_RENDER_FRAGMENT_STORAGE_BUFFER:			return de::MovePtr<RenderPassCommand>(new RenderFragmentStorageBuffer());
8899 
8900 		case OP_RENDER_VERTEX_STORAGE_TEXEL_BUFFER:		return de::MovePtr<RenderPassCommand>(new RenderVertexStorageTexelBuffer());
8901 		case OP_RENDER_FRAGMENT_STORAGE_TEXEL_BUFFER:	return de::MovePtr<RenderPassCommand>(new RenderFragmentStorageTexelBuffer());
8902 
8903 		case OP_RENDER_VERTEX_STORAGE_IMAGE:			return de::MovePtr<RenderPassCommand>(new RenderVertexStorageImage());
8904 		case OP_RENDER_FRAGMENT_STORAGE_IMAGE:			return de::MovePtr<RenderPassCommand>(new RenderFragmentStorageImage());
8905 
8906 		case OP_RENDER_VERTEX_SAMPLED_IMAGE:			return de::MovePtr<RenderPassCommand>(new RenderVertexSampledImage());
8907 		case OP_RENDER_FRAGMENT_SAMPLED_IMAGE:			return de::MovePtr<RenderPassCommand>(new RenderFragmentSampledImage());
8908 
8909 		default:
8910 			DE_FATAL("Unknown op");
8911 			return de::MovePtr<RenderPassCommand>(DE_NULL);
8912 	}
8913 }
8914 
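// Generates render pass level commands: keeps picking random available ops until
// OP_RENDERPASS_END is selected (or opCount is reached) and wraps the resulting
// commands into a single SubmitRenderPass command, which takes ownership of the
// raw pointers. On exception the commands created so far are deleted here.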
8915 de::MovePtr<CmdCommand> createRenderPassCommands (const Memory&	memory,
8916 												  de::Random&	nextOpRng,
8917 												  State&		state,
8918 												  Usage			usage,
8919 												  size_t&		opNdx,
8920 												  size_t		opCount)
8921 {
8922 	vector<RenderPassCommand*>	commands;
8923 
8924 	try
8925 	{
8926 		for (; opNdx < opCount; opNdx++)
8927 		{
8928 			vector<Op>	ops;
8929 
8930 			getAvailableOps(state, memory.getSupportBuffers(), memory.getSupportImages(), usage, ops);
8931 
8932 			DE_ASSERT(!ops.empty());
8933 
8934 			{
8935 				const Op op = nextOpRng.choose<Op>(ops.begin(), ops.end());
8936 
8937 				if (op == OP_RENDERPASS_END)
8938 				{
8939 					break;
8940 				}
8941 				else
8942 				{
8943 					de::Random	rng	(state.rng);
8944 
8945 					commands.push_back(createRenderPassCommand(rng, state, op).release());
8946 					applyOp(state, memory, op, usage);
8947 
8948 					DE_ASSERT(state.rng == rng);
8949 				}
8950 			}
8951 		}
8952 
8953 		applyOp(state, memory, OP_RENDERPASS_END, usage);
8954 		return de::MovePtr<CmdCommand>(new SubmitRenderPass(commands));
8955 	}
8956 	catch (...)
8957 	{
8958 		for (size_t commandNdx = 0; commandNdx < commands.size(); commandNdx++)
8959 			delete commands[commandNdx];
8960 
8961 		throw;
8962 	}
8963 }
8964 
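// Generates command buffer level commands until OP_COMMAND_BUFFER_END is selected
// (or opCount is reached) and wraps them into a SubmitCommandBuffer command.
// OP_RENDERPASS_BEGIN is handled specially: the state is updated first and the
// whole render pass is generated recursively by createRenderPassCommands().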
8965 de::MovePtr<Command> createCmdCommands (const Memory&	memory,
8966 										de::Random&		nextOpRng,
8967 										State&			state,
8968 										Usage			usage,
8969 										size_t&			opNdx,
8970 										size_t			opCount)
8971 {
8972 	vector<CmdCommand*>	commands;
8973 
8974 	try
8975 	{
8976 		for (; opNdx < opCount; opNdx++)
8977 		{
8978 			vector<Op>	ops;
8979 
8980 			getAvailableOps(state, memory.getSupportBuffers(), memory.getSupportImages(), usage, ops);
8981 
8982 			DE_ASSERT(!ops.empty());
8983 
8984 			{
8985 				const Op op = nextOpRng.choose<Op>(ops.begin(), ops.end());
8986 
8987 				if (op == OP_COMMAND_BUFFER_END)
8988 				{
8989 					break;
8990 				}
8991 				else
8992 				{
8993 					// \note Command needs to know the state before the operation
8994 					if (op == OP_RENDERPASS_BEGIN)
8995 					{
8996 						applyOp(state, memory, op, usage);
8997 						commands.push_back(createRenderPassCommands(memory, nextOpRng, state, usage, opNdx, opCount).release());
8998 					}
8999 					else
9000 					{
9001 						de::Random	rng	(state.rng);
9002 
9003 						commands.push_back(createCmdCommand(rng, state, op, usage).release());
9004 						applyOp(state, memory, op, usage);
9005 
9006 						DE_ASSERT(state.rng == rng);
9007 					}
9008 
9009 				}
9010 			}
9011 		}
9012 
9013 		applyOp(state, memory, OP_COMMAND_BUFFER_END, usage);
9014 		return de::MovePtr<Command>(new SubmitCommandBuffer(commands));
9015 	}
9016 	catch (...)
9017 	{
9018 		for (size_t commandNdx = 0; commandNdx < commands.size(); commandNdx++)
9019 			delete commands[commandNdx];
9020 
9021 		throw;
9022 	}
9023 }
9024 
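// Top level command stream generation. Two random number generators are used on
// purpose: 'nextOpRng' only picks which op to perform next, while 'state.rng' is
// used (via a copy) for the parameters of each generated command. The
// DE_ASSERT(state.rng == rng) checks verify that applyOp() and the command
// constructors consume exactly the same random values, i.e. that the simulated
// state stays in sync with the generated commands.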
9025 void createCommands (vector<Command*>&	commands,
9026 					 deUint32			seed,
9027 					 const Memory&		memory,
9028 					 Usage				usage,
9029 					 vk::VkSharingMode	sharingMode,
9030 					 size_t				opCount)
9031 {
9032 	State			state		(usage, seed);
9033 	// Used only to select the next operation
9034 	de::Random		nextOpRng	(seed ^ 12930809);
9035 
9036 	commands.reserve(opCount);
9037 
9038 	for (size_t opNdx = 0; opNdx < opCount; opNdx++)
9039 	{
9040 		vector<Op>	ops;
9041 
9042 		getAvailableOps(state, memory.getSupportBuffers(), memory.getSupportImages(), usage, ops);
9043 
9044 		DE_ASSERT(!ops.empty());
9045 
9046 		{
9047 			const Op	op	= nextOpRng.choose<Op>(ops.begin(), ops.end());
9048 
9049 			if (op == OP_COMMAND_BUFFER_BEGIN)
9050 			{
9051 				applyOp(state, memory, op, usage);
9052 				commands.push_back(createCmdCommands(memory, nextOpRng, state, usage, opNdx, opCount).release());
9053 			}
9054 			else
9055 			{
9056 				de::Random	rng	(state.rng);
9057 
9058 				commands.push_back(createHostCommand(op, rng, usage, sharingMode).release());
9059 				applyOp(state, memory, op, usage);
9060 
9061 				// Make sure that random generator is in sync
9062 				DE_ASSERT(state.rng == rng);
9063 			}
9064 		}
9065 	}
9066 
9067 	// Clean up resources
9068 	if (state.hasBuffer || state.hasImage)
9069 	{
9070 		if (!state.queueIdle)
9071 			commands.push_back(new QueueWaitIdle());
9072 
9073 		if (state.hasBuffer)
9074 			commands.push_back(new DestroyBuffer());
9075 
9076 		if (state.hasImage)
9077 			commands.push_back(new DestroyImage());
9078 	}
9079 }
9080 
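// Test instance that runs generated command streams against every memory type of
// the device. Execution is driven as a simple state machine: iterate() keeps
// invoking the current stage function (createCommandsAndAllocateMemory ->
// prepare -> execute -> verify) until a stage returns false, at which point the
// collected result is reported.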
9081 class MemoryTestInstance : public TestInstance
9082 {
9083 public:
9084 
9085 	typedef bool(MemoryTestInstance::*StageFunc)(void);
9086 
9087 												MemoryTestInstance				(::vkt::Context& context, const TestConfig& config);
9088 												~MemoryTestInstance				(void);
9089 
9090 	tcu::TestStatus								iterate							(void);
9091 
9092 private:
9093 	const TestConfig							m_config;
9094 	const size_t								m_iterationCount;
9095 	const size_t								m_opCount;
9096 	const vk::VkPhysicalDeviceMemoryProperties	m_memoryProperties;
9097 	deUint32									m_memoryTypeNdx;
9098 	size_t										m_iteration;
9099 	StageFunc									m_stage;
9100 	tcu::ResultCollector						m_resultCollector;
9101 
9102 	vector<Command*>							m_commands;
9103 	MovePtr<Memory>								m_memory;
9104 	MovePtr<Context>							m_renderContext;
9105 	MovePtr<PrepareContext>						m_prepareContext;
9106 
9107 	bool										nextIteration					(void);
9108 	bool										nextMemoryType					(void);
9109 
9110 	bool										createCommandsAndAllocateMemory	(void);
9111 	bool										prepare							(void);
9112 	bool										execute							(void);
9113 	bool										verify							(void);
9114 	void										resetResources					(void);
9115 };
9116 
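// Waits for the device to be idle before deleting the generated commands and
// releasing the prepare context and memory, so that nothing is destroyed while
// still in use by the GPU.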
9117 void MemoryTestInstance::resetResources (void)
9118 {
9119 	const vk::DeviceInterface&	vkd		= m_context.getDeviceInterface();
9120 	const vk::VkDevice			device	= m_context.getDevice();
9121 
9122 	VK_CHECK(vkd.deviceWaitIdle(device));
9123 
9124 	for (size_t commandNdx = 0; commandNdx < m_commands.size(); commandNdx++)
9125 	{
9126 		delete m_commands[commandNdx];
9127 		m_commands[commandNdx] = DE_NULL;
9128 	}
9129 
9130 	m_commands.clear();
9131 	m_prepareContext.clear();
9132 	m_memory.clear();
9133 }
9134 
9135 bool MemoryTestInstance::nextIteration (void)
9136 {
9137 	m_iteration++;
9138 
9139 	if (m_iteration < m_iterationCount)
9140 	{
9141 		resetResources();
9142 		m_stage = &MemoryTestInstance::createCommandsAndAllocateMemory;
9143 		return true;
9144 	}
9145 	else
9146 		return nextMemoryType();
9147 }
9148 
9149 bool MemoryTestInstance::nextMemoryType (void)
9150 {
9151 	resetResources();
9152 
9153 	DE_ASSERT(m_commands.empty());
9154 
9155 	m_memoryTypeNdx++;
9156 
9157 	if (m_memoryTypeNdx < m_memoryProperties.memoryTypeCount)
9158 	{
9159 		m_iteration	= 0;
9160 		m_stage		= &MemoryTestInstance::createCommandsAndAllocateMemory;
9161 
9162 		return true;
9163 	}
9164 	else
9165 	{
9166 		m_stage = DE_NULL;
9167 		return false;
9168 	}
9169 }
9170 
9171 MemoryTestInstance::MemoryTestInstance (::vkt::Context& context, const TestConfig& config)
9172 	: TestInstance			(context)
9173 	, m_config				(config)
9174 	, m_iterationCount		(5)
9175 	, m_opCount				(50)
9176 	, m_memoryProperties	(vk::getPhysicalDeviceMemoryProperties(context.getInstanceInterface(), context.getPhysicalDevice()))
9177 	, m_memoryTypeNdx		(0)
9178 	, m_iteration			(0)
9179 	, m_stage				(&MemoryTestInstance::createCommandsAndAllocateMemory)
9180 	, m_resultCollector		(context.getTestContext().getLog())
9181 
9182 	, m_memory				(DE_NULL)
9183 {
9184 	TestLog&	log	= context.getTestContext().getLog();
9185 	{
9186 		const tcu::ScopedLogSection section (log, "TestCaseInfo", "Test Case Info");
9187 
9188 		log << TestLog::Message << "Buffer size: " << config.size << TestLog::EndMessage;
9189 		log << TestLog::Message << "Sharing: " << config.sharing << TestLog::EndMessage;
9190 		log << TestLog::Message << "Access: " << config.usage << TestLog::EndMessage;
9191 	}
9192 
9193 	{
9194 		const tcu::ScopedLogSection section (log, "MemoryProperties", "Memory Properties");
9195 
9196 		for (deUint32 heapNdx = 0; heapNdx < m_memoryProperties.memoryHeapCount; heapNdx++)
9197 		{
9198 			const tcu::ScopedLogSection heapSection (log, "Heap" + de::toString(heapNdx), "Heap " + de::toString(heapNdx));
9199 
9200 			log << TestLog::Message << "Size: " << m_memoryProperties.memoryHeaps[heapNdx].size << TestLog::EndMessage;
9201 			log << TestLog::Message << "Flags: " << m_memoryProperties.memoryHeaps[heapNdx].flags << TestLog::EndMessage;
9202 		}
9203 
9204 		for (deUint32 memoryTypeNdx = 0; memoryTypeNdx < m_memoryProperties.memoryTypeCount; memoryTypeNdx++)
9205 		{
9206 			const tcu::ScopedLogSection memoryTypeSection (log, "MemoryType" + de::toString(memoryTypeNdx), "Memory type " + de::toString(memoryTypeNdx));
9207 
9208 			log << TestLog::Message << "Properties: " << m_memoryProperties.memoryTypes[memoryTypeNdx].propertyFlags << TestLog::EndMessage;
9209 			log << TestLog::Message << "Heap: " << m_memoryProperties.memoryTypes[memoryTypeNdx].heapIndex << TestLog::EndMessage;
9210 		}
9211 	}
9212 
9213 	{
9214 		const vk::InstanceInterface&			vki					= context.getInstanceInterface();
9215 		const vk::VkPhysicalDevice				physicalDevice		= context.getPhysicalDevice();
9216 		const vk::DeviceInterface&				vkd					= context.getDeviceInterface();
9217 		const vk::VkDevice						device				= context.getDevice();
9218 		const vk::VkQueue						queue				= context.getUniversalQueue();
9219 		const deUint32							queueFamilyIndex	= context.getUniversalQueueFamilyIndex();
9220 		vector<pair<deUint32, vk::VkQueue> >	queues;
9221 
9222 		queues.push_back(std::make_pair(queueFamilyIndex, queue));
9223 
9224 		m_renderContext = MovePtr<Context>(new Context(vki, vkd, physicalDevice, device, queue, queueFamilyIndex, queues, context.getBinaryCollection()));
9225 	}
9226 }
9227 
9228 MemoryTestInstance::~MemoryTestInstance (void)
9229 {
9230 	resetResources();
9231 }
9232 
9233 bool MemoryTestInstance::createCommandsAndAllocateMemory (void)
9234 {
9235 	const vk::VkDevice							device				= m_context.getDevice();
9236 	TestLog&									log					= m_context.getTestContext().getLog();
9237 	const vk::InstanceInterface&				vki					= m_context.getInstanceInterface();
9238 	const vk::VkPhysicalDevice					physicalDevice		= m_context.getPhysicalDevice();
9239 	const vk::DeviceInterface&					vkd					= m_context.getDeviceInterface();
9240 	const vk::VkPhysicalDeviceMemoryProperties	memoryProperties	= vk::getPhysicalDeviceMemoryProperties(vki, physicalDevice);
9241 	const tcu::ScopedLogSection					section				(log, "MemoryType" + de::toString(m_memoryTypeNdx) + "CreateCommands" + de::toString(m_iteration),
9242 																		  "Memory type " + de::toString(m_memoryTypeNdx) + " create commands iteration " + de::toString(m_iteration));
9243 	const vector<deUint32>&						queues				= m_renderContext->getQueueFamilies();
9244 
9245 	DE_ASSERT(m_commands.empty());
9246 
9247 	if (m_config.usage & (USAGE_HOST_READ | USAGE_HOST_WRITE)
9248 		&& !(memoryProperties.memoryTypes[m_memoryTypeNdx].propertyFlags & vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT))
9249 	{
9250 		log << TestLog::Message << "Memory type not supported" << TestLog::EndMessage;
9251 
9252 		return nextMemoryType();
9253 	}
9254 	else
9255 	{
9256 		try
9257 		{
9258 			const vk::VkBufferUsageFlags	bufferUsage		= usageToBufferUsageFlags(m_config.usage);
9259 			const vk::VkImageUsageFlags		imageUsage		= usageToImageUsageFlags(m_config.usage);
9260 			const vk::VkDeviceSize			maxBufferSize	= bufferUsage != 0
9261 															? roundBufferSizeToWxHx4(findMaxBufferSize(vkd, device, bufferUsage, m_config.sharing, queues, m_config.size, m_memoryTypeNdx))
9262 															: 0;
9263 			const IVec2						maxImageSize	= imageUsage != 0
9264 															? findMaxRGBA8ImageSize(vkd, device, imageUsage, m_config.sharing, queues, m_config.size, m_memoryTypeNdx)
9265 															: IVec2(0, 0);
9266 
9267 			log << TestLog::Message << "Max buffer size: " << maxBufferSize << TestLog::EndMessage;
9268 			log << TestLog::Message << "Max RGBA8 image size: " << maxImageSize << TestLog::EndMessage;
9269 
9270 			// Skip tests if there are no supported operations
9271 			if (maxBufferSize == 0
9272 				&& maxImageSize[0] == 0
9273 				&& (m_config.usage & (USAGE_HOST_READ|USAGE_HOST_WRITE)) == 0)
9274 			{
9275 				log << TestLog::Message << "Skipping memory type. None of the usages are supported." << TestLog::EndMessage;
9276 
9277 				return nextMemoryType();
9278 			}
9279 			else
9280 			{
9281 				const deUint32	seed	= 2830980989u ^ deUint32Hash((deUint32)(m_iteration) * m_memoryProperties.memoryTypeCount +  m_memoryTypeNdx);
9282 
9283 				m_memory	= MovePtr<Memory>(new Memory(vki, vkd, physicalDevice, device, m_config.size, m_memoryTypeNdx, maxBufferSize, maxImageSize[0], maxImageSize[1]));
9284 
9285 				log << TestLog::Message << "Create commands" << TestLog::EndMessage;
9286 				createCommands(m_commands, seed, *m_memory, m_config.usage, m_config.sharing, m_opCount);
9287 
9288 				m_stage = &MemoryTestInstance::prepare;
9289 				return true;
9290 			}
9291 		}
9292 		catch (const tcu::TestError& e)
9293 		{
9294 			m_resultCollector.fail("Failed, got exception: " + string(e.getMessage()));
9295 			return nextMemoryType();
9296 		}
9297 	}
9298 }
9299 
9300 bool MemoryTestInstance::prepare (void)
9301 {
9302 	TestLog&					log		= m_context.getTestContext().getLog();
9303 	const tcu::ScopedLogSection	section	(log, "MemoryType" + de::toString(m_memoryTypeNdx) + "Prepare" + de::toString(m_iteration),
9304 											  "Memory type " + de::toString(m_memoryTypeNdx) + " prepare iteration " + de::toString(m_iteration));
9305 
9306 	m_prepareContext = MovePtr<PrepareContext>(new PrepareContext(*m_renderContext, *m_memory));
9307 
9308 	DE_ASSERT(!m_commands.empty());
9309 
9310 	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
9311 	{
9312 		Command& command = *m_commands[cmdNdx];
9313 
9314 		try
9315 		{
9316 			command.prepare(*m_prepareContext);
9317 		}
9318 		catch (const tcu::TestError& e)
9319 		{
9320 			m_resultCollector.fail(de::toString(cmdNdx) + ":" + command.getName() + " failed to prepare, got exception: " + string(e.getMessage()));
9321 			return nextMemoryType();
9322 		}
9323 	}
9324 
9325 	m_stage = &MemoryTestInstance::execute;
9326 	return true;
9327 }
9328 
9329 bool MemoryTestInstance::execute (void)
9330 {
9331 	TestLog&					log				= m_context.getTestContext().getLog();
9332 	const tcu::ScopedLogSection	section			(log, "MemoryType" + de::toString(m_memoryTypeNdx) + "Execute" + de::toString(m_iteration),
9333 													  "Memory type " + de::toString(m_memoryTypeNdx) + " execute iteration " + de::toString(m_iteration));
9334 	ExecuteContext				executeContext	(*m_renderContext);
9335 	const vk::VkDevice			device			= m_context.getDevice();
9336 	const vk::DeviceInterface&	vkd				= m_context.getDeviceInterface();
9337 
9338 	DE_ASSERT(!m_commands.empty());
9339 
9340 	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
9341 	{
9342 		Command& command = *m_commands[cmdNdx];
9343 
9344 		try
9345 		{
9346 			command.execute(executeContext);
9347 		}
9348 		catch (const tcu::TestError& e)
9349 		{
9350 			m_resultCollector.fail(de::toString(cmdNdx) + ":" + command.getName() + " failed to execute, got exception: " + string(e.getMessage()));
9351 			return nextIteration();
9352 		}
9353 	}
9354 
9355 	VK_CHECK(vkd.deviceWaitIdle(device));
9356 
9357 	m_stage = &MemoryTestInstance::verify;
9358 	return true;
9359 }
9360 
9361 bool MemoryTestInstance::verify (void)
9362 {
9363 	DE_ASSERT(!m_commands.empty());
9364 
9365 	TestLog&					log				= m_context.getTestContext().getLog();
9366 	const tcu::ScopedLogSection	section			(log, "MemoryType" + de::toString(m_memoryTypeNdx) + "Verify" + de::toString(m_iteration),
9367 													  "Memory type " + de::toString(m_memoryTypeNdx) + " verify iteration " + de::toString(m_iteration));
9368 	VerifyContext				verifyContext	(log, m_resultCollector, *m_renderContext, m_config.size);
9369 
9370 	log << TestLog::Message << "Begin verify" << TestLog::EndMessage;
9371 
9372 	for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
9373 	{
9374 		Command& command = *m_commands[cmdNdx];
9375 
9376 		try
9377 		{
9378 			command.verify(verifyContext, cmdNdx);
9379 		}
9380 		catch (const tcu::TestError& e)
9381 		{
9382 			m_resultCollector.fail(de::toString(cmdNdx) + ":" + command.getName() + " failed to verify, got exception: " + string(e.getMessage()));
9383 			return nextIteration();
9384 		}
9385 	}
9386 
9387 	return nextIteration();
9388 }
9389 
9390 tcu::TestStatus MemoryTestInstance::iterate (void)
9391 {
9392 	if ((this->*m_stage)())
9393 		return tcu::TestStatus::incomplete();
9394 	else
9395 		return tcu::TestStatus(m_resultCollector.getResult(), m_resultCollector.getMessage());
9396 }
9397 
9398 struct AddPrograms
9399 {
9400 	void init (vk::SourceCollections& sources, TestConfig config) const
9401 	{
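		// Common convention for the vertex shaders below: 8-bit coordinate pairs
		// (x, y) in [0, 255] are mapped to clip space with pos = vec2(x, y) / 255.0
		// and gl_Position.xy = 1.998 * pos - 0.999, so the 256 possible values per
		// axis spread over [-0.999, 0.999] and land in distinct pixels of the
		// 256x256 render target (x = 0 -> -0.999, x = 255 -> +0.999).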
9402 		// Vertex buffer rendering
9403 		if (config.usage & USAGE_VERTEX_BUFFER)
9404 		{
9405 			const char* const vertexShader =
9406 				"#version 310 es\n"
9407 				"layout(location = 0) in highp vec2 a_position;\n"
9408 				"void main (void) {\n"
9409 				"\tgl_PointSize = 1.0;\n"
9410 				"\tgl_Position = vec4(1.998 * a_position - vec2(0.999), 0.0, 1.0);\n"
9411 				"}\n";
9412 
9413 			sources.glslSources.add("vertex-buffer.vert")
9414 				<< glu::VertexSource(vertexShader);
9415 		}
9416 
9417 		// Index buffer rendering
9418 		if (config.usage & USAGE_INDEX_BUFFER)
9419 		{
9420 			const char* const vertexShader =
9421 				"#version 310 es\n"
9422 				"precision highp float;\n"
9423 				"void main (void) {\n"
9424 				"\tgl_PointSize = 1.0;\n"
9425 				"\thighp vec2 pos = vec2(gl_VertexIndex % 256, gl_VertexIndex / 256) / vec2(255.0);\n"
9426 				"\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
9427 				"}\n";
9428 
9429 			sources.glslSources.add("index-buffer.vert")
9430 				<< glu::VertexSource(vertexShader);
9431 		}
9432 
9433 		if (config.usage & USAGE_UNIFORM_BUFFER)
9434 		{
9435 			{
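				// Each 32-bit word in the uniform block packs two vertices: the
				// even-indexed vertex in bits 0..15 and the odd-indexed one in bits
				// 16..31; within each 16-bit value the x coordinate is the low byte
				// and the y coordinate the high byte.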
9436 				std::ostringstream vertexShader;
9437 
9438 				vertexShader <<
9439 					"#version 310 es\n"
9440 					"precision highp float;\n"
9441 					"layout(set=0, binding=0) uniform Block\n"
9442 					"{\n"
9443 					"\thighp uvec4 values[" << de::toString<size_t>(MAX_UNIFORM_BUFFER_SIZE / (sizeof(deUint32) * 4)) << "];\n"
9444 					"} block;\n"
9445 					"void main (void) {\n"
9446 					"\tgl_PointSize = 1.0;\n"
9447 					"\thighp uvec4 vecVal = block.values[gl_VertexIndex / 8];\n"
9448 					"\thighp uint val;\n"
9449 					"\tif (((gl_VertexIndex / 2) % 4 == 0))\n"
9450 					"\t\tval = vecVal.x;\n"
9451 					"\telse if (((gl_VertexIndex / 2) % 4 == 1))\n"
9452 					"\t\tval = vecVal.y;\n"
9453 					"\telse if (((gl_VertexIndex / 2) % 4 == 2))\n"
9454 					"\t\tval = vecVal.z;\n"
9455 					"\telse if (((gl_VertexIndex / 2) % 4 == 3))\n"
9456 					"\t\tval = vecVal.w;\n"
9457 					"\tif ((gl_VertexIndex % 2) == 0)\n"
9458 					"\t\tval = val & 0xFFFFu;\n"
9459 					"\telse\n"
9460 					"\t\tval = val >> 16u;\n"
9461 					"\thighp vec2 pos = vec2(val & 0xFFu, val >> 8u) / vec2(255.0);\n"
9462 					"\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
9463 					"}\n";
9464 
9465 				sources.glslSources.add("uniform-buffer.vert")
9466 					<< glu::VertexSource(vertexShader.str());
9467 			}
9468 
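			// The fragment shader below (and the storage/texel buffer variants later
			// on) verifies buffer contents with dependent reads: each fragment starts
			// from its pixel index and chases values through the buffer for
			// pushC.valuesPerPixel iterations, so the output color depends on the
			// actual data and can be checked during verification.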
9469 			{
9470 				const size_t		arraySize		= MAX_UNIFORM_BUFFER_SIZE / (sizeof(deUint32) * 4);
9471 				const size_t		arrayIntSize	= arraySize * 4;
9472 				std::ostringstream	fragmentShader;
9473 
9474 				fragmentShader <<
9475 					"#version 310 es\n"
9476 					"precision highp float;\n"
9477 					"precision highp int;\n"
9478 					"layout(location = 0) out highp vec4 o_color;\n"
9479 					"layout(set=0, binding=0) uniform Block\n"
9480 					"{\n"
9481 					"\thighp uvec4 values[" << arraySize << "];\n"
9482 					"} block;\n"
9483 					"layout(push_constant) uniform PushC\n"
9484 					"{\n"
9485 					"\tuint callId;\n"
9486 					"\tuint valuesPerPixel;\n"
9487 					"} pushC;\n"
9488 					"void main (void) {\n"
9489 					"\thighp uint id = pushC.callId * (" << arrayIntSize << "u / pushC.valuesPerPixel) + uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x);\n"
9490 					"\tif (uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x) < pushC.callId * (" << arrayIntSize  << "u / pushC.valuesPerPixel))\n"
9491 					"\t\tdiscard;\n"
9492 					"\thighp uint value = id;\n"
9493 					"\tfor (uint i = 0u; i < pushC.valuesPerPixel; i++)\n"
9494 					"\t{\n"
9495 					"\t\thighp uvec4 vecVal = block.values[(value / 4u) % " << arraySize << "u];\n"
9496 					"\t\tif ((value % 4u) == 0u)\n"
9497 					"\t\t\tvalue = vecVal.x;\n"
9498 					"\t\telse if ((value % 4u) == 1u)\n"
9499 					"\t\t\tvalue = vecVal.y;\n"
9500 					"\t\telse if ((value % 4u) == 2u)\n"
9501 					"\t\t\tvalue = vecVal.z;\n"
9502 					"\t\telse if ((value % 4u) == 3u)\n"
9503 					"\t\t\tvalue = vecVal.w;\n"
9504 					"\t}\n"
9505 					"\tuvec4 valueOut = uvec4(value & 0xFFu, (value >> 8u) & 0xFFu, (value >> 16u) & 0xFFu, (value >> 24u) & 0xFFu);\n"
9506 					"\to_color = vec4(valueOut) / vec4(255.0);\n"
9507 					"}\n";
9508 
9509 				sources.glslSources.add("uniform-buffer.frag")
9510 					<< glu::FragmentSource(fragmentShader.str());
9511 			}
9512 		}
9513 
9514 		if (config.usage & USAGE_STORAGE_BUFFER)
9515 		{
9516 			{
9517 				// Vertex storage buffer rendering
9518 				const char* const vertexShader =
9519 					"#version 310 es\n"
9520 					"precision highp float;\n"
9521 					"layout(set=0, binding=0) buffer Block\n"
9522 					"{\n"
9523 					"\thighp uvec4 values[];\n"
9524 					"} block;\n"
9525 					"void main (void) {\n"
9526 					"\tgl_PointSize = 1.0;\n"
9527 					"\thighp uvec4 vecVal = block.values[gl_VertexIndex / 8];\n"
9528 					"\thighp uint val;\n"
9529 					"\tif (((gl_VertexIndex / 2) % 4 == 0))\n"
9530 					"\t\tval = vecVal.x;\n"
9531 					"\telse if (((gl_VertexIndex / 2) % 4 == 1))\n"
9532 					"\t\tval = vecVal.y;\n"
9533 					"\telse if (((gl_VertexIndex / 2) % 4 == 2))\n"
9534 					"\t\tval = vecVal.z;\n"
9535 					"\telse if (((gl_VertexIndex / 2) % 4 == 3))\n"
9536 					"\t\tval = vecVal.w;\n"
9537 					"\tif ((gl_VertexIndex % 2) == 0)\n"
9538 					"\t\tval = val & 0xFFFFu;\n"
9539 					"\telse\n"
9540 					"\t\tval = val >> 16u;\n"
9541 					"\thighp vec2 pos = vec2(val & 0xFFu, val >> 8u) / vec2(255.0);\n"
9542 					"\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
9543 					"}\n";
9544 
9545 				sources.glslSources.add("storage-buffer.vert")
9546 					<< glu::VertexSource(vertexShader);
9547 			}
9548 
9549 			{
9550 				std::ostringstream	fragmentShader;
9551 
9552 				fragmentShader <<
9553 					"#version 310 es\n"
9554 					"precision highp float;\n"
9555 					"precision highp int;\n"
9556 					"layout(location = 0) out highp vec4 o_color;\n"
9557 					"layout(set=0, binding=0) buffer Block\n"
9558 					"{\n"
9559 					"\thighp uvec4 values[];\n"
9560 					"} block;\n"
9561 					"layout(push_constant) uniform PushC\n"
9562 					"{\n"
9563 					"\tuint valuesPerPixel;\n"
9564 					"\tuint bufferSize;\n"
9565 					"} pushC;\n"
9566 					"void main (void) {\n"
9567 					"\thighp uint arrayIntSize = pushC.bufferSize / 4u;\n"
9568 					"\thighp uint id = uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x);\n"
9569 					"\thighp uint value = id;\n"
9570 					"\tfor (uint i = 0u; i < pushC.valuesPerPixel; i++)\n"
9571 					"\t{\n"
9572 					"\t\thighp uvec4 vecVal = block.values[(value / 4u) % (arrayIntSize / 4u)];\n"
9573 					"\t\tif ((value % 4u) == 0u)\n"
9574 					"\t\t\tvalue = vecVal.x;\n"
9575 					"\t\telse if ((value % 4u) == 1u)\n"
9576 					"\t\t\tvalue = vecVal.y;\n"
9577 					"\t\telse if ((value % 4u) == 2u)\n"
9578 					"\t\t\tvalue = vecVal.z;\n"
9579 					"\t\telse if ((value % 4u) == 3u)\n"
9580 					"\t\t\tvalue = vecVal.w;\n"
9581 					"\t}\n"
9582 					"\tuvec4 valueOut = uvec4(value & 0xFFu, (value >> 8u) & 0xFFu, (value >> 16u) & 0xFFu, (value >> 24u) & 0xFFu);\n"
9583 					"\to_color = vec4(valueOut) / vec4(255.0);\n"
9584 					"}\n";
9585 
9586 				sources.glslSources.add("storage-buffer.frag")
9587 					<< glu::FragmentSource(fragmentShader.str());
9588 			}
9589 		}
9590 
9591 		if (config.usage & USAGE_UNIFORM_TEXEL_BUFFER)
9592 		{
9593 			{
9594 				// Vertex uniform texel buffer rendering
9595 				const char* const vertexShader =
9596 					"#version 310 es\n"
9597 					"#extension GL_EXT_texture_buffer : require\n"
9598 					"precision highp float;\n"
9599 					"layout(set=0, binding=0) uniform highp usamplerBuffer u_sampler;\n"
9600 					"void main (void) {\n"
9601 					"\tgl_PointSize = 1.0;\n"
9602 					"\thighp uint val = texelFetch(u_sampler, gl_VertexIndex).x;\n"
9603 					"\thighp vec2 pos = vec2(val & 0xFFu, val >> 8u) / vec2(255.0);\n"
9604 					"\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
9605 					"}\n";
9606 
9607 				sources.glslSources.add("uniform-texel-buffer.vert")
9608 					<< glu::VertexSource(vertexShader);
9609 			}
9610 
9611 			{
9612 				// Fragment uniform texel buffer rendering
9613 				const char* const fragmentShader =
9614 					"#version 310 es\n"
9615 					"#extension GL_EXT_texture_buffer : require\n"
9616 					"precision highp float;\n"
9617 					"precision highp int;\n"
9618 					"layout(set=0, binding=0) uniform highp usamplerBuffer u_sampler;\n"
9619 					"layout(location = 0) out highp vec4 o_color;\n"
9620 					"layout(push_constant) uniform PushC\n"
9621 					"{\n"
9622 					"\tuint callId;\n"
9623 					"\tuint valuesPerPixel;\n"
9624 					"\tuint maxTexelCount;\n"
9625 					"} pushC;\n"
9626 					"void main (void) {\n"
9627 					"\thighp uint id = uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x);\n"
9628 					"\thighp uint value = id;\n"
9629 					"\tif (uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x) < pushC.callId * (pushC.maxTexelCount / pushC.valuesPerPixel))\n"
9630 					"\t\tdiscard;\n"
9631 					"\tfor (uint i = 0u; i < pushC.valuesPerPixel; i++)\n"
9632 					"\t{\n"
9633 					"\t\tvalue = texelFetch(u_sampler, int(value % uint(textureSize(u_sampler)))).x;\n"
9634 					"\t}\n"
9635 					"\tuvec4 valueOut = uvec4(value & 0xFFu, (value >> 8u) & 0xFFu, (value >> 16u) & 0xFFu, (value >> 24u) & 0xFFu);\n"
9636 					"\to_color = vec4(valueOut) / vec4(255.0);\n"
9637 					"}\n";
9638 
9639 				sources.glslSources.add("uniform-texel-buffer.frag")
9640 					<< glu::FragmentSource(fragmentShader);
9641 			}
9642 		}
9643 
9644 		if (config.usage & USAGE_STORAGE_TEXEL_BUFFER)
9645 		{
9646 			{
9647 				// Vertex storage texel buffer rendering
9648 				const char* const vertexShader =
9649 					"#version 450\n"
9650 					"#extension GL_EXT_texture_buffer : require\n"
9651 					"precision highp float;\n"
9652 					"layout(set=0, binding=0, r32ui) uniform readonly highp uimageBuffer u_sampler;\n"
9653 					"out gl_PerVertex {\n"
9654 					"\tvec4 gl_Position;\n"
9655 					"\tfloat gl_PointSize;\n"
9656 					"};\n"
9657 					"void main (void) {\n"
9658 					"\tgl_PointSize = 1.0;\n"
9659 					"\thighp uint val = imageLoad(u_sampler, gl_VertexIndex / 2).x;\n"
9660 					"\tif (gl_VertexIndex % 2 == 0)\n"
9661 					"\t\tval = val & 0xFFFFu;\n"
9662 					"\telse\n"
9663 					"\t\tval = val >> 16;\n"
9664 					"\thighp vec2 pos = vec2(val & 0xFFu, val >> 8u) / vec2(255.0);\n"
9665 					"\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
9666 					"}\n";
9667 
9668 				sources.glslSources.add("storage-texel-buffer.vert")
9669 					<< glu::VertexSource(vertexShader);
9670 			}
9671 			{
9672 				// Fragment storage texel buffer rendering
9673 				const char* const fragmentShader =
9674 					"#version 310 es\n"
9675 					"#extension GL_EXT_texture_buffer : require\n"
9676 					"precision highp float;\n"
9677 					"precision highp int;\n"
9678 					"layout(set=0, binding=0, r32ui) uniform readonly highp uimageBuffer u_sampler;\n"
9679 					"layout(location = 0) out highp vec4 o_color;\n"
9680 					"layout(push_constant) uniform PushC\n"
9681 					"{\n"
9682 					"\tuint callId;\n"
9683 					"\tuint valuesPerPixel;\n"
9684 					"\tuint maxTexelCount;\n"
9685 					"\tuint width;\n"
9686 					"} pushC;\n"
9687 					"void main (void) {\n"
9688 					"\thighp uint id = uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x);\n"
9689 					"\thighp uint value = id;\n"
9690 					"\tif (uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x) < pushC.callId * (pushC.maxTexelCount / pushC.valuesPerPixel))\n"
9691 					"\t\tdiscard;\n"
9692 					"\tfor (uint i = 0u; i < pushC.valuesPerPixel; i++)\n"
9693 					"\t{\n"
9694 					"\t\tvalue = imageLoad(u_sampler, int(value % pushC.width)).x;\n"
9695 					"\t}\n"
9696 					"\tuvec4 valueOut = uvec4(value & 0xFFu, (value >> 8u) & 0xFFu, (value >> 16u) & 0xFFu, (value >> 24u) & 0xFFu);\n"
9697 					"\to_color = vec4(valueOut) / vec4(255.0);\n"
9698 					"}\n";
9699 
9700 				sources.glslSources.add("storage-texel-buffer.frag")
9701 					<< glu::FragmentSource(fragmentShader);
9702 			}
9703 		}
9704 
9705 		if (config.usage & USAGE_STORAGE_IMAGE)
9706 		{
9707 			{
9708 				// Vertex storage image
9709 				const char* const vertexShader =
9710 					"#version 450\n"
9711 					"precision highp float;\n"
9712 					"layout(set=0, binding=0, rgba8) uniform image2D u_image;\n"
9713 					"out gl_PerVertex {\n"
9714 					"\tvec4 gl_Position;\n"
9715 					"\tfloat gl_PointSize;\n"
9716 					"};\n"
9717 					"void main (void) {\n"
9718 					"\tgl_PointSize = 1.0;\n"
9719 					"\thighp vec4 val = imageLoad(u_image, ivec2((gl_VertexIndex / 2) / imageSize(u_image).x, (gl_VertexIndex / 2) % imageSize(u_image).x));\n"
9720 					"\thighp vec2 pos;\n"
9721 					"\tif (gl_VertexIndex % 2 == 0)\n"
9722 					"\t\tpos = val.xy;\n"
9723 					"\telse\n"
9724 					"\t\tpos = val.zw;\n"
9725 					"\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
9726 					"}\n";
9727 
9728 				sources.glslSources.add("storage-image.vert")
9729 					<< glu::VertexSource(vertexShader);
9730 			}
9731 			{
9732 				// Fragment storage image
9733 				const char* const fragmentShader =
9734 					"#version 450\n"
9735 					"#extension GL_EXT_texture_buffer : require\n"
9736 					"precision highp float;\n"
9737 					"layout(set=0, binding=0, rgba8) uniform image2D u_image;\n"
9738 					"layout(location = 0) out highp vec4 o_color;\n"
9739 					"void main (void) {\n"
9740 					"\thighp uvec2 size = uvec2(imageSize(u_image).x, imageSize(u_image).y);\n"
9741 					"\thighp uint valuesPerPixel = max(1u, (size.x * size.y) / (256u * 256u));\n"
9742 					"\thighp uvec4 value = uvec4(uint(gl_FragCoord.x), uint(gl_FragCoord.y), 0u, 0u);\n"
9743 					"\tfor (uint i = 0u; i < valuesPerPixel; i++)\n"
9744 					"\t{\n"
9745 					"\t\thighp vec4 floatValue = imageLoad(u_image, ivec2(int((value.z *  256u + (value.x ^ value.z)) % size.x), int((value.w * 256u + (value.y ^ value.w)) % size.y)));\n"
9746 					"\t\tvalue = uvec4(uint(floatValue.x * 255.0), uint(floatValue.y * 255.0), uint(floatValue.z * 255.0), uint(floatValue.w * 255.0));\n"
9747 					"\t}\n"
9748 					"\to_color = vec4(value) / vec4(255.0);\n"
9749 					"}\n";
9750 
9751 				sources.glslSources.add("storage-image.frag")
9752 					<< glu::FragmentSource(fragmentShader);
9753 			}
9754 		}
9755 
9756 		if (config.usage & USAGE_SAMPLED_IMAGE)
9757 		{
9758 			{
9759 				// Vertex sampled image
9760 				const char* const vertexShader =
9761 					"#version 450\n"
9762 					"precision highp float;\n"
9763 					"layout(set=0, binding=0) uniform sampler2D u_sampler;\n"
9764 					"out gl_PerVertex {\n"
9765 					"\tvec4 gl_Position;\n"
9766 					"\tfloat gl_PointSize;\n"
9767 					"};\n"
9768 					"void main (void) {\n"
9769 					"\tgl_PointSize = 1.0;\n"
9770 					"\thighp vec4 val = texelFetch(u_sampler, ivec2((gl_VertexIndex / 2) / textureSize(u_sampler, 0).x, (gl_VertexIndex / 2) % textureSize(u_sampler, 0).x), 0);\n"
9771 					"\thighp vec2 pos;\n"
9772 					"\tif (gl_VertexIndex % 2 == 0)\n"
9773 					"\t\tpos = val.xy;\n"
9774 					"\telse\n"
9775 					"\t\tpos = val.zw;\n"
9776 					"\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
9777 					"}\n";
9778 
9779 				sources.glslSources.add("sampled-image.vert")
9780 					<< glu::VertexSource(vertexShader);
9781 			}
9782 			{
9783 				// Fragment sampled image
9784 				const char* const fragmentShader =
9785 					"#version 450\n"
9786 					"#extension GL_EXT_texture_buffer : require\n"
9787 					"precision highp float;\n"
9788 					"layout(set=0, binding=0) uniform sampler2D u_sampler;\n"
9789 					"layout(location = 0) out highp vec4 o_color;\n"
9790 					"void main (void) {\n"
9791 					"\thighp uvec2 size = uvec2(textureSize(u_sampler, 0).x, textureSize(u_sampler, 0).y);\n"
9792 					"\thighp uint valuesPerPixel = max(1u, (size.x * size.y) / (256u * 256u));\n"
9793 					"\thighp uvec4 value = uvec4(uint(gl_FragCoord.x), uint(gl_FragCoord.y), 0u, 0u);\n"
9794 					"\tfor (uint i = 0u; i < valuesPerPixel; i++)\n"
9795 					"\t{\n"
9796 					"\t\thighp vec4 floatValue = texelFetch(u_sampler, ivec2(int((value.z *  256u + (value.x ^ value.z)) % size.x), int((value.w * 256u + (value.y ^ value.w)) % size.y)), 0);\n"
9797 					"\t\tvalue = uvec4(uint(floatValue.x * 255.0), uint(floatValue.y * 255.0), uint(floatValue.z * 255.0), uint(floatValue.w * 255.0));\n"
9798 					"\t}\n"
9799 					"\to_color = vec4(value) / vec4(255.0);\n"
9800 					"}\n";
9801 
9802 				sources.glslSources.add("sampled-image.frag")
9803 					<< glu::FragmentSource(fragmentShader);
9804 			}
9805 		}
9806 
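		// Full screen quad generated from gl_VertexIndex alone: for indices 0..5 the
		// expressions below evaluate to (-1,-1), (1,-1), (1,1), (1,1), (-1,1),
		// (-1,-1), i.e. two triangles covering the whole viewport.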
9807 		{
9808 			const char* const vertexShader =
9809 				"#version 450\n"
9810 				"out gl_PerVertex {\n"
9811 				"\tvec4 gl_Position;\n"
9812 				"};\n"
9813 				"precision highp float;\n"
9814 				"void main (void) {\n"
9815 				"\tgl_Position = vec4(((gl_VertexIndex + 2) / 3) % 2 == 0 ? -1.0 : 1.0,\n"
9816 				"\t                   ((gl_VertexIndex + 1) / 3) % 2 == 0 ? -1.0 : 1.0, 0.0, 1.0);\n"
9817 				"}\n";
9818 
9819 			sources.glslSources.add("render-quad.vert")
9820 				<< glu::VertexSource(vertexShader);
9821 		}
9822 
9823 		{
9824 			const char* const fragmentShader =
9825 				"#version 310 es\n"
9826 				"layout(location = 0) out highp vec4 o_color;\n"
9827 				"void main (void) {\n"
9828 				"\to_color = vec4(1.0);\n"
9829 				"}\n";
9830 
9831 			sources.glslSources.add("render-white.frag")
9832 				<< glu::FragmentSource(fragmentShader);
9833 		}
9834 	}
9835 };
9836 
9837 } // anonymous
9838 
9839 tcu::TestCaseGroup* createPipelineBarrierTests (tcu::TestContext& testCtx)
9840 {
9841 	de::MovePtr<tcu::TestCaseGroup>	group			(new tcu::TestCaseGroup(testCtx, "pipeline_barrier", "Pipeline barrier tests."));
9842 	const vk::VkDeviceSize			sizes[]			=
9843 	{
9844 		1024,		// 1K
9845 		8*1024,		// 8K
9846 		64*1024,	// 64K
9847 		1024*1024,	// 1M
9848 	};
9849 	const Usage						usages[]		=
9850 	{
9851 		USAGE_HOST_READ,
9852 		USAGE_HOST_WRITE,
9853 		USAGE_TRANSFER_SRC,
9854 		USAGE_TRANSFER_DST,
9855 		USAGE_VERTEX_BUFFER,
9856 		USAGE_INDEX_BUFFER,
9857 		USAGE_UNIFORM_BUFFER,
9858 		USAGE_UNIFORM_TEXEL_BUFFER,
9859 		USAGE_STORAGE_BUFFER,
9860 		USAGE_STORAGE_TEXEL_BUFFER,
9861 		USAGE_STORAGE_IMAGE,
9862 		USAGE_SAMPLED_IMAGE
9863 	};
9864 	const Usage						readUsages[]		=
9865 	{
9866 		USAGE_HOST_READ,
9867 		USAGE_TRANSFER_SRC,
9868 		USAGE_VERTEX_BUFFER,
9869 		USAGE_INDEX_BUFFER,
9870 		USAGE_UNIFORM_BUFFER,
9871 		USAGE_UNIFORM_TEXEL_BUFFER,
9872 		USAGE_STORAGE_BUFFER,
9873 		USAGE_STORAGE_TEXEL_BUFFER,
9874 		USAGE_STORAGE_IMAGE,
9875 		USAGE_SAMPLED_IMAGE
9876 	};
9877 
9878 	const Usage						writeUsages[]	=
9879 	{
9880 		USAGE_HOST_WRITE,
9881 		USAGE_TRANSFER_DST
9882 	};
9883 
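	// Test case matrix: one group per (write usage, read usage) pair, each containing
	// one case per memory size, plus the "all" (every usage combined) and "all_device"
	// (every usage except host reads/writes) groups added below.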
9884 	for (size_t writeUsageNdx = 0; writeUsageNdx < DE_LENGTH_OF_ARRAY(writeUsages); writeUsageNdx++)
9885 	{
9886 		const Usage	writeUsage	= writeUsages[writeUsageNdx];
9887 
9888 		for (size_t readUsageNdx = 0; readUsageNdx < DE_LENGTH_OF_ARRAY(readUsages); readUsageNdx++)
9889 		{
9890 			const Usage						readUsage		= readUsages[readUsageNdx];
9891 			const Usage						usage			= writeUsage | readUsage;
9892 			const string					usageGroupName	(usageToName(usage));
9893 			de::MovePtr<tcu::TestCaseGroup>	usageGroup		(new tcu::TestCaseGroup(testCtx, usageGroupName.c_str(), usageGroupName.c_str()));
9894 
9895 			for (size_t sizeNdx = 0; sizeNdx < DE_LENGTH_OF_ARRAY(sizes); sizeNdx++)
9896 			{
9897 				const vk::VkDeviceSize	size		= sizes[sizeNdx];
9898 				const string			testName	(de::toString((deUint64)(size)));
9899 				const TestConfig		config		=
9900 				{
9901 					usage,
9902 					size,
9903 					vk::VK_SHARING_MODE_EXCLUSIVE
9904 				};
9905 
9906 				usageGroup->addChild(new InstanceFactory1<MemoryTestInstance, TestConfig, AddPrograms>(testCtx,tcu::NODETYPE_SELF_VALIDATE,  testName, testName, AddPrograms(), config));
9907 			}
9908 
9909 			group->addChild(usageGroup.get());
9910 			usageGroup.release();
9911 		}
9912 	}
9913 
9914 	{
9915 		Usage all = (Usage)0;
9916 
9917 		for (size_t usageNdx = 0; usageNdx < DE_LENGTH_OF_ARRAY(usages); usageNdx++)
9918 			all = all | usages[usageNdx];
9919 
9920 		{
9921 			const string					usageGroupName	("all");
9922 			de::MovePtr<tcu::TestCaseGroup>	usageGroup		(new tcu::TestCaseGroup(testCtx, usageGroupName.c_str(), usageGroupName.c_str()));
9923 
9924 			for (size_t sizeNdx = 0; sizeNdx < DE_LENGTH_OF_ARRAY(sizes); sizeNdx++)
9925 			{
9926 				const vk::VkDeviceSize	size		= sizes[sizeNdx];
9927 				const string			testName	(de::toString((deUint64)(size)));
9928 				const TestConfig		config		=
9929 				{
9930 					all,
9931 					size,
9932 					vk::VK_SHARING_MODE_EXCLUSIVE
9933 				};
9934 
9935 				usageGroup->addChild(new InstanceFactory1<MemoryTestInstance, TestConfig, AddPrograms>(testCtx,tcu::NODETYPE_SELF_VALIDATE,  testName, testName, AddPrograms(), config));
9936 			}
9937 
9938 			group->addChild(usageGroup.get());
9939 			usageGroup.release();
9940 		}
9941 
9942 		{
9943 			const string					usageGroupName	("all_device");
9944 			de::MovePtr<tcu::TestCaseGroup>	usageGroup		(new tcu::TestCaseGroup(testCtx, usageGroupName.c_str(), usageGroupName.c_str()));
9945 
9946 			for (size_t sizeNdx = 0; sizeNdx < DE_LENGTH_OF_ARRAY(sizes); sizeNdx++)
9947 			{
9948 				const vk::VkDeviceSize	size		= sizes[sizeNdx];
9949 				const string			testName	(de::toString((deUint64)(size)));
9950 				const TestConfig		config		=
9951 				{
9952 					(Usage)(all & (~(USAGE_HOST_READ|USAGE_HOST_WRITE))),
9953 					size,
9954 					vk::VK_SHARING_MODE_EXCLUSIVE
9955 				};
9956 
9957 				usageGroup->addChild(new InstanceFactory1<MemoryTestInstance, TestConfig, AddPrograms>(testCtx,tcu::NODETYPE_SELF_VALIDATE,  testName, testName, AddPrograms(), config));
9958 			}
9959 
9960 			group->addChild(usageGroup.get());
9961 			usageGroup.release();
9962 		}
9963 	}
9964 
9965 	return group.release();
9966 }
9967 
9968 } // memory
9969 } // vkt
9970