• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*------------------------------------------------------------------------
2  * Vulkan Conformance Tests
3  * ------------------------
4  *
5  * Copyright (c) 2016 The Khronos Group Inc.
6  * Copyright (c) 2016 Samsung Electronics Co., Ltd.
7  *
8  * Licensed under the Apache License, Version 2.0 (the "License");
9  * you may not use this file except in compliance with the License.
10  * You may obtain a copy of the License at
11  *
12  *      http://www.apache.org/licenses/LICENSE-2.0
13  *
14  * Unless required by applicable law or agreed to in writing, software
15  * distributed under the License is distributed on an "AS IS" BASIS,
16  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17  * See the License for the specific language governing permissions and
18  * limitations under the License.
19  *
20  *//*!
21  * \file
22  * \brief Vulkan Fill Buffer Tests
23  *//*--------------------------------------------------------------------*/
24 
25 #include "vktApiFillBufferTests.hpp"
26 #include "vktApiBufferAndImageAllocationUtil.hpp"
27 #include "vktCustomInstancesDevices.hpp"
28 
29 #include "deStringUtil.hpp"
30 #include "deUniquePtr.hpp"
31 #include "vkImageUtil.hpp"
32 #include "vkMemUtil.hpp"
33 #include "vkCmdUtil.hpp"
34 #include "vktTestCase.hpp"
35 #include "vktTestCaseUtil.hpp"
36 #include "vkQueryUtil.hpp"
37 #include "vkRefUtil.hpp"
38 #include "vkCmdUtil.hpp"
39 #include "vkSafetyCriticalUtil.hpp"
40 #include "tcuImageCompare.hpp"
41 #include "tcuCommandLine.hpp"
42 #include "tcuTexture.hpp"
43 #include "tcuTextureUtil.hpp"
44 #include "tcuVectorType.hpp"
45 #include "deSharedPtr.hpp"
46 #include <limits>
47 
48 namespace vkt
49 {
50 
51 namespace api
52 {
53 
54 using namespace vk;
55 
56 namespace
57 {
58 
// Parameters shared by all fill/update buffer test variants.
struct TestParams
{
	enum
	{
		TEST_DATA_SIZE													= 256
	};

	VkDeviceSize					dstSize;		// Size of the destination buffer, in bytes.
	VkDeviceSize					dstOffset;		// Byte offset passed to vkCmdFillBuffer / vkCmdUpdateBuffer.
	VkDeviceSize					size;			// Number of bytes to fill/update; may be VK_WHOLE_SIZE.
	deUint32						testData[TEST_DATA_SIZE];	// Reference data: fill tests use testData[0], update tests copy the array.
	de::SharedPtr<IBufferAllocator>	bufferAllocator;	// Buffer creation strategy (suballocation vs. dedicated allocation).
	bool							useTransferOnlyQueue;	// Run on a transfer-only queue of a custom-made device.
};
73 
// Creates a device that has transfer only operations.
// Selects a queue family that supports VK_QUEUE_TRANSFER_BIT but exposes neither
// graphics nor compute, and returns a device built on that family. The chosen
// family index is written to the queueFamilyIndex out-parameter.
Move<VkDevice> createCustomDevice(Context& context, uint32_t& queueFamilyIndex)
{
	const InstanceInterface&	instanceDriver = context.getInstanceInterface();
	const VkPhysicalDevice		physicalDevice = context.getPhysicalDevice();

	// Required caps: transfer. Excluded caps: graphics and compute.
	queueFamilyIndex = findQueueFamilyIndexWithCaps(instanceDriver, physicalDevice, VK_QUEUE_TRANSFER_BIT, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT);

	const std::vector<VkQueueFamilyProperties>	queueFamilies = getPhysicalDeviceQueueFamilyProperties(instanceDriver, physicalDevice);

	// This must be found, findQueueFamilyIndexWithCaps would have
	// thrown a NotSupported exception if the requested queue type did
	// not exist. Similarly, this was written with the assumption the
	// "alternative" queue would be different to the universal queue.
	DE_ASSERT(queueFamilyIndex < queueFamilies.size() && queueFamilyIndex != context.getUniversalQueueFamilyIndex());
	const float queuePriority = 1.0f;
	const VkDeviceQueueCreateInfo deviceQueueCreateInfos
	{
		VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,		// VkStructureType				sType;
		nullptr,										// const void*					pNext;
		(VkDeviceQueueCreateFlags)0u,					// VkDeviceQueueCreateFlags		flags;
		queueFamilyIndex,								// uint32_t						queueFamilyIndex;
		1u,												// uint32_t						queueCount;
		&queuePriority,									// const float*					pQueuePriorities;
	};

	// Replicate default device extension list.
	const auto	extensionNames				= context.getDeviceCreationExtensions();
	auto		synchronization2Features	= context.getSynchronization2Features();
	auto		deviceFeatures2				= context.getDeviceFeatures2();
	const void*	pNext						= &deviceFeatures2;

	// When synchronization2 is available, chain its feature struct in front of
	// the features2 struct so the new device has it enabled too.
	if (context.isDeviceFunctionalitySupported("VK_KHR_synchronization2"))
	{
		synchronization2Features.pNext = &deviceFeatures2;
		pNext = &synchronization2Features;
	}

#ifdef CTS_USES_VULKANSC
	// Vulkan SC: devices must declare object/pipeline reservations up front.
	VkDeviceObjectReservationCreateInfo memReservationInfo = context.getTestContext().getCommandLine().isSubProcess() ? context.getResourceInterface()->getStatMax() : resetDeviceObjectReservationCreateInfo();
	memReservationInfo.pNext = pNext;
	pNext = &memReservationInfo;

	// NOTE: pcCI and poolSizes are declared at function scope so they outlive
	// the vkCreateDevice call that reads them through memReservationInfo.
	VkPipelineCacheCreateInfo			pcCI;
	std::vector<VkPipelinePoolSize>		poolSizes;
	if (context.getTestContext().getCommandLine().isSubProcess())
	{
		if (context.getResourceInterface()->getCacheDataSize() > 0)
		{
			pcCI =
			{
				VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO,			// VkStructureType				sType;
				DE_NULL,												// const void*					pNext;
				VK_PIPELINE_CACHE_CREATE_READ_ONLY_BIT |
					VK_PIPELINE_CACHE_CREATE_USE_APPLICATION_STORAGE_BIT,	// VkPipelineCacheCreateFlags	flags;
				context.getResourceInterface()->getCacheDataSize(),	// deUintptr					initialDataSize;
				context.getResourceInterface()->getCacheData()		// const void*					pInitialData;
			};
			memReservationInfo.pipelineCacheCreateInfoCount = 1;
			memReservationInfo.pPipelineCacheCreateInfos = &pcCI;
		}
		poolSizes = context.getResourceInterface()->getPipelinePoolSizes();
		if (!poolSizes.empty())
		{
			memReservationInfo.pipelinePoolSizeCount = deUint32(poolSizes.size());
			memReservationInfo.pPipelinePoolSizes = poolSizes.data();
		}
	}
#endif // CTS_USES_VULKANSC

	const VkDeviceCreateInfo deviceCreateInfo
	{
		VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,			// VkStructureType					sType;
		pNext,											// const void*						pNext;
		(VkDeviceCreateFlags)0u,						// VkDeviceCreateFlags				flags;
		1u,												// uint32_t							queueCreateInfoCount;
		&deviceQueueCreateInfos,						// const VkDeviceQueueCreateInfo*	pQueueCreateInfos;
		0u,												// uint32_t							enabledLayerCount;
		DE_NULL,										// const char* const*				ppEnabledLayerNames;
		static_cast<uint32_t>(extensionNames.size()),	// uint32_t							enabledExtensionCount;
		extensionNames.data(),							// const char* const*				ppEnabledExtensionNames;
		DE_NULL,										// const VkPhysicalDeviceFeatures*	pEnabledFeatures;
	};

	return vkt::createCustomDevice(context.getTestContext().getCommandLine().isValidationEnabled(), context.getPlatformInterface(), context.getInstance(), instanceDriver, physicalDevice, &deviceCreateInfo);
}
160 
// Tests vkCmdFillBuffer with size == VK_WHOLE_SIZE on buffers whose size is
// not necessarily a multiple of 4, checking that only the 4-byte-aligned
// portion past dstOffset is written.
class FillWholeBufferTestInstance : public vkt::TestInstance
{
public:
							FillWholeBufferTestInstance	(Context& context, const TestParams& testParams);
	virtual tcu::TestStatus iterate						(void) override;
protected:
	// dstSize will be used as the buffer size.
	// dstOffset will be used as the offset for vkCmdFillBuffer.
	// size in vkCmdFillBuffer will always be VK_WHOLE_SIZE.
	const TestParams		m_params;

	// Populated only when m_params.useTransferOnlyQueue is true.
	Move<VkDevice>			m_customDevice;
	de::MovePtr<Allocator>	m_customAllocator;

	// Either the custom device/allocator above or the context defaults.
	VkDevice				m_device;
	Allocator*				m_allocator;
	uint32_t				m_queueFamilyIndex;

	Move<VkCommandPool>		m_cmdPool;
	Move<VkCommandBuffer>	m_cmdBuffer;

	// Destination buffer and its host-visible backing memory.
	Move<VkBuffer>			m_destination;
	de::MovePtr<Allocation>	m_destinationBufferAlloc;
};
185 
// Sets up device/queue selection, command pool/buffer and the destination
// buffer. With useTransferOnlyQueue a custom device with a transfer-only
// queue family is created; otherwise the context's universal queue is used.
FillWholeBufferTestInstance::FillWholeBufferTestInstance(Context& context, const TestParams& testParams)
	: vkt::TestInstance(context), m_params(testParams)
{
	const InstanceInterface&	vki			= m_context.getInstanceInterface();
	const DeviceInterface&		vk			= m_context.getDeviceInterface();
	const VkPhysicalDevice		physDevice	= m_context.getPhysicalDevice();

	if (testParams.useTransferOnlyQueue)
	{
		// createCustomDevice also writes the transfer-only family into m_queueFamilyIndex.
		m_customDevice		= createCustomDevice(context, m_queueFamilyIndex);
		m_customAllocator	= de::MovePtr<Allocator>(new SimpleAllocator(vk, *m_customDevice, getPhysicalDeviceMemoryProperties(vki, physDevice)));

		m_device			= *m_customDevice;
		m_allocator			= &(*m_customAllocator);
	}
	else
	{
		m_device			= context.getDevice();
		m_allocator			= &context.getDefaultAllocator();
		m_queueFamilyIndex	= context.getUniversalQueueFamilyIndex();
	}

	m_cmdPool = createCommandPool(vk, m_device, VK_COMMAND_POOL_CREATE_TRANSIENT_BIT, m_queueFamilyIndex);
	m_cmdBuffer = allocateCommandBuffer(vk, m_device, *m_cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY);
	// Buffer must be host visible so iterate() can initialize and verify it from the CPU.
	testParams.bufferAllocator->createTestBuffer(vk, m_device, m_queueFamilyIndex, m_params.dstSize, VK_BUFFER_USAGE_TRANSFER_DST_BIT, context, *m_allocator, m_destination, MemoryRequirement::HostVisible, m_destinationBufferAlloc);
}
212 
iterate(void)213 tcu::TestStatus FillWholeBufferTestInstance::iterate(void)
214 {
215 	const DeviceInterface&	vk		= m_context.getDeviceInterface();
216 	const VkQueue			queue	= getDeviceQueue(vk, m_device, m_queueFamilyIndex, 0);
217 
218 	// if posible use synchronization2 when testing transfer only queue
219 	const bool useSynchronization2 = m_context.isDeviceFunctionalitySupported("VK_KHR_synchronization2") && m_params.useTransferOnlyQueue;
220 
221 	// Make sure some stuff below will work.
222 	DE_ASSERT(m_params.dstSize >= sizeof(deUint32));
223 	DE_ASSERT(m_params.dstSize <  static_cast<VkDeviceSize>(std::numeric_limits<size_t>::max()));
224 	DE_ASSERT(m_params.dstOffset < m_params.dstSize);
225 
226 	// Fill buffer from the host and flush buffer memory.
227 	deUint8* bytes = reinterpret_cast<deUint8*>(m_destinationBufferAlloc->getHostPtr());
228 	deMemset(bytes, 0xff, static_cast<size_t>(m_params.dstSize));
229 	flushAlloc(vk, m_device, *m_destinationBufferAlloc);
230 
231 	const VkBufferMemoryBarrier	gpuToHostBarrier
232 	{
233 		VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,	// VkStructureType			sType;
234 		DE_NULL,									// const void*				pNext;
235 		VK_ACCESS_TRANSFER_WRITE_BIT,				// VkAccessFlags			srcAccessMask;
236 		VK_ACCESS_HOST_READ_BIT,					// VkAccessFlags			dstAccessMask;
237 		VK_QUEUE_FAMILY_IGNORED,					// deUint32					srcQueueFamilyIndex;
238 		VK_QUEUE_FAMILY_IGNORED,					// deUint32					dstQueueFamilyIndex;
239 		*m_destination,								// VkBuffer					buffer;
240 		0u,											// VkDeviceSize				offset;
241 		VK_WHOLE_SIZE								// VkDeviceSize				size;
242 	};
243 
244 #ifndef CTS_USES_VULKANSC
245 	using BufferMemoryBarrier2		= VkBufferMemoryBarrier2;
246 	using DependencyInfo			= VkDependencyInfo;
247 	using CommandBufferSubmitInfo	= VkCommandBufferSubmitInfo;
248 	using SubmitInfo2				= VkSubmitInfo2;
249 	auto cmdPipelineBarrier2Fun		= &DeviceInterface::cmdPipelineBarrier2;
250 	auto queueSubmit2Fun			= &DeviceInterface::queueSubmit2;
251 #else
252 	using BufferMemoryBarrier2		= VkBufferMemoryBarrier2KHR;
253 	using DependencyInfo			= VkDependencyInfoKHR;
254 	using CommandBufferSubmitInfo	= VkCommandBufferSubmitInfoKHR;
255 	using SubmitInfo2				= VkSubmitInfo2KHR;
256 	auto cmdPipelineBarrier2Fun		= &DeviceInterface::cmdPipelineBarrier2KHR;
257 	auto queueSubmit2Fun			= &DeviceInterface::queueSubmit2KHR;
258 #endif // CTS_USES_VULKANSC
259 
260 	BufferMemoryBarrier2 gpuToHostBarrier2	= initVulkanStructure();
261 	gpuToHostBarrier2.srcStageMask			= VK_PIPELINE_STAGE_2_TRANSFER_BIT_KHR;
262 	gpuToHostBarrier2.srcAccessMask			= VK_ACCESS_2_TRANSFER_WRITE_BIT_KHR;
263 	gpuToHostBarrier2.dstStageMask			= VK_PIPELINE_STAGE_2_HOST_BIT_KHR;
264 	gpuToHostBarrier2.dstAccessMask			= VK_ACCESS_2_HOST_READ_BIT_KHR;
265 	gpuToHostBarrier2.srcQueueFamilyIndex	= VK_QUEUE_FAMILY_IGNORED;
266 	gpuToHostBarrier2.dstQueueFamilyIndex	= VK_QUEUE_FAMILY_IGNORED;
267 	gpuToHostBarrier2.buffer				= *m_destination;
268 	gpuToHostBarrier2.size					= VK_WHOLE_SIZE;
269 
270 	DependencyInfo depInfo = initVulkanStructure();
271 	depInfo.bufferMemoryBarrierCount	= 1;
272 	depInfo.pBufferMemoryBarriers		= &gpuToHostBarrier2;
273 
274 	// Fill buffer using VK_WHOLE_SIZE.
275 	beginCommandBuffer(vk, *m_cmdBuffer);
276 	vk.cmdFillBuffer(*m_cmdBuffer, *m_destination, m_params.dstOffset, VK_WHOLE_SIZE, deUint32{0x01010101});
277 
278 	if (useSynchronization2)
279 		(vk.*(cmdPipelineBarrier2Fun))(*m_cmdBuffer, &depInfo);
280 	else
281 		vk.cmdPipelineBarrier(*m_cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0, 0, DE_NULL, 1, &gpuToHostBarrier, 0, DE_NULL);
282 
283 	endCommandBuffer(vk, *m_cmdBuffer);
284 
285 	Move<VkFence> fence(createFence(vk, m_device));
286 	if (useSynchronization2)
287 	{
288 		CommandBufferSubmitInfo commandBufferInfos	= initVulkanStructure();
289 		commandBufferInfos.commandBuffer			= *m_cmdBuffer;
290 
291 		SubmitInfo2 submitInfo2						= initVulkanStructure();
292 		submitInfo2.commandBufferInfoCount			= 1u;
293 		submitInfo2.pCommandBufferInfos				= &commandBufferInfos;
294 
295 		(vk.*(queueSubmit2Fun))(queue, 1u, &submitInfo2, *fence);
296 	}
297 	else
298 	{
299 		VkSubmitInfo submitInfo			= initVulkanStructure();
300 		submitInfo.commandBufferCount	= 1u;
301 		submitInfo.pCommandBuffers		= &m_cmdBuffer.get();
302 
303 		VK_CHECK(vk.queueSubmit(queue, 1u, &submitInfo, *fence));
304 	}
305 	waitForFence(vk, m_device, *fence);
306 
307 	// Invalidate buffer memory and check the buffer contains the expected results.
308 	invalidateAlloc(vk, m_device, *m_destinationBufferAlloc);
309 
310 	const VkDeviceSize startOfExtra = (m_params.dstSize / sizeof(deUint32)) * sizeof(deUint32);
311 	for (VkDeviceSize i = 0; i < m_params.dstSize; ++i)
312 	{
313 		const deUint8 expectedByte = ((i >= m_params.dstOffset && i < startOfExtra)? 0x01 : 0xff);
314 		if (bytes[i] != expectedByte)
315 		{
316 			std::ostringstream msg;
317 			msg << "Invalid byte at position " << i << " in the buffer (found 0x"
318 				<< std::hex << static_cast<int>(bytes[i]) << " but expected 0x" << static_cast<int>(expectedByte) << ")";
319 			return tcu::TestStatus::fail(msg.str());
320 		}
321 	}
322 
323 	return tcu::TestStatus::pass("Pass");
324 }
325 
326 class FillWholeBufferTestCase : public vkt::TestCase
327 {
328 public:
FillWholeBufferTestCase(tcu::TestContext & testCtx,const std::string & name,const TestParams params)329 							FillWholeBufferTestCase	(tcu::TestContext&	testCtx,
330 													 const std::string&	name,
331 													 const TestParams	params)
332 		: vkt::TestCase(testCtx, name), m_params(params)
333 	{}
334 
createInstance(Context & context) const335 	virtual TestInstance*	createInstance			(Context&			context) const override
336 	{
337 		return static_cast<TestInstance*>(new FillWholeBufferTestInstance(context, m_params));
338 	}
339 private:
340 	const TestParams		m_params;
341 };
342 
343 
// Tests vkCmdFillBuffer with explicit offset/size, comparing the resulting
// buffer contents against a host-computed reference image.
class FillBufferTestInstance : public vkt::TestInstance
{
public:
									FillBufferTestInstance				(Context&					context,
																		 TestParams					testParams);
	virtual tcu::TestStatus			iterate								(void);
protected:
	const TestParams				m_params;

	// Populated only when m_params.useTransferOnlyQueue is true.
	Move<VkDevice>					m_customDevice;
	de::MovePtr<Allocator>			m_customAllocator;

	// Either the custom device/allocator above or the context defaults.
	VkDevice						m_device;
	Allocator*						m_allocator;
	uint32_t						m_queueFamilyIndex;

	Move<VkCommandPool>				m_cmdPool;
	Move<VkCommandBuffer>			m_cmdBuffer;
	// Initial buffer contents and the expected contents after the fill/update.
	de::MovePtr<tcu::TextureLevel>	m_destinationTextureLevel;
	de::MovePtr<tcu::TextureLevel>	m_expectedTextureLevel;

	VkCommandBufferBeginInfo		m_cmdBufferBeginInfo;

	// Destination buffer and its host-visible backing memory.
	Move<VkBuffer>					m_destination;
	de::MovePtr<Allocation>			m_destinationBufferAlloc;

	// Initializes the reference texture with a coordinate-based pattern.
	void							generateBuffer						(tcu::PixelBufferAccess		buffer,
																		 int						width,
																		 int						height,
																		 int						depth = 1);
	// Computes m_expectedTextureLevel; overridden by the update-buffer variant.
	virtual void					generateExpectedResult				(void);
	// Copies bufferAccess data into the host-visible allocation and flushes it.
	void							uploadBuffer						(tcu::ConstPixelBufferAccess
																									bufferAccess,
																		 const Allocation&			bufferAlloc);
	// Compares result against m_expectedTextureLevel with zero tolerance.
	virtual tcu::TestStatus			checkTestResult						(tcu::ConstPixelBufferAccess
																									result);
	// Total byte size of the pixel data referenced by src.
	deUint32						calculateSize						(tcu::ConstPixelBufferAccess
																									src) const
	{
		return src.getWidth() * src.getHeight() * src.getDepth() * tcu::getPixelSize(src.getFormat());
	}
};
386 
// Sets up device/queue selection, command pool/buffer and the destination
// buffer; mirrors FillWholeBufferTestInstance's constructor.
									FillBufferTestInstance::FillBufferTestInstance
																		(Context&					context,
																		 TestParams					testParams)
									: vkt::TestInstance					(context)
									, m_params							(testParams)
{
	const InstanceInterface&	vki			= m_context.getInstanceInterface();
	const DeviceInterface&		vk			= m_context.getDeviceInterface();
	const VkPhysicalDevice		physDevice	= m_context.getPhysicalDevice();

	if (testParams.useTransferOnlyQueue)
	{
		// createCustomDevice also writes the transfer-only family into m_queueFamilyIndex.
		m_customDevice		= createCustomDevice(context, m_queueFamilyIndex);
		m_customAllocator	= de::MovePtr<Allocator>(new SimpleAllocator(vk, *m_customDevice, getPhysicalDeviceMemoryProperties(vki, physDevice)));

		m_device			= *m_customDevice;
		m_allocator			= &(*m_customAllocator);
	}
	else
	{
		m_device			= context.getDevice();
		m_allocator			= &context.getDefaultAllocator();
		m_queueFamilyIndex	= context.getUniversalQueueFamilyIndex();
	}

	// Create command pool
	m_cmdPool = createCommandPool(vk, m_device, VK_COMMAND_POOL_CREATE_TRANSIENT_BIT, m_queueFamilyIndex);

	// Create command buffer
	m_cmdBuffer = allocateCommandBuffer(vk, m_device, *m_cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY);

	// Buffer must be host visible so iterate() can upload and read back data from the CPU.
	testParams.bufferAllocator->createTestBuffer(vk, m_device, m_queueFamilyIndex, m_params.dstSize, VK_BUFFER_USAGE_TRANSFER_DST_BIT, context, *m_allocator, m_destination, MemoryRequirement::HostVisible, m_destinationBufferAlloc);
}
420 
// Uploads a reference pattern, records vkCmdFillBuffer for the configured
// offset/size, submits, and compares the read-back buffer with the expectation.
tcu::TestStatus						FillBufferTestInstance::iterate		(void)
{
	// The buffer is interpreted as a 1D RGBA8 image, one texel per 4 bytes.
	const int						dstLevelWidth						= (int)(m_params.dstSize / 4);
	m_destinationTextureLevel = de::MovePtr<tcu::TextureLevel>(new tcu::TextureLevel(mapVkFormat(VK_FORMAT_R8G8B8A8_UINT), dstLevelWidth, 1));

	generateBuffer(m_destinationTextureLevel->getAccess(), dstLevelWidth, 1, 1);

	generateExpectedResult();

	uploadBuffer(m_destinationTextureLevel->getAccess(), *m_destinationBufferAlloc);

	const DeviceInterface&	vk		= m_context.getDeviceInterface();
	const VkQueue			queue	= getDeviceQueue(vk, m_device, m_queueFamilyIndex, 0);

	// Make the transfer write visible to the host read-back below.
	const VkBufferMemoryBarrier		dstBufferBarrier					=
	{
		VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,						// VkStructureType			sType;
		DE_NULL,														// const void*				pNext;
		VK_ACCESS_TRANSFER_WRITE_BIT,									// VkAccessFlags			srcAccessMask;
		VK_ACCESS_HOST_READ_BIT,										// VkAccessFlags			dstAccessMask;
		VK_QUEUE_FAMILY_IGNORED,										// deUint32					srcQueueFamilyIndex;
		VK_QUEUE_FAMILY_IGNORED,										// deUint32					dstQueueFamilyIndex;
		*m_destination,													// VkBuffer					buffer;
		m_params.dstOffset,												// VkDeviceSize				offset;
		VK_WHOLE_SIZE													// VkDeviceSize				size;
	};

	beginCommandBuffer(vk, *m_cmdBuffer);
	vk.cmdFillBuffer(*m_cmdBuffer, *m_destination, m_params.dstOffset, m_params.size, m_params.testData[0]);
	vk.cmdPipelineBarrier(*m_cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &dstBufferBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
	endCommandBuffer(vk, *m_cmdBuffer);

	submitCommandsAndWait(vk, m_device, queue, m_cmdBuffer.get());

	// Read buffer data
	de::MovePtr<tcu::TextureLevel>	resultLevel	(new tcu::TextureLevel(m_destinationTextureLevel->getAccess().getFormat(), dstLevelWidth, 1));
	invalidateAlloc(vk, m_device, *m_destinationBufferAlloc);
	tcu::copy(*resultLevel, tcu::ConstPixelBufferAccess(resultLevel->getFormat(), resultLevel->getSize(), m_destinationBufferAlloc->getHostPtr()));

	return checkTestResult(resultLevel->getAccess());
}
462 
generateBuffer(tcu::PixelBufferAccess buffer,int width,int height,int depth)463 void								FillBufferTestInstance::generateBuffer
464 																		(tcu::PixelBufferAccess		buffer,
465 																		 int						width,
466 																		 int						height,
467 																		 int						depth)
468 {
469 	for (int z = 0; z < depth; z++)
470 	{
471 		for (int y = 0; y < height; y++)
472 		{
473 			for (int x = 0; x < width; x++)
474 				buffer.setPixel(tcu::UVec4(x, y, z, 255), x, y, z);
475 		}
476 	}
477 }
478 
uploadBuffer(tcu::ConstPixelBufferAccess bufferAccess,const Allocation & bufferAlloc)479 void								FillBufferTestInstance::uploadBuffer
480 																		(tcu::ConstPixelBufferAccess
481 																									bufferAccess,
482 																		 const Allocation&			bufferAlloc)
483 {
484 	const DeviceInterface&			vk									= m_context.getDeviceInterface();
485 	const deUint32					bufferSize							= calculateSize(bufferAccess);
486 
487 	// Write buffer data
488 	deMemcpy(bufferAlloc.getHostPtr(), bufferAccess.getDataPtr(), bufferSize);
489 	flushAlloc(vk, m_device, bufferAlloc);
490 }
491 
checkTestResult(tcu::ConstPixelBufferAccess result)492 tcu::TestStatus						FillBufferTestInstance::checkTestResult
493 																		(tcu::ConstPixelBufferAccess
494 																									result)
495 {
496 	const tcu::ConstPixelBufferAccess
497 									expected							= m_expectedTextureLevel->getAccess();
498 	const tcu::UVec4				threshold							(0, 0, 0, 0);
499 
500 	if (!tcu::intThresholdCompare(m_context.getTestContext().getLog(), "Compare", "Result comparsion", expected, result, threshold, tcu::COMPARE_LOG_RESULT))
501 	{
502 		return tcu::TestStatus::fail("Fill and Update Buffer test");
503 	}
504 
505 	return tcu::TestStatus::pass("Fill and Update Buffer test");
506 }
507 
generateExpectedResult(void)508 void								FillBufferTestInstance::generateExpectedResult
509 																		(void)
510 {
511 	const tcu::ConstPixelBufferAccess
512 									dst									= m_destinationTextureLevel->getAccess();
513 
514 	m_expectedTextureLevel	= de::MovePtr<tcu::TextureLevel>(new tcu::TextureLevel(dst.getFormat(), dst.getWidth(), dst.getHeight(), dst.getDepth()));
515 	tcu::copy(m_expectedTextureLevel->getAccess(), dst);
516 
517 	deUint32*						currentPtr							= (deUint32*) m_expectedTextureLevel->getAccess().getDataPtr() + m_params.dstOffset / 4;
518 	deUint32*						endPtr								= currentPtr + m_params.size / 4;
519 
520 	while (currentPtr < endPtr)
521 	{
522 		*currentPtr = m_params.testData[0];
523 		currentPtr++;
524 	}
525 }
526 
527 class FillBufferTestCase : public vkt::TestCase
528 {
529 public:
FillBufferTestCase(tcu::TestContext & testCtx,const std::string & name,const TestParams params)530 									FillBufferTestCase					(tcu::TestContext&			testCtx,
531 																		 const std::string&			name,
532 																		 const TestParams			params)
533 									: vkt::TestCase						(testCtx, name)
534 									, m_params							(params)
535 	{}
536 
createInstance(Context & context) const537 	virtual TestInstance*			createInstance						(Context&					context) const
538 	{
539 		return static_cast<TestInstance*>(new FillBufferTestInstance(context, m_params));
540 	}
541 private:
542 	const TestParams				m_params;
543 };
544 
545 // Update Buffer
546 
// Exercises vkCmdUpdateBuffer instead of vkCmdFillBuffer; reuses the
// device/queue/buffer setup from FillBufferTestInstance.
class UpdateBufferTestInstance : public FillBufferTestInstance
{
public:
									UpdateBufferTestInstance			(Context&					context,
																		 TestParams					testParams)
									: FillBufferTestInstance			(context, testParams)
	{}
	virtual tcu::TestStatus			iterate								(void);

protected:
	// Expected contents: initial data with testData copied over the updated range.
	virtual void					generateExpectedResult				(void);
};
559 
// Same flow as FillBufferTestInstance::iterate, but records vkCmdUpdateBuffer
// (inline data from m_params.testData) instead of vkCmdFillBuffer.
tcu::TestStatus						UpdateBufferTestInstance::iterate	(void)
{
	// The buffer is interpreted as a 1D RGBA8 image, one texel per 4 bytes.
	const int						dstLevelWidth						= (int)(m_params.dstSize / 4);
	m_destinationTextureLevel = de::MovePtr<tcu::TextureLevel>(new tcu::TextureLevel(mapVkFormat(VK_FORMAT_R8G8B8A8_UINT), dstLevelWidth, 1));

	generateBuffer(m_destinationTextureLevel->getAccess(), dstLevelWidth, 1, 1);

	generateExpectedResult();

	uploadBuffer(m_destinationTextureLevel->getAccess(), *m_destinationBufferAlloc);

	const DeviceInterface&	vk		= m_context.getDeviceInterface();
	const VkQueue			queue	= getDeviceQueue(vk, m_device, m_queueFamilyIndex, 0);

	// Make the transfer write visible to the host read-back below.
	const VkBufferMemoryBarrier		dstBufferBarrier					=
	{
		VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,						// VkStructureType			sType;
		DE_NULL,														// const void*				pNext;
		VK_ACCESS_TRANSFER_WRITE_BIT,									// VkAccessFlags			srcAccessMask;
		VK_ACCESS_HOST_READ_BIT,										// VkAccessFlags			dstAccessMask;
		VK_QUEUE_FAMILY_IGNORED,										// deUint32					srcQueueFamilyIndex;
		VK_QUEUE_FAMILY_IGNORED,										// deUint32					dstQueueFamilyIndex;
		*m_destination,													// VkBuffer					buffer;
		m_params.dstOffset,												// VkDeviceSize				offset;
		VK_WHOLE_SIZE													// VkDeviceSize				size;
	};

	beginCommandBuffer(vk, *m_cmdBuffer);
	vk.cmdUpdateBuffer(*m_cmdBuffer, *m_destination, m_params.dstOffset, m_params.size, m_params.testData);
	vk.cmdPipelineBarrier(*m_cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &dstBufferBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
	endCommandBuffer(vk, *m_cmdBuffer);

	submitCommandsAndWait(vk, m_device, queue, m_cmdBuffer.get());

	// Read buffer data
	de::MovePtr<tcu::TextureLevel>	resultLevel	(new tcu::TextureLevel(m_destinationTextureLevel->getAccess().getFormat(), dstLevelWidth, 1));
	invalidateAlloc(vk, m_device, *m_destinationBufferAlloc);
	tcu::copy(*resultLevel, tcu::ConstPixelBufferAccess(resultLevel->getFormat(), resultLevel->getSize(), m_destinationBufferAlloc->getHostPtr()));

	return checkTestResult(resultLevel->getAccess());
}
601 
generateExpectedResult(void)602 void								UpdateBufferTestInstance::generateExpectedResult
603 																		(void)
604 {
605 	const tcu::ConstPixelBufferAccess
606 									dst									= m_destinationTextureLevel->getAccess();
607 
608 	m_expectedTextureLevel	= de::MovePtr<tcu::TextureLevel>(new tcu::TextureLevel(dst.getFormat(), dst.getWidth(), dst.getHeight(), dst.getDepth()));
609 	tcu::copy(m_expectedTextureLevel->getAccess(), dst);
610 
611 	deUint32*						currentPtr							= (deUint32*) m_expectedTextureLevel->getAccess().getDataPtr() + m_params.dstOffset / 4;
612 
613 	deMemcpy(currentPtr, m_params.testData, (size_t)m_params.size);
614 }
615 
616 class UpdateBufferTestCase : public vkt::TestCase
617 {
618 public:
UpdateBufferTestCase(tcu::TestContext & testCtx,const std::string & name,const TestParams params)619 									UpdateBufferTestCase				(tcu::TestContext&			testCtx,
620 																		 const std::string&			name,
621 																		 const TestParams			params)
622 									: vkt::TestCase						(testCtx, name)
623 									, m_params							(params)
624 	{}
625 
createInstance(Context & context) const626 	virtual TestInstance*			createInstance						(Context&					context) const
627 	{
628 		return (TestInstance*) new UpdateBufferTestInstance(context, m_params);
629 	}
630 private:
631 	TestParams						m_params;
632 };
633 
634 } // anonymous
635 
// Builds the "fill_and_update_buffer" test group: for each allocation
// strategy / queue combination it adds fill and update cases covering the
// whole buffer, single words, the second half, and VK_WHOLE_SIZE variants
// with unaligned buffer sizes and varying offsets.
tcu::TestCaseGroup*					createFillAndUpdateBufferTests	(tcu::TestContext&			testCtx)
{
	// Index 0: suballocation, index 1: dedicated allocation (selected by bool below).
	const de::SharedPtr<IBufferAllocator> bufferAllocators[]
	{
		de::SharedPtr<BufferSuballocation>(new BufferSuballocation()),
		de::SharedPtr<BufferDedicatedAllocation>(new BufferDedicatedAllocation())
	};

	de::MovePtr<tcu::TestCaseGroup> fillAndUpdateBufferTests(new tcu::TestCaseGroup(testCtx, "fill_and_update_buffer"));

	struct TestGroupData
	{
		const char*		name;
		bool			useDedicatedAllocation;
		bool			useTransferOnlyQueue;
	};
	const TestGroupData testGroupData[]
	{
		// BufferView Fill and Update Tests for Suballocated Objects
		{ "suballocation",false,	false },
		// BufferView Fill and Update Tests for Suballocated Objects on transfer only queue
		{ "suballocation_transfer_queue",false,	true },
		// BufferView Fill and Update Tests for Dedicatedly Allocated Objects
		{ "dedicated_alloc",true,	false },
	};

	TestParams params;
	for (const auto& groupData : testGroupData)
	{
		de::MovePtr<tcu::TestCaseGroup> currentTestsGroup(new tcu::TestCaseGroup(testCtx, groupData.name));

		params.dstSize				= TestParams::TEST_DATA_SIZE;
		params.bufferAllocator		= bufferAllocators[groupData.useDedicatedAllocation];
		params.useTransferOnlyQueue	= groupData.useTransferOnlyQueue;

		// Fill testData with a repeating byte pattern. With dstSize ==
		// TEST_DATA_SIZE, dstSize * sizeof(deUint32) covers exactly
		// sizeof(testData) bytes.
		deUint8* data = (deUint8*) params.testData;
		for (deUint32 b = 0u; b < (params.dstSize * sizeof(params.testData[0])); b++)
			data[b] = (deUint8) (b % 255);

		// Fill/update the entire buffer.
		{
			const std::string		testName							("buffer_whole");

			params.dstOffset = 0;
			params.size = params.dstSize;

			currentTestsGroup->addChild(new FillBufferTestCase(testCtx, "fill_" + testName, params));
			currentTestsGroup->addChild(new UpdateBufferTestCase(testCtx, "update_" + testName, params));
		}

		// Fill/update only the first 32-bit word.
		{
			const std::string		testName							("buffer_first_one");

			params.dstOffset = 0;
			params.size = 4;

			currentTestsGroup->addChild(new FillBufferTestCase(testCtx, "fill_" + testName, params));
			currentTestsGroup->addChild(new UpdateBufferTestCase(testCtx, "update_" + testName, params));
		}

		// Fill/update only the second 32-bit word.
		{
			const std::string		testName							("buffer_second_one");

			params.dstOffset = 4;
			params.size = 4;

			currentTestsGroup->addChild(new FillBufferTestCase(testCtx, "fill_" + testName, params));
			currentTestsGroup->addChild(new UpdateBufferTestCase(testCtx, "update_" + testName, params));
		}

		// Fill/update the second half of the buffer.
		{
			const std::string		testName							("buffer_second_part");

			params.dstOffset = params.dstSize / 2;
			params.size = params.dstSize / 2;

			currentTestsGroup->addChild(new FillBufferTestCase(testCtx, "fill_" + testName, params));
			currentTestsGroup->addChild(new UpdateBufferTestCase(testCtx, "update_" + testName, params));
		}

		// VK_WHOLE_SIZE tests.
		// i varies the number of trailing bytes beyond a 4-byte multiple,
		// j varies the fill offset (in whole words).
		{
			for (VkDeviceSize i = 0; i < sizeof(deUint32); ++i)
			{
				for (VkDeviceSize j = 0; j < sizeof(deUint32); ++j)
				{
					params.dstSize		= TestParams::TEST_DATA_SIZE + i;
					params.dstOffset	= j * sizeof(deUint32);
					params.size			= VK_WHOLE_SIZE;

					const VkDeviceSize	extraBytes	= params.dstSize % sizeof(deUint32);
					const std::string	name		= "fill_buffer_vk_whole_size_" + de::toString(extraBytes) + "_extra_bytes_offset_" + de::toString(params.dstOffset);

					currentTestsGroup->addChild(new FillWholeBufferTestCase{testCtx, name, params});
				}
			}
		}

		fillAndUpdateBufferTests->addChild(currentTestsGroup.release());
	}

	return fillAndUpdateBufferTests.release();
}
738 
739 } // api
740 } // vkt
741