• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*-------------------------------------------------------------------------
2  * Vulkan Conformance Tests
3  * ------------------------
4  *
5  * Copyright (c) 2015 Google Inc.
6  *
7  * Licensed under the Apache License, Version 2.0 (the "License");
8  * you may not use this file except in compliance with the License.
9  * You may obtain a copy of the License at
10  *
11  *      http://www.apache.org/licenses/LICENSE-2.0
12  *
13  * Unless required by applicable law or agreed to in writing, software
14  * distributed under the License is distributed on an "AS IS" BASIS,
15  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16  * See the License for the specific language governing permissions and
17  * limitations under the License.
18  *
19  *//*!
20  * \file
21  * \brief Simple memory allocation tests.
22  *//*--------------------------------------------------------------------*/
23 
24 #include "vktMemoryAllocationTests.hpp"
25 
26 #include "vktTestCaseUtil.hpp"
27 #include "vktCustomInstancesDevices.hpp"
28 
29 #include "tcuMaybe.hpp"
30 #include "tcuResultCollector.hpp"
31 #include "tcuTestLog.hpp"
32 #include "tcuPlatform.hpp"
33 #include "tcuCommandLine.hpp"
34 
35 #include "vkPlatform.hpp"
36 #include "vkStrUtil.hpp"
37 #include "vkRef.hpp"
38 #include "vkDeviceUtil.hpp"
39 #include "vkQueryUtil.hpp"
40 #include "vkRefUtil.hpp"
41 #include "vkAllocationCallbackUtil.hpp"
42 
43 #include "deUniquePtr.hpp"
44 #include "deStringUtil.hpp"
45 #include "deRandom.hpp"
46 
47 using tcu::Maybe;
48 using tcu::TestLog;
49 
50 using std::string;
51 using std::vector;
52 
53 using namespace vk;
54 
55 namespace vkt
56 {
57 namespace memory
58 {
59 namespace
60 {
61 
// Rounds 'a' up to the nearest multiple of 'b' ('a' is returned unchanged
// when it is already a multiple of 'b'). Intended for unsigned/integer types.
template<typename T>
T roundUpToMultiple (const T& a, const T& b)
{
	const T wholeChunks = a / b;
	return (a % b == 0) ? (b * wholeChunks) : (b * (wholeChunks + 1));
}
67 
enum
{
	// The spec-mandated minimum for maxMemoryAllocationCount is 4096.
	// Use 4000 to take into account possible memory allocations made by
	// layers etc. on top of what the tests allocate themselves.
	MAX_ALLOCATION_COUNT = 4000
};
74 
// Selects which device-creation / allocation path the tests exercise.
enum AllocationMode
{
	ALLOCATION_MODE_DEFAULT,		// Plain device, plain vkAllocateMemory().
	ALLOCATION_MODE_DEVICE_GROUP,	// Allocate across a device group (VK_KHR_device_group).
	ALLOCATION_MODE_PAGEABLE		// Device created with VK_EXT_pageable_device_local_memory enabled.
};
81 
// Parameters for a single alloc/free test case. Exactly one of memorySize
// and memoryPercentage is expected to be set (asserted by
// AllocateFreeTestInstance's constructor).
struct TestConfig
{
	enum Order
	{
		ALLOC_FREE,			// Allocate all objects, then free in allocation order.
		ALLOC_REVERSE_FREE,	// Allocate all objects, then free in reverse order.
		MIXED_ALLOC_FREE,	// Free each object right after allocating it.
		ORDER_LAST
	};

	Maybe<VkDeviceSize>	memorySize;				// Absolute size of each allocation, if set.
	Maybe<float>		memoryPercentage;		// Fraction of the heap size to allocate, if set.
	deUint32			memoryAllocationCount;	// Number of allocations per memory type.
	Order				order;					// Allocation/free ordering.
	AllocationMode		allocationMode;			// Device / allocation variant to use.

	TestConfig (void)
		: memoryAllocationCount	((deUint32)-1)
		, order					(ORDER_LAST)
		, allocationMode		(ALLOCATION_MODE_DEFAULT)
	{
	}
};
105 
// Parameters for the random alloc/free stress test.
struct TestConfigRandom
{
	const deUint32			seed;			// RNG seed driving the operation sequence.
	const AllocationMode	allocationMode;	// Device / allocation variant to use.

	TestConfigRandom (const deUint32 _seed, const AllocationMode _allocationMode)
		: seed				(_seed)
		, allocationMode	(_allocationMode)
	{
	}
};
117 
// Rounds 'value' up to the next multiple of 'multiple'; returns 'value'
// unchanged when it is already aligned.
// NOTE(review): duplicates roundUpToMultiple() above — candidates for merging.
template<typename T>
T roundUpToNextMultiple (T value, T multiple)
{
	const T remainder = value % multiple;
	return (remainder == 0) ? value : (value + multiple - remainder);
}
126 
// Common base for the allocation test instances: creates the logical device
// (plain, device-group or pageable variant) that the tests allocate from.
class BaseAllocateTestInstance : public TestInstance
{
public:
	BaseAllocateTestInstance		(Context& context, AllocationMode allocationMode)
		: TestInstance				(context)
		, m_allocationMode			(allocationMode)
		, m_subsetAllocationAllowed	(false)
		, m_numPhysDevices			(1)
		, m_memoryProperties		(getPhysicalDeviceMemoryProperties(context.getInstanceInterface(), context.getPhysicalDevice()))
		, m_deviceCoherentMemSupported  (false)
	{
		if (m_allocationMode == ALLOCATION_MODE_DEVICE_GROUP)
			createDeviceGroup();
		else
			createTestDevice();

		// Pre-fill the device-mask allocation info; deviceMask is patched by
		// the tests before each allocation when device groups are used.
		m_allocFlagsInfo.sType		= VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO;
		m_allocFlagsInfo.pNext		= DE_NULL;
		m_allocFlagsInfo.flags		= VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT;
		m_allocFlagsInfo.deviceMask	= 0;
	}

	void						createTestDevice	(void);
	void						createDeviceGroup	(void);
	const vk::DeviceInterface&	getDeviceInterface	(void) { return *m_deviceDriver; }
	vk::VkDevice				getDevice			(void) { return m_logicalDevice.get();}

protected:
	AllocationMode							m_allocationMode;
	bool									m_subsetAllocationAllowed;		// Device group's subsetAllocation property.
	VkMemoryAllocateFlagsInfo				m_allocFlagsInfo;				// Chained into allocations in device-group mode.
	deUint32								m_numPhysDevices;				// Physical devices in the selected group (1 otherwise).
	VkPhysicalDeviceMemoryProperties		m_memoryProperties;
	bool									m_deviceCoherentMemSupported;	// VK_AMD_device_coherent_memory feature enabled on the device.

private:
	vk::Move<vk::VkDevice>			m_logicalDevice;
#ifndef CTS_USES_VULKANSC
	de::MovePtr<vk::DeviceDriver>	m_deviceDriver;
#else
	de::MovePtr<vk::DeviceDriverSC, vk::DeinitDeviceDeleter>	m_deviceDriver;
#endif // CTS_USES_VULKANSC

};
171 
// Creates the logical device used by the non-device-group tests. Optionally
// enables protected memory, AMD device-coherent memory and (for the pageable
// variant) VK_EXT_pageable_device_local_memory, depending on support.
void BaseAllocateTestInstance::createTestDevice (void)
{
	const auto&										instanceDriver			= m_context.getInstanceInterface();
	const VkInstance								instance				= m_context.getInstance();
	const VkPhysicalDevice							physicalDevice			= chooseDevice(instanceDriver, instance, m_context.getTestContext().getCommandLine());
	const VkPhysicalDeviceFeatures					deviceFeatures			= getPhysicalDeviceFeatures(instanceDriver, physicalDevice);
	const float										queuePriority			= 1.0f;
	deUint32										queueFamilyIndex		= 0;
	bool											protMemSupported		= false;
	const bool										usePageable				= m_allocationMode == ALLOCATION_MODE_PAGEABLE;

	// The feature structs below are chained through this pointer in
	// declaration order and hung off features2.pNext at the end.
	void* pNext = DE_NULL;

	if (usePageable && !m_context.isDeviceFunctionalitySupported("VK_EXT_pageable_device_local_memory"))
		TCU_THROW(NotSupportedError, "VK_EXT_pageable_device_local_memory is not supported");

#ifndef CTS_USES_VULKANSC
	VkPhysicalDevicePageableDeviceLocalMemoryFeaturesEXT pageableDeviceLocalMemoryFeature =
	{
		VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PAGEABLE_DEVICE_LOCAL_MEMORY_FEATURES_EXT,	// VkStructureType					sType
		pNext,																		// const void*						pNext
		VK_FALSE,																		// VkBool32							pageableDeviceLocalMemory;
	};
	pNext = (usePageable) ? &pageableDeviceLocalMemoryFeature : DE_NULL;
#endif // CTS_USES_VULKANSC

	VkPhysicalDeviceProtectedMemoryFeatures protectedMemoryFeature =
	{
		VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES,	// VkStructureType					sType
		pNext,					// const void*						pNext
		VK_FALSE														// VkBool32							protectedMemory;
	};
	pNext = &protectedMemoryFeature;

#ifndef CTS_USES_VULKANSC
	VkPhysicalDeviceCoherentMemoryFeaturesAMD coherentMemoryFeatures =
	{
		VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COHERENT_MEMORY_FEATURES_AMD, // VkStructureType                                      sType
		pNext,															// const void*                                          pNext
		VK_FALSE                                                        // VkBool32                                             deviceCoherentMemory;
	};
	// Only chain the AMD struct when the extension is advertised.
	if (m_context.isDeviceFunctionalitySupported("VK_AMD_device_coherent_memory"))
		pNext = &coherentMemoryFeatures;
#endif // CTS_USES_VULKANSC

	VkPhysicalDeviceFeatures				features;
	deMemset(&features, 0, sizeof(vk::VkPhysicalDeviceFeatures));

	VkPhysicalDeviceFeatures2				features2		=
	{
		VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2,				// VkStructureType					sType
		pNext,														// const void*						pNext
		features													// VkPhysicalDeviceFeatures			features
	};

	// Check if the physical device supports the protected memory feature.
	// This query also fills in features2 (and the chained structs) with the
	// device's supported features, which are then requested at creation.
	m_context.requireInstanceFunctionality("VK_KHR_get_physical_device_properties2");
	instanceDriver.getPhysicalDeviceFeatures2(physicalDevice, &features2);
	protMemSupported			= protectedMemoryFeature.protectedMemory;
#ifndef CTS_USES_VULKANSC
	m_deviceCoherentMemSupported	= coherentMemoryFeatures.deviceCoherentMemory;
#endif // CTS_USES_VULKANSC

	VkDeviceQueueCreateFlags queueCreateFlags = protMemSupported ? (vk::VkDeviceQueueCreateFlags)vk::VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT : 0u;

#ifndef CTS_USES_VULKANSC
	if (usePageable && !pageableDeviceLocalMemoryFeature.pageableDeviceLocalMemory)
		TCU_FAIL("pageableDeviceLocalMemory feature not supported but VK_EXT_pageable_device_local_memory advertised");

	pageableDeviceLocalMemoryFeature.pageableDeviceLocalMemory = usePageable;
#endif // CTS_USES_VULKANSC

	std::vector<const char*>						deviceExtensions;
	if (usePageable)
	{
		// VK_EXT_pageable_device_local_memory requires VK_EXT_memory_priority.
		deviceExtensions.push_back("VK_EXT_memory_priority");
		deviceExtensions.push_back("VK_EXT_pageable_device_local_memory");
	}

	VkDeviceQueueCreateInfo							queueInfo		=
	{
		VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,					// VkStructureType					sType;
		DE_NULL,													// const void*						pNext;
		queueCreateFlags,											// VkDeviceQueueCreateFlags			flags;
		queueFamilyIndex,											// deUint32							queueFamilyIndex;
		1u,															// deUint32							queueCount;
		&queuePriority												// const float*						pQueuePriorities;
	};

	const VkDeviceCreateInfo						deviceInfo		=
	{
		VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,							// VkStructureType					sType;
		(protMemSupported || usePageable || m_deviceCoherentMemSupported) ? &features2 : DE_NULL,		// const void*						pNext;
		(VkDeviceCreateFlags)0,											// VkDeviceCreateFlags				flags;
		1u,																// uint32_t							queueCreateInfoCount;
		&queueInfo,														// const VkDeviceQueueCreateInfo*	pQueueCreateInfos;
		0u,																// uint32_t							enabledLayerCount;
		DE_NULL,														// const char* const*				ppEnabledLayerNames;
		deUint32(deviceExtensions.size()),								// uint32_t							enabledExtensionCount;
		(deviceExtensions.empty()) ? DE_NULL : deviceExtensions.data(),	// const char* const*				ppEnabledExtensionNames;
		(protMemSupported || usePageable || m_deviceCoherentMemSupported) ? DE_NULL : &deviceFeatures	// const VkPhysicalDeviceFeatures*	pEnabledFeatures;
	};

	m_logicalDevice		= createCustomDevice(m_context.getTestContext().getCommandLine().isValidationEnabled(), m_context.getPlatformInterface(), instance, instanceDriver, physicalDevice, &deviceInfo);
#ifndef CTS_USES_VULKANSC
	m_deviceDriver = de::MovePtr<DeviceDriver>(new DeviceDriver(m_context.getPlatformInterface(), instance, *m_logicalDevice, m_context.getUsedApiVersion()));
#else
	m_deviceDriver = de::MovePtr<DeviceDriverSC, DeinitDeviceDeleter>(new DeviceDriverSC(m_context.getPlatformInterface(), instance, *m_logicalDevice, m_context.getTestContext().getCommandLine(), m_context.getResourceInterface(), m_context.getDeviceVulkanSC10Properties(), m_context.getDeviceProperties(), m_context.getUsedApiVersion()), vk::DeinitDeviceDeleter(m_context.getResourceInterface().get(), *m_logicalDevice));
#endif // CTS_USES_VULKANSC
}
282 
// Creates a logical device spanning the device group selected on the command
// line. Records the group's physical-device count and subsetAllocation
// capability, and re-queries memory properties from the chosen group member.
void BaseAllocateTestInstance::createDeviceGroup (void)
{
	const tcu::CommandLine&							cmdLine					= m_context.getTestContext().getCommandLine();
	// Command-line ids are 1-based; convert to 0-based indices.
	const deUint32									devGroupIdx				= cmdLine.getVKDeviceGroupId() - 1;
	const deUint32									physDeviceIdx			= cmdLine.getVKDeviceId() - 1;
	const float										queuePriority			= 1.0f;
	deUint32										queueFamilyIndex		= 0;
	const InstanceInterface&						instanceDriver			= m_context.getInstanceInterface();
	const VkInstance								instance				= m_context.getInstance();
	std::vector<VkPhysicalDeviceGroupProperties>	devGroupProperties		= enumeratePhysicalDeviceGroups(instanceDriver, instance);
	m_numPhysDevices														= devGroupProperties[devGroupIdx].physicalDeviceCount;
	m_subsetAllocationAllowed												= devGroupProperties[devGroupIdx].subsetAllocation;
	if (m_numPhysDevices < 2)
		TCU_THROW(NotSupportedError, "Device group allocation tests not supported with 1 physical device");
	std::vector<const char*>						deviceExtensions;

	if (!isCoreDeviceExtension(m_context.getUsedApiVersion(), "VK_KHR_device_group"))
		deviceExtensions.push_back("VK_KHR_device_group");

	VkDeviceGroupDeviceCreateInfo					deviceGroupInfo =
	{
		VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO,									//stype
		DE_NULL,																			//pNext
		devGroupProperties[devGroupIdx].physicalDeviceCount,								//physicalDeviceCount
		devGroupProperties[devGroupIdx].physicalDevices										//physicalDevices
	};

	const VkPhysicalDeviceFeatures					deviceFeatures	= getPhysicalDeviceFeatures(instanceDriver, deviceGroupInfo.pPhysicalDevices[physDeviceIdx]);

	// Pick a compute-capable queue family (the last one found wins).
	const std::vector<VkQueueFamilyProperties>		queueProps		= getPhysicalDeviceQueueFamilyProperties(instanceDriver, devGroupProperties[devGroupIdx].physicalDevices[physDeviceIdx]);
	for (size_t queueNdx = 0; queueNdx < queueProps.size(); queueNdx++)
	{
		if (queueProps[queueNdx].queueFlags & VK_QUEUE_COMPUTE_BIT)
			queueFamilyIndex = (deUint32)queueNdx;
	}

	VkDeviceQueueCreateInfo							queueInfo		=
	{
		VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,					// VkStructureType					sType;
		DE_NULL,													// const void*						pNext;
		(VkDeviceQueueCreateFlags)0u,								// VkDeviceQueueCreateFlags			flags;
		queueFamilyIndex,											// deUint32							queueFamilyIndex;
		1u,															// deUint32							queueCount;
		&queuePriority												// const float*						pQueuePriorities;
	};

	const VkDeviceCreateInfo						deviceInfo		=
	{
		VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,						// VkStructureType					sType;
		&deviceGroupInfo,											// const void*						pNext;
		(VkDeviceCreateFlags)0,										// VkDeviceCreateFlags				flags;
		1u	,														// uint32_t							queueCreateInfoCount;
		&queueInfo,													// const VkDeviceQueueCreateInfo*	pQueueCreateInfos;
		0u,															// uint32_t							enabledLayerCount;
		DE_NULL,													// const char* const*				ppEnabledLayerNames;
		deUint32(deviceExtensions.size()),							// uint32_t							enabledExtensionCount;
		deviceExtensions.empty() ? DE_NULL : &deviceExtensions[0],	// const char* const*	ppEnabledExtensionNames;
		&deviceFeatures,											// const VkPhysicalDeviceFeatures*	pEnabledFeatures;
	};

	m_logicalDevice		= createCustomDevice(m_context.getTestContext().getCommandLine().isValidationEnabled(), m_context.getPlatformInterface(), instance, instanceDriver, deviceGroupInfo.pPhysicalDevices[physDeviceIdx], &deviceInfo);
#ifndef CTS_USES_VULKANSC
	m_deviceDriver		= de::MovePtr<DeviceDriver>(new DeviceDriver(m_context.getPlatformInterface(), instance, *m_logicalDevice, m_context.getUsedApiVersion()));
#else
	m_deviceDriver		= de::MovePtr<DeviceDriverSC, DeinitDeviceDeleter>(new DeviceDriverSC(m_context.getPlatformInterface(), instance, *m_logicalDevice, m_context.getTestContext().getCommandLine(), m_context.getResourceInterface(), m_context.getDeviceVulkanSC10Properties(), m_context.getDeviceProperties(), m_context.getUsedApiVersion()), vk::DeinitDeviceDeleter(m_context.getResourceInterface().get(), *m_logicalDevice));
#endif // CTS_USES_VULKANSC

	// Memory properties must come from the group member we actually test.
	m_memoryProperties	= getPhysicalDeviceMemoryProperties(instanceDriver, deviceGroupInfo.pPhysicalDevices[physDeviceIdx]);
}
352 
// Allocates m_config.memoryAllocationCount memory objects from one memory
// type per iterate() call and frees them in the configured order.
class AllocateFreeTestInstance : public BaseAllocateTestInstance
{
public:
	AllocateFreeTestInstance	(Context& context, const TestConfig config)
		: BaseAllocateTestInstance		(context, config.allocationMode)
		, m_config						(config)
		, m_result						(m_context.getTestContext().getLog())
		, m_memoryTypeIndex				(0)
		, m_memoryLimits				(tcu::getMemoryLimits(context.getTestContext().getPlatform()))
	{
		// Exactly one of memorySize / memoryPercentage must be provided.
		DE_ASSERT(!!m_config.memorySize != !!m_config.memoryPercentage);
	}

	tcu::TestStatus		iterate							(void);

private:
	const TestConfig						m_config;
	tcu::ResultCollector					m_result;
	deUint32								m_memoryTypeIndex;	// Memory type exercised by the next iterate() call.
	const tcu::PlatformMemoryLimits			m_memoryLimits;
};
374 
375 
// One iteration per memory type: allocates the configured number of memory
// objects of the configured size from type m_memoryTypeIndex and frees them
// in the configured order. Returns incomplete() until every type is done,
// then the collected result.
tcu::TestStatus AllocateFreeTestInstance::iterate (void)
{
	TestLog&								log					= m_context.getTestContext().getLog();
	const VkDevice							device				= getDevice();
	const DeviceInterface&					vkd					= getDeviceInterface();
	VkMemoryRequirements					memReqs;
	const deUint32							queueFamilyIndex	= m_context.getUniversalQueueFamilyIndex();
	VkBufferCreateFlags						createFlags			= (vk::VkBufferCreateFlagBits)0u;
	VkBufferUsageFlags						usageFlags			= vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT|VK_BUFFER_USAGE_TRANSFER_DST_BIT;
	VkSharingMode							sharingMode			= vk::VK_SHARING_MODE_EXCLUSIVE;
	Move<VkBuffer>							buffer;

	// Protected memory types need a protected buffer for the size query below.
	if ((m_memoryProperties.memoryTypes[m_memoryTypeIndex].propertyFlags & vk::VK_MEMORY_PROPERTY_PROTECTED_BIT) == vk::VK_MEMORY_PROPERTY_PROTECTED_BIT)
	{
		createFlags |= vk::VK_BUFFER_CREATE_PROTECTED_BIT;
	}

	DE_ASSERT(m_config.memoryAllocationCount <= MAX_ALLOCATION_COUNT);

	// Log the test parameters once, on the first iteration only.
	if (m_memoryTypeIndex == 0)
	{
		log << TestLog::Message << "Memory allocation count: " << m_config.memoryAllocationCount << TestLog::EndMessage;
		log << TestLog::Message << "Single allocation size: " << (m_config.memorySize ? de::toString(*m_config.memorySize) : de::toString(100.0f * (*m_config.memoryPercentage)) + " percent of the heap size.") << TestLog::EndMessage;

		if (m_config.order == TestConfig::ALLOC_REVERSE_FREE)
			log << TestLog::Message << "Memory is freed in reversed order. " << TestLog::EndMessage;
		else if (m_config.order == TestConfig::ALLOC_FREE)
			log << TestLog::Message << "Memory is freed in same order as allocated. " << TestLog::EndMessage;
		else if (m_config.order == TestConfig::MIXED_ALLOC_FREE)
			log << TestLog::Message << "Memory is freed right after allocation. " << TestLog::EndMessage;
		else
			DE_FATAL("Unknown allocation order");
	}

	// Skip AMD device-coherent memory types when the feature was not enabled
	// on the device.
	bool memoryTypeSupported = true;
#ifndef CTS_USES_VULKANSC
	memoryTypeSupported = !((m_memoryProperties.memoryTypes[m_memoryTypeIndex].propertyFlags & vk::VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD) > 0 && !m_deviceCoherentMemSupported);
#endif

	if (memoryTypeSupported)
	{
		try
		{
			const VkMemoryType		memoryType = m_memoryProperties.memoryTypes[m_memoryTypeIndex];
			const VkMemoryHeap		memoryHeap = m_memoryProperties.memoryHeaps[memoryType.heapIndex];

			// Create a buffer to get the required size
			{
				const VkDeviceSize bufferSize = m_config.memorySize ? *m_config.memorySize : (VkDeviceSize)(*m_config.memoryPercentage * (float)memoryHeap.size);

				VkBufferCreateInfo bufferParams =
				{
					VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,                       // VkStructureType          sType;
					DE_NULL,                                                    // const void*              pNext;
					createFlags,                                                // VkBufferCreateFlags      flags;
					bufferSize,                                                 // VkDeviceSize             size;
					usageFlags,                                                 // VkBufferUsageFlags       usage;
					sharingMode,                                                // VkSharingMode            sharingMode;
					1u,                                                         // uint32_t                 queueFamilyIndexCount;
					&queueFamilyIndex,                                          // const uint32_t*          pQueueFamilyIndices;
				};

				buffer = createBuffer(vkd, device, &bufferParams);
				vkd.getBufferMemoryRequirements(device, *buffer, &memReqs);
			}

			const VkDeviceSize		allocationSize = (m_config.memorySize ? memReqs.size : (VkDeviceSize)(*m_config.memoryPercentage * (float)memoryHeap.size));
			const VkDeviceSize		roundedUpAllocationSize = roundUpToNextMultiple(allocationSize, m_memoryLimits.deviceMemoryAllocationGranularity);
			vector<VkDeviceMemory>	memoryObjects(m_config.memoryAllocationCount, (VkDeviceMemory)0);

			log << TestLog::Message << "Memory type index: " << m_memoryTypeIndex << TestLog::EndMessage;

			if (memoryType.heapIndex >= m_memoryProperties.memoryHeapCount)
				m_result.fail("Invalid heap index defined for memory type.");

			{
				log << TestLog::Message << "Memory type: " << memoryType << TestLog::EndMessage;
				log << TestLog::Message << "Memory heap: " << memoryHeap << TestLog::EndMessage;

				if (roundedUpAllocationSize * m_config.memoryAllocationCount > memoryHeap.size)
					TCU_THROW(NotSupportedError, "Memory heap doesn't have enough memory.");

#if (DE_PTR_SIZE == 4)
				// For 32-bit binaries we cap the total host visible allocations to 1.5GB to
				// avoid exhausting CPU virtual address space and throwing a false negative result.
				if ((memoryType.propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) &&
					allocationSize * m_config.memoryAllocationCount * (m_subsetAllocationAllowed ? 1 : m_numPhysDevices) >= 1610612736)

					log << TestLog::Message << "    Skipping: Not enough CPU virtual address space for all host visible allocations." << TestLog::EndMessage;
				else
				{
#else
					{
#endif

						try
						{
							// In device-group mode with subset allocation, repeat the
							// whole test for every non-empty device-mask combination.
							const deUint32 totalDeviceMaskCombinations = m_subsetAllocationAllowed ? (1 << m_numPhysDevices) - 1 : 1;
							for (deUint32 deviceMask = 1; deviceMask <= totalDeviceMaskCombinations; deviceMask++)
							{
								// Allocate on all physical devices if subset allocation is not allowed, do only once.
								if (!m_subsetAllocationAllowed)
									deviceMask = (1 << m_numPhysDevices) - 1;
								m_allocFlagsInfo.deviceMask = deviceMask;

								if (m_config.order == TestConfig::ALLOC_FREE || m_config.order == TestConfig::ALLOC_REVERSE_FREE)
								{
									// Phase 1: allocate everything.
									for (size_t ndx = 0; ndx < m_config.memoryAllocationCount; ndx++)
									{
										VkMemoryAllocateInfo	alloc =
										{
											VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,				// sType
											(m_allocationMode == ALLOCATION_MODE_DEVICE_GROUP) ? &m_allocFlagsInfo : DE_NULL,	// pNext
											allocationSize,										// allocationSize
											m_memoryTypeIndex									// memoryTypeIndex;
										};

										VkResult				res = vkd.allocateMemory(device, &alloc, (const VkAllocationCallbacks*)DE_NULL, &memoryObjects[ndx]);

										// Some implementations might have limitations on protected heap, and these limitations
										// don't show up in Vulkan queries. Use a hard coded threshold after which out of memory
										// is allowed.
										if (res == VK_ERROR_OUT_OF_DEVICE_MEMORY && memoryType.propertyFlags & vk::VK_MEMORY_PROPERTY_PROTECTED_BIT && ndx > 80)
											break;

										// We don't know the purpose of the memory type, memory type might have limitation not checked in this test.
										if (res == VK_ERROR_OUT_OF_DEVICE_MEMORY && (memReqs.memoryTypeBits & (1 << m_memoryTypeIndex)) == 0)
											break;

										VK_CHECK(res);

										TCU_CHECK(!!memoryObjects[ndx]);
									}

									// Phase 2: free in the requested order. Note that on
									// Vulkan SC memory is never freed (freeMemory is compiled out).
									if (m_config.order == TestConfig::ALLOC_FREE)
									{
										for (size_t ndx = 0; ndx < m_config.memoryAllocationCount; ndx++)
										{
											const VkDeviceMemory mem = memoryObjects[memoryObjects.size() - 1 - ndx];

											if (!!mem)
											{
#ifndef CTS_USES_VULKANSC
												vkd.freeMemory(device, mem, (const VkAllocationCallbacks*)DE_NULL);
#endif // CTS_USES_VULKANSC
												memoryObjects[memoryObjects.size() - 1 - ndx] = (VkDeviceMemory)0;
											}
										}
									}
									else
									{
										for (size_t ndx = 0; ndx < m_config.memoryAllocationCount; ndx++)
										{
											const VkDeviceMemory mem = memoryObjects[ndx];

											if (!!mem)
											{
#ifndef CTS_USES_VULKANSC
												vkd.freeMemory(device, mem, (const VkAllocationCallbacks*)DE_NULL);
#endif // CTS_USES_VULKANSC
												memoryObjects[ndx] = (VkDeviceMemory)0;
											}
										}
									}
								}
								else
								{
									// MIXED_ALLOC_FREE: free each object immediately.
									for (size_t ndx = 0; ndx < m_config.memoryAllocationCount; ndx++)
									{
										const VkMemoryAllocateInfo alloc =
										{
											VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,				// sType
											(m_allocationMode == ALLOCATION_MODE_DEVICE_GROUP) ? &m_allocFlagsInfo : DE_NULL,	// pNext
											allocationSize,										// allocationSize
											m_memoryTypeIndex									// memoryTypeIndex;
										};

										VK_CHECK(vkd.allocateMemory(device, &alloc, (const VkAllocationCallbacks*)DE_NULL, &memoryObjects[ndx]));
										TCU_CHECK(!!memoryObjects[ndx]);
#ifndef CTS_USES_VULKANSC
										vkd.freeMemory(device, memoryObjects[ndx], (const VkAllocationCallbacks*)DE_NULL);
#endif // CTS_USES_VULKANSC
										memoryObjects[ndx] = (VkDeviceMemory)0;
									}
								}
							}
						}
						catch (...)
						{
							// Clean up whatever was allocated before re-throwing.
							for (size_t ndx = 0; ndx < m_config.memoryAllocationCount; ndx++)
							{
								const VkDeviceMemory mem = memoryObjects[ndx];

								if (!!mem)
								{
#ifndef CTS_USES_VULKANSC
									vkd.freeMemory(device, mem, (const VkAllocationCallbacks*)DE_NULL);
#endif // CTS_USES_VULKANSC
									memoryObjects[ndx] = (VkDeviceMemory)0;
								}
							}

							throw;
						}
					}
				}
			}
		catch (const tcu::TestError& error)
		{
			m_result.fail(error.getMessage());
		}
	}

	m_memoryTypeIndex++;

	if (m_memoryTypeIndex < m_memoryProperties.memoryTypeCount)
		return tcu::TestStatus::incomplete();
	else
		return tcu::TestStatus(m_result.getResult(), m_result.getMessage());
}
596 
597 #ifndef CTS_USES_VULKANSC
598 
599 size_t computeDeviceMemorySystemMemFootprint (const DeviceInterface& vk, VkDevice device)
600 {
601 	AllocationCallbackRecorder	callbackRecorder	(getSystemAllocator());
602 
603 	{
604 		// 1 B allocation from memory type 0
605 		const VkMemoryAllocateInfo	allocInfo	=
606 		{
607 			VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
608 			DE_NULL,
609 			1u,
610 			0u,
611 		};
612 		const Unique<VkDeviceMemory>			memory			(allocateMemory(vk, device, &allocInfo, callbackRecorder.getCallbacks()));
613 		AllocationCallbackValidationResults		validateRes;
614 
615 		validateAllocationCallbacks(callbackRecorder, &validateRes);
616 
617 		TCU_CHECK(validateRes.violations.empty());
618 
619 		return getLiveSystemAllocationTotal(validateRes)
620 			   + sizeof(void*)*validateRes.liveAllocations.size(); // allocation overhead
621 	}
622 }
623 
// A memory type index paired with its properties.
struct MemoryType
{
	deUint32		index;	// Index into VkPhysicalDeviceMemoryProperties::memoryTypes.
	VkMemoryType	type;
};
629 
// One live device memory allocation tracked by the random test.
struct MemoryObject
{
	VkDeviceMemory			memory;
	VkDeviceSize			size;
	VkMemoryPropertyFlags	propertyFlags;	// Flags of the memory type it was allocated from.
};
636 
// Per-heap book-keeping for the random alloc/free test.
struct Heap
{
	VkMemoryHeap			heap;
	VkDeviceSize			memoryUsage;	// Bytes currently allocated from this heap.
	VkDeviceSize			maxMemoryUsage;	// Usage cap for the test (set to a fraction of heap.size).
	vector<MemoryType>		types;			// Memory types that allocate from this heap.
	vector<MemoryObject>	objects;		// Live allocations made from this heap.
};
645 
// Stress test performing a random sequence of allocations and frees across
// all memory heaps, keeping per-heap usage under a fixed budget.
class RandomAllocFreeTestInstance : public BaseAllocateTestInstance
{
public:
								RandomAllocFreeTestInstance		(Context& context, TestConfigRandom config);
								~RandomAllocFreeTestInstance	(void);

	tcu::TestStatus				iterate							(void);

private:
	const size_t					m_opCount;						// Random alloc/free operations per device mask.
	const size_t					m_allocSysMemSize;				// Estimated host-memory cost of one allocation.
	const tcu::PlatformMemoryLimits	m_memoryLimits;
	const deUint32					m_totalDeviceMaskCombinations;	// Device masks to cycle through (device-group mode).

	deUint32						m_memoryObjectCount;
	deUint32						m_memoryProtectedObjectCount;
	deUint32						m_currentDeviceMask;
	size_t							m_opNdx;						// Operations performed so far for the current mask.
	de::Random						m_rng;
	vector<Heap>					m_heaps;
	VkDeviceSize					m_totalSystemMem;				// Estimated host memory held by live allocations.
	VkDeviceSize					m_totalDeviceMem;				// Device-local memory held by live allocations.
};
669 
// Sets up per-heap book-keeping: caps each heap at 1/8 of its size and
// registers its usable memory types (skipping AMD device-coherent types when
// that feature is not enabled on the device).
RandomAllocFreeTestInstance::RandomAllocFreeTestInstance (Context& context, TestConfigRandom config)
	: BaseAllocateTestInstance	(context, config.allocationMode)
	, m_opCount						(128)
	, m_allocSysMemSize				(computeDeviceMemorySystemMemFootprint(getDeviceInterface(), context.getDevice())
									 + sizeof(MemoryObject))
	, m_memoryLimits				(tcu::getMemoryLimits(context.getTestContext().getPlatform()))
	, m_totalDeviceMaskCombinations	(m_subsetAllocationAllowed ? (1 << m_numPhysDevices) - 1 : 1)
	, m_memoryObjectCount			(0)
	, m_memoryProtectedObjectCount	(0)
	, m_currentDeviceMask			(m_subsetAllocationAllowed ? 1 : (1 << m_numPhysDevices) - 1)
	, m_opNdx						(0)
	, m_rng							(config.seed)
	, m_totalSystemMem				(0)
	, m_totalDeviceMem				(0)
{
	// The test's book-keeping assumes at most 32 heaps and 32 memory types.
	TCU_CHECK(m_memoryProperties.memoryHeapCount <= 32);
	TCU_CHECK(m_memoryProperties.memoryTypeCount <= 32);

	m_heaps.resize(m_memoryProperties.memoryHeapCount);

	for (deUint32 heapNdx = 0; heapNdx < m_memoryProperties.memoryHeapCount; heapNdx++)
	{
		m_heaps[heapNdx].heap			= m_memoryProperties.memoryHeaps[heapNdx];
		m_heaps[heapNdx].memoryUsage	= 0;
		m_heaps[heapNdx].maxMemoryUsage	= m_heaps[heapNdx].heap.size / 8; /* Use at maximum 12.5% of heap */

		m_heaps[heapNdx].objects.reserve(100);
	}

	for (deUint32 memoryTypeNdx = 0; memoryTypeNdx < m_memoryProperties.memoryTypeCount; memoryTypeNdx++)
	{
		const MemoryType type =
		{
			memoryTypeNdx,
			m_memoryProperties.memoryTypes[memoryTypeNdx]
		};

		TCU_CHECK(type.type.heapIndex < m_memoryProperties.memoryHeapCount);

		// Skip AMD device-coherent types when the feature is unavailable.
		if ((m_memoryProperties.memoryTypes[type.index].propertyFlags & vk::VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD) > 0 && !m_deviceCoherentMemSupported)
		{
			continue;
		}


		m_heaps[type.type.heapIndex].types.push_back(type);
	}
}
718 
719 RandomAllocFreeTestInstance::~RandomAllocFreeTestInstance (void)
720 {
721 #ifndef CTS_USES_VULKANSC
722 	const VkDevice							device				= getDevice();
723 	const DeviceInterface&					vkd					= getDeviceInterface();
724 
725 	for (deUint32 heapNdx = 0; heapNdx < (deUint32)m_heaps.size(); heapNdx++)
726 	{
727 		const Heap&	heap	= m_heaps[heapNdx];
728 
729 		for (size_t objectNdx = 0; objectNdx < heap.objects.size(); objectNdx++)
730 		{
731 			if (!!heap.objects[objectNdx].memory)
732 			{
733 				vkd.freeMemory(device, heap.objects[objectNdx].memory, (const VkAllocationCallbacks*)DE_NULL);
734 			}
735 		}
736 	}
737 #endif // CTS_USES_VULKANSC
738 }
739 
/* Performs one random vkAllocateMemory() / vkFreeMemory() step per call.
 *
 * For the first m_opCount steps each call randomly either allocates a new
 * memory object from a non-full heap or frees one from a non-empty heap,
 * while tracking per-heap usage (capped at 1/8 of heap size) and the
 * simulated system/device memory budgets. After m_opCount steps the
 * remaining objects are freed one per call; once all heaps are empty the
 * next device-mask combination is started, and the test passes when every
 * combination has been exercised.
 *
 * Returns tcu::TestStatus::incomplete() until done, then pass.
 */
tcu::TestStatus RandomAllocFreeTestInstance::iterate (void)
{
	const VkDevice			device			= getDevice();
	const DeviceInterface&	vkd				= getDeviceInterface();
	TestLog&				log				= m_context.getTestContext().getLog();
	// On UMA systems device-local and system memory come from the same budget.
	const bool				isUMA			= m_memoryLimits.totalDeviceLocalMemory == 0;
	const VkDeviceSize		usedSysMem		= isUMA ? (m_totalDeviceMem+m_totalSystemMem) : m_totalSystemMem;
	const bool				canAllocateSys	= usedSysMem + m_allocSysMemSize + 1024 < m_memoryLimits.totalSystemMemory; // \note Always leave room for 1 KiB sys mem alloc
	const bool				canAllocateDev	= isUMA ? canAllocateSys : (m_totalDeviceMem + 16 < m_memoryLimits.totalDeviceLocalMemory);
	vector<size_t>			nonFullHeaps;	// heap indices that can still take an allocation
	vector<size_t>			nonEmptyHeaps;	// heap indices that have at least one object to free
	bool					allocateMore;

	if (m_opNdx == 0)
	{
		log << TestLog::Message << "Performing " << m_opCount << " random VkAllocMemory() / VkFreeMemory() calls before freeing all memory." << TestLog::EndMessage;
		log << TestLog::Message << "Using max 1/8 of the memory in each memory heap." << TestLog::EndMessage;
	}

	// Sort heaps based on whether allocations or frees are possible
	for (size_t heapNdx = 0; heapNdx < m_heaps.size(); ++heapNdx)
	{
		const bool	isDeviceLocal	= (m_heaps[heapNdx].heap.flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0;
		const bool	isHeapFull		= m_heaps[heapNdx].memoryUsage >= m_heaps[heapNdx].maxMemoryUsage;
		const bool	isHeapEmpty		= m_heaps[heapNdx].memoryUsage == 0;

		if (!isHeapEmpty)
			nonEmptyHeaps.push_back(heapNdx);

		// A heap is allocatable only if the matching budget (device-local or
		// system, collapsed into one on UMA) still has room.
		if (!isHeapFull && ((isUMA && canAllocateSys) ||
							(!isUMA && isDeviceLocal && canAllocateDev) ||
							(!isUMA && !isDeviceLocal && canAllocateSys)))
			nonFullHeaps.push_back(heapNdx);
	}

	if (m_opNdx >= m_opCount)
	{
		// Drain phase: free everything, then advance to the next device mask
		// combination (or pass once all combinations are done).
		if (nonEmptyHeaps.empty())
		{
			m_currentDeviceMask++;
			if (m_currentDeviceMask > m_totalDeviceMaskCombinations)
				return tcu::TestStatus::pass("Pass");
			else
			{
				m_opNdx = 0;
				return tcu::TestStatus::incomplete();
			}
		}
		else
			allocateMore = false;
	}
	else if (!nonEmptyHeaps.empty() &&
			 !nonFullHeaps.empty() &&
			 (m_memoryObjectCount < MAX_ALLOCATION_COUNT) &&
			 canAllocateSys)
		allocateMore = m_rng.getBool(); // Randomize if both operations are doable.
	else if (nonEmptyHeaps.empty())
	{
		DE_ASSERT(canAllocateSys);
		allocateMore = true; // Allocate more if there are no objects to free.
	}
	else if (nonFullHeaps.empty() || !canAllocateSys)
		allocateMore = false; // Free objects if there is no free space for new objects.
	else
	{
		// Unreachable: the cases above are exhaustive.
		allocateMore = false;
		DE_FATAL("Fail");
	}

	if (allocateMore)
	{
		// Allocation path: pick a random non-full heap, a random memory type
		// from it, and a random size within the remaining budget.
		const size_t		nonFullHeapNdx	= (size_t)(m_rng.getUint32() % (deUint32)nonFullHeaps.size());
		const size_t		heapNdx			= nonFullHeaps[nonFullHeapNdx];
		Heap&				heap			= m_heaps[heapNdx];
		const MemoryType&	memoryType		= m_rng.choose<MemoryType>(heap.types.begin(), heap.types.end());
		const bool			isDeviceLocal	= (heap.heap.flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0;
		const bool			isProtected		= memoryType.type.propertyFlags & VK_MEMORY_PROPERTY_PROTECTED_BIT;
		VkDeviceSize		maxAllocSize	= (isDeviceLocal && !isUMA)
											? de::min(heap.maxMemoryUsage - heap.memoryUsage, (VkDeviceSize)m_memoryLimits.totalDeviceLocalMemory - m_totalDeviceMem)
											: de::min(heap.maxMemoryUsage - heap.memoryUsage, (VkDeviceSize)m_memoryLimits.totalSystemMemory - usedSysMem - m_allocSysMemSize);
		const VkDeviceSize	maxProtectedAllocSize = 1 * 1024 * 1024;

		// Some implementations might have limitations on protected heap, and these
		// limitations don't show up in Vulkan queries. Use a hard coded limit for
		// allocations of arbitrarily selected size of 1MB as per Note at "Device
		// Memory Allocation" at the spec to use minimum-size allocations.
		if(isProtected)
			maxAllocSize = (maxAllocSize > maxProtectedAllocSize) ? maxProtectedAllocSize : maxAllocSize;

		const VkDeviceSize allocationSize = 1 + (m_rng.getUint64() % maxAllocSize);

		if ((allocationSize > (deUint64)(heap.maxMemoryUsage - heap.memoryUsage)) && (allocationSize != 1))
			TCU_THROW(InternalError, "Test Error: trying to allocate memory more than the available heap size.");

		// Record the object before allocating; memory handle is filled in below.
		const MemoryObject object =
		{
			(VkDeviceMemory)0,
			allocationSize,
			memoryType.type.propertyFlags
		};

		heap.objects.push_back(object);

		m_allocFlagsInfo.deviceMask = m_currentDeviceMask;
		const VkMemoryAllocateInfo alloc =
		{
			VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,												// sType
			(m_allocationMode == ALLOCATION_MODE_DEVICE_GROUP) ? &m_allocFlagsInfo : DE_NULL,	// pNext
			object.size,																		// allocationSize
			memoryType.index																	// memoryTypeIndex;
		};

		VkResult	res	= vkd.allocateMemory(device, &alloc, (const VkAllocationCallbacks*)DE_NULL, &heap.objects.back().memory);

		// Some implementations might have limitations on protected heap, and these
		// limitations don't show up in Vulkan queries. Use a hard coded threshold
		// after which out of memory is allowed as per Note at "Device Memory Allocation"
		// at the spec to support at least 80 allocations concurrently.
		if (res == VK_ERROR_OUT_OF_DEVICE_MEMORY && isProtected && m_memoryProtectedObjectCount > 80)
		{
			heap.objects.pop_back();
		}
		else
		{
			VK_CHECK(res);

			TCU_CHECK(!!heap.objects.back().memory);
			m_memoryObjectCount++;

			if (isProtected)
				m_memoryProtectedObjectCount++;

			// Update per-heap usage and the simulated budgets; every allocation
			// also consumes m_allocSysMemSize of host memory for bookkeeping.
			heap.memoryUsage										+= allocationSize;
			(isDeviceLocal ? m_totalDeviceMem : m_totalSystemMem)	+= allocationSize;
			m_totalSystemMem										+= m_allocSysMemSize;
		}
	}
	else
	{
		// Free path: pick a random object from a random non-empty heap.
		const size_t		nonEmptyHeapNdx	= (size_t)(m_rng.getUint32() % (deUint32)nonEmptyHeaps.size());
		const size_t		heapNdx			= nonEmptyHeaps[nonEmptyHeapNdx];
		Heap&				heap			= m_heaps[heapNdx];
		const size_t		memoryObjectNdx	= m_rng.getUint32() % heap.objects.size();
		MemoryObject&		memoryObject	= heap.objects[memoryObjectNdx];
		const bool			isDeviceLocal	= (heap.heap.flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0;

#ifndef CTS_USES_VULKANSC
		vkd.freeMemory(device, memoryObject.memory, (const VkAllocationCallbacks*)DE_NULL);
#endif
		memoryObject.memory = (VkDeviceMemory)0;
		m_memoryObjectCount--;

		if (memoryObject.propertyFlags & VK_MEMORY_PROPERTY_PROTECTED_BIT)
		{
			m_memoryProtectedObjectCount--;
			memoryObject.propertyFlags = (VkMemoryPropertyFlags)0;
		}

		// Reverse the bookkeeping done on allocation.
		heap.memoryUsage										-= memoryObject.size;
		(isDeviceLocal ? m_totalDeviceMem : m_totalSystemMem)	-= memoryObject.size;
		m_totalSystemMem										-= m_allocSysMemSize;

		// Swap-and-pop removal; object order within a heap is irrelevant.
		heap.objects[memoryObjectNdx] = heap.objects.back();
		heap.objects.pop_back();

		DE_ASSERT(heap.memoryUsage == 0 || !heap.objects.empty());
	}

	m_opNdx++;
	return tcu::TestStatus::incomplete();
}
911 #endif // CTS_USES_VULKANSC
912 
913 
914 } // anonymous
915 
createAllocationTestsCommon(tcu::TestContext & testCtx,AllocationMode allocationMode)916 tcu::TestCaseGroup* createAllocationTestsCommon (tcu::TestContext& testCtx, AllocationMode allocationMode)
917 {
918 	const char* name = [&]{
919 		switch (allocationMode)
920 		{
921 			case ALLOCATION_MODE_DEFAULT:
922 				return "allocation";
923 			case ALLOCATION_MODE_DEVICE_GROUP:
924 				return "device_group_allocation";
925 			case ALLOCATION_MODE_PAGEABLE:
926 				return "pageable_allocation";
927 			default:
928 				TCU_THROW(InternalError, "Unknown allocation mode");
929 		}
930 	} ();
931 	de::MovePtr<tcu::TestCaseGroup> group (new tcu::TestCaseGroup(testCtx, name));
932 
933 	const VkDeviceSize	KiB	= 1024;
934 	const VkDeviceSize	MiB	= 1024 * KiB;
935 
936 	const struct
937 	{
938 		const char* const	str;
939 		VkDeviceSize		size;
940 	} allocationSizes[] =
941 	{
942 		{   "64", 64 },
943 		{  "128", 128 },
944 		{  "256", 256 },
945 		{  "512", 512 },
946 		{ "1KiB", 1*KiB },
947 		{ "4KiB", 4*KiB },
948 		{ "8KiB", 8*KiB },
949 		{ "1MiB", 1*MiB }
950 	};
951 
952 	const int allocationPercents[] =
953 	{
954 		1
955 	};
956 
957 	const int allocationCounts[] =
958 	{
959 		1, 10, 100, 1000, -1
960 	};
961 
962 	const struct
963 	{
964 		const char* const		str;
965 		const TestConfig::Order	order;
966 	} orders[] =
967 	{
968 		{ "forward",	TestConfig::ALLOC_FREE },
969 		{ "reverse",	TestConfig::ALLOC_REVERSE_FREE },
970 		{ "mixed",		TestConfig::MIXED_ALLOC_FREE }
971 	};
972 
973 	{
974 		de::MovePtr<tcu::TestCaseGroup>	basicGroup(new tcu::TestCaseGroup(testCtx, "basic"));
975 
976 		for (size_t allocationSizeNdx = 0; allocationSizeNdx < DE_LENGTH_OF_ARRAY(allocationSizes); allocationSizeNdx++)
977 		{
978 			const VkDeviceSize				allocationSize		= allocationSizes[allocationSizeNdx].size;
979 			const char* const				allocationSizeName	= allocationSizes[allocationSizeNdx].str;
980 			de::MovePtr<tcu::TestCaseGroup>	sizeGroup			(new tcu::TestCaseGroup(testCtx, ("size_" + string(allocationSizeName)).c_str()));
981 
982 			for (size_t orderNdx = 0; orderNdx < DE_LENGTH_OF_ARRAY(orders); orderNdx++)
983 			{
984 				const TestConfig::Order			order				= orders[orderNdx].order;
985 				const char* const				orderName			= orders[orderNdx].str;
986 				de::MovePtr<tcu::TestCaseGroup>	orderGroup			(new tcu::TestCaseGroup(testCtx, orderName));
987 
988 				for (size_t allocationCountNdx = 0; allocationCountNdx < DE_LENGTH_OF_ARRAY(allocationCounts); allocationCountNdx++)
989 				{
990 					const int allocationCount = allocationCounts[allocationCountNdx];
991 
992 					if (allocationCount != -1 && allocationCount * allocationSize > 50 * MiB)
993 						continue;
994 
995 					TestConfig config;
996 
997 					config.memorySize				= allocationSize;
998 					config.order					= order;
999 					config.allocationMode			= allocationMode;
1000 					if (allocationCount == -1)
1001 					{
1002 						if (allocationSize < 4096)
1003 							continue;
1004 
1005 						config.memoryAllocationCount	= de::min((deUint32)(50 * MiB / allocationSize), (deUint32)MAX_ALLOCATION_COUNT);
1006 
1007 						if (config.memoryAllocationCount == 0
1008 							|| config.memoryAllocationCount == 1
1009 							|| config.memoryAllocationCount == 10
1010 							|| config.memoryAllocationCount == 100
1011 							|| config.memoryAllocationCount == 1000)
1012 						continue;
1013 					}
1014 					else
1015 						config.memoryAllocationCount	= allocationCount;
1016 
1017 					orderGroup->addChild(new InstanceFactory1<AllocateFreeTestInstance, TestConfig>(testCtx, tcu::NODETYPE_SELF_VALIDATE, "count_" + de::toString(config.memoryAllocationCount), config));
1018 				}
1019 
1020 				sizeGroup->addChild(orderGroup.release());
1021 			}
1022 
1023 			basicGroup->addChild(sizeGroup.release());
1024 		}
1025 
1026 		for (size_t allocationPercentNdx = 0; allocationPercentNdx < DE_LENGTH_OF_ARRAY(allocationPercents); allocationPercentNdx++)
1027 		{
1028 			const int						allocationPercent	= allocationPercents[allocationPercentNdx];
1029 			de::MovePtr<tcu::TestCaseGroup>	percentGroup		(new tcu::TestCaseGroup(testCtx, ("percent_" + de::toString(allocationPercent)).c_str()));
1030 
1031 			for (size_t orderNdx = 0; orderNdx < DE_LENGTH_OF_ARRAY(orders); orderNdx++)
1032 			{
1033 				const TestConfig::Order			order				= orders[orderNdx].order;
1034 				const char* const				orderName			= orders[orderNdx].str;
1035 				de::MovePtr<tcu::TestCaseGroup>	orderGroup			(new tcu::TestCaseGroup(testCtx, orderName));
1036 
1037 				for (size_t allocationCountNdx = 0; allocationCountNdx < DE_LENGTH_OF_ARRAY(allocationCounts); allocationCountNdx++)
1038 				{
1039 					const int allocationCount = allocationCounts[allocationCountNdx];
1040 
1041 					if ((allocationCount != -1) && ((float)allocationCount * (float)allocationPercent >= 1.00f / 8.00f))
1042 						continue;
1043 
1044 					TestConfig config;
1045 
1046 					config.memoryPercentage			= (float)allocationPercent / 100.0f;
1047 					config.order					= order;
1048 					config.allocationMode			= allocationMode;
1049 
1050 					if (allocationCount == -1)
1051 					{
1052 						config.memoryAllocationCount	= de::min((deUint32)((1.00f / 8.00f) / ((float)allocationPercent / 100.0f)), (deUint32)MAX_ALLOCATION_COUNT);
1053 
1054 						if (config.memoryAllocationCount == 0
1055 							|| config.memoryAllocationCount == 1
1056 							|| config.memoryAllocationCount == 10
1057 							|| config.memoryAllocationCount == 100
1058 							|| config.memoryAllocationCount == 1000)
1059 						continue;
1060 					}
1061 					else
1062 						config.memoryAllocationCount	= allocationCount;
1063 
1064 					orderGroup->addChild(new InstanceFactory1<AllocateFreeTestInstance, TestConfig>(testCtx, tcu::NODETYPE_SELF_VALIDATE, "count_" + de::toString(config.memoryAllocationCount), config));
1065 				}
1066 
1067 				percentGroup->addChild(orderGroup.release());
1068 			}
1069 
1070 			basicGroup->addChild(percentGroup.release());
1071 		}
1072 
1073 		group->addChild(basicGroup.release());
1074 	}
1075 
1076 #ifndef CTS_USES_VULKANSC
1077 // RandomAllocFreeTestInstance test uses VkAllocationCallbacks and in Vulkan SC VkAllocationCallbacks must be NULL
1078 	{
1079 		const deUint32					caseCount	= 100;
1080 		de::MovePtr<tcu::TestCaseGroup>	randomGroup	(new tcu::TestCaseGroup(testCtx, "random"));
1081 
1082 		for (deUint32 caseNdx = 0; caseNdx < caseCount; caseNdx++)
1083 		{
1084 			TestConfigRandom config(deInt32Hash(caseNdx ^ 32480), allocationMode);
1085 
1086 			randomGroup->addChild(new InstanceFactory1<RandomAllocFreeTestInstance, TestConfigRandom>(testCtx, tcu::NODETYPE_SELF_VALIDATE, de::toString(caseNdx), config));
1087 		}
1088 
1089 		group->addChild(randomGroup.release());
1090 	}
1091 #endif // CTS_USES_VULKANSC
1092 
1093 	return group.release();
1094 }
1095 
// Creates the "allocation" test group (default allocation mode).
tcu::TestCaseGroup* createAllocationTests (tcu::TestContext& testCtx)
{
	return createAllocationTestsCommon(testCtx, ALLOCATION_MODE_DEFAULT);
}
1100 
// Creates the "device_group_allocation" test group (allocations carry a
// VkMemoryAllocateFlagsInfo device mask).
tcu::TestCaseGroup* createDeviceGroupAllocationTests (tcu::TestContext& testCtx)
{
	return createAllocationTestsCommon(testCtx, ALLOCATION_MODE_DEVICE_GROUP);
}
1105 
// Creates the "pageable_allocation" test group (pageable device-local memory).
tcu::TestCaseGroup* createPageableAllocationTests (tcu::TestContext& testCtx)
{
	return createAllocationTestsCommon(testCtx, ALLOCATION_MODE_PAGEABLE);
}
1110 
1111 } // memory
1112 } // vkt
1113