/*-------------------------------------------------------------------------
 * Vulkan Conformance Tests
 * ------------------------
 *
 * Copyright (c) 2015 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*!
 * \file
 * \brief Simple memory allocation tests.
 *//*--------------------------------------------------------------------*/

#include "vktMemoryAllocationTests.hpp"

#include "vktTestCaseUtil.hpp"
#include "vktCustomInstancesDevices.hpp"

#include "tcuMaybe.hpp"
#include "tcuResultCollector.hpp"
#include "tcuTestLog.hpp"
#include "tcuPlatform.hpp"
#include "tcuCommandLine.hpp"

#include "vkPlatform.hpp"
#include "vkStrUtil.hpp"
#include "vkRef.hpp"
#include "vkDeviceUtil.hpp"
#include "vkQueryUtil.hpp"
#include "vkRefUtil.hpp"
#include "vkAllocationCallbackUtil.hpp"

#include "deUniquePtr.hpp"
#include "deStringUtil.hpp"
#include "deRandom.hpp"

using tcu::Maybe;
using tcu::TestLog;

using std::string;
using std::vector;

using namespace vk;

namespace vkt
{
namespace memory
{
namespace
{

template<typename T>
T roundUpToMultiple (const T& a, const T& b)
{
    return b * (a / b + (a % b != 0 ? 1 : 0));
}

enum
{
    // The minimum required value of the maxMemoryAllocationCount limit is 4096. Use 4000 to
    // leave headroom for possible memory allocations made by layers etc.
    MAX_ALLOCATION_COUNT = 4000
};

struct TestConfig
{
    enum Order
    {
        ALLOC_FREE,
        ALLOC_REVERSE_FREE,
        MIXED_ALLOC_FREE,
        ORDER_LAST
    };

    Maybe<VkDeviceSize> memorySize;
    Maybe<float>        memoryPercentage;
    deUint32            memoryAllocationCount;
    Order               order;
    bool                useDeviceGroups;

    TestConfig (void)
        : memoryAllocationCount ((deUint32)-1)
        , order                 (ORDER_LAST)
        , useDeviceGroups       (false)
    {
    }
};

struct TestConfigRandom
{
    const deUint32 seed;
    const bool     useDeviceGroups;

    TestConfigRandom (const deUint32 _seed, const bool _useDeviceGroups)
        : seed            (_seed)
        , useDeviceGroups (_useDeviceGroups)
    {
    }
};

template<typename T>
T roundUpToNextMultiple (T value, T multiple)
{
    if (value % multiple == 0)
        return value;
    else
        return value + multiple - (value % multiple);
}

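// Base fixture for the allocation tests. Creates either a plain custom device (with protected
// memory enabled when supported) or a device group device, and exposes the matching device
// handle and device interface to the derived test instances.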
class BaseAllocateTestInstance : public TestInstance
{
public:
    BaseAllocateTestInstance (Context& context, bool useDeviceGroups)
        : TestInstance              (context)
        , m_useDeviceGroups         (useDeviceGroups)
        , m_subsetAllocationAllowed (false)
        , m_numPhysDevices          (1)
        , m_memoryProperties        (getPhysicalDeviceMemoryProperties(context.getInstanceInterface(), context.getPhysicalDevice()))
    {
        if (m_useDeviceGroups)
            createDeviceGroup();
        else
            createTestDevice();

        m_allocFlagsInfo.sType      = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR;
        m_allocFlagsInfo.pNext      = DE_NULL;
        m_allocFlagsInfo.flags      = VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT;
        m_allocFlagsInfo.deviceMask = 0;
    }

    void createTestDevice  (void);
    void createDeviceGroup (void);
    const vk::DeviceInterface& getDeviceInterface (void) { return m_useDeviceGroups ? *m_deviceDriver : m_context.getDeviceInterface(); }
    vk::VkDevice               getDevice          (void) { return m_logicalDevice.get(); }

protected:
    bool                             m_useDeviceGroups;
    bool                             m_subsetAllocationAllowed;
    VkMemoryAllocateFlagsInfo        m_allocFlagsInfo;
    deUint32                         m_numPhysDevices;
    VkPhysicalDeviceMemoryProperties m_memoryProperties;

private:
    CustomInstance                m_deviceGroupInstance;
    vk::Move<vk::VkDevice>        m_logicalDevice;
    de::MovePtr<vk::DeviceDriver> m_deviceDriver;
};

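// Create a custom device for the non-device-group tests. If the physical device supports the
// protected memory feature, it is enabled and the queue is created with the protected bit set.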
void BaseAllocateTestInstance::createTestDevice (void)
{
    VkInstance                     instance         (m_context.getInstance());
    InstanceDriver                 instanceDriver   (m_context.getPlatformInterface(), instance);
    const VkPhysicalDeviceFeatures deviceFeatures   = getPhysicalDeviceFeatures(instanceDriver, m_context.getPhysicalDevice());
    const float                    queuePriority    = 1.0f;
    deUint32                       queueFamilyIndex = 0;
    bool                           protMemSupported = false;

    VkPhysicalDeviceProtectedMemoryFeatures protectedMemoryFeature =
    {
        VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES,    // VkStructureType sType
        DE_NULL,                                                        // const void* pNext
        VK_FALSE                                                        // VkBool32 protectedMemory;
    };

    VkPhysicalDeviceFeatures features;
    deMemset(&features, 0, sizeof(vk::VkPhysicalDeviceFeatures));

    VkPhysicalDeviceFeatures2 features2 =
    {
        VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2,   // VkStructureType sType
        &protectedMemoryFeature,                        // const void* pNext
        features                                        // VkPhysicalDeviceFeatures features
    };

    // Check if the physical device supports the protected memory feature
    instanceDriver.getPhysicalDeviceFeatures2(m_context.getPhysicalDevice(), &features2);
    protMemSupported = ((VkPhysicalDeviceProtectedMemoryFeatures*)(features2.pNext))->protectedMemory;

    VkDeviceQueueCreateFlags queueCreateFlags = protMemSupported ? (vk::VkDeviceQueueCreateFlags)vk::VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT : 0u;

    VkDeviceQueueCreateInfo queueInfo =
    {
        VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,     // VkStructureType sType;
        DE_NULL,                                        // const void* pNext;
        queueCreateFlags,                               // VkDeviceQueueCreateFlags flags;
        queueFamilyIndex,                               // deUint32 queueFamilyIndex;
        1u,                                             // deUint32 queueCount;
        &queuePriority                                  // const float* pQueuePriorities;
    };

    const VkDeviceCreateInfo deviceInfo =
    {
        VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,           // VkStructureType sType;
        protMemSupported ? &features2 : DE_NULL,        // const void* pNext;
        (VkDeviceCreateFlags)0,                         // VkDeviceCreateFlags flags;
        1u,                                             // uint32_t queueCreateInfoCount;
        &queueInfo,                                     // const VkDeviceQueueCreateInfo* pQueueCreateInfos;
        0u,                                             // uint32_t enabledLayerCount;
        DE_NULL,                                        // const char* const* ppEnabledLayerNames;
        0u,                                             // uint32_t enabledExtensionCount;
        DE_NULL,                                        // const char* const* ppEnabledExtensionNames;
        protMemSupported ? DE_NULL : &deviceFeatures    // const VkPhysicalDeviceFeatures* pEnabledFeatures;
    };

    m_logicalDevice = createCustomDevice(m_context.getTestContext().getCommandLine().isValidationEnabled(), m_context.getPlatformInterface(), instance, instanceDriver, m_context.getPhysicalDevice(), &deviceInfo);
}

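// Create an instance with VK_KHR_device_group_creation and a logical device spanning all
// physical devices in the selected device group, then set up the device driver and memory
// properties for the chosen physical device.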
void BaseAllocateTestInstance::createDeviceGroup (void)
{
    const tcu::CommandLine&        cmdLine            = m_context.getTestContext().getCommandLine();
    const deUint32                 devGroupIdx        = cmdLine.getVKDeviceGroupId() - 1;
    const deUint32                 physDeviceIdx      = cmdLine.getVKDeviceId() - 1;
    const float                    queuePriority      = 1.0f;
    deUint32                       queueFamilyIndex   = 0;
    const std::vector<std::string> requiredExtensions (1, "VK_KHR_device_group_creation");
    m_deviceGroupInstance = createCustomInstanceWithExtensions(m_context, requiredExtensions);
    std::vector<VkPhysicalDeviceGroupProperties> devGroupProperties = enumeratePhysicalDeviceGroups(m_context.getInstanceInterface(), m_deviceGroupInstance);
    m_numPhysDevices          = devGroupProperties[devGroupIdx].physicalDeviceCount;
    m_subsetAllocationAllowed = devGroupProperties[devGroupIdx].subsetAllocation;
    if (m_numPhysDevices < 2)
        TCU_THROW(NotSupportedError, "Device group allocation tests not supported with 1 physical device");
    std::vector<const char*> deviceExtensions;

    if (!isCoreDeviceExtension(m_context.getUsedApiVersion(), "VK_KHR_device_group"))
        deviceExtensions.push_back("VK_KHR_device_group");

    VkDeviceGroupDeviceCreateInfo deviceGroupInfo =
    {
        VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO_KHR,  // sType
        DE_NULL,                                                // pNext
        devGroupProperties[devGroupIdx].physicalDeviceCount,    // physicalDeviceCount
        devGroupProperties[devGroupIdx].physicalDevices         // physicalDevices
    };
    VkInstance     instance       (m_useDeviceGroups ? m_deviceGroupInstance : m_context.getInstance());
    InstanceDriver instanceDriver (m_context.getPlatformInterface(), instance);
    const VkPhysicalDeviceFeatures deviceFeatures = getPhysicalDeviceFeatures(instanceDriver, deviceGroupInfo.pPhysicalDevices[physDeviceIdx]);

    const std::vector<VkQueueFamilyProperties> queueProps = getPhysicalDeviceQueueFamilyProperties(instanceDriver, devGroupProperties[devGroupIdx].physicalDevices[physDeviceIdx]);
    for (size_t queueNdx = 0; queueNdx < queueProps.size(); queueNdx++)
    {
        if (queueProps[queueNdx].queueFlags & VK_QUEUE_COMPUTE_BIT)
            queueFamilyIndex = (deUint32)queueNdx;
    }

    VkDeviceQueueCreateInfo queueInfo =
    {
        VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,                 // VkStructureType sType;
        DE_NULL,                                                    // const void* pNext;
        (VkDeviceQueueCreateFlags)0u,                               // VkDeviceQueueCreateFlags flags;
        queueFamilyIndex,                                           // deUint32 queueFamilyIndex;
        1u,                                                         // deUint32 queueCount;
        &queuePriority                                              // const float* pQueuePriorities;
    };

    const VkDeviceCreateInfo deviceInfo =
    {
        VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,                       // VkStructureType sType;
        m_useDeviceGroups ? &deviceGroupInfo : DE_NULL,             // const void* pNext;
        (VkDeviceCreateFlags)0,                                     // VkDeviceCreateFlags flags;
        1u,                                                         // uint32_t queueCreateInfoCount;
        &queueInfo,                                                 // const VkDeviceQueueCreateInfo* pQueueCreateInfos;
        0u,                                                         // uint32_t enabledLayerCount;
        DE_NULL,                                                    // const char* const* ppEnabledLayerNames;
        deUint32(deviceExtensions.size()),                          // uint32_t enabledExtensionCount;
        deviceExtensions.empty() ? DE_NULL : &deviceExtensions[0],  // const char* const* ppEnabledExtensionNames;
        &deviceFeatures,                                            // const VkPhysicalDeviceFeatures* pEnabledFeatures;
    };

    m_logicalDevice    = createCustomDevice(m_context.getTestContext().getCommandLine().isValidationEnabled(), m_context.getPlatformInterface(), instance, instanceDriver, deviceGroupInfo.pPhysicalDevices[physDeviceIdx], &deviceInfo);
    m_deviceDriver     = de::MovePtr<DeviceDriver>(new DeviceDriver(m_context.getPlatformInterface(), instance, *m_logicalDevice));
    m_memoryProperties = getPhysicalDeviceMemoryProperties(instanceDriver, deviceGroupInfo.pPhysicalDevices[physDeviceIdx]);
}

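// Allocates m_config.memoryAllocationCount memory objects of a fixed size (or a percentage of
// the heap size) from one memory type and frees them in the order given by the configuration.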
class AllocateFreeTestInstance : public BaseAllocateTestInstance
{
public:
    AllocateFreeTestInstance (Context& context, const TestConfig config)
        : BaseAllocateTestInstance (context, config.useDeviceGroups)
        , m_config                 (config)
        , m_result                 (m_context.getTestContext().getLog())
        , m_memoryTypeIndex        (0)
        , m_memoryLimits           (getMemoryLimits(context.getTestContext().getPlatform().getVulkanPlatform()))
    {
        DE_ASSERT(!!m_config.memorySize != !!m_config.memoryPercentage);
    }

    tcu::TestStatus iterate (void);

private:
    const TestConfig           m_config;
    tcu::ResultCollector       m_result;
    deUint32                   m_memoryTypeIndex;
    const PlatformMemoryLimits m_memoryLimits;
};

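// Each call to iterate() exercises one memory type; the test keeps returning incomplete until
// every memory type reported by the implementation has been covered.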
tcu::TestStatus AllocateFreeTestInstance::iterate (void)
{
    TestLog&               log              = m_context.getTestContext().getLog();
    const VkDevice         device           = getDevice();
    const DeviceInterface& vkd              = getDeviceInterface();
    VkMemoryRequirements   memReqs;
    const deUint32         queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();
    VkBufferCreateFlags    createFlags      = (vk::VkBufferCreateFlagBits)0u;
    VkBufferUsageFlags     usageFlags       = vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    VkSharingMode          sharingMode      = vk::VK_SHARING_MODE_EXCLUSIVE;
    Move<VkBuffer>         buffer;

    if ((m_memoryProperties.memoryTypes[m_memoryTypeIndex].propertyFlags & vk::VK_MEMORY_PROPERTY_PROTECTED_BIT) == vk::VK_MEMORY_PROPERTY_PROTECTED_BIT)
    {
        createFlags |= vk::VK_BUFFER_CREATE_PROTECTED_BIT;
    }

    DE_ASSERT(m_config.memoryAllocationCount <= MAX_ALLOCATION_COUNT);

    if (m_memoryTypeIndex == 0)
    {
        log << TestLog::Message << "Memory allocation count: " << m_config.memoryAllocationCount << TestLog::EndMessage;
        log << TestLog::Message << "Single allocation size: " << (m_config.memorySize ? de::toString(*m_config.memorySize) : de::toString(100.0f * (*m_config.memoryPercentage)) + " percent of the heap size.") << TestLog::EndMessage;

        if (m_config.order == TestConfig::ALLOC_REVERSE_FREE)
            log << TestLog::Message << "Memory is freed in reversed order. " << TestLog::EndMessage;
        else if (m_config.order == TestConfig::ALLOC_FREE)
            log << TestLog::Message << "Memory is freed in same order as allocated. " << TestLog::EndMessage;
        else if (m_config.order == TestConfig::MIXED_ALLOC_FREE)
            log << TestLog::Message << "Memory is freed right after allocation. " << TestLog::EndMessage;
        else
            DE_FATAL("Unknown allocation order");
    }

    try
    {
        const VkMemoryType memoryType = m_memoryProperties.memoryTypes[m_memoryTypeIndex];
        const VkMemoryHeap memoryHeap = m_memoryProperties.memoryHeaps[memoryType.heapIndex];

        // Create a buffer to get the required size
        {
            const VkDeviceSize bufferSize = m_config.memorySize ? *m_config.memorySize : (VkDeviceSize)(*m_config.memoryPercentage * (float)memoryHeap.size);

            VkBufferCreateInfo bufferParams =
            {
                VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,       // VkStructureType sType;
                DE_NULL,                                    // const void* pNext;
                createFlags,                                // VkBufferCreateFlags flags;
                bufferSize,                                 // VkDeviceSize size;
                usageFlags,                                 // VkBufferUsageFlags usage;
                sharingMode,                                // VkSharingMode sharingMode;
                1u,                                         // uint32_t queueFamilyIndexCount;
                &queueFamilyIndex,                          // const uint32_t* pQueueFamilyIndices;
            };

            buffer = createBuffer(vkd, device, &bufferParams);
            vkd.getBufferMemoryRequirements(device, *buffer, &memReqs);
        }

        const VkDeviceSize     allocationSize          = (m_config.memorySize ? memReqs.size : (VkDeviceSize)(*m_config.memoryPercentage * (float)memoryHeap.size));
        const VkDeviceSize     roundedUpAllocationSize = roundUpToNextMultiple(allocationSize, m_memoryLimits.deviceMemoryAllocationGranularity);
        vector<VkDeviceMemory> memoryObjects           (m_config.memoryAllocationCount, (VkDeviceMemory)0);

        log << TestLog::Message << "Memory type index: " << m_memoryTypeIndex << TestLog::EndMessage;

        if (memoryType.heapIndex >= m_memoryProperties.memoryHeapCount)
            m_result.fail("Invalid heap index defined for memory type.");

        {
            log << TestLog::Message << "Memory type: " << memoryType << TestLog::EndMessage;
            log << TestLog::Message << "Memory heap: " << memoryHeap << TestLog::EndMessage;

            if (roundedUpAllocationSize * m_config.memoryAllocationCount > memoryHeap.size)
                TCU_THROW(NotSupportedError, "Memory heap doesn't have enough memory.");

#if (DE_PTR_SIZE == 4)
            // For 32-bit binaries we cap the total host visible allocations to 1.5 GiB to
            // avoid exhausting CPU virtual address space and reporting a false negative result.
            if ((memoryType.propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) &&
                allocationSize * m_config.memoryAllocationCount * (m_subsetAllocationAllowed ? 1 : m_numPhysDevices) >= 1610612736)

                log << TestLog::Message << " Skipping: Not enough CPU virtual address space for all host visible allocations." << TestLog::EndMessage;
            else
            {
#else
            {
#endif

                try
                {
                    const deUint32 totalDeviceMaskCombinations = m_subsetAllocationAllowed ? (1 << m_numPhysDevices) - 1 : 1;
                    for (deUint32 deviceMask = 1; deviceMask <= totalDeviceMaskCombinations; deviceMask++)
                    {
                        // If subset allocation is not allowed, allocate on all physical devices at once and run the loop only once.
                        if (!m_subsetAllocationAllowed)
                            deviceMask = (1 << m_numPhysDevices) - 1;
                        m_allocFlagsInfo.deviceMask = deviceMask;

                        if (m_config.order == TestConfig::ALLOC_FREE || m_config.order == TestConfig::ALLOC_REVERSE_FREE)
                        {
                            for (size_t ndx = 0; ndx < m_config.memoryAllocationCount; ndx++)
                            {
                                VkMemoryAllocateInfo alloc =
                                {
                                    VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,             // sType
                                    m_useDeviceGroups ? &m_allocFlagsInfo : DE_NULL,    // pNext
                                    allocationSize,                                     // allocationSize
                                    m_memoryTypeIndex                                   // memoryTypeIndex;
                                };

                                VkResult res = vkd.allocateMemory(device, &alloc, (const VkAllocationCallbacks*)DE_NULL, &memoryObjects[ndx]);

                                // Some implementations might have limitations on protected heap, and these limitations
                                // don't show up in Vulkan queries. Use a hard coded threshold after which out of memory
                                // is allowed.
                                if (res == VK_ERROR_OUT_OF_DEVICE_MEMORY && memoryType.propertyFlags & vk::VK_MEMORY_PROPERTY_PROTECTED_BIT && ndx > 80)
                                    break;

                                VK_CHECK(res);

                                TCU_CHECK(!!memoryObjects[ndx]);
                            }

                            if (m_config.order == TestConfig::ALLOC_FREE)
                            {
                                for (size_t ndx = 0; ndx < m_config.memoryAllocationCount; ndx++)
                                {
                                    const VkDeviceMemory mem = memoryObjects[memoryObjects.size() - 1 - ndx];

                                    if (!!mem)
                                    {
                                        vkd.freeMemory(device, mem, (const VkAllocationCallbacks*)DE_NULL);
                                        memoryObjects[memoryObjects.size() - 1 - ndx] = (VkDeviceMemory)0;
                                    }
                                }
                            }
                            else
                            {
                                for (size_t ndx = 0; ndx < m_config.memoryAllocationCount; ndx++)
                                {
                                    const VkDeviceMemory mem = memoryObjects[ndx];

                                    if (!!mem)
                                    {
                                        vkd.freeMemory(device, mem, (const VkAllocationCallbacks*)DE_NULL);
                                        memoryObjects[ndx] = (VkDeviceMemory)0;
                                    }
                                }
                            }
                        }
                        else
                        {
                            for (size_t ndx = 0; ndx < m_config.memoryAllocationCount; ndx++)
                            {
                                const VkMemoryAllocateInfo alloc =
                                {
                                    VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,             // sType
                                    m_useDeviceGroups ? &m_allocFlagsInfo : DE_NULL,    // pNext
                                    allocationSize,                                     // allocationSize
                                    m_memoryTypeIndex                                   // memoryTypeIndex;
                                };

                                VK_CHECK(vkd.allocateMemory(device, &alloc, (const VkAllocationCallbacks*)DE_NULL, &memoryObjects[ndx]));
                                TCU_CHECK(!!memoryObjects[ndx]);

                                vkd.freeMemory(device, memoryObjects[ndx], (const VkAllocationCallbacks*)DE_NULL);
                                memoryObjects[ndx] = (VkDeviceMemory)0;
                            }
                        }
                    }
                }
                catch (...)
                {
                    for (size_t ndx = 0; ndx < m_config.memoryAllocationCount; ndx++)
                    {
                        const VkDeviceMemory mem = memoryObjects[ndx];

                        if (!!mem)
                        {
                            vkd.freeMemory(device, mem, (const VkAllocationCallbacks*)DE_NULL);
                            memoryObjects[ndx] = (VkDeviceMemory)0;
                        }
                    }

                    throw;
                }
            }
        }
    }
    catch (const tcu::TestError& error)
    {
        m_result.fail(error.getMessage());
    }

    m_memoryTypeIndex++;

    if (m_memoryTypeIndex < m_memoryProperties.memoryTypeCount)
        return tcu::TestStatus::incomplete();
    else
        return tcu::TestStatus(m_result.getResult(), m_result.getMessage());
}

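// Estimate the host memory footprint of one vkAllocateMemory() call by performing a minimal
// 1 B allocation with a recording allocation callback and measuring the live system allocations.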
size_t computeDeviceMemorySystemMemFootprint (const DeviceInterface& vk, VkDevice device)
{
    AllocationCallbackRecorder callbackRecorder (getSystemAllocator());

    {
        // 1 B allocation from memory type 0
        const VkMemoryAllocateInfo allocInfo =
        {
            VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
            DE_NULL,
            1u,
            0u,
        };
        const Unique<VkDeviceMemory>        memory (allocateMemory(vk, device, &allocInfo, callbackRecorder.getCallbacks()));
        AllocationCallbackValidationResults validateRes;

        validateAllocationCallbacks(callbackRecorder, &validateRes);

        TCU_CHECK(validateRes.violations.empty());

        return getLiveSystemAllocationTotal(validateRes)
               + sizeof(void*) * validateRes.liveAllocations.size(); // allocation overhead
    }
}

struct MemoryType
{
    deUint32     index;
    VkMemoryType type;
};

struct MemoryObject
{
    VkDeviceMemory        memory;
    VkDeviceSize          size;
    VkMemoryPropertyFlags propertyFlags;
};

struct Heap
{
    VkMemoryHeap         heap;
    VkDeviceSize         memoryUsage;
    VkDeviceSize         maxMemoryUsage;
    vector<MemoryType>   types;
    vector<MemoryObject> objects;
};

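// Performs a pseudo-random sequence of allocations and frees across all heaps, keeping each
// heap's usage below a fraction of its size and tracking the system memory overhead as well.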
class RandomAllocFreeTestInstance : public BaseAllocateTestInstance
{
public:
    RandomAllocFreeTestInstance  (Context& context, TestConfigRandom config);
    ~RandomAllocFreeTestInstance (void);

    tcu::TestStatus iterate (void);

private:
    const size_t               m_opCount;
    const size_t               m_allocSysMemSize;
    const PlatformMemoryLimits m_memoryLimits;
    const deUint32             m_totalDeviceMaskCombinations;

    deUint32     m_memoryObjectCount;
    deUint32     m_memoryProtectedObjectCount;
    deUint32     m_currentDeviceMask;
    size_t       m_opNdx;
    de::Random   m_rng;
    vector<Heap> m_heaps;
    VkDeviceSize m_totalSystemMem;
    VkDeviceSize m_totalDeviceMem;
};

RandomAllocFreeTestInstance::RandomAllocFreeTestInstance (Context& context, TestConfigRandom config)
    : BaseAllocateTestInstance      (context, config.useDeviceGroups)
    , m_opCount                     (128)
    , m_allocSysMemSize             (computeDeviceMemorySystemMemFootprint(getDeviceInterface(), context.getDevice())
                                     + sizeof(MemoryObject))
    , m_memoryLimits                (getMemoryLimits(context.getTestContext().getPlatform().getVulkanPlatform()))
    , m_totalDeviceMaskCombinations (m_subsetAllocationAllowed ? (1 << m_numPhysDevices) - 1 : 1)
    , m_memoryObjectCount           (0)
    , m_memoryProtectedObjectCount  (0)
    , m_currentDeviceMask           (m_subsetAllocationAllowed ? 1 : (1 << m_numPhysDevices) - 1)
    , m_opNdx                       (0)
    , m_rng                         (config.seed)
    , m_totalSystemMem              (0)
    , m_totalDeviceMem              (0)
{
    TCU_CHECK(m_memoryProperties.memoryHeapCount <= 32);
    TCU_CHECK(m_memoryProperties.memoryTypeCount <= 32);

    m_heaps.resize(m_memoryProperties.memoryHeapCount);

    for (deUint32 heapNdx = 0; heapNdx < m_memoryProperties.memoryHeapCount; heapNdx++)
    {
        m_heaps[heapNdx].heap           = m_memoryProperties.memoryHeaps[heapNdx];
        m_heaps[heapNdx].memoryUsage    = 0;
        m_heaps[heapNdx].maxMemoryUsage = m_heaps[heapNdx].heap.size / 8; /* Use at maximum 12.5% of heap */

        m_heaps[heapNdx].objects.reserve(100);
    }

    for (deUint32 memoryTypeNdx = 0; memoryTypeNdx < m_memoryProperties.memoryTypeCount; memoryTypeNdx++)
    {
        const MemoryType type =
        {
            memoryTypeNdx,
            m_memoryProperties.memoryTypes[memoryTypeNdx]
        };

        TCU_CHECK(type.type.heapIndex < m_memoryProperties.memoryHeapCount);

        m_heaps[type.type.heapIndex].types.push_back(type);
    }
}

RandomAllocFreeTestInstance::~RandomAllocFreeTestInstance (void)
{
    const VkDevice         device = getDevice();
    const DeviceInterface& vkd    = getDeviceInterface();

    for (deUint32 heapNdx = 0; heapNdx < (deUint32)m_heaps.size(); heapNdx++)
    {
        const Heap& heap = m_heaps[heapNdx];

        for (size_t objectNdx = 0; objectNdx < heap.objects.size(); objectNdx++)
        {
            if (!!heap.objects[objectNdx].memory)
                vkd.freeMemory(device, heap.objects[objectNdx].memory, (const VkAllocationCallbacks*)DE_NULL);
        }
    }
}

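// Each iteration either allocates from a randomly chosen non-full heap or frees a randomly
// chosen object from a non-empty heap. After m_opCount operations all remaining objects are
// freed before moving on to the next device mask combination.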
tcu::TestStatus RandomAllocFreeTestInstance::iterate (void)
{
    const VkDevice         device         = getDevice();
    const DeviceInterface& vkd            = getDeviceInterface();
    TestLog&               log            = m_context.getTestContext().getLog();
    const bool             isUMA          = m_memoryLimits.totalDeviceLocalMemory == 0;
    const VkDeviceSize     usedSysMem     = isUMA ? (m_totalDeviceMem + m_totalSystemMem) : m_totalSystemMem;
    const bool             canAllocateSys = usedSysMem + m_allocSysMemSize + 1024 < m_memoryLimits.totalSystemMemory; // \note Always leave room for a 1 KiB system memory allocation
    const bool             canAllocateDev = isUMA ? canAllocateSys : (m_totalDeviceMem + 16 < m_memoryLimits.totalDeviceLocalMemory);
    vector<size_t>         nonFullHeaps;
    vector<size_t>         nonEmptyHeaps;
    bool                   allocateMore;

    if (m_opNdx == 0)
    {
        log << TestLog::Message << "Performing " << m_opCount << " random vkAllocateMemory() / vkFreeMemory() calls before freeing all memory." << TestLog::EndMessage;
        log << TestLog::Message << "Using max 1/8 of the memory in each memory heap." << TestLog::EndMessage;
    }

    // Classify heaps based on whether allocations or frees are possible
    for (size_t heapNdx = 0; heapNdx < m_heaps.size(); ++heapNdx)
    {
        const bool isDeviceLocal = (m_heaps[heapNdx].heap.flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0;
        const bool isHeapFull    = m_heaps[heapNdx].memoryUsage >= m_heaps[heapNdx].maxMemoryUsage;
        const bool isHeapEmpty   = m_heaps[heapNdx].memoryUsage == 0;

        if (!isHeapEmpty)
            nonEmptyHeaps.push_back(heapNdx);

        if (!isHeapFull && ((isUMA && canAllocateSys) ||
            (!isUMA && isDeviceLocal && canAllocateDev) ||
            (!isUMA && !isDeviceLocal && canAllocateSys)))
            nonFullHeaps.push_back(heapNdx);
    }

    if (m_opNdx >= m_opCount)
    {
        if (nonEmptyHeaps.empty())
        {
            m_currentDeviceMask++;
            if (m_currentDeviceMask > m_totalDeviceMaskCombinations)
                return tcu::TestStatus::pass("Pass");
            else
            {
                m_opNdx = 0;
                return tcu::TestStatus::incomplete();
            }
        }
        else
            allocateMore = false;
    }
    else if (!nonEmptyHeaps.empty() &&
             !nonFullHeaps.empty() &&
             (m_memoryObjectCount < MAX_ALLOCATION_COUNT) &&
             canAllocateSys)
        allocateMore = m_rng.getBool(); // Randomize if both operations are doable.
    else if (nonEmptyHeaps.empty())
    {
        DE_ASSERT(canAllocateSys);
        allocateMore = true; // Allocate more if there are no objects to free.
    }
    else if (nonFullHeaps.empty() || !canAllocateSys)
        allocateMore = false; // Free objects if there is no free space for new objects.
    else
    {
        allocateMore = false;
        DE_FATAL("Fail");
    }

    if (allocateMore)
    {
        const size_t       nonFullHeapNdx        = (size_t)(m_rng.getUint32() % (deUint32)nonFullHeaps.size());
        const size_t       heapNdx               = nonFullHeaps[nonFullHeapNdx];
        Heap&              heap                  = m_heaps[heapNdx];
        const MemoryType&  memoryType            = m_rng.choose<MemoryType>(heap.types.begin(), heap.types.end());
        const bool         isDeviceLocal         = (heap.heap.flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0;
        const bool         isProtected           = memoryType.type.propertyFlags & VK_MEMORY_PROPERTY_PROTECTED_BIT;
        VkDeviceSize       maxAllocSize          = (isDeviceLocal && !isUMA)
                                                 ? de::min(heap.maxMemoryUsage - heap.memoryUsage, (VkDeviceSize)m_memoryLimits.totalDeviceLocalMemory - m_totalDeviceMem)
                                                 : de::min(heap.maxMemoryUsage - heap.memoryUsage, (VkDeviceSize)m_memoryLimits.totalSystemMemory - usedSysMem - m_allocSysMemSize);
        const VkDeviceSize maxProtectedAllocSize = 1 * 1024 * 1024;

        // Some implementations might have limitations on the protected heap that don't show up
        // in Vulkan queries. Following the Note under "Device Memory Allocation" in the spec,
        // use minimum-size allocations by capping protected allocations to an arbitrarily
        // selected size of 1 MiB.
        if (isProtected)
            maxAllocSize = (maxAllocSize > maxProtectedAllocSize) ? maxProtectedAllocSize : maxAllocSize;

        const VkDeviceSize allocationSize = 1 + (m_rng.getUint64() % maxAllocSize);

        if ((allocationSize > (deUint64)(heap.maxMemoryUsage - heap.memoryUsage)) && (allocationSize != 1))
            TCU_THROW(InternalError, "Test Error: trying to allocate more memory than the available heap size.");

        const MemoryObject object =
        {
            (VkDeviceMemory)0,
            allocationSize,
            memoryType.type.propertyFlags
        };

        heap.objects.push_back(object);

        m_allocFlagsInfo.deviceMask = m_currentDeviceMask;
        const VkMemoryAllocateInfo alloc =
        {
            VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,             // sType
            m_useDeviceGroups ? &m_allocFlagsInfo : DE_NULL,    // pNext
            object.size,                                        // allocationSize
            memoryType.index                                    // memoryTypeIndex;
        };

        VkResult res = vkd.allocateMemory(device, &alloc, (const VkAllocationCallbacks*)DE_NULL, &heap.objects.back().memory);

        // Some implementations might have limitations on the protected heap that don't show up
        // in Vulkan queries. Following the Note under "Device Memory Allocation" in the spec,
        // which asks implementations to support at least 80 concurrent allocations, allow
        // out-of-memory errors once that hard-coded threshold has been exceeded.
        if (res == VK_ERROR_OUT_OF_DEVICE_MEMORY && isProtected && m_memoryProtectedObjectCount > 80)
        {
            heap.objects.pop_back();
        }
        else
        {
            VK_CHECK(res);

            TCU_CHECK(!!heap.objects.back().memory);
            m_memoryObjectCount++;

            if (isProtected)
                m_memoryProtectedObjectCount++;

            heap.memoryUsage += allocationSize;
            (isDeviceLocal ? m_totalDeviceMem : m_totalSystemMem) += allocationSize;
            m_totalSystemMem += m_allocSysMemSize;
        }
    }
    else
    {
        const size_t  nonEmptyHeapNdx = (size_t)(m_rng.getUint32() % (deUint32)nonEmptyHeaps.size());
        const size_t  heapNdx         = nonEmptyHeaps[nonEmptyHeapNdx];
        Heap&         heap            = m_heaps[heapNdx];
        const size_t  memoryObjectNdx = m_rng.getUint32() % heap.objects.size();
        MemoryObject& memoryObject    = heap.objects[memoryObjectNdx];
        const bool    isDeviceLocal   = (heap.heap.flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0;

        vkd.freeMemory(device, memoryObject.memory, (const VkAllocationCallbacks*)DE_NULL);

        memoryObject.memory = (VkDeviceMemory)0;
        m_memoryObjectCount--;

        if (memoryObject.propertyFlags & VK_MEMORY_PROPERTY_PROTECTED_BIT)
        {
            m_memoryProtectedObjectCount--;
            memoryObject.propertyFlags = (VkMemoryPropertyFlags)0;
        }

        heap.memoryUsage -= memoryObject.size;
        (isDeviceLocal ? m_totalDeviceMem : m_totalSystemMem) -= memoryObject.size;
        m_totalSystemMem -= m_allocSysMemSize;

        heap.objects[memoryObjectNdx] = heap.objects.back();
        heap.objects.pop_back();

        DE_ASSERT(heap.memoryUsage == 0 || !heap.objects.empty());
    }

    m_opNdx++;
    return tcu::TestStatus::incomplete();
}


} // anonymous

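// Build the test hierarchy shared by the plain allocation tests and the device group variant.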
tcu::TestCaseGroup* createAllocationTestsCommon (tcu::TestContext& testCtx, bool useDeviceGroups)
{
    const char*                     name = useDeviceGroups ? "device_group_allocation" : "allocation";
    de::MovePtr<tcu::TestCaseGroup> group (new tcu::TestCaseGroup(testCtx, name, "Memory allocation tests."));

    const VkDeviceSize KiB = 1024;
    const VkDeviceSize MiB = 1024 * KiB;

    const struct
    {
        const char* const str;
        VkDeviceSize      size;
    } allocationSizes[] =
    {
        { "64",   64    },
        { "128",  128   },
        { "256",  256   },
        { "512",  512   },
        { "1KiB", 1*KiB },
        { "4KiB", 4*KiB },
        { "8KiB", 8*KiB },
        { "1MiB", 1*MiB }
    };

    const int allocationPercents[] =
    {
        1
    };

    const int allocationCounts[] =
    {
        1, 10, 100, 1000, -1
    };

    const struct
    {
        const char* const       str;
        const TestConfig::Order order;
    } orders[] =
    {
        { "forward", TestConfig::ALLOC_FREE         },
        { "reverse", TestConfig::ALLOC_REVERSE_FREE },
        { "mixed",   TestConfig::MIXED_ALLOC_FREE   }
    };

    {
        de::MovePtr<tcu::TestCaseGroup> basicGroup (new tcu::TestCaseGroup(testCtx, "basic", "Basic memory allocation and free tests"));

        for (size_t allocationSizeNdx = 0; allocationSizeNdx < DE_LENGTH_OF_ARRAY(allocationSizes); allocationSizeNdx++)
        {
            const VkDeviceSize              allocationSize     = allocationSizes[allocationSizeNdx].size;
            const char* const               allocationSizeName = allocationSizes[allocationSizeNdx].str;
            de::MovePtr<tcu::TestCaseGroup> sizeGroup          (new tcu::TestCaseGroup(testCtx, ("size_" + string(allocationSizeName)).c_str(), ("Test different allocation sizes " + de::toString(allocationSize)).c_str()));

            for (size_t orderNdx = 0; orderNdx < DE_LENGTH_OF_ARRAY(orders); orderNdx++)
            {
                const TestConfig::Order         order            = orders[orderNdx].order;
                const char* const               orderName        = orders[orderNdx].str;
                const char* const               orderDescription = orderName;
                de::MovePtr<tcu::TestCaseGroup> orderGroup       (new tcu::TestCaseGroup(testCtx, orderName, orderDescription));

                for (size_t allocationCountNdx = 0; allocationCountNdx < DE_LENGTH_OF_ARRAY(allocationCounts); allocationCountNdx++)
                {
                    const int allocationCount = allocationCounts[allocationCountNdx];

                    if (allocationCount != -1 && allocationCount * allocationSize > 50 * MiB)
                        continue;

                    TestConfig config;

                    config.memorySize      = allocationSize;
                    config.order           = order;
                    config.useDeviceGroups = useDeviceGroups;
                    if (allocationCount == -1)
                    {
                        if (allocationSize < 4096)
                            continue;

                        config.memoryAllocationCount = de::min((deUint32)(50 * MiB / allocationSize), (deUint32)MAX_ALLOCATION_COUNT);

                        if (config.memoryAllocationCount == 0
                            || config.memoryAllocationCount == 1
                            || config.memoryAllocationCount == 10
                            || config.memoryAllocationCount == 100
                            || config.memoryAllocationCount == 1000)
                            continue;
                    }
                    else
                        config.memoryAllocationCount = allocationCount;

                    orderGroup->addChild(new InstanceFactory1<AllocateFreeTestInstance, TestConfig>(testCtx, tcu::NODETYPE_SELF_VALIDATE, "count_" + de::toString(config.memoryAllocationCount), "", config));
                }

                sizeGroup->addChild(orderGroup.release());
            }

            basicGroup->addChild(sizeGroup.release());
        }

        for (size_t allocationPercentNdx = 0; allocationPercentNdx < DE_LENGTH_OF_ARRAY(allocationPercents); allocationPercentNdx++)
        {
            const int                       allocationPercent = allocationPercents[allocationPercentNdx];
            de::MovePtr<tcu::TestCaseGroup> percentGroup      (new tcu::TestCaseGroup(testCtx, ("percent_" + de::toString(allocationPercent)).c_str(), ("Test different allocation percents " + de::toString(allocationPercent)).c_str()));

            for (size_t orderNdx = 0; orderNdx < DE_LENGTH_OF_ARRAY(orders); orderNdx++)
            {
                const TestConfig::Order         order            = orders[orderNdx].order;
                const char* const               orderName        = orders[orderNdx].str;
                const char* const               orderDescription = orderName;
                de::MovePtr<tcu::TestCaseGroup> orderGroup       (new tcu::TestCaseGroup(testCtx, orderName, orderDescription));

                for (size_t allocationCountNdx = 0; allocationCountNdx < DE_LENGTH_OF_ARRAY(allocationCounts); allocationCountNdx++)
                {
                    const int allocationCount = allocationCounts[allocationCountNdx];

                    if ((allocationCount != -1) && ((float)allocationCount * (float)allocationPercent >= 1.00f / 8.00f))
                        continue;

                    TestConfig config;

                    config.memoryPercentage = (float)allocationPercent / 100.0f;
                    config.order            = order;
                    config.useDeviceGroups  = useDeviceGroups;

                    if (allocationCount == -1)
                    {
                        config.memoryAllocationCount = de::min((deUint32)((1.00f / 8.00f) / ((float)allocationPercent / 100.0f)), (deUint32)MAX_ALLOCATION_COUNT);

                        if (config.memoryAllocationCount == 0
                            || config.memoryAllocationCount == 1
                            || config.memoryAllocationCount == 10
                            || config.memoryAllocationCount == 100
                            || config.memoryAllocationCount == 1000)
                            continue;
                    }
                    else
                        config.memoryAllocationCount = allocationCount;

                    orderGroup->addChild(new InstanceFactory1<AllocateFreeTestInstance, TestConfig>(testCtx, tcu::NODETYPE_SELF_VALIDATE, "count_" + de::toString(config.memoryAllocationCount), "", config));
                }

                percentGroup->addChild(orderGroup.release());
            }

            basicGroup->addChild(percentGroup.release());
        }

        group->addChild(basicGroup.release());
    }

    {
        const deUint32                  caseCount   = 100;
        de::MovePtr<tcu::TestCaseGroup> randomGroup (new tcu::TestCaseGroup(testCtx, "random", "Random memory allocation tests."));

        for (deUint32 caseNdx = 0; caseNdx < caseCount; caseNdx++)
        {
            TestConfigRandom config (deInt32Hash(caseNdx ^ 32480), useDeviceGroups);

            randomGroup->addChild(new InstanceFactory1<RandomAllocFreeTestInstance, TestConfigRandom>(testCtx, tcu::NODETYPE_SELF_VALIDATE, de::toString(caseNdx), "Random case", config));
        }

        group->addChild(randomGroup.release());
    }

    return group.release();
}

tcu::TestCaseGroup* createAllocationTests (tcu::TestContext& testCtx)
{
    return createAllocationTestsCommon(testCtx, false);
}

tcu::TestCaseGroup* createDeviceGroupAllocationTests (tcu::TestContext& testCtx)
{
    return createAllocationTestsCommon(testCtx, true);
}

} // memory
} // vkt