/*-------------------------------------------------------------------------
 * Vulkan Conformance Tests
 * ------------------------
 *
 * Copyright (c) 2015 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*!
 * \file
 * \brief Simple memory mapping tests.
 *//*--------------------------------------------------------------------*/

#include "vktMemoryMappingTests.hpp"

#include "vktTestCaseUtil.hpp"
#include "vktCustomInstancesDevices.hpp"

#include "tcuMaybe.hpp"
#include "tcuResultCollector.hpp"
#include "tcuTestLog.hpp"
#include "tcuPlatform.hpp"
#include "tcuTextureUtil.hpp"
#include "tcuCommandLine.hpp"

#include "vkDeviceUtil.hpp"
#include "vkPlatform.hpp"
#include "vkQueryUtil.hpp"
#include "vkRef.hpp"
#include "vkRefUtil.hpp"
#include "vkStrUtil.hpp"
#include "vkAllocationCallbackUtil.hpp"
#include "vkImageUtil.hpp"

#include "deRandom.hpp"
#include "deSharedPtr.hpp"
#include "deStringUtil.hpp"
#include "deUniquePtr.hpp"
#include "deSTLUtil.hpp"
#include "deMath.h"

#include <string>
#include <vector>
#include <algorithm>

using tcu::Maybe;
using tcu::TestLog;

using de::SharedPtr;

using std::string;
using std::vector;
using std::pair;

using namespace vk;

namespace vkt
{
namespace memory
{
namespace
{
template<typename T>
T divRoundUp (const T& a, const T& b)
{
    return (a / b) + (a % b == 0 ? 0 : 1);
}

template<typename T>
T roundDownToMultiple (const T& a, const T& b)
{
    return b * (a / b);
}

template<typename T>
T roundUpToMultiple (const T& a, const T& b)
{
    return b * (a / b + (a % b != 0 ? 1 : 0));
}
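
// For example: divRoundUp(5, 4) == 2, roundDownToMultiple(5, 4) == 4 and
// roundUpToMultiple(5, 4) == 8.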

enum AllocationKind
{
    ALLOCATION_KIND_SUBALLOCATED = 0,
    ALLOCATION_KIND_DEDICATED_BUFFER = 1,
    ALLOCATION_KIND_DEDICATED_IMAGE = 2,
    ALLOCATION_KIND_LAST
};

// \note Bit vector that guarantees that each value takes only one bit.
// std::vector<bool> is often optimized to take only one bit per bool, but
// that is an implementation detail, and in this case we really need to know
// how much memory is used.
class BitVector
{
public:
    enum
    {
        BLOCK_BIT_SIZE = 8 * sizeof(deUint32)
    };

    BitVector (size_t size, bool value = false)
        : m_data(divRoundUp<size_t>(size, (size_t)BLOCK_BIT_SIZE), value ? ~0x0u : 0x0u)
    {
    }

    bool get (size_t ndx) const
    {
        return (m_data[ndx / BLOCK_BIT_SIZE] & (0x1u << (deUint32)(ndx % BLOCK_BIT_SIZE))) != 0;
    }

    void set (size_t ndx, bool value)
    {
        if (value)
            m_data[ndx / BLOCK_BIT_SIZE] |= 0x1u << (deUint32)(ndx % BLOCK_BIT_SIZE);
        else
            m_data[ndx / BLOCK_BIT_SIZE] &= ~(0x1u << (deUint32)(ndx % BLOCK_BIT_SIZE));
    }

    void setRange (size_t offset, size_t count, bool value)
    {
        size_t ndx = offset;

        for (; (ndx < offset + count) && ((ndx % BLOCK_BIT_SIZE) != 0); ndx++)
        {
            DE_ASSERT(ndx >= offset);
            DE_ASSERT(ndx < offset + count);
            set(ndx, value);
        }

        {
            const size_t endOfFullBlockNdx = roundDownToMultiple<size_t>(offset + count, BLOCK_BIT_SIZE);

            if (ndx < endOfFullBlockNdx)
            {
                deMemset(&m_data[ndx / BLOCK_BIT_SIZE], (value ? 0xFF : 0x0), (endOfFullBlockNdx - ndx) / 8);
                ndx = endOfFullBlockNdx;
            }
        }

        for (; ndx < offset + count; ndx++)
        {
            DE_ASSERT(ndx >= offset);
            DE_ASSERT(ndx < offset + count);
            set(ndx, value);
        }
    }

    void vectorAnd (const BitVector& other, size_t offset, size_t count)
    {
        size_t ndx = offset;

        for (; ndx < offset + count && (ndx % BLOCK_BIT_SIZE) != 0; ndx++)
        {
            DE_ASSERT(ndx >= offset);
            DE_ASSERT(ndx < offset + count);
            set(ndx, other.get(ndx) && get(ndx));
        }

        for (; ndx < roundDownToMultiple<size_t>(offset + count, BLOCK_BIT_SIZE); ndx += BLOCK_BIT_SIZE)
        {
            DE_ASSERT(ndx >= offset);
            DE_ASSERT(ndx < offset + count);
            DE_ASSERT(ndx % BLOCK_BIT_SIZE == 0);
            DE_ASSERT(ndx + BLOCK_BIT_SIZE <= offset + count);
            m_data[ndx / BLOCK_BIT_SIZE] &= other.m_data[ndx / BLOCK_BIT_SIZE];
        }

        for (; ndx < offset + count; ndx++)
        {
            DE_ASSERT(ndx >= offset);
            DE_ASSERT(ndx < offset + count);
            set(ndx, other.get(ndx) && get(ndx));
        }
    }

private:
    vector<deUint32> m_data;
};

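// Byte-accurate model of a mapped memory object. It tracks which bytes hold a
// defined value and which non-coherent atoms have been flushed, mirroring the
// Vulkan host-visible memory rules the tests exercise.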
class ReferenceMemory
{
public:
    ReferenceMemory (size_t size, size_t atomSize)
        : m_atomSize (atomSize)
        , m_bytes    (size, 0xDEu)
        , m_defined  (size, false)
        , m_flushed  (size / atomSize, false)
    {
        DE_ASSERT(size % m_atomSize == 0);
    }

    void write (size_t pos, deUint8 value)
    {
        m_bytes[pos] = value;
        m_defined.set(pos, true);
        m_flushed.set(pos / m_atomSize, false);
    }

    bool read (size_t pos, deUint8 value)
    {
        const bool isOk = !m_defined.get(pos)
                        || m_bytes[pos] == value;

        m_bytes[pos] = value;
        m_defined.set(pos, true);

        return isOk;
    }

    bool modifyXor (size_t pos, deUint8 value, deUint8 mask)
    {
        const bool isOk = !m_defined.get(pos)
                        || m_bytes[pos] == value;

        m_bytes[pos] = value ^ mask;
        m_defined.set(pos, true);
        m_flushed.set(pos / m_atomSize, false);

        return isOk;
    }

    void flush (size_t offset, size_t size)
    {
        DE_ASSERT((offset % m_atomSize) == 0);
        DE_ASSERT((size % m_atomSize) == 0);

        m_flushed.setRange(offset / m_atomSize, size / m_atomSize, true);
    }

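    // Invalidation makes unflushed writes undefined: every byte in an atom
    // that has not been flushed since it was last written loses its defined
    // state.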
    void invalidate (size_t offset, size_t size)
    {
        DE_ASSERT((offset % m_atomSize) == 0);
        DE_ASSERT((size % m_atomSize) == 0);

        if (m_atomSize == 1)
        {
            m_defined.vectorAnd(m_flushed, offset, size);
        }
        else
        {
            for (size_t ndx = 0; ndx < size / m_atomSize; ndx++)
            {
                if (!m_flushed.get((offset / m_atomSize) + ndx))
                    m_defined.setRange(offset + ndx * m_atomSize, m_atomSize, false);
            }
        }
    }

private:
    const size_t    m_atomSize;
    vector<deUint8> m_bytes;
    BitVector       m_defined;
    BitVector       m_flushed;
};

struct MemoryType
{
    MemoryType (deUint32 index_, const VkMemoryType& type_)
        : index (index_)
        , type  (type_)
    {
    }

    MemoryType (void)
        : index (~0u)
    {
    }

    deUint32     index;
    VkMemoryType type;
};

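// Measures how much host memory the driver allocates for a single minimal
// VkDeviceMemory object, so that the stress tests can budget their own
// system-memory use against it.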
size_t computeDeviceMemorySystemMemFootprint (const DeviceInterface& vk, VkDevice device)
{
    AllocationCallbackRecorder callbackRecorder (getSystemAllocator());

    {
        // 1 B allocation from memory type 0
        const VkMemoryAllocateInfo allocInfo =
        {
            VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
            DE_NULL,
            1u,
            0u,
        };
        const Unique<VkDeviceMemory>        memory (allocateMemory(vk, device, &allocInfo, callbackRecorder.getCallbacks()));
        AllocationCallbackValidationResults validateRes;

        validateAllocationCallbacks(callbackRecorder, &validateRes);

        TCU_CHECK(validateRes.violations.empty());

        return getLiveSystemAllocationTotal(validateRes)
             + sizeof(void*) * validateRes.liveAllocations.size(); // allocation overhead
    }
}

Move<VkImage> makeImage (const DeviceInterface& vk, VkDevice device, VkDeviceSize size, deUint32 queueFamilyIndex)
{
    const VkFormat formats[] =
    {
        VK_FORMAT_R8G8B8A8_UINT,
        VK_FORMAT_R16G16B16A16_UINT,
        VK_FORMAT_R32G32B32A32_UINT,
    };

    VkFormat format         = VK_FORMAT_UNDEFINED;
    deUint32 powerOfTwoSize = 0;

    for (const VkFormat f : formats)
    {
        const int          pixelSize    = vk::mapVkFormat(f).getPixelSize();
        const VkDeviceSize sizeInPixels = (size + 3u) / pixelSize;
        const deUint32     sqrtSize     = static_cast<deUint32>(deFloatCeil(deFloatSqrt(static_cast<float>(sizeInPixels))));

        format         = f;
        powerOfTwoSize = deSmallestGreaterOrEquallPowerOfTwoU32(sqrtSize);

        // maxImageDimension2D
        if (powerOfTwoSize < 4096)
            break;
    }
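    // Wider texel formats need fewer texels for the same byte size, so the
    // loop above walks from 4- to 16-byte texels until a square power-of-two
    // image fits under the guaranteed 4096 maxImageDimension2D limit.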

    const VkImageCreateInfo colorImageParams =
    {
        VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,                                    // VkStructureType          sType;
        DE_NULL,                                                                // const void*              pNext;
        0u,                                                                     // VkImageCreateFlags       flags;
        VK_IMAGE_TYPE_2D,                                                       // VkImageType              imageType;
        format,                                                                 // VkFormat                 format;
        {
            powerOfTwoSize,
            powerOfTwoSize,
            1u
        },                                                                      // VkExtent3D               extent;
        1u,                                                                     // deUint32                 mipLevels;
        1u,                                                                     // deUint32                 arrayLayers;
        VK_SAMPLE_COUNT_1_BIT,                                                  // VkSampleCountFlagBits    samples;
        VK_IMAGE_TILING_LINEAR,                                                 // VkImageTiling            tiling;
        VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT,      // VkImageUsageFlags        usage;
        VK_SHARING_MODE_EXCLUSIVE,                                              // VkSharingMode            sharingMode;
        1u,                                                                     // deUint32                 queueFamilyIndexCount;
        &queueFamilyIndex,                                                      // const deUint32*          pQueueFamilyIndices;
        VK_IMAGE_LAYOUT_UNDEFINED,                                              // VkImageLayout            initialLayout;
    };

    return createImage(vk, device, &colorImageParams);
}

Move<VkBuffer> makeBuffer (const DeviceInterface& vk, VkDevice device, VkDeviceSize size, deUint32 queueFamilyIndex)
{
    const VkBufferCreateInfo bufferParams =
    {
        VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,                                   // VkStructureType      sType;
        DE_NULL,                                                                // const void*          pNext;
        0u,                                                                     // VkBufferCreateFlags  flags;
        size,                                                                   // VkDeviceSize         size;
        VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT,    // VkBufferUsageFlags   usage;
        VK_SHARING_MODE_EXCLUSIVE,                                              // VkSharingMode        sharingMode;
        1u,                                                                     // deUint32             queueFamilyIndexCount;
        &queueFamilyIndex,                                                      // const deUint32*      pQueueFamilyIndices;
    };
    return vk::createBuffer(vk, device, &bufferParams, (const VkAllocationCallbacks*)DE_NULL);
}

VkMemoryRequirements getImageMemoryRequirements (const DeviceInterface& vk, VkDevice device, Move<VkImage>& image)
{
    VkImageMemoryRequirementsInfo2 info =
    {
        VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2,     // VkStructureType      sType
        DE_NULL,                                                // const void*          pNext
        *image                                                  // VkImage              image
    };
    VkMemoryDedicatedRequirements dedicatedRequirements =
    {
        VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS,        // VkStructureType      sType
        DE_NULL,                                                // const void*          pNext
        VK_FALSE,                                               // VkBool32             prefersDedicatedAllocation
        VK_FALSE                                                // VkBool32             requiresDedicatedAllocation
    };
    VkMemoryRequirements2 req2 =
    {
        VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2,                // VkStructureType      sType
        &dedicatedRequirements,                                 // void*                pNext
        {0, 0, 0}                                               // VkMemoryRequirements memoryRequirements
    };

    vk.getImageMemoryRequirements2(device, &info, &req2);

    return req2.memoryRequirements;
}

VkMemoryRequirements getBufferMemoryRequirements (const DeviceInterface& vk, VkDevice device, Move<VkBuffer>& buffer)
{
    VkBufferMemoryRequirementsInfo2 info =
    {
        VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2,    // VkStructureType      sType
        DE_NULL,                                                // const void*          pNext
        *buffer                                                 // VkBuffer             buffer
    };
    VkMemoryDedicatedRequirements dedicatedRequirements =
    {
        VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS,        // VkStructureType      sType
        DE_NULL,                                                // const void*          pNext
        VK_FALSE,                                               // VkBool32             prefersDedicatedAllocation
        VK_FALSE                                                // VkBool32             requiresDedicatedAllocation
    };
    VkMemoryRequirements2 req2 =
    {
        VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2,                // VkStructureType      sType
        &dedicatedRequirements,                                 // void*                pNext
        {0, 0, 0}                                               // VkMemoryRequirements memoryRequirements
    };

    vk.getBufferMemoryRequirements2(device, &info, &req2);

    return req2.memoryRequirements;
}

Move<VkDeviceMemory> allocMemory (const DeviceInterface& vk, VkDevice device, VkDeviceSize pAllocInfo_allocationSize, deUint32 pAllocInfo_memoryTypeIndex)
{
    const VkMemoryAllocateInfo pAllocInfo =
    {
        VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
        DE_NULL,
        pAllocInfo_allocationSize,
        pAllocInfo_memoryTypeIndex,
    };
    return allocateMemory(vk, device, &pAllocInfo);
}

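// Finds the largest power-of-two allocation, starting from 'max' and halving,
// that the implementation will actually grant for the given memory type.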
VkDeviceSize findLargeAllocationSize (const DeviceInterface& vk, VkDevice device, VkDeviceSize max, deUint32 memoryTypeIndex)
{
    // max must be power of two
    DE_ASSERT((max & (max - 1)) == 0);

    for (VkDeviceSize size = max; size > 0; size >>= 1)
    {
        const VkMemoryAllocateInfo allocInfo =
        {
            VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
            DE_NULL,
            size,
            memoryTypeIndex,
        };

        VkDeviceMemory memory;
        VkResult       result = vk.allocateMemory(device, &allocInfo, NULL, &memory);

        if (result == VK_SUCCESS)
        {
            vk.freeMemory(device, memory, NULL);
            return size;
        }
    }

    return 0;
}

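// Overload used for dedicated allocations: when an image or a buffer handle
// is provided, a VkMemoryDedicatedAllocateInfo is chained into the allocation
// info.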
Move<VkDeviceMemory> allocMemory (const DeviceInterface& vk, VkDevice device, VkDeviceSize pAllocInfo_allocationSize, deUint32 pAllocInfo_memoryTypeIndex, Move<VkImage>& image, Move<VkBuffer>& buffer, const VkAllocationCallbacks* allocator = DE_NULL)
{
    DE_ASSERT((!image) || (!buffer));

    const VkMemoryDedicatedAllocateInfo dedicatedAllocateInfo =
    {
        VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR,   // VkStructureType  sType
        DE_NULL,                                                // const void*      pNext
        *image,                                                 // VkImage          image
        *buffer                                                 // VkBuffer         buffer
    };

    const VkMemoryAllocateInfo pAllocInfo =
    {
        VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
        !image && !buffer ? DE_NULL : &dedicatedAllocateInfo,
        pAllocInfo_allocationSize,
        pAllocInfo_memoryTypeIndex,
    };
    return allocateMemory(vk, device, &pAllocInfo, allocator);
}

struct MemoryRange
{
    MemoryRange (VkDeviceSize offset_ = ~(VkDeviceSize)0, VkDeviceSize size_ = ~(VkDeviceSize)0)
        : offset (offset_)
        , size   (size_)
    {
    }

    VkDeviceSize offset;
    VkDeviceSize size;
};

struct TestConfig
{
    TestConfig (void)
        : allocationSize (~(VkDeviceSize)0)
        , allocationKind (ALLOCATION_KIND_SUBALLOCATED)
    {
    }

    VkDeviceSize        allocationSize;
    deUint32            seed;

    MemoryRange         mapping;
    vector<MemoryRange> flushMappings;
    vector<MemoryRange> invalidateMappings;
    bool                remap;
    bool                implicitUnmap;
    AllocationKind      allocationKind;
};

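// Compares mapped memory against the reference buffer. 'stride' is greater
// than one only for implicit-unmap configs, where the large allocation is
// sampled sparsely rather than byte by byte.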
bool compareAndLogBuffer (TestLog& log, size_t size, size_t referenceSize, const deUint8* result, const deUint8* reference)
{
    size_t stride      = size / referenceSize;
    size_t failedBytes = 0;
    size_t firstFailed = (size_t)-1;

    DE_ASSERT(referenceSize <= size);

    for (size_t ndx = 0; ndx < referenceSize; ndx += stride)
    {
        if (result[ndx * stride] != reference[ndx])
        {
            failedBytes++;

            if (firstFailed == (size_t)-1)
                firstFailed = ndx;
        }
    }

    if (failedBytes > 0)
    {
        log << TestLog::Message << "Comparison failed. Failed bytes " << failedBytes << ". First failed at offset " << firstFailed << "." << TestLog::EndMessage;

        std::ostringstream expectedValues;
        std::ostringstream resultValues;

        for (size_t ndx = firstFailed; ndx < firstFailed + 10 && ndx < referenceSize; ndx++)
        {
            if (ndx != firstFailed)
            {
                expectedValues << ", ";
                resultValues << ", ";
            }

            expectedValues << reference[ndx];
            resultValues << result[ndx * stride];
        }

        if (firstFailed + 10 < size)
        {
            expectedValues << "...";
            resultValues << "...";
        }

        log << TestLog::Message << "Expected values at offset: " << firstFailed << ", " << expectedValues.str() << TestLog::EndMessage;
        log << TestLog::Message << "Result values at offset: " << firstFailed << ", " << resultValues.str() << TestLog::EndMessage;

        return false;
    }
    else
        return true;
}

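// Creates a device with a protected queue. The implicit-unmap tests run on
// such a device when the implementation supports protectedMemory, so that
// freeing mapped memory is exercised on that path as well.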
static Move<VkDevice> createProtectedMemoryDevice (const Context& context, const VkPhysicalDeviceFeatures2& features2)
{
    auto&                    cmdLine          = context.getTestContext().getCommandLine();
    const InstanceInterface& vki              = context.getInstanceInterface();
    const float              queuePriority    = 1.0f;
    deUint32                 queueFamilyIndex = context.getUniversalQueueFamilyIndex();

    VkDeviceQueueCreateInfo queueInfo =
    {
        VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,     // VkStructureType              sType;
        DE_NULL,                                        // const void*                  pNext;
        vk::VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT,       // VkDeviceQueueCreateFlags     flags;
        queueFamilyIndex,                               // deUint32                     queueFamilyIndex;
        1u,                                             // deUint32                     queueCount;
        &queuePriority                                  // const float*                 pQueuePriorities;
    };

    const VkDeviceCreateInfo deviceInfo =
    {
        VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,           // VkStructureType                  sType;
        &features2,                                     // const void*                      pNext;
        (VkDeviceCreateFlags)0,                         // VkDeviceCreateFlags              flags;
        1u,                                             // uint32_t                         queueCreateInfoCount;
        &queueInfo,                                     // const VkDeviceQueueCreateInfo*   pQueueCreateInfos;
        0u,                                             // uint32_t                         enabledLayerCount;
        DE_NULL,                                        // const char* const*               ppEnabledLayerNames;
        0u,                                             // uint32_t                         enabledExtensionCount;
        DE_NULL,                                        // const char* const*               ppEnabledExtensionNames;
        DE_NULL                                         // const VkPhysicalDeviceFeatures*  pEnabledFeatures;
    };

    return createCustomDevice(cmdLine.isValidationEnabled(), context.getPlatformInterface(), context.getInstance(), vki, context.getPhysicalDevice(), &deviceInfo);
}

tcu::TestStatus testMemoryMapping (Context& context, const TestConfig config)
{
    TestLog&                               log                     = context.getTestContext().getLog();
    tcu::ResultCollector                   result                  (log);
    bool                                   atLeastOneTestPerformed = false;
    const VkPhysicalDevice                 physicalDevice          = context.getPhysicalDevice();
    const InstanceInterface&               vki                     = context.getInstanceInterface();
    const DeviceInterface&                 vkd                     = context.getDeviceInterface();
    const VkPhysicalDeviceMemoryProperties memoryProperties        = getPhysicalDeviceMemoryProperties(vki, physicalDevice);
    const VkDeviceSize                     nonCoherentAtomSize     = context.getDeviceProperties().limits.nonCoherentAtomSize;
    const deUint32                         queueFamilyIndex        = context.getUniversalQueueFamilyIndex();

    // Create a protected-memory device if protected memory is supported;
    // otherwise use the default device.
    Move<VkDevice> protectMemoryDevice;
    VkDevice       device;
    {
        VkPhysicalDeviceProtectedMemoryFeatures protectedFeatures;
        protectedFeatures.sType           = vk::VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES;
        protectedFeatures.pNext           = DE_NULL;
        protectedFeatures.protectedMemory = VK_FALSE;

        VkPhysicalDeviceFeatures2 deviceFeatures2;
        deviceFeatures2.sType = vk::VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
        deviceFeatures2.pNext = &protectedFeatures;

        vki.getPhysicalDeviceFeatures2(context.getPhysicalDevice(), &deviceFeatures2);
        if (protectedFeatures.protectedMemory && config.implicitUnmap)
        {
            protectMemoryDevice = createProtectedMemoryDevice(context, deviceFeatures2);
            device              = *protectMemoryDevice;
        }
        else
        {
            device = context.getDevice();
        }
    }

    {
        const tcu::ScopedLogSection section (log, "TestCaseInfo", "TestCaseInfo");

        log << TestLog::Message << "Seed: " << config.seed << TestLog::EndMessage;
        log << TestLog::Message << "Allocation size: " << config.allocationSize << TestLog::EndMessage;
        log << TestLog::Message << "Mapping, offset: " << config.mapping.offset << ", size: " << config.mapping.size << TestLog::EndMessage;

        if (!config.flushMappings.empty())
        {
            log << TestLog::Message << "Flushing the following ranges:" << TestLog::EndMessage;

            for (size_t ndx = 0; ndx < config.flushMappings.size(); ndx++)
                log << TestLog::Message << "\tOffset: " << config.flushMappings[ndx].offset << ", Size: " << config.flushMappings[ndx].size << TestLog::EndMessage;
        }

        if (config.remap)
            log << TestLog::Message << "Remapping memory between flush and invalidation." << TestLog::EndMessage;

        if (!config.invalidateMappings.empty())
        {
            log << TestLog::Message << "Invalidating the following ranges:" << TestLog::EndMessage;

            for (size_t ndx = 0; ndx < config.invalidateMappings.size(); ndx++)
                log << TestLog::Message << "\tOffset: " << config.invalidateMappings[ndx].offset << ", Size: " << config.invalidateMappings[ndx].size << TestLog::EndMessage;
        }
    }

    for (deUint32 memoryTypeIndex = 0; memoryTypeIndex < memoryProperties.memoryTypeCount; memoryTypeIndex++)
    {
        try
        {
            const tcu::ScopedLogSection section    (log, "MemoryType" + de::toString(memoryTypeIndex), "MemoryType" + de::toString(memoryTypeIndex));
            const vk::VkMemoryType&     memoryType = memoryProperties.memoryTypes[memoryTypeIndex];
            const VkMemoryHeap&         memoryHeap = memoryProperties.memoryHeaps[memoryType.heapIndex];
            const VkDeviceSize          atomSize   = nonCoherentAtomSize;
            const VkDeviceSize          stride     = config.implicitUnmap ? 1024 : 1;
            const deUint32              iterations = config.implicitUnmap ? 128 : 1;

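            // Round the requested allocation size up to the next multiple of
            // the non-coherent atom size.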
            VkDeviceSize    allocationSize = (config.allocationSize % atomSize == 0) ? config.allocationSize : config.allocationSize + (atomSize - (config.allocationSize % atomSize));
            size_t          referenceSize  = 0;
            vector<deUint8> reference;

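            // For implicit-unmap tests, instead allocate the largest block the
            // implementation will grant, capped at 256MiB and at a quarter of
            // the heap size.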
            if (config.implicitUnmap)
            {
                VkDeviceSize max = 0x10000000; // 256MiB

                while (memoryHeap.size <= 4 * max)
                    max >>= 1;

                allocationSize = findLargeAllocationSize(vkd, device, max, memoryTypeIndex);
            }

            vk::VkMemoryRequirements req =
            {
                (VkDeviceSize)allocationSize,
                (VkDeviceSize)0,
                ~(deUint32)0u
            };
            Move<VkImage>  image;
            Move<VkBuffer> buffer;

            if (config.allocationKind == ALLOCATION_KIND_DEDICATED_IMAGE)
            {
                image = makeImage(vkd, device, allocationSize, queueFamilyIndex);
                req   = getImageMemoryRequirements(vkd, device, image);
            }
            else if (config.allocationKind == ALLOCATION_KIND_DEDICATED_BUFFER)
            {
                buffer = makeBuffer(vkd, device, allocationSize, queueFamilyIndex);
                req    = getBufferMemoryRequirements(vkd, device, buffer);
            }
            allocationSize = req.size;
            VkDeviceSize mappingSize   = (config.mapping.size % atomSize == 0) ? config.mapping.size : config.mapping.size + (atomSize - (config.mapping.size % atomSize));
            VkDeviceSize mappingOffset = (config.mapping.offset % atomSize == 0) ? config.mapping.offset : config.mapping.offset - (config.mapping.offset % atomSize);
            if (config.mapping.size == config.allocationSize && config.mapping.offset == 0u)
            {
                mappingSize = allocationSize;
            }

            referenceSize = static_cast<size_t>(mappingSize / stride);
            reference.resize(static_cast<size_t>(mappingOffset) + referenceSize);

            log << TestLog::Message << "MemoryType: " << memoryType << TestLog::EndMessage;
            log << TestLog::Message << "MemoryHeap: " << memoryHeap << TestLog::EndMessage;
            log << TestLog::Message << "AtomSize: " << atomSize << TestLog::EndMessage;
            log << TestLog::Message << "AllocationSize: " << allocationSize << TestLog::EndMessage;
            log << TestLog::Message << "Mapping, offset: " << mappingOffset << ", size: " << mappingSize << TestLog::EndMessage;

            if ((req.memoryTypeBits & (1u << memoryTypeIndex)) == 0)
            {
                static const char* const allocationKindName[] =
                {
                    "suballocation",
                    "dedicated allocation of buffers",
                    "dedicated allocation of images"
                };
                log << TestLog::Message << "Memory type does not support " << allocationKindName[static_cast<deUint32>(config.allocationKind)] << '.' << TestLog::EndMessage;
                continue;
            }

            if (!config.flushMappings.empty())
            {
                log << TestLog::Message << "Flushing the following ranges:" << TestLog::EndMessage;

                for (size_t ndx = 0; ndx < config.flushMappings.size(); ndx++)
                {
                    const VkDeviceSize offset = (config.flushMappings[ndx].offset % atomSize == 0) ? config.flushMappings[ndx].offset : config.flushMappings[ndx].offset - (config.flushMappings[ndx].offset % atomSize);
                    const VkDeviceSize size   = (config.flushMappings[ndx].size % atomSize == 0) ? config.flushMappings[ndx].size : config.flushMappings[ndx].size + (atomSize - (config.flushMappings[ndx].size % atomSize));
                    log << TestLog::Message << "\tOffset: " << offset << ", Size: " << size << TestLog::EndMessage;
                }
            }

            if (!config.invalidateMappings.empty())
            {
                log << TestLog::Message << "Invalidating the following ranges:" << TestLog::EndMessage;

                for (size_t ndx = 0; ndx < config.invalidateMappings.size(); ndx++)
                {
                    const VkDeviceSize offset = (config.invalidateMappings[ndx].offset % atomSize == 0) ? config.invalidateMappings[ndx].offset : config.invalidateMappings[ndx].offset - (config.invalidateMappings[ndx].offset % atomSize);
                    const VkDeviceSize size   = (config.invalidateMappings[ndx].size % atomSize == 0) ? config.invalidateMappings[ndx].size : config.invalidateMappings[ndx].size + (atomSize - (config.invalidateMappings[ndx].size % atomSize));
                    log << TestLog::Message << "\tOffset: " << offset << ", Size: " << size << TestLog::EndMessage;
                }
            }

            if ((memoryType.propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
            {
                log << TestLog::Message << "Memory type doesn't support mapping." << TestLog::EndMessage;
            }
            else if (memoryHeap.size <= 4 * allocationSize)
            {
                log << TestLog::Message << "Memory type's heap is too small." << TestLog::EndMessage;
            }
            else for (deUint32 iteration = 0; iteration < iterations; iteration++)
            {
                atLeastOneTestPerformed = true;
                AllocationCallbackRecorder   recorder  (getSystemAllocator());
                const VkAllocationCallbacks* allocator = config.implicitUnmap ? recorder.getCallbacks() : DE_NULL;
                Move<VkDeviceMemory>         memory    (allocMemory(vkd, device, allocationSize, memoryTypeIndex, image, buffer, allocator));
                de::Random                   rng       (config.seed);
                deUint8*                     mapping   = DE_NULL;

                {
                    void* ptr;
                    VK_CHECK(vkd.mapMemory(device, *memory, mappingOffset, mappingSize, 0u, &ptr));
                    TCU_CHECK(ptr);

                    mapping = (deUint8*)ptr;
                }

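                // Fill the mapped range with random bytes and mirror each
                // write into the host-side reference buffer.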
                for (VkDeviceSize ndx = 0; ndx < referenceSize; ndx += stride)
                {
                    const deUint8 val = rng.getUint8();

                    mapping[ndx * stride]                    = val;
                    reference[(size_t)(mappingOffset + ndx)] = val;
                }

                if (!config.flushMappings.empty())
                {
                    vector<VkMappedMemoryRange> ranges;

                    for (size_t ndx = 0; ndx < config.flushMappings.size(); ndx++)
                    {
                        const VkMappedMemoryRange range =
                        {
                            VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
                            DE_NULL,

                            *memory,
                            (config.flushMappings[ndx].offset % atomSize == 0) ? config.flushMappings[ndx].offset : config.flushMappings[ndx].offset - (config.flushMappings[ndx].offset % atomSize),
                            (config.flushMappings[ndx].size % atomSize == 0) ? config.flushMappings[ndx].size : config.flushMappings[ndx].size + (atomSize - (config.flushMappings[ndx].size % atomSize)),
                        };

                        ranges.push_back(range);
                    }

                    VK_CHECK(vkd.flushMappedMemoryRanges(device, (deUint32)ranges.size(), &ranges[0]));
                }

                if (config.remap)
                {
                    void* ptr;
                    vkd.unmapMemory(device, *memory);
                    VK_CHECK(vkd.mapMemory(device, *memory, mappingOffset, mappingSize, 0u, &ptr));
                    TCU_CHECK(ptr);

                    mapping = (deUint8*)ptr;
                }

                if (!config.invalidateMappings.empty())
                {
                    vector<VkMappedMemoryRange> ranges;

                    for (size_t ndx = 0; ndx < config.invalidateMappings.size(); ndx++)
                    {
                        const VkMappedMemoryRange range =
                        {
                            VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
                            DE_NULL,

                            *memory,
                            (config.invalidateMappings[ndx].offset % atomSize == 0) ? config.invalidateMappings[ndx].offset : config.invalidateMappings[ndx].offset - (config.invalidateMappings[ndx].offset % atomSize),
                            (config.invalidateMappings[ndx].size % atomSize == 0) ? config.invalidateMappings[ndx].size : config.invalidateMappings[ndx].size + (atomSize - (config.invalidateMappings[ndx].size % atomSize)),
                        };

                        ranges.push_back(range);
                    }

                    VK_CHECK(vkd.invalidateMappedMemoryRanges(device, static_cast<deUint32>(ranges.size()), &ranges[0]));
                }

                if (!compareAndLogBuffer(log, static_cast<size_t>(mappingSize), referenceSize, mapping, &reference[static_cast<size_t>(mappingOffset)]))
                    result.fail("Unexpected values read from mapped memory.");

                if (config.implicitUnmap)
                {
                    AllocationCallbackValidationResults results;

                    vkd.freeMemory(device, memory.disown(), allocator);
                    validateAllocationCallbacks(recorder, &results);

                    if (!results.liveAllocations.empty())
                        result.fail("Live allocations remain after freeing mapped memory");
                }
                else
                {
                    vkd.unmapMemory(device, *memory);
                }

                context.getTestContext().touchWatchdog();
            }
        }
        catch (const tcu::TestError& error)
        {
            result.fail(error.getMessage());
        }
    }

    if (!atLeastOneTestPerformed)
        result.addResult(QP_TEST_RESULT_NOT_SUPPORTED, "No suitable memory kind found to perform test.");

    return tcu::TestStatus(result.getResult(), result.getMessage());
}

class MemoryMapping
{
public:
                        MemoryMapping (const MemoryRange& range,
                                       void*              ptr,
                                       ReferenceMemory&   reference);

    void                randomRead    (de::Random& rng);
    void                randomWrite   (de::Random& rng);
    void                randomModify  (de::Random& rng);

    const MemoryRange&  getRange      (void) const { return m_range; }

private:
    MemoryRange         m_range;
    void*               m_ptr;
    ReferenceMemory&    m_reference;
};

MemoryMapping::MemoryMapping (const MemoryRange& range,
                              void*              ptr,
                              ReferenceMemory&   reference)
    : m_range     (range)
    , m_ptr       (ptr)
    , m_reference (reference)
{
    DE_ASSERT(range.size > 0);
}

void MemoryMapping::randomRead (de::Random& rng)
{
    const size_t count = (size_t)rng.getInt(0, 100);

    for (size_t ndx = 0; ndx < count; ndx++)
    {
        const size_t  pos = (size_t)(rng.getUint64() % (deUint64)m_range.size);
        const deUint8 val = ((deUint8*)m_ptr)[pos];

        TCU_CHECK(m_reference.read((size_t)(m_range.offset + pos), val));
    }
}

void MemoryMapping::randomWrite (de::Random& rng)
{
    const size_t count = (size_t)rng.getInt(0, 100);

    for (size_t ndx = 0; ndx < count; ndx++)
    {
        const size_t  pos = (size_t)(rng.getUint64() % (deUint64)m_range.size);
        const deUint8 val = rng.getUint8();

        ((deUint8*)m_ptr)[pos] = val;
        m_reference.write((size_t)(m_range.offset + pos), val);
    }
}

void MemoryMapping::randomModify (de::Random& rng)
{
    const size_t count = (size_t)rng.getInt(0, 100);

    for (size_t ndx = 0; ndx < count; ndx++)
    {
        const size_t  pos  = (size_t)(rng.getUint64() % (deUint64)m_range.size);
        const deUint8 val  = ((deUint8*)m_ptr)[pos];
        const deUint8 mask = rng.getUint8();

        ((deUint8*)m_ptr)[pos] = val ^ mask;
        TCU_CHECK(m_reference.modifyXor((size_t)(m_range.offset + pos), val, mask));
    }
}

VkDeviceSize randomSize (de::Random& rng, VkDeviceSize atomSize, VkDeviceSize maxSize)
{
    const VkDeviceSize maxSizeInAtoms = maxSize / atomSize;

    DE_ASSERT(maxSizeInAtoms > 0);

    return maxSizeInAtoms > 1
         ? atomSize * (1 + (VkDeviceSize)(rng.getUint64() % (deUint64)maxSizeInAtoms))
         : atomSize;
}

VkDeviceSize randomOffset (de::Random& rng, VkDeviceSize atomSize, VkDeviceSize maxOffset)
{
    const VkDeviceSize maxOffsetInAtoms = maxOffset / atomSize;

    return maxOffsetInAtoms > 0
         ? atomSize * (VkDeviceSize)(rng.getUint64() % (deUint64)(maxOffsetInAtoms + 1))
         : 0;
}

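// Fills 'ranges' with random atom-aligned ranges that stay inside the mapped
// window starting at 'minOffset'.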
void randomRanges (de::Random& rng, vector<VkMappedMemoryRange>& ranges, size_t count, VkDeviceMemory memory, VkDeviceSize minOffset, VkDeviceSize maxSize, VkDeviceSize atomSize)
{
    ranges.resize(count);

    for (size_t rangeNdx = 0; rangeNdx < count; rangeNdx++)
    {
        const VkDeviceSize size   = randomSize(rng, atomSize, maxSize);
        const VkDeviceSize offset = minOffset + randomOffset(rng, atomSize, maxSize - size);

        const VkMappedMemoryRange range =
        {
            VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
            DE_NULL,

            memory,
            offset,
            size
        };
        ranges[rangeNdx] = range;
    }
}

class MemoryObject
{
public:
                    MemoryObject            (const DeviceInterface& vkd,
                                             VkDevice               device,
                                             VkDeviceSize           size,
                                             deUint32               memoryTypeIndex,
                                             VkDeviceSize           atomSize,
                                             VkDeviceSize           memoryUsage,
                                             VkDeviceSize           referenceMemoryUsage);

                    ~MemoryObject           (void);

    MemoryMapping*  mapRandom               (const DeviceInterface& vkd, VkDevice device, de::Random& rng);
    void            unmap                   (void);

    void            randomFlush             (const DeviceInterface& vkd, VkDevice device, de::Random& rng);
    void            randomInvalidate        (const DeviceInterface& vkd, VkDevice device, de::Random& rng);

    VkDeviceSize    getSize                 (void) const { return m_size; }
    MemoryMapping*  getMapping              (void) { return m_mapping; }

    VkDeviceSize    getMemoryUsage          (void) const { return m_memoryUsage; }
    VkDeviceSize    getReferenceMemoryUsage (void) const { return m_referenceMemoryUsage; }

private:
    const DeviceInterface&  m_vkd;
    const VkDevice          m_device;

    const deUint32          m_memoryTypeIndex;
    const VkDeviceSize      m_size;
    const VkDeviceSize      m_atomSize;
    const VkDeviceSize      m_memoryUsage;
    const VkDeviceSize      m_referenceMemoryUsage;

    Move<VkDeviceMemory>    m_memory;

    MemoryMapping*          m_mapping;
    ReferenceMemory         m_referenceMemory;
};

MemoryObject::MemoryObject (const DeviceInterface& vkd,
                            VkDevice               device,
                            VkDeviceSize           size,
                            deUint32               memoryTypeIndex,
                            VkDeviceSize           atomSize,
                            VkDeviceSize           memoryUsage,
                            VkDeviceSize           referenceMemoryUsage)
    : m_vkd                  (vkd)
    , m_device               (device)
    , m_memoryTypeIndex      (memoryTypeIndex)
    , m_size                 (size)
    , m_atomSize             (atomSize)
    , m_memoryUsage          (memoryUsage)
    , m_referenceMemoryUsage (referenceMemoryUsage)
    , m_mapping              (DE_NULL)
    , m_referenceMemory      ((size_t)size, (size_t)m_atomSize)
{
    m_memory = allocMemory(m_vkd, m_device, m_size, m_memoryTypeIndex);
}

MemoryObject::~MemoryObject (void)
{
    delete m_mapping;
}

MemoryMapping* MemoryObject::mapRandom (const DeviceInterface& vkd, VkDevice device, de::Random& rng)
{
    const VkDeviceSize size   = randomSize(rng, m_atomSize, m_size);
    const VkDeviceSize offset = randomOffset(rng, m_atomSize, m_size - size);
    void*              ptr;

    DE_ASSERT(!m_mapping);

    VK_CHECK(vkd.mapMemory(device, *m_memory, offset, size, 0u, &ptr));
    TCU_CHECK(ptr);
    m_mapping = new MemoryMapping(MemoryRange(offset, size), ptr, m_referenceMemory);

    return m_mapping;
}

void MemoryObject::unmap (void)
{
    m_vkd.unmapMemory(m_device, *m_memory);

    delete m_mapping;
    m_mapping = DE_NULL;
}

void MemoryObject::randomFlush (const DeviceInterface& vkd, VkDevice device, de::Random& rng)
{
    const size_t                rangeCount = (size_t)rng.getInt(1, 10);
    vector<VkMappedMemoryRange> ranges     (rangeCount);

    randomRanges(rng, ranges, rangeCount, *m_memory, m_mapping->getRange().offset, m_mapping->getRange().size, m_atomSize);

    for (size_t rangeNdx = 0; rangeNdx < ranges.size(); rangeNdx++)
        m_referenceMemory.flush((size_t)ranges[rangeNdx].offset, (size_t)ranges[rangeNdx].size);

    VK_CHECK(vkd.flushMappedMemoryRanges(device, (deUint32)ranges.size(), ranges.empty() ? DE_NULL : &ranges[0]));
}

void MemoryObject::randomInvalidate (const DeviceInterface& vkd, VkDevice device, de::Random& rng)
{
    const size_t                rangeCount = (size_t)rng.getInt(1, 10);
    vector<VkMappedMemoryRange> ranges     (rangeCount);

    randomRanges(rng, ranges, rangeCount, *m_memory, m_mapping->getRange().offset, m_mapping->getRange().size, m_atomSize);

    for (size_t rangeNdx = 0; rangeNdx < ranges.size(); rangeNdx++)
        m_referenceMemory.invalidate((size_t)ranges[rangeNdx].offset, (size_t)ranges[rangeNdx].size);

    VK_CHECK(vkd.invalidateMappedMemoryRanges(device, (deUint32)ranges.size(), ranges.empty() ? DE_NULL : &ranges[0]));
}

enum
{
    MAX_MEMORY_USAGE_DIV = 2, // Use only 1/2 of each memory heap.
    MAX_MEMORY_ALLOC_DIV = 2, // Do not alloc more than 1/2 of available space.
};

template<typename T>
void removeFirstEqual (vector<T>& vec, const T& val)
{
    for (size_t ndx = 0; ndx < vec.size(); ndx++)
    {
        if (vec[ndx] == val)
        {
            vec[ndx] = vec.back();
            vec.pop_back();
            return;
        }
    }
}

enum MemoryClass
{
    MEMORY_CLASS_SYSTEM = 0,
    MEMORY_CLASS_DEVICE,

    MEMORY_CLASS_LAST
};

// \todo [2016-04-20 pyry] Consider estimating memory fragmentation
class TotalMemoryTracker
{
public:
    TotalMemoryTracker (void)
    {
        std::fill(DE_ARRAY_BEGIN(m_usage), DE_ARRAY_END(m_usage), 0);
    }

    void allocate (MemoryClass memClass, VkDeviceSize size)
    {
        m_usage[memClass] += size;
    }

    void free (MemoryClass memClass, VkDeviceSize size)
    {
        DE_ASSERT(size <= m_usage[memClass]);
        m_usage[memClass] -= size;
    }

    VkDeviceSize getUsage (MemoryClass memClass) const
    {
        return m_usage[memClass];
    }

    VkDeviceSize getTotalUsage (void) const
    {
        VkDeviceSize total = 0;
        for (int ndx = 0; ndx < MEMORY_CLASS_LAST; ++ndx)
            total += getUsage((MemoryClass)ndx);
        return total;
    }

private:
    VkDeviceSize m_usage[MEMORY_CLASS_LAST];
};

VkDeviceSize getHostPageSize (void)
{
    return 4096;
}

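// Tracks allocations from one VkMemoryHeap, keeping usage within the
// MAX_MEMORY_USAGE_DIV budget and accounting host-side reference memory
// against the system-memory class.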
class MemoryHeap
{
public:
    MemoryHeap (const VkMemoryHeap&         heap,
                const vector<MemoryType>&   memoryTypes,
                const PlatformMemoryLimits& memoryLimits,
                const VkDeviceSize          nonCoherentAtomSize,
                TotalMemoryTracker&         totalMemTracker)
        : m_heap                (heap)
        , m_memoryTypes         (memoryTypes)
        , m_limits              (memoryLimits)
        , m_nonCoherentAtomSize (nonCoherentAtomSize)
        , m_minAtomSize         (nonCoherentAtomSize)
        , m_totalMemTracker     (totalMemTracker)
        , m_usage               (0)
    {
    }

    ~MemoryHeap (void)
    {
        for (vector<MemoryObject*>::iterator iter = m_objects.begin(); iter != m_objects.end(); ++iter)
            delete *iter;
    }

    bool full  (void) const;
    bool empty (void) const
    {
        return m_usage == 0 && !full();
    }

    MemoryObject* allocateRandom (const DeviceInterface& vkd, VkDevice device, de::Random& rng);

    MemoryObject* getRandomObject (de::Random& rng) const
    {
        return rng.choose<MemoryObject*>(m_objects.begin(), m_objects.end());
    }

    void free (MemoryObject* object)
    {
        removeFirstEqual(m_objects, object);
        m_usage -= object->getMemoryUsage();
        m_totalMemTracker.free(MEMORY_CLASS_SYSTEM, object->getReferenceMemoryUsage());
        m_totalMemTracker.free(getMemoryClass(), object->getMemoryUsage());
        delete object;
    }

private:
    MemoryClass getMemoryClass (void) const
    {
        if ((m_heap.flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
            return MEMORY_CLASS_DEVICE;
        else
            return MEMORY_CLASS_SYSTEM;
    }

    const VkMemoryHeap          m_heap;
    const vector<MemoryType>    m_memoryTypes;
    const PlatformMemoryLimits& m_limits;
    const VkDeviceSize          m_nonCoherentAtomSize;
    const VkDeviceSize          m_minAtomSize;
    TotalMemoryTracker&         m_totalMemTracker;

    VkDeviceSize                m_usage;
    vector<MemoryObject*>       m_objects;
};

// The heap is full if there is not enough memory left to allocate even a
// minimal memory object.
bool MemoryHeap::full (void) const
{
    DE_ASSERT(m_usage <= m_heap.size / MAX_MEMORY_USAGE_DIV);

    const VkDeviceSize availableInHeap   = m_heap.size / MAX_MEMORY_USAGE_DIV - m_usage;
    const bool         isUMA             = m_limits.totalDeviceLocalMemory == 0;
    const MemoryClass  memClass          = getMemoryClass();
    const VkDeviceSize minAllocationSize = de::max(m_minAtomSize, memClass == MEMORY_CLASS_DEVICE ? m_limits.devicePageSize : getHostPageSize());
    // Memory required for the reference: one byte per byte, one defined bit
    // per byte, and one flushed bit per atom.
    const VkDeviceSize minReferenceSize  = minAllocationSize
                                         + divRoundUp<VkDeviceSize>(minAllocationSize, 8)
                                         + divRoundUp<VkDeviceSize>(minAllocationSize, m_minAtomSize * 8);

    if (isUMA)
    {
        const VkDeviceSize totalUsage  = m_totalMemTracker.getTotalUsage();
        const VkDeviceSize totalSysMem = (VkDeviceSize)m_limits.totalSystemMemory;

        DE_ASSERT(totalUsage <= totalSysMem);

        return (minAllocationSize + minReferenceSize) > (totalSysMem - totalUsage)
            || minAllocationSize > availableInHeap;
    }
    else
    {
        const VkDeviceSize totalUsage    = m_totalMemTracker.getTotalUsage();
        const VkDeviceSize totalSysMem   = (VkDeviceSize)m_limits.totalSystemMemory;

        const VkDeviceSize totalMemClass = memClass == MEMORY_CLASS_SYSTEM
                                         ? m_limits.totalSystemMemory
                                         : m_limits.totalDeviceLocalMemory;
        const VkDeviceSize usedMemClass  = m_totalMemTracker.getUsage(memClass);

        DE_ASSERT(usedMemClass <= totalMemClass);

        return minAllocationSize > availableInHeap
            || minAllocationSize > (totalMemClass - usedMemClass)
            || minReferenceSize > (totalSysMem - totalUsage);
    }
}

MemoryObject* MemoryHeap::allocateRandom (const DeviceInterface& vkd, VkDevice device, de::Random& rng)
{
    pair<MemoryType, VkDeviceSize> memoryTypeMaxSizePair;

    // Pick a random memory type
    {
        vector<pair<MemoryType, VkDeviceSize> > memoryTypes;

        const VkDeviceSize availableInHeap = m_heap.size / MAX_MEMORY_USAGE_DIV - m_usage;
        const bool         isUMA           = m_limits.totalDeviceLocalMemory == 0;
        const MemoryClass  memClass        = getMemoryClass();

        // Collect the memory types that can be allocated from, together with
        // the maximum allocation size for each. A memory type is usable only
        // if a minimal allocation still fits in the available memory.
        for (size_t memoryTypeNdx = 0; memoryTypeNdx < m_memoryTypes.size(); memoryTypeNdx++)
        {
            const MemoryType   type                      = m_memoryTypes[memoryTypeNdx];
            const VkDeviceSize atomSize                  = m_nonCoherentAtomSize;
            const VkDeviceSize allocationSizeGranularity = de::max(atomSize, memClass == MEMORY_CLASS_DEVICE ? m_limits.devicePageSize : getHostPageSize());
            const VkDeviceSize minAllocationSize         = allocationSizeGranularity;
            const VkDeviceSize minReferenceSize          = minAllocationSize
                                                         + divRoundUp<VkDeviceSize>(minAllocationSize, 8)
                                                         + divRoundUp<VkDeviceSize>(minAllocationSize, atomSize * 8);

            if (isUMA)
            {
                // The maximum allocation size calculation is a little tricky, since
                // the reference memory requires 1/n bits per byte.
                const VkDeviceSize totalUsage    = m_totalMemTracker.getTotalUsage();
                const VkDeviceSize totalSysMem   = (VkDeviceSize)m_limits.totalSystemMemory;
                const VkDeviceSize availableBits = (totalSysMem - totalUsage) * 8;
                // availableBits == maxAllocationSizeBits + maxAllocationReferenceSizeBits
                // maxAllocationReferenceSizeBits == maxAllocationSizeBits + (maxAllocationSizeBits / 8) + (maxAllocationSizeBits / atomSizeBits)
                // availableBits == maxAllocationSizeBits + maxAllocationSizeBits + (maxAllocationSizeBits / 8) + (maxAllocationSizeBits / atomSizeBits)
                // availableBits == 2 * maxAllocationSizeBits + (maxAllocationSizeBits / 8) + (maxAllocationSizeBits / atomSizeBits)
                // availableBits == (2 + 1/8 + 1/atomSizeBits) * maxAllocationSizeBits
                // 8 * availableBits == (16 + 1 + 8/atomSizeBits) * maxAllocationSizeBits
                // atomSizeBits * 8 * availableBits == (17 * atomSizeBits + 8) * maxAllocationSizeBits
                // maxAllocationSizeBits == atomSizeBits * 8 * availableBits / (17 * atomSizeBits + 8)
                // maxAllocationSizeBytes == maxAllocationSizeBits / 8
                // maxAllocationSizeBytes == atomSizeBits * availableBits / (17 * atomSizeBits + 8)
                // atomSizeBits = atomSize * 8
                // maxAllocationSizeBytes == atomSize * 8 * availableBits / (17 * atomSize * 8 + 8)
                // maxAllocationSizeBytes == atomSize * availableBits / (17 * atomSize + 1)
                //
                // Finally, the allocation size must be less than or equal to memory heap size
                const VkDeviceSize maxAllocationSize = roundDownToMultiple(de::min((atomSize * availableBits) / (17 * atomSize + 1), availableInHeap), allocationSizeGranularity);

                DE_ASSERT(totalUsage <= totalSysMem);
                DE_ASSERT(maxAllocationSize <= totalSysMem);

                if (minAllocationSize + minReferenceSize <= (totalSysMem - totalUsage) && minAllocationSize <= availableInHeap)
                {
                    DE_ASSERT(maxAllocationSize >= minAllocationSize);
                    memoryTypes.push_back(std::make_pair(type, maxAllocationSize));
                }
            }
            else
            {
                // The maximum allocation size calculation is a little tricky, since
                // the reference memory requires 1/n bits per byte.
                const VkDeviceSize totalUsage    = m_totalMemTracker.getTotalUsage();
                const VkDeviceSize totalSysMem   = (VkDeviceSize)m_limits.totalSystemMemory;

                const VkDeviceSize totalMemClass = memClass == MEMORY_CLASS_SYSTEM
                                                 ? m_limits.totalSystemMemory
                                                 : m_limits.totalDeviceLocalMemory;
                const VkDeviceSize usedMemClass  = m_totalMemTracker.getUsage(memClass);
                // availableRefBits = maxRefBits + maxRefBits/8 + maxRefBits/atomSizeBits
                // availableRefBits = maxRefBits * (1 + 1/8 + 1/atomSizeBits)
                // 8 * availableRefBits = maxRefBits * (8 + 1 + 8/atomSizeBits)
                // 8 * atomSizeBits * availableRefBits = maxRefBits * (9 * atomSizeBits + 8)
                // maxRefBits = 8 * atomSizeBits * availableRefBits / (9 * atomSizeBits + 8)
                // atomSizeBits = atomSize * 8
                // maxRefBits = 8 * atomSize * 8 * availableRefBits / (9 * atomSize * 8 + 8)
                // maxRefBits = atomSize * 8 * availableRefBits / (9 * atomSize + 1)
                // maxRefBytes = atomSize * availableRefBits / (9 * atomSize + 1)
                //
                // Finally, the allocation size must be less than or equal to memory heap size
                const VkDeviceSize maxAllocationSize = roundDownToMultiple(de::min(de::min(totalMemClass - usedMemClass, (atomSize * 8 * (totalSysMem - totalUsage)) / (9 * atomSize + 1)), availableInHeap), allocationSizeGranularity);

                DE_ASSERT(usedMemClass <= totalMemClass);

                if (minAllocationSize <= availableInHeap
                    && minAllocationSize <= (totalMemClass - usedMemClass)
                    && minReferenceSize <= (totalSysMem - totalUsage))
                {
                    DE_ASSERT(maxAllocationSize >= minAllocationSize);
                    memoryTypes.push_back(std::make_pair(type, maxAllocationSize));
                }
            }
        }

        memoryTypeMaxSizePair = rng.choose<pair<MemoryType, VkDeviceSize> >(memoryTypes.begin(), memoryTypes.end());
    }

    const MemoryType   type                      = memoryTypeMaxSizePair.first;
    const VkDeviceSize maxAllocationSize         = memoryTypeMaxSizePair.second / MAX_MEMORY_ALLOC_DIV;
    const VkDeviceSize atomSize                  = m_nonCoherentAtomSize;
    const VkDeviceSize allocationSizeGranularity = de::max(atomSize, getMemoryClass() == MEMORY_CLASS_DEVICE ? m_limits.devicePageSize : getHostPageSize());
    const VkDeviceSize size                      = randomSize(rng, atomSize, maxAllocationSize);
    const VkDeviceSize memoryUsage               = roundUpToMultiple(size, allocationSizeGranularity);
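    // Host memory needed to model this allocation: one byte per byte, one
    // defined bit per byte, and one flushed bit per atom.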
    const VkDeviceSize referenceMemoryUsage     = size + divRoundUp<VkDeviceSize>(size, 8) + divRoundUp<VkDeviceSize>(size / atomSize, 8);

    DE_ASSERT(size <= maxAllocationSize);

    MemoryObject* const object = new MemoryObject(vkd, device, size, type.index, atomSize, memoryUsage, referenceMemoryUsage);

    m_usage += memoryUsage;
    m_totalMemTracker.allocate(getMemoryClass(), memoryUsage);
    m_totalMemTracker.allocate(MEMORY_CLASS_SYSTEM, referenceMemoryUsage);
    m_objects.push_back(object);

    return object;
}

size_t getMemoryObjectSystemSize (Context& context)
{
    return computeDeviceMemorySystemMemFootprint(context.getDeviceInterface(), context.getDevice())
         + sizeof(MemoryObject)
         + sizeof(de::SharedPtr<MemoryObject>);
}

size_t getMemoryMappingSystemSize (void)
{
    return sizeof(MemoryMapping) + sizeof(de::SharedPtr<MemoryMapping>);
}

class RandomMemoryMappingInstance : public TestInstance
{
public:
    RandomMemoryMappingInstance (Context& context, deUint32 seed)
        : TestInstance              (context)
        , m_memoryObjectSysMemSize  (getMemoryObjectSystemSize(context))
        , m_memoryMappingSysMemSize (getMemoryMappingSystemSize())
        , m_memoryLimits            (getMemoryLimits(context.getTestContext().getPlatform().getVulkanPlatform()))
        , m_rng                     (seed)
        , m_opNdx                   (0)
    {
        const VkPhysicalDevice                 physicalDevice      = context.getPhysicalDevice();
        const InstanceInterface&               vki                 = context.getInstanceInterface();
        const VkPhysicalDeviceMemoryProperties memoryProperties    = getPhysicalDeviceMemoryProperties(vki, physicalDevice);
        const VkDeviceSize                     nonCoherentAtomSize = context.getDeviceProperties().limits.nonCoherentAtomSize;

        // Initialize heaps
        {
            vector<vector<MemoryType> > memoryTypes (memoryProperties.memoryHeapCount);

            for (deUint32 memoryTypeNdx = 0; memoryTypeNdx < memoryProperties.memoryTypeCount; memoryTypeNdx++)
            {
                if (memoryProperties.memoryTypes[memoryTypeNdx].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)
                    memoryTypes[memoryProperties.memoryTypes[memoryTypeNdx].heapIndex].push_back(MemoryType(memoryTypeNdx, memoryProperties.memoryTypes[memoryTypeNdx]));
            }

            for (deUint32 heapIndex = 0; heapIndex < memoryProperties.memoryHeapCount; heapIndex++)
            {
                const VkMemoryHeap heapInfo = memoryProperties.memoryHeaps[heapIndex];

                if (!memoryTypes[heapIndex].empty())
                {
                    const de::SharedPtr<MemoryHeap> heap (new MemoryHeap(heapInfo, memoryTypes[heapIndex], m_memoryLimits, nonCoherentAtomSize, m_totalMemTracker));

                    TCU_CHECK_INTERNAL(!heap->full());

                    m_memoryHeaps.push_back(heap);
                }
            }
        }
    }

    ~RandomMemoryMappingInstance (void)
    {
    }

    tcu::TestStatus iterate (void)
    {
        const size_t opCount                    = 100;
        const float  memoryOpProbability        = 0.5f;  // 0.50
        const float  flushInvalidateProbability = 0.4f;  // 0.20
        const float  mapProbability             = 0.50f; // 0.15
        const float  unmapProbability           = 0.25f; // 0.075

        const float  allocProbability           = 0.75f; // Versus free
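        // The trailing comments give the approximate effective probability of
        // each branch after the earlier branches have been considered.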
1481
1482 const VkDevice device = m_context.getDevice();
1483 const DeviceInterface& vkd = m_context.getDeviceInterface();
1484
1485 const VkDeviceSize sysMemUsage = (m_memoryLimits.totalDeviceLocalMemory == 0)
1486 ? m_totalMemTracker.getTotalUsage()
1487 : m_totalMemTracker.getUsage(MEMORY_CLASS_SYSTEM);

        if (!m_memoryMappings.empty() && m_rng.getFloat() < memoryOpProbability)
        {
            // Perform operations on mapped memory
            MemoryMapping* const mapping = m_rng.choose<MemoryMapping*>(m_memoryMappings.begin(), m_memoryMappings.end());

            enum Op
            {
                OP_READ = 0,
                OP_WRITE,
                OP_MODIFY,
                OP_LAST
            };

            const Op op = (Op)(m_rng.getUint32() % OP_LAST);

            switch (op)
            {
                case OP_READ:
                    mapping->randomRead(m_rng);
                    break;

                case OP_WRITE:
                    mapping->randomWrite(m_rng);
                    break;

                case OP_MODIFY:
                    mapping->randomModify(m_rng);
                    break;

                default:
                    DE_FATAL("Invalid operation");
            }
        }
        else if (!m_mappedMemoryObjects.empty() && m_rng.getFloat() < flushInvalidateProbability)
        {
            // Flush or invalidate random ranges of a mapped memory object
            MemoryObject* const object = m_rng.choose<MemoryObject*>(m_mappedMemoryObjects.begin(), m_mappedMemoryObjects.end());

            if (m_rng.getBool())
                object->randomFlush(vkd, device, m_rng);
            else
                object->randomInvalidate(vkd, device, m_rng);
        }
        else if (!m_mappedMemoryObjects.empty() && m_rng.getFloat() < unmapProbability)
        {
            // Unmap memory object
            MemoryObject* const object = m_rng.choose<MemoryObject*>(m_mappedMemoryObjects.begin(), m_mappedMemoryObjects.end());

            // Remove mapping
            removeFirstEqual(m_memoryMappings, object->getMapping());

            object->unmap();
            removeFirstEqual(m_mappedMemoryObjects, object);
            m_nonMappedMemoryObjects.push_back(object);

            m_totalMemTracker.free(MEMORY_CLASS_SYSTEM, (VkDeviceSize)m_memoryMappingSysMemSize);
        }
        else if (!m_nonMappedMemoryObjects.empty() &&
                 (m_rng.getFloat() < mapProbability) &&
                 (sysMemUsage + m_memoryMappingSysMemSize <= (VkDeviceSize)m_memoryLimits.totalSystemMemory))
        {
            // Map memory object
            MemoryObject* const  object  = m_rng.choose<MemoryObject*>(m_nonMappedMemoryObjects.begin(), m_nonMappedMemoryObjects.end());
            MemoryMapping* const mapping = object->mapRandom(vkd, device, m_rng);

            m_memoryMappings.push_back(mapping);
            m_mappedMemoryObjects.push_back(object);
            removeFirstEqual(m_nonMappedMemoryObjects, object);

            m_totalMemTracker.allocate(MEMORY_CLASS_SYSTEM, (VkDeviceSize)m_memoryMappingSysMemSize);
        }
        else
        {
            // Partition heaps into those we can still allocate from (non-full)
            // and those we can free from (non-empty)
            vector<MemoryHeap*> nonFullHeaps;
            vector<MemoryHeap*> nonEmptyHeaps;

            if (sysMemUsage + m_memoryObjectSysMemSize <= (VkDeviceSize)m_memoryLimits.totalSystemMemory)
            {
                // For the duration of the partitioning, reserve MemoryObject space from system memory
                m_totalMemTracker.allocate(MEMORY_CLASS_SYSTEM, (VkDeviceSize)m_memoryObjectSysMemSize);

                for (vector<de::SharedPtr<MemoryHeap> >::const_iterator heapIter = m_memoryHeaps.begin();
                     heapIter != m_memoryHeaps.end();
                     ++heapIter)
                {
                    if (!(*heapIter)->full())
                        nonFullHeaps.push_back(heapIter->get());

                    if (!(*heapIter)->empty())
                        nonEmptyHeaps.push_back(heapIter->get());
                }

                m_totalMemTracker.free(MEMORY_CLASS_SYSTEM, (VkDeviceSize)m_memoryObjectSysMemSize);
            }
            else
            {
                // Not enough system memory to even allocate a MemoryObject; only look for non-empty heaps to free from
                for (vector<de::SharedPtr<MemoryHeap> >::const_iterator heapIter = m_memoryHeaps.begin();
                     heapIter != m_memoryHeaps.end();
                     ++heapIter)
                {
                    if (!(*heapIter)->empty())
                        nonEmptyHeaps.push_back(heapIter->get());
                }
            }

            if (!nonFullHeaps.empty() && (nonEmptyHeaps.empty() || m_rng.getFloat() < allocProbability))
            {
                // Reserve MemoryObject from sys mem first
                m_totalMemTracker.allocate(MEMORY_CLASS_SYSTEM, (VkDeviceSize)m_memoryObjectSysMemSize);

                // Allocate a new memory object
                MemoryHeap* const   heap   = m_rng.choose<MemoryHeap*>(nonFullHeaps.begin(), nonFullHeaps.end());
                MemoryObject* const object = heap->allocateRandom(vkd, device, m_rng);

                m_nonMappedMemoryObjects.push_back(object);
            }
            else
            {
                // Free a memory object
                MemoryHeap* const   heap   = m_rng.choose<MemoryHeap*>(nonEmptyHeaps.begin(), nonEmptyHeaps.end());
                MemoryObject* const object = heap->getRandomObject(m_rng);

                // Remove mapping
                if (object->getMapping())
                {
                    removeFirstEqual(m_memoryMappings, object->getMapping());
                    m_totalMemTracker.free(MEMORY_CLASS_SYSTEM, (VkDeviceSize)m_memoryMappingSysMemSize);
                }

                removeFirstEqual(m_mappedMemoryObjects, object);
                removeFirstEqual(m_nonMappedMemoryObjects, object);

                heap->free(object);
                m_totalMemTracker.free(MEMORY_CLASS_SYSTEM, (VkDeviceSize)m_memoryObjectSysMemSize);
            }
        }

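        // One random operation was performed above; keep reporting "incomplete"
        // until opCount operations have been executed.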
        m_opNdx += 1;
        if (m_opNdx == opCount)
            return tcu::TestStatus::pass("Pass");
        else
            return tcu::TestStatus::incomplete();
    }

private:
    const size_t               m_memoryObjectSysMemSize;
    const size_t               m_memoryMappingSysMemSize;
    const PlatformMemoryLimits m_memoryLimits;

    de::Random                 m_rng;
    size_t                     m_opNdx;

    TotalMemoryTracker         m_totalMemTracker;
    vector<de::SharedPtr<MemoryHeap> > m_memoryHeaps;

    vector<MemoryObject*>      m_mappedMemoryObjects;
    vector<MemoryObject*>      m_nonMappedMemoryObjects;
    vector<MemoryMapping*>     m_memoryMappings;
};

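// Op selects how the mapped range is flushed/invalidated (or remapped/freed):
//  - OP_FLUSH / OP_INVALIDATE act on the whole mapped range,
//  - OP_SUB_* act on a subrange in the middle of the mapping,
//  - *_SEPARATE variants use two disjoint ranges covering the mapping,
//  - *_OVERLAPPING variants use two ranges that overlap in the middle.
// The invalidate variants flush the same ranges before invalidating them.
// OP_REMAP and OP_IMPLICIT_UNMAP only set the corresponding TestConfig flags.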
enum Op
{
    OP_NONE = 0,

    OP_FLUSH,
    OP_SUB_FLUSH,
    OP_SUB_FLUSH_SEPARATE,
    OP_SUB_FLUSH_OVERLAPPING,

    OP_INVALIDATE,
    OP_SUB_INVALIDATE,
    OP_SUB_INVALIDATE_SEPARATE,
    OP_SUB_INVALIDATE_OVERLAPPING,

    OP_REMAP,
    OP_IMPLICIT_UNMAP,

    OP_LAST
};

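// Builds the TestConfig for one precise mapping case: allocation size, the
// mapped range, and the flush/invalidate ranges derived from 'op'. For example,
// OP_SUB_FLUSH with mapping (offset = 17, size = 1025) produces the single
// flush range (17 + 1025 / 4, 1025 / 2) = (273, 512).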
TestConfig subMappedConfig (VkDeviceSize       allocationSize,
                            const MemoryRange& mapping,
                            Op                 op,
                            deUint32           seed,
                            AllocationKind     allocationKind)
{
    TestConfig config;

    config.allocationSize = allocationSize;
    config.seed           = seed;
    config.mapping        = mapping;
    config.remap          = false;
    config.implicitUnmap  = false;
    config.allocationKind = allocationKind;

    switch (op)
    {
        case OP_NONE:
            break;

        case OP_REMAP:
            config.remap = true;
            break;

        case OP_IMPLICIT_UNMAP:
            config.implicitUnmap = true;
            break;

        case OP_FLUSH:
            config.flushMappings = vector<MemoryRange>(1, MemoryRange(mapping.offset, mapping.size));
            break;

        case OP_SUB_FLUSH:
            DE_ASSERT(mapping.size / 4 > 0);

            config.flushMappings = vector<MemoryRange>(1, MemoryRange(mapping.offset + mapping.size / 4, mapping.size / 2));
            break;

        case OP_SUB_FLUSH_SEPARATE:
            DE_ASSERT(mapping.size / 2 > 0);

            config.flushMappings.push_back(MemoryRange(mapping.offset + mapping.size / 2, mapping.size - (mapping.size / 2)));
            config.flushMappings.push_back(MemoryRange(mapping.offset, mapping.size / 2));

            break;

        case OP_SUB_FLUSH_OVERLAPPING:
            DE_ASSERT((mapping.size / 3) > 0);

            config.flushMappings.push_back(MemoryRange(mapping.offset + mapping.size / 3, mapping.size - (mapping.size / 2)));
            config.flushMappings.push_back(MemoryRange(mapping.offset, (2 * mapping.size) / 3));

            break;

        case OP_INVALIDATE:
            config.flushMappings      = vector<MemoryRange>(1, MemoryRange(mapping.offset, mapping.size));
            config.invalidateMappings = vector<MemoryRange>(1, MemoryRange(mapping.offset, mapping.size));
            break;

        case OP_SUB_INVALIDATE:
            DE_ASSERT(mapping.size / 4 > 0);

            config.flushMappings      = vector<MemoryRange>(1, MemoryRange(mapping.offset + mapping.size / 4, mapping.size / 2));
            config.invalidateMappings = vector<MemoryRange>(1, MemoryRange(mapping.offset + mapping.size / 4, mapping.size / 2));
            break;

        case OP_SUB_INVALIDATE_SEPARATE:
            DE_ASSERT(mapping.size / 2 > 0);

            config.flushMappings.push_back(MemoryRange(mapping.offset + mapping.size / 2, mapping.size - (mapping.size / 2)));
            config.flushMappings.push_back(MemoryRange(mapping.offset, mapping.size / 2));

            config.invalidateMappings.push_back(MemoryRange(mapping.offset + mapping.size / 2, mapping.size - (mapping.size / 2)));
            config.invalidateMappings.push_back(MemoryRange(mapping.offset, mapping.size / 2));

            break;

        case OP_SUB_INVALIDATE_OVERLAPPING:
            DE_ASSERT((mapping.size / 3) > 0);

            config.flushMappings.push_back(MemoryRange(mapping.offset + mapping.size / 3, mapping.size - (mapping.size / 2)));
            config.flushMappings.push_back(MemoryRange(mapping.offset, (2 * mapping.size) / 3));

            config.invalidateMappings.push_back(MemoryRange(mapping.offset + mapping.size / 3, mapping.size - (mapping.size / 2)));
            config.invalidateMappings.push_back(MemoryRange(mapping.offset, (2 * mapping.size) / 3));

            break;

        default:
            DE_FATAL("Unknown Op");
            return TestConfig();
    }
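
    // If a computed subrange would extend past the end of the mapping, use
    // VK_WHOLE_SIZE instead: VkMappedMemoryRange requires the range to lie
    // within the mapped region (after rounding to nonCoherentAtomSize), and
    // VK_WHOLE_SIZE means "from offset to the end of the mapping".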
    for (size_t ndx = 0; ndx < config.flushMappings.size(); ndx++)
    {
        if (config.flushMappings[ndx].offset + config.flushMappings[ndx].size > mapping.size)
            config.flushMappings[ndx].size = VK_WHOLE_SIZE;
    }

    for (size_t ndx = 0; ndx < config.invalidateMappings.size(); ndx++)
    {
        if (config.invalidateMappings[ndx].offset + config.invalidateMappings[ndx].size > mapping.size)
            config.invalidateMappings[ndx].size = VK_WHOLE_SIZE;
    }

    return config;
}

TestConfig fullMappedConfig (VkDeviceSize   allocationSize,
                             Op             op,
                             deUint32       seed,
                             AllocationKind allocationKind)
{
    return subMappedConfig(allocationSize, MemoryRange(0, allocationSize), op, seed, allocationKind);
}

void checkSupport (Context& context, TestConfig config)
{
    if (config.allocationKind == ALLOCATION_KIND_DEDICATED_IMAGE
        || config.allocationKind == ALLOCATION_KIND_DEDICATED_BUFFER)
    {
        context.requireDeviceFunctionality("VK_KHR_dedicated_allocation");
    }
}

} // anonymous

tcu::TestCaseGroup* createMappingTests (tcu::TestContext& testCtx)
{
    de::MovePtr<tcu::TestCaseGroup> group     (new tcu::TestCaseGroup(testCtx, "mapping", "Memory mapping tests."));
    de::MovePtr<tcu::TestCaseGroup> dedicated (new tcu::TestCaseGroup(testCtx, "dedicated_alloc", "Dedicated memory mapping tests."));
    de::MovePtr<tcu::TestCaseGroup> sets[] =
    {
        de::MovePtr<tcu::TestCaseGroup> (new tcu::TestCaseGroup(testCtx, "suballocation", "Suballocated memory mapping tests.")),
        de::MovePtr<tcu::TestCaseGroup> (new tcu::TestCaseGroup(testCtx, "buffer", "Buffer dedicated memory mapping tests.")),
        de::MovePtr<tcu::TestCaseGroup> (new tcu::TestCaseGroup(testCtx, "image", "Image dedicated memory mapping tests."))
    };
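
    // The groups built below yield test paths of the form (assuming the usual
    // dEQP-VK.memory prefix), e.g.:
    //   mapping.suballocation.full.33.flush
    //   mapping.dedicated_alloc.buffer.sub.4087.offset_17.size_255.subflush
    //   mapping.suballocation.random.0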

    const VkDeviceSize allocationSizes[] =
    {
        0, 33, 257, 4087, 8095, 1*1024*1024 + 1,
    };

    const VkDeviceSize offsets[] =
    {
        0, 17, 129, 255, 1025, 32*1024+1
    };

    const VkDeviceSize sizes[] =
    {
        31, 255, 1025, 4085, 1*1024*1024 - 1
    };
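
    // Note: the sizes and offsets above are deliberately "odd" (not multiples
    // of common power-of-two alignments); this appears intended to exercise
    // the nonCoherentAtomSize rounding rules for flushed/invalidated ranges.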

    const struct
    {
        const Op          op;
        const char* const name;
    } ops[] =
    {
        { OP_NONE,                       "simple"                    },
        { OP_REMAP,                      "remap"                     },
        { OP_IMPLICIT_UNMAP,             "implicit_unmap"            },
        { OP_FLUSH,                      "flush"                     },
        { OP_SUB_FLUSH,                  "subflush"                  },
        { OP_SUB_FLUSH_SEPARATE,         "subflush_separate"         },
        { OP_SUB_FLUSH_OVERLAPPING,      "subflush_overlapping"      },

        { OP_INVALIDATE,                 "invalidate"                },
        { OP_SUB_INVALIDATE,             "subinvalidate"             },
        { OP_SUB_INVALIDATE_SEPARATE,    "subinvalidate_separate"    },
        { OP_SUB_INVALIDATE_OVERLAPPING, "subinvalidate_overlapping" }
    };

    // .full
    for (size_t allocationKindNdx = 0; allocationKindNdx < ALLOCATION_KIND_LAST; allocationKindNdx++)
    {
        de::MovePtr<tcu::TestCaseGroup> fullGroup (new tcu::TestCaseGroup(testCtx, "full", "Map memory completely."));

        for (size_t allocationSizeNdx = 0; allocationSizeNdx < DE_LENGTH_OF_ARRAY(allocationSizes); allocationSizeNdx++)
        {
            const VkDeviceSize allocationSize = allocationSizes[allocationSizeNdx];
            const string       sizeGroupName  = (allocationSize == 0) ? "variable" : de::toString(allocationSize);

            de::MovePtr<tcu::TestCaseGroup> allocationSizeGroup (new tcu::TestCaseGroup(testCtx, sizeGroupName.c_str(), ""));

            for (size_t opNdx = 0; opNdx < DE_LENGTH_OF_ARRAY(ops); opNdx++)
            {
                const Op op = ops[opNdx].op;

                // OP_IMPLICIT_UNMAP is only paired with the variable (0) allocation size;
                // all other ops skip it
                if (((allocationSize == 0) && (op != OP_IMPLICIT_UNMAP)) ||
                    ((allocationSize != 0) && (op == OP_IMPLICIT_UNMAP)))
                    continue;

                const char* const name   = ops[opNdx].name;
                const deUint32    seed   = (deUint32)(opNdx * allocationSizeNdx);
                const TestConfig  config = fullMappedConfig(allocationSize, op, seed, static_cast<AllocationKind>(allocationKindNdx));

                addFunctionCase(allocationSizeGroup.get(), name, name, checkSupport, testMemoryMapping, config);
            }

            fullGroup->addChild(allocationSizeGroup.release());
        }

        sets[allocationKindNdx]->addChild(fullGroup.release());
    }

    // .sub
    for (size_t allocationKindNdx = 0; allocationKindNdx < ALLOCATION_KIND_LAST; allocationKindNdx++)
    {
        de::MovePtr<tcu::TestCaseGroup> subGroup (new tcu::TestCaseGroup(testCtx, "sub", "Map part of the memory."));

        for (size_t allocationSizeNdx = 0; allocationSizeNdx < DE_LENGTH_OF_ARRAY(allocationSizes); allocationSizeNdx++)
        {
            const VkDeviceSize allocationSize = allocationSizes[allocationSizeNdx];
            const string       sizeGroupName  = (allocationSize == 0) ? "variable" : de::toString(allocationSize);

            de::MovePtr<tcu::TestCaseGroup> allocationSizeGroup (new tcu::TestCaseGroup(testCtx, sizeGroupName.c_str(), ""));

            for (size_t offsetNdx = 0; offsetNdx < DE_LENGTH_OF_ARRAY(offsets); offsetNdx++)
            {
                const VkDeviceSize offset = offsets[offsetNdx];

                if (offset >= allocationSize)
                    continue;

                de::MovePtr<tcu::TestCaseGroup> offsetGroup (new tcu::TestCaseGroup(testCtx, ("offset_" + de::toString(offset)).c_str(), ""));

                for (size_t sizeNdx = 0; sizeNdx < DE_LENGTH_OF_ARRAY(sizes); sizeNdx++)
                {
                    const VkDeviceSize size = sizes[sizeNdx];

                    if (offset + size > allocationSize)
                        continue;

                    // Skip cases that would map the whole allocation; those are covered by .full
                    if (offset == 0 && size == allocationSize)
                        continue;

                    de::MovePtr<tcu::TestCaseGroup> sizeGroup (new tcu::TestCaseGroup(testCtx, ("size_" + de::toString(size)).c_str(), ""));

                    for (size_t opNdx = 0; opNdx < DE_LENGTH_OF_ARRAY(ops); opNdx++)
                    {
                        const Op op = ops[opNdx].op;

                        // OP_IMPLICIT_UNMAP is only paired with the variable (0) allocation size;
                        // all other ops skip it
                        if (((allocationSize == 0) && (op != OP_IMPLICIT_UNMAP)) ||
                            ((allocationSize != 0) && (op == OP_IMPLICIT_UNMAP)))
                            continue;

                        const deUint32    seed   = (deUint32)(opNdx * allocationSizeNdx);
                        const char* const name   = ops[opNdx].name;
                        const TestConfig  config = subMappedConfig(allocationSize, MemoryRange(offset, size), op, seed, static_cast<AllocationKind>(allocationKindNdx));

                        addFunctionCase(sizeGroup.get(), name, name, checkSupport, testMemoryMapping, config);
                    }

                    offsetGroup->addChild(sizeGroup.release());
                }

                allocationSizeGroup->addChild(offsetGroup.release());
            }

            subGroup->addChild(allocationSizeGroup.release());
        }

        sets[allocationKindNdx]->addChild(subGroup.release());
    }

    // .random
    {
        de::MovePtr<tcu::TestCaseGroup> randomGroup (new tcu::TestCaseGroup(testCtx, "random", "Random memory mapping tests."));
        de::Random                      rng         (3927960301u);

        for (size_t ndx = 0; ndx < 100; ndx++)
        {
            const deUint32    seed = rng.getUint32();
            const std::string name = de::toString(ndx);

            randomGroup->addChild(new InstanceFactory1<RandomMemoryMappingInstance, deUint32>(testCtx, tcu::NODETYPE_SELF_VALIDATE, name, "Random case", seed));
        }

        // Random tests only use suballocated memory
        sets[static_cast<deUint32>(ALLOCATION_KIND_SUBALLOCATED)]->addChild(randomGroup.release());
    }

    group->addChild(sets[0].release());
    dedicated->addChild(sets[1].release());
    dedicated->addChild(sets[2].release());
    group->addChild(dedicated.release());

    return group.release();
}

} // memory
} // vkt