/*-------------------------------------------------------------------------
 * Vulkan CTS Framework
 * --------------------
 *
 * Copyright (c) 2015 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*!
 * \file
 * \brief Memory allocation callback utilities.
 *//*--------------------------------------------------------------------*/

#include "vkAllocationCallbackUtil.hpp"
#include "tcuFormatUtil.hpp"
#include "tcuTestLog.hpp"
#include "deSTLUtil.hpp"
#include "deMemory.h"

#include <map>

namespace vk
{

// System default allocator

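// The callbacks below are plain VKAPI trampolines over the aligned host
// allocation helpers from deMemory.h (deAlignedMalloc, deAlignedRealloc,
// deAlignedFree). They do no tracking of their own; getSystemAllocator()
// exposes them as a ready-made VkAllocationCallbacks instance for tests that
// just need a valid allocator to pass through.
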
static VKAPI_ATTR void* VKAPI_CALL systemAllocate (void*, size_t size, size_t alignment, VkSystemAllocationScope)
{
	if (size > 0)
		return deAlignedMalloc(size, (deUint32)alignment);
	else
		return DE_NULL;
}

static VKAPI_ATTR void VKAPI_CALL systemFree (void*, void* pMem)
{
	deAlignedFree(pMem);
}

static VKAPI_ATTR void* VKAPI_CALL systemReallocate (void*, void* pOriginal, size_t size, size_t alignment, VkSystemAllocationScope)
{
	return deAlignedRealloc(pOriginal, size, alignment);
}

static VKAPI_ATTR void VKAPI_CALL systemInternalAllocationNotification (void*, size_t, VkInternalAllocationType, VkSystemAllocationScope)
{
}

static VKAPI_ATTR void VKAPI_CALL systemInternalFreeNotification (void*, size_t, VkInternalAllocationType, VkSystemAllocationScope)
{
}

static const VkAllocationCallbacks s_systemAllocator =
{
	DE_NULL,		// pUserData
	systemAllocate,
	systemReallocate,
	systemFree,
	systemInternalAllocationNotification,
	systemInternalFreeNotification,
};

const VkAllocationCallbacks* getSystemAllocator (void)
{
	return &s_systemAllocator;
}
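
// Usage sketch (assumed context: 'vkd' is a DeviceInterface and 'device' a
// VkDevice from the surrounding test; the object type is just an example).
// The system allocator can be handed to any entry point that accepts a
// VkAllocationCallbacks pointer, e.g.
//
//   VkSampler sampler = DE_NULL;
//   VK_CHECK(vkd.createSampler(device, &samplerCreateInfo, getSystemAllocator(), &sampler));
//
// Nothing gets recorded; see AllocationCallbackRecorder below for a tracking
// allocator.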

// AllocationCallbacks

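// AllocationCallbacks is the C++ adapter base class: makeCallbacks() builds a
// VkAllocationCallbacks whose pUserData points back at the object itself, and
// the static trampolines below cast pUserData back to AllocationCallbacks*
// and forward each call to the corresponding virtual method
// (allocate/reallocate/free/notifyInternal*).
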
static VKAPI_ATTR void* VKAPI_CALL allocationCallback (void* pUserData, size_t size, size_t alignment, VkSystemAllocationScope allocationScope)
{
	return reinterpret_cast<AllocationCallbacks*>(pUserData)->allocate(size, alignment, allocationScope);
}

static VKAPI_ATTR void* VKAPI_CALL reallocationCallback (void* pUserData, void* pOriginal, size_t size, size_t alignment, VkSystemAllocationScope allocationScope)
{
	return reinterpret_cast<AllocationCallbacks*>(pUserData)->reallocate(pOriginal, size, alignment, allocationScope);
}

static VKAPI_ATTR void VKAPI_CALL freeCallback (void* pUserData, void* pMem)
{
	reinterpret_cast<AllocationCallbacks*>(pUserData)->free(pMem);
}

static VKAPI_ATTR void VKAPI_CALL internalAllocationNotificationCallback (void* pUserData, size_t size, VkInternalAllocationType allocationType, VkSystemAllocationScope allocationScope)
{
	reinterpret_cast<AllocationCallbacks*>(pUserData)->notifyInternalAllocation(size, allocationType, allocationScope);
}

static VKAPI_ATTR void VKAPI_CALL internalFreeNotificationCallback (void* pUserData, size_t size, VkInternalAllocationType allocationType, VkSystemAllocationScope allocationScope)
{
	reinterpret_cast<AllocationCallbacks*>(pUserData)->notifyInternalFree(size, allocationType, allocationScope);
}

static VkAllocationCallbacks makeCallbacks (AllocationCallbacks* object)
{
	const VkAllocationCallbacks callbacks =
	{
		reinterpret_cast<void*>(object),
		allocationCallback,
		reallocationCallback,
		freeCallback,
		internalAllocationNotificationCallback,
		internalFreeNotificationCallback
	};
	return callbacks;
}

AllocationCallbacks::AllocationCallbacks (void)
	: m_callbacks(makeCallbacks(this))
{
}

AllocationCallbacks::~AllocationCallbacks (void)
{
}

// AllocationCallbackRecord

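// AllocationCallbackRecord is a small tagged union; the static factory
// functions below fill in the union member that matches the record type so
// that validation and logging can replay the recorded call stream later.
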
AllocationCallbackRecord AllocationCallbackRecord::allocation (size_t size, size_t alignment, VkSystemAllocationScope scope, void* returnedPtr)
{
	AllocationCallbackRecord record;

	record.type							= TYPE_ALLOCATION;
	record.data.allocation.size			= size;
	record.data.allocation.alignment	= alignment;
	record.data.allocation.scope		= scope;
	record.data.allocation.returnedPtr	= returnedPtr;

	return record;
}

AllocationCallbackRecord AllocationCallbackRecord::reallocation (void* original, size_t size, size_t alignment, VkSystemAllocationScope scope, void* returnedPtr)
{
	AllocationCallbackRecord record;

	record.type								= TYPE_REALLOCATION;
	record.data.reallocation.original		= original;
	record.data.reallocation.size			= size;
	record.data.reallocation.alignment		= alignment;
	record.data.reallocation.scope			= scope;
	record.data.reallocation.returnedPtr	= returnedPtr;

	return record;
}

AllocationCallbackRecord AllocationCallbackRecord::free (void* mem)
{
	AllocationCallbackRecord record;

	record.type				= TYPE_FREE;
	record.data.free.mem	= mem;

	return record;
}

AllocationCallbackRecord AllocationCallbackRecord::internalAllocation (size_t size, VkInternalAllocationType type, VkSystemAllocationScope scope)
{
	AllocationCallbackRecord record;

	record.type								= TYPE_INTERNAL_ALLOCATION;
	record.data.internalAllocation.size		= size;
	record.data.internalAllocation.type		= type;
	record.data.internalAllocation.scope	= scope;

	return record;
}

AllocationCallbackRecord AllocationCallbackRecord::internalFree (size_t size, VkInternalAllocationType type, VkSystemAllocationScope scope)
{
	AllocationCallbackRecord record;

	record.type								= TYPE_INTERNAL_FREE;
	record.data.internalAllocation.size		= size;
	record.data.internalAllocation.type		= type;
	record.data.internalAllocation.scope	= scope;

	return record;
}

// ChainedAllocator

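// ChainedAllocator forwards every callback to another VkAllocationCallbacks
// instance (the "next" allocator in the chain). This is what lets decorators
// such as AllocationCallbackRecorder and DeterministicFailAllocator below be
// stacked on top of any base allocator.
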
ChainedAllocator::ChainedAllocator (const VkAllocationCallbacks* nextAllocator)
	: m_nextAllocator(nextAllocator)
{
}

ChainedAllocator::~ChainedAllocator (void)
{
}

void* ChainedAllocator::allocate (size_t size, size_t alignment, VkSystemAllocationScope allocationScope)
{
	return m_nextAllocator->pfnAllocation(m_nextAllocator->pUserData, size, alignment, allocationScope);
}

void* ChainedAllocator::reallocate (void* original, size_t size, size_t alignment, VkSystemAllocationScope allocationScope)
{
	return m_nextAllocator->pfnReallocation(m_nextAllocator->pUserData, original, size, alignment, allocationScope);
}

void ChainedAllocator::free (void* mem)
{
	m_nextAllocator->pfnFree(m_nextAllocator->pUserData, mem);
}

void ChainedAllocator::notifyInternalAllocation (size_t size, VkInternalAllocationType allocationType, VkSystemAllocationScope allocationScope)
{
	m_nextAllocator->pfnInternalAllocation(m_nextAllocator->pUserData, size, allocationType, allocationScope);
}

void ChainedAllocator::notifyInternalFree (size_t size, VkInternalAllocationType allocationType, VkSystemAllocationScope allocationScope)
{
	m_nextAllocator->pfnInternalFree(m_nextAllocator->pUserData, size, allocationType, allocationScope);
}

// AllocationCallbackRecorder

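// AllocationCallbackRecorder forwards each call to the chained allocator and
// appends an AllocationCallbackRecord describing it to m_records. The record
// stream can then be handed to validateAllocationCallbacks() or
// validateAndLog() once the objects using the allocator have been destroyed.
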
AllocationCallbackRecorder::AllocationCallbackRecorder (const VkAllocationCallbacks* allocator, deUint32 callCountHint)
	: ChainedAllocator	(allocator)
	, m_records			(callCountHint)
{
}

AllocationCallbackRecorder::~AllocationCallbackRecorder (void)
{
}

void* AllocationCallbackRecorder::allocate (size_t size, size_t alignment, VkSystemAllocationScope allocationScope)
{
	void* const	ptr	= ChainedAllocator::allocate(size, alignment, allocationScope);

	m_records.append(AllocationCallbackRecord::allocation(size, alignment, allocationScope, ptr));

	return ptr;
}

void* AllocationCallbackRecorder::reallocate (void* original, size_t size, size_t alignment, VkSystemAllocationScope allocationScope)
{
	void* const	ptr	= ChainedAllocator::reallocate(original, size, alignment, allocationScope);

	m_records.append(AllocationCallbackRecord::reallocation(original, size, alignment, allocationScope, ptr));

	return ptr;
}

void AllocationCallbackRecorder::free (void* mem)
{
	ChainedAllocator::free(mem);

	m_records.append(AllocationCallbackRecord::free(mem));
}

void AllocationCallbackRecorder::notifyInternalAllocation (size_t size, VkInternalAllocationType allocationType, VkSystemAllocationScope allocationScope)
{
	ChainedAllocator::notifyInternalAllocation(size, allocationType, allocationScope);

	m_records.append(AllocationCallbackRecord::internalAllocation(size, allocationType, allocationScope));
}

void AllocationCallbackRecorder::notifyInternalFree (size_t size, VkInternalAllocationType allocationType, VkSystemAllocationScope allocationScope)
{
	ChainedAllocator::notifyInternalFree(size, allocationType, allocationScope);

	m_records.append(AllocationCallbackRecord::internalFree(size, allocationType, allocationScope));
}

// DeterministicFailAllocator

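// DeterministicFailAllocator lets the first numPassingAllocs allocation and
// reallocation calls through to the chained allocator and returns null for
// every call after that; MODE_DO_NOT_COUNT disables the counter entirely so
// all calls pass. The allocation index is advanced atomically, so the
// allocator can be shared between threads.
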
DeterministicFailAllocator::DeterministicFailAllocator (const VkAllocationCallbacks* allocator, Mode mode, deUint32 numPassingAllocs)
	: ChainedAllocator	(allocator)
{
	reset(mode, numPassingAllocs);
}

DeterministicFailAllocator::~DeterministicFailAllocator (void)
{
}

void DeterministicFailAllocator::reset (Mode mode, deUint32 numPassingAllocs)
{
	m_mode				= mode;
	m_numPassingAllocs	= numPassingAllocs;
	m_allocationNdx		= 0;
}

void* DeterministicFailAllocator::allocate (size_t size, size_t alignment, VkSystemAllocationScope allocationScope)
{
	if ((m_mode == MODE_DO_NOT_COUNT) ||
		(deAtomicIncrementUint32(&m_allocationNdx) <= m_numPassingAllocs))
		return ChainedAllocator::allocate(size, alignment, allocationScope);
	else
		return DE_NULL;
}

void* DeterministicFailAllocator::reallocate (void* original, size_t size, size_t alignment, VkSystemAllocationScope allocationScope)
{
	if ((m_mode == MODE_DO_NOT_COUNT) ||
		(deAtomicIncrementUint32(&m_allocationNdx) <= m_numPassingAllocs))
		return ChainedAllocator::reallocate(original, size, alignment, allocationScope);
	else
		return DE_NULL;
}

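// Typical out-of-memory injection pattern (sketch only; 'countingMode' stands
// for whichever counting Mode value the test uses, and tryCreateObject() is a
// placeholder for the object creation being exercised): allow N allocations
// to succeed, expect VK_ERROR_OUT_OF_HOST_MEMORY until N is large enough.
//
//   for (deUint32 passingAllocs = 0; ; ++passingAllocs)
//   {
//       DeterministicFailAllocator	failAllocator	(getSystemAllocator(), countingMode, passingAllocs);
//       AllocationCallbackRecorder	recorder		(failAllocator.getCallbacks());
//
//       if (tryCreateObject(recorder.getCallbacks()) == VK_SUCCESS)
//           break; // creation succeeded with this allocation budget
//   }
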
// Utils

AllocationCallbackValidationResults::AllocationCallbackValidationResults (void)
{
	deMemset(internalAllocationTotal, 0, sizeof(internalAllocationTotal));
}

void AllocationCallbackValidationResults::clear (void)
{
	liveAllocations.clear();
	violations.clear();
	deMemset(internalAllocationTotal, 0, sizeof(internalAllocationTotal));
}

namespace
{

struct AllocationSlot
{
	AllocationCallbackRecord	record;
	bool						isLive;

	AllocationSlot (void)
		: isLive	(false)
	{}

	AllocationSlot (const AllocationCallbackRecord& record_, bool isLive_)
		: record	(record_)
		, isLive	(isLive_)
	{}
};

size_t getAlignment (const AllocationCallbackRecord& record)
{
	if (record.type == AllocationCallbackRecord::TYPE_ALLOCATION)
		return record.data.allocation.alignment;
	else if (record.type == AllocationCallbackRecord::TYPE_REALLOCATION)
		return record.data.reallocation.alignment;
	else
	{
		DE_ASSERT(false);
		return 0;
	}
}

} // anonymous

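// validateAllocationCallbacks() replays the recorded call stream in order: it
// tracks which pointers are currently live via a pointer-to-slot map,
// accumulates per-(type, scope) internal allocation totals, and pushes an
// AllocationCallbackViolation for anything that breaks the rules (invalid
// scope or alignment, double free, freeing or reallocating an unknown
// pointer, reallocation with a different alignment, internal totals going
// negative). Whatever is still live at the end is reported in
// results->liveAllocations.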
void validateAllocationCallbacks (const AllocationCallbackRecorder& recorder, AllocationCallbackValidationResults* results)
{
	std::vector<AllocationSlot>		allocations;
	std::map<void*, size_t>			ptrToSlotIndex;

	DE_ASSERT(results->liveAllocations.empty() && results->violations.empty());

	for (AllocationCallbackRecorder::RecordIterator callbackIter = recorder.getRecordsBegin();
		 callbackIter != recorder.getRecordsEnd();
		 ++callbackIter)
	{
		const AllocationCallbackRecord&		record	= *callbackIter;

		// Validate scope
		{
			const VkSystemAllocationScope* const	scopePtr	= record.type == AllocationCallbackRecord::TYPE_ALLOCATION				? &record.data.allocation.scope
																: record.type == AllocationCallbackRecord::TYPE_REALLOCATION			? &record.data.reallocation.scope
																: record.type == AllocationCallbackRecord::TYPE_INTERNAL_ALLOCATION	? &record.data.internalAllocation.scope
																: record.type == AllocationCallbackRecord::TYPE_INTERNAL_FREE			? &record.data.internalAllocation.scope
																: DE_NULL;

			if (scopePtr && !de::inBounds(*scopePtr, (VkSystemAllocationScope)0, VK_SYSTEM_ALLOCATION_SCOPE_LAST))
				results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_INVALID_ALLOCATION_SCOPE));
		}

		// Validate alignment
		if (record.type == AllocationCallbackRecord::TYPE_ALLOCATION ||
			record.type == AllocationCallbackRecord::TYPE_REALLOCATION)
		{
			if (!deIsPowerOfTwoSize(getAlignment(record)))
				results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_INVALID_ALIGNMENT));
		}

		// Validate actual allocation behavior
		switch (record.type)
		{
			case AllocationCallbackRecord::TYPE_ALLOCATION:
			{
				if (record.data.allocation.returnedPtr)
				{
					if (!de::contains(ptrToSlotIndex, record.data.allocation.returnedPtr))
					{
						ptrToSlotIndex[record.data.allocation.returnedPtr] = allocations.size();
						allocations.push_back(AllocationSlot(record, true));
					}
					else
					{
						const size_t		slotNdx		= ptrToSlotIndex[record.data.allocation.returnedPtr];
						if (!allocations[slotNdx].isLive)
						{
							allocations[slotNdx].isLive = true;
							allocations[slotNdx].record = record;
						}
						else
						{
							// we should not have multiple live allocations with the same pointer
							DE_ASSERT(false);
						}
					}
				}

				break;
			}

			case AllocationCallbackRecord::TYPE_REALLOCATION:
			{
				if (de::contains(ptrToSlotIndex, record.data.reallocation.original))
				{
					const size_t		origSlotNdx		= ptrToSlotIndex[record.data.reallocation.original];
					AllocationSlot&		origSlot		= allocations[origSlotNdx];

					DE_ASSERT(record.data.reallocation.original != DE_NULL);

					if (record.data.reallocation.size > 0)
					{
						if (getAlignment(origSlot.record) != record.data.reallocation.alignment)
							results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_REALLOC_DIFFERENT_ALIGNMENT));

						if (record.data.reallocation.original == record.data.reallocation.returnedPtr)
						{
							if (!origSlot.isLive)
							{
								results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_REALLOC_FREED_PTR));
								origSlot.isLive	= true; // Mark live to suppress further errors
							}

							// Just update slot record
							allocations[origSlotNdx].record = record;
						}
						else
						{
							if (record.data.reallocation.returnedPtr)
							{
								allocations[origSlotNdx].isLive = false;
								if (!de::contains(ptrToSlotIndex, record.data.reallocation.returnedPtr))
								{
									ptrToSlotIndex[record.data.reallocation.returnedPtr] = allocations.size();
									allocations.push_back(AllocationSlot(record, true));
								}
								else
								{
									const size_t slotNdx = ptrToSlotIndex[record.data.reallocation.returnedPtr];
									if (!allocations[slotNdx].isLive)
									{
										allocations[slotNdx].isLive = true;
										allocations[slotNdx].record = record;
									}
									else
									{
										// we should not have multiple live allocations with the same pointer
										DE_ASSERT(false);
									}
								}
							}
							// else original ptr remains valid and live
						}
					}
					else
					{
						DE_ASSERT(!record.data.reallocation.returnedPtr);

						origSlot.isLive = false;
					}
				}
				else
				{
					if (record.data.reallocation.original)
						results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_REALLOC_NOT_ALLOCATED_PTR));

					if (record.data.reallocation.returnedPtr)
					{
						if (!de::contains(ptrToSlotIndex, record.data.reallocation.returnedPtr))
						{
							ptrToSlotIndex[record.data.reallocation.returnedPtr] = allocations.size();
							allocations.push_back(AllocationSlot(record, true));
						}
						else
						{
							const size_t slotNdx = ptrToSlotIndex[record.data.reallocation.returnedPtr];
							DE_ASSERT(!allocations[slotNdx].isLive);
							allocations[slotNdx].isLive = true;
							allocations[slotNdx].record = record;
						}
					}
				}

				break;
			}

			case AllocationCallbackRecord::TYPE_FREE:
			{
				if (record.data.free.mem != DE_NULL) // Freeing null pointer is valid and ignored
				{
					if (de::contains(ptrToSlotIndex, record.data.free.mem))
					{
						const size_t	slotNdx		= ptrToSlotIndex[record.data.free.mem];

						if (allocations[slotNdx].isLive)
							allocations[slotNdx].isLive = false;
						else
							results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_DOUBLE_FREE));
					}
					else
						results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_FREE_NOT_ALLOCATED_PTR));
				}

				break;
			}

			case AllocationCallbackRecord::TYPE_INTERNAL_ALLOCATION:
			case AllocationCallbackRecord::TYPE_INTERNAL_FREE:
			{
				if (de::inBounds(record.data.internalAllocation.type, (VkInternalAllocationType)0, VK_INTERNAL_ALLOCATION_TYPE_LAST))
				{
					size_t* const		totalAllocSizePtr	= &results->internalAllocationTotal[record.data.internalAllocation.type][record.data.internalAllocation.scope];
					const size_t		size				= record.data.internalAllocation.size;

					if (record.type == AllocationCallbackRecord::TYPE_INTERNAL_FREE)
					{
						if (*totalAllocSizePtr < size)
						{
							results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_NEGATIVE_INTERNAL_ALLOCATION_TOTAL));
							*totalAllocSizePtr = 0; // Reset to 0 to suppress compound errors
						}
						else
							*totalAllocSizePtr -= size;
					}
					else
						*totalAllocSizePtr += size;
				}
				else
					results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_INVALID_INTERNAL_ALLOCATION_TYPE));

				break;
			}

			default:
				DE_ASSERT(false);
		}
	}

	DE_ASSERT(!de::contains(ptrToSlotIndex, DE_NULL));

	// Collect live allocations
	for (std::vector<AllocationSlot>::const_iterator slotIter = allocations.begin();
		 slotIter != allocations.end();
		 ++slotIter)
	{
		if (slotIter->isLive)
			results->liveAllocations.push_back(slotIter->record);
	}
}

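// checkAndLog() writes every violation and leak to the test log and returns
// true only if the results are clean. allowedLiveAllocScopeBits is a bitmask
// indexed by VkSystemAllocationScope (bit (1u << scope)); allocations that
// remain live in an allowed scope are not counted as leaks.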
bool checkAndLog (tcu::TestLog& log, const AllocationCallbackValidationResults& results, deUint32 allowedLiveAllocScopeBits)
{
	using tcu::TestLog;

	size_t	numLeaks	= 0;

	if (!results.violations.empty())
	{
		for (size_t violationNdx = 0; violationNdx < results.violations.size(); ++violationNdx)
		{
			log << TestLog::Message << "VIOLATION " << (violationNdx+1)
													<< ": " << results.violations[violationNdx]
													<< " (" << results.violations[violationNdx].record << ")"
				<< TestLog::EndMessage;
		}

		log << TestLog::Message << "ERROR: Found " << results.violations.size() << " invalid allocation callbacks!" << TestLog::EndMessage;
	}

	// Verify live allocations
	for (size_t liveNdx = 0; liveNdx < results.liveAllocations.size(); ++liveNdx)
	{
		const AllocationCallbackRecord&		record	= results.liveAllocations[liveNdx];
		const VkSystemAllocationScope		scope	= record.type == AllocationCallbackRecord::TYPE_ALLOCATION		? record.data.allocation.scope
													: record.type == AllocationCallbackRecord::TYPE_REALLOCATION	? record.data.reallocation.scope
													: VK_SYSTEM_ALLOCATION_SCOPE_LAST;

		DE_ASSERT(de::inBounds(scope, (VkSystemAllocationScope)0, VK_SYSTEM_ALLOCATION_SCOPE_LAST));

		if ((allowedLiveAllocScopeBits & (1u << scope)) == 0)
		{
			log << TestLog::Message << "LEAK " << (numLeaks+1) << ": " << record << TestLog::EndMessage;
			numLeaks += 1;
		}
	}

	// Verify internal allocations
	for (int internalAllocTypeNdx = 0; internalAllocTypeNdx < VK_INTERNAL_ALLOCATION_TYPE_LAST; ++internalAllocTypeNdx)
	{
		for (int scopeNdx = 0; scopeNdx < VK_SYSTEM_ALLOCATION_SCOPE_LAST; ++scopeNdx)
		{
			const VkInternalAllocationType	type			= (VkInternalAllocationType)internalAllocTypeNdx;
			const VkSystemAllocationScope	scope			= (VkSystemAllocationScope)scopeNdx;
			const size_t					totalAllocated	= results.internalAllocationTotal[type][scope];

			if ((allowedLiveAllocScopeBits & (1u << scopeNdx)) == 0 &&
				totalAllocated > 0)
			{
				log << TestLog::Message << "LEAK " << (numLeaks+1) << ": " << totalAllocated
										<< " bytes of (" << type << ", " << scope << ") internal memory is still allocated"
					<< TestLog::EndMessage;
				numLeaks += 1;
			}
		}
	}

	if (numLeaks > 0)
		log << TestLog::Message << "ERROR: Found " << numLeaks << " memory leaks!" << TestLog::EndMessage;

	return results.violations.empty() && numLeaks == 0;
}

bool validateAndLog (tcu::TestLog& log, const AllocationCallbackRecorder& recorder, deUint32 allowedLiveAllocScopeBits)
{
	AllocationCallbackValidationResults	validationResults;

	validateAllocationCallbacks(recorder, &validationResults);

	return checkAndLog(log, validationResults, allowedLiveAllocScopeBits);
}

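// getLiveSystemAllocationTotal() computes an upper bound for the host memory
// still held through the recorded allocator: each live allocation is assumed
// to consume up to (alignment - 1) extra bytes on top of its size, and any
// outstanding internal allocation totals are added as well. It requires a
// violation-free validation result.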
size_t getLiveSystemAllocationTotal (const AllocationCallbackValidationResults& validationResults)
{
	size_t	allocationTotal	= 0;

	DE_ASSERT(validationResults.violations.empty());

	for (std::vector<AllocationCallbackRecord>::const_iterator alloc = validationResults.liveAllocations.begin();
		 alloc != validationResults.liveAllocations.end();
		 ++alloc)
	{
		DE_ASSERT(alloc->type == AllocationCallbackRecord::TYPE_ALLOCATION ||
				  alloc->type == AllocationCallbackRecord::TYPE_REALLOCATION);

		const size_t	size		= (alloc->type == AllocationCallbackRecord::TYPE_ALLOCATION ? alloc->data.allocation.size : alloc->data.reallocation.size);
		const size_t	alignment	= (alloc->type == AllocationCallbackRecord::TYPE_ALLOCATION ? alloc->data.allocation.alignment : alloc->data.reallocation.alignment);

		allocationTotal += size + alignment - (alignment > 0 ? 1 : 0);
	}

	for (int internalAllocationTypeNdx = 0; internalAllocationTypeNdx < VK_INTERNAL_ALLOCATION_TYPE_LAST; ++internalAllocationTypeNdx)
	{
		for (int internalAllocationScopeNdx = 0; internalAllocationScopeNdx < VK_SYSTEM_ALLOCATION_SCOPE_LAST; ++internalAllocationScopeNdx)
			allocationTotal += validationResults.internalAllocationTotal[internalAllocationTypeNdx][internalAllocationScopeNdx];
	}

	return allocationTotal;
}

std::ostream& operator<< (std::ostream& str, const AllocationCallbackRecord& record)
{
	switch (record.type)
	{
		case AllocationCallbackRecord::TYPE_ALLOCATION:
			str << "ALLOCATION: size=" << record.data.allocation.size
				<< ", alignment=" << record.data.allocation.alignment
				<< ", scope=" << record.data.allocation.scope
				<< ", returnedPtr=" << tcu::toHex(record.data.allocation.returnedPtr);
			break;

		case AllocationCallbackRecord::TYPE_REALLOCATION:
			str << "REALLOCATION: original=" << tcu::toHex(record.data.reallocation.original)
				<< ", size=" << record.data.reallocation.size
				<< ", alignment=" << record.data.reallocation.alignment
				<< ", scope=" << record.data.reallocation.scope
				<< ", returnedPtr=" << tcu::toHex(record.data.reallocation.returnedPtr);
			break;

		case AllocationCallbackRecord::TYPE_FREE:
			str << "FREE: mem=" << tcu::toHex(record.data.free.mem);
			break;

		case AllocationCallbackRecord::TYPE_INTERNAL_ALLOCATION:
		case AllocationCallbackRecord::TYPE_INTERNAL_FREE:
			str << "INTERNAL_" << (record.type == AllocationCallbackRecord::TYPE_INTERNAL_ALLOCATION ? "ALLOCATION" : "FREE")
				<< ": size=" << record.data.internalAllocation.size
				<< ", type=" << record.data.internalAllocation.type
				<< ", scope=" << record.data.internalAllocation.scope;
			break;

		default:
			DE_ASSERT(false);
	}

	return str;
}

std::ostream& operator<< (std::ostream& str, const AllocationCallbackViolation& violation)
{
	switch (violation.reason)
	{
		case AllocationCallbackViolation::REASON_DOUBLE_FREE:
		{
			DE_ASSERT(violation.record.type == AllocationCallbackRecord::TYPE_FREE);
			str << "Double free of " << tcu::toHex(violation.record.data.free.mem);
			break;
		}

		case AllocationCallbackViolation::REASON_FREE_NOT_ALLOCATED_PTR:
		{
			DE_ASSERT(violation.record.type == AllocationCallbackRecord::TYPE_FREE);
			str << "Attempt to free " << tcu::toHex(violation.record.data.free.mem) << " which has not been allocated";
			break;
		}

		case AllocationCallbackViolation::REASON_REALLOC_NOT_ALLOCATED_PTR:
		{
			DE_ASSERT(violation.record.type == AllocationCallbackRecord::TYPE_REALLOCATION);
			str << "Attempt to reallocate " << tcu::toHex(violation.record.data.reallocation.original) << " which has not been allocated";
			break;
		}

		case AllocationCallbackViolation::REASON_REALLOC_FREED_PTR:
		{
			DE_ASSERT(violation.record.type == AllocationCallbackRecord::TYPE_REALLOCATION);
			str << "Attempt to reallocate " << tcu::toHex(violation.record.data.reallocation.original) << " which has been freed";
			break;
		}

		case AllocationCallbackViolation::REASON_NEGATIVE_INTERNAL_ALLOCATION_TOTAL:
		{
			DE_ASSERT(violation.record.type == AllocationCallbackRecord::TYPE_INTERNAL_FREE);
			str << "Internal allocation total for (" << violation.record.data.internalAllocation.type << ", " << violation.record.data.internalAllocation.scope << ") is negative";
			break;
		}

		case AllocationCallbackViolation::REASON_INVALID_INTERNAL_ALLOCATION_TYPE:
		{
			DE_ASSERT(violation.record.type == AllocationCallbackRecord::TYPE_INTERNAL_ALLOCATION ||
					  violation.record.type == AllocationCallbackRecord::TYPE_INTERNAL_FREE);
			str << "Invalid internal allocation type " << tcu::toHex(violation.record.data.internalAllocation.type);
			break;
		}

		case AllocationCallbackViolation::REASON_INVALID_ALLOCATION_SCOPE:
		{
			str << "Invalid allocation scope";
			break;
		}

		case AllocationCallbackViolation::REASON_INVALID_ALIGNMENT:
		{
			str << "Invalid alignment";
			break;
		}

		case AllocationCallbackViolation::REASON_REALLOC_DIFFERENT_ALIGNMENT:
		{
			str << "Reallocation with different alignment";
			break;
		}

		default:
			DE_ASSERT(false);
	}

	return str;
}

} // vk