/*
 * Copyright (c) 2021 The Khronos Group Inc.
 * Copyright (c) 2021 Valve Corporation
 * Copyright (c) 2021 LunarG, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and/or associated documentation files (the "Materials"), to
 * deal in the Materials without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Materials, and to permit persons to whom the Materials are
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice(s) and this permission notice shall be included in
 * all copies or substantial portions of the Materials.
 *
 * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 *
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE
 * USE OR OTHER DEALINGS IN THE MATERIALS.
 *
 * Author: Charles Giessen <charles@lunarg.com>
 */

#include "test_environment.h"

#include <mutex>

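// Settings controlling when the MemoryTracker below should start returning nullptr from its
// allocation callbacks: either after a total number of allocations or after a total number of
// calls to the allocation/reallocation functions.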
struct MemoryTrackerSettings {
    MemoryTrackerSettings() = default;
    MemoryTrackerSettings(bool should_fail_on_allocation, size_t fail_after_allocations, bool should_fail_after_set_number_of_calls,
                          size_t fail_after_calls)
        : should_fail_on_allocation(should_fail_on_allocation),
          fail_after_allocations(fail_after_allocations),
          should_fail_after_set_number_of_calls(should_fail_after_set_number_of_calls),
          fail_after_calls(fail_after_calls) {}
    bool should_fail_on_allocation = false;
    size_t fail_after_allocations = 0; // fail after this number of allocations in total
    bool should_fail_after_set_number_of_calls = false;
    size_t fail_after_calls = 0; // fail after this number of calls to alloc or realloc
};

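// Provides a VkAllocationCallbacks structure whose callbacks record every allocation handed out, so
// tests can assert that the loader frees everything it allocates. The MemoryTrackerSettings above can
// also make the callbacks start failing after a set number of allocations or calls, which is how the
// *IntentionalAllocFail tests below simulate out-of-host-memory conditions.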
class MemoryTracker {
    std::mutex main_mutex;
    MemoryTrackerSettings settings{};
    VkAllocationCallbacks callbacks{};
    // Implementation internals
    struct AllocationDetails {
        size_t requested_size_bytes;
        size_t actual_size_bytes;
        VkSystemAllocationScope alloc_scope;
    };
    const static size_t UNKNOWN_ALLOCATION = std::numeric_limits<size_t>::max();
    size_t allocation_count = 0;
    size_t call_count = 0;
    // Parallel vectors: the raw allocation, the aligned pointer handed back to the caller, and the
    // allocation's details all live at the same index.
    std::vector<std::unique_ptr<char[]>> allocations;
    std::vector<void*> allocations_aligned;
    std::vector<AllocationDetails> allocation_details;
    void add_element(std::unique_ptr<char[]>&& alloc, void* aligned_alloc, AllocationDetails detail) {
        allocations.push_back(std::move(alloc));
        allocations_aligned.push_back(aligned_alloc);
        allocation_details.push_back(detail);
    }
    void erase_index(size_t index) {
        allocations.erase(std::next(allocations.begin(), index));
        allocations_aligned.erase(std::next(allocations_aligned.begin(), index));
        allocation_details.erase(std::next(allocation_details.begin(), index));
    }
    size_t find_element(void* ptr) {
        auto it = std::find(allocations_aligned.begin(), allocations_aligned.end(), ptr);
        if (it == allocations_aligned.end()) return UNKNOWN_ALLOCATION;
        return it - allocations_aligned.begin();
    }

    void* allocate(size_t size, size_t alignment, VkSystemAllocationScope alloc_scope) {
        // Intentional failure injection, driven by the MemoryTrackerSettings
        if (settings.should_fail_on_allocation && allocation_count == settings.fail_after_allocations) return nullptr;
        if (settings.should_fail_after_set_number_of_calls && call_count == settings.fail_after_calls) return nullptr;
        call_count++;
        // Over-allocate by (alignment - 1) bytes so an aligned pointer always fits inside the buffer
        AllocationDetails detail{size, size + (alignment - 1), alloc_scope};
        auto alloc = std::unique_ptr<char[]>(new char[detail.actual_size_bytes]);
        if (!alloc) return nullptr;
        // Round the address up to the next multiple of alignment (alignment is required to be a power of two)
        uint64_t addr = (uint64_t)alloc.get();
        addr += (alignment - 1);
        addr &= ~(alignment - 1);
        void* aligned_alloc = (void*)addr;
        add_element(std::move(alloc), aligned_alloc, detail);
        allocation_count++;
        return allocations_aligned.back();
    }
    void* reallocate(void* pOriginal, size_t size, size_t alignment, VkSystemAllocationScope alloc_scope) {
        if (pOriginal == nullptr) {
            return allocate(size, alignment, alloc_scope);
        }
        size_t index = find_element(pOriginal);
        if (index == UNKNOWN_ALLOCATION) return nullptr;
        size_t original_size = allocation_details[index].requested_size_bytes;

        // We only care about failing the case where realloc is used to increase the size
        if (size >= original_size && settings.should_fail_after_set_number_of_calls && call_count == settings.fail_after_calls)
            return nullptr;
        call_count++;
        if (size == 0) {
            // Reallocating to zero bytes behaves like free()
            erase_index(index);
            allocation_count--;
            return nullptr;
        } else if (size < original_size) {
            // Shrinking reuses the existing allocation
            return pOriginal;
        } else {
            void* new_alloc = allocate(size, alignment, alloc_scope);
            if (new_alloc == nullptr) return nullptr;
            memcpy(new_alloc, pOriginal, original_size);
            erase_index(index);
            return new_alloc;
        }
    }
    void free(void* pMemory) {
        if (pMemory == nullptr) return;
        size_t index = find_element(pMemory);
        if (index == UNKNOWN_ALLOCATION) return;
        erase_index(index);
        assert(allocation_count != 0 && "Can't free when there are no valid allocations");
        allocation_count--;
    }

    // Implementation of public functions
    void* impl_allocation(size_t size, size_t alignment, VkSystemAllocationScope allocationScope) noexcept {
        std::lock_guard<std::mutex> lg(main_mutex);
        void* addr = allocate(size, alignment, allocationScope);
        return addr;
    }
    void* impl_reallocation(void* pOriginal, size_t size, size_t alignment, VkSystemAllocationScope allocationScope) noexcept {
        std::lock_guard<std::mutex> lg(main_mutex);
        void* addr = reallocate(pOriginal, size, alignment, allocationScope);
        return addr;
    }
    void impl_free(void* pMemory) noexcept {
        std::lock_guard<std::mutex> lg(main_mutex);
        free(pMemory);
    }
    void impl_internal_allocation_notification(size_t size, VkInternalAllocationType allocationType,
                                               VkSystemAllocationScope allocationScope) noexcept {
        std::lock_guard<std::mutex> lg(main_mutex);
        // TODO?
    }
    void impl_internal_free(size_t size, VkInternalAllocationType allocationType,
                            VkSystemAllocationScope allocationScope) noexcept {
        std::lock_guard<std::mutex> lg(main_mutex);
        // TODO?
    }

  public:
    MemoryTracker(MemoryTrackerSettings settings) noexcept : settings(settings) {
        allocations.reserve(512);
        allocations_aligned.reserve(512);
        allocation_details.reserve(512);

        callbacks.pUserData = this;
        callbacks.pfnAllocation = public_allocation;
        callbacks.pfnReallocation = public_reallocation;
        callbacks.pfnFree = public_free;
        callbacks.pfnInternalAllocation = public_internal_allocation_notification;
        callbacks.pfnInternalFree = public_internal_free;
    }
    MemoryTracker() noexcept : MemoryTracker(MemoryTrackerSettings{}) {}

    VkAllocationCallbacks* get() noexcept { return &callbacks; }

    bool empty() noexcept { return allocation_count == 0; }

    void update_settings(MemoryTrackerSettings new_settings) noexcept { settings = new_settings; }
    size_t current_allocation_count() const noexcept { return allocation_count; }
    size_t current_call_count() const noexcept { return call_count; }
    // Static callbacks matching the VkAllocationCallbacks function pointer signatures; they forward to
    // the MemoryTracker instance stored in pUserData
    static VKAPI_ATTR void* VKAPI_CALL public_allocation(void* pUserData, size_t size, size_t alignment,
                                                         VkSystemAllocationScope allocationScope) noexcept {
        return reinterpret_cast<MemoryTracker*>(pUserData)->impl_allocation(size, alignment, allocationScope);
    }
    static VKAPI_ATTR void* VKAPI_CALL public_reallocation(void* pUserData, void* pOriginal, size_t size, size_t alignment,
                                                           VkSystemAllocationScope allocationScope) noexcept {
        return reinterpret_cast<MemoryTracker*>(pUserData)->impl_reallocation(pOriginal, size, alignment, allocationScope);
    }
    static VKAPI_ATTR void VKAPI_CALL public_free(void* pUserData, void* pMemory) noexcept {
        reinterpret_cast<MemoryTracker*>(pUserData)->impl_free(pMemory);
    }
    static VKAPI_ATTR void VKAPI_CALL public_internal_allocation_notification(void* pUserData, size_t size,
                                                                              VkInternalAllocationType allocationType,
                                                                              VkSystemAllocationScope allocationScope) noexcept {
        reinterpret_cast<MemoryTracker*>(pUserData)->impl_internal_allocation_notification(size, allocationType, allocationScope);
    }
    static VKAPI_ATTR void VKAPI_CALL public_internal_free(void* pUserData, size_t size, VkInternalAllocationType allocationType,
                                                           VkSystemAllocationScope allocationScope) noexcept {
        reinterpret_cast<MemoryTracker*>(pUserData)->impl_internal_free(size, allocationType, allocationScope);
    }
};

// Test making sure the allocation functions are called to allocate and cleanup everything during
// a CreateInstance/DestroyInstance call pair.
TEST(Allocation, Instance) {
    FrameworkEnvironment env{};
    env.add_icd(TestICDDetails(TEST_ICD_PATH_VERSION_2));

    MemoryTracker tracker;
    {
        InstWrapper inst{env.vulkan_functions, tracker.get()};
        inst.CheckCreate();
    }
    ASSERT_TRUE(tracker.empty());
}

// Test making sure the allocation functions are called to allocate and cleanup everything during
// a CreateInstance/DestroyInstance call pair with a call to GetInstanceProcAddr.
TEST(Allocation, GetInstanceProcAddr) {
    FrameworkEnvironment env{};
    env.add_icd(TestICDDetails(TEST_ICD_PATH_VERSION_2));

    MemoryTracker tracker;
    {
        InstWrapper inst{env.vulkan_functions, tracker.get()};
        inst.CheckCreate();

        auto* pfnCreateDevice = inst->vkGetInstanceProcAddr(inst, "vkCreateDevice");
        auto* pfnDestroyDevice = inst->vkGetInstanceProcAddr(inst, "vkDestroyDevice");
        ASSERT_TRUE(pfnCreateDevice != nullptr && pfnDestroyDevice != nullptr);
    }
    ASSERT_TRUE(tracker.empty());
}

// Test making sure the allocation functions are called to allocate and cleanup everything during
// a vkEnumeratePhysicalDevices call pair.
TEST(Allocation, EnumeratePhysicalDevices) {
    FrameworkEnvironment env{};
    env.add_icd(TestICDDetails(TEST_ICD_PATH_VERSION_2));

    MemoryTracker tracker;
    auto& driver = env.get_test_icd();
    driver.physical_devices.emplace_back("physical_device_0");
    {
        InstWrapper inst{env.vulkan_functions, tracker.get()};
        inst.CheckCreate();
        uint32_t physical_count = 1;
        uint32_t returned_physical_count = 0;
        ASSERT_EQ(VK_SUCCESS, inst->vkEnumeratePhysicalDevices(inst.inst, &returned_physical_count, nullptr));
        ASSERT_EQ(physical_count, returned_physical_count);

        VkPhysicalDevice physical_device;
        ASSERT_EQ(VK_SUCCESS, inst->vkEnumeratePhysicalDevices(inst.inst, &returned_physical_count, &physical_device));
        ASSERT_EQ(physical_count, returned_physical_count);
    }
    ASSERT_TRUE(tracker.empty());
}

// Test making sure the allocation functions are called to allocate and cleanup everything from
// vkCreateInstance, to vkCreateDevice, and then through their destructors. With special
// allocators used on both the instance and device.
TEST(Allocation, InstanceAndDevice) {
    FrameworkEnvironment env{};
    env.add_icd(TestICDDetails(TEST_ICD_PATH_VERSION_2));

    MemoryTracker tracker;
    auto& driver = env.get_test_icd();
    driver.physical_devices.emplace_back("physical_device_0");
    driver.physical_devices[0].add_queue_family_properties({{VK_QUEUE_GRAPHICS_BIT, 1, 0, {1, 1, 1}}, false});
    {
        InstWrapper inst{env.vulkan_functions, tracker.get()};
        inst.CheckCreate();

        uint32_t physical_count = 1;
        uint32_t returned_physical_count = 0;
        ASSERT_EQ(VK_SUCCESS, inst->vkEnumeratePhysicalDevices(inst.inst, &returned_physical_count, nullptr));
        ASSERT_EQ(physical_count, returned_physical_count);

        VkPhysicalDevice physical_device;
        ASSERT_EQ(VK_SUCCESS, inst->vkEnumeratePhysicalDevices(inst.inst, &returned_physical_count, &physical_device));
        ASSERT_EQ(physical_count, returned_physical_count);

        uint32_t family_count = 1;
        uint32_t returned_family_count = 0;
        env.vulkan_functions.vkGetPhysicalDeviceQueueFamilyProperties(physical_device, &returned_family_count, nullptr);
        ASSERT_EQ(returned_family_count, family_count);

        VkQueueFamilyProperties family;
        env.vulkan_functions.vkGetPhysicalDeviceQueueFamilyProperties(physical_device, &returned_family_count, &family);
        ASSERT_EQ(returned_family_count, family_count);
        ASSERT_EQ(family.queueFlags, static_cast<VkQueueFlags>(VK_QUEUE_GRAPHICS_BIT));
        ASSERT_EQ(family.queueCount, family_count);
        ASSERT_EQ(family.timestampValidBits, 0U);

        DeviceCreateInfo dev_create_info;
        DeviceQueueCreateInfo queue_info;
        queue_info.add_priority(0.0f);
        dev_create_info.add_device_queue(queue_info);

        VkDevice device;
        ASSERT_EQ(inst->vkCreateDevice(physical_device, dev_create_info.get(), tracker.get(), &device), VK_SUCCESS);
        inst->vkDestroyDevice(device, tracker.get());
    }
    ASSERT_TRUE(tracker.empty());
}
// Test making sure the allocation functions are called to allocate and cleanup everything from
// vkCreateInstance, to vkCreateDevice, and then through their destructors. With special
// allocators used on only the instance and not the device.
TEST(Allocation, InstanceButNotDevice) {
    FrameworkEnvironment env{};
    env.add_icd(TestICDDetails(TEST_ICD_PATH_VERSION_2));

    MemoryTracker tracker;
    {
        auto& driver = env.get_test_icd();
        driver.physical_devices.emplace_back("physical_device_0");
        driver.physical_devices[0].add_queue_family_properties({{VK_QUEUE_GRAPHICS_BIT, 1, 0, {1, 1, 1}}, false});

        InstWrapper inst{env.vulkan_functions, tracker.get()};
        inst.CheckCreate();

        uint32_t physical_count = 1;
        uint32_t returned_physical_count = 0;
        ASSERT_EQ(VK_SUCCESS, inst->vkEnumeratePhysicalDevices(inst.inst, &returned_physical_count, nullptr));
        ASSERT_EQ(physical_count, returned_physical_count);

        VkPhysicalDevice physical_device;
        ASSERT_EQ(VK_SUCCESS, inst->vkEnumeratePhysicalDevices(inst.inst, &returned_physical_count, &physical_device));
        ASSERT_EQ(physical_count, returned_physical_count);

        uint32_t family_count = 1;
        uint32_t returned_family_count = 0;
        env.vulkan_functions.vkGetPhysicalDeviceQueueFamilyProperties(physical_device, &returned_family_count, nullptr);
        ASSERT_EQ(returned_family_count, family_count);

        VkQueueFamilyProperties family;
        env.vulkan_functions.vkGetPhysicalDeviceQueueFamilyProperties(physical_device, &returned_family_count, &family);
        ASSERT_EQ(returned_family_count, family_count);
        ASSERT_EQ(family.queueFlags, static_cast<VkQueueFlags>(VK_QUEUE_GRAPHICS_BIT));
        ASSERT_EQ(family.queueCount, family_count);
        ASSERT_EQ(family.timestampValidBits, 0U);

        DeviceCreateInfo dev_create_info;
        DeviceQueueCreateInfo queue_info;
        queue_info.add_priority(0.0f);
        dev_create_info.add_device_queue(queue_info);

        VkDevice device;
        ASSERT_EQ(inst->vkCreateDevice(physical_device, dev_create_info.get(), nullptr, &device), VK_SUCCESS);
        inst->vkDestroyDevice(device, nullptr);
    }
    ASSERT_TRUE(tracker.empty());
}

// Test making sure the allocation functions are called to allocate and cleanup everything from
// vkCreateInstance, to vkCreateDevice, and then through their destructors. With special
// allocators used on only the device and not the instance.
TEST(Allocation, DeviceButNotInstance) {
    FrameworkEnvironment env{};
    env.add_icd(TestICDDetails(TEST_ICD_PATH_VERSION_2));

    const char* layer_name = "VkLayerImplicit0";
    env.add_implicit_layer(ManifestLayer{}.add_layer(ManifestLayer::LayerDescription{}
                                                         .set_name(layer_name)
                                                         .set_lib_path(TEST_LAYER_PATH_EXPORT_VERSION_2)
                                                         .set_disable_environment("DISABLE_ENV")),
                           "test_layer.json");
    env.get_test_layer().set_do_spurious_allocations_in_create_instance(true).set_do_spurious_allocations_in_create_device(true);

    MemoryTracker tracker;
    {
        auto& driver = env.get_test_icd();
        driver.physical_devices.emplace_back("physical_device_0");
        driver.physical_devices[0].add_queue_family_properties({{VK_QUEUE_GRAPHICS_BIT, 1, 0, {1, 1, 1}}, false});

        InstWrapper inst{env.vulkan_functions};
        inst.CheckCreate();

        uint32_t physical_count = 1;
        uint32_t returned_physical_count = 0;
        ASSERT_EQ(VK_SUCCESS, inst->vkEnumeratePhysicalDevices(inst.inst, &returned_physical_count, nullptr));
        ASSERT_EQ(physical_count, returned_physical_count);

        VkPhysicalDevice physical_device;
        ASSERT_EQ(VK_SUCCESS, inst->vkEnumeratePhysicalDevices(inst.inst, &returned_physical_count, &physical_device));
        ASSERT_EQ(physical_count, returned_physical_count);

        uint32_t family_count = 1;
        uint32_t returned_family_count = 0;
        env.vulkan_functions.vkGetPhysicalDeviceQueueFamilyProperties(physical_device, &returned_family_count, nullptr);
        ASSERT_EQ(returned_family_count, family_count);

        VkQueueFamilyProperties family;
        env.vulkan_functions.vkGetPhysicalDeviceQueueFamilyProperties(physical_device, &returned_family_count, &family);
        ASSERT_EQ(returned_family_count, family_count);
        ASSERT_EQ(family.queueFlags, static_cast<VkQueueFlags>(VK_QUEUE_GRAPHICS_BIT));
        ASSERT_EQ(family.queueCount, family_count);
        ASSERT_EQ(family.timestampValidBits, 0U);

        DeviceCreateInfo dev_create_info;
        DeviceQueueCreateInfo queue_info;
        queue_info.add_priority(0.0f);
        dev_create_info.add_device_queue(queue_info);

        VkDevice device;
        ASSERT_EQ(inst->vkCreateDevice(physical_device, dev_create_info.get(), tracker.get(), &device), VK_SUCCESS);
        inst->vkDestroyDevice(device, tracker.get());
    }
    ASSERT_TRUE(tracker.empty());
}

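// Each of the *IntentionalAllocFail tests below runs the same scenario in a loop, telling the tracker
// to fail on a successively later allocation call. Every iteration must either succeed or return
// VK_ERROR_OUT_OF_HOST_MEMORY, and in both cases the tracker must end up empty, i.e. the loader must
// have cleaned up everything it allocated before bailing out.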
// Test failure during vkCreateInstance to make sure we don't leak memory if
// one of the out-of-memory conditions triggers.
TEST(Allocation, CreateInstanceIntentionalAllocFail) {
    FrameworkEnvironment env{};
    env.add_icd(TestICDDetails(TEST_ICD_PATH_VERSION_2));

    const char* layer_name = "VkLayerImplicit0";
    env.add_implicit_layer(ManifestLayer{}.add_layer(ManifestLayer::LayerDescription{}
                                                         .set_name(layer_name)
                                                         .set_lib_path(TEST_LAYER_PATH_EXPORT_VERSION_2)
                                                         .set_disable_environment("DISABLE_ENV")),
                           "test_layer.json");
    env.get_test_layer().set_do_spurious_allocations_in_create_instance(true).set_do_spurious_allocations_in_create_device(true);

    size_t fail_index = 0;
    VkResult result = VK_ERROR_OUT_OF_HOST_MEMORY;
    while (result == VK_ERROR_OUT_OF_HOST_MEMORY && fail_index <= 10000) {
        MemoryTracker tracker(MemoryTrackerSettings{false, 0, true, fail_index});

        VkInstance instance;
        InstanceCreateInfo inst_create_info{};
        result = env.vulkan_functions.vkCreateInstance(inst_create_info.get(), tracker.get(), &instance);
        if (result == VK_SUCCESS) {
            env.vulkan_functions.vkDestroyInstance(instance, tracker.get());
        }
        ASSERT_TRUE(tracker.empty());
        fail_index++;
    }
}

// Test failure during vkCreateInstance, with the driver discovered through the VK_DRIVER_FILES
// environment variable, to make sure we don't leak memory if one of the out-of-memory conditions triggers.
TEST(Allocation, DriverEnvVarIntentionalAllocFail) {
    FrameworkEnvironment env{};
    env.add_icd(TestICDDetails{TEST_ICD_PATH_VERSION_2}.set_discovery_type(ManifestDiscoveryType::env_var));

    const char* layer_name = "VkLayerImplicit0";
    env.add_implicit_layer(ManifestLayer{}.add_layer(ManifestLayer::LayerDescription{}
                                                         .set_name(layer_name)
                                                         .set_lib_path(TEST_LAYER_PATH_EXPORT_VERSION_2)
                                                         .set_disable_environment("DISABLE_ENV")),
                           "test_layer.json");
    env.get_test_layer().set_do_spurious_allocations_in_create_instance(true).set_do_spurious_allocations_in_create_device(true);

    auto driver_files = get_env_var("VK_DRIVER_FILES");
    driver_files += OS_ENV_VAR_LIST_SEPARATOR;
    driver_files += (fs::path("totally_made_up") / "path_to_fake" / "jason_file.json").str();
    set_env_var("VK_DRIVER_FILES", driver_files);

    size_t fail_index = 0;
    VkResult result = VK_ERROR_OUT_OF_HOST_MEMORY;
    while (result == VK_ERROR_OUT_OF_HOST_MEMORY && fail_index <= 10000) {
        MemoryTracker tracker(MemoryTrackerSettings{false, 0, true, fail_index});

        VkInstance instance;
        InstanceCreateInfo inst_create_info{};
        result = env.vulkan_functions.vkCreateInstance(inst_create_info.get(), tracker.get(), &instance);
        if (result == VK_SUCCESS) {
            env.vulkan_functions.vkDestroyInstance(instance, tracker.get());
        }
        ASSERT_TRUE(tracker.empty());
        fail_index++;
    }
    remove_env_var("VK_DRIVER_FILES");
}

// Test failure during vkCreateDevice to make sure we don't leak memory if
// one of the out-of-memory conditions triggers.
// Use 2 physical devices so that anything which copies a list of devices item by item
// may fail.
TEST(Allocation, CreateDeviceIntentionalAllocFail) {
    FrameworkEnvironment env{};
    env.add_icd(TestICDDetails(TEST_ICD_PATH_VERSION_2));

    auto& driver = env.get_test_icd();
    driver.physical_devices.emplace_back("physical_device_0");
    driver.physical_devices[0].add_queue_family_properties({{VK_QUEUE_GRAPHICS_BIT, 1, 0, {1, 1, 1}}, false});
    driver.physical_devices.emplace_back("physical_device_1");
    driver.physical_devices[1].add_queue_family_properties({{VK_QUEUE_GRAPHICS_BIT, 1, 0, {1, 1, 1}}, false});

    const char* layer_name = "VK_LAYER_VkLayerImplicit0";
    env.add_implicit_layer(ManifestLayer{}.add_layer(ManifestLayer::LayerDescription{}
                                                         .set_name(layer_name)
                                                         .set_lib_path(TEST_LAYER_PATH_EXPORT_VERSION_2)
                                                         .set_disable_environment("DISABLE_ENV")),
                           "test_layer.json");
    env.get_test_layer().set_do_spurious_allocations_in_create_instance(true).set_do_spurious_allocations_in_create_device(true);

    InstWrapper inst{env.vulkan_functions};
    inst.CheckCreate();

    uint32_t physical_count = 2;
    uint32_t returned_physical_count = 0;
    ASSERT_EQ(VK_SUCCESS, inst->vkEnumeratePhysicalDevices(inst.inst, &returned_physical_count, nullptr));
    ASSERT_EQ(physical_count, returned_physical_count);

    VkPhysicalDevice physical_devices[2];
    ASSERT_EQ(VK_SUCCESS, inst->vkEnumeratePhysicalDevices(inst.inst, &returned_physical_count, physical_devices));
    ASSERT_EQ(physical_count, returned_physical_count);

    uint32_t family_count = 1;
    uint32_t returned_family_count = 0;
    env.vulkan_functions.vkGetPhysicalDeviceQueueFamilyProperties(physical_devices[0], &returned_family_count, nullptr);
    ASSERT_EQ(returned_family_count, family_count);

    VkQueueFamilyProperties family;
    env.vulkan_functions.vkGetPhysicalDeviceQueueFamilyProperties(physical_devices[0], &returned_family_count, &family);
    ASSERT_EQ(returned_family_count, family_count);
    ASSERT_EQ(family.queueFlags, static_cast<VkQueueFlags>(VK_QUEUE_GRAPHICS_BIT));
    ASSERT_EQ(family.queueCount, family_count);
    ASSERT_EQ(family.timestampValidBits, 0U);

    size_t fail_index = 0;
    VkResult result = VK_ERROR_OUT_OF_HOST_MEMORY;
    while (result == VK_ERROR_OUT_OF_HOST_MEMORY) {
        MemoryTracker tracker(MemoryTrackerSettings{false, 0, true, fail_index});

        DeviceCreateInfo dev_create_info;
        DeviceQueueCreateInfo queue_info;
        queue_info.add_priority(0.0f);
        dev_create_info.add_device_queue(queue_info);

        VkDevice device;
        result = inst->vkCreateDevice(physical_devices[0], dev_create_info.get(), tracker.get(), &device);
        if (result == VK_SUCCESS) {
            inst->vkDestroyDevice(device, tracker.get());
            break;
        }
        if (fail_index > 10000) {
            break;  // bail out instead of destroying a device that was never created
        }
        ASSERT_TRUE(tracker.empty());
        fail_index++;
    }
}

// Test failure during vkCreateInstance and vkCreateDevice to make sure we don't
// leak memory if one of the out-of-memory conditions triggers.
TEST(Allocation, CreateInstanceDeviceIntentionalAllocFail) {
    FrameworkEnvironment env{};
    env.add_icd(TestICDDetails(TEST_ICD_PATH_VERSION_2));

    auto& driver = env.get_test_icd();
    driver.physical_devices.emplace_back("physical_device_0");
    driver.physical_devices[0].add_queue_family_properties({{VK_QUEUE_GRAPHICS_BIT, 1, 0, {1, 1, 1}}, false});

    const char* layer_name = "VkLayerImplicit0";
    env.add_implicit_layer(ManifestLayer{}.add_layer(ManifestLayer::LayerDescription{}
                                                         .set_name(layer_name)
                                                         .set_lib_path(TEST_LAYER_PATH_EXPORT_VERSION_2)
                                                         .set_disable_environment("DISABLE_ENV")),
                           "test_layer.json");
    env.get_test_layer().set_do_spurious_allocations_in_create_instance(true).set_do_spurious_allocations_in_create_device(true);

    size_t fail_index = 0;
    VkResult result = VK_ERROR_OUT_OF_HOST_MEMORY;
    while (result == VK_ERROR_OUT_OF_HOST_MEMORY && fail_index <= 10000) {
        MemoryTracker tracker(MemoryTrackerSettings{false, 0, true, fail_index});
        fail_index++; // applies to the next loop

        VkInstance instance;
        InstanceCreateInfo inst_create_info{};
        result = env.vulkan_functions.vkCreateInstance(inst_create_info.get(), tracker.get(), &instance);
        if (result == VK_ERROR_OUT_OF_HOST_MEMORY) {
            ASSERT_TRUE(tracker.empty());
            continue;
        }

        uint32_t physical_count = 1;
        uint32_t returned_physical_count = 0;
        result = env.vulkan_functions.vkEnumeratePhysicalDevices(instance, &returned_physical_count, nullptr);
        if (result == VK_ERROR_OUT_OF_HOST_MEMORY) {
            env.vulkan_functions.vkDestroyInstance(instance, tracker.get());
            ASSERT_TRUE(tracker.empty());
            continue;
        }
        ASSERT_EQ(physical_count, returned_physical_count);

        VkPhysicalDevice physical_device;
        result = env.vulkan_functions.vkEnumeratePhysicalDevices(instance, &returned_physical_count, &physical_device);
        if (result == VK_ERROR_OUT_OF_HOST_MEMORY) {
            env.vulkan_functions.vkDestroyInstance(instance, tracker.get());
            ASSERT_TRUE(tracker.empty());
            continue;
        }
        ASSERT_EQ(physical_count, returned_physical_count);

        uint32_t family_count = 1;
        uint32_t returned_family_count = 0;
        env.vulkan_functions.vkGetPhysicalDeviceQueueFamilyProperties(physical_device, &returned_family_count, nullptr);
        ASSERT_EQ(returned_family_count, family_count);

        VkQueueFamilyProperties family;
        env.vulkan_functions.vkGetPhysicalDeviceQueueFamilyProperties(physical_device, &returned_family_count, &family);
        ASSERT_EQ(returned_family_count, family_count);
        ASSERT_EQ(family.queueFlags, static_cast<VkQueueFlags>(VK_QUEUE_GRAPHICS_BIT));
        ASSERT_EQ(family.queueCount, family_count);
        ASSERT_EQ(family.timestampValidBits, 0U);

        DeviceCreateInfo dev_create_info;
        DeviceQueueCreateInfo queue_info;
        queue_info.add_priority(0.0f);
        dev_create_info.add_device_queue(queue_info);

        VkDevice device;
        result = env.vulkan_functions.vkCreateDevice(physical_device, dev_create_info.get(), tracker.get(), &device);
        if (result == VK_SUCCESS) {
            env.vulkan_functions.vkDestroyDevice(device, tracker.get());
        }
        env.vulkan_functions.vkDestroyInstance(instance, tracker.get());

        ASSERT_TRUE(tracker.empty());
    }
}

// Test failure during vkCreateInstance when a driver of the wrong architecture is present,
// to make sure the loader uses the valid ICD and doesn't report VK_ERROR_INCOMPATIBLE_DRIVER
// just because an incompatible driver also exists.
TEST(TryLoadWrongBinaries, CreateInstanceIntentionalAllocFail) {
    FrameworkEnvironment env{};
    env.add_icd(TestICDDetails(TEST_ICD_PATH_VERSION_2));
    env.add_icd(TestICDDetails(CURRENT_PLATFORM_DUMMY_BINARY_WRONG_TYPE).set_is_fake(true));

    const char* layer_name = "VkLayerImplicit0";
    env.add_implicit_layer(ManifestLayer{}.add_layer(ManifestLayer::LayerDescription{}
                                                         .set_name(layer_name)
                                                         .set_lib_path(TEST_LAYER_PATH_EXPORT_VERSION_2)
                                                         .set_disable_environment("DISABLE_ENV")),
                           "test_layer.json");
    env.get_test_layer().set_do_spurious_allocations_in_create_instance(true).set_do_spurious_allocations_in_create_device(true);

    size_t fail_index = 0;
    VkResult result = VK_ERROR_OUT_OF_HOST_MEMORY;
    while (result == VK_ERROR_OUT_OF_HOST_MEMORY && fail_index <= 10000) {
        MemoryTracker tracker(MemoryTrackerSettings{false, 0, true, fail_index});

        VkInstance instance;
        InstanceCreateInfo inst_create_info{};
        result = env.vulkan_functions.vkCreateInstance(inst_create_info.get(), tracker.get(), &instance);
        if (result == VK_SUCCESS) {
            env.vulkan_functions.vkDestroyInstance(instance, tracker.get());
        }
        ASSERT_NE(result, VK_ERROR_INCOMPATIBLE_DRIVER);
        ASSERT_TRUE(tracker.empty());
        fail_index++;
    }
}

// Test failure during vkCreateInstance, vkEnumeratePhysicalDevices, and vkCreateDevice to make sure
// we don't leak memory if one of the out-of-memory conditions triggers.
TEST(Allocation, EnumeratePhysicalDevicesIntentionalAllocFail) {
    FrameworkEnvironment env{};
    env.add_icd(TestICDDetails(TEST_ICD_PATH_VERSION_2));

    const char* layer_name = "VkLayerImplicit0";
    env.add_implicit_layer(ManifestLayer{}.add_layer(ManifestLayer::LayerDescription{}
                                                         .set_name(layer_name)
                                                         .set_lib_path(TEST_LAYER_PATH_EXPORT_VERSION_2)
                                                         .set_disable_environment("DISABLE_ENV")),
                           "test_layer.json");
    env.get_test_layer().set_do_spurious_allocations_in_create_instance(true).set_do_spurious_allocations_in_create_device(true);

    size_t fail_index = 0;
    bool reached_the_end = false;
    uint32_t starting_physical_dev_count = 3;
    while (!reached_the_end && fail_index <= 100) {
        fail_index++; // applies to the next loop
        uint32_t physical_dev_count = starting_physical_dev_count;
        VkResult result = VK_ERROR_OUT_OF_HOST_MEMORY;
        auto& driver = env.reset_icd();

        for (uint32_t i = 0; i < physical_dev_count; i++) {
            driver.physical_devices.emplace_back(std::string("physical_device_") + std::to_string(i));
            driver.physical_devices[i].add_queue_family_properties({{VK_QUEUE_GRAPHICS_BIT, 1, 0, {1, 1, 1}}, false});
        }
        MemoryTracker tracker{MemoryTrackerSettings{false, 0, true, fail_index}};
        InstanceCreateInfo inst_create_info;
        VkInstance instance;
        result = env.vulkan_functions.vkCreateInstance(inst_create_info.get(), tracker.get(), &instance);
        if (result == VK_ERROR_OUT_OF_HOST_MEMORY) {
            ASSERT_TRUE(tracker.empty());
            continue;
        }

        uint32_t returned_physical_count = 0;
        result = env.vulkan_functions.vkEnumeratePhysicalDevices(instance, &returned_physical_count, nullptr);
        if (result == VK_ERROR_OUT_OF_HOST_MEMORY) {
            env.vulkan_functions.vkDestroyInstance(instance, tracker.get());
            ASSERT_TRUE(tracker.empty());
            continue;
        }
        ASSERT_EQ(physical_dev_count, returned_physical_count);

        // Add two more physical devices between the count query and the enumeration so the
        // VK_INCOMPLETE path below can be exercised.
        for (uint32_t i = 0; i < 2; i++) {
            driver.physical_devices.emplace_back(std::string("physical_device_") + std::to_string(physical_dev_count));
            driver.physical_devices.back().add_queue_family_properties({{VK_QUEUE_GRAPHICS_BIT, 1, 0, {1, 1, 1}}, false});
            physical_dev_count += 1;
        }

        std::vector<VkPhysicalDevice> physical_devices{physical_dev_count, VK_NULL_HANDLE};
        result = env.vulkan_functions.vkEnumeratePhysicalDevices(instance, &returned_physical_count, physical_devices.data());
        if (result == VK_ERROR_OUT_OF_HOST_MEMORY) {
            env.vulkan_functions.vkDestroyInstance(instance, tracker.get());
            ASSERT_TRUE(tracker.empty());
            continue;
        }
        if (result == VK_INCOMPLETE) {
            result = env.vulkan_functions.vkEnumeratePhysicalDevices(instance, &returned_physical_count, nullptr);
            if (result == VK_ERROR_OUT_OF_HOST_MEMORY) {
                env.vulkan_functions.vkDestroyInstance(instance, tracker.get());
                ASSERT_TRUE(tracker.empty());
                continue;
            }
            physical_devices.resize(returned_physical_count);
            result = env.vulkan_functions.vkEnumeratePhysicalDevices(instance, &returned_physical_count, physical_devices.data());
            if (result == VK_ERROR_OUT_OF_HOST_MEMORY) {
                env.vulkan_functions.vkDestroyInstance(instance, tracker.get());
                ASSERT_TRUE(tracker.empty());
                continue;
            }
        }
        ASSERT_EQ(physical_dev_count, returned_physical_count);

        std::vector<VkDevice> devices(returned_physical_count, VK_NULL_HANDLE);
        for (uint32_t i = 0; i < returned_physical_count; i++) {
            uint32_t family_count = 1;
            uint32_t returned_family_count = 0;
            env.vulkan_functions.vkGetPhysicalDeviceQueueFamilyProperties(physical_devices[i], &returned_family_count, nullptr);
            ASSERT_EQ(returned_family_count, family_count);

            VkQueueFamilyProperties family;
            env.vulkan_functions.vkGetPhysicalDeviceQueueFamilyProperties(physical_devices[i], &returned_family_count, &family);
            ASSERT_EQ(returned_family_count, family_count);
            ASSERT_EQ(family.queueFlags, static_cast<VkQueueFlags>(VK_QUEUE_GRAPHICS_BIT));
            ASSERT_EQ(family.queueCount, family_count);
            ASSERT_EQ(family.timestampValidBits, 0U);

            DeviceCreateInfo dev_create_info;
            DeviceQueueCreateInfo queue_info;
            queue_info.add_priority(0.0f);
            dev_create_info.add_device_queue(queue_info);

            result = env.vulkan_functions.vkCreateDevice(physical_devices[i], dev_create_info.get(), tracker.get(), &devices[i]);
            if (result != VK_SUCCESS) {
                devices[i] = VK_NULL_HANDLE; // only destroy devices that were actually created
            }
        }
        for (uint32_t i = 0; i < returned_physical_count; i++) {
            if (devices[i] != VK_NULL_HANDLE) {
                env.vulkan_functions.vkDestroyDevice(devices[i], tracker.get());
            }
        }

        env.vulkan_functions.vkDestroyInstance(instance, tracker.get());
        ASSERT_TRUE(tracker.empty());
        reached_the_end = true;
    }
}
#if defined(WIN32)
// Test failure during vkCreateInstance and vkCreateDevice to make sure we don't
// leak memory if one of the out-of-memory conditions triggers.
TEST(Allocation, CreateInstanceDeviceWithDXGIDriverIntentionalAllocFail) {
    FrameworkEnvironment env{};
    env.add_icd(TestICDDetails(TEST_ICD_PATH_VERSION_6).set_discovery_type(ManifestDiscoveryType::none));
    env.add_icd(TestICDDetails(TEST_ICD_PATH_VERSION_2));

    for (uint32_t i = 0; i < 2; i++) {
        auto& driver = env.get_test_icd(i);
        driver.physical_devices.emplace_back(std::string("physical_device_") + std::to_string(i));
        driver.physical_devices[0].add_queue_family_properties({{VK_QUEUE_GRAPHICS_BIT, 1, 0, {1, 1, 1}}, false});
    }

    const char* layer_name = "VkLayerImplicit0";
    env.add_implicit_layer(ManifestLayer{}.add_layer(ManifestLayer::LayerDescription{}
                                                         .set_name(layer_name)
                                                         .set_lib_path(TEST_LAYER_PATH_EXPORT_VERSION_2)
                                                         .set_disable_environment("DISABLE_ENV")),
                           "test_layer.json");
    env.get_test_layer().set_do_spurious_allocations_in_create_instance(true).set_do_spurious_allocations_in_create_device(true);

    auto& known_driver = known_driver_list.at(2); // which driver this test pretends to be
    DXGI_ADAPTER_DESC1 desc1{};
    desc1.VendorId = known_driver.vendor_id;
    desc1.AdapterLuid = _LUID{10, 1000};
    env.platform_shim->add_dxgi_adapter(GpuType::discrete, desc1);
    env.get_test_icd().set_adapterLUID(desc1.AdapterLuid);

    env.platform_shim->add_d3dkmt_adapter(D3DKMT_Adapter{0, _LUID{10, 1000}}.add_driver_manifest_path(env.get_icd_manifest_path()));

    size_t fail_index = 0;
    VkResult result = VK_ERROR_OUT_OF_HOST_MEMORY;
    while (result == VK_ERROR_OUT_OF_HOST_MEMORY && fail_index <= 10000) {
        MemoryTracker tracker(MemoryTrackerSettings{false, 0, true, fail_index});
        fail_index++; // applies to the next loop

        VkInstance instance;
        InstanceCreateInfo inst_create_info{};
        result = env.vulkan_functions.vkCreateInstance(inst_create_info.get(), tracker.get(), &instance);
        if (result == VK_ERROR_OUT_OF_HOST_MEMORY) {
            ASSERT_TRUE(tracker.empty());
            continue;
        }

        uint32_t physical_count = 2;
        uint32_t returned_physical_count = 0;
        result = env.vulkan_functions.vkEnumeratePhysicalDevices(instance, &returned_physical_count, nullptr);
        if (result == VK_ERROR_OUT_OF_HOST_MEMORY || result == VK_INCOMPLETE) {
            env.vulkan_functions.vkDestroyInstance(instance, tracker.get());
            ASSERT_TRUE(tracker.empty());
            continue;
        }
        ASSERT_EQ(physical_count, returned_physical_count);

        std::array<VkPhysicalDevice, 2> physical_devices;
        result = env.vulkan_functions.vkEnumeratePhysicalDevices(instance, &returned_physical_count, physical_devices.data());
        if (result == VK_ERROR_OUT_OF_HOST_MEMORY || result == VK_INCOMPLETE) {
            env.vulkan_functions.vkDestroyInstance(instance, tracker.get());
            ASSERT_TRUE(tracker.empty());
            continue;
        }
        ASSERT_EQ(physical_count, returned_physical_count);

        std::array<VkDevice, 2> devices;
        for (uint32_t i = 0; i < returned_physical_count; i++) {
            uint32_t family_count = 1;
            uint32_t returned_family_count = 0;
            env.vulkan_functions.vkGetPhysicalDeviceQueueFamilyProperties(physical_devices[i], &returned_family_count, nullptr);
            ASSERT_EQ(returned_family_count, family_count);

            VkQueueFamilyProperties family;
            env.vulkan_functions.vkGetPhysicalDeviceQueueFamilyProperties(physical_devices[i], &returned_family_count, &family);
            ASSERT_EQ(returned_family_count, family_count);
            ASSERT_EQ(family.queueFlags, static_cast<VkQueueFlags>(VK_QUEUE_GRAPHICS_BIT));
            ASSERT_EQ(family.queueCount, family_count);
            ASSERT_EQ(family.timestampValidBits, 0U);

            DeviceCreateInfo dev_create_info;
            DeviceQueueCreateInfo queue_info;
            queue_info.add_priority(0.0f);
            dev_create_info.add_device_queue(queue_info);

            result = env.vulkan_functions.vkCreateDevice(physical_devices[i], dev_create_info.get(), tracker.get(), &devices[i]);
            if (result == VK_ERROR_OUT_OF_HOST_MEMORY) {
                devices[i] = VK_NULL_HANDLE;
            }
        }
        for (uint32_t i = 0; i < returned_physical_count; i++) {
            if (devices[i] != VK_NULL_HANDLE) {
                env.vulkan_functions.vkDestroyDevice(devices[i], tracker.get());
            }
        }
        env.vulkan_functions.vkDestroyInstance(instance, tracker.get());

        ASSERT_TRUE(tracker.empty());
    }
}
#endif