/*
 * Copyright (c) 2021-2023 The Khronos Group Inc.
 * Copyright (c) 2021-2023 Valve Corporation
 * Copyright (c) 2021-2023 LunarG, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and/or associated documentation files (the "Materials"), to
 * deal in the Materials without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Materials, and to permit persons to whom the Materials are
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice(s) and this permission notice shall be included in
 * all copies or substantial portions of the Materials.
 *
 * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 *
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE
 * USE OR OTHER DEALINGS IN THE MATERIALS.
 *
 * Author: Charles Giessen <charles@lunarg.com>
 */

#include "test_environment.h"

#include <cassert>
#include <cstdint>
#include <cstring>
#include <fstream>
#include <limits>
#include <memory>
#include <mutex>
#include <unordered_map>

33 struct MemoryTrackerSettings {
34 bool should_fail_on_allocation = false;
35 size_t fail_after_allocations = 0; // fail after this number of allocations in total
36 bool should_fail_after_set_number_of_calls = false;
37 size_t fail_after_calls = 0; // fail after this number of calls to alloc or realloc
38 };
39
40 class MemoryTracker {
41 std::mutex main_mutex;
42 MemoryTrackerSettings settings{};
43 VkAllocationCallbacks callbacks{};
44 // Implementation internals
45 struct AllocationDetails {
46 std::unique_ptr<char[]> allocation;
47 size_t requested_size_bytes;
48 size_t actual_size_bytes;
49 size_t alignment;
50 VkSystemAllocationScope alloc_scope;
51 };
52 const static size_t UNKNOWN_ALLOCATION = std::numeric_limits<size_t>::max();
53 size_t allocation_count = 0;
54 size_t call_count = 0;
55 std::unordered_map<void*, AllocationDetails> allocations;
56
allocate(size_t size,size_t alignment,VkSystemAllocationScope alloc_scope)57 void* allocate(size_t size, size_t alignment, VkSystemAllocationScope alloc_scope) {
58 if ((settings.should_fail_on_allocation && allocation_count == settings.fail_after_allocations) ||
59 (settings.should_fail_after_set_number_of_calls && call_count == settings.fail_after_calls)) {
60 return nullptr;
61 }
62 call_count++;
63 allocation_count++;
64 AllocationDetails detail{nullptr, size, size + (alignment - 1), alignment, alloc_scope};
65 detail.allocation = std::unique_ptr<char[]>(new char[detail.actual_size_bytes]);
66 if (!detail.allocation) {
67 abort();
68 };
69 uint64_t addr = (uint64_t)detail.allocation.get();
70 addr += (alignment - 1);
71 addr &= ~(alignment - 1);
72 void* aligned_alloc = (void*)addr;
73 allocations.insert(std::make_pair(aligned_alloc, std::move(detail)));
74 return aligned_alloc;
75 }
reallocate(void * pOriginal,size_t size,size_t alignment,VkSystemAllocationScope alloc_scope)76 void* reallocate(void* pOriginal, size_t size, size_t alignment, VkSystemAllocationScope alloc_scope) {
77 if (pOriginal == nullptr) {
78 return allocate(size, alignment, alloc_scope);
79 }
80 auto elem = allocations.find(pOriginal);
81 if (elem == allocations.end()) return nullptr;
82 size_t original_size = elem->second.requested_size_bytes;
83
84 // We only care about the case where realloc is used to increase the size
85 if (size >= original_size && settings.should_fail_after_set_number_of_calls && call_count == settings.fail_after_calls)
86 return nullptr;
87 call_count++;
88 if (size == 0) {
89 allocations.erase(elem);
90 allocation_count--;
91 return nullptr;
92 } else if (size < original_size) {
93 return pOriginal;
94 } else {
95 void* new_alloc = allocate(size, alignment, alloc_scope);
96 if (new_alloc == nullptr) return nullptr;
97 allocation_count--; // allocate() increments this, we we don't want that
98 call_count--; // allocate() also increments this, we don't want that
99 memcpy(new_alloc, pOriginal, original_size);
100 allocations.erase(elem);
101 return new_alloc;
102 }
103 }
free(void * pMemory)104 void free(void* pMemory) {
105 if (pMemory == nullptr) return;
106 auto elem = allocations.find(pMemory);
107 if (elem == allocations.end()) {
108 assert(false && "Should never be freeing memory that wasn't allocated by the MemoryTracker!");
109 return;
110 }
111 allocations.erase(elem);
112 assert(allocation_count != 0 && "Cant free when there are no valid allocations");
113 allocation_count--;
114 }
115
116 // Implementation of public functions
impl_allocation(size_t size,size_t alignment,VkSystemAllocationScope allocationScope)117 void* impl_allocation(size_t size, size_t alignment, VkSystemAllocationScope allocationScope) noexcept {
118 std::lock_guard<std::mutex> lg(main_mutex);
119 void* addr = allocate(size, alignment, allocationScope);
120 return addr;
121 }
impl_reallocation(void * pOriginal,size_t size,size_t alignment,VkSystemAllocationScope allocationScope)122 void* impl_reallocation(void* pOriginal, size_t size, size_t alignment, VkSystemAllocationScope allocationScope) noexcept {
123 std::lock_guard<std::mutex> lg(main_mutex);
124 void* addr = reallocate(pOriginal, size, alignment, allocationScope);
125 return addr;
126 }
impl_free(void * pMemory)127 void impl_free(void* pMemory) noexcept {
128 std::lock_guard<std::mutex> lg(main_mutex);
129 free(pMemory);
130 }
impl_internal_allocation_notification(size_t size,VkInternalAllocationType allocationType,VkSystemAllocationScope allocationScope)131 void impl_internal_allocation_notification([[maybe_unused]] size_t size,
132 [[maybe_unused]] VkInternalAllocationType allocationType,
133 [[maybe_unused]] VkSystemAllocationScope allocationScope) noexcept {
134 std::lock_guard<std::mutex> lg(main_mutex);
135 // TODO?
136 }
impl_internal_free(size_t size,VkInternalAllocationType allocationType,VkSystemAllocationScope allocationScope)137 void impl_internal_free([[maybe_unused]] size_t size, [[maybe_unused]] VkInternalAllocationType allocationType,
138 [[maybe_unused]] VkSystemAllocationScope allocationScope) noexcept {
139 std::lock_guard<std::mutex> lg(main_mutex);
140 // TODO?
141 }
142
143 public:
MemoryTracker(MemoryTrackerSettings settings)144 MemoryTracker(MemoryTrackerSettings settings) noexcept : settings(settings) {
145 allocations.reserve(3000);
146
147 callbacks.pUserData = this;
148 callbacks.pfnAllocation = public_allocation;
149 callbacks.pfnReallocation = public_reallocation;
150 callbacks.pfnFree = public_free;
151 callbacks.pfnInternalAllocation = public_internal_allocation_notification;
152 callbacks.pfnInternalFree = public_internal_free;
153 }
MemoryTracker()154 MemoryTracker() noexcept : MemoryTracker(MemoryTrackerSettings{}) {}
155
get()156 VkAllocationCallbacks* get() noexcept { return &callbacks; }
157
empty()158 bool empty() noexcept { return allocation_count == 0; }
159
160 // Static callbacks
public_allocation(void * pUserData,size_t size,size_t alignment,VkSystemAllocationScope allocationScope)161 static VKAPI_ATTR void* VKAPI_CALL public_allocation(void* pUserData, size_t size, size_t alignment,
162 VkSystemAllocationScope allocationScope) noexcept {
163 return reinterpret_cast<MemoryTracker*>(pUserData)->impl_allocation(size, alignment, allocationScope);
164 }
public_reallocation(void * pUserData,void * pOriginal,size_t size,size_t alignment,VkSystemAllocationScope allocationScope)165 static VKAPI_ATTR void* VKAPI_CALL public_reallocation(void* pUserData, void* pOriginal, size_t size, size_t alignment,
166 VkSystemAllocationScope allocationScope) noexcept {
167 return reinterpret_cast<MemoryTracker*>(pUserData)->impl_reallocation(pOriginal, size, alignment, allocationScope);
168 }
public_free(void * pUserData,void * pMemory)169 static VKAPI_ATTR void VKAPI_CALL public_free(void* pUserData, void* pMemory) noexcept {
170 reinterpret_cast<MemoryTracker*>(pUserData)->impl_free(pMemory);
171 }
public_internal_allocation_notification(void * pUserData,size_t size,VkInternalAllocationType allocationType,VkSystemAllocationScope allocationScope)172 static VKAPI_ATTR void VKAPI_CALL public_internal_allocation_notification(void* pUserData, size_t size,
173 VkInternalAllocationType allocationType,
174 VkSystemAllocationScope allocationScope) noexcept {
175 reinterpret_cast<MemoryTracker*>(pUserData)->impl_internal_allocation_notification(size, allocationType, allocationScope);
176 }
public_internal_free(void * pUserData,size_t size,VkInternalAllocationType allocationType,VkSystemAllocationScope allocationScope)177 static VKAPI_ATTR void VKAPI_CALL public_internal_free(void* pUserData, size_t size, VkInternalAllocationType allocationType,
178 VkSystemAllocationScope allocationScope) noexcept {
179 reinterpret_cast<MemoryTracker*>(pUserData)->impl_internal_free(size, allocationType, allocationScope);
180 }
181 };
182
183 // Test making sure the allocation functions are called to allocate and cleanup everything during
184 // a CreateInstance/DestroyInstance call pair.
TEST(Allocation,Instance)185 TEST(Allocation, Instance) {
186 FrameworkEnvironment env{};
187 env.add_icd(TestICDDetails(TEST_ICD_PATH_VERSION_2));
188
189 MemoryTracker tracker;
190 {
191 InstWrapper inst{env.vulkan_functions, tracker.get()};
192 ASSERT_NO_FATAL_FAILURE(inst.CheckCreate());
193 }
194 ASSERT_TRUE(tracker.empty());
195 }
196
197 // Test making sure the allocation functions are called to allocate and cleanup everything during
198 // a CreateInstance/DestroyInstance call pair with a call to GetInstanceProcAddr.
TEST(Allocation,GetInstanceProcAddr)199 TEST(Allocation, GetInstanceProcAddr) {
200 FrameworkEnvironment env{};
201 env.add_icd(TestICDDetails(TEST_ICD_PATH_VERSION_2));
202
203 MemoryTracker tracker;
204 {
205 InstWrapper inst{env.vulkan_functions, tracker.get()};
206 ASSERT_NO_FATAL_FAILURE(inst.CheckCreate());
207
208 auto* pfnCreateDevice = inst->vkGetInstanceProcAddr(inst, "vkCreateDevice");
209 auto* pfnDestroyDevice = inst->vkGetInstanceProcAddr(inst, "vkDestroyDevice");
210 ASSERT_TRUE(pfnCreateDevice != nullptr && pfnDestroyDevice != nullptr);
211 }
212 ASSERT_TRUE(tracker.empty());
213 }
214
215 // Test making sure the allocation functions are called to allocate and cleanup everything during
216 // a vkEnumeratePhysicalDevices call pair.
TEST(Allocation,EnumeratePhysicalDevices)217 TEST(Allocation, EnumeratePhysicalDevices) {
218 FrameworkEnvironment env{};
219 env.add_icd(TestICDDetails(TEST_ICD_PATH_VERSION_2)).add_physical_device("physical_device_0");
220
221 MemoryTracker tracker;
222 {
223 InstWrapper inst{env.vulkan_functions, tracker.get()};
224 ASSERT_NO_FATAL_FAILURE(inst.CheckCreate());
225 uint32_t physical_count = 1;
226 uint32_t returned_physical_count = 0;
227 ASSERT_EQ(VK_SUCCESS, inst->vkEnumeratePhysicalDevices(inst.inst, &returned_physical_count, nullptr));
228 ASSERT_EQ(physical_count, returned_physical_count);
229
230 VkPhysicalDevice physical_device;
231 ASSERT_EQ(VK_SUCCESS, inst->vkEnumeratePhysicalDevices(inst.inst, &returned_physical_count, &physical_device));
232 ASSERT_EQ(physical_count, returned_physical_count);
233 }
234 ASSERT_TRUE(tracker.empty());
235 }
236
// Test making sure the allocation functions are called to allocate and cleanup everything from
// vkCreateInstance, to vkCreateDevice, and then through their destructors. With special
// allocators used on both the instance and device.
TEST(Allocation,InstanceAndDevice)240 TEST(Allocation, InstanceAndDevice) {
241 FrameworkEnvironment env{};
242 env.add_icd(TestICDDetails(TEST_ICD_PATH_VERSION_2))
243 .add_physical_device(PhysicalDevice{"physical_device_0"}
244 .add_queue_family_properties({{VK_QUEUE_GRAPHICS_BIT, 1, 0, {1, 1, 1}}, false})
245 .finish());
246
247 MemoryTracker tracker;
248 {
249 InstWrapper inst{env.vulkan_functions, tracker.get()};
250 ASSERT_NO_FATAL_FAILURE(inst.CheckCreate());
251
252 uint32_t physical_count = 1;
253 uint32_t returned_physical_count = 0;
254 ASSERT_EQ(VK_SUCCESS, inst->vkEnumeratePhysicalDevices(inst.inst, &returned_physical_count, nullptr));
255 ASSERT_EQ(physical_count, returned_physical_count);
256
257 VkPhysicalDevice physical_device;
258 ASSERT_EQ(VK_SUCCESS, inst->vkEnumeratePhysicalDevices(inst.inst, &returned_physical_count, &physical_device));
259 ASSERT_EQ(physical_count, returned_physical_count);
260
261 uint32_t family_count = 1;
262 uint32_t returned_family_count = 0;
263 env.vulkan_functions.vkGetPhysicalDeviceQueueFamilyProperties(physical_device, &returned_family_count, nullptr);
264 ASSERT_EQ(returned_family_count, family_count);
265
266 VkQueueFamilyProperties family;
267 env.vulkan_functions.vkGetPhysicalDeviceQueueFamilyProperties(physical_device, &returned_family_count, &family);
268 ASSERT_EQ(returned_family_count, family_count);
269 ASSERT_EQ(family.queueFlags, static_cast<VkQueueFlags>(VK_QUEUE_GRAPHICS_BIT));
270 ASSERT_EQ(family.queueCount, family_count);
271 ASSERT_EQ(family.timestampValidBits, 0U);
272
273 DeviceCreateInfo dev_create_info;
274 dev_create_info.add_device_queue(DeviceQueueCreateInfo{}.add_priority(0.0f));
275
276 VkDevice device;
277 ASSERT_EQ(inst->vkCreateDevice(physical_device, dev_create_info.get(), tracker.get(), &device), VK_SUCCESS);
278
279 VkQueue queue;
280 inst->vkGetDeviceQueue(device, 0, 0, &queue);
281
282 inst->vkDestroyDevice(device, tracker.get());
283 }
284 ASSERT_TRUE(tracker.empty());
285 }
// Test making sure the allocation functions are called to allocate and cleanup everything from
// vkCreateInstance, to vkCreateDevice, and then through their destructors. With special
// allocators used on only the instance and not the device.
TEST(Allocation,InstanceButNotDevice)289 TEST(Allocation, InstanceButNotDevice) {
290 FrameworkEnvironment env{};
291 env.add_icd(TestICDDetails(TEST_ICD_PATH_VERSION_2))
292 .add_physical_device(PhysicalDevice{"physical_device_0"}
293 .add_queue_family_properties({{VK_QUEUE_GRAPHICS_BIT, 1, 0, {1, 1, 1}}, false})
294 .finish());
295
296 MemoryTracker tracker;
297 {
298 InstWrapper inst{env.vulkan_functions, tracker.get()};
299 ASSERT_NO_FATAL_FAILURE(inst.CheckCreate());
300
301 uint32_t physical_count = 1;
302 uint32_t returned_physical_count = 0;
303 ASSERT_EQ(VK_SUCCESS, inst->vkEnumeratePhysicalDevices(inst.inst, &returned_physical_count, nullptr));
304 ASSERT_EQ(physical_count, returned_physical_count);
305
306 VkPhysicalDevice physical_device;
307 ASSERT_EQ(VK_SUCCESS, inst->vkEnumeratePhysicalDevices(inst.inst, &returned_physical_count, &physical_device));
308 ASSERT_EQ(physical_count, returned_physical_count);
309
310 uint32_t family_count = 1;
311 uint32_t returned_family_count = 0;
312 env.vulkan_functions.vkGetPhysicalDeviceQueueFamilyProperties(physical_device, &returned_family_count, nullptr);
313 ASSERT_EQ(returned_family_count, family_count);
314
315 VkQueueFamilyProperties family;
316 env.vulkan_functions.vkGetPhysicalDeviceQueueFamilyProperties(physical_device, &returned_family_count, &family);
317 ASSERT_EQ(returned_family_count, family_count);
318 ASSERT_EQ(family.queueFlags, static_cast<VkQueueFlags>(VK_QUEUE_GRAPHICS_BIT));
319 ASSERT_EQ(family.queueCount, family_count);
320 ASSERT_EQ(family.timestampValidBits, 0U);
321
322 DeviceCreateInfo dev_create_info;
323 dev_create_info.add_device_queue(DeviceQueueCreateInfo{}.add_priority(0.0f));
324
325 VkDevice device;
326 ASSERT_EQ(inst->vkCreateDevice(physical_device, dev_create_info.get(), nullptr, &device), VK_SUCCESS);
327 VkQueue queue;
328 inst->vkGetDeviceQueue(device, 0, 0, &queue);
329
330 inst->vkDestroyDevice(device, nullptr);
331 }
332 ASSERT_TRUE(tracker.empty());
333 }
334
// Test making sure the allocation functions are called to allocate and cleanup everything from
// vkCreateInstance, to vkCreateDevice, and then through their destructors. With special
// allocators used on only the device and not the instance.
TEST(Allocation,DeviceButNotInstance)338 TEST(Allocation, DeviceButNotInstance) {
339 FrameworkEnvironment env{};
340 env.add_icd(TestICDDetails(TEST_ICD_PATH_VERSION_2))
341 .add_physical_device(PhysicalDevice{"physical_device_0"}
342 .add_queue_family_properties({{VK_QUEUE_GRAPHICS_BIT, 1, 0, {1, 1, 1}}, false})
343 .finish());
344
345 const char* layer_name = "VK_LAYER_implicit";
346 env.add_implicit_layer(ManifestLayer{}.add_layer(ManifestLayer::LayerDescription{}
347 .set_name(layer_name)
348 .set_lib_path(TEST_LAYER_PATH_EXPORT_VERSION_2)
349 .set_disable_environment("DISABLE_ENV")),
350 "test_layer.json");
351 env.get_test_layer().set_do_spurious_allocations_in_create_instance(true).set_do_spurious_allocations_in_create_device(true);
352
353 MemoryTracker tracker;
354 {
355 InstWrapper inst{env.vulkan_functions};
356 ASSERT_NO_FATAL_FAILURE(inst.CheckCreate());
357
358 uint32_t physical_count = 1;
359 uint32_t returned_physical_count = 0;
360 ASSERT_EQ(VK_SUCCESS, inst->vkEnumeratePhysicalDevices(inst.inst, &returned_physical_count, nullptr));
361 ASSERT_EQ(physical_count, returned_physical_count);
362
363 VkPhysicalDevice physical_device;
364 ASSERT_EQ(VK_SUCCESS, inst->vkEnumeratePhysicalDevices(inst.inst, &returned_physical_count, &physical_device));
365 ASSERT_EQ(physical_count, returned_physical_count);
366
367 uint32_t family_count = 1;
368 uint32_t returned_family_count = 0;
369 env.vulkan_functions.vkGetPhysicalDeviceQueueFamilyProperties(physical_device, &returned_family_count, nullptr);
370 ASSERT_EQ(returned_family_count, family_count);
371
372 VkQueueFamilyProperties family;
373 env.vulkan_functions.vkGetPhysicalDeviceQueueFamilyProperties(physical_device, &returned_family_count, &family);
374 ASSERT_EQ(returned_family_count, family_count);
375 ASSERT_EQ(family.queueFlags, static_cast<VkQueueFlags>(VK_QUEUE_GRAPHICS_BIT));
376 ASSERT_EQ(family.queueCount, family_count);
377 ASSERT_EQ(family.timestampValidBits, 0U);
378
379 DeviceCreateInfo dev_create_info;
380 dev_create_info.add_device_queue(DeviceQueueCreateInfo{}.add_priority(0.0f));
381
382 VkDevice device;
383 ASSERT_EQ(inst->vkCreateDevice(physical_device, dev_create_info.get(), tracker.get(), &device), VK_SUCCESS);
384
385 VkQueue queue;
386 inst->vkGetDeviceQueue(device, 0, 0, &queue);
387
388 inst->vkDestroyDevice(device, tracker.get());
389 }
390 ASSERT_TRUE(tracker.empty());
391 }
392
393 // Test failure during vkCreateInstance to make sure we don't leak memory if
394 // one of the out-of-memory conditions trigger.
TEST(Allocation,CreateInstanceIntentionalAllocFail)395 TEST(Allocation, CreateInstanceIntentionalAllocFail) {
396 FrameworkEnvironment env{FrameworkSettings{}.set_log_filter("error,warn")};
397 env.add_icd(TestICDDetails(TEST_ICD_PATH_VERSION_2));
398
399 const char* layer_name = "VK_LAYER_implicit";
400 env.add_implicit_layer(ManifestLayer{}.add_layer(ManifestLayer::LayerDescription{}
401 .set_name(layer_name)
402 .set_lib_path(TEST_LAYER_PATH_EXPORT_VERSION_2)
403 .set_disable_environment("DISABLE_ENV")),
404 "test_layer.json");
405 env.get_test_layer().set_do_spurious_allocations_in_create_instance(true).set_do_spurious_allocations_in_create_device(true);
406
407 size_t fail_index = 0;
408 VkResult result = VK_ERROR_OUT_OF_HOST_MEMORY;
409 while (result == VK_ERROR_OUT_OF_HOST_MEMORY && fail_index <= 10000) {
410 MemoryTracker tracker({false, 0, true, fail_index});
411
412 VkInstance instance;
413 InstanceCreateInfo inst_create_info{};
414 result = env.vulkan_functions.vkCreateInstance(inst_create_info.get(), tracker.get(), &instance);
415 if (result == VK_SUCCESS) {
416 env.vulkan_functions.vkDestroyInstance(instance, tracker.get());
417 }
418 ASSERT_TRUE(tracker.empty());
419 fail_index++;
420 }
421 }
422
423 // Test failure during vkCreateInstance to make sure we don't leak memory if
424 // one of the out-of-memory conditions trigger and there are invalid jsons in the same folder
TEST(Allocation,CreateInstanceIntentionalAllocFailInvalidManifests)425 TEST(Allocation, CreateInstanceIntentionalAllocFailInvalidManifests) {
426 FrameworkEnvironment env{FrameworkSettings{}.set_log_filter("error,warn")};
427 env.add_icd(TestICDDetails(TEST_ICD_PATH_VERSION_2));
428
429 std::vector<std::string> invalid_jsons;
430 invalid_jsons.push_back(",");
431 invalid_jsons.push_back("{},[]");
432 invalid_jsons.push_back("{ \"foo\":\"bar\", }");
433 invalid_jsons.push_back("{\"foo\":\"bar\", \"baz\": [], },");
434 invalid_jsons.push_back("{\"foo\":\"bar\", \"baz\": [{},] },");
435 invalid_jsons.push_back("{\"foo\":\"bar\", \"baz\": {\"fee\"} },");
436 invalid_jsons.push_back("{\"\":\"bar\", \"baz\": {}");
437 invalid_jsons.push_back("{\"foo\":\"bar\", \"baz\": {\"fee\":1234, true, \"ab\":\"bc\"} },");
438
439 for (size_t i = 0; i < invalid_jsons.size(); i++) {
440 auto file_name = std::string("invalid_implicit_layer_") + std::to_string(i) + ".json";
441 std::filesystem::path new_path =
442 env.get_folder(ManifestLocation::implicit_layer).write_manifest(file_name, invalid_jsons[i]);
443 env.platform_shim->add_manifest(ManifestCategory::implicit_layer, new_path);
444 }
445
446 const char* layer_name = "VkLayerImplicit0";
447 env.add_implicit_layer(ManifestLayer{}.add_layer(ManifestLayer::LayerDescription{}
448 .set_name(layer_name)
449 .set_lib_path(TEST_LAYER_PATH_EXPORT_VERSION_2)
450 .set_disable_environment("DISABLE_ENV")),
451 "test_layer.json");
452
453 size_t fail_index = 0;
454 VkResult result = VK_ERROR_OUT_OF_HOST_MEMORY;
455 while (result == VK_ERROR_OUT_OF_HOST_MEMORY && fail_index <= 10000) {
456 MemoryTracker tracker({false, 0, true, fail_index});
457
458 VkInstance instance;
459 InstanceCreateInfo inst_create_info{};
460 result = env.vulkan_functions.vkCreateInstance(inst_create_info.get(), tracker.get(), &instance);
461 if (result == VK_SUCCESS) {
462 env.vulkan_functions.vkDestroyInstance(instance, tracker.get());
463 }
464 ASSERT_TRUE(tracker.empty());
465 fail_index++;
466 }
467 }
468
469 // Test failure during vkCreateInstance & surface creation to make sure we don't leak memory if
470 // one of the out-of-memory conditions trigger.
TEST(Allocation,CreateSurfaceIntentionalAllocFail)471 TEST(Allocation, CreateSurfaceIntentionalAllocFail) {
472 FrameworkEnvironment env{FrameworkSettings{}.set_log_filter("error,warn")};
473 env.add_icd(TestICDDetails(TEST_ICD_PATH_VERSION_2)).setup_WSI();
474
475 const char* layer_name = "VK_LAYER_implicit";
476 env.add_implicit_layer(ManifestLayer{}.add_layer(ManifestLayer::LayerDescription{}
477 .set_name(layer_name)
478 .set_lib_path(TEST_LAYER_PATH_EXPORT_VERSION_2)
479 .set_disable_environment("DISABLE_ENV")),
480 "test_layer.json");
481 env.get_test_layer().set_do_spurious_allocations_in_create_instance(true).set_do_spurious_allocations_in_create_device(true);
482
483 size_t fail_index = 0;
484 VkResult result = VK_ERROR_OUT_OF_HOST_MEMORY;
485 while (result == VK_ERROR_OUT_OF_HOST_MEMORY && fail_index <= 10000) {
486 MemoryTracker tracker({false, 0, true, fail_index});
487
488 VkInstance instance;
489 InstanceCreateInfo inst_create_info{};
490 inst_create_info.setup_WSI();
491 result = env.vulkan_functions.vkCreateInstance(inst_create_info.get(), tracker.get(), &instance);
492 if (result == VK_ERROR_OUT_OF_HOST_MEMORY) {
493 ASSERT_TRUE(tracker.empty());
494 fail_index++;
495 continue;
496 }
497
498 VkSurfaceKHR surface{};
499 result = create_surface(&env.vulkan_functions, instance, surface);
500 if (result == VK_ERROR_OUT_OF_HOST_MEMORY) {
501 env.vulkan_functions.vkDestroyInstance(instance, tracker.get());
502 ASSERT_TRUE(tracker.empty());
503 fail_index++;
504 continue;
505 }
506 env.vulkan_functions.vkDestroySurfaceKHR(instance, surface, tracker.get());
507
508 env.vulkan_functions.vkDestroyInstance(instance, tracker.get());
509 ASSERT_TRUE(tracker.empty());
510 fail_index++;
511 }
512 }
513
514 // Test failure during vkCreateInstance to make sure we don't leak memory if
515 // one of the out-of-memory conditions trigger.
TEST(Allocation,CreateInstanceIntentionalAllocFailWithSettingsFilePresent)516 TEST(Allocation, CreateInstanceIntentionalAllocFailWithSettingsFilePresent) {
517 FrameworkEnvironment env{FrameworkSettings{}.set_log_filter("error,warn")};
518 env.add_icd(TestICDDetails(TEST_ICD_PATH_VERSION_2));
519
520 const char* layer_name = "VK_LAYER_implicit";
521 env.add_implicit_layer(ManifestLayer{}.add_layer(ManifestLayer::LayerDescription{}
522 .set_name(layer_name)
523 .set_lib_path(TEST_LAYER_PATH_EXPORT_VERSION_2)
524 .set_disable_environment("DISABLE_ENV")),
525 "test_layer.json");
526 env.get_test_layer().set_do_spurious_allocations_in_create_instance(true).set_do_spurious_allocations_in_create_device(true);
527
528 env.update_loader_settings(
529 env.loader_settings.add_app_specific_setting(AppSpecificSettings{}.add_stderr_log_filter("all").add_layer_configuration(
530 LoaderSettingsLayerConfiguration{}
531 .set_name(layer_name)
532 .set_control("auto")
533 .set_path(env.get_shimmed_layer_manifest_path(0)))));
534
535 size_t fail_index = 0;
536 VkResult result = VK_ERROR_OUT_OF_HOST_MEMORY;
537 while (result == VK_ERROR_OUT_OF_HOST_MEMORY && fail_index <= 10000) {
538 MemoryTracker tracker({false, 0, true, fail_index});
539
540 VkInstance instance;
541 InstanceCreateInfo inst_create_info{};
542 result = env.vulkan_functions.vkCreateInstance(inst_create_info.get(), tracker.get(), &instance);
543 if (result == VK_SUCCESS) {
544 env.vulkan_functions.vkDestroyInstance(instance, tracker.get());
545 }
546 ASSERT_TRUE(tracker.empty());
547 fail_index++;
548 }
549 }
550
551 // Test failure during vkCreateInstance & surface creation to make sure we don't leak memory if
552 // one of the out-of-memory conditions trigger.
TEST(Allocation,CreateSurfaceIntentionalAllocFailWithSettingsFilePresent)553 TEST(Allocation, CreateSurfaceIntentionalAllocFailWithSettingsFilePresent) {
554 FrameworkEnvironment env{FrameworkSettings{}.set_log_filter("error,warn")};
555 env.add_icd(TestICDDetails(TEST_ICD_PATH_VERSION_2)).setup_WSI();
556
557 const char* layer_name = "VK_LAYER_implicit";
558 env.add_implicit_layer(ManifestLayer{}.add_layer(ManifestLayer::LayerDescription{}
559 .set_name(layer_name)
560 .set_lib_path(TEST_LAYER_PATH_EXPORT_VERSION_2)
561 .set_disable_environment("DISABLE_ENV")),
562 "test_layer.json");
563 env.get_test_layer().set_do_spurious_allocations_in_create_instance(true).set_do_spurious_allocations_in_create_device(true);
564 env.update_loader_settings(
565 env.loader_settings.add_app_specific_setting(AppSpecificSettings{}.add_stderr_log_filter("all").add_layer_configuration(
566 LoaderSettingsLayerConfiguration{}
567 .set_name(layer_name)
568 .set_control("auto")
569 .set_path(env.get_shimmed_layer_manifest_path(0)))));
570
571 size_t fail_index = 0;
572 VkResult result = VK_ERROR_OUT_OF_HOST_MEMORY;
573 while (result == VK_ERROR_OUT_OF_HOST_MEMORY && fail_index <= 10000) {
574 MemoryTracker tracker({false, 0, true, fail_index});
575
576 VkInstance instance;
577 InstanceCreateInfo inst_create_info{};
578 inst_create_info.setup_WSI();
579 result = env.vulkan_functions.vkCreateInstance(inst_create_info.get(), tracker.get(), &instance);
580 if (result == VK_ERROR_OUT_OF_HOST_MEMORY) {
581 ASSERT_TRUE(tracker.empty());
582 fail_index++;
583 continue;
584 }
585
586 VkSurfaceKHR surface{};
587 result = create_surface(&env.vulkan_functions, instance, surface);
588 if (result == VK_ERROR_OUT_OF_HOST_MEMORY) {
589 env.vulkan_functions.vkDestroyInstance(instance, tracker.get());
590 ASSERT_TRUE(tracker.empty());
591 fail_index++;
592 continue;
593 }
594 env.vulkan_functions.vkDestroySurfaceKHR(instance, surface, tracker.get());
595
596 env.vulkan_functions.vkDestroyInstance(instance, tracker.get());
597 ASSERT_TRUE(tracker.empty());
598 fail_index++;
599 }
600 }
601
602 // Test failure during vkCreateInstance to make sure we don't leak memory if
603 // one of the out-of-memory conditions trigger.
TEST(Allocation,DriverEnvVarIntentionalAllocFail)604 TEST(Allocation, DriverEnvVarIntentionalAllocFail) {
605 FrameworkEnvironment env{FrameworkSettings{}.set_log_filter("error,warn")};
606 env.add_icd(TestICDDetails{TEST_ICD_PATH_VERSION_2}.set_discovery_type(ManifestDiscoveryType::env_var));
607
608 const char* layer_name = "VK_LAYER_implicit";
609 env.add_implicit_layer(ManifestLayer{}.add_layer(ManifestLayer::LayerDescription{}
610 .set_name(layer_name)
611 .set_lib_path(TEST_LAYER_PATH_EXPORT_VERSION_2)
612 .set_disable_environment("DISABLE_ENV")),
613 "test_layer.json");
614 env.get_test_layer().set_do_spurious_allocations_in_create_instance(true).set_do_spurious_allocations_in_create_device(true);
615
616 env.env_var_vk_icd_filenames.add_to_list("totally_made_up/path_to_fake/jason_file.json");
617 env.env_var_vk_icd_filenames.add_to_list("another\\bonkers\\file_path.json");
618 size_t fail_index = 0;
619 VkResult result = VK_ERROR_OUT_OF_HOST_MEMORY;
620 while (result == VK_ERROR_OUT_OF_HOST_MEMORY && fail_index <= 10000) {
621 MemoryTracker tracker({false, 0, true, fail_index});
622
623 VkInstance instance;
624 InstanceCreateInfo inst_create_info{};
625 result = env.vulkan_functions.vkCreateInstance(inst_create_info.get(), tracker.get(), &instance);
626 if (result == VK_SUCCESS) {
627 env.vulkan_functions.vkDestroyInstance(instance, tracker.get());
628 }
629 ASSERT_TRUE(tracker.empty());
630 fail_index++;
631 }
632 }
633
634 // Test failure during vkCreateDevice to make sure we don't leak memory if
635 // one of the out-of-memory conditions trigger.
636 // Use 2 physical devices so that anything which copies a list of devices item by item
637 // may fail.
TEST(Allocation,CreateDeviceIntentionalAllocFail)638 TEST(Allocation, CreateDeviceIntentionalAllocFail) {
639 FrameworkEnvironment env{FrameworkSettings{}.set_log_filter("error,warn")};
640 env.add_icd(TestICDDetails(TEST_ICD_PATH_VERSION_2))
641 .add_physical_device(PhysicalDevice{"physical_device_0"}
642 .add_queue_family_properties({{VK_QUEUE_GRAPHICS_BIT, 1, 0, {1, 1, 1}}, false})
643 .finish())
644 .add_physical_device(PhysicalDevice{"physical_device_1"}
645 .add_queue_family_properties({{VK_QUEUE_GRAPHICS_BIT, 1, 0, {1, 1, 1}}, false})
646 .finish());
647
648 const char* layer_name = "VK_LAYER_implicit";
649 env.add_implicit_layer(ManifestLayer{}.add_layer(ManifestLayer::LayerDescription{}
650 .set_name(layer_name)
651 .set_lib_path(TEST_LAYER_PATH_EXPORT_VERSION_2)
652 .set_disable_environment("DISABLE_ENV")),
653 "test_layer.json");
654 env.get_test_layer().set_do_spurious_allocations_in_create_instance(true).set_do_spurious_allocations_in_create_device(true);
655
656 InstWrapper inst{env.vulkan_functions};
657 ASSERT_NO_FATAL_FAILURE(inst.CheckCreate());
658
659 uint32_t physical_count = 2;
660 uint32_t returned_physical_count = 0;
661 ASSERT_EQ(VK_SUCCESS, inst->vkEnumeratePhysicalDevices(inst.inst, &returned_physical_count, nullptr));
662 ASSERT_EQ(physical_count, returned_physical_count);
663
664 VkPhysicalDevice physical_devices[2];
665 ASSERT_EQ(VK_SUCCESS, inst->vkEnumeratePhysicalDevices(inst.inst, &returned_physical_count, physical_devices));
666 ASSERT_EQ(physical_count, returned_physical_count);
667
668 uint32_t family_count = 1;
669 uint32_t returned_family_count = 0;
670 env.vulkan_functions.vkGetPhysicalDeviceQueueFamilyProperties(physical_devices[0], &returned_family_count, nullptr);
671 ASSERT_EQ(returned_family_count, family_count);
672
673 VkQueueFamilyProperties family;
674 env.vulkan_functions.vkGetPhysicalDeviceQueueFamilyProperties(physical_devices[0], &returned_family_count, &family);
675 ASSERT_EQ(returned_family_count, family_count);
676 ASSERT_EQ(family.queueFlags, static_cast<VkQueueFlags>(VK_QUEUE_GRAPHICS_BIT));
677 ASSERT_EQ(family.queueCount, family_count);
678 ASSERT_EQ(family.timestampValidBits, 0U);
679
680 size_t fail_index = 0;
681 VkResult result = VK_ERROR_OUT_OF_HOST_MEMORY;
682 while (result == VK_ERROR_OUT_OF_HOST_MEMORY) {
683 MemoryTracker tracker({false, 0, true, fail_index});
684
685 DeviceCreateInfo dev_create_info;
686 dev_create_info.add_device_queue(DeviceQueueCreateInfo{}.add_priority(0.0f));
687
688 VkDevice device;
689 result = inst->vkCreateDevice(physical_devices[0], dev_create_info.get(), tracker.get(), &device);
690 if (result == VK_SUCCESS || fail_index > 10000) {
691 VkQueue queue;
692 inst->vkGetDeviceQueue(device, 0, 0, &queue);
693
694 inst->vkDestroyDevice(device, tracker.get());
695 break;
696 }
697 ASSERT_TRUE(tracker.empty());
698 fail_index++;
699 }
700 }
701
// Test failure during vkCreateInstance and vkCreateDevice to make sure we don't
// leak memory if one of the out-of-memory conditions triggers.
// Includes drivers with several instance extensions, drivers that will fail to load, and directly loaded drivers.
TEST(Allocation, CreateInstanceDeviceIntentionalAllocFail) {
    FrameworkEnvironment env{FrameworkSettings{}.set_log_filter("error,warn")};
    uint32_t num_physical_devices = 4;
    uint32_t num_implicit_layers = 3;
    for (uint32_t i = 0; i < num_physical_devices; i++) {
        // Each regular driver exposes one physical device with a single graphics queue family
        // and several device extensions, to exercise the loader's extension allocation paths.
        env.add_icd(TestICDDetails(TEST_ICD_PATH_VERSION_2)
                        .icd_manifest.set_is_portability_driver(false)
                        .set_library_arch(sizeof(void*) == 8 ? "64" : "32"))
            .set_icd_api_version(VK_API_VERSION_1_1)
            .add_instance_extension("VK_KHR_get_physical_device_properties2")
            .add_physical_device("physical_device_0")
            .physical_devices.at(0)
            .add_queue_family_properties({{VK_QUEUE_GRAPHICS_BIT, 1, 0, {1, 1, 1}}, false})
            .add_extensions({"VK_EXT_one", "VK_EXT_two", "VK_EXT_three", "VK_EXT_four", "VK_EXT_five"});
    }

    // A driver binary of the wrong type: it should fail to load without breaking instance creation.
    env.add_icd(TestICDDetails(CURRENT_PLATFORM_DUMMY_BINARY_WRONG_TYPE).set_is_fake(true));

    // This driver is not discoverable through manifests; it is handed to the loader directly
    // through the VK_LUNARG_direct_driver_loading structures set up below.
    env.add_icd(TestICDDetails(TEST_ICD_PATH_VERSION_7).set_discovery_type(ManifestDiscoveryType::none));

    VkDirectDriverLoadingInfoLUNARG ddl_info{};
    ddl_info.sType = VK_STRUCTURE_TYPE_DIRECT_DRIVER_LOADING_INFO_LUNARG;
    ddl_info.pfnGetInstanceProcAddr = env.icds.back().icd_library.get_symbol("vk_icdGetInstanceProcAddr");

    VkDirectDriverLoadingListLUNARG ddl_list{};
    ddl_list.sType = VK_STRUCTURE_TYPE_DIRECT_DRIVER_LOADING_LIST_LUNARG;
    ddl_list.mode = VK_DIRECT_DRIVER_LOADING_MODE_INCLUSIVE_LUNARG;
    ddl_list.driverCount = 1;
    ddl_list.pDrivers = &ddl_info;

    // One implicit layer that makes extra ("spurious") allocations during vkCreateInstance and
    // vkCreateDevice, plus several plain implicit layers to grow the layer chain.
    const char* layer_name = "VK_LAYER_ImplicitAllocFail";
    env.add_implicit_layer(ManifestLayer{}.add_layer(ManifestLayer::LayerDescription{}
                                                         .set_name(layer_name)
                                                         .set_lib_path(TEST_LAYER_PATH_EXPORT_VERSION_2)
                                                         .set_disable_environment("DISABLE_ENV")),
                           "test_layer.json");
    env.get_test_layer().set_do_spurious_allocations_in_create_instance(true).set_do_spurious_allocations_in_create_device(true);
    for (uint32_t i = 1; i < num_implicit_layers + 1; i++) {
        env.add_implicit_layer(ManifestLayer{}.add_layer(ManifestLayer::LayerDescription{}
                                                             .set_name("VK_LAYER_Implicit1" + std::to_string(i))
                                                             .set_lib_path(TEST_LAYER_PATH_EXPORT_VERSION_2)
                                                             .set_disable_environment("DISABLE_ENV")),
                               "test_layer_" + std::to_string(i) + ".json");
    }
    // Throw in a complex json file to flex the json allocation routines
    env.write_file_from_source(COMPLEX_JSON_FILE, ManifestCategory::explicit_layer, ManifestLocation::explicit_layer,
                               "VK_LAYER_complex_file.json");

    // Sweep the allocation fail index upward: each iteration uses an allocator that fails after
    // `fail_index` alloc/realloc calls (MemoryTrackerSettings.fail_after_calls). Any early-out
    // must leave the tracker empty, i.e. every allocation made before the failure was freed.
    size_t fail_index = 0;
    VkResult result = VK_ERROR_OUT_OF_HOST_MEMORY;
    while (result == VK_ERROR_OUT_OF_HOST_MEMORY && fail_index <= 10000) {
        MemoryTracker tracker{{false, 0, true, fail_index}};
        fail_index++;  // applies to the next loop

        VkInstance instance;
        InstanceCreateInfo inst_create_info{};
        inst_create_info.add_extension(VK_LUNARG_DIRECT_DRIVER_LOADING_EXTENSION_NAME);
        inst_create_info.instance_info.pNext = reinterpret_cast<const void*>(&ddl_list);
        result = env.vulkan_functions.vkCreateInstance(inst_create_info.get(), tracker.get(), &instance);
        if (result == VK_ERROR_OUT_OF_HOST_MEMORY) {
            ASSERT_TRUE(tracker.empty());
            continue;
        }
        ASSERT_EQ(result, VK_SUCCESS);

        uint32_t returned_physical_count = 0;
        result = env.vulkan_functions.vkEnumeratePhysicalDevices(instance, &returned_physical_count, nullptr);
        if (result == VK_ERROR_OUT_OF_HOST_MEMORY) {
            env.vulkan_functions.vkDestroyInstance(instance, tracker.get());
            ASSERT_TRUE(tracker.empty());
            continue;
        }
        ASSERT_EQ(result, VK_SUCCESS);
        ASSERT_EQ(num_physical_devices, returned_physical_count);

        std::vector<VkPhysicalDevice> physical_devices;
        physical_devices.resize(returned_physical_count);
        result = env.vulkan_functions.vkEnumeratePhysicalDevices(instance, &returned_physical_count, physical_devices.data());
        if (result == VK_ERROR_OUT_OF_HOST_MEMORY) {
            env.vulkan_functions.vkDestroyInstance(instance, tracker.get());
            ASSERT_TRUE(tracker.empty());
            continue;
        }
        ASSERT_EQ(result, VK_SUCCESS);
        ASSERT_EQ(num_physical_devices, returned_physical_count);
        for (uint32_t i = 0; i < returned_physical_count; i++) {
            uint32_t family_count = 1;
            uint32_t returned_family_count = 0;
            env.vulkan_functions.vkGetPhysicalDeviceQueueFamilyProperties(physical_devices.at(i), &returned_family_count, nullptr);
            ASSERT_EQ(returned_family_count, family_count);

            VkQueueFamilyProperties family;
            env.vulkan_functions.vkGetPhysicalDeviceQueueFamilyProperties(physical_devices.at(i), &returned_family_count, &family);
            ASSERT_EQ(returned_family_count, family_count);
            ASSERT_EQ(family.queueFlags, static_cast<VkQueueFlags>(VK_QUEUE_GRAPHICS_BIT));
            ASSERT_EQ(family.queueCount, family_count);
            ASSERT_EQ(family.timestampValidBits, 0U);

            DeviceCreateInfo dev_create_info;
            dev_create_info.add_device_queue(DeviceQueueCreateInfo{}.add_priority(0.0f));

            VkDevice device;
            result = env.vulkan_functions.vkCreateDevice(physical_devices.at(i), dev_create_info.get(), tracker.get(), &device);
            if (result == VK_ERROR_OUT_OF_HOST_MEMORY) {
                // No device was created for this index; fall through to instance teardown and
                // the leak check, then retry with a higher fail index.
                break;
            }
            ASSERT_EQ(result, VK_SUCCESS);

            VkQueue queue;
            env.vulkan_functions.vkGetDeviceQueue(device, 0, 0, &queue);

            env.vulkan_functions.vkDestroyDevice(device, tracker.get());
        }
        env.vulkan_functions.vkDestroyInstance(instance, tracker.get());

        ASSERT_TRUE(tracker.empty());
    }
}
823
824 // Test failure during vkCreateInstance when a driver of the wrong architecture is present
825 // to make sure the loader uses the valid ICD and doesn't report incompatible driver just because
826 // an incompatible driver exists
TEST(TryLoadWrongBinaries, CreateInstanceIntentionalAllocFail) {
    FrameworkEnvironment env{FrameworkSettings{}.set_log_filter("error,warn")};
    // One valid driver plus one binary of the wrong type; the loader must use the valid
    // driver and never report VK_ERROR_INCOMPATIBLE_DRIVER just because the bad one exists.
    env.add_icd(TestICDDetails(TEST_ICD_PATH_VERSION_2));
    env.add_icd(TestICDDetails(CURRENT_PLATFORM_DUMMY_BINARY_WRONG_TYPE).set_is_fake(true));

    // Implicit layer that performs extra allocations during create-instance/create-device.
    const char* implicit_layer_name = "VK_LAYER_implicit";
    env.add_implicit_layer(ManifestLayer{}.add_layer(ManifestLayer::LayerDescription{}
                                                         .set_name(implicit_layer_name)
                                                         .set_lib_path(TEST_LAYER_PATH_EXPORT_VERSION_2)
                                                         .set_disable_environment("DISABLE_ENV")),
                           "test_layer.json");
    auto& test_layer = env.get_test_layer();
    test_layer.set_do_spurious_allocations_in_create_instance(true);
    test_layer.set_do_spurious_allocations_in_create_device(true);

    // Sweep the allocation fail index upward until vkCreateInstance stops reporting
    // out-of-host-memory (or a safety cap is reached). Every iteration must leave the
    // tracker empty, proving nothing leaked on the failure path.
    for (size_t attempt = 0; attempt <= 10000; attempt++) {
        MemoryTracker tracker{{false, 0, true, attempt}};

        InstanceCreateInfo create_info{};
        VkInstance instance;
        VkResult res = env.vulkan_functions.vkCreateInstance(create_info.get(), tracker.get(), &instance);
        if (res == VK_SUCCESS) {
            env.vulkan_functions.vkDestroyInstance(instance, tracker.get());
        }
        ASSERT_NE(res, VK_ERROR_INCOMPATIBLE_DRIVER);
        ASSERT_TRUE(tracker.empty());

        if (res != VK_ERROR_OUT_OF_HOST_MEMORY) {
            break;  // success (or a non-OOM failure): sweep complete
        }
    }
}
856
// Test allocation failure during vkEnumeratePhysicalDevices -- including the VK_INCOMPLETE
// path taken when the physical-device count changes between calls -- to make sure we don't
// leak memory when an out-of-memory condition triggers.
TEST(Allocation, EnumeratePhysicalDevicesIntentionalAllocFail) {
    FrameworkEnvironment env{FrameworkSettings{}.set_log_filter("error,warn")};
    env.add_icd(TestICDDetails(TEST_ICD_PATH_VERSION_2));

    // Implicit layer that performs extra allocations during create-instance/create-device so
    // the fail-index sweep also exercises layer allocation paths.
    const char* layer_name = "VK_LAYER_implicit";
    env.add_implicit_layer(ManifestLayer{}.add_layer(ManifestLayer::LayerDescription{}
                                                         .set_name(layer_name)
                                                         .set_lib_path(TEST_LAYER_PATH_EXPORT_VERSION_2)
                                                         .set_disable_environment("DISABLE_ENV")),
                           "test_layer.json");
    env.get_test_layer().set_do_spurious_allocations_in_create_instance(true).set_do_spurious_allocations_in_create_device(true);

    size_t fail_index = 0;
    bool reached_the_end = false;
    uint32_t starting_physical_dev_count = 3;
    while (!reached_the_end && fail_index <= 10000) {
        fail_index++;  // applies to the next loop
        uint32_t physical_dev_count = starting_physical_dev_count;
        VkResult result = VK_ERROR_OUT_OF_HOST_MEMORY;
        // Start each iteration from a fresh driver holding `starting_physical_dev_count` devices.
        auto& driver = env.reset_icd();

        for (uint32_t i = 0; i < physical_dev_count; i++) {
            driver.physical_devices.emplace_back(std::string("physical_device_") + std::to_string(i))
                .add_queue_family_properties({{VK_QUEUE_GRAPHICS_BIT, 1, 0, {1, 1, 1}}, false});
        }
        // MemoryTrackerSettings: fail after `fail_index` calls to alloc/realloc.
        MemoryTracker tracker{{false, 0, true, fail_index}};
        InstanceCreateInfo inst_create_info;
        VkInstance instance;
        result = env.vulkan_functions.vkCreateInstance(inst_create_info.get(), tracker.get(), &instance);
        if (result == VK_ERROR_OUT_OF_HOST_MEMORY) {
            ASSERT_TRUE(tracker.empty());
            continue;
        }
        ASSERT_EQ(result, VK_SUCCESS);

        uint32_t returned_physical_count = 0;
        result = env.vulkan_functions.vkEnumeratePhysicalDevices(instance, &returned_physical_count, nullptr);
        if (result == VK_ERROR_OUT_OF_HOST_MEMORY) {
            env.vulkan_functions.vkDestroyInstance(instance, tracker.get());
            ASSERT_TRUE(tracker.empty());
            continue;
        }
        ASSERT_EQ(physical_dev_count, returned_physical_count);

        // Grow the driver's device list between the count query and the handle query so the
        // handle query below returns VK_INCOMPLETE and the re-query path runs.
        for (uint32_t i = 0; i < 2; i++) {
            driver.physical_devices.emplace_back(std::string("physical_device_") + std::to_string(physical_dev_count))
                .add_queue_family_properties({{VK_QUEUE_GRAPHICS_BIT, 1, 0, {1, 1, 1}}, false});
            physical_dev_count += 1;
        }

        // (count, value) constructor: physical_dev_count null handles.
        std::vector<VkPhysicalDevice> physical_devices(physical_dev_count, VK_NULL_HANDLE);
        result = env.vulkan_functions.vkEnumeratePhysicalDevices(instance, &returned_physical_count, physical_devices.data());
        if (result == VK_ERROR_OUT_OF_HOST_MEMORY) {
            env.vulkan_functions.vkDestroyInstance(instance, tracker.get());
            ASSERT_TRUE(tracker.empty());
            continue;
        }
        if (result == VK_INCOMPLETE) {
            // The driver gained devices since the first count query: re-query the count and
            // fetch the full handle list.
            result = env.vulkan_functions.vkEnumeratePhysicalDevices(instance, &returned_physical_count, nullptr);
            if (result == VK_ERROR_OUT_OF_HOST_MEMORY) {
                env.vulkan_functions.vkDestroyInstance(instance, tracker.get());
                ASSERT_TRUE(tracker.empty());
                continue;
            }
            physical_devices.resize(returned_physical_count);
            result = env.vulkan_functions.vkEnumeratePhysicalDevices(instance, &returned_physical_count, physical_devices.data());
            if (result == VK_ERROR_OUT_OF_HOST_MEMORY) {
                env.vulkan_functions.vkDestroyInstance(instance, tracker.get());
                ASSERT_TRUE(tracker.empty());
                continue;
            }
        }
        ASSERT_EQ(physical_dev_count, returned_physical_count);

        // Value-initialize so slots whose vkCreateDevice call failed stay VK_NULL_HANDLE.
        // (Previously the array was left uninitialized and the cleanup loop keyed off the
        // *last* vkCreateDevice result, which could destroy garbage handles or leak devices
        // that were successfully created before a later failure.)
        std::array<VkDevice, 5> devices{};
        for (uint32_t i = 0; i < returned_physical_count; i++) {
            uint32_t family_count = 1;
            uint32_t returned_family_count = 0;
            env.vulkan_functions.vkGetPhysicalDeviceQueueFamilyProperties(physical_devices[i], &returned_family_count, nullptr);
            ASSERT_EQ(returned_family_count, family_count);

            VkQueueFamilyProperties family;
            env.vulkan_functions.vkGetPhysicalDeviceQueueFamilyProperties(physical_devices[i], &returned_family_count, &family);
            ASSERT_EQ(returned_family_count, family_count);
            ASSERT_EQ(family.queueFlags, static_cast<VkQueueFlags>(VK_QUEUE_GRAPHICS_BIT));
            ASSERT_EQ(family.queueCount, family_count);
            ASSERT_EQ(family.timestampValidBits, 0U);

            DeviceCreateInfo dev_create_info;
            dev_create_info.add_device_queue(DeviceQueueCreateInfo{}.add_priority(0.0f));

            result = env.vulkan_functions.vkCreateDevice(physical_devices[i], dev_create_info.get(), tracker.get(), &devices[i]);
            if (result == VK_SUCCESS) {
                VkQueue queue;
                env.vulkan_functions.vkGetDeviceQueue(devices[i], 0, 0, &queue);
            } else {
                devices[i] = VK_NULL_HANDLE;  // make sure cleanup skips this slot
            }
        }
        // Destroy exactly the devices that were successfully created.
        for (uint32_t i = 0; i < returned_physical_count; i++) {
            if (devices[i] != VK_NULL_HANDLE) {
                env.vulkan_functions.vkDestroyDevice(devices[i], tracker.get());
            }
        }

        env.vulkan_functions.vkDestroyInstance(instance, tracker.get());
        ASSERT_TRUE(tracker.empty());
        reached_the_end = true;
    }
}
967 #if defined(WIN32)
// Test failure during vkCreateInstance and vkCreateDevice with a DXGI-discovered driver
// to make sure we don't leak memory if one of the out-of-memory conditions triggers.
TEST(Allocation, CreateInstanceDeviceWithDXGIDriverIntentionalAllocFail) {
    FrameworkEnvironment env{FrameworkSettings{}.set_log_filter("error,warn")};
    // Two drivers: one kept out of normal manifest discovery (null_dir) so it is only found
    // through the DXGI/D3DKMT path set up below, and one discovered normally.
    env.add_icd(TestICDDetails(TEST_ICD_PATH_VERSION_6).set_discovery_type(ManifestDiscoveryType::null_dir));
    env.add_icd(TestICDDetails(TEST_ICD_PATH_VERSION_2));

    for (uint32_t i = 0; i < 2; i++) {
        auto& driver = env.get_test_icd(i);
        driver.physical_devices.emplace_back(std::string("physical_device_") + std::to_string(i))
            .add_queue_family_properties({{VK_QUEUE_GRAPHICS_BIT, 1, 0, {1, 1, 1}}, false});
    }

    // Implicit layer that performs extra allocations during vkCreateInstance and vkCreateDevice.
    const char* layer_name = "VK_LAYER_implicit";
    env.add_implicit_layer(ManifestLayer{}.add_layer(ManifestLayer::LayerDescription{}
                                                         .set_name(layer_name)
                                                         .set_lib_path(TEST_LAYER_PATH_EXPORT_VERSION_2)
                                                         .set_disable_environment("DISABLE_ENV")),
                           "test_layer.json");
    env.get_test_layer().set_do_spurious_allocations_in_create_instance(true).set_do_spurious_allocations_in_create_device(true);

    auto& known_driver = known_driver_list.at(2);  // which driver this test pretends to be
    DXGI_ADAPTER_DESC1 desc1{};
    desc1.VendorId = known_driver.vendor_id;
    desc1.AdapterLuid = _LUID{10, 1000};
    env.platform_shim->add_dxgi_adapter(GpuType::discrete, desc1);
    // Give the first driver the DXGI adapter's LUID so the loader can match them up.
    env.get_test_icd(0).set_adapterLUID(desc1.AdapterLuid);

    env.platform_shim->add_d3dkmt_adapter(D3DKMT_Adapter{0, _LUID{10, 1000}}.add_driver_manifest_path(env.get_icd_manifest_path()));

    // Sweep the allocation fail index upward: each iteration's allocator fails after
    // `fail_index` alloc/realloc calls. Every early-out must leave the tracker empty.
    size_t fail_index = 0;
    VkResult result = VK_ERROR_OUT_OF_HOST_MEMORY;
    while (result == VK_ERROR_OUT_OF_HOST_MEMORY && fail_index <= 10000) {
        MemoryTracker tracker({false, 0, true, fail_index});
        fail_index++;  // applies to the next loop

        VkInstance instance;
        InstanceCreateInfo inst_create_info{};
        result = env.vulkan_functions.vkCreateInstance(inst_create_info.get(), tracker.get(), &instance);
        if (result == VK_ERROR_OUT_OF_HOST_MEMORY) {
            ASSERT_TRUE(tracker.empty());
            continue;
        }

        uint32_t physical_count = 2;
        uint32_t returned_physical_count = 0;
        result = env.vulkan_functions.vkEnumeratePhysicalDevices(instance, &returned_physical_count, nullptr);
        // VK_INCOMPLETE is treated the same as OOM here: tear down and retry with a higher
        // fail index (presumably a mid-enumeration allocation failure can leave a partial
        // device list -- NOTE(review): confirm against loader behavior).
        if (result == VK_ERROR_OUT_OF_HOST_MEMORY || result == VK_INCOMPLETE) {
            env.vulkan_functions.vkDestroyInstance(instance, tracker.get());
            ASSERT_TRUE(tracker.empty());
            continue;
        }
        ASSERT_EQ(physical_count, returned_physical_count);

        std::array<VkPhysicalDevice, 2> physical_devices;
        result = env.vulkan_functions.vkEnumeratePhysicalDevices(instance, &returned_physical_count, physical_devices.data());
        if (result == VK_ERROR_OUT_OF_HOST_MEMORY || result == VK_INCOMPLETE) {
            env.vulkan_functions.vkDestroyInstance(instance, tracker.get());
            ASSERT_TRUE(tracker.empty());
            continue;
        }
        ASSERT_EQ(physical_count, returned_physical_count);

        std::array<VkDevice, 2> devices;
        for (uint32_t i = 0; i < returned_physical_count; i++) {
            uint32_t family_count = 1;
            uint32_t returned_family_count = 0;
            env.vulkan_functions.vkGetPhysicalDeviceQueueFamilyProperties(physical_devices[i], &returned_family_count, nullptr);
            ASSERT_EQ(returned_family_count, family_count);

            VkQueueFamilyProperties family;
            env.vulkan_functions.vkGetPhysicalDeviceQueueFamilyProperties(physical_devices[i], &returned_family_count, &family);
            ASSERT_EQ(returned_family_count, family_count);
            ASSERT_EQ(family.queueFlags, static_cast<VkQueueFlags>(VK_QUEUE_GRAPHICS_BIT));
            ASSERT_EQ(family.queueCount, family_count);
            ASSERT_EQ(family.timestampValidBits, 0U);

            DeviceCreateInfo dev_create_info;
            dev_create_info.add_device_queue(DeviceQueueCreateInfo{}.add_priority(0.0f));

            result = env.vulkan_functions.vkCreateDevice(physical_devices[i], dev_create_info.get(), tracker.get(), &devices[i]);
            if (result == VK_ERROR_OUT_OF_HOST_MEMORY) {
                devices[i] = VK_NULL_HANDLE;  // mark so the cleanup loop skips this slot
            } else {
                VkQueue queue;
                env.vulkan_functions.vkGetDeviceQueue(devices[i], 0, 0, &queue);
            }
        }
        // Destroy only the devices that were actually created.
        for (uint32_t i = 0; i < returned_physical_count; i++) {
            if (devices[i] != VK_NULL_HANDLE) {
                env.vulkan_functions.vkDestroyDevice(devices[i], tracker.get());
            }
        }
        env.vulkan_functions.vkDestroyInstance(instance, tracker.get());

        ASSERT_TRUE(tracker.empty());
    }
}
1066 #endif
1067