/*
 * Copyright (c) 2015-2019 The Khronos Group Inc.
 * Copyright (c) 2015-2019 Valve Corporation
 * Copyright (c) 2015-2019 LunarG, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Courtney Goeltzenleuchter <courtney@LunarG.com>
 * Author: Tony Barbour <tony@LunarG.com>
 */

#include "test_common.h"    // NOEXCEPT macro (must precede vktestbinding.h)
#include "vktestbinding.h"  // Left for clarity, no harm, already included via test_common.h
#include "vk_typemap_helper.h"
#include <algorithm>
#include <assert.h>
#include <iostream>
#include <stdarg.h>
#include <string.h>  // memset(), strcmp()

namespace {

#define NON_DISPATCHABLE_HANDLE_INIT(create_func, dev, ...)                              \
    do {                                                                                 \
        handle_type handle;                                                              \
        if (EXPECT(create_func(dev.handle(), __VA_ARGS__, NULL, &handle) == VK_SUCCESS)) \
            NonDispHandle::init(dev.handle(), handle);                                   \
    } while (0)

#define NON_DISPATCHABLE_HANDLE_DTOR(cls, destroy_func)            \
    cls::~cls() {                                                  \
        if (initialized()) destroy_func(device(), handle(), NULL); \
    }

#define STRINGIFY(x) #x
#define EXPECT(expr) ((expr) ? true : expect_failure(STRINGIFY(expr), __FILE__, __LINE__, __FUNCTION__))
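
// Illustrative only: for a wrapper whose handle_type is VkFence,
//     NON_DISPATCHABLE_HANDLE_INIT(vkCreateFence, dev, &info);
// expands (roughly) to
//     do {
//         VkFence handle;
//         if (EXPECT(vkCreateFence(dev.handle(), &info, NULL, &handle) == VK_SUCCESS))
//             NonDispHandle::init(dev.handle(), handle);
//     } while (0)
// so a failed create is reported through expect_failure() below and the
// wrapper simply stays uninitialized rather than aborting the test.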

vk_testing::ErrorCallback error_callback;

bool expect_failure(const char *expr, const char *file, unsigned int line, const char *function) {
    if (error_callback) {
        error_callback(expr, file, line, function);
    } else {
        std::cerr << file << ":" << line << ": " << function << ": Expectation `" << expr << "' failed.\n";
    }

    return false;
}

}  // namespace

namespace vk_testing {

void set_error_callback(ErrorCallback callback) { error_callback = callback; }
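
// Usage sketch (illustrative; `MyErrorHandler` is a hypothetical callback, not
// part of these bindings): a test harness can route expectation failures into
// its own reporting instead of the default stderr message.
//
//     static void MyErrorHandler(const char *expr, const char *file, unsigned int line, const char *function) {
//         ADD_FAILURE() << file << ":" << line << " (" << function << "): " << expr;  // e.g. GoogleTest
//     }
//     ...
//     vk_testing::set_error_callback(MyErrorHandler);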

VkPhysicalDeviceProperties PhysicalDevice::properties() const {
    VkPhysicalDeviceProperties info;

    vkGetPhysicalDeviceProperties(handle(), &info);

    return info;
}

std::vector<VkQueueFamilyProperties> PhysicalDevice::queue_properties() const {
    std::vector<VkQueueFamilyProperties> info;
    uint32_t count;

    // Call once with NULL data to receive count
    vkGetPhysicalDeviceQueueFamilyProperties(handle(), &count, NULL);
    info.resize(count);
    vkGetPhysicalDeviceQueueFamilyProperties(handle(), &count, info.data());

    return info;
}

VkPhysicalDeviceMemoryProperties PhysicalDevice::memory_properties() const {
    VkPhysicalDeviceMemoryProperties info;

    vkGetPhysicalDeviceMemoryProperties(handle(), &info);

    return info;
}

VkPhysicalDeviceFeatures PhysicalDevice::features() const {
    VkPhysicalDeviceFeatures features;
    vkGetPhysicalDeviceFeatures(handle(), &features);
    return features;
}

/*
 * Return list of Global layers available
 */
std::vector<VkLayerProperties> GetGlobalLayers() {
    VkResult err;
    std::vector<VkLayerProperties> layers;
    uint32_t layer_count;

    do {
        layer_count = 0;
        err = vkEnumerateInstanceLayerProperties(&layer_count, NULL);

        if (err == VK_SUCCESS) {
            // resize(), not reserve(): writing through data() into
            // reserved-but-unsized storage would leave size() at zero and
            // the returned list empty.
            layers.resize(layer_count);
            err = vkEnumerateInstanceLayerProperties(&layer_count, layers.data());
        }
    } while (err == VK_INCOMPLETE);

    assert(err == VK_SUCCESS);

    return layers;
}

/*
 * Return list of Global extensions provided by the ICD / Loader
 */
std::vector<VkExtensionProperties> GetGlobalExtensions() { return GetGlobalExtensions(NULL); }

/*
 * Return list of Global extensions provided by the specified layer
 * If pLayerName is NULL, will return extensions implemented by the loader /
 * ICDs
 */
std::vector<VkExtensionProperties> GetGlobalExtensions(const char *pLayerName) {
    std::vector<VkExtensionProperties> exts;
    uint32_t ext_count;
    VkResult err;

    do {
        ext_count = 0;
        err = vkEnumerateInstanceExtensionProperties(pLayerName, &ext_count, NULL);

        if (err == VK_SUCCESS) {
            exts.resize(ext_count);
            err = vkEnumerateInstanceExtensionProperties(pLayerName, &ext_count, exts.data());
        }
    } while (err == VK_INCOMPLETE);

    assert(err == VK_SUCCESS);

    return exts;
}
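
// Usage sketch (illustrative): checking whether the loader advertises an
// instance extension before enabling it.
//
//     const std::vector<VkExtensionProperties> exts = GetGlobalExtensions();
//     const bool has_surface =
//         std::any_of(exts.begin(), exts.end(), [](const VkExtensionProperties &e) {
//             return strcmp(e.extensionName, VK_KHR_SURFACE_EXTENSION_NAME) == 0;
//         });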

/*
 * Return list of PhysicalDevice extensions provided by the ICD / Loader
 */
std::vector<VkExtensionProperties> PhysicalDevice::extensions() const { return extensions(NULL); }

/*
 * Return list of PhysicalDevice extensions provided by the specified layer
 * If pLayerName is NULL, will return extensions for ICD / loader.
 */
std::vector<VkExtensionProperties> PhysicalDevice::extensions(const char *pLayerName) const {
    std::vector<VkExtensionProperties> exts;
    VkResult err;

    do {
        uint32_t extCount = 0;
        err = vkEnumerateDeviceExtensionProperties(handle(), pLayerName, &extCount, NULL);

        if (err == VK_SUCCESS) {
            exts.resize(extCount);
            err = vkEnumerateDeviceExtensionProperties(handle(), pLayerName, &extCount, exts.data());
        }
    } while (err == VK_INCOMPLETE);

    assert(err == VK_SUCCESS);

    return exts;
}

bool PhysicalDevice::set_memory_type(const uint32_t type_bits, VkMemoryAllocateInfo *info, const VkFlags properties,
                                     const VkFlags forbid) const {
    uint32_t type_mask = type_bits;
    // Search memtypes to find first index with those properties
    for (uint32_t i = 0; i < memory_properties_.memoryTypeCount; i++) {
        if ((type_mask & 1) == 1) {
            // Type is available, does it match user properties?
            if ((memory_properties_.memoryTypes[i].propertyFlags & properties) == properties &&
                (memory_properties_.memoryTypes[i].propertyFlags & forbid) == 0) {
                info->memoryTypeIndex = i;
                return true;
            }
        }
        type_mask >>= 1;
    }
    // No memory types matched, return failure
    return false;
}
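
// Usage sketch (illustrative): picking a host-visible, non-device-local memory
// type for a resource. Assumes `dev` is an initialized vk_testing::Device and
// `reqs` came from a vkGet*MemoryRequirements call for the resource.
//
//     VkMemoryAllocateInfo alloc = {};
//     alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
//     alloc.allocationSize = reqs.size;
//     if (dev.phy().set_memory_type(reqs.memoryTypeBits, &alloc,
//                                   VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
//                                   VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT /* forbid */)) {
//         DeviceMemory mem;
//         mem.init(dev, alloc);  // vkAllocateMemory via the wrapper
//     }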

/*
 * Return list of PhysicalDevice layers
 */
std::vector<VkLayerProperties> PhysicalDevice::layers() const {
    std::vector<VkLayerProperties> layer_props;
    VkResult err;

    do {
        uint32_t layer_count = 0;
        err = vkEnumerateDeviceLayerProperties(handle(), &layer_count, NULL);

        if (err == VK_SUCCESS) {
            // resize(), not reserve(): see GetGlobalLayers() above.
            layer_props.resize(layer_count);
            err = vkEnumerateDeviceLayerProperties(handle(), &layer_count, layer_props.data());
        }
    } while (err == VK_INCOMPLETE);

    assert(err == VK_SUCCESS);

    return layer_props;
}

QueueCreateInfoArray::QueueCreateInfoArray(const std::vector<VkQueueFamilyProperties> &queue_props)
    : queue_info_(), queue_priorities_() {
    queue_info_.reserve(queue_props.size());

    for (uint32_t i = 0; i < (uint32_t)queue_props.size(); ++i) {
        if (queue_props[i].queueCount > 0) {
            VkDeviceQueueCreateInfo qi = {};
            qi.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
            qi.pNext = NULL;
            qi.queueFamilyIndex = i;
            qi.queueCount = queue_props[i].queueCount;
            queue_priorities_.emplace_back(qi.queueCount, 0.0f);
            // Index with back(), not [i]: families with zero queues are
            // skipped, so queue_priorities_ can be shorter than i + 1.
            qi.pQueuePriorities = queue_priorities_.back().data();
            queue_info_.push_back(qi);
        }
    }
}

Device::~Device() {
    if (!initialized()) return;

    vkDestroyDevice(handle(), NULL);
}

void Device::init(std::vector<const char *> &extensions, VkPhysicalDeviceFeatures *features, void *create_device_pnext) {
    // request all queues
    const std::vector<VkQueueFamilyProperties> queue_props = phy_.queue_properties();
    QueueCreateInfoArray queue_info(queue_props);  // reuse the list queried above
    for (uint32_t i = 0; i < (uint32_t)queue_props.size(); i++) {
        if (queue_props[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
            graphics_queue_node_index_ = i;
            break;
        }
    }
    // Only request creation with queuefamilies that have at least one queue
    std::vector<VkDeviceQueueCreateInfo> create_queue_infos;
    auto qci = queue_info.data();
    for (uint32_t j = 0; j < queue_info.size(); ++j) {
        if (qci[j].queueCount) {
            create_queue_infos.push_back(qci[j]);
        }
    }

    enabled_extensions_ = extensions;

    VkDeviceCreateInfo dev_info = {};
    dev_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
    dev_info.pNext = create_device_pnext;
    dev_info.queueCreateInfoCount = create_queue_infos.size();
    dev_info.pQueueCreateInfos = create_queue_infos.data();
    dev_info.enabledLayerCount = 0;
    dev_info.ppEnabledLayerNames = NULL;
    dev_info.enabledExtensionCount = extensions.size();
    dev_info.ppEnabledExtensionNames = extensions.data();

    VkPhysicalDeviceFeatures all_features;
    // Let VkPhysicalDeviceFeatures2 take priority over VkPhysicalDeviceFeatures,
    // since it supports extensions

    if (!(lvl_find_in_chain<VkPhysicalDeviceFeatures2>(dev_info.pNext))) {
        if (features) {
            dev_info.pEnabledFeatures = features;
        } else {
            // request all supportable features enabled
            all_features = phy().features();
            dev_info.pEnabledFeatures = &all_features;
        }
    }

    init(dev_info);
}
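
// Usage sketch (illustrative): creating a test device with one extension
// enabled. Assumes `phys_dev` is a VkPhysicalDevice from instance enumeration,
// and assumes Device's VkPhysicalDevice constructor and init()'s default
// arguments as declared in vktestbinding.h.
//
//     vk_testing::Device dev(phys_dev);
//     std::vector<const char *> device_extensions = {VK_KHR_MAINTENANCE1_EXTENSION_NAME};
//     dev.init(device_extensions);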

void Device::init(const VkDeviceCreateInfo &info) {
    VkDevice dev;

    if (EXPECT(vkCreateDevice(phy_.handle(), &info, NULL, &dev) == VK_SUCCESS)) Handle::init(dev);
    if (!initialized()) return;  // device creation failed; queues/formats can't be queried

    init_queues();
    init_formats();
}

void Device::init_queues() {
    uint32_t queue_node_count;

    // Call with NULL data to get count
    vkGetPhysicalDeviceQueueFamilyProperties(phy_.handle(), &queue_node_count, NULL);
    EXPECT(queue_node_count >= 1);

    std::vector<VkQueueFamilyProperties> queue_props(queue_node_count);

    vkGetPhysicalDeviceQueueFamilyProperties(phy_.handle(), &queue_node_count, queue_props.data());

    queue_families_.resize(queue_node_count);
    for (uint32_t i = 0; i < queue_node_count; i++) {
        VkQueue queue;

        QueueFamilyQueues &queue_storage = queue_families_[i];
        queue_storage.reserve(queue_props[i].queueCount);
        for (uint32_t j = 0; j < queue_props[i].queueCount; j++) {
            // TODO: Need to add support for separate MEMMGR and work queues,
            // including synchronization
            vkGetDeviceQueue(handle(), i, j, &queue);

            // Store single copy of the queue object that will self destruct
            queue_storage.emplace_back(new Queue(queue, i));

            if (queue_props[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
                queues_[GRAPHICS].push_back(queue_storage.back().get());
            }

            if (queue_props[i].queueFlags & VK_QUEUE_COMPUTE_BIT) {
                queues_[COMPUTE].push_back(queue_storage.back().get());
            }

            if (queue_props[i].queueFlags & VK_QUEUE_TRANSFER_BIT) {
                queues_[DMA].push_back(queue_storage.back().get());
            }
        }
    }

    EXPECT(!queues_[GRAPHICS].empty() || !queues_[COMPUTE].empty());
}
const Device::QueueFamilyQueues &Device::queue_family_queues(uint32_t queue_family) const {
    assert(queue_family < queue_families_.size());
    return queue_families_[queue_family];
}

void Device::init_formats() {
    for (int f = VK_FORMAT_BEGIN_RANGE; f <= VK_FORMAT_END_RANGE; f++) {
        const VkFormat fmt = static_cast<VkFormat>(f);
        const VkFormatProperties props = format_properties(fmt);

        if (props.linearTilingFeatures) {
            const Format tmp = {fmt, VK_IMAGE_TILING_LINEAR, props.linearTilingFeatures};
            formats_.push_back(tmp);
        }

        if (props.optimalTilingFeatures) {
            const Format tmp = {fmt, VK_IMAGE_TILING_OPTIMAL, props.optimalTilingFeatures};
            formats_.push_back(tmp);
        }
    }

    EXPECT(!formats_.empty());
}

bool Device::IsEnabledExtension(const char *extension) {
    const auto is_x = [&extension](const char *enabled_extension) { return strcmp(extension, enabled_extension) == 0; };
    return std::any_of(enabled_extensions_.begin(), enabled_extensions_.end(), is_x);
}

VkFormatProperties Device::format_properties(VkFormat format) {
    VkFormatProperties data;
    vkGetPhysicalDeviceFormatProperties(phy().handle(), format, &data);

    return data;
}

void Device::wait() { EXPECT(vkDeviceWaitIdle(handle()) == VK_SUCCESS); }

VkResult Device::wait(const std::vector<const Fence *> &fences, bool wait_all, uint64_t timeout) {
    const std::vector<VkFence> fence_handles = MakeVkHandles<VkFence>(fences);
    VkResult err = vkWaitForFences(handle(), fence_handles.size(), fence_handles.data(), wait_all, timeout);
    EXPECT(err == VK_SUCCESS || err == VK_TIMEOUT);

    return err;
}

void Device::update_descriptor_sets(const std::vector<VkWriteDescriptorSet> &writes,
                                    const std::vector<VkCopyDescriptorSet> &copies) {
    vkUpdateDescriptorSets(handle(), writes.size(), writes.data(), copies.size(), copies.data());
}

VkResult Queue::submit(const std::vector<const CommandBuffer *> &cmds, const Fence &fence, bool expect_success) {
    const std::vector<VkCommandBuffer> cmd_handles = MakeVkHandles<VkCommandBuffer>(cmds);
    VkSubmitInfo submit_info;
    submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submit_info.pNext = NULL;
    submit_info.waitSemaphoreCount = 0;
    submit_info.pWaitSemaphores = NULL;
    submit_info.pWaitDstStageMask = NULL;
    submit_info.commandBufferCount = (uint32_t)cmd_handles.size();
    submit_info.pCommandBuffers = cmd_handles.data();
    submit_info.signalSemaphoreCount = 0;
    submit_info.pSignalSemaphores = NULL;

    VkResult result = vkQueueSubmit(handle(), 1, &submit_info, fence.handle());
    if (expect_success) EXPECT(result == VK_SUCCESS);
    return result;
}

VkResult Queue::submit(const CommandBuffer &cmd, const Fence &fence, bool expect_success) {
    return submit(std::vector<const CommandBuffer *>(1, &cmd), fence, expect_success);
}

VkResult Queue::submit(const CommandBuffer &cmd, bool expect_success) {
    Fence fence;  // default-constructed: a null fence is passed to vkQueueSubmit
    return submit(cmd, fence, expect_success);
}

VkResult Queue::wait() {
    VkResult result = vkQueueWaitIdle(handle());
    EXPECT(result == VK_SUCCESS);
    return result;
}

DeviceMemory::~DeviceMemory() {
    if (initialized()) vkFreeMemory(device(), handle(), NULL);
}

void DeviceMemory::init(const Device &dev, const VkMemoryAllocateInfo &info) {
    NON_DISPATCHABLE_HANDLE_INIT(vkAllocateMemory, dev, &info);
}

const void *DeviceMemory::map(VkFlags flags) const {
    void *data;
    if (!EXPECT(vkMapMemory(device(), handle(), 0, VK_WHOLE_SIZE, flags, &data) == VK_SUCCESS)) data = NULL;

    return data;
}

void *DeviceMemory::map(VkFlags flags) {
    void *data;
    if (!EXPECT(vkMapMemory(device(), handle(), 0, VK_WHOLE_SIZE, flags, &data) == VK_SUCCESS)) data = NULL;

    return data;
}

void DeviceMemory::unmap() const { vkUnmapMemory(device(), handle()); }

VkMemoryAllocateInfo DeviceMemory::get_resource_alloc_info(const Device &dev, const VkMemoryRequirements &reqs,
                                                           VkMemoryPropertyFlags mem_props) {
    // Find appropriate memory type for given reqs
    VkPhysicalDeviceMemoryProperties dev_mem_props = dev.phy().memory_properties();
    uint32_t mem_type_index = 0;
    for (mem_type_index = 0; mem_type_index < dev_mem_props.memoryTypeCount; ++mem_type_index) {
        if (mem_props == (mem_props & dev_mem_props.memoryTypes[mem_type_index].propertyFlags)) break;
    }
    // If we exceeded types, then this device doesn't have the memory we need
    assert(mem_type_index < dev_mem_props.memoryTypeCount);
    VkMemoryAllocateInfo info = alloc_info(reqs.size, mem_type_index);
    // set_memory_type() additionally honors reqs.memoryTypeBits, overriding
    // the index found above
    EXPECT(dev.phy().set_memory_type(reqs.memoryTypeBits, &info, mem_props));
    return info;
}
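
// Usage sketch (illustrative): the Buffer/Image wrappers below use this
// helper like so. Assumes `dev` is an initialized Device and `buf` an
// initialized Buffer.
//
//     VkMemoryAllocateInfo info = DeviceMemory::get_resource_alloc_info(
//         dev, buf.memory_requirements(), VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
//     DeviceMemory mem;
//     mem.init(dev, info);
//     buf.bind_memory(mem, 0);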

NON_DISPATCHABLE_HANDLE_DTOR(Fence, vkDestroyFence)

void Fence::init(const Device &dev, const VkFenceCreateInfo &info) { NON_DISPATCHABLE_HANDLE_INIT(vkCreateFence, dev, &info); }

VkResult Fence::wait(VkBool32 wait_all, uint64_t timeout) const {
    VkFence fence = handle();
    return vkWaitForFences(device(), 1, &fence, wait_all, timeout);
}
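
// Usage sketch (illustrative): submit with a real fence and block until the
// GPU finishes. Assumes `dev`, `queue`, and `cmd` are initialized wrappers;
// a zero-initialized VkFenceCreateInfo (unsignaled fence) is sufficient here.
//
//     Fence fence;
//     VkFenceCreateInfo fence_ci = {};
//     fence_ci.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
//     fence.init(dev, fence_ci);
//     queue.submit(cmd, fence, true);
//     fence.wait(VK_TRUE, UINT64_MAX);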

NON_DISPATCHABLE_HANDLE_DTOR(Semaphore, vkDestroySemaphore)

void Semaphore::init(const Device &dev, const VkSemaphoreCreateInfo &info) {
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateSemaphore, dev, &info);
}

NON_DISPATCHABLE_HANDLE_DTOR(Event, vkDestroyEvent)

void Event::init(const Device &dev, const VkEventCreateInfo &info) { NON_DISPATCHABLE_HANDLE_INIT(vkCreateEvent, dev, &info); }

void Event::set() { EXPECT(vkSetEvent(device(), handle()) == VK_SUCCESS); }

void Event::reset() { EXPECT(vkResetEvent(device(), handle()) == VK_SUCCESS); }

NON_DISPATCHABLE_HANDLE_DTOR(QueryPool, vkDestroyQueryPool)

void QueryPool::init(const Device &dev, const VkQueryPoolCreateInfo &info) {
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateQueryPool, dev, &info);
}

VkResult QueryPool::results(uint32_t first, uint32_t count, size_t size, void *data, size_t stride) {
    VkResult err = vkGetQueryPoolResults(device(), handle(), first, count, size, data, stride, 0);
    EXPECT(err == VK_SUCCESS || err == VK_NOT_READY);

    return err;
}

NON_DISPATCHABLE_HANDLE_DTOR(Buffer, vkDestroyBuffer)

void Buffer::init(const Device &dev, const VkBufferCreateInfo &info, VkMemoryPropertyFlags mem_props) {
    init_no_mem(dev, info);

    internal_mem_.init(dev, DeviceMemory::get_resource_alloc_info(dev, memory_requirements(), mem_props));
    bind_memory(internal_mem_, 0);
}

void Buffer::init_no_mem(const Device &dev, const VkBufferCreateInfo &info) {
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateBuffer, dev, &info);
    create_info_ = info;
}

VkMemoryRequirements Buffer::memory_requirements() const {
    VkMemoryRequirements reqs;

    vkGetBufferMemoryRequirements(device(), handle(), &reqs);

    return reqs;
}

void Buffer::bind_memory(const DeviceMemory &mem, VkDeviceSize mem_offset) {
    EXPECT(vkBindBufferMemory(device(), handle(), mem.handle(), mem_offset) == VK_SUCCESS);
}
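
// Usage sketch (illustrative): a host-visible staging buffer in one call;
// Buffer::init() creates the buffer, allocates matching memory, and binds it.
// Assumes `dev` is an initialized Device.
//
//     VkBufferCreateInfo buf_ci = {};
//     buf_ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
//     buf_ci.size = 4096;
//     buf_ci.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
//     Buffer staging;
//     staging.init(dev, buf_ci, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
//     void *ptr = staging.memory().map(0);  // memory() accessor assumed from vktestbinding.h
//     // ... fill ptr ...
//     staging.memory().unmap();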

NON_DISPATCHABLE_HANDLE_DTOR(BufferView, vkDestroyBufferView)

void BufferView::init(const Device &dev, const VkBufferViewCreateInfo &info) {
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateBufferView, dev, &info);
}

NON_DISPATCHABLE_HANDLE_DTOR(Image, vkDestroyImage)

void Image::init(const Device &dev, const VkImageCreateInfo &info, VkMemoryPropertyFlags mem_props) {
    init_no_mem(dev, info);

    if (initialized()) {
        internal_mem_.init(dev, DeviceMemory::get_resource_alloc_info(dev, memory_requirements(), mem_props));
        bind_memory(internal_mem_, 0);
    }
}

void Image::init_no_mem(const Device &dev, const VkImageCreateInfo &info) {
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateImage, dev, &info);
    if (initialized()) {
        init_info(dev, info);
    }
}

void Image::init_info(const Device &dev, const VkImageCreateInfo &info) {
    create_info_ = info;

    for (std::vector<Device::Format>::const_iterator it = dev.formats().begin(); it != dev.formats().end(); it++) {
        if (it->format == create_info_.format && it->tiling == create_info_.tiling) {
            format_features_ = it->features;
            break;
        }
    }
}

VkMemoryRequirements Image::memory_requirements() const {
    VkMemoryRequirements reqs;

    vkGetImageMemoryRequirements(device(), handle(), &reqs);

    return reqs;
}

void Image::bind_memory(const DeviceMemory &mem, VkDeviceSize mem_offset) {
    EXPECT(vkBindImageMemory(device(), handle(), mem.handle(), mem_offset) == VK_SUCCESS);
}

VkSubresourceLayout Image::subresource_layout(const VkImageSubresource &subres) const {
    VkSubresourceLayout data;
    vkGetImageSubresourceLayout(device(), handle(), &subres, &data);

    return data;
}

VkSubresourceLayout Image::subresource_layout(const VkImageSubresourceLayers &subrescopy) const {
    VkImageSubresource subres = subresource(subrescopy.aspectMask, subrescopy.mipLevel, subrescopy.baseArrayLayer);
    VkSubresourceLayout data;
    vkGetImageSubresourceLayout(device(), handle(), &subres, &data);

    return data;
}

bool Image::transparent() const {
    return (create_info_.tiling == VK_IMAGE_TILING_LINEAR && create_info_.samples == VK_SAMPLE_COUNT_1_BIT &&
            !(create_info_.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)));
}

NON_DISPATCHABLE_HANDLE_DTOR(ImageView, vkDestroyImageView)

void ImageView::init(const Device &dev, const VkImageViewCreateInfo &info) {
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateImageView, dev, &info);
}

AccelerationStructure::~AccelerationStructure() {
    if (initialized()) {
        PFN_vkDestroyAccelerationStructureNV vkDestroyAccelerationStructureNV =
            (PFN_vkDestroyAccelerationStructureNV)vkGetDeviceProcAddr(device(), "vkDestroyAccelerationStructureNV");
        assert(vkDestroyAccelerationStructureNV != nullptr);

        vkDestroyAccelerationStructureNV(device(), handle(), nullptr);
    }
}

VkMemoryRequirements2 AccelerationStructure::memory_requirements() const {
    PFN_vkGetAccelerationStructureMemoryRequirementsNV vkGetAccelerationStructureMemoryRequirementsNV =
        (PFN_vkGetAccelerationStructureMemoryRequirementsNV)vkGetDeviceProcAddr(device(),
                                                                                "vkGetAccelerationStructureMemoryRequirementsNV");
    assert(vkGetAccelerationStructureMemoryRequirementsNV != nullptr);

    VkAccelerationStructureMemoryRequirementsInfoNV memoryRequirementsInfo = {};
    memoryRequirementsInfo.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_NV;
    memoryRequirementsInfo.type = VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_NV;
    memoryRequirementsInfo.accelerationStructure = handle();

    VkMemoryRequirements2 memoryRequirements = {};
    vkGetAccelerationStructureMemoryRequirementsNV(device(), &memoryRequirementsInfo, &memoryRequirements);
    return memoryRequirements;
}

VkMemoryRequirements2 AccelerationStructure::build_scratch_memory_requirements() const {
    PFN_vkGetAccelerationStructureMemoryRequirementsNV vkGetAccelerationStructureMemoryRequirementsNV =
        (PFN_vkGetAccelerationStructureMemoryRequirementsNV)vkGetDeviceProcAddr(device(),
                                                                                "vkGetAccelerationStructureMemoryRequirementsNV");
    assert(vkGetAccelerationStructureMemoryRequirementsNV != nullptr);

    VkAccelerationStructureMemoryRequirementsInfoNV memoryRequirementsInfo = {};
    memoryRequirementsInfo.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_NV;
    memoryRequirementsInfo.type = VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BUILD_SCRATCH_NV;
    memoryRequirementsInfo.accelerationStructure = handle();

    VkMemoryRequirements2 memoryRequirements = {};
    vkGetAccelerationStructureMemoryRequirementsNV(device(), &memoryRequirementsInfo, &memoryRequirements);
    return memoryRequirements;
}

void AccelerationStructure::init(const Device &dev, const VkAccelerationStructureCreateInfoNV &info, bool init_memory) {
    PFN_vkCreateAccelerationStructureNV vkCreateAccelerationStructureNV =
        (PFN_vkCreateAccelerationStructureNV)vkGetDeviceProcAddr(dev.handle(), "vkCreateAccelerationStructureNV");
    assert(vkCreateAccelerationStructureNV != nullptr);

    NON_DISPATCHABLE_HANDLE_INIT(vkCreateAccelerationStructureNV, dev, &info);

    info_ = info.info;

    if (init_memory) {
        memory_.init(dev, DeviceMemory::get_resource_alloc_info(dev, memory_requirements().memoryRequirements,
                                                                VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT));

        PFN_vkBindAccelerationStructureMemoryNV vkBindAccelerationStructureMemoryNV =
            (PFN_vkBindAccelerationStructureMemoryNV)vkGetDeviceProcAddr(dev.handle(), "vkBindAccelerationStructureMemoryNV");
        assert(vkBindAccelerationStructureMemoryNV != nullptr);

        VkBindAccelerationStructureMemoryInfoNV bind_info = {};
        bind_info.sType = VK_STRUCTURE_TYPE_BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_NV;
        bind_info.accelerationStructure = handle();
        bind_info.memory = memory_.handle();
        EXPECT(vkBindAccelerationStructureMemoryNV(dev.handle(), 1, &bind_info) == VK_SUCCESS);

        PFN_vkGetAccelerationStructureHandleNV vkGetAccelerationStructureHandleNV =
            (PFN_vkGetAccelerationStructureHandleNV)vkGetDeviceProcAddr(dev.handle(), "vkGetAccelerationStructureHandleNV");
        assert(vkGetAccelerationStructureHandleNV != nullptr);
        EXPECT(vkGetAccelerationStructureHandleNV(dev.handle(), handle(), sizeof(uint64_t), &opaque_handle_) == VK_SUCCESS);
    }
}

void AccelerationStructure::create_scratch_buffer(const Device &dev, Buffer *buffer) {
    VkMemoryRequirements scratch_buffer_memory_requirements = build_scratch_memory_requirements().memoryRequirements;

    VkBufferCreateInfo create_info = {};
    create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    create_info.size = scratch_buffer_memory_requirements.size;
    create_info.usage = VK_BUFFER_USAGE_RAY_TRACING_BIT_NV;
    buffer->init(dev, create_info, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
}

NON_DISPATCHABLE_HANDLE_DTOR(ShaderModule, vkDestroyShaderModule)

void ShaderModule::init(const Device &dev, const VkShaderModuleCreateInfo &info) {
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateShaderModule, dev, &info);
}

VkResult ShaderModule::init_try(const Device &dev, const VkShaderModuleCreateInfo &info) {
    VkShaderModule mod;

    VkResult err = vkCreateShaderModule(dev.handle(), &info, NULL, &mod);
    if (err == VK_SUCCESS) NonDispHandle::init(dev.handle(), mod);

    return err;
}

NON_DISPATCHABLE_HANDLE_DTOR(Pipeline, vkDestroyPipeline)

void Pipeline::init(const Device &dev, const VkGraphicsPipelineCreateInfo &info) {
    VkPipelineCache cache;
    VkPipelineCacheCreateInfo ci;
    memset((void *)&ci, 0, sizeof(VkPipelineCacheCreateInfo));
    ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
    VkResult err = vkCreatePipelineCache(dev.handle(), &ci, NULL, &cache);
    EXPECT(err == VK_SUCCESS);
    if (err == VK_SUCCESS) {
        NON_DISPATCHABLE_HANDLE_INIT(vkCreateGraphicsPipelines, dev, cache, 1, &info);
        vkDestroyPipelineCache(dev.handle(), cache, NULL);
    }
}

VkResult Pipeline::init_try(const Device &dev, const VkGraphicsPipelineCreateInfo &info) {
    VkPipeline pipe;
    VkPipelineCache cache;
    VkPipelineCacheCreateInfo ci;
    memset((void *)&ci, 0, sizeof(VkPipelineCacheCreateInfo));
    ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
    VkResult err = vkCreatePipelineCache(dev.handle(), &ci, NULL, &cache);
    EXPECT(err == VK_SUCCESS);
    if (err == VK_SUCCESS) {
        err = vkCreateGraphicsPipelines(dev.handle(), cache, 1, &info, NULL, &pipe);
        if (err == VK_SUCCESS) {
            NonDispHandle::init(dev.handle(), pipe);
        }
        vkDestroyPipelineCache(dev.handle(), cache, NULL);
    }

    return err;
}

void Pipeline::init(const Device &dev, const VkComputePipelineCreateInfo &info) {
    VkPipelineCache cache;
    VkPipelineCacheCreateInfo ci;
    memset((void *)&ci, 0, sizeof(VkPipelineCacheCreateInfo));
    ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
    VkResult err = vkCreatePipelineCache(dev.handle(), &ci, NULL, &cache);
    EXPECT(err == VK_SUCCESS);
    if (err == VK_SUCCESS) {
        NON_DISPATCHABLE_HANDLE_INIT(vkCreateComputePipelines, dev, cache, 1, &info);
        vkDestroyPipelineCache(dev.handle(), cache, NULL);
    }
}

NON_DISPATCHABLE_HANDLE_DTOR(PipelineLayout, vkDestroyPipelineLayout)

void PipelineLayout::init(const Device &dev, VkPipelineLayoutCreateInfo &info,
                          const std::vector<const DescriptorSetLayout *> &layouts) {
    const std::vector<VkDescriptorSetLayout> layout_handles = MakeVkHandles<VkDescriptorSetLayout>(layouts);
    info.setLayoutCount = layout_handles.size();
    info.pSetLayouts = layout_handles.data();

    NON_DISPATCHABLE_HANDLE_INIT(vkCreatePipelineLayout, dev, &info);
}

NON_DISPATCHABLE_HANDLE_DTOR(Sampler, vkDestroySampler)

void Sampler::init(const Device &dev, const VkSamplerCreateInfo &info) {
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateSampler, dev, &info);
}

NON_DISPATCHABLE_HANDLE_DTOR(DescriptorSetLayout, vkDestroyDescriptorSetLayout)

void DescriptorSetLayout::init(const Device &dev, const VkDescriptorSetLayoutCreateInfo &info) {
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateDescriptorSetLayout, dev, &info);
}

NON_DISPATCHABLE_HANDLE_DTOR(DescriptorPool, vkDestroyDescriptorPool)

void DescriptorPool::init(const Device &dev, const VkDescriptorPoolCreateInfo &info) {
    setDynamicUsage(info.flags & VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT);
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateDescriptorPool, dev, &info);
}

void DescriptorPool::reset() { EXPECT(vkResetDescriptorPool(device(), handle(), 0) == VK_SUCCESS); }

std::vector<DescriptorSet *> DescriptorPool::alloc_sets(const Device &dev,
                                                        const std::vector<const DescriptorSetLayout *> &layouts) {
    const std::vector<VkDescriptorSetLayout> layout_handles = MakeVkHandles<VkDescriptorSetLayout>(layouts);

    std::vector<VkDescriptorSet> set_handles;
    set_handles.resize(layout_handles.size());

    VkDescriptorSetAllocateInfo alloc_info = {};
    alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
    alloc_info.descriptorSetCount = layout_handles.size();
    alloc_info.descriptorPool = handle();
    alloc_info.pSetLayouts = layout_handles.data();
    VkResult err = vkAllocateDescriptorSets(device(), &alloc_info, set_handles.data());
    EXPECT(err == VK_SUCCESS);

    std::vector<DescriptorSet *> sets;
    if (err != VK_SUCCESS) return sets;  // allocation failed; don't wrap null handles

    for (std::vector<VkDescriptorSet>::const_iterator it = set_handles.begin(); it != set_handles.end(); it++) {
        // do descriptor sets need memories bound?
        DescriptorSet *descriptorSet = new DescriptorSet(dev, this, *it);
        sets.push_back(descriptorSet);
    }
    return sets;
}

std::vector<DescriptorSet *> DescriptorPool::alloc_sets(const Device &dev, const DescriptorSetLayout &layout, uint32_t count) {
    return alloc_sets(dev, std::vector<const DescriptorSetLayout *>(count, &layout));
}

DescriptorSet *DescriptorPool::alloc_sets(const Device &dev, const DescriptorSetLayout &layout) {
    std::vector<DescriptorSet *> set = alloc_sets(dev, layout, 1);
    return (set.empty()) ? NULL : set[0];
}
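
// Usage sketch (illustrative): allocating one set from a pool that allows
// freeing individual sets. Assumes `dev`, `pool`, and `layout` are
// initialized wrappers; the caller owns (and deletes) the returned pointer.
//
//     DescriptorSet *set = pool.alloc_sets(dev, layout);
//     if (set) {
//         // ... write descriptors via Device::update_descriptor_sets() ...
//         delete set;  // frees via vkFreeDescriptorSets only if the pool was
//                      // created with VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT
//     }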

DescriptorSet::~DescriptorSet() {
    if (initialized()) {
        // Only call vkFree* on sets allocated from pool with usage *_DYNAMIC
        if (containing_pool_->getDynamicUsage()) {
            VkDescriptorSet sets[1] = {handle()};
            EXPECT(vkFreeDescriptorSets(device(), containing_pool_->GetObj(), 1, sets) == VK_SUCCESS);
        }
    }
}

NON_DISPATCHABLE_HANDLE_DTOR(CommandPool, vkDestroyCommandPool)

void CommandPool::init(const Device &dev, const VkCommandPoolCreateInfo &info) {
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateCommandPool, dev, &info);
}

CommandBuffer::~CommandBuffer() {
    if (initialized()) {
        VkCommandBuffer cmds[] = {handle()};
        vkFreeCommandBuffers(dev_handle_, cmd_pool_, 1, cmds);
    }
}

void CommandBuffer::init(const Device &dev, const VkCommandBufferAllocateInfo &info) {
    VkCommandBuffer cmd;

    // Make sure commandPool is set
    assert(info.commandPool);

    if (EXPECT(vkAllocateCommandBuffers(dev.handle(), &info, &cmd) == VK_SUCCESS)) {
        Handle::init(cmd);
        dev_handle_ = dev.handle();
        cmd_pool_ = info.commandPool;
    }
}

void CommandBuffer::begin(const VkCommandBufferBeginInfo *info) { EXPECT(vkBeginCommandBuffer(handle(), info) == VK_SUCCESS); }

void CommandBuffer::begin() {
    VkCommandBufferBeginInfo info = {};
    VkCommandBufferInheritanceInfo hinfo = {};
    info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
    info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    info.pInheritanceInfo = &hinfo;
    hinfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
    hinfo.pNext = NULL;
    hinfo.renderPass = VK_NULL_HANDLE;
    hinfo.subpass = 0;
    hinfo.framebuffer = VK_NULL_HANDLE;
    hinfo.occlusionQueryEnable = VK_FALSE;
    hinfo.queryFlags = 0;
    hinfo.pipelineStatistics = 0;

    begin(&info);
}

void CommandBuffer::end() { EXPECT(vkEndCommandBuffer(handle()) == VK_SUCCESS); }

void CommandBuffer::reset(VkCommandBufferResetFlags flags) { EXPECT(vkResetCommandBuffer(handle(), flags) == VK_SUCCESS); }
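
// Usage sketch (illustrative): the typical record-and-submit flow these
// wrappers enable. Assumes `dev` is an initialized Device, `pool` an
// initialized CommandPool, and `queue` one of the device's queues.
//
//     VkCommandBufferAllocateInfo cmd_ai = {};
//     cmd_ai.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
//     cmd_ai.commandPool = pool.handle();
//     cmd_ai.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
//     cmd_ai.commandBufferCount = 1;
//     CommandBuffer cmd;
//     cmd.init(dev, cmd_ai);
//     cmd.begin();
//     // ... vkCmd* recording ...
//     cmd.end();
//     queue.submit(cmd, true);  // no fence; expects VK_SUCCESS
//     queue.wait();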

}  // namespace vk_testing