/*
 * Copyright (c) 2015-2016 The Khronos Group Inc.
 * Copyright (c) 2015-2016 Valve Corporation
 * Copyright (c) 2015-2016 LunarG, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Courtney Goeltzenleuchter <courtney@LunarG.com>
 * Author: Tony Barbour <tony@LunarG.com>
 */

#include "test_common.h"    // NOEXCEPT macro (must precede vktestbinding.h)
#include "vktestbinding.h"  // Redundant (already pulled in by test_common.h) but kept for clarity
#include "vk_typemap_helper.h"
#include <algorithm>
#include <assert.h>
#include <iostream>
#include <stdarg.h>
#include <string.h>  // memset(), memcmp()

namespace {

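// Helper macros for the non-dispatchable handle wrappers below:
// *_HANDLE_INIT creates the Vulkan object and, on success, adopts it via
// NonDispHandle::init(); *_HANDLE_DTOR emits a destructor that destroys the
// object only if it was successfully initialized.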
#define NON_DISPATCHABLE_HANDLE_INIT(create_func, dev, ...)                              \
    do {                                                                                 \
        handle_type handle;                                                              \
        if (EXPECT(create_func(dev.handle(), __VA_ARGS__, NULL, &handle) == VK_SUCCESS)) \
            NonDispHandle::init(dev.handle(), handle);                                   \
    } while (0)

#define NON_DISPATCHABLE_HANDLE_DTOR(cls, destroy_func)            \
    cls::~cls() {                                                  \
        if (initialized()) destroy_func(device(), handle(), NULL); \
    }

#define STRINGIFY(x) #x
#define EXPECT(expr) ((expr) ? true : expect_failure(STRINGIFY(expr), __FILE__, __LINE__, __FUNCTION__))

vk_testing::ErrorCallback error_callback;

bool expect_failure(const char *expr, const char *file, unsigned int line, const char *function) {
    if (error_callback) {
        error_callback(expr, file, line, function);
    } else {
        std::cerr << file << ":" << line << ": " << function << ": Expectation `" << expr << "' failed.\n";
    }

    return false;
}

}  // namespace

namespace vk_testing {

void set_error_callback(ErrorCallback callback) { error_callback = callback; }
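// Usage sketch (hypothetical harness): route expectation failures into a test
// framework instead of the default stderr message, e.g. with GoogleTest:
//
//   static void OnExpectFailure(const char *expr, const char *file, unsigned int line, const char *function) {
//       ADD_FAILURE() << file << ":" << line << ": " << function << ": " << expr;
//   }
//   vk_testing::set_error_callback(OnExpectFailure);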

VkPhysicalDeviceProperties PhysicalDevice::properties() const {
    VkPhysicalDeviceProperties info;

    vkGetPhysicalDeviceProperties(handle(), &info);

    return info;
}

std::vector<VkQueueFamilyProperties> PhysicalDevice::queue_properties() const {
    std::vector<VkQueueFamilyProperties> info;
    uint32_t count;

    // Call once with NULL data to receive count
    vkGetPhysicalDeviceQueueFamilyProperties(handle(), &count, NULL);
    info.resize(count);
    vkGetPhysicalDeviceQueueFamilyProperties(handle(), &count, info.data());

    return info;
}

VkPhysicalDeviceMemoryProperties PhysicalDevice::memory_properties() const {
    VkPhysicalDeviceMemoryProperties info;

    vkGetPhysicalDeviceMemoryProperties(handle(), &info);

    return info;
}

VkPhysicalDeviceFeatures PhysicalDevice::features() const {
    VkPhysicalDeviceFeatures features;
    vkGetPhysicalDeviceFeatures(handle(), &features);
    return features;
}

/*
 * Return list of global layers available
 */
std::vector<VkLayerProperties> GetGlobalLayers() {
    VkResult err;
    std::vector<VkLayerProperties> layers;
    uint32_t layer_count;

    do {
        layer_count = 0;
        err = vkEnumerateInstanceLayerProperties(&layer_count, NULL);

        if (err == VK_SUCCESS) {
            layers.resize(layer_count);  // resize(), not reserve(): data() must point at layer_count writable elements
            err = vkEnumerateInstanceLayerProperties(&layer_count, layers.data());
        }
    } while (err == VK_INCOMPLETE);

    assert(err == VK_SUCCESS);

    return layers;
}

/*
 * Return list of global extensions provided by the ICD / Loader
 */
std::vector<VkExtensionProperties> GetGlobalExtensions() { return GetGlobalExtensions(NULL); }

/*
 * Return list of global extensions provided by the specified layer
 * If pLayerName is NULL, will return extensions implemented by the loader / ICDs
 */
std::vector<VkExtensionProperties> GetGlobalExtensions(const char *pLayerName) {
    std::vector<VkExtensionProperties> exts;
    uint32_t ext_count;
    VkResult err;

    do {
        ext_count = 0;
        err = vkEnumerateInstanceExtensionProperties(pLayerName, &ext_count, NULL);

        if (err == VK_SUCCESS) {
            exts.resize(ext_count);
            err = vkEnumerateInstanceExtensionProperties(pLayerName, &ext_count, exts.data());
        }
    } while (err == VK_INCOMPLETE);

    assert(err == VK_SUCCESS);

    return exts;
}

/*
 * Return list of PhysicalDevice extensions provided by the ICD / Loader
 */
std::vector<VkExtensionProperties> PhysicalDevice::extensions() const { return extensions(NULL); }

/*
 * Return list of PhysicalDevice extensions provided by the specified layer
 * If pLayerName is NULL, will return extensions for the ICD / loader.
 */
std::vector<VkExtensionProperties> PhysicalDevice::extensions(const char *pLayerName) const {
    std::vector<VkExtensionProperties> exts;
    VkResult err;

    do {
        uint32_t extCount = 0;
        err = vkEnumerateDeviceExtensionProperties(handle(), pLayerName, &extCount, NULL);

        if (err == VK_SUCCESS) {
            exts.resize(extCount);
            err = vkEnumerateDeviceExtensionProperties(handle(), pLayerName, &extCount, exts.data());
        }
    } while (err == VK_INCOMPLETE);

    assert(err == VK_SUCCESS);

    return exts;
}

bool PhysicalDevice::set_memory_type(const uint32_t type_bits, VkMemoryAllocateInfo *info, const VkFlags properties,
                                     const VkFlags forbid) const {
    uint32_t type_mask = type_bits;
    // Search memtypes to find first index with those properties
    for (uint32_t i = 0; i < memory_properties_.memoryTypeCount; i++) {
        if ((type_mask & 1) == 1) {
            // Type is available, does it match user properties?
            if ((memory_properties_.memoryTypes[i].propertyFlags & properties) == properties &&
                (memory_properties_.memoryTypes[i].propertyFlags & forbid) == 0) {
                info->memoryTypeIndex = i;
                return true;
            }
        }
        type_mask >>= 1;
    }
    // No memory types matched, return failure
    return false;
}
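// Usage sketch: choose a host-visible memory type for a buffer allocation;
// alloc_info() refers to the static DeviceMemory helper also used by
// get_resource_alloc_info() later in this file:
//
//   VkMemoryRequirements reqs = buffer.memory_requirements();
//   VkMemoryAllocateInfo alloc = DeviceMemory::alloc_info(reqs.size, 0);
//   if (!dev.phy().set_memory_type(reqs.memoryTypeBits, &alloc, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)) {
//       // no matching memory type on this device
//   }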

/*
 * Return list of PhysicalDevice layers
 */
std::vector<VkLayerProperties> PhysicalDevice::layers() const {
    std::vector<VkLayerProperties> layer_props;
    VkResult err;

    do {
        uint32_t layer_count = 0;
        err = vkEnumerateDeviceLayerProperties(handle(), &layer_count, NULL);

        if (err == VK_SUCCESS) {
            layer_props.resize(layer_count);  // resize(), not reserve(): data() must point at layer_count writable elements
            err = vkEnumerateDeviceLayerProperties(handle(), &layer_count, layer_props.data());
        }
    } while (err == VK_INCOMPLETE);

    assert(err == VK_SUCCESS);

    return layer_props;
}

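// Builds one VkDeviceQueueCreateInfo per queue family that exposes at least one
// queue, requesting every queue in the family at priority 0.0. The priority
// arrays are owned by this object, so it must outlive vkCreateDevice().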
QueueCreateInfoArray::QueueCreateInfoArray(const std::vector<VkQueueFamilyProperties> &queue_props)
    : queue_info_(), queue_priorities_() {
    queue_info_.reserve(queue_props.size());
    // Reserve up front: qi.pQueuePriorities points into queue_priorities_, so it must not reallocate below.
    queue_priorities_.reserve(queue_props.size());

    for (uint32_t i = 0; i < (uint32_t)queue_props.size(); ++i) {
        if (queue_props[i].queueCount > 0) {
            VkDeviceQueueCreateInfo qi = {};
            qi.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
            qi.pNext = NULL;
            qi.queueFamilyIndex = i;
            qi.queueCount = queue_props[i].queueCount;
            queue_priorities_.emplace_back(qi.queueCount, 0.0f);
            // Index with back(), not [i]: families with zero queues are skipped, so i can run ahead of the vector.
            qi.pQueuePriorities = queue_priorities_.back().data();
            queue_info_.push_back(qi);
        }
    }
}

Device::~Device() {
    if (!initialized()) return;

    vkDestroyDevice(handle(), NULL);
}

void Device::init(std::vector<const char *> &extensions, VkPhysicalDeviceFeatures *features, void *create_device_pnext) {
    // request all queues
    const std::vector<VkQueueFamilyProperties> queue_props = phy_.queue_properties();
    QueueCreateInfoArray queue_info(queue_props);  // reuse queue_props instead of enumerating a second time
    for (uint32_t i = 0; i < (uint32_t)queue_props.size(); i++) {
        if (queue_props[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
            graphics_queue_node_index_ = i;
            break;
        }
    }
    // Only request creation with queue families that have at least one queue
    std::vector<VkDeviceQueueCreateInfo> create_queue_infos;
    auto qci = queue_info.data();
    for (uint32_t j = 0; j < queue_info.size(); ++j) {
        if (qci[j].queueCount) {
            create_queue_infos.push_back(qci[j]);
        }
    }

    enabled_extensions_ = extensions;

    VkDeviceCreateInfo dev_info = {};
    dev_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
    dev_info.pNext = create_device_pnext;
    dev_info.queueCreateInfoCount = static_cast<uint32_t>(create_queue_infos.size());
    dev_info.pQueueCreateInfos = create_queue_infos.data();
    dev_info.enabledLayerCount = 0;
    dev_info.ppEnabledLayerNames = NULL;
    dev_info.enabledExtensionCount = static_cast<uint32_t>(extensions.size());
    dev_info.ppEnabledExtensionNames = extensions.data();

    VkPhysicalDeviceFeatures all_features;
    // Let VkPhysicalDeviceFeatures2 take priority over VkPhysicalDeviceFeatures,
    // since it supports extensions
    if (!(lvl_find_in_chain<VkPhysicalDeviceFeatures2>(dev_info.pNext))) {
        if (features) {
            dev_info.pEnabledFeatures = features;
        } else {
            // request all supportable features enabled
            all_features = phy().features();
            dev_info.pEnabledFeatures = &all_features;
        }
    }

    init(dev_info);
}

void Device::init(const VkDeviceCreateInfo &info) {
    VkDevice dev;

    // Only query queues and formats if device creation actually succeeded
    if (EXPECT(vkCreateDevice(phy_.handle(), &info, NULL, &dev) == VK_SUCCESS)) {
        Handle::init(dev);
        init_queues();
        init_formats();
    }
}

void Device::init_queues() {
    uint32_t queue_node_count;

    // Call with NULL data to get count
    vkGetPhysicalDeviceQueueFamilyProperties(phy_.handle(), &queue_node_count, NULL);
    EXPECT(queue_node_count >= 1);

    std::vector<VkQueueFamilyProperties> queue_props(queue_node_count);
    vkGetPhysicalDeviceQueueFamilyProperties(phy_.handle(), &queue_node_count, queue_props.data());

    queue_families_.resize(queue_node_count);
    for (uint32_t i = 0; i < queue_node_count; i++) {
        VkQueue queue;

        QueueFamilyQueues &queue_storage = queue_families_[i];
        queue_storage.reserve(queue_props[i].queueCount);
        for (uint32_t j = 0; j < queue_props[i].queueCount; j++) {
            // TODO: Need to add support for separate MEMMGR and work queues,
            // including synchronization
            vkGetDeviceQueue(handle(), i, j, &queue);

            // Store a single copy of the queue object that will self-destruct
            queue_storage.emplace_back(new Queue(queue, i));

            if (queue_props[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
                queues_[GRAPHICS].push_back(queue_storage.back().get());
            }

            if (queue_props[i].queueFlags & VK_QUEUE_COMPUTE_BIT) {
                queues_[COMPUTE].push_back(queue_storage.back().get());
            }

            if (queue_props[i].queueFlags & VK_QUEUE_TRANSFER_BIT) {
                queues_[DMA].push_back(queue_storage.back().get());
            }
        }
    }

    EXPECT(!queues_[GRAPHICS].empty() || !queues_[COMPUTE].empty());
}

const Device::QueueFamilyQueues &Device::queue_family_queues(uint32_t queue_family) const {
    assert(queue_family < queue_families_.size());
    return queue_families_[queue_family];
}

void Device::init_formats() {
    for (int f = VK_FORMAT_BEGIN_RANGE; f <= VK_FORMAT_END_RANGE; f++) {
        const VkFormat fmt = static_cast<VkFormat>(f);
        const VkFormatProperties props = format_properties(fmt);

        if (props.linearTilingFeatures) {
            const Format tmp = {fmt, VK_IMAGE_TILING_LINEAR, props.linearTilingFeatures};
            formats_.push_back(tmp);
        }

        if (props.optimalTilingFeatures) {
            const Format tmp = {fmt, VK_IMAGE_TILING_OPTIMAL, props.optimalTilingFeatures};
            formats_.push_back(tmp);
        }
    }

    EXPECT(!formats_.empty());
}

bool Device::IsEnabledExtension(const char *extension) {
    const auto is_x = [&extension](const char *enabled_extension) { return strcmp(extension, enabled_extension) == 0; };
    return std::any_of(enabled_extensions_.begin(), enabled_extensions_.end(), is_x);
}

VkFormatProperties Device::format_properties(VkFormat format) {
    VkFormatProperties data;
    vkGetPhysicalDeviceFormatProperties(phy().handle(), format, &data);

    return data;
}

void Device::wait() { EXPECT(vkDeviceWaitIdle(handle()) == VK_SUCCESS); }

VkResult Device::wait(const std::vector<const Fence *> &fences, bool wait_all, uint64_t timeout) {
    const std::vector<VkFence> fence_handles = MakeVkHandles<VkFence>(fences);
    VkResult err =
        vkWaitForFences(handle(), static_cast<uint32_t>(fence_handles.size()), fence_handles.data(), wait_all, timeout);
    EXPECT(err == VK_SUCCESS || err == VK_TIMEOUT);

    return err;
}

void Device::update_descriptor_sets(const std::vector<VkWriteDescriptorSet> &writes,
                                    const std::vector<VkCopyDescriptorSet> &copies) {
    vkUpdateDescriptorSets(handle(), static_cast<uint32_t>(writes.size()), writes.data(),
                           static_cast<uint32_t>(copies.size()), copies.data());
}

VkResult Queue::submit(const std::vector<const CommandBuffer *> &cmds, const Fence &fence, bool expect_success) {
    const std::vector<VkCommandBuffer> cmd_handles = MakeVkHandles<VkCommandBuffer>(cmds);
    VkSubmitInfo submit_info;
    submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submit_info.pNext = NULL;
    submit_info.waitSemaphoreCount = 0;
    submit_info.pWaitSemaphores = NULL;
    submit_info.pWaitDstStageMask = NULL;
    submit_info.commandBufferCount = (uint32_t)cmd_handles.size();
    submit_info.pCommandBuffers = cmd_handles.data();
    submit_info.signalSemaphoreCount = 0;
    submit_info.pSignalSemaphores = NULL;

    VkResult result = vkQueueSubmit(handle(), 1, &submit_info, fence.handle());
    if (expect_success) EXPECT(result == VK_SUCCESS);
    return result;
}

VkResult Queue::submit(const CommandBuffer &cmd, const Fence &fence, bool expect_success) {
    return submit(std::vector<const CommandBuffer *>(1, &cmd), fence, expect_success);
}

VkResult Queue::submit(const CommandBuffer &cmd, bool expect_success) {
    Fence fence;  // never initialized: submits with a VK_NULL_HANDLE fence
    return submit(cmd, fence, expect_success);  // forward expect_success instead of dropping it
}
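// Usage sketch: record a command buffer, submit it, and drain the queue:
//
//   cmd_buf.begin();
//   // ... record commands ...
//   cmd_buf.end();
//   queue.submit(cmd_buf);  // submits with a VK_NULL_HANDLE fence
//   queue.wait();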

VkResult Queue::wait() {
    VkResult result = vkQueueWaitIdle(handle());
    EXPECT(result == VK_SUCCESS);
    return result;
}

DeviceMemory::~DeviceMemory() {
    if (initialized()) vkFreeMemory(device(), handle(), NULL);
}

void DeviceMemory::init(const Device &dev, const VkMemoryAllocateInfo &info) {
    NON_DISPATCHABLE_HANDLE_INIT(vkAllocateMemory, dev, &info);
}

const void *DeviceMemory::map(VkFlags flags) const {
    void *data;
    if (!EXPECT(vkMapMemory(device(), handle(), 0, VK_WHOLE_SIZE, flags, &data) == VK_SUCCESS)) data = NULL;

    return data;
}

void *DeviceMemory::map(VkFlags flags) {
    void *data;
    if (!EXPECT(vkMapMemory(device(), handle(), 0, VK_WHOLE_SIZE, flags, &data) == VK_SUCCESS)) data = NULL;

    return data;
}

void DeviceMemory::unmap() const { vkUnmapMemory(device(), handle()); }
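// Usage sketch for map()/unmap(): fill a bound, host-visible allocation.
// alloc_size below is a placeholder for whatever VkMemoryAllocateInfo::allocationSize was used:
//
//   if (void *ptr = mem.map(0)) {
//       memset(ptr, 0, (size_t)alloc_size);
//       mem.unmap();
//   }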

VkMemoryAllocateInfo DeviceMemory::get_resource_alloc_info(const Device &dev, const VkMemoryRequirements &reqs,
                                                           VkMemoryPropertyFlags mem_props) {
    // Find an appropriate memory type for the given requirements
    VkPhysicalDeviceMemoryProperties dev_mem_props = dev.phy().memory_properties();
    uint32_t mem_type_index = 0;
    for (mem_type_index = 0; mem_type_index < dev_mem_props.memoryTypeCount; ++mem_type_index) {
        if (mem_props == (mem_props & dev_mem_props.memoryTypes[mem_type_index].propertyFlags)) break;
    }
    // If we exhausted the types, this device doesn't have the memory we need
    assert(mem_type_index < dev_mem_props.memoryTypeCount);
    VkMemoryAllocateInfo info = alloc_info(reqs.size, mem_type_index);
    EXPECT(dev.phy().set_memory_type(reqs.memoryTypeBits, &info, mem_props));
    return info;
}

NON_DISPATCHABLE_HANDLE_DTOR(Fence, vkDestroyFence)

void Fence::init(const Device &dev, const VkFenceCreateInfo &info) { NON_DISPATCHABLE_HANDLE_INIT(vkCreateFence, dev, &info); }

VkResult Fence::wait(VkBool32 wait_all, uint64_t timeout) const {
    VkFence fence = handle();
    return vkWaitForFences(device(), 1, &fence, wait_all, timeout);
}

NON_DISPATCHABLE_HANDLE_DTOR(Semaphore, vkDestroySemaphore)

void Semaphore::init(const Device &dev, const VkSemaphoreCreateInfo &info) {
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateSemaphore, dev, &info);
}

NON_DISPATCHABLE_HANDLE_DTOR(Event, vkDestroyEvent)

void Event::init(const Device &dev, const VkEventCreateInfo &info) { NON_DISPATCHABLE_HANDLE_INIT(vkCreateEvent, dev, &info); }

void Event::set() { EXPECT(vkSetEvent(device(), handle()) == VK_SUCCESS); }

void Event::reset() { EXPECT(vkResetEvent(device(), handle()) == VK_SUCCESS); }

NON_DISPATCHABLE_HANDLE_DTOR(QueryPool, vkDestroyQueryPool)

void QueryPool::init(const Device &dev, const VkQueryPoolCreateInfo &info) {
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateQueryPool, dev, &info);
}

VkResult QueryPool::results(uint32_t first, uint32_t count, size_t size, void *data, size_t stride) {
    VkResult err = vkGetQueryPoolResults(device(), handle(), first, count, size, data, stride, 0);
    EXPECT(err == VK_SUCCESS || err == VK_NOT_READY);

    return err;
}

NON_DISPATCHABLE_HANDLE_DTOR(Buffer, vkDestroyBuffer)

void Buffer::init(const Device &dev, const VkBufferCreateInfo &info, VkMemoryPropertyFlags mem_props) {
    init_no_mem(dev, info);

    // Guard against a failed creation before querying memory requirements (mirrors Image::init)
    if (initialized()) {
        internal_mem_.init(dev, DeviceMemory::get_resource_alloc_info(dev, memory_requirements(), mem_props));
        bind_memory(internal_mem_, 0);
    }
}

void Buffer::init_no_mem(const Device &dev, const VkBufferCreateInfo &info) {
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateBuffer, dev, &info);
    create_info_ = info;
}

VkMemoryRequirements Buffer::memory_requirements() const {
    VkMemoryRequirements reqs;

    vkGetBufferMemoryRequirements(device(), handle(), &reqs);

    return reqs;
}

void Buffer::bind_memory(const DeviceMemory &mem, VkDeviceSize mem_offset) {
    EXPECT(vkBindBufferMemory(device(), handle(), mem.handle(), mem_offset) == VK_SUCCESS);
}

NON_DISPATCHABLE_HANDLE_DTOR(BufferView, vkDestroyBufferView)

void BufferView::init(const Device &dev, const VkBufferViewCreateInfo &info) {
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateBufferView, dev, &info);
}

NON_DISPATCHABLE_HANDLE_DTOR(Image, vkDestroyImage)

void Image::init(const Device &dev, const VkImageCreateInfo &info, VkMemoryPropertyFlags mem_props) {
    init_no_mem(dev, info);

    if (initialized()) {
        internal_mem_.init(dev, DeviceMemory::get_resource_alloc_info(dev, memory_requirements(), mem_props));
        bind_memory(internal_mem_, 0);
    }
}

void Image::init_no_mem(const Device &dev, const VkImageCreateInfo &info) {
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateImage, dev, &info);
    if (initialized()) {
        init_info(dev, info);
    }
}

void Image::init_info(const Device &dev, const VkImageCreateInfo &info) {
    create_info_ = info;

    // Cache the format features for this image's format/tiling combination
    for (std::vector<Device::Format>::const_iterator it = dev.formats().begin(); it != dev.formats().end(); it++) {
        if (it->format == create_info_.format && it->tiling == create_info_.tiling) {
            format_features_ = it->features;
            break;
        }
    }
}

VkMemoryRequirements Image::memory_requirements() const {
    VkMemoryRequirements reqs;

    vkGetImageMemoryRequirements(device(), handle(), &reqs);

    return reqs;
}

void Image::bind_memory(const DeviceMemory &mem, VkDeviceSize mem_offset) {
    EXPECT(vkBindImageMemory(device(), handle(), mem.handle(), mem_offset) == VK_SUCCESS);
}

VkSubresourceLayout Image::subresource_layout(const VkImageSubresource &subres) const {
    VkSubresourceLayout data;
    vkGetImageSubresourceLayout(device(), handle(), &subres, &data);

    return data;
}

VkSubresourceLayout Image::subresource_layout(const VkImageSubresourceLayers &subrescopy) const {
    VkSubresourceLayout data;
    VkImageSubresource subres = subresource(subrescopy.aspectMask, subrescopy.mipLevel, subrescopy.baseArrayLayer);
    vkGetImageSubresourceLayout(device(), handle(), &subres, &data);

    return data;
}

bool Image::transparent() const {
    return (create_info_.tiling == VK_IMAGE_TILING_LINEAR && create_info_.samples == VK_SAMPLE_COUNT_1_BIT &&
            !(create_info_.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)));
}

NON_DISPATCHABLE_HANDLE_DTOR(ImageView, vkDestroyImageView)

void ImageView::init(const Device &dev, const VkImageViewCreateInfo &info) {
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateImageView, dev, &info);
}

NON_DISPATCHABLE_HANDLE_DTOR(ShaderModule, vkDestroyShaderModule)

void ShaderModule::init(const Device &dev, const VkShaderModuleCreateInfo &info) {
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateShaderModule, dev, &info);
}

VkResult ShaderModule::init_try(const Device &dev, const VkShaderModuleCreateInfo &info) {
    VkShaderModule mod;

    VkResult err = vkCreateShaderModule(dev.handle(), &info, NULL, &mod);
    if (err == VK_SUCCESS) NonDispHandle::init(dev.handle(), mod);

    return err;
}

NON_DISPATCHABLE_HANDLE_DTOR(Pipeline, vkDestroyPipeline)

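// The Pipeline::init* functions below create a transient VkPipelineCache for the
// single vkCreate*Pipelines() call and destroy it immediately afterwards; no
// cache state is reused across pipeline creations.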
void Pipeline::init(const Device &dev, const VkGraphicsPipelineCreateInfo &info) {
    VkPipelineCache cache;
    VkPipelineCacheCreateInfo ci = {};
    ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
    VkResult err = vkCreatePipelineCache(dev.handle(), &ci, NULL, &cache);
    if (err == VK_SUCCESS) {
        NON_DISPATCHABLE_HANDLE_INIT(vkCreateGraphicsPipelines, dev, cache, 1, &info);
        vkDestroyPipelineCache(dev.handle(), cache, NULL);
    }
}

VkResult Pipeline::init_try(const Device &dev, const VkGraphicsPipelineCreateInfo &info) {
    VkPipeline pipe;
    VkPipelineCache cache;
    VkPipelineCacheCreateInfo ci = {};
    ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
    VkResult err = vkCreatePipelineCache(dev.handle(), &ci, NULL, &cache);
    EXPECT(err == VK_SUCCESS);
    if (err == VK_SUCCESS) {
        err = vkCreateGraphicsPipelines(dev.handle(), cache, 1, &info, NULL, &pipe);
        if (err == VK_SUCCESS) {
            NonDispHandle::init(dev.handle(), pipe);
        }
        vkDestroyPipelineCache(dev.handle(), cache, NULL);
    }

    return err;
}

void Pipeline::init(const Device &dev, const VkComputePipelineCreateInfo &info) {
    VkPipelineCache cache;
    VkPipelineCacheCreateInfo ci = {};
    ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
    VkResult err = vkCreatePipelineCache(dev.handle(), &ci, NULL, &cache);
    if (err == VK_SUCCESS) {
        NON_DISPATCHABLE_HANDLE_INIT(vkCreateComputePipelines, dev, cache, 1, &info);
        vkDestroyPipelineCache(dev.handle(), cache, NULL);
    }
}

NON_DISPATCHABLE_HANDLE_DTOR(PipelineLayout, vkDestroyPipelineLayout)

void PipelineLayout::init(const Device &dev, VkPipelineLayoutCreateInfo &info,
                          const std::vector<const DescriptorSetLayout *> &layouts) {
    const std::vector<VkDescriptorSetLayout> layout_handles = MakeVkHandles<VkDescriptorSetLayout>(layouts);
    info.setLayoutCount = static_cast<uint32_t>(layout_handles.size());
    info.pSetLayouts = layout_handles.data();

    NON_DISPATCHABLE_HANDLE_INIT(vkCreatePipelineLayout, dev, &info);
}

NON_DISPATCHABLE_HANDLE_DTOR(Sampler, vkDestroySampler)

void Sampler::init(const Device &dev, const VkSamplerCreateInfo &info) {
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateSampler, dev, &info);
}

NON_DISPATCHABLE_HANDLE_DTOR(DescriptorSetLayout, vkDestroyDescriptorSetLayout)

void DescriptorSetLayout::init(const Device &dev, const VkDescriptorSetLayoutCreateInfo &info) {
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateDescriptorSetLayout, dev, &info);
}

NON_DISPATCHABLE_HANDLE_DTOR(DescriptorPool, vkDestroyDescriptorPool)

void DescriptorPool::init(const Device &dev, const VkDescriptorPoolCreateInfo &info) {
    setDynamicUsage(info.flags & VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT);
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateDescriptorPool, dev, &info);
}

void DescriptorPool::reset() { EXPECT(vkResetDescriptorPool(device(), handle(), 0) == VK_SUCCESS); }

std::vector<DescriptorSet *> DescriptorPool::alloc_sets(const Device &dev,
                                                        const std::vector<const DescriptorSetLayout *> &layouts) {
    const std::vector<VkDescriptorSetLayout> layout_handles = MakeVkHandles<VkDescriptorSetLayout>(layouts);

    std::vector<VkDescriptorSet> set_handles;
    set_handles.resize(layout_handles.size());

    VkDescriptorSetAllocateInfo alloc_info = {};
    alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
    alloc_info.descriptorSetCount = static_cast<uint32_t>(layout_handles.size());
    alloc_info.descriptorPool = handle();
    alloc_info.pSetLayouts = layout_handles.data();
    VkResult err = vkAllocateDescriptorSets(device(), &alloc_info, set_handles.data());
    EXPECT(err == VK_SUCCESS);

    std::vector<DescriptorSet *> sets;
    for (std::vector<VkDescriptorSet>::const_iterator it = set_handles.begin(); it != set_handles.end(); it++) {
        // do descriptor sets need memories bound?
        DescriptorSet *descriptorSet = new DescriptorSet(dev, this, *it);
        sets.push_back(descriptorSet);
    }
    return sets;
}

std::vector<DescriptorSet *> DescriptorPool::alloc_sets(const Device &dev, const DescriptorSetLayout &layout, uint32_t count) {
    return alloc_sets(dev, std::vector<const DescriptorSetLayout *>(count, &layout));
}

DescriptorSet *DescriptorPool::alloc_sets(const Device &dev, const DescriptorSetLayout &layout) {
    std::vector<DescriptorSet *> set = alloc_sets(dev, layout, 1);
    return (set.empty()) ? NULL : set[0];
}
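// Usage sketch: allocate a single set and point it at a uniform buffer:
//
//   DescriptorSet *set = pool.alloc_sets(dev, layout);
//   if (set) {
//       VkDescriptorBufferInfo buf_info = {buffer.handle(), 0, VK_WHOLE_SIZE};
//       VkWriteDescriptorSet write = {};
//       write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
//       write.dstSet = set->handle();
//       write.descriptorCount = 1;
//       write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
//       write.pBufferInfo = &buf_info;
//       dev.update_descriptor_sets(std::vector<VkWriteDescriptorSet>(1, write), std::vector<VkCopyDescriptorSet>());
//   }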

DescriptorSet::~DescriptorSet() {
    if (initialized()) {
        // Only call vkFreeDescriptorSets() on sets from pools created with
        // VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT
        if (containing_pool_->getDynamicUsage()) {
            VkDescriptorSet sets[1] = {handle()};
            EXPECT(vkFreeDescriptorSets(device(), containing_pool_->GetObj(), 1, sets) == VK_SUCCESS);
        }
    }
}

NON_DISPATCHABLE_HANDLE_DTOR(CommandPool, vkDestroyCommandPool)

void CommandPool::init(const Device &dev, const VkCommandPoolCreateInfo &info) {
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateCommandPool, dev, &info);
}

CommandBuffer::~CommandBuffer() {
    if (initialized()) {
        VkCommandBuffer cmds[] = {handle()};
        vkFreeCommandBuffers(dev_handle_, cmd_pool_, 1, cmds);
    }
}

void CommandBuffer::init(const Device &dev, const VkCommandBufferAllocateInfo &info) {
    VkCommandBuffer cmd;

    // Make sure commandPool is set
    assert(info.commandPool);

    if (EXPECT(vkAllocateCommandBuffers(dev.handle(), &info, &cmd) == VK_SUCCESS)) {
        Handle::init(cmd);
        dev_handle_ = dev.handle();
        cmd_pool_ = info.commandPool;
    }
}

void CommandBuffer::begin(const VkCommandBufferBeginInfo *info) { EXPECT(vkBeginCommandBuffer(handle(), info) == VK_SUCCESS); }

void CommandBuffer::begin() {
    VkCommandBufferBeginInfo info = {};
    VkCommandBufferInheritanceInfo hinfo = {};
    info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
    info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    info.pInheritanceInfo = &hinfo;
    hinfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
    hinfo.pNext = NULL;
    hinfo.renderPass = VK_NULL_HANDLE;
    hinfo.subpass = 0;
    hinfo.framebuffer = VK_NULL_HANDLE;
    hinfo.occlusionQueryEnable = VK_FALSE;
    hinfo.queryFlags = 0;
    hinfo.pipelineStatistics = 0;

    begin(&info);
}

void CommandBuffer::end() { EXPECT(vkEndCommandBuffer(handle()) == VK_SUCCESS); }

void CommandBuffer::reset(VkCommandBufferResetFlags flags) { EXPECT(vkResetCommandBuffer(handle(), flags) == VK_SUCCESS); }

}  // namespace vk_testing