/*
 * Copyright (c) 2015-2016 The Khronos Group Inc.
 * Copyright (c) 2015-2016 Valve Corporation
 * Copyright (c) 2015-2016 LunarG, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Courtney Goeltzenleuchter <courtney@LunarG.com>
 * Author: Tony Barbour <tony@LunarG.com>
 */

#include "test_common.h"    // NOEXCEPT macro (must precede vktestbinding.h)
#include "vktestbinding.h"  // Left for clarity, no harm, already included via test_common.h
#include <algorithm>
#include <assert.h>
#include <iostream>
#include <stdarg.h>
#include <string.h>  // memset(), memcmp()
namespace {

#define NON_DISPATCHABLE_HANDLE_INIT(create_func, dev, ...)                              \
    do {                                                                                 \
        handle_type handle;                                                              \
        if (EXPECT(create_func(dev.handle(), __VA_ARGS__, NULL, &handle) == VK_SUCCESS)) \
            NonDispHandle::init(dev.handle(), handle);                                   \
    } while (0)

#define NON_DISPATCHABLE_HANDLE_DTOR(cls, destroy_func)             \
    cls::~cls() {                                                   \
        if (initialized()) destroy_func(device(), handle(), NULL);  \
    }

#define STRINGIFY(x) #x
#define EXPECT(expr) ((expr) ? true : expect_failure(STRINGIFY(expr), __FILE__, __LINE__, __FUNCTION__))

vk_testing::ErrorCallback error_callback;

bool expect_failure(const char *expr, const char *file, unsigned int line, const char *function) {
    if (error_callback) {
        error_callback(expr, file, line, function);
    } else {
        std::cerr << file << ":" << line << ": " << function << ": Expectation `" << expr << "' failed.\n";
    }

    return false;
}
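
// An EXPECT failure is routed through expect_failure() above; a test framework
// can hook it via set_error_callback(). Illustrative hookup (a sketch;
// "MyFailureHandler" is a hypothetical GoogleTest-based handler):
//
//     static void MyFailureHandler(const char *expr, const char *file, unsigned int line, const char *function) {
//         ADD_FAILURE_AT(file, line) << function << ": Expectation `" << expr << "' failed.";
//     }
//     ...
//     vk_testing::set_error_callback(MyFailureHandler);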

} // namespace

namespace vk_testing {

void set_error_callback(ErrorCallback callback) { error_callback = callback; }

VkPhysicalDeviceProperties PhysicalDevice::properties() const {
    VkPhysicalDeviceProperties info;

    vkGetPhysicalDeviceProperties(handle(), &info);

    return info;
}

std::vector<VkQueueFamilyProperties> PhysicalDevice::queue_properties() const {
    std::vector<VkQueueFamilyProperties> info;
    uint32_t count;

    // Call once with NULL data to receive count
    vkGetPhysicalDeviceQueueFamilyProperties(handle(), &count, NULL);
    info.resize(count);
    vkGetPhysicalDeviceQueueFamilyProperties(handle(), &count, info.data());

    return info;
}

VkPhysicalDeviceMemoryProperties PhysicalDevice::memory_properties() const {
    VkPhysicalDeviceMemoryProperties info;

    vkGetPhysicalDeviceMemoryProperties(handle(), &info);

    return info;
}

VkPhysicalDeviceFeatures PhysicalDevice::features() const {
    VkPhysicalDeviceFeatures features;
    vkGetPhysicalDeviceFeatures(handle(), &features);
    return features;
}
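
// Illustrative use of the PhysicalDevice queries above (a sketch; "gpu" is an
// already-initialized vk_testing::PhysicalDevice provided by the caller):
//
//     VkPhysicalDeviceProperties props = gpu.properties();
//     VkPhysicalDeviceFeatures feats = gpu.features();
//     if (props.limits.maxImageDimension2D >= 4096 && feats.samplerAnisotropy) {
//         // device is usable for this test
//     }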

/*
 * Return a list of the available global layers.
 */
std::vector<VkLayerProperties> GetGlobalLayers() {
    VkResult err;
    std::vector<VkLayerProperties> layers;
    uint32_t layer_count;

    do {
        layer_count = 0;
        err = vkEnumerateInstanceLayerProperties(&layer_count, NULL);

        if (err == VK_SUCCESS) {
            // resize(), not reserve(): the vector must contain layer_count
            // elements for data() to be a valid destination buffer.
            layers.resize(layer_count);
            err = vkEnumerateInstanceLayerProperties(&layer_count, layers.data());
        }
    } while (err == VK_INCOMPLETE);

    assert(err == VK_SUCCESS);

    return layers;
}

/*
 * Return a list of the global extensions provided by the ICD / loader.
 */
std::vector<VkExtensionProperties> GetGlobalExtensions() { return GetGlobalExtensions(NULL); }

/*
 * Return a list of the global extensions provided by the specified layer.
 * If pLayerName is NULL, returns the extensions implemented by the loader / ICDs.
 */
std::vector<VkExtensionProperties> GetGlobalExtensions(const char *pLayerName) {
    std::vector<VkExtensionProperties> exts;
    uint32_t ext_count;
    VkResult err;

    do {
        ext_count = 0;
        err = vkEnumerateInstanceExtensionProperties(pLayerName, &ext_count, NULL);

        if (err == VK_SUCCESS) {
            exts.resize(ext_count);
            err = vkEnumerateInstanceExtensionProperties(pLayerName, &ext_count, exts.data());
        }
    } while (err == VK_INCOMPLETE);

    assert(err == VK_SUCCESS);

    return exts;
}
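
// Illustrative caller-side check (a sketch showing the intended use of the
// enumerate-twice / VK_INCOMPLETE retry pattern implemented above):
//
//     const std::vector<VkExtensionProperties> exts = vk_testing::GetGlobalExtensions();
//     const bool has_surface =
//         std::any_of(exts.begin(), exts.end(), [](const VkExtensionProperties &e) {
//             return strcmp(e.extensionName, VK_KHR_SURFACE_EXTENSION_NAME) == 0;
//         });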

/*
 * Return a list of the PhysicalDevice extensions provided by the ICD / loader.
 */
std::vector<VkExtensionProperties> PhysicalDevice::extensions() const { return extensions(NULL); }

/*
 * Return a list of the PhysicalDevice extensions provided by the specified layer.
 * If pLayerName is NULL, returns the extensions for the ICD / loader.
 */
std::vector<VkExtensionProperties> PhysicalDevice::extensions(const char *pLayerName) const {
    std::vector<VkExtensionProperties> exts;
    VkResult err;

    do {
        uint32_t extCount = 0;
        err = vkEnumerateDeviceExtensionProperties(handle(), pLayerName, &extCount, NULL);

        if (err == VK_SUCCESS) {
            exts.resize(extCount);
            err = vkEnumerateDeviceExtensionProperties(handle(), pLayerName, &extCount, exts.data());
        }
    } while (err == VK_INCOMPLETE);

    assert(err == VK_SUCCESS);

    return exts;
}

bool PhysicalDevice::set_memory_type(const uint32_t type_bits, VkMemoryAllocateInfo *info, const VkFlags properties,
                                     const VkFlags forbid) const {
    uint32_t type_mask = type_bits;
    // Search memory types to find the first index with the requested properties
    for (uint32_t i = 0; i < memory_properties_.memoryTypeCount; i++) {
        if ((type_mask & 1) == 1) {
            // Type is available; does it match the user's properties?
            if ((memory_properties_.memoryTypes[i].propertyFlags & properties) == properties &&
                (memory_properties_.memoryTypes[i].propertyFlags & forbid) == 0) {
                info->memoryTypeIndex = i;
                return true;
            }
        }
        type_mask >>= 1;
    }
    // No memory types matched; return failure
    return false;
}
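
// Illustrative usage (a sketch; "gpu" and "reqs" are assumed to come from the
// caller, e.g. reqs via Buffer::memory_requirements()):
//
//     VkMemoryAllocateInfo alloc = {};
//     alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
//     alloc.allocationSize = reqs.size;
//     if (!gpu.set_memory_type(reqs.memoryTypeBits, &alloc, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)) {
//         // no compatible memory type on this device
//     }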

/*
 * Return a list of the PhysicalDevice layers.
 */
std::vector<VkLayerProperties> PhysicalDevice::layers() const {
    std::vector<VkLayerProperties> layer_props;
    VkResult err;

    do {
        uint32_t layer_count = 0;
        err = vkEnumerateDeviceLayerProperties(handle(), &layer_count, NULL);

        if (err == VK_SUCCESS) {
            // resize(), not reserve(), so data() points at layer_count valid elements
            layer_props.resize(layer_count);
            err = vkEnumerateDeviceLayerProperties(handle(), &layer_count, layer_props.data());
        }
    } while (err == VK_INCOMPLETE);

    assert(err == VK_SUCCESS);

    return layer_props;
}

QueueCreateInfoArray::QueueCreateInfoArray(const std::vector<VkQueueFamilyProperties> &queue_props)
    : queue_info_(), queue_priorities_() {
    queue_info_.reserve(queue_props.size());

    for (uint32_t i = 0; i < (uint32_t)queue_props.size(); ++i) {
        if (queue_props[i].queueCount > 0) {
            VkDeviceQueueCreateInfo qi = {};
            qi.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
            qi.pNext = NULL;
            qi.queueFamilyIndex = i;
            qi.queueCount = queue_props[i].queueCount;
            queue_priorities_.emplace_back(qi.queueCount, 0.0f);
            // Index with back(), not [i]: families with zero queues are skipped,
            // so queue_priorities_ can be shorter than queue_props. The inner
            // vectors' buffers stay put even if the outer vector reallocates,
            // so the pointer stored here remains valid.
            qi.pQueuePriorities = queue_priorities_.back().data();
            queue_info_.push_back(qi);
        }
    }
}
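
// Illustrative usage (a sketch; "gpu" is an initialized PhysicalDevice):
//
//     vk_testing::QueueCreateInfoArray queue_info(gpu.queue_properties());
//     VkDeviceCreateInfo dev_info = {};
//     dev_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
//     dev_info.queueCreateInfoCount = queue_info.size();
//     dev_info.pQueueCreateInfos = queue_info.data();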

Device::~Device() {
    if (!initialized()) return;

    for (int i = 0; i < QUEUE_COUNT; i++) {
        for (std::vector<Queue *>::iterator it = queues_[i].begin(); it != queues_[i].end(); it++) delete *it;
        queues_[i].clear();
    }

    vkDestroyDevice(handle(), NULL);
}

void Device::init(std::vector<const char *> &extensions, VkPhysicalDeviceFeatures *features) {
    // Request all queues
    const std::vector<VkQueueFamilyProperties> queue_props = phy_.queue_properties();
    QueueCreateInfoArray queue_info(queue_props);  // reuse the list queried above
    for (uint32_t i = 0; i < (uint32_t)queue_props.size(); i++) {
        if (queue_props[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
            graphics_queue_node_index_ = i;
        }
    }
    // Only request creation with queue families that have at least one queue
    std::vector<VkDeviceQueueCreateInfo> create_queue_infos;
    auto qci = queue_info.data();
    for (uint32_t j = 0; j < queue_info.size(); ++j) {
        if (qci[j].queueCount) {
            create_queue_infos.push_back(qci[j]);
        }
    }

    enabled_extensions_ = extensions;

    VkDeviceCreateInfo dev_info = {};
    dev_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
    dev_info.pNext = NULL;
    dev_info.queueCreateInfoCount = static_cast<uint32_t>(create_queue_infos.size());
    dev_info.pQueueCreateInfos = create_queue_infos.data();
    dev_info.enabledLayerCount = 0;
    dev_info.ppEnabledLayerNames = NULL;
    dev_info.enabledExtensionCount = static_cast<uint32_t>(extensions.size());
    dev_info.ppEnabledExtensionNames = extensions.data();

    VkPhysicalDeviceFeatures all_features;
    if (features) {
        dev_info.pEnabledFeatures = features;
    } else {
        // Request all supported features enabled
        all_features = phy().features();
        dev_info.pEnabledFeatures = &all_features;
    }

    init(dev_info);
}
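
// Illustrative device bring-up (a sketch; assumes the Device constructor takes
// a VkPhysicalDevice, per its declaration in vktestbinding.h):
//
//     vk_testing::Device dev(gpu.handle());
//     std::vector<const char *> device_extensions;  // none requested
//     dev.init(device_extensions, NULL);            // all queues, all supported features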

void Device::init(const VkDeviceCreateInfo &info) {
    VkDevice dev;

    if (EXPECT(vkCreateDevice(phy_.handle(), &info, NULL, &dev) == VK_SUCCESS)) Handle::init(dev);

    init_queues();
    init_formats();
}

void Device::init_queues() {
    uint32_t queue_node_count;

    // Call with NULL data to get count
    vkGetPhysicalDeviceQueueFamilyProperties(phy_.handle(), &queue_node_count, NULL);
    EXPECT(queue_node_count >= 1);

    std::vector<VkQueueFamilyProperties> queue_props(queue_node_count);
    vkGetPhysicalDeviceQueueFamilyProperties(phy_.handle(), &queue_node_count, queue_props.data());

    for (uint32_t i = 0; i < queue_node_count; i++) {
        VkQueue queue;

        for (uint32_t j = 0; j < queue_props[i].queueCount; j++) {
            // TODO: Need to add support for separate MEMMGR and work queues,
            // including synchronization
            vkGetDeviceQueue(handle(), i, j, &queue);

            if (queue_props[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
                queues_[GRAPHICS].push_back(new Queue(queue, i));
            }

            if (queue_props[i].queueFlags & VK_QUEUE_COMPUTE_BIT) {
                queues_[COMPUTE].push_back(new Queue(queue, i));
            }

            if (queue_props[i].queueFlags & VK_QUEUE_TRANSFER_BIT) {
                queues_[DMA].push_back(new Queue(queue, i));
            }
        }
    }

    EXPECT(!queues_[GRAPHICS].empty() || !queues_[COMPUTE].empty());
}

void Device::init_formats() {
    for (int f = VK_FORMAT_BEGIN_RANGE; f <= VK_FORMAT_END_RANGE; f++) {
        const VkFormat fmt = static_cast<VkFormat>(f);
        const VkFormatProperties props = format_properties(fmt);

        if (props.linearTilingFeatures) {
            const Format tmp = {fmt, VK_IMAGE_TILING_LINEAR, props.linearTilingFeatures};
            formats_.push_back(tmp);
        }

        if (props.optimalTilingFeatures) {
            const Format tmp = {fmt, VK_IMAGE_TILING_OPTIMAL, props.optimalTilingFeatures};
            formats_.push_back(tmp);
        }
    }

    EXPECT(!formats_.empty());
}

bool Device::IsEnbledExtension(const char *extension) {
    const auto is_x = [&extension](const char *enabled_extension) { return strcmp(extension, enabled_extension) == 0; };
    return std::any_of(enabled_extensions_.begin(), enabled_extensions_.end(), is_x);
}

VkFormatProperties Device::format_properties(VkFormat format) {
    VkFormatProperties data;
    vkGetPhysicalDeviceFormatProperties(phy().handle(), format, &data);

    return data;
}

void Device::wait() { EXPECT(vkDeviceWaitIdle(handle()) == VK_SUCCESS); }

VkResult Device::wait(const std::vector<const Fence *> &fences, bool wait_all, uint64_t timeout) {
    const std::vector<VkFence> fence_handles = MakeVkHandles<VkFence>(fences);
    VkResult err = vkWaitForFences(handle(), fence_handles.size(), fence_handles.data(), wait_all, timeout);
    EXPECT(err == VK_SUCCESS || err == VK_TIMEOUT);

    return err;
}

void Device::update_descriptor_sets(const std::vector<VkWriteDescriptorSet> &writes,
                                    const std::vector<VkCopyDescriptorSet> &copies) {
    vkUpdateDescriptorSets(handle(), writes.size(), writes.data(), copies.size(), copies.data());
}
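
// Illustrative descriptor update (a sketch; "dev", "set", and "buf_info" are
// assumed caller-provided; buf_info is a filled-in VkDescriptorBufferInfo):
//
//     VkWriteDescriptorSet w = {};
//     w.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
//     w.dstSet = set;
//     w.descriptorCount = 1;
//     w.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
//     w.pBufferInfo = &buf_info;
//     dev.update_descriptor_sets(std::vector<VkWriteDescriptorSet>(1, w),
//                                std::vector<VkCopyDescriptorSet>());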

void Queue::submit(const std::vector<const CommandBuffer *> &cmds, Fence &fence) {
    const std::vector<VkCommandBuffer> cmd_handles = MakeVkHandles<VkCommandBuffer>(cmds);
    VkSubmitInfo submit_info;
    submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submit_info.pNext = NULL;
    submit_info.waitSemaphoreCount = 0;
    submit_info.pWaitSemaphores = NULL;
    submit_info.pWaitDstStageMask = NULL;
    submit_info.commandBufferCount = (uint32_t)cmd_handles.size();
    submit_info.pCommandBuffers = cmd_handles.data();
    submit_info.signalSemaphoreCount = 0;
    submit_info.pSignalSemaphores = NULL;

    EXPECT(vkQueueSubmit(handle(), 1, &submit_info, fence.handle()) == VK_SUCCESS);
}

void Queue::submit(const CommandBuffer &cmd, Fence &fence) { submit(std::vector<const CommandBuffer *>(1, &cmd), fence); }

void Queue::submit(const CommandBuffer &cmd) {
    // An uninitialized Fence's handle is VK_NULL_HANDLE, i.e. submit without a fence
    Fence fence;
    submit(cmd, fence);
}

void Queue::wait() { EXPECT(vkQueueWaitIdle(handle()) == VK_SUCCESS); }

DeviceMemory::~DeviceMemory() {
    if (initialized()) vkFreeMemory(device(), handle(), NULL);
}

void DeviceMemory::init(const Device &dev, const VkMemoryAllocateInfo &info) {
    NON_DISPATCHABLE_HANDLE_INIT(vkAllocateMemory, dev, &info);
}

const void *DeviceMemory::map(VkFlags flags) const {
    void *data;
    if (!EXPECT(vkMapMemory(device(), handle(), 0, VK_WHOLE_SIZE, flags, &data) == VK_SUCCESS)) data = NULL;

    return data;
}

void *DeviceMemory::map(VkFlags flags) {
    void *data;
    if (!EXPECT(vkMapMemory(device(), handle(), 0, VK_WHOLE_SIZE, flags, &data) == VK_SUCCESS)) data = NULL;

    return data;
}

void DeviceMemory::unmap() const { vkUnmapMemory(device(), handle()); }
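
// Illustrative map/unmap round trip (a sketch; "mem" is a DeviceMemory bound to
// host-visible memory and "alloc_size" is the size it was allocated with):
//
//     void *data = mem.map(0);  // map flags are reserved and must be 0
//     if (data) {
//         memset(data, 0, (size_t)alloc_size);
//         mem.unmap();
//     }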

VkMemoryAllocateInfo DeviceMemory::get_resource_alloc_info(const Device &dev, const VkMemoryRequirements &reqs,
                                                           VkMemoryPropertyFlags mem_props) {
    // First verify that some memory type offers the requested properties at all
    VkPhysicalDeviceMemoryProperties dev_mem_props = dev.phy().memory_properties();
    uint32_t mem_type_index = 0;
    for (mem_type_index = 0; mem_type_index < dev_mem_props.memoryTypeCount; ++mem_type_index) {
        if (mem_props == (mem_props & dev_mem_props.memoryTypes[mem_type_index].propertyFlags)) break;
    }
    // If we exhausted the types, this device doesn't have the memory we need
    assert(mem_type_index < dev_mem_props.memoryTypeCount);
    VkMemoryAllocateInfo info = alloc_info(reqs.size, mem_type_index);
    // set_memory_type() overwrites memoryTypeIndex with a type that also
    // satisfies reqs.memoryTypeBits
    EXPECT(dev.phy().set_memory_type(reqs.memoryTypeBits, &info, mem_props));
    return info;
}

NON_DISPATCHABLE_HANDLE_DTOR(Fence, vkDestroyFence)

void Fence::init(const Device &dev, const VkFenceCreateInfo &info) { NON_DISPATCHABLE_HANDLE_INIT(vkCreateFence, dev, &info); }

NON_DISPATCHABLE_HANDLE_DTOR(Semaphore, vkDestroySemaphore)

void Semaphore::init(const Device &dev, const VkSemaphoreCreateInfo &info) {
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateSemaphore, dev, &info);
}

NON_DISPATCHABLE_HANDLE_DTOR(Event, vkDestroyEvent)

void Event::init(const Device &dev, const VkEventCreateInfo &info) { NON_DISPATCHABLE_HANDLE_INIT(vkCreateEvent, dev, &info); }

void Event::set() { EXPECT(vkSetEvent(device(), handle()) == VK_SUCCESS); }

void Event::reset() { EXPECT(vkResetEvent(device(), handle()) == VK_SUCCESS); }

NON_DISPATCHABLE_HANDLE_DTOR(QueryPool, vkDestroyQueryPool)

void QueryPool::init(const Device &dev, const VkQueryPoolCreateInfo &info) {
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateQueryPool, dev, &info);
}

VkResult QueryPool::results(uint32_t first, uint32_t count, size_t size, void *data, size_t stride) {
    VkResult err = vkGetQueryPoolResults(device(), handle(), first, count, size, data, stride, 0);
    EXPECT(err == VK_SUCCESS || err == VK_NOT_READY);

    return err;
}
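
// Illustrative readback (a sketch; "pool" holds QUERY_COUNT queries, a
// hypothetical constant):
//
//     uint32_t results[QUERY_COUNT];  // 32-bit results: results() passes flags == 0
//     VkResult res = pool.results(0, QUERY_COUNT, sizeof(results), results, sizeof(uint32_t));
//     if (res == VK_NOT_READY) {
//         // queries still in flight; wait on the queue or fence and retry
//     }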

NON_DISPATCHABLE_HANDLE_DTOR(Buffer, vkDestroyBuffer)

void Buffer::init(const Device &dev, const VkBufferCreateInfo &info, VkMemoryPropertyFlags mem_props) {
    init_no_mem(dev, info);

    internal_mem_.init(dev, DeviceMemory::get_resource_alloc_info(dev, memory_requirements(), mem_props));
    bind_memory(internal_mem_, 0);
}

void Buffer::init_no_mem(const Device &dev, const VkBufferCreateInfo &info) {
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateBuffer, dev, &info);
    create_info_ = info;
}

VkMemoryRequirements Buffer::memory_requirements() const {
    VkMemoryRequirements reqs;

    vkGetBufferMemoryRequirements(device(), handle(), &reqs);

    return reqs;
}

void Buffer::bind_memory(const DeviceMemory &mem, VkDeviceSize mem_offset) {
    EXPECT(vkBindBufferMemory(device(), handle(), mem.handle(), mem_offset) == VK_SUCCESS);
}
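
// Illustrative buffer creation (a sketch; "dev" is an initialized Device):
//
//     VkBufferCreateInfo bci = {};
//     bci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
//     bci.size = 4096;
//     bci.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
//     vk_testing::Buffer buf;
//     buf.init(dev, bci, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);  // create + allocate + bind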

NON_DISPATCHABLE_HANDLE_DTOR(BufferView, vkDestroyBufferView)

void BufferView::init(const Device &dev, const VkBufferViewCreateInfo &info) {
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateBufferView, dev, &info);
}

NON_DISPATCHABLE_HANDLE_DTOR(Image, vkDestroyImage)

void Image::init(const Device &dev, const VkImageCreateInfo &info, VkMemoryPropertyFlags mem_props) {
    init_no_mem(dev, info);

    if (initialized()) {
        internal_mem_.init(dev, DeviceMemory::get_resource_alloc_info(dev, memory_requirements(), mem_props));
        bind_memory(internal_mem_, 0);
    }
}

void Image::init_no_mem(const Device &dev, const VkImageCreateInfo &info) {
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateImage, dev, &info);
    if (initialized()) {
        init_info(dev, info);
    }
}

void Image::init_info(const Device &dev, const VkImageCreateInfo &info) {
    create_info_ = info;

    for (std::vector<Device::Format>::const_iterator it = dev.formats().begin(); it != dev.formats().end(); it++) {
        if (it->format == create_info_.format && it->tiling == create_info_.tiling) {
            format_features_ = it->features;
            break;
        }
    }
}

VkMemoryRequirements Image::memory_requirements() const {
    VkMemoryRequirements reqs;

    vkGetImageMemoryRequirements(device(), handle(), &reqs);

    return reqs;
}

void Image::bind_memory(const DeviceMemory &mem, VkDeviceSize mem_offset) {
    EXPECT(vkBindImageMemory(device(), handle(), mem.handle(), mem_offset) == VK_SUCCESS);
}

VkSubresourceLayout Image::subresource_layout(const VkImageSubresource &subres) const {
    VkSubresourceLayout data;
    vkGetImageSubresourceLayout(device(), handle(), &subres, &data);

    return data;
}

VkSubresourceLayout Image::subresource_layout(const VkImageSubresourceLayers &subrescopy) const {
    VkSubresourceLayout data;
    VkImageSubresource subres = subresource(subrescopy.aspectMask, subrescopy.mipLevel, subrescopy.baseArrayLayer);
    vkGetImageSubresourceLayout(device(), handle(), &subres, &data);

    return data;
}

bool Image::transparent() const {
    return (create_info_.tiling == VK_IMAGE_TILING_LINEAR && create_info_.samples == VK_SAMPLE_COUNT_1_BIT &&
            !(create_info_.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)));
}

NON_DISPATCHABLE_HANDLE_DTOR(ImageView, vkDestroyImageView)

void ImageView::init(const Device &dev, const VkImageViewCreateInfo &info) {
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateImageView, dev, &info);
}

NON_DISPATCHABLE_HANDLE_DTOR(ShaderModule, vkDestroyShaderModule)

void ShaderModule::init(const Device &dev, const VkShaderModuleCreateInfo &info) {
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateShaderModule, dev, &info);
}

VkResult ShaderModule::init_try(const Device &dev, const VkShaderModuleCreateInfo &info) {
    VkShaderModule mod;

    VkResult err = vkCreateShaderModule(dev.handle(), &info, NULL, &mod);
    if (err == VK_SUCCESS) NonDispHandle::init(dev.handle(), mod);

    return err;
}

NON_DISPATCHABLE_HANDLE_DTOR(Pipeline, vkDestroyPipeline)

void Pipeline::init(const Device &dev, const VkGraphicsPipelineCreateInfo &info) {
    VkPipelineCache cache;
    VkPipelineCacheCreateInfo ci = {};
    ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
    VkResult err = vkCreatePipelineCache(dev.handle(), &ci, NULL, &cache);
    if (err == VK_SUCCESS) {
        NON_DISPATCHABLE_HANDLE_INIT(vkCreateGraphicsPipelines, dev, cache, 1, &info);
        vkDestroyPipelineCache(dev.handle(), cache, NULL);
    }
}

VkResult Pipeline::init_try(const Device &dev, const VkGraphicsPipelineCreateInfo &info) {
    VkPipeline pipe;
    VkPipelineCache cache;
    VkPipelineCacheCreateInfo ci = {};
    ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
    VkResult err = vkCreatePipelineCache(dev.handle(), &ci, NULL, &cache);
    EXPECT(err == VK_SUCCESS);
    if (err == VK_SUCCESS) {
        err = vkCreateGraphicsPipelines(dev.handle(), cache, 1, &info, NULL, &pipe);
        if (err == VK_SUCCESS) {
            NonDispHandle::init(dev.handle(), pipe);
        }
        vkDestroyPipelineCache(dev.handle(), cache, NULL);
    }

    return err;
}

void Pipeline::init(const Device &dev, const VkComputePipelineCreateInfo &info) {
    VkPipelineCache cache;
    VkPipelineCacheCreateInfo ci = {};
    ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
    VkResult err = vkCreatePipelineCache(dev.handle(), &ci, NULL, &cache);
    if (err == VK_SUCCESS) {
        NON_DISPATCHABLE_HANDLE_INIT(vkCreateComputePipelines, dev, cache, 1, &info);
        vkDestroyPipelineCache(dev.handle(), cache, NULL);
    }
}

NON_DISPATCHABLE_HANDLE_DTOR(PipelineLayout, vkDestroyPipelineLayout)

void PipelineLayout::init(const Device &dev, VkPipelineLayoutCreateInfo &info,
                          const std::vector<const DescriptorSetLayout *> &layouts) {
    const std::vector<VkDescriptorSetLayout> layout_handles = MakeVkHandles<VkDescriptorSetLayout>(layouts);
    info.setLayoutCount = layout_handles.size();
    // Note: pSetLayouts points at a local vector; the caller must not reuse
    // info after this function returns.
    info.pSetLayouts = layout_handles.data();

    NON_DISPATCHABLE_HANDLE_INIT(vkCreatePipelineLayout, dev, &info);
}

NON_DISPATCHABLE_HANDLE_DTOR(Sampler, vkDestroySampler)

void Sampler::init(const Device &dev, const VkSamplerCreateInfo &info) {
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateSampler, dev, &info);
}

NON_DISPATCHABLE_HANDLE_DTOR(DescriptorSetLayout, vkDestroyDescriptorSetLayout)

void DescriptorSetLayout::init(const Device &dev, const VkDescriptorSetLayoutCreateInfo &info) {
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateDescriptorSetLayout, dev, &info);
}

NON_DISPATCHABLE_HANDLE_DTOR(DescriptorPool, vkDestroyDescriptorPool)

void DescriptorPool::init(const Device &dev, const VkDescriptorPoolCreateInfo &info) {
    // Remember whether sets from this pool may be individually freed
    setDynamicUsage(info.flags & VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT);
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateDescriptorPool, dev, &info);
}

void DescriptorPool::reset() { EXPECT(vkResetDescriptorPool(device(), handle(), 0) == VK_SUCCESS); }

std::vector<DescriptorSet *> DescriptorPool::alloc_sets(const Device &dev,
                                                        const std::vector<const DescriptorSetLayout *> &layouts) {
    const std::vector<VkDescriptorSetLayout> layout_handles = MakeVkHandles<VkDescriptorSetLayout>(layouts);

    std::vector<VkDescriptorSet> set_handles;
    set_handles.resize(layout_handles.size());

    VkDescriptorSetAllocateInfo alloc_info = {};
    alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
    alloc_info.descriptorSetCount = layout_handles.size();
    alloc_info.descriptorPool = handle();
    alloc_info.pSetLayouts = layout_handles.data();
    VkResult err = vkAllocateDescriptorSets(device(), &alloc_info, set_handles.data());
    EXPECT(err == VK_SUCCESS);

    std::vector<DescriptorSet *> sets;
    if (err == VK_SUCCESS) {
        // Only wrap the handles if allocation succeeded; otherwise return an empty list
        for (std::vector<VkDescriptorSet>::const_iterator it = set_handles.begin(); it != set_handles.end(); it++) {
            // do descriptor sets need memories bound?
            DescriptorSet *descriptorSet = new DescriptorSet(dev, this, *it);
            sets.push_back(descriptorSet);
        }
    }
    return sets;
}

std::vector<DescriptorSet *> DescriptorPool::alloc_sets(const Device &dev, const DescriptorSetLayout &layout, uint32_t count) {
    return alloc_sets(dev, std::vector<const DescriptorSetLayout *>(count, &layout));
}

DescriptorSet *DescriptorPool::alloc_sets(const Device &dev, const DescriptorSetLayout &layout) {
    std::vector<DescriptorSet *> set = alloc_sets(dev, layout, 1);
    return (set.empty()) ? NULL : set[0];
}
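
// Illustrative allocation (a sketch; "dev", "pool", and "set_layout" are
// assumed caller-provided):
//
//     vk_testing::DescriptorSet *set = pool.alloc_sets(dev, set_layout);
//     if (set) {
//         // write descriptors, bind in a command buffer, ...
//     }
//     delete set;  // frees back to the pool only if FREE_DESCRIPTOR_SET_BIT was set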

DescriptorSet::~DescriptorSet() {
    if (initialized()) {
        // Only call vkFree* on sets allocated from pool with usage *_DYNAMIC
        if (containing_pool_->getDynamicUsage()) {
            VkDescriptorSet sets[1] = {handle()};
            EXPECT(vkFreeDescriptorSets(device(), containing_pool_->GetObj(), 1, sets) == VK_SUCCESS);
        }
    }
}

NON_DISPATCHABLE_HANDLE_DTOR(CommandPool, vkDestroyCommandPool)

void CommandPool::init(const Device &dev, const VkCommandPoolCreateInfo &info) {
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateCommandPool, dev, &info);
}

CommandBuffer::~CommandBuffer() {
    if (initialized()) {
        VkCommandBuffer cmds[] = {handle()};
        vkFreeCommandBuffers(dev_handle_, cmd_pool_, 1, cmds);
    }
}

void CommandBuffer::init(const Device &dev, const VkCommandBufferAllocateInfo &info) {
    VkCommandBuffer cmd;

    // Make sure commandPool is set
    assert(info.commandPool);

    if (EXPECT(vkAllocateCommandBuffers(dev.handle(), &info, &cmd) == VK_SUCCESS)) {
        Handle::init(cmd);
        dev_handle_ = dev.handle();
        cmd_pool_ = info.commandPool;
    }
}

void CommandBuffer::begin(const VkCommandBufferBeginInfo *info) { EXPECT(vkBeginCommandBuffer(handle(), info) == VK_SUCCESS); }

void CommandBuffer::begin() {
    VkCommandBufferBeginInfo info = {};
    VkCommandBufferInheritanceInfo hinfo = {};
    info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
    info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    info.pInheritanceInfo = &hinfo;
    hinfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
    hinfo.pNext = NULL;
    hinfo.renderPass = VK_NULL_HANDLE;
    hinfo.subpass = 0;
    hinfo.framebuffer = VK_NULL_HANDLE;
    hinfo.occlusionQueryEnable = VK_FALSE;
    hinfo.queryFlags = 0;
    hinfo.pipelineStatistics = 0;

    begin(&info);
}

void CommandBuffer::end() { EXPECT(vkEndCommandBuffer(handle()) == VK_SUCCESS); }

void CommandBuffer::reset(VkCommandBufferResetFlags flags) { EXPECT(vkResetCommandBuffer(handle(), flags) == VK_SUCCESS); }
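
// Illustrative record/submit flow (a sketch; "cmd" and "queue" are assumed
// caller-provided, e.g. a CommandBuffer and a Queue* from the Device):
//
//     cmd.begin();         // defaults to ONE_TIME_SUBMIT usage
//     // record vkCmd* calls against cmd.handle() here
//     cmd.end();
//     queue->submit(cmd);  // submits without a signaling fence
//     queue->wait();       // idle the queue before reading results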

} // namespace vk_testing